diff --git a/Documentation/ABI/testing/sysfs-bus-hsi b/Documentation/ABI/testing/sysfs-bus-hsi
new file mode 100644
index 000000000000..1b1b282a99e1
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-hsi
@@ -0,0 +1,19 @@
+What: /sys/bus/hsi
+Date: April 2012
+KernelVersion: 3.4
+Contact: Carlos Chinea
+Description:
+ High Speed Synchronous Serial Interface (HSI) is a
+ serial interface mainly used for connecting application
+ engines (APE) with cellular modem engines (CMT) in cellular
+ handsets.
+ The bus will be populated with devices (hsi_clients) representing
+ the protocols available in the system. Bus drivers implement
+ those protocols.
+
+What: /sys/bus/hsi/devices/.../modalias
+Date: April 2012
+KernelVersion: 3.4
+Contact: Carlos Chinea
+Description: Stores the same MODALIAS value emitted by uevent
+ Format: hsi:
diff --git a/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml b/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
index 3fd3ce5df270..5274c24d11e0 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-nv12m.xml
@@ -1,6 +1,6 @@
- V4L2_PIX_FMT_NV12M ('NV12M')
+ V4L2_PIX_FMT_NV12M ('NM12')
 &manvol;
diff --git a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml b/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
index 9957863daf18..60308f1eefdf 100644
--- a/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt-yuv420m.xml
@@ -1,6 +1,6 @@
- V4L2_PIX_FMT_YUV420M ('YU12M')
+ V4L2_PIX_FMT_YUV420M ('YM12')
 &manvol;
diff --git a/Documentation/devicetree/bindings/ata/calxeda-sata.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
similarity index 90%
rename from Documentation/devicetree/bindings/ata/calxeda-sata.txt
rename to Documentation/devicetree/bindings/ata/ahci-platform.txt
index 79caa5651f53..8bb8a76d42e8 100644
--- a/Documentation/devicetree/bindings/ata/calxeda-sata.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -1,10 +1,10 @@
-* Calxeda SATA Controller
+* AHCI SATA Controller

 SATA nodes are defined to describe on-chip Serial ATA controllers.
 Each SATA controller should have its own node.

 Required properties:
-- compatible : compatible list, contains "calxeda,hb-ahci"
+- compatible : compatible list, contains "calxeda,hb-ahci" or "snps,spear-ahci"
 - interrupts :
 - reg :

@@ -14,4 +14,3 @@ Example:
 reg = <0xffe08000 0x1000>;
 interrupts = <115>;
 };
-
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.txt b/Documentation/devicetree/bindings/sound/sgtl5000.txt
index 2c3cd413f042..9cc44449508d 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -3,6 +3,8 @@
 Required properties:
 - compatible : "fsl,sgtl5000".

+- reg : the I2C address of the device
+
 Example:

 codec: sgtl5000@0a {
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index bd80ba5847d2..1619a8c80873 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -147,7 +147,7 @@ tcp_adv_win_scale - INTEGER
 (if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
 if it is <= 0.
 Possible values are [-31, 31], inclusive.
- Default: 2
+ Default: 1

 tcp_allowed_congestion_control - STRING
 Show/set the congestion control choices available to non-privileged
@@ -410,7 +410,7 @@ tcp_rmem - vector of 3 INTEGERs: min, default, max
 net.core.rmem_max. Calling setsockopt() with SO_RCVBUF disables
 automatic tuning of that socket's receive buffer size, in which
 case this value is ignored.
- Default: between 87380B and 4MB, depending on RAM size.
+ Default: between 87380B and 6MB, depending on RAM size.

 tcp_sack - BOOLEAN
 Enable select acknowledgments (SACKS).
diff --git a/Documentation/power/freezing-of-tasks.txt b/Documentation/power/freezing-of-tasks.txt
index ec715cd78fbb..6ec291ea1c78 100644
--- a/Documentation/power/freezing-of-tasks.txt
+++ b/Documentation/power/freezing-of-tasks.txt
@@ -9,7 +9,7 @@ architectures).

 II. How does it work?

-There are four per-task flags used for that, PF_NOFREEZE, PF_FROZEN, TIF_FREEZE
+There are three per-task flags used for that, PF_NOFREEZE, PF_FROZEN
 and PF_FREEZER_SKIP (the last one is auxiliary). The tasks that have
 PF_NOFREEZE unset (all user space processes and some kernel threads) are
 regarded as 'freezable' and treated in a special way before the system enters a
@@ -17,30 +17,31 @@ suspend state as well as before a hibernation image is created (in what follows
 we only consider hibernation, but the description also applies to suspend).

 Namely, as the first step of the hibernation procedure the function
-freeze_processes() (defined in kernel/power/process.c) is called. It executes
-try_to_freeze_tasks() that sets TIF_FREEZE for all of the freezable tasks and
-either wakes them up, if they are kernel threads, or sends fake signals to them,
-if they are user space processes. A task that has TIF_FREEZE set, should react
-to it by calling the function called __refrigerator() (defined in
-kernel/freezer.c), which sets the task's PF_FROZEN flag, changes its state
-to TASK_UNINTERRUPTIBLE and makes it loop until PF_FROZEN is cleared for it.
-Then, we say that the task is 'frozen' and therefore the set of functions
-handling this mechanism is referred to as 'the freezer' (these functions are
-defined in kernel/power/process.c, kernel/freezer.c & include/linux/freezer.h).
-User space processes are generally frozen before kernel threads.
+freeze_processes() (defined in kernel/power/process.c) is called. A system-wide
+variable system_freezing_cnt (as opposed to a per-task flag) is used to indicate
+whether the system is to undergo a freezing operation. And freeze_processes()
+sets this variable. After this, it executes try_to_freeze_tasks() that sends a
+fake signal to all user space processes, and wakes up all the kernel threads.
+All freezable tasks must react to that by calling try_to_freeze(), which
+results in a call to __refrigerator() (defined in kernel/freezer.c), which sets
+the task's PF_FROZEN flag, changes its state to TASK_UNINTERRUPTIBLE and makes
+it loop until PF_FROZEN is cleared for it. Then, we say that the task is
+'frozen' and therefore the set of functions handling this mechanism is referred
+to as 'the freezer' (these functions are defined in kernel/power/process.c,
+kernel/freezer.c & include/linux/freezer.h). User space processes are generally
+frozen before kernel threads.

 __refrigerator() must not be called directly. Instead, use the
 try_to_freeze() function (defined in include/linux/freezer.h), that checks
-the task's TIF_FREEZE flag and makes the task enter __refrigerator() if the
-flag is set.
+if the task is to be frozen and makes the task enter __refrigerator().
For user space processes try_to_freeze() is called automatically from the signal-handling code, but the freezable kernel threads need to call it explicitly in suitable places or use the wait_event_freezable() or wait_event_freezable_timeout() macros (defined in include/linux/freezer.h) -that combine interruptible sleep with checking if TIF_FREEZE is set and calling -try_to_freeze(). The main loop of a freezable kernel thread may look like the -following one: +that combine interruptible sleep with checking if the task is to be frozen and +calling try_to_freeze(). The main loop of a freezable kernel thread may look +like the following one: set_freezable(); do { @@ -53,7 +54,7 @@ following one: (from drivers/usb/core/hub.c::hub_thread()). If a freezable kernel thread fails to call try_to_freeze() after the freezer has -set TIF_FREEZE for it, the freezing of tasks will fail and the entire +initiated a freezing operation, the freezing of tasks will fail and the entire hibernation operation will be cancelled. For this reason, freezable kernel threads must call try_to_freeze() somewhere or use one of the wait_event_freezable() and wait_event_freezable_timeout() macros. diff --git a/Documentation/security/keys.txt b/Documentation/security/keys.txt index 787717091421..d389acd31e19 100644 --- a/Documentation/security/keys.txt +++ b/Documentation/security/keys.txt @@ -123,7 +123,7 @@ KEY SERVICE OVERVIEW The key service provides a number of features besides keys: - (*) The key service defines two special key types: + (*) The key service defines three special key types: (+) "keyring" @@ -137,6 +137,18 @@ The key service provides a number of features besides keys: blobs of data. These can be created, updated and read by userspace, and aren't intended for use by kernel services. + (+) "logon" + + Like a "user" key, a "logon" key has a payload that is an arbitrary + blob of data. It is intended as a place to store secrets which are + accessible to the kernel but not to userspace programs. + + The description can be arbitrary, but must be prefixed with a non-zero + length string that describes the key "subclass". The subclass is + separated from the rest of the description by a ':'. "logon" keys can + be created and updated from userspace, but the payload is only + readable from kernel space. + (*) Each process subscribes to three keyrings: a thread-specific keyring, a process-specific keyring, and a session-specific keyring. diff --git a/MAINTAINERS b/MAINTAINERS index c004782a1359..879520452133 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1969,10 +1969,9 @@ S: Maintained F: drivers/net/ethernet/ti/cpmac.c CPU FREQUENCY DRIVERS -M: Dave Jones +M: Rafael J. 
Wysocki L: cpufreq@vger.kernel.org -W: http://www.codemonkey.org.uk/projects/cpufreq/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git +L: linux-pm@vger.kernel.org S: Maintained F: drivers/cpufreq/ F: include/linux/cpufreq.h @@ -3593,6 +3592,7 @@ S: Supported F: drivers/net/wireless/iwlegacy/ INTEL WIRELESS WIFI LINK (iwlwifi) +M: Johannes Berg M: Wey-Yi Guy M: Intel Linux Wireless L: linux-wireless@vger.kernel.org @@ -4037,6 +4037,7 @@ F: Documentation/scsi/53c700.txt F: drivers/scsi/53c700* LED SUBSYSTEM +M: Bryan Wu M: Richard Purdie S: Maintained F: drivers/leds/ @@ -5892,11 +5893,11 @@ F: Documentation/scsi/st.txt F: drivers/scsi/st* SCTP PROTOCOL -M: Vlad Yasevich +M: Vlad Yasevich M: Sridhar Samudrala L: linux-sctp@vger.kernel.org W: http://lksctp.sourceforge.net -S: Supported +S: Maintained F: Documentation/networking/sctp.txt F: include/linux/sctp.h F: include/net/sctp/ @@ -7587,8 +7588,8 @@ F: Documentation/filesystems/xfs.txt F: fs/xfs/ XILINX AXI ETHERNET DRIVER -M: Ariane Keller -M: Daniel Borkmann +M: Anirudha Sarangi +M: John Linn S: Maintained F: drivers/net/ethernet/xilinx/xilinx_axienet* diff --git a/Makefile b/Makefile index f6578f47e21e..48bd1f50dcc3 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 4 SUBLEVEL = 0 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc7 NAME = Saber-toothed Squirrel # *DOCUMENTATION* diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 56a4df952fb0..22e58a99f38b 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -477,7 +477,7 @@ config ALPHA_BROKEN_IRQ_MASK config VGA_HOSE bool - depends on ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI + depends on VGA_CONSOLE && (ALPHA_GENERIC || ALPHA_TITAN || ALPHA_MARVEL || ALPHA_TSUNAMI) default y help Support VGA on an arbitrary hose; needed for several platforms diff --git a/arch/alpha/include/asm/rtc.h b/arch/alpha/include/asm/rtc.h index 1f7fba671ae6..d70408d36677 100644 --- a/arch/alpha/include/asm/rtc.h +++ b/arch/alpha/include/asm/rtc.h @@ -1,14 +1,10 @@ #ifndef _ALPHA_RTC_H #define _ALPHA_RTC_H -#if defined(CONFIG_ALPHA_GENERIC) +#if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) \ + || defined(CONFIG_ALPHA_GENERIC) # define get_rtc_time alpha_mv.rtc_get_time # define set_rtc_time alpha_mv.rtc_set_time -#else -# if defined(CONFIG_ALPHA_MARVEL) && defined(CONFIG_SMP) -# define get_rtc_time marvel_get_rtc_time -# define set_rtc_time marvel_set_rtc_time -# endif #endif #include diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c index 5e7c28f92f19..61893d7bdda5 100644 --- a/arch/alpha/kernel/core_tsunami.c +++ b/arch/alpha/kernel/core_tsunami.c @@ -11,6 +11,7 @@ #include #undef __EXTERN_INLINE +#include #include #include #include diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c index 14a4b6a7cf59..407accc80877 100644 --- a/arch/alpha/kernel/sys_marvel.c +++ b/arch/alpha/kernel/sys_marvel.c @@ -317,7 +317,7 @@ marvel_init_irq(void) } static int -marvel_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin) { struct pci_controller *hose = dev->sysdata; struct io7_port *io7_port = hose->sysdata; diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index cf006d40342c..36586dba6fa6 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1186,6 +1186,15 @@ if !MMU source "arch/arm/Kconfig-nommu" endif +config ARM_ERRATA_326103 + bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory" + depends on CPU_V6 + help 
+ Executing a SWP instruction to read-only memory does not set bit 11 + of the FSR on the ARM 1136 prior to r1p0. This causes the kernel to + treat the access as a read, preventing a COW from occurring and + causing the faulting task to livelock. + config ARM_ERRATA_411920 bool "ARM errata: Invalidation of the Instruction Cache operation can fail" depends on CPU_V6 || CPU_V6K diff --git a/arch/arm/boot/dts/msm8660-surf.dts b/arch/arm/boot/dts/msm8660-surf.dts index 15ded0deaa79..45bc4bb04e57 100644 --- a/arch/arm/boot/dts/msm8660-surf.dts +++ b/arch/arm/boot/dts/msm8660-surf.dts @@ -10,7 +10,7 @@ intc: interrupt-controller@02080000 { compatible = "qcom,msm-8660-qgic"; interrupt-controller; - #interrupt-cells = <1>; + #interrupt-cells = <3>; reg = < 0x02080000 0x1000 >, < 0x02081000 0x1000 >; }; @@ -19,6 +19,6 @@ compatible = "qcom,msm-hsuart", "qcom,msm-uart"; reg = <0x19c40000 0x1000>, <0x19c00000 0x1000>; - interrupts = <195>; + interrupts = <0 195 0x0>; }; }; diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts index 0b32925f2147..e2fe3195c0d1 100644 --- a/arch/arm/boot/dts/versatile-ab.dts +++ b/arch/arm/boot/dts/versatile-ab.dts @@ -173,7 +173,7 @@ mmc@5000 { compatible = "arm,primecell"; reg = < 0x5000 0x1000>; - interrupts = <22>; + interrupts = <22 34>; }; kmi@6000 { compatible = "arm,pl050", "arm,primecell"; diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts index 166461073b78..7e8175269064 100644 --- a/arch/arm/boot/dts/versatile-pb.dts +++ b/arch/arm/boot/dts/versatile-pb.dts @@ -41,7 +41,7 @@ mmc@b000 { compatible = "arm,primecell"; reg = <0xb000 0x1000>; - interrupts = <23>; + interrupts = <23 34>; }; }; }; diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig index b5ac644e12af..6b31cb60daab 100644 --- a/arch/arm/configs/imx_v4_v5_defconfig +++ b/arch/arm/configs/imx_v4_v5_defconfig @@ -112,6 +112,7 @@ CONFIG_WATCHDOG=y CONFIG_IMX2_WDT=y CONFIG_MFD_MC13XXX=y CONFIG_REGULATOR=y +CONFIG_REGULATOR_FIXED_VOLTAGE=y CONFIG_REGULATOR_MC13783=y CONFIG_REGULATOR_MC13892=y CONFIG_FB=y diff --git a/arch/arm/configs/mini2440_defconfig b/arch/arm/configs/mini2440_defconfig index 42da9183acc8..082175c54e7c 100644 --- a/arch/arm/configs/mini2440_defconfig +++ b/arch/arm/configs/mini2440_defconfig @@ -14,6 +14,8 @@ CONFIG_MODULE_FORCE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_BLK_DEV_INTEGRITY=y CONFIG_ARCH_S3C24XX=y +# CONFIG_CPU_S3C2410 is not set +CONFIG_CPU_S3C2440=y CONFIG_S3C_ADC=y CONFIG_S3C24XX_PWM=y CONFIG_MACH_MINI2440=y diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig index 889d73ac1ae1..7e84f453e8a6 100644 --- a/arch/arm/configs/u8500_defconfig +++ b/arch/arm/configs/u8500_defconfig @@ -8,8 +8,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_LBDAF is not set # CONFIG_BLK_DEV_BSG is not set CONFIG_ARCH_U8500=y -CONFIG_UX500_SOC_DB5500=y -CONFIG_UX500_SOC_DB8500=y CONFIG_MACH_HREFV60=y CONFIG_MACH_SNOWBALL=y CONFIG_MACH_U5500=y @@ -39,7 +37,6 @@ CONFIG_CAIF=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=65536 -CONFIG_MISC_DEVICES=y CONFIG_AB8500_PWM=y CONFIG_SENSORS_BH1780=y CONFIG_NETDEVICES=y @@ -65,16 +62,18 @@ CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_NOMADIK=y -CONFIG_I2C=y -CONFIG_I2C_NOMADIK=y CONFIG_SPI=y CONFIG_SPI_PL022=y CONFIG_GPIO_STMPE=y CONFIG_GPIO_TC3589X=y +CONFIG_POWER_SUPPLY=y +CONFIG_AB8500_BM=y +CONFIG_AB8500_BATTERY_THERM_ON_BATCTRL=y 
CONFIG_MFD_STMPE=y CONFIG_MFD_TC3589X=y CONFIG_AB5500_CORE=y CONFIG_AB8500_CORE=y +CONFIG_REGULATOR=y CONFIG_REGULATOR_AB8500=y # CONFIG_HID_SUPPORT is not set CONFIG_USB_GADGET=y diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index d4c24d412a8d..0f04d84582e1 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -118,6 +118,13 @@ extern void iwmmxt_task_switch(struct thread_info *); extern void vfp_sync_hwstate(struct thread_info *); extern void vfp_flush_hwstate(struct thread_info *); +struct user_vfp; +struct user_vfp_exc; + +extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *, + struct user_vfp_exc __user *); +extern int vfp_restore_user_hwstate(struct user_vfp __user *, + struct user_vfp_exc __user *); #endif /* diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h index 60843eb0f61c..73409e6c0251 100644 --- a/arch/arm/include/asm/tls.h +++ b/arch/arm/include/asm/tls.h @@ -7,6 +7,8 @@ .macro set_tls_v6k, tp, tmp1, tmp2 mcr p15, 0, \tp, c13, c0, 3 @ set TLS register + mov \tmp1, #0 + mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register .endm .macro set_tls_v6, tp, tmp1, tmp2 @@ -15,6 +17,8 @@ mov \tmp2, #0xffff0fff tst \tmp1, #HWCAP_TLS @ hardware TLS available? mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register + movne \tmp1, #0 + mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0 .endm diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 71ccdbfed662..8349d4e97e2b 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -155,10 +155,10 @@ static bool migrate_one_irq(struct irq_desc *desc) } c = irq_data_get_irq_chip(d); - if (c->irq_set_affinity) - c->irq_set_affinity(d, affinity, true); - else + if (!c->irq_set_affinity) pr_debug("IRQ%u: unable to set affinity\n", d->irq); + else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) + cpumask_copy(d->affinity, affinity); return ret; } diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 80abafb9bf33..9650c143afc1 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -906,27 +906,14 @@ long arch_ptrace(struct task_struct *child, long request, return ret; } -#ifdef __ARMEB__ -#define AUDIT_ARCH_NR AUDIT_ARCH_ARMEB -#else -#define AUDIT_ARCH_NR AUDIT_ARCH_ARM -#endif - asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) { unsigned long ip; - /* - * Save IP. 
IP is used to denote syscall entry/exit: - * IP = 0 -> entry, = 1 -> exit - */ - ip = regs->ARM_ip; - regs->ARM_ip = why; - - if (!ip) + if (why) audit_syscall_exit(regs); else - audit_syscall_entry(AUDIT_ARCH_NR, scno, regs->ARM_r0, + audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); if (!test_thread_flag(TIF_SYSCALL_TRACE)) @@ -936,6 +923,13 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno) current_thread_info()->syscall = scno; + /* + * IP is used to denote syscall entry/exit: + * IP = 0 -> entry, =1 -> exit + */ + ip = regs->ARM_ip; + regs->ARM_ip = why; + /* the 0x80 provides a way for the tracing parent to distinguish between a syscall stop and SIGTRAP delivery */ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 7cb532fc8aa4..d68d1b694680 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -180,44 +180,23 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame) static int preserve_vfp_context(struct vfp_sigframe __user *frame) { - struct thread_info *thread = current_thread_info(); - struct vfp_hard_struct *h = &thread->vfpstate.hard; const unsigned long magic = VFP_MAGIC; const unsigned long size = VFP_STORAGE_SIZE; int err = 0; - vfp_sync_hwstate(thread); __put_user_error(magic, &frame->magic, err); __put_user_error(size, &frame->size, err); - /* - * Copy the floating point registers. There can be unused - * registers see asm/hwcap.h for details. - */ - err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs, - sizeof(h->fpregs)); - /* - * Copy the status and control register. - */ - __put_user_error(h->fpscr, &frame->ufp.fpscr, err); + if (err) + return -EFAULT; - /* - * Copy the exception registers. - */ - __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err); - __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); - __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); - - return err ? -EFAULT : 0; + return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc); } static int restore_vfp_context(struct vfp_sigframe __user *frame) { - struct thread_info *thread = current_thread_info(); - struct vfp_hard_struct *h = &thread->vfpstate.hard; unsigned long magic; unsigned long size; - unsigned long fpexc; int err = 0; __get_user_error(magic, &frame->magic, err); @@ -228,33 +207,7 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame) if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) return -EINVAL; - vfp_flush_hwstate(thread); - - /* - * Copy the floating point registers. There can be unused - * registers see asm/hwcap.h for details. - */ - err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs, - sizeof(h->fpregs)); - /* - * Copy the status and control register. - */ - __get_user_error(h->fpscr, &frame->ufp.fpscr, err); - - /* - * Sanitise and restore the exception registers. - */ - __get_user_error(fpexc, &frame->ufp_exc.fpexc, err); - /* Ensure the VFP is enabled. */ - fpexc |= FPEXC_EN; - /* Ensure FPINST2 is invalid and the exception flag is cleared. */ - fpexc &= ~(FPEXC_EX | FPEXC_FP2V); - h->fpexc = fpexc; - - __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); - __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); - - return err ? 
-EFAULT : 0; + return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc); } #endif diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index addbbe8028c2..8f4644659777 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -251,8 +251,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void) struct mm_struct *mm = &init_mm; unsigned int cpu = smp_processor_id(); - printk("CPU%u: Booted secondary processor\n", cpu); - /* * All kernel threads share the same mm context; grab a * reference and switch to it. @@ -264,6 +262,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void) enter_lazy_tlb(mm, current); local_flush_tlb_all(); + printk("CPU%u: Booted secondary processor\n", cpu); + cpu_init(); preempt_disable(); trace_hardirqs_off(); @@ -510,10 +510,6 @@ static void ipi_cpu_stop(unsigned int cpu) local_fiq_disable(); local_irq_disable(); -#ifdef CONFIG_HOTPLUG_CPU - platform_cpu_kill(cpu); -#endif - while (1) cpu_relax(); } @@ -576,17 +572,25 @@ void smp_send_reschedule(int cpu) smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); } +#ifdef CONFIG_HOTPLUG_CPU +static void smp_kill_cpus(cpumask_t *mask) +{ + unsigned int cpu; + for_each_cpu(cpu, mask) + platform_cpu_kill(cpu); +} +#else +static void smp_kill_cpus(cpumask_t *mask) { } +#endif + void smp_send_stop(void) { unsigned long timeout; + struct cpumask mask; - if (num_online_cpus() > 1) { - struct cpumask mask; - cpumask_copy(&mask, cpu_online_mask); - cpumask_clear_cpu(smp_processor_id(), &mask); - - smp_cross_call(&mask, IPI_CPU_STOP); - } + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + smp_cross_call(&mask, IPI_CPU_STOP); /* Wait up to one second for other CPUs to stop */ timeout = USEC_PER_SEC; @@ -595,6 +599,8 @@ void smp_send_stop(void) if (num_online_cpus() > 1) pr_warning("SMP: failed to stop secondary CPUs\n"); + + smp_kill_cpus(&mask); } /* diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 5b150afb995b..fef42b21cecb 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -118,14 +118,10 @@ static int twd_cpufreq_transition(struct notifier_block *nb, * The twd clock events must be reprogrammed to account for the new * frequency. The timer is local to a cpu, so cross-call to the * changing cpu. - * - * Only wait for it to finish, if the cpu is active to avoid - * deadlock when cpu1 is spinning on while(!cpu_active(cpu1)) during - * booting of that cpu. 
*/ if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE) smp_call_function_single(freqs->cpu, twd_update_frequency, - NULL, cpu_active(freqs->cpu)); + NULL, 1); return NOTIFY_OK; } diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index d2b177905cdb..76cbb055dd05 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -115,7 +115,7 @@ int kernel_execve(const char *filename, "Ir" (THREAD_START_SP - sizeof(regs)), "r" (®s), "Ir" (sizeof(regs)) - : "r0", "r1", "r2", "r3", "ip", "lr", "memory"); + : "r0", "r1", "r2", "r3", "r8", "r9", "ip", "lr", "memory"); out: return ret; diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c index 99ce5c955e39..05774e5b1cba 100644 --- a/arch/arm/mach-at91/at91rm9200_devices.c +++ b/arch/arm/mach-at91/at91rm9200_devices.c @@ -1173,7 +1173,6 @@ void __init at91_add_device_serial(void) printk(KERN_INFO "AT91: No default serial console defined.\n"); } #else -void __init __deprecated at91_init_serial(struct at91_uart_config *config) {} void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {} void __init at91_set_serial_console(unsigned portnr) {} void __init at91_add_device_serial(void) {} diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c index dd7f782b0b91..104ca40d8d18 100644 --- a/arch/arm/mach-at91/at91rm9200_time.c +++ b/arch/arm/mach-at91/at91rm9200_time.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -176,6 +177,7 @@ static struct clock_event_device clkevt = { }; void __iomem *at91_st_base; +EXPORT_SYMBOL_GPL(at91_st_base); void __init at91rm9200_ioremap_st(u32 addr) { diff --git a/arch/arm/mach-at91/board-rm9200ek.c b/arch/arm/mach-at91/board-rm9200ek.c index 11cbaa8946fe..b2e4fe21f346 100644 --- a/arch/arm/mach-at91/board-rm9200ek.c +++ b/arch/arm/mach-at91/board-rm9200ek.c @@ -117,7 +117,7 @@ static struct i2c_board_info __initdata ek_i2c_devices[] = { }; #define EK_FLASH_BASE AT91_CHIPSELECT_0 -#define EK_FLASH_SIZE SZ_2M +#define EK_FLASH_SIZE SZ_8M static struct physmap_flash_data ek_flash_data = { .width = 2, diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c index c3f994462864..065fed342424 100644 --- a/arch/arm/mach-at91/board-sam9261ek.c +++ b/arch/arm/mach-at91/board-sam9261ek.c @@ -85,8 +85,6 @@ static struct resource dm9000_resource[] = { .flags = IORESOURCE_MEM }, [2] = { - .start = AT91_PIN_PC11, - .end = AT91_PIN_PC11, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE, } @@ -130,6 +128,8 @@ static struct sam9_smc_config __initdata dm9000_smc_config = { static void __init ek_add_device_dm9000(void) { + struct resource *r = &dm9000_resource[2]; + /* Configure chip-select 2 (DM9000) */ sam9_smc_configure(0, 2, &dm9000_smc_config); @@ -139,6 +139,7 @@ static void __init ek_add_device_dm9000(void) /* Configure Interrupt pin as input, no pull-up */ at91_set_gpio_input(AT91_PIN_PC11, 0); + r->start = r->end = gpio_to_irq(AT91_PIN_PC11); platform_device_register(&dm9000_device); } #else diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c index a0f4d7424cdc..6b692824c988 100644 --- a/arch/arm/mach-at91/clock.c +++ b/arch/arm/mach-at91/clock.c @@ -35,6 +35,7 @@ #include "generic.h" void __iomem *at91_pmc_base; +EXPORT_SYMBOL_GPL(at91_pmc_base); /* * There's a lot more which can be done with clocks, including cpufreq diff --git a/arch/arm/mach-at91/include/mach/at91_pmc.h 
b/arch/arm/mach-at91/include/mach/at91_pmc.h index 36604782a78f..ea2c57a86ca6 100644 --- a/arch/arm/mach-at91/include/mach/at91_pmc.h +++ b/arch/arm/mach-at91/include/mach/at91_pmc.h @@ -25,7 +25,7 @@ extern void __iomem *at91_pmc_base; #define at91_pmc_write(field, value) \ __raw_writel(value, at91_pmc_base + field) #else -.extern at91_aic_base +.extern at91_pmc_base #endif #define AT91_PMC_SCER 0x00 /* System Clock Enable Register */ diff --git a/arch/arm/mach-at91/setup.c b/arch/arm/mach-at91/setup.c index 97cc04dc8073..f44a2e7272e3 100644 --- a/arch/arm/mach-at91/setup.c +++ b/arch/arm/mach-at91/setup.c @@ -54,6 +54,7 @@ void __init at91_init_interrupts(unsigned int *priority) } void __iomem *at91_ramc_base[2]; +EXPORT_SYMBOL_GPL(at91_ramc_base); void __init at91_ioremap_ramc(int id, u32 addr, u32 size) { @@ -292,6 +293,7 @@ void __init at91_ioremap_rstc(u32 base_addr) } void __iomem *at91_matrix_base; +EXPORT_SYMBOL_GPL(at91_matrix_base); void __init at91_ioremap_matrix(u32 base_addr) { diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c index 22e4e0a28ad1..adbfb1994582 100644 --- a/arch/arm/mach-bcmring/core.c +++ b/arch/arm/mach-bcmring/core.c @@ -52,8 +52,8 @@ #include #include -static AMBA_APB_DEVICE(uartA, "uarta", MM_ADDR_IO_UARTA, { IRQ_UARTA }, NULL); -static AMBA_APB_DEVICE(uartB, "uartb", MM_ADDR_IO_UARTB, { IRQ_UARTB }, NULL); +static AMBA_APB_DEVICE(uartA, "uartA", 0, MM_ADDR_IO_UARTA, {IRQ_UARTA}, NULL); +static AMBA_APB_DEVICE(uartB, "uartB", 0, MM_ADDR_IO_UARTB, {IRQ_UARTB}, NULL); static struct clk pll1_clk = { .name = "PLL1", diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index e81c35f936b5..b8df521fb68e 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -232,6 +232,9 @@ config MACH_ARMLEX4210 config MACH_UNIVERSAL_C210 bool "Mobile UNIVERSAL_C210 Board" select CPU_EXYNOS4210 + select S5P_HRT + select CLKSRC_MMIO + select HAVE_SCHED_CLOCK select S5P_GPIO_INT select S5P_DEV_FIMC0 select S5P_DEV_FIMC1 diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c index df54c2a92225..6efd1e5919fd 100644 --- a/arch/arm/mach-exynos/clock-exynos4.c +++ b/arch/arm/mach-exynos/clock-exynos4.c @@ -497,25 +497,25 @@ static struct clk exynos4_init_clocks_off[] = { .ctrlbit = (1 << 3), }, { .name = "hsmmc", - .devname = "s3c-sdhci.0", + .devname = "exynos4-sdhci.0", .parent = &exynos4_clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 5), }, { .name = "hsmmc", - .devname = "s3c-sdhci.1", + .devname = "exynos4-sdhci.1", .parent = &exynos4_clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 6), }, { .name = "hsmmc", - .devname = "s3c-sdhci.2", + .devname = "exynos4-sdhci.2", .parent = &exynos4_clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 7), }, { .name = "hsmmc", - .devname = "s3c-sdhci.3", + .devname = "exynos4-sdhci.3", .parent = &exynos4_clk_aclk_133.clk, .enable = exynos4_clk_ip_fsys_ctrl, .ctrlbit = (1 << 8), @@ -1202,7 +1202,7 @@ static struct clksrc_clk exynos4_clk_sclk_uart3 = { static struct clksrc_clk exynos4_clk_sclk_mmc0 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.0", + .devname = "exynos4-sdhci.0", .parent = &exynos4_clk_dout_mmc0.clk, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 0), @@ -1213,7 +1213,7 @@ static struct clksrc_clk exynos4_clk_sclk_mmc0 = { static struct clksrc_clk exynos4_clk_sclk_mmc1 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.1", + .devname = 
"exynos4-sdhci.1", .parent = &exynos4_clk_dout_mmc1.clk, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 4), @@ -1224,7 +1224,7 @@ static struct clksrc_clk exynos4_clk_sclk_mmc1 = { static struct clksrc_clk exynos4_clk_sclk_mmc2 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.2", + .devname = "exynos4-sdhci.2", .parent = &exynos4_clk_dout_mmc2.clk, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 8), @@ -1235,7 +1235,7 @@ static struct clksrc_clk exynos4_clk_sclk_mmc2 = { static struct clksrc_clk exynos4_clk_sclk_mmc3 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.3", + .devname = "exynos4-sdhci.3", .parent = &exynos4_clk_dout_mmc3.clk, .enable = exynos4_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 12), @@ -1340,10 +1340,10 @@ static struct clk_lookup exynos4_clk_lookup[] = { CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos4_clk_sclk_uart1.clk), CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos4_clk_sclk_uart2.clk), CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos4_clk_sclk_uart3.clk), - CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &exynos4_clk_sclk_mmc0.clk), - CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &exynos4_clk_sclk_mmc1.clk), - CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &exynos4_clk_sclk_mmc2.clk), - CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &exynos4_clk_sclk_mmc3.clk), + CLKDEV_INIT("exynos4-sdhci.0", "mmc_busclk.2", &exynos4_clk_sclk_mmc0.clk), + CLKDEV_INIT("exynos4-sdhci.1", "mmc_busclk.2", &exynos4_clk_sclk_mmc1.clk), + CLKDEV_INIT("exynos4-sdhci.2", "mmc_busclk.2", &exynos4_clk_sclk_mmc2.clk), + CLKDEV_INIT("exynos4-sdhci.3", "mmc_busclk.2", &exynos4_clk_sclk_mmc3.clk), CLKDEV_INIT("exynos4-fb.0", "lcd", &exynos4_clk_fimd0), CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos4_clk_pdma0), CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos4_clk_pdma1), diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c index d013982d0f8e..7ac6ff4c46bd 100644 --- a/arch/arm/mach-exynos/clock-exynos5.c +++ b/arch/arm/mach-exynos/clock-exynos5.c @@ -455,25 +455,25 @@ static struct clk exynos5_init_clocks_off[] = { .ctrlbit = (1 << 20), }, { .name = "hsmmc", - .devname = "s3c-sdhci.0", + .devname = "exynos4-sdhci.0", .parent = &exynos5_clk_aclk_200.clk, .enable = exynos5_clk_ip_fsys_ctrl, .ctrlbit = (1 << 12), }, { .name = "hsmmc", - .devname = "s3c-sdhci.1", + .devname = "exynos4-sdhci.1", .parent = &exynos5_clk_aclk_200.clk, .enable = exynos5_clk_ip_fsys_ctrl, .ctrlbit = (1 << 13), }, { .name = "hsmmc", - .devname = "s3c-sdhci.2", + .devname = "exynos4-sdhci.2", .parent = &exynos5_clk_aclk_200.clk, .enable = exynos5_clk_ip_fsys_ctrl, .ctrlbit = (1 << 14), }, { .name = "hsmmc", - .devname = "s3c-sdhci.3", + .devname = "exynos4-sdhci.3", .parent = &exynos5_clk_aclk_200.clk, .enable = exynos5_clk_ip_fsys_ctrl, .ctrlbit = (1 << 15), @@ -678,7 +678,7 @@ static struct clk exynos5_clk_pdma1 = { .name = "dma", .devname = "dma-pl330.1", .enable = exynos5_clk_ip_fsys_ctrl, - .ctrlbit = (1 << 1), + .ctrlbit = (1 << 2), }; static struct clk exynos5_clk_mdma1 = { @@ -813,7 +813,7 @@ static struct clksrc_clk exynos5_clk_sclk_uart3 = { static struct clksrc_clk exynos5_clk_sclk_mmc0 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.0", + .devname = "exynos4-sdhci.0", .parent = &exynos5_clk_dout_mmc0.clk, .enable = exynos5_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 0), @@ -824,7 +824,7 @@ static struct clksrc_clk exynos5_clk_sclk_mmc0 = { static struct clksrc_clk exynos5_clk_sclk_mmc1 = { .clk = { .name = "sclk_mmc", - 
.devname = "s3c-sdhci.1", + .devname = "exynos4-sdhci.1", .parent = &exynos5_clk_dout_mmc1.clk, .enable = exynos5_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 4), @@ -835,7 +835,7 @@ static struct clksrc_clk exynos5_clk_sclk_mmc1 = { static struct clksrc_clk exynos5_clk_sclk_mmc2 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.2", + .devname = "exynos4-sdhci.2", .parent = &exynos5_clk_dout_mmc2.clk, .enable = exynos5_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 8), @@ -846,7 +846,7 @@ static struct clksrc_clk exynos5_clk_sclk_mmc2 = { static struct clksrc_clk exynos5_clk_sclk_mmc3 = { .clk = { .name = "sclk_mmc", - .devname = "s3c-sdhci.3", + .devname = "exynos4-sdhci.3", .parent = &exynos5_clk_dout_mmc3.clk, .enable = exynos5_clksrc_mask_fsys_ctrl, .ctrlbit = (1 << 12), @@ -990,10 +990,10 @@ static struct clk_lookup exynos5_clk_lookup[] = { CLKDEV_INIT("exynos4210-uart.1", "clk_uart_baud0", &exynos5_clk_sclk_uart1.clk), CLKDEV_INIT("exynos4210-uart.2", "clk_uart_baud0", &exynos5_clk_sclk_uart2.clk), CLKDEV_INIT("exynos4210-uart.3", "clk_uart_baud0", &exynos5_clk_sclk_uart3.clk), - CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &exynos5_clk_sclk_mmc0.clk), - CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &exynos5_clk_sclk_mmc1.clk), - CLKDEV_INIT("s3c-sdhci.2", "mmc_busclk.2", &exynos5_clk_sclk_mmc2.clk), - CLKDEV_INIT("s3c-sdhci.3", "mmc_busclk.2", &exynos5_clk_sclk_mmc3.clk), + CLKDEV_INIT("exynos4-sdhci.0", "mmc_busclk.2", &exynos5_clk_sclk_mmc0.clk), + CLKDEV_INIT("exynos4-sdhci.1", "mmc_busclk.2", &exynos5_clk_sclk_mmc1.clk), + CLKDEV_INIT("exynos4-sdhci.2", "mmc_busclk.2", &exynos5_clk_sclk_mmc2.clk), + CLKDEV_INIT("exynos4-sdhci.3", "mmc_busclk.2", &exynos5_clk_sclk_mmc3.clk), CLKDEV_INIT("dma-pl330.0", "apb_pclk", &exynos5_clk_pdma0), CLKDEV_INIT("dma-pl330.1", "apb_pclk", &exynos5_clk_pdma1), CLKDEV_INIT("dma-pl330.2", "apb_pclk", &exynos5_clk_mdma1), diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c index 8614aab47cc0..5ccd6e80a607 100644 --- a/arch/arm/mach-exynos/common.c +++ b/arch/arm/mach-exynos/common.c @@ -326,6 +326,11 @@ static void __init exynos4_map_io(void) s3c_fimc_setname(2, "exynos4-fimc"); s3c_fimc_setname(3, "exynos4-fimc"); + s3c_sdhci_setname(0, "exynos4-sdhci"); + s3c_sdhci_setname(1, "exynos4-sdhci"); + s3c_sdhci_setname(2, "exynos4-sdhci"); + s3c_sdhci_setname(3, "exynos4-sdhci"); + /* The I2C bus controllers are directly compatible with s3c2440 */ s3c_i2c0_setname("s3c2440-i2c"); s3c_i2c1_setname("s3c2440-i2c"); @@ -344,6 +349,11 @@ static void __init exynos5_map_io(void) s3c_device_i2c0.resource[1].start = EXYNOS5_IRQ_IIC; s3c_device_i2c0.resource[1].end = EXYNOS5_IRQ_IIC; + s3c_sdhci_setname(0, "exynos4-sdhci"); + s3c_sdhci_setname(1, "exynos4-sdhci"); + s3c_sdhci_setname(2, "exynos4-sdhci"); + s3c_sdhci_setname(3, "exynos4-sdhci"); + /* The I2C bus controllers are directly compatible with s3c2440 */ s3c_i2c0_setname("s3c2440-i2c"); s3c_i2c1_setname("s3c2440-i2c"); @@ -537,7 +547,9 @@ void __init exynos5_init_irq(void) { int irq; - gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU); +#ifdef CONFIG_OF + of_irq_init(exynos4_dt_irq_match); +#endif for (irq = 0; irq < EXYNOS5_MAX_COMBINER_NR; irq++) { combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq), diff --git a/arch/arm/mach-exynos/dev-dwmci.c b/arch/arm/mach-exynos/dev-dwmci.c index b025db4bf602..79035018fb74 100644 --- a/arch/arm/mach-exynos/dev-dwmci.c +++ b/arch/arm/mach-exynos/dev-dwmci.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -33,16 
+34,8 @@ static int exynos4_dwmci_init(u32 slot_id, irq_handler_t handler, void *data) } static struct resource exynos4_dwmci_resource[] = { - [0] = { - .start = EXYNOS4_PA_DWMCI, - .end = EXYNOS4_PA_DWMCI + SZ_4K - 1, - .flags = IORESOURCE_MEM, - }, - [1] = { - .start = IRQ_DWMCI, - .end = IRQ_DWMCI, - .flags = IORESOURCE_IRQ, - } + [0] = DEFINE_RES_MEM(EXYNOS4_PA_DWMCI, SZ_4K), + [1] = DEFINE_RES_IRQ(EXYNOS4_IRQ_DWMCI), }; static struct dw_mci_board exynos4_dwci_pdata = { diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c index b4f1f902ce6d..ed90aef404c3 100644 --- a/arch/arm/mach-exynos/mach-nuri.c +++ b/arch/arm/mach-exynos/mach-nuri.c @@ -112,6 +112,7 @@ static struct s3c_sdhci_platdata nuri_hsmmc0_data __initdata = { .host_caps = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | MMC_CAP_ERASE), + .host_caps2 = MMC_CAP2_BROKEN_VOLTAGE, .cd_type = S3C_SDHCI_CD_PERMANENT, .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, }; diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c index 7ebf79c2ab34..a34036eb8ba2 100644 --- a/arch/arm/mach-exynos/mach-universal_c210.c +++ b/arch/arm/mach-exynos/mach-universal_c210.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -747,6 +748,7 @@ static struct s3c_sdhci_platdata universal_hsmmc0_data __initdata = { .max_width = 8, .host_caps = (MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED), + .host_caps2 = MMC_CAP2_BROKEN_VOLTAGE, .cd_type = S3C_SDHCI_CD_PERMANENT, .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, }; @@ -1062,6 +1064,7 @@ static void __init universal_map_io(void) exynos_init_io(NULL, 0); s3c24xx_init_clocks(24000000); s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs)); + s5p_set_timer_source(S5P_PWM2, S5P_PWM4); } static void s5p_tv_setup(void) @@ -1112,7 +1115,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210") .map_io = universal_map_io, .handle_irq = gic_handle_irq, .init_machine = universal_machine_init, - .timer = &exynos4_timer, + .timer = &s5p_timer, .reserve = &universal_reserve, .restart = exynos4_restart, MACHINE_END diff --git a/arch/arm/mach-imx/imx27-dt.c b/arch/arm/mach-imx/imx27-dt.c index 861ceb8232d6..ed38d03c61f2 100644 --- a/arch/arm/mach-imx/imx27-dt.c +++ b/arch/arm/mach-imx/imx27-dt.c @@ -35,7 +35,7 @@ static const struct of_dev_auxdata imx27_auxdata_lookup[] __initconst = { static int __init imx27_avic_add_irq_domain(struct device_node *np, struct device_node *interrupt_parent) { - irq_domain_add_simple(np, 0); + irq_domain_add_legacy(np, 64, 0, 0, &irq_domain_simple_ops, NULL); return 0; } @@ -44,7 +44,9 @@ static int __init imx27_gpio_add_irq_domain(struct device_node *np, { static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; - irq_domain_add_simple(np, gpio_irq_base); + gpio_irq_base -= 32; + irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, + NULL); return 0; } diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c index 05250aed61fb..e10f3914fcfe 100644 --- a/arch/arm/mach-imx/mm-imx5.c +++ b/arch/arm/mach-imx/mm-imx5.c @@ -35,7 +35,7 @@ static void imx5_idle(void) } clk_enable(gpc_dvfs_clk); mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF); - if (tzic_enable_wake() != 0) + if (!tzic_enable_wake()) cpu_do_idle(); clk_disable(gpc_dvfs_clk); } diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c index 1c672d9e6656..f7fe1b9f3170 100644 --- 
a/arch/arm/mach-kirkwood/board-dt.c +++ b/arch/arm/mach-kirkwood/board-dt.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c index 962e71169750..fb3496a52ef4 100644 --- a/arch/arm/mach-msm/board-msm8x60.c +++ b/arch/arm/mach-msm/board-msm8x60.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -49,10 +50,22 @@ static void __init msm8x60_map_io(void) msm_map_msm8x60_io(); } +#ifdef CONFIG_OF +static struct of_device_id msm_dt_gic_match[] __initdata = { + { .compatible = "qcom,msm-8660-qgic", .data = gic_of_init }, + {} +}; +#endif + static void __init msm8x60_init_irq(void) { - gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE, - (void *)MSM_QGIC_CPU_BASE); + if (!of_have_populated_dt()) + gic_init(0, GIC_PPI_START, MSM_QGIC_DIST_BASE, + (void *)MSM_QGIC_CPU_BASE); +#ifdef CONFIG_OF + else + of_irq_init(msm_dt_gic_match); +#endif /* Edge trigger PPIs except AVS_SVICINT and AVS_SVICINTSWDONE */ writel(0xFFFFD7FF, MSM_QGIC_DIST_BASE + GIC_DIST_CONFIG + 4); @@ -73,16 +86,8 @@ static struct of_dev_auxdata msm_auxdata_lookup[] __initdata = { {} }; -static struct of_device_id msm_dt_gic_match[] __initdata = { - { .compatible = "qcom,msm-8660-qgic", }, - {} -}; - static void __init msm8x60_dt_init(void) { - irq_domain_generate_simple(msm_dt_gic_match, MSM8X60_QGIC_DIST_PHYS, - GIC_SPI_START); - if (of_machine_is_compatible("qcom,msm8660-surf")) { printk(KERN_INFO "Init surf UART registers\n"); msm8x60_init_uart12dm(); diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c index fcce7ff37630..cfd98b186fcc 100644 --- a/arch/arm/mach-omap1/ams-delta-fiq.c +++ b/arch/arm/mach-omap1/ams-delta-fiq.c @@ -48,7 +48,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id) struct irq_chip *irq_chip = NULL; int gpio, irq_num, fiq_count; - irq_desc = irq_to_desc(IH_GPIO_BASE); + irq_desc = irq_to_desc(gpio_to_irq(AMS_DELTA_GPIO_PIN_KEYBRD_CLK)); if (irq_desc) irq_chip = irq_desc->irq_data.chip; diff --git a/arch/arm/mach-omap1/mux.c b/arch/arm/mach-omap1/mux.c index 087dba0df47e..e9cc52d4cb28 100644 --- a/arch/arm/mach-omap1/mux.c +++ b/arch/arm/mach-omap1/mux.c @@ -27,6 +27,7 @@ #include #include +#include #include diff --git a/arch/arm/mach-omap1/timer.c b/arch/arm/mach-omap1/timer.c index 6e90665a7c47..fb202af01d0d 100644 --- a/arch/arm/mach-omap1/timer.c +++ b/arch/arm/mach-omap1/timer.c @@ -47,9 +47,9 @@ static int omap1_dm_timer_set_src(struct platform_device *pdev, int n = (pdev->id - 1) << 1; u32 l; - l = __raw_readl(MOD_CONF_CTRL_1) & ~(0x03 << n); + l = omap_readl(MOD_CONF_CTRL_1) & ~(0x03 << n); l |= source << n; - __raw_writel(l, MOD_CONF_CTRL_1); + omap_writel(l, MOD_CONF_CTRL_1); return 0; } diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c index a39fc4bbd2b8..130ab00c09a2 100644 --- a/arch/arm/mach-omap2/board-4430sdp.c +++ b/arch/arm/mach-omap2/board-4430sdp.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -560,7 +561,7 @@ static struct regulator_init_data sdp4430_vusim = { }, }; -static struct twl4030_codec_data twl6040_codec = { +static struct twl6040_codec_data twl6040_codec = { /* single-step ramp for headset and handsfree */ .hs_left_step = 0x0f, .hs_right_step = 0x0f, @@ -568,7 +569,7 @@ static struct twl4030_codec_data twl6040_codec = { .hf_right_step = 0x1d, }; -static struct twl4030_vibra_data twl6040_vibra = { +static struct 
twl6040_vibra_data twl6040_vibra = { .vibldrv_res = 8, .vibrdrv_res = 3, .viblmotor_res = 10, @@ -577,16 +578,14 @@ static struct twl4030_vibra_data twl6040_vibra = { .vddvibr_uV = 0, /* fixed volt supply - VBAT */ }; -static struct twl4030_audio_data twl6040_audio = { +static struct twl6040_platform_data twl6040_data = { .codec = &twl6040_codec, .vibra = &twl6040_vibra, .audpwron_gpio = 127, - .naudint_irq = OMAP44XX_IRQ_SYS_2N, .irq_base = TWL6040_CODEC_IRQ_BASE, }; static struct twl4030_platform_data sdp4430_twldata = { - .audio = &twl6040_audio, /* Regulators */ .vusim = &sdp4430_vusim, .vaux1 = &sdp4430_vaux1, @@ -617,7 +616,8 @@ static int __init omap4_i2c_init(void) TWL_COMMON_REGULATOR_VCXIO | TWL_COMMON_REGULATOR_VUSB | TWL_COMMON_REGULATOR_CLK32KG); - omap4_pmic_init("twl6030", &sdp4430_twldata); + omap4_pmic_init("twl6030", &sdp4430_twldata, + &twl6040_data, OMAP44XX_IRQ_SYS_2N); omap_register_i2c_bus(2, 400, NULL, 0); omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo, ARRAY_SIZE(sdp4430_i2c_3_boardinfo)); diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c index 74e1687b5170..098d183a0086 100644 --- a/arch/arm/mach-omap2/board-generic.c +++ b/arch/arm/mach-omap2/board-generic.c @@ -137,7 +137,7 @@ static struct twl4030_platform_data sdp4430_twldata = { static void __init omap4_i2c_init(void) { - omap4_pmic_init("twl6030", &sdp4430_twldata); + omap4_pmic_init("twl6030", &sdp4430_twldata, NULL, 0); } static void __init omap4_init(void) diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index 930c0d380435..740cee9369ba 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -641,7 +641,7 @@ static struct regulator_consumer_supply dummy_supplies[] = { static void __init igep_init(void) { - regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies)); + regulator_register_fixed(1, dummy_supplies, ARRAY_SIZE(dummy_supplies)); omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); /* Get IGEP2 hardware revision */ diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index d8c0e89f0126..1b782ba53433 100644 --- a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -284,7 +285,7 @@ static int __init omap4_twl6030_hsmmc_init(struct omap2_hsmmc_info *controllers) return 0; } -static struct twl4030_codec_data twl6040_codec = { +static struct twl6040_codec_data twl6040_codec = { /* single-step ramp for headset and handsfree */ .hs_left_step = 0x0f, .hs_right_step = 0x0f, @@ -292,17 +293,14 @@ static struct twl4030_codec_data twl6040_codec = { .hf_right_step = 0x1d, }; -static struct twl4030_audio_data twl6040_audio = { +static struct twl6040_platform_data twl6040_data = { .codec = &twl6040_codec, .audpwron_gpio = 127, - .naudint_irq = OMAP44XX_IRQ_SYS_2N, .irq_base = TWL6040_CODEC_IRQ_BASE, }; /* Panda board uses the common PMIC configuration */ -static struct twl4030_platform_data omap4_panda_twldata = { - .audio = &twl6040_audio, -}; +static struct twl4030_platform_data omap4_panda_twldata; /* * Display monitor features are burnt in their EEPROM as EDID data. 
The EEPROM @@ -326,7 +324,8 @@ static int __init omap4_panda_i2c_init(void) TWL_COMMON_REGULATOR_VCXIO | TWL_COMMON_REGULATOR_VUSB | TWL_COMMON_REGULATOR_CLK32KG); - omap4_pmic_init("twl6030", &omap4_panda_twldata); + omap4_pmic_init("twl6030", &omap4_panda_twldata, + &twl6040_data, OMAP44XX_IRQ_SYS_2N); omap_register_i2c_bus(2, 400, NULL, 0); /* * Bus 3 is attached to the DVI port where devices like the pico DLP diff --git a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h index 1e2d3322f33e..c88420de1151 100644 --- a/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h +++ b/arch/arm/mach-omap2/include/mach/ctrl_module_pad_core_44xx.h @@ -941,10 +941,10 @@ #define OMAP4_DSI2_LANEENABLE_MASK (0x7 << 29) #define OMAP4_DSI1_LANEENABLE_SHIFT 24 #define OMAP4_DSI1_LANEENABLE_MASK (0x1f << 24) -#define OMAP4_DSI2_PIPD_SHIFT 19 -#define OMAP4_DSI2_PIPD_MASK (0x1f << 19) -#define OMAP4_DSI1_PIPD_SHIFT 14 -#define OMAP4_DSI1_PIPD_MASK (0x1f << 14) +#define OMAP4_DSI1_PIPD_SHIFT 19 +#define OMAP4_DSI1_PIPD_MASK (0x1f << 19) +#define OMAP4_DSI2_PIPD_SHIFT 14 +#define OMAP4_DSI2_PIPD_MASK (0x1f << 14) /* CONTROL_MCBSPLP */ #define OMAP4_ALBCTRLRX_FSX_SHIFT 31 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 2c27fdb61e66..7144ae651d3d 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -1422,6 +1422,9 @@ static int _ocp_softreset(struct omap_hwmod *oh) goto dis_opt_clks; _write_sysconfig(v, oh); + if (oh->class->sysc->srst_udelay) + udelay(oh->class->sysc->srst_udelay); + if (oh->class->sysc->sysc_flags & SYSS_HAS_RESET_STATUS) omap_test_timeout((omap_hwmod_read(oh, oh->class->sysc->syss_offs) @@ -1903,10 +1906,20 @@ void omap_hwmod_write(u32 v, struct omap_hwmod *oh, u16 reg_offs) */ int omap_hwmod_softreset(struct omap_hwmod *oh) { - if (!oh) + u32 v; + int ret; + + if (!oh || !(oh->_sysc_cache)) return -EINVAL; - return _ocp_softreset(oh); + v = oh->_sysc_cache; + ret = _set_softreset(oh, &v); + if (ret) + goto error; + _write_sysconfig(v, oh); + +error: + return ret; } /** diff --git a/arch/arm/mach-omap2/omap_hwmod_2420_data.c b/arch/arm/mach-omap2/omap_hwmod_2420_data.c index a5409ce3f323..a6bde34e443a 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2420_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2420_data.c @@ -1000,7 +1000,6 @@ static struct omap_hwmod_ocp_if omap2420_l4_core__dss_venc = { .flags = OMAP_FIREWALL_L4, } }, - .flags = OCPIF_SWSUP_IDLE, .user = OCP_USER_MPU | OCP_USER_SDMA, }; diff --git a/arch/arm/mach-omap2/omap_hwmod_2430_data.c b/arch/arm/mach-omap2/omap_hwmod_2430_data.c index c4f56cb60d7d..04a3885f4475 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2430_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2430_data.c @@ -1049,7 +1049,6 @@ static struct omap_hwmod_ocp_if omap2430_l4_core__dss_venc = { .slave = &omap2430_dss_venc_hwmod, .clk = "dss_ick", .addr = omap2_dss_venc_addrs, - .flags = OCPIF_SWSUP_IDLE, .user = OCP_USER_MPU | OCP_USER_SDMA, }; diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 34b9766d1d23..db86ce90c69f 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -1676,7 +1676,6 @@ static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_venc = { .flags = OMAP_FIREWALL_L4, } }, - .flags = OCPIF_SWSUP_IDLE, .user = OCP_USER_MPU | OCP_USER_SDMA, }; diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c 
b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index cc9bd106a854..6abc75753e42 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2594,6 +2594,15 @@ static struct omap_hwmod omap44xx_ipu_hwmod = { static struct omap_hwmod_class_sysconfig omap44xx_iss_sysc = { .rev_offs = 0x0000, .sysc_offs = 0x0010, + /* + * ISS needs 100 OCP clk cycles delay after a softreset before + * accessing sysconfig again. + * The lowest frequency at the moment for L3 bus is 100 MHz, so + * 1usec delay is needed. Add an x2 margin to be safe (2 usecs). + * + * TODO: Indicate errata when available. + */ + .srst_udelay = 2, .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET), .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index 0cdd359a128e..9fc2f44188cb 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c @@ -108,8 +108,14 @@ static void omap_uart_set_noidle(struct platform_device *pdev) static void omap_uart_set_smartidle(struct platform_device *pdev) { struct omap_device *od = to_omap_device(pdev); + u8 idlemode; - omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_SMART); + if (od->hwmods[0]->class->sysc->idlemodes & SIDLE_SMART_WKUP) + idlemode = HWMOD_IDLEMODE_SMART_WKUP; + else + idlemode = HWMOD_IDLEMODE_SMART; + + omap_hwmod_set_slave_idlemode(od->hwmods[0], idlemode); } #else @@ -120,124 +126,8 @@ static void omap_uart_set_smartidle(struct platform_device *pdev) {} #endif /* CONFIG_PM */ #ifdef CONFIG_OMAP_MUX -static struct omap_device_pad default_uart1_pads[] __initdata = { - { - .name = "uart1_cts.uart1_cts", - .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, - }, - { - .name = "uart1_rts.uart1_rts", - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, - }, - { - .name = "uart1_tx.uart1_tx", - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, - }, - { - .name = "uart1_rx.uart1_rx", - .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, - .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, - .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, - }, -}; - -static struct omap_device_pad default_uart2_pads[] __initdata = { - { - .name = "uart2_cts.uart2_cts", - .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, - }, - { - .name = "uart2_rts.uart2_rts", - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, - }, - { - .name = "uart2_tx.uart2_tx", - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, - }, - { - .name = "uart2_rx.uart2_rx", - .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, - .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, - .idle = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, - }, -}; - -static struct omap_device_pad default_uart3_pads[] __initdata = { - { - .name = "uart3_cts_rctx.uart3_cts_rctx", - .enable = OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0, - }, - { - .name = "uart3_rts_sd.uart3_rts_sd", - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, - }, - { - .name = "uart3_tx_irtx.uart3_tx_irtx", - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, - }, - { - .name = "uart3_rx_irrx.uart3_rx_irrx", - .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, - .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE0, - .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0, - }, -}; - -static struct omap_device_pad default_omap36xx_uart4_pads[] __initdata = { - { - .name = "gpmc_wait2.uart4_tx", - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, - }, - { - .name = "gpmc_wait3.uart4_rx", - .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, - .enable = 
OMAP_PIN_INPUT | OMAP_MUX_MODE2, - .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE2, - }, -}; - -static struct omap_device_pad default_omap4_uart4_pads[] __initdata = { - { - .name = "uart4_tx.uart4_tx", - .enable = OMAP_PIN_OUTPUT | OMAP_MUX_MODE0, - }, - { - .name = "uart4_rx.uart4_rx", - .flags = OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP, - .enable = OMAP_PIN_INPUT | OMAP_MUX_MODE0, - .idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0, - }, -}; - static void omap_serial_fill_default_pads(struct omap_board_data *bdata) { - switch (bdata->id) { - case 0: - bdata->pads = default_uart1_pads; - bdata->pads_cnt = ARRAY_SIZE(default_uart1_pads); - break; - case 1: - bdata->pads = default_uart2_pads; - bdata->pads_cnt = ARRAY_SIZE(default_uart2_pads); - break; - case 2: - bdata->pads = default_uart3_pads; - bdata->pads_cnt = ARRAY_SIZE(default_uart3_pads); - break; - case 3: - if (cpu_is_omap44xx()) { - bdata->pads = default_omap4_uart4_pads; - bdata->pads_cnt = - ARRAY_SIZE(default_omap4_uart4_pads); - } else if (cpu_is_omap3630()) { - bdata->pads = default_omap36xx_uart4_pads; - bdata->pads_cnt = - ARRAY_SIZE(default_omap36xx_uart4_pads); - } - break; - default: - break; - } } #else static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {} diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c index 4b57757bf9d1..7a7b89304c48 100644 --- a/arch/arm/mach-omap2/twl-common.c +++ b/arch/arm/mach-omap2/twl-common.c @@ -37,6 +37,16 @@ static struct i2c_board_info __initdata pmic_i2c_board_info = { .flags = I2C_CLIENT_WAKE, }; +static struct i2c_board_info __initdata omap4_i2c1_board_info[] = { + { + .addr = 0x48, + .flags = I2C_CLIENT_WAKE, + }, + { + I2C_BOARD_INFO("twl6040", 0x4b), + }, +}; + void __init omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq, struct twl4030_platform_data *pmic_data) @@ -49,14 +59,31 @@ void __init omap_pmic_init(int bus, u32 clkrate, omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1); } +void __init omap4_pmic_init(const char *pmic_type, + struct twl4030_platform_data *pmic_data, + struct twl6040_platform_data *twl6040_data, int twl6040_irq) +{ + /* PMIC part*/ + strncpy(omap4_i2c1_board_info[0].type, pmic_type, + sizeof(omap4_i2c1_board_info[0].type)); + omap4_i2c1_board_info[0].irq = OMAP44XX_IRQ_SYS_1N; + omap4_i2c1_board_info[0].platform_data = pmic_data; + + /* TWL6040 audio IC part */ + omap4_i2c1_board_info[1].irq = twl6040_irq; + omap4_i2c1_board_info[1].platform_data = twl6040_data; + + omap_register_i2c_bus(1, 400, omap4_i2c1_board_info, 2); + +} + void __init omap_pmic_late_init(void) { /* Init the OMAP TWL parameters (if PMIC has been registerd) */ - if (!pmic_i2c_board_info.irq) - return; - - omap3_twl_init(); - omap4_twl_init(); + if (pmic_i2c_board_info.irq) + omap3_twl_init(); + if (omap4_i2c1_board_info[0].irq) + omap4_twl_init(); } #if defined(CONFIG_ARCH_OMAP3) diff --git a/arch/arm/mach-omap2/twl-common.h b/arch/arm/mach-omap2/twl-common.h index 275dde8cb27a..09627483a57f 100644 --- a/arch/arm/mach-omap2/twl-common.h +++ b/arch/arm/mach-omap2/twl-common.h @@ -29,6 +29,7 @@ struct twl4030_platform_data; +struct twl6040_platform_data; void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq, struct twl4030_platform_data *pmic_data); @@ -46,12 +47,9 @@ static inline void omap3_pmic_init(const char *pmic_type, omap_pmic_init(1, 2600, pmic_type, INT_34XX_SYS_NIRQ, pmic_data); } -static inline void omap4_pmic_init(const char *pmic_type, - struct twl4030_platform_data 
*pmic_data) -{ - /* Phoenix Audio IC needs I2C1 to start with 400 KHz or less */ - omap_pmic_init(1, 400, pmic_type, OMAP44XX_IRQ_SYS_1N, pmic_data); -} +void omap4_pmic_init(const char *pmic_type, + struct twl4030_platform_data *pmic_data, + struct twl6040_platform_data *audio_data, int twl6040_irq); void omap3_pmic_get_config(struct twl4030_platform_data *pmic_data, u32 pdata_flags, u32 regulators_flags); diff --git a/arch/arm/mach-orion5x/mpp.h b/arch/arm/mach-orion5x/mpp.h index eac68978a2c2..db70e79a1198 100644 --- a/arch/arm/mach-orion5x/mpp.h +++ b/arch/arm/mach-orion5x/mpp.h @@ -65,8 +65,8 @@ #define MPP8_GIGE MPP(8, 0x1, 0, 0, 1, 1, 1) #define MPP9_UNUSED MPP(9, 0x0, 0, 0, 1, 1, 1) -#define MPP9_GPIO MPP(9, 0x0, 0, 0, 1, 1, 1) -#define MPP9_GIGE MPP(9, 0x1, 1, 1, 1, 1, 1) +#define MPP9_GPIO MPP(9, 0x0, 1, 1, 1, 1, 1) +#define MPP9_GIGE MPP(9, 0x1, 0, 0, 1, 1, 1) #define MPP10_UNUSED MPP(10, 0x0, 0, 0, 1, 1, 1) #define MPP10_GPIO MPP(10, 0x0, 1, 1, 1, 1, 1) diff --git a/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h b/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h index c54cef25895c..cbf51ae81855 100644 --- a/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h +++ b/arch/arm/mach-pxa/include/mach/mfp-pxa2xx.h @@ -17,6 +17,7 @@ * * bit 23 - Input/Output (PXA2xx specific) * bit 24 - Wakeup Enable(PXA2xx specific) + * bit 25 - Keep Output (PXA2xx specific) */ #define MFP_DIR_IN (0x0 << 23) @@ -25,6 +26,12 @@ #define MFP_DIR(x) (((x) >> 23) & 0x1) #define MFP_LPM_CAN_WAKEUP (0x1 << 24) + +/* + * MFP_LPM_KEEP_OUTPUT must be specified for pins that need to + * retain their last output level (low or high). + * Note: MFP_LPM_KEEP_OUTPUT has no effect on pins configured for input. + */ #define MFP_LPM_KEEP_OUTPUT (0x1 << 25) #define WAKEUP_ON_EDGE_RISE (MFP_LPM_CAN_WAKEUP | MFP_LPM_EDGE_RISE) diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c index b0a842887780..ef0426a159d4 100644 --- a/arch/arm/mach-pxa/mfp-pxa2xx.c +++ b/arch/arm/mach-pxa/mfp-pxa2xx.c @@ -33,6 +33,8 @@ #define BANK_OFF(n) (((n) < 3) ? 
(n) << 2 : 0x100 + (((n) - 3) << 2)) #define GPLR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5)) #define GPDR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x0c) +#define GPSR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x18) +#define GPCR(x) __REG2(0x40E00000, BANK_OFF((x) >> 5) + 0x24) #define PWER_WE35 (1 << 24) @@ -348,6 +350,7 @@ static inline void pxa27x_mfp_init(void) {} #ifdef CONFIG_PM static unsigned long saved_gafr[2][4]; static unsigned long saved_gpdr[4]; +static unsigned long saved_gplr[4]; static unsigned long saved_pgsr[4]; static int pxa2xx_mfp_suspend(void) @@ -366,14 +369,26 @@ static int pxa2xx_mfp_suspend(void) } for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) { - saved_gafr[0][i] = GAFR_L(i); saved_gafr[1][i] = GAFR_U(i); saved_gpdr[i] = GPDR(i * 32); + saved_gplr[i] = GPLR(i * 32); saved_pgsr[i] = PGSR(i); - GPDR(i * 32) = gpdr_lpm[i]; + GPSR(i * 32) = PGSR(i); + GPCR(i * 32) = ~PGSR(i); } + + /* set GPDR bits taking into account MFP_LPM_KEEP_OUTPUT */ + for (i = 0; i < pxa_last_gpio; i++) { + if ((gpdr_lpm[gpio_to_bank(i)] & GPIO_bit(i)) || + ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) && + (saved_gpdr[gpio_to_bank(i)] & GPIO_bit(i)))) + GPDR(i) |= GPIO_bit(i); + else + GPDR(i) &= ~GPIO_bit(i); + } + return 0; } @@ -384,6 +399,8 @@ static void pxa2xx_mfp_resume(void) for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++) { GAFR_L(i) = saved_gafr[0][i]; GAFR_U(i) = saved_gafr[1][i]; + GPSR(i * 32) = saved_gplr[i]; + GPCR(i * 32) = ~saved_gplr[i]; GPDR(i * 32) = saved_gpdr[i]; PGSR(i) = saved_pgsr[i]; } diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 6bce78edce7a..4726c246dcdc 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -421,8 +421,11 @@ void __init pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info) pxa_register_device(&pxa27x_device_i2c_power, info); } +static struct pxa_gpio_platform_data pxa27x_gpio_info __initdata = { + .gpio_set_wake = gpio_set_wake, +}; + static struct platform_device *devices[] __initdata = { - &pxa_device_gpio, &pxa27x_device_udc, &pxa_device_pmu, &pxa_device_i2s, @@ -458,6 +461,7 @@ static int __init pxa27x_init(void) register_syscore_ops(&pxa2xx_mfp_syscore_ops); register_syscore_ops(&pxa2xx_clock_syscore_ops); + pxa_register_device(&pxa_device_gpio, &pxa27x_gpio_info); ret = platform_add_devices(devices, ARRAY_SIZE(devices)); } diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig index 0f3a327ebcaa..b34287ab5afd 100644 --- a/arch/arm/mach-s3c24xx/Kconfig +++ b/arch/arm/mach-s3c24xx/Kconfig @@ -111,10 +111,6 @@ config S3C24XX_SETUP_TS help Compile in platform device definition for Samsung TouchScreen. 
-# cpu-specific sections - -if CPU_S3C2410 - config S3C2410_DMA bool depends on S3C24XX_DMA && (CPU_S3C2410 || CPU_S3C2442) @@ -127,6 +123,10 @@ config S3C2410_PM help Power Management code common to S3C2410 and better +# cpu-specific sections + +if CPU_S3C2410 + config S3C24XX_SIMTEC_NOR bool help diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c index a8933de3d627..32395664e879 100644 --- a/arch/arm/mach-s5pv210/mach-goni.c +++ b/arch/arm/mach-s5pv210/mach-goni.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -765,6 +766,7 @@ static void __init goni_pmic_init(void) /* MoviNAND */ static struct s3c_sdhci_platdata goni_hsmmc0_data __initdata = { .max_width = 4, + .host_caps2 = MMC_CAP2_BROKEN_VOLTAGE, .cd_type = S3C_SDHCI_CD_PERMANENT, }; diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c index 7c524b4e415d..16be4c56abe3 100644 --- a/arch/arm/mach-sa1100/generic.c +++ b/arch/arm/mach-sa1100/generic.c @@ -306,7 +306,7 @@ void sa11x0_register_irda(struct irda_platform_data *irda) } static struct resource sa1100_rtc_resources[] = { - DEFINE_RES_MEM(0x90010000, 0x9001003f), + DEFINE_RES_MEM(0x90010000, 0x40), DEFINE_RES_IRQ_NAMED(IRQ_RTC1Hz, "rtc 1Hz"), DEFINE_RES_IRQ_NAMED(IRQ_RTCAlrm, "rtc alarm"), }; diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c index cb224a344af0..0891ec6e27f5 100644 --- a/arch/arm/mach-shmobile/board-ag5evm.c +++ b/arch/arm/mach-shmobile/board-ag5evm.c @@ -365,23 +365,13 @@ static struct platform_device mipidsi0_device = { }; /* SDHI0 */ -static irqreturn_t ag5evm_sdhi0_gpio_cd(int irq, void *arg) -{ - struct device *dev = arg; - struct sh_mobile_sdhi_info *info = dev->platform_data; - struct tmio_mmc_data *pdata = info->pdata; - - tmio_mmc_cd_wakeup(pdata); - - return IRQ_HANDLED; -} - static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, - .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_USE_GPIO_CD, .tmio_caps = MMC_CAP_SD_HIGHSPEED, .tmio_ocr_mask = MMC_VDD_27_28 | MMC_VDD_28_29, + .cd_gpio = GPIO_PORT251, }; static struct resource sdhi0_resources[] = { @@ -557,7 +547,6 @@ static void __init ag5evm_init(void) lcd_backlight_reset(); /* enable SDHI0 on CN15 [SD I/F] */ - gpio_request(GPIO_FN_SDHICD0, NULL); gpio_request(GPIO_FN_SDHIWP0, NULL); gpio_request(GPIO_FN_SDHICMD0, NULL); gpio_request(GPIO_FN_SDHICLK0, NULL); @@ -566,13 +555,6 @@ static void __init ag5evm_init(void) gpio_request(GPIO_FN_SDHID0_1, NULL); gpio_request(GPIO_FN_SDHID0_0, NULL); - if (!request_irq(intcs_evt2irq(0x3c0), ag5evm_sdhi0_gpio_cd, - IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, - "sdhi0 cd", &sdhi0_device.dev)) - sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD; - else - pr_warn("Unable to setup SDHI0 GPIO IRQ\n"); - /* enable SDHI1 on CN4 [WLAN I/F] */ gpio_request(GPIO_FN_SDHICLK1, NULL); gpio_request(GPIO_FN_SDHICMD1_PU, NULL); diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index f49e28abe0ab..8c6202bb6aeb 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -1011,21 +1011,12 @@ static int slot_cn7_get_cd(struct platform_device *pdev) } /* SDHI0 */ -static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg) -{ - struct device *dev = arg; - struct sh_mobile_sdhi_info *info = dev->platform_data; - struct tmio_mmc_data *pdata = info->pdata; - - 
tmio_mmc_cd_wakeup(pdata); - - return IRQ_HANDLED; -} - static struct sh_mobile_sdhi_info sdhi0_info = { .dma_slave_tx = SHDMA_SLAVE_SDHI0_TX, .dma_slave_rx = SHDMA_SLAVE_SDHI0_RX, + .tmio_flags = TMIO_MMC_USE_GPIO_CD, .tmio_caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, + .cd_gpio = GPIO_PORT172, }; static struct resource sdhi0_resources[] = { @@ -1384,7 +1375,6 @@ static void __init mackerel_init(void) { u32 srcr4; struct clk *clk; - int ret; /* External clock source */ clk_set_rate(&sh7372_dv_clki_clk, 27000000); @@ -1481,7 +1471,6 @@ static void __init mackerel_init(void) irq_set_irq_type(IRQ21, IRQ_TYPE_LEVEL_HIGH); /* enable SDHI0 */ - gpio_request(GPIO_FN_SDHICD0, NULL); gpio_request(GPIO_FN_SDHIWP0, NULL); gpio_request(GPIO_FN_SDHICMD0, NULL); gpio_request(GPIO_FN_SDHICLK0, NULL); @@ -1490,13 +1479,6 @@ static void __init mackerel_init(void) gpio_request(GPIO_FN_SDHID0_1, NULL); gpio_request(GPIO_FN_SDHID0_0, NULL); - ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd, - IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev); - if (!ret) - sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD; - else - pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret); - #if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE) /* enable SDHI1 */ gpio_request(GPIO_FN_SDHICMD1, NULL); diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S index 6ac015c89206..b202c1272526 100644 --- a/arch/arm/mach-shmobile/headsmp.S +++ b/arch/arm/mach-shmobile/headsmp.S @@ -16,6 +16,59 @@ __CPUINIT +/* Cache invalidation nicked from arch/arm/mach-imx/head-v7.S, thanks! + * + * The secondary kernel init calls v7_flush_dcache_all before it enables + * the L1; however, the L1 comes out of reset in an undefined state, so + * the clean + invalidate performed by v7_flush_dcache_all causes a bunch + * of cache lines with uninitialized data and uninitialized tags to get + * written out to memory, which does really unpleasant things to the main + * processor. We fix this by performing an invalidate, rather than a + * clean + invalidate, before jumping into the kernel. + * + * This function is cloned from arch/arm/mach-tegra/headsmp.S, and needs + * to be called for both secondary cores startup and primary core resume + * procedures. Ideally, it should be moved into arch/arm/mm/cache-v7.S. + */ +ENTRY(v7_invalidate_l1) + mov r0, #0 + mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache + mcr p15, 2, r0, c0, c0, 0 + mrc p15, 1, r0, c0, c0, 0 + + ldr r1, =0x7fff + and r2, r1, r0, lsr #13 + + ldr r1, =0x3ff + + and r3, r1, r0, lsr #3 @ NumWays - 1 + add r2, r2, #1 @ NumSets + + and r0, r0, #0x7 + add r0, r0, #4 @ SetShift + + clz r1, r3 @ WayShift + add r4, r3, #1 @ NumWays +1: sub r2, r2, #1 @ NumSets-- + mov r3, r4 @ Temp = NumWays +2: subs r3, r3, #1 @ Temp-- + mov r5, r3, lsl r1 + mov r6, r2, lsl r0 + orr r5, r5, r6 @ Reg = (Temp< write orreq r1, r1, #1 << 11 @ yes.
+#endif b do_DataAbort diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index a53fd2aaa2f4..2a8e380501e8 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -32,6 +32,7 @@ static void __iomem *l2x0_base; static DEFINE_RAW_SPINLOCK(l2x0_lock); static u32 l2x0_way_mask; /* Bitmask of active ways */ static u32 l2x0_size; +static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; struct l2x0_regs l2x0_saved_regs; @@ -61,12 +62,7 @@ static inline void cache_sync(void) { void __iomem *base = l2x0_base; -#ifdef CONFIG_PL310_ERRATA_753970 - /* write to an unmmapped register */ - writel_relaxed(0, base + L2X0_DUMMY_REG); -#else - writel_relaxed(0, base + L2X0_CACHE_SYNC); -#endif + writel_relaxed(0, base + sync_reg_offset); cache_wait(base + L2X0_CACHE_SYNC, 1); } @@ -85,10 +81,13 @@ static inline void l2x0_inv_line(unsigned long addr) } #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) +static inline void debug_writel(unsigned long val) +{ + if (outer_cache.set_debug) + outer_cache.set_debug(val); +} -#define debug_writel(val) outer_cache.set_debug(val) - -static void l2x0_set_debug(unsigned long val) +static void pl310_set_debug(unsigned long val) { writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); } @@ -98,7 +97,7 @@ static inline void debug_writel(unsigned long val) { } -#define l2x0_set_debug NULL +#define pl310_set_debug NULL #endif #ifdef CONFIG_PL310_ERRATA_588369 @@ -331,6 +330,11 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) else ways = 8; type = "L310"; +#ifdef CONFIG_PL310_ERRATA_753970 + /* Unmapped register. */ + sync_reg_offset = L2X0_DUMMY_REG; +#endif + outer_cache.set_debug = pl310_set_debug; break; case L2X0_CACHE_ID_PART_L210: ways = (aux >> 13) & 0xf; @@ -379,7 +383,6 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) outer_cache.flush_all = l2x0_flush_all; outer_cache.inv_all = l2x0_inv_all; outer_cache.disable = l2x0_disable; - outer_cache.set_debug = l2x0_set_debug; printk(KERN_INFO "%s cache controller enabled\n", type); printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 595079fa9d1d..8f5813bbffb5 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -293,11 +293,11 @@ EXPORT_SYMBOL(pfn_valid); #endif #ifndef CONFIG_SPARSEMEM -static void arm_memory_present(void) +static void __init arm_memory_present(void) { } #else -static void arm_memory_present(void) +static void __init arm_memory_present(void) { struct memblock_region *reg; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index b86f8933ff91..2c7cf2f9c837 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -618,8 +618,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr, } } -static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, - unsigned long phys, const struct mem_type *type) +static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, + unsigned long end, unsigned long phys, const struct mem_type *type) { pud_t *pud = pud_offset(pgd, addr); unsigned long next; diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c index ecdb3da0dea9..c58d896cd5c3 100644 --- a/arch/arm/plat-omap/dma.c +++ b/arch/arm/plat-omap/dma.c @@ -916,6 +916,13 @@ void omap_start_dma(int lch) l |= OMAP_DMA_CCR_BUFFERING_DISABLE; l |= OMAP_DMA_CCR_EN; + /* + * As dma_write() uses IO accessors which are weakly ordered, there + * is no guarantee that data in coherent DMA 
memory will be visible + * to the DMA device. Add a memory barrier here to ensure that any + * such data is visible prior to enabling DMA. + */ + mb(); p->dma_write(l, CCR, lch); dma_chan[lch].flags |= OMAP_DMA_ACTIVE; @@ -965,6 +972,13 @@ void omap_stop_dma(int lch) p->dma_write(l, CCR, lch); } + /* + * Ensure that data transferred by DMA is visible to any access + * after DMA has been disabled. This is important for coherent + * DMA regions. + */ + mb(); + if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) { int next_lch, cur_lch = lch; char dma_chan_link_map[dma_lch_count]; diff --git a/arch/arm/plat-omap/include/plat/omap_hwmod.h b/arch/arm/plat-omap/include/plat/omap_hwmod.h index 8070145ccb98..3f26db4ee8e6 100644 --- a/arch/arm/plat-omap/include/plat/omap_hwmod.h +++ b/arch/arm/plat-omap/include/plat/omap_hwmod.h @@ -305,6 +305,7 @@ struct omap_hwmod_sysc_fields { * @rev_offs: IP block revision register offset (from module base addr) * @sysc_offs: OCP_SYSCONFIG register offset (from module base addr) * @syss_offs: OCP_SYSSTATUS register offset (from module base addr) + * @srst_udelay: Delay needed after doing a softreset in usecs * @idlemodes: One or more of {SIDLE,MSTANDBY}_{OFF,FORCE,SMART} * @sysc_flags: SYS{C,S}_HAS* flags indicating SYSCONFIG bits supported * @clockact: the default value of the module CLOCKACTIVITY bits @@ -330,9 +331,10 @@ struct omap_hwmod_class_sysconfig { u16 sysc_offs; u16 syss_offs; u16 sysc_flags; + struct omap_hwmod_sysc_fields *sysc_fields; + u8 srst_udelay; u8 idlemodes; u8 clockact; - struct omap_hwmod_sysc_fields *sysc_fields; }; /** diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index eec98afa0f83..f9a8c5341ee9 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c @@ -348,7 +348,6 @@ u32 omap3_configure_core_dpll(u32 m2, u32 unlock_dll, u32 f, u32 inc, sdrc_actim_ctrl_b_1, sdrc_mr_1); } -#ifdef CONFIG_PM void omap3_sram_restore_context(void) { omap_sram_ceil = omap_sram_base + omap_sram_size; @@ -358,17 +357,18 @@ void omap3_sram_restore_context(void) omap3_sram_configure_core_dpll_sz); omap_push_sram_idle(); } -#endif /* CONFIG_PM */ - -#endif /* CONFIG_ARCH_OMAP3 */ static inline int omap34xx_sram_init(void) { -#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) omap3_sram_restore_context(); -#endif return 0; } +#else +static inline int omap34xx_sram_init(void) +{ + return 0; +} +#endif /* CONFIG_ARCH_OMAP3 */ static inline int am33xx_sram_init(void) { diff --git a/arch/arm/plat-samsung/include/plat/sdhci.h b/arch/arm/plat-samsung/include/plat/sdhci.h index 317e246ffc56..e834c5ef437c 100644 --- a/arch/arm/plat-samsung/include/plat/sdhci.h +++ b/arch/arm/plat-samsung/include/plat/sdhci.h @@ -18,6 +18,8 @@ #ifndef __PLAT_S3C_SDHCI_H #define __PLAT_S3C_SDHCI_H __FILE__ +#include + struct platform_device; struct mmc_host; struct mmc_card; @@ -356,4 +358,30 @@ static inline void exynos4_default_sdhci3(void) { } #endif /* CONFIG_EXYNOS4_SETUP_SDHCI */ +static inline void s3c_sdhci_setname(int id, char *name) +{ + switch (id) { +#ifdef CONFIG_S3C_DEV_HSMMC + case 0: + s3c_device_hsmmc0.name = name; + break; +#endif +#ifdef CONFIG_S3C_DEV_HSMMC1 + case 1: + s3c_device_hsmmc1.name = name; + break; +#endif +#ifdef CONFIG_S3C_DEV_HSMMC2 + case 2: + s3c_device_hsmmc2.name = name; + break; +#endif +#ifdef CONFIG_S3C_DEV_HSMMC3 + case 3: + s3c_device_hsmmc3.name = name; + break; +#endif + } +} + #endif /* __PLAT_S3C_SDHCI_H */ diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 
858748eaa144..bc683b8219b5 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -17,6 +17,8 @@ #include #include #include +#include +#include #include #include @@ -528,6 +530,103 @@ void vfp_flush_hwstate(struct thread_info *thread) put_cpu(); } +/* + * Save the current VFP state into the provided structures and prepare + * for entry into a new function (signal handler). + */ +int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp, + struct user_vfp_exc __user *ufp_exc) +{ + struct thread_info *thread = current_thread_info(); + struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; + int err = 0; + + /* Ensure that the saved hwstate is up-to-date. */ + vfp_sync_hwstate(thread); + + /* + * Copy the floating point registers. There can be unused + * registers; see asm/hwcap.h for details. + */ + err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs, + sizeof(hwstate->fpregs)); + /* + * Copy the status and control register. + */ + __put_user_error(hwstate->fpscr, &ufp->fpscr, err); + + /* + * Copy the exception registers. + */ + __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err); + __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err); + __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err); + + if (err) + return -EFAULT; + + /* Ensure that VFP is disabled. */ + vfp_flush_hwstate(thread); + + /* + * As per the PCS, clear the length and stride bits for function + * entry. + */ + hwstate->fpscr &= ~(FPSCR_LENGTH_MASK | FPSCR_STRIDE_MASK); + + /* + * Disable VFP in the hwstate so that we can detect if it gets + * used. + */ + hwstate->fpexc &= ~FPEXC_EN; + return 0; +} + +/* Sanitise and restore the current VFP state from the provided structures. */ +int vfp_restore_user_hwstate(struct user_vfp __user *ufp, + struct user_vfp_exc __user *ufp_exc) +{ + struct thread_info *thread = current_thread_info(); + struct vfp_hard_struct *hwstate = &thread->vfpstate.hard; + unsigned long fpexc; + int err = 0; + + /* + * If VFP has been used, then disable it to avoid corrupting + * the new thread state. + */ + if (hwstate->fpexc & FPEXC_EN) + vfp_flush_hwstate(thread); + + /* + * Copy the floating point registers. There can be unused + * registers; see asm/hwcap.h for details. + */ + err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs, + sizeof(hwstate->fpregs)); + /* + * Copy the status and control register. + */ + __get_user_error(hwstate->fpscr, &ufp->fpscr, err); + + /* + * Sanitise and restore the exception registers. + */ + __get_user_error(fpexc, &ufp_exc->fpexc, err); + + /* Ensure the VFP is enabled. */ + fpexc |= FPEXC_EN; + + /* Ensure FPINST2 is invalid and the exception flag is cleared. */ + fpexc &= ~(FPEXC_EX | FPEXC_FP2V); + hwstate->fpexc = fpexc; + + __get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err); + __get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err); + + return err ? -EFAULT : 0; +} + /* * VFP hardware can lose all context when a CPU goes offline.
* As we will be running in SMP mode with CPU hotplug, we will save the diff --git a/arch/blackfin/mach-bf538/boards/ezkit.c b/arch/blackfin/mach-bf538/boards/ezkit.c index 1633a6f306c0..85038f54354d 100644 --- a/arch/blackfin/mach-bf538/boards/ezkit.c +++ b/arch/blackfin/mach-bf538/boards/ezkit.c @@ -38,7 +38,7 @@ static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; -#endif +#endif /* CONFIG_RTC_DRV_BFIN */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 @@ -100,7 +100,7 @@ static struct platform_device bfin_uart0_device = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; -#endif +#endif /* CONFIG_SERIAL_BFIN_UART0 */ #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { @@ -148,7 +148,7 @@ static struct platform_device bfin_uart1_device = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; -#endif +#endif /* CONFIG_SERIAL_BFIN_UART1 */ #ifdef CONFIG_SERIAL_BFIN_UART2 static struct resource bfin_uart2_resources[] = { { @@ -196,8 +196,8 @@ static struct platform_device bfin_uart2_device = { .platform_data = &bfin_uart2_peripherals, /* Passed to driver */ }, }; -#endif -#endif +#endif /* CONFIG_SERIAL_BFIN_UART2 */ +#endif /* CONFIG_SERIAL_BFIN */ #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 @@ -224,7 +224,7 @@ static struct platform_device bfin_sir0_device = { .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; -#endif +#endif /* CONFIG_BFIN_SIR0 */ #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { @@ -249,7 +249,7 @@ static struct platform_device bfin_sir1_device = { .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; -#endif +#endif /* CONFIG_BFIN_SIR1 */ #ifdef CONFIG_BFIN_SIR2 static struct resource bfin_sir2_resources[] = { { @@ -274,8 +274,8 @@ static struct platform_device bfin_sir2_device = { .num_resources = ARRAY_SIZE(bfin_sir2_resources), .resource = bfin_sir2_resources, }; -#endif -#endif +#endif /* CONFIG_BFIN_SIR2 */ +#endif /* CONFIG_BFIN_SIR */ #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART @@ -311,7 +311,7 @@ static struct platform_device bfin_sport0_uart_device = { .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ }, }; -#endif +#endif /* CONFIG_SERIAL_BFIN_SPORT0_UART */ #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART static struct resource bfin_sport1_uart_resources[] = { { @@ -345,7 +345,7 @@ static struct platform_device bfin_sport1_uart_device = { .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ }, }; -#endif +#endif /* CONFIG_SERIAL_BFIN_SPORT1_UART */ #ifdef CONFIG_SERIAL_BFIN_SPORT2_UART static struct resource bfin_sport2_uart_resources[] = { { @@ -379,7 +379,7 @@ static struct platform_device bfin_sport2_uart_device = { .platform_data = &bfin_sport2_peripherals, /* Passed to driver */ }, }; -#endif +#endif /* CONFIG_SERIAL_BFIN_SPORT2_UART */ #ifdef CONFIG_SERIAL_BFIN_SPORT3_UART static struct resource bfin_sport3_uart_resources[] = { { @@ -413,8 +413,8 @@ static struct platform_device bfin_sport3_uart_device = { .platform_data = &bfin_sport3_peripherals, /* Passed to driver */ }, }; -#endif -#endif +#endif /* CONFIG_SERIAL_BFIN_SPORT3_UART */ +#endif /* CONFIG_SERIAL_BFIN_SPORT */ #if defined(CONFIG_CAN_BFIN) || defined(CONFIG_CAN_BFIN_MODULE) static unsigned short 
bfin_can_peripherals[] = { @@ -452,7 +452,7 @@ static struct platform_device bfin_can_device = { .platform_data = &bfin_can_peripherals, /* Passed to driver */ }, }; -#endif +#endif /* CONFIG_CAN_BFIN */ /* * USB-LAN EzExtender board @@ -488,7 +488,7 @@ static struct platform_device smc91x_device = { .platform_data = &smc91x_info, }, }; -#endif +#endif /* CONFIG_SMC91X */ #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) /* all SPI peripherals info goes here */ @@ -518,7 +518,8 @@ static struct flash_platform_data bfin_spi_flash_data = { static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ }; -#endif +#endif /* CONFIG_MTD_M25P80 */ +#endif /* CONFIG_SPI_BFIN5XX */ #if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE) #include @@ -535,7 +536,7 @@ static const struct ad7879_platform_data bfin_ad7879_ts_info = { .gpio_export = 1, /* Export GPIO to gpiolib */ .gpio_base = -1, /* Dynamic allocation */ }; -#endif +#endif /* CONFIG_TOUCHSCREEN_AD7879 */ #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) #include @@ -564,7 +565,7 @@ static struct platform_device bfin_lq035q1_device = { .platform_data = &bfin_lq035q1_data, }, }; -#endif +#endif /* CONFIG_FB_BFIN_LQ035Q1 */ static struct spi_board_info bf538_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) \ @@ -579,7 +580,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = { .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, -#endif +#endif /* CONFIG_MTD_M25P80 */ #if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) { .modalias = "ad7879", @@ -590,7 +591,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = { .chip_select = 1, .mode = SPI_CPHA | SPI_CPOL, }, -#endif +#endif /* CONFIG_TOUCHSCREEN_AD7879_SPI */ #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) { .modalias = "bfin-lq035q1-spi", @@ -599,7 +600,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = { .chip_select = 2, .mode = SPI_CPHA | SPI_CPOL, }, -#endif +#endif /* CONFIG_FB_BFIN_LQ035Q1 */ #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) { .modalias = "spidev", @@ -607,7 +608,7 @@ static struct spi_board_info bf538_spi_board_info[] __initdata = { .bus_num = 0, .chip_select = 1, }, -#endif +#endif /* CONFIG_SPI_SPIDEV */ }; /* SPI (0) */ @@ -716,8 +717,6 @@ static struct platform_device bf538_spi_master2 = { }, }; -#endif /* spi master and devices */ - #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) static struct resource bfin_twi0_resource[] = { [0] = { @@ -759,8 +758,8 @@ static struct platform_device i2c_bfin_twi1_device = { .num_resources = ARRAY_SIZE(bfin_twi1_resource), .resource = bfin_twi1_resource, }; -#endif -#endif +#endif /* CONFIG_BF542 */ +#endif /* CONFIG_I2C_BLACKFIN_TWI */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c index 37302218ca4a..0f2367cc5493 100644 --- a/arch/hexagon/kernel/dma.c +++ b/arch/hexagon/kernel/dma.c @@ -22,6 +22,7 @@ #include #include #include +#include struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); diff --git a/arch/hexagon/kernel/process.c b/arch/hexagon/kernel/process.c index 18c4f0b0f4ba..ff02821bfb7e 100644 --- a/arch/hexagon/kernel/process.c +++ b/arch/hexagon/kernel/process.c @@ -1,7 +1,7 @@ /* * Process 
creation support for Hexagon * - * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -88,7 +88,7 @@ void (*idle_sleep)(void) = default_idle; void cpu_idle(void) { while (1) { - tick_nohz_stop_sched_tick(1); + tick_nohz_idle_enter(); local_irq_disable(); while (!need_resched()) { idle_sleep(); @@ -97,7 +97,7 @@ void cpu_idle(void) local_irq_disable(); } local_irq_enable(); - tick_nohz_restart_sched_tick(); + tick_nohz_idle_exit(); schedule(); } } diff --git a/arch/hexagon/kernel/ptrace.c b/arch/hexagon/kernel/ptrace.c index 32342de1a79c..96c3b2c4dbad 100644 --- a/arch/hexagon/kernel/ptrace.c +++ b/arch/hexagon/kernel/ptrace.c @@ -28,6 +28,7 @@ #include #include #include +#include #include diff --git a/arch/hexagon/kernel/smp.c b/arch/hexagon/kernel/smp.c index 9b44a9e2d05a..1298141874a3 100644 --- a/arch/hexagon/kernel/smp.c +++ b/arch/hexagon/kernel/smp.c @@ -1,7 +1,7 @@ /* * SMP support for Hexagon * - * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -28,6 +28,7 @@ #include #include #include +#include #include /* timer_interrupt */ #include @@ -177,7 +178,12 @@ void __cpuinit start_secondary(void) printk(KERN_INFO "%s cpu %d\n", __func__, current_thread_info()->cpu); + notify_cpu_starting(cpu); + + ipi_call_lock(); set_cpu_online(cpu, true); + ipi_call_unlock(); + local_irq_enable(); cpu_idle(); diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c index 6bee15c9c113..5d9b33b67935 100644 --- a/arch/hexagon/kernel/time.c +++ b/arch/hexagon/kernel/time.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include diff --git a/arch/hexagon/kernel/vdso.c b/arch/hexagon/kernel/vdso.c index f212a453b527..5d39f42f7085 100644 --- a/arch/hexagon/kernel/vdso.c +++ b/arch/hexagon/kernel/vdso.c @@ -21,6 +21,7 @@ #include #include #include +#include #include diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 9d0fd7d5bb82..f00ba025375d 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -604,12 +604,6 @@ pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f) spin_unlock(&(x)->ctx_lock); } -static inline unsigned int -pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct) -{ - return do_munmap(mm, addr, len); -} - static inline unsigned long pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec) { @@ -1458,8 +1452,9 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu) * a PROTECT_CTX() section. 
*/ static int -pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size) +pfm_remove_smpl_mapping(void *vaddr, unsigned long size) { + struct task_struct *task = current; int r; /* sanity checks */ @@ -1473,13 +1468,8 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz /* * does the actual unmapping */ - down_write(&task->mm->mmap_sem); + r = vm_munmap((unsigned long)vaddr, size); - DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size)); - - r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0); - - up_write(&task->mm->mmap_sem); if (r !=0) { printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size); } @@ -1945,7 +1935,7 @@ pfm_flush(struct file *filp, fl_owner_t id) * because some VM function reenables interrupts. * */ - if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size); + if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size); return 0; } diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c index f5104b7c52cd..463fb3bbe11e 100644 --- a/arch/ia64/kvm/kvm-ia64.c +++ b/arch/ia64/kvm/kvm-ia64.c @@ -1174,7 +1174,7 @@ out: bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { - return irqchip_in_kernel(vcpu->kcm) == (vcpu->arch.apic != NULL); + return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); } int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) diff --git a/arch/m68k/configs/m5275evb_defconfig b/arch/m68k/configs/m5275evb_defconfig index 33c32aeca12b..a1230e82bb1e 100644 --- a/arch/m68k/configs/m5275evb_defconfig +++ b/arch/m68k/configs/m5275evb_defconfig @@ -49,7 +49,6 @@ CONFIG_BLK_DEV_RAM=y CONFIG_NETDEVICES=y CONFIG_NET_ETHERNET=y CONFIG_FEC=y -CONFIG_FEC2=y # CONFIG_NETDEV_1000 is not set # CONFIG_NETDEV_10000 is not set CONFIG_PPP=y diff --git a/arch/m68k/platform/520x/config.c b/arch/m68k/platform/520x/config.c index 235947844f27..09df4b89e8be 100644 --- a/arch/m68k/platform/520x/config.c +++ b/arch/m68k/platform/520x/config.c @@ -22,7 +22,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m520x_qspi_init(void) { @@ -35,7 +35,7 @@ static void __init m520x_qspi_init(void) writew(par, MCF_GPIO_PAR_UART); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -79,7 +79,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m520x_uarts_init(); m520x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m520x_qspi_init(); #endif } diff --git a/arch/m68k/platform/523x/config.c b/arch/m68k/platform/523x/config.c index c8b405d5a961..d47dfd8f50a2 100644 --- a/arch/m68k/platform/523x/config.c +++ b/arch/m68k/platform/523x/config.c @@ -22,7 +22,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m523x_qspi_init(void) { @@ -36,7 +36,7 @@ static void __init m523x_qspi_init(void) writew(par, MCFGPIO_PAR_TIMER); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -58,7 +58,7 @@ void __init config_BSP(char *commandp, int size) { mach_sched_init = hw_timer_init; 
m523x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m523x_qspi_init(); #endif } diff --git a/arch/m68k/platform/5249/config.c b/arch/m68k/platform/5249/config.c index bbf05135bb98..300e729a58d0 100644 --- a/arch/m68k/platform/5249/config.c +++ b/arch/m68k/platform/5249/config.c @@ -51,7 +51,7 @@ static struct platform_device *m5249_devices[] __initdata = { /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m5249_qspi_init(void) { @@ -61,7 +61,7 @@ static void __init m5249_qspi_init(void) mcf_mapirq2imr(MCF_IRQ_QSPI, MCFINTC_QSPI); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -90,7 +90,7 @@ void __init config_BSP(char *commandp, int size) #ifdef CONFIG_M5249C3 m5249_smc91x_init(); #endif -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m5249_qspi_init(); #endif } diff --git a/arch/m68k/platform/527x/config.c b/arch/m68k/platform/527x/config.c index 7ed848c3b848..b3cb378c5e94 100644 --- a/arch/m68k/platform/527x/config.c +++ b/arch/m68k/platform/527x/config.c @@ -23,7 +23,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m527x_qspi_init(void) { @@ -42,7 +42,7 @@ static void __init m527x_qspi_init(void) #endif } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -74,9 +74,7 @@ static void __init m527x_fec_init(void) writew(par | 0xf00, MCF_IPSBAR + 0x100082); v = readb(MCF_IPSBAR + 0x100078); writeb(v | 0xc0, MCF_IPSBAR + 0x100078); -#endif -#ifdef CONFIG_FEC2 /* Set multi-function pins to ethernet mode for fec1 */ par = readw(MCF_IPSBAR + 0x100082); writew(par | 0xa0, MCF_IPSBAR + 0x100082); @@ -92,7 +90,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m527x_uarts_init(); m527x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m527x_qspi_init(); #endif } diff --git a/arch/m68k/platform/528x/config.c b/arch/m68k/platform/528x/config.c index d4492926614c..c5f11ba49be5 100644 --- a/arch/m68k/platform/528x/config.c +++ b/arch/m68k/platform/528x/config.c @@ -24,7 +24,7 @@ /***************************************************************************/ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m528x_qspi_init(void) { @@ -32,7 +32,7 @@ static void __init m528x_qspi_init(void) __raw_writeb(0x07, MCFGPIO_PQSPAR); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -98,7 +98,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m528x_uarts_init(); m528x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m528x_qspi_init(); #endif } diff --git a/arch/m68k/platform/532x/config.c b/arch/m68k/platform/532x/config.c index 2bec3477b739..37082d02f2bd 100644 --- a/arch/m68k/platform/532x/config.c +++ b/arch/m68k/platform/532x/config.c @@ -30,7 +30,7 @@ /***************************************************************************/ -#ifdef 
CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) static void __init m532x_qspi_init(void) { @@ -38,7 +38,7 @@ static void __init m532x_qspi_init(void) writew(0x01f0, MCF_GPIO_PAR_QSPI); } -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ /***************************************************************************/ @@ -77,7 +77,7 @@ void __init config_BSP(char *commandp, int size) mach_sched_init = hw_timer_init; m532x_uarts_init(); m532x_fec_init(); -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) m532x_qspi_init(); #endif diff --git a/arch/m68k/platform/68EZ328/Makefile b/arch/m68k/platform/68EZ328/Makefile index ee97735a242c..b44d799b1115 100644 --- a/arch/m68k/platform/68EZ328/Makefile +++ b/arch/m68k/platform/68EZ328/Makefile @@ -3,9 +3,3 @@ # obj-y := config.o - -extra-y := bootlogo.rh - -$(obj)/bootlogo.rh: $(src)/bootlogo.h - perl $(src)/../68328/bootlogo.pl < $(src)/bootlogo.h \ - > $(obj)/bootlogo.rh diff --git a/arch/m68k/platform/68VZ328/Makefile b/arch/m68k/platform/68VZ328/Makefile index 447ffa0fd7c7..a49d75e65489 100644 --- a/arch/m68k/platform/68VZ328/Makefile +++ b/arch/m68k/platform/68VZ328/Makefile @@ -3,14 +3,9 @@ # obj-y := config.o -logo-$(UCDIMM) := bootlogo.rh -logo-$(DRAGEN2) := screen.h -extra-y := $(logo-y) - -$(obj)/bootlogo.rh: $(src)/../68EZ328/bootlogo.h - perl $(src)/bootlogo.pl < $(src)/../68328/bootlogo.h > $(obj)/bootlogo.rh +extra-$(DRAGEN2):= screen.h $(obj)/screen.h: $(src)/screen.xbm $(src)/xbm2lcd.pl perl $(src)/xbm2lcd.pl < $(src)/screen.xbm > $(obj)/screen.h -clean-files := $(obj)/screen.h $(obj)/bootlogo.rh +clean-files := $(obj)/screen.h diff --git a/arch/m68k/platform/68EZ328/bootlogo.h b/arch/m68k/platform/68VZ328/bootlogo.h similarity index 99% rename from arch/m68k/platform/68EZ328/bootlogo.h rename to arch/m68k/platform/68VZ328/bootlogo.h index e842bdae5839..b38e2b255142 100644 --- a/arch/m68k/platform/68EZ328/bootlogo.h +++ b/arch/m68k/platform/68VZ328/bootlogo.h @@ -1,6 +1,6 @@ #define splash_width 640 #define splash_height 480 -static unsigned char splash_bits[] = { +unsigned char __attribute__ ((aligned(16))) bootlogo_bits[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, diff --git a/arch/m68k/platform/coldfire/device.c b/arch/m68k/platform/coldfire/device.c index fa50c48292ff..3aa77ddea89d 100644 --- a/arch/m68k/platform/coldfire/device.c +++ b/arch/m68k/platform/coldfire/device.c @@ -114,14 +114,14 @@ static struct resource mcf_fec1_resources[] = { static struct platform_device mcf_fec1 = { .name = "fec", - .id = 0, + .id = 1, .num_resources = ARRAY_SIZE(mcf_fec1_resources), .resource = mcf_fec1_resources, }; #endif /* MCFFEC_BASE1 */ #endif /* CONFIG_FEC */ -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) /* * The ColdFire QSPI module is an SPI protocol hardware block used * on a number of different ColdFire CPUs. 
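The #ifdef CONFIG_SPI_COLDFIRE_QSPI guards in the ColdFire hunks above and below are converted to IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) because IS_ENABLED(), defined in include/linux/kconfig.h, evaluates to 1 when an option is built in (=y) or built as a module (=m); the QSPI platform devices are therefore registered for modular SPI driver builds as well. A minimal sketch of the pattern, using a made-up CONFIG_FOO symbol and foo_devices_init() helper rather than anything from this patch:

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_FOO)	/* true for CONFIG_FOO=y and CONFIG_FOO=m */
static void __init foo_devices_init(void)
{
	/* register the platform devices the foo driver binds to */
}
#else
static inline void foo_devices_init(void)
{
}
#endif

IS_ENABLED() also works inside ordinary C expressions, e.g. if (IS_ENABLED(CONFIG_FOO)) foo_devices_init();, which lets the compiler discard the dead branch while still type-checking it.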
@@ -274,7 +274,7 @@ static struct platform_device mcf_qspi = { .resource = mcf_qspi_resources, .dev.platform_data = &mcf_qspi_data, }; -#endif /* CONFIG_SPI_COLDFIRE_QSPI */ +#endif /* IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) */ static struct platform_device *mcf_devices[] __initdata = { &mcf_uart, @@ -284,7 +284,7 @@ static struct platform_device *mcf_devices[] __initdata = { &mcf_fec1, #endif #endif -#ifdef CONFIG_SPI_COLDFIRE_QSPI +#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) &mcf_qspi, #endif }; diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c index e21507052066..9c717bf98ffe 100644 --- a/arch/mips/ath79/dev-wmac.c +++ b/arch/mips/ath79/dev-wmac.c @@ -58,8 +58,8 @@ static void __init ar913x_wmac_setup(void) static int ar933x_wmac_reset(void) { - ath79_device_reset_clear(AR933X_RESET_WMAC); ath79_device_reset_set(AR933X_RESET_WMAC); + ath79_device_reset_clear(AR933X_RESET_WMAC); return 0; } diff --git a/arch/mips/include/asm/mach-jz4740/irq.h b/arch/mips/include/asm/mach-jz4740/irq.h index a865c983c70a..5ad1a9c113c6 100644 --- a/arch/mips/include/asm/mach-jz4740/irq.h +++ b/arch/mips/include/asm/mach-jz4740/irq.h @@ -45,7 +45,7 @@ #define JZ4740_IRQ_LCD JZ4740_IRQ(30) /* 2nd-level interrupts */ -#define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (X)) +#define JZ4740_IRQ_DMA(x) (JZ4740_IRQ(32) + (x)) #define JZ4740_IRQ_INTC_GPIO(x) (JZ4740_IRQ_GPIO0 - (x)) #define JZ4740_IRQ_GPIO(x) (JZ4740_IRQ(48) + (x)) diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 73c0d45798de..9b02cfba7449 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -37,12 +37,6 @@ extern void tlbmiss_handler_setup_pgd(unsigned long pgd); write_c0_xcontext((unsigned long) smp_processor_id() << 51); \ } while (0) - -static inline unsigned long get_current_pgd(void) -{ - return PHYS_TO_XKSEG_CACHED((read_c0_context() >> 11) & ~0xfffUL); -} - #else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/ /* diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 185ca00c4c84..d5a338a1739c 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -257,11 +257,8 @@ asmlinkage int sys_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -286,11 +283,8 @@ asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -362,10 +356,7 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&blocked, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = blocked; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&blocked); sig = restore_sigcontext(®s, &frame->sf_sc); if (sig < 0) @@ -401,10 +392,7 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - 
spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); if (sig < 0) @@ -580,12 +568,7 @@ static int handle_signal(unsigned long sig, siginfo_t *info, if (ret) return ret; - spin_lock_irq(¤t->sighand->siglock); - sigorsets(¤t->blocked, ¤t->blocked, &ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NODEFER)) - sigaddset(¤t->blocked, sig); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + block_sigmask(ka, sig); return ret; } diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index 06b5da392e24..ac3b8d89aae5 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -290,11 +290,8 @@ asmlinkage int sys32_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -318,11 +315,8 @@ asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) return -EFAULT; sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -488,10 +482,7 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&blocked, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = blocked; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&blocked); sig = restore_sigcontext32(®s, &frame->sf_sc); if (sig < 0) @@ -529,10 +520,7 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); sig = restore_sigcontext32(®s, &frame->rs_uc.uc_mcontext); if (sig < 0) diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c index ae29e894ab8d..86eb4b04631c 100644 --- a/arch/mips/kernel/signal_n32.c +++ b/arch/mips/kernel/signal_n32.c @@ -93,11 +93,8 @@ asmlinkage int sysn32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs) sigset_from_compat(&newset, &uset); sigdelsetmask(&newset, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); current->saved_sigmask = current->blocked; - current->blocked = newset; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&newset); current->state = TASK_INTERRUPTIBLE; schedule(); @@ -121,10 +118,7 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); sig = restore_sigcontext(®s, &frame->rs_uc.uc_mcontext); if (sig < 0) diff --git a/arch/parisc/include/asm/hardware.h b/arch/parisc/include/asm/hardware.h index 4e9626836bab..d1d864b81bae 100644 --- a/arch/parisc/include/asm/hardware.h +++ b/arch/parisc/include/asm/hardware.h @@ -2,7 +2,6 @@ #define _PARISC_HARDWARE_H #include -#include #define HWTYPE_ANY_ID PA_HWTYPE_ANY_ID #define HVERSION_ANY_ID 
PA_HVERSION_ANY_ID @@ -95,12 +94,14 @@ struct bc_module { #define HPHW_MC 15 #define HPHW_FAULTY 31 +struct parisc_device_id; /* hardware.c: */ extern const char *parisc_hardware_description(struct parisc_device_id *id); extern enum cpu_type parisc_get_cpu_type(unsigned long hversion); struct pci_dev; +struct hardware_path; /* drivers.c: */ extern struct parisc_device *alloc_pa_dev(unsigned long hpa, diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h index a84cc1f925f6..4e0e7dbf0f3f 100644 --- a/arch/parisc/include/asm/page.h +++ b/arch/parisc/include/asm/page.h @@ -160,5 +160,11 @@ extern int npmem_ranges; #include #include +#include + +#define PAGE0 ((struct zeropage *)__PAGE_OFFSET) + +/* DEFINITION OF THE ZERO-PAGE (PAG0) */ +/* based on work by Jason Eckhardt (jason@equator.com) */ #endif /* _PARISC_PAGE_H */ diff --git a/arch/parisc/include/asm/pdc.h b/arch/parisc/include/asm/pdc.h index 4ca510b3c6f8..7f0f2d23059d 100644 --- a/arch/parisc/include/asm/pdc.h +++ b/arch/parisc/include/asm/pdc.h @@ -343,8 +343,6 @@ #ifdef __KERNEL__ -#include /* for __PAGE_OFFSET */ - extern int pdc_type; /* Values for pdc_type */ @@ -677,11 +675,6 @@ static inline char * os_id_to_string(u16 os_id) { #endif /* __KERNEL__ */ -#define PAGE0 ((struct zeropage *)__PAGE_OFFSET) - -/* DEFINITION OF THE ZERO-PAGE (PAG0) */ -/* based on work by Jason Eckhardt (jason@equator.com) */ - /* flags of the device_path */ #define PF_AUTOBOOT 0x80 #define PF_AUTOSEARCH 0x40 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 22dadeb58695..ee99f2339356 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -44,6 +44,8 @@ struct vm_area_struct; #endif /* !__ASSEMBLY__ */ +#include + #define pte_ERROR(e) \ printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) #define pmd_ERROR(e) \ diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 804aa28ab1d6..3516e0b27044 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -1,6 +1,8 @@ #ifndef __ASM_SPINLOCK_H #define __ASM_SPINLOCK_H +#include +#include #include #include diff --git a/arch/parisc/kernel/pdc_cons.c b/arch/parisc/kernel/pdc_cons.c index 4f004596a6e7..47341aa208f2 100644 --- a/arch/parisc/kernel/pdc_cons.c +++ b/arch/parisc/kernel/pdc_cons.c @@ -50,6 +50,7 @@ #include #include #include +#include /* for PAGE0 */ #include /* for iodc_call() proto and friends */ static DEFINE_SPINLOCK(pdc_console_lock); @@ -104,7 +105,7 @@ static int pdc_console_tty_open(struct tty_struct *tty, struct file *filp) static void pdc_console_tty_close(struct tty_struct *tty, struct file *filp) { - if (!tty->count) { + if (tty->count == 1) { del_timer_sync(&pdc_console_timer); tty_port_tty_set(&tty_port, NULL); } diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 7c0774397b89..70e105d62423 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi new file mode 100644 index 000000000000..1cf0b77b1efe --- /dev/null +++ b/arch/powerpc/boot/dts/fsl/pq3-mpic-message-B.dtsi @@ -0,0 +1,43 @@ +/* + * PQ3 MPIC Message (Group B) device tree stub [ controller @ offset 0x42400 ] + * + * Copyright 2012 Freescale Semiconductor Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Freescale Semiconductor nor the + * names of its contributors may be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation, either version 2 of that License or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +message@42400 { + compatible = "fsl,mpic-v3.1-msgr"; + reg = <0x42400 0x200>; + interrupts = < + 0xb4 2 0 0 + 0xb5 2 0 0 + 0xb6 2 0 0 + 0xb7 2 0 0>; +}; diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi index fdedf7b1fe0f..71c30eb10056 100644 --- a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi +++ b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi @@ -53,6 +53,16 @@ timer@41100 { 3 0 3 0>; }; +message@41400 { + compatible = "fsl,mpic-v3.1-msgr"; + reg = <0x41400 0x200>; + interrupts = < + 0xb0 2 0 0 + 0xb1 2 0 0 + 0xb2 2 0 0 + 0xb3 2 0 0>; +}; + msi@41600 { compatible = "fsl,mpic-msi"; reg = <0x41600 0x80>; diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 548da3aa0a30..d58fc4e4149c 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h @@ -288,13 +288,6 @@ label##_hv: \ /* Exception addition: Hard disable interrupts */ #define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11) -/* Exception addition: Keep interrupt state */ -#define ENABLE_INTS \ - ld r11,PACAKMSR(r13); \ - ld r12,_MSR(r1); \ - rlwimi r11,r12,0,MSR_EE; \ - mtmsrd r11,1 - #define ADD_NVGPRS \ bl .save_nvgprs diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index e648af92ced1..0e40843a1c6e 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h @@ -18,10 +18,6 @@ #include -/* Define a way to iterate across irqs. 
*/ -#define for_each_irq(i) \ - for ((i) = 0; (i) < NR_IRQS; ++(i)) - extern atomic_t ppc_n_lost_interrupts; /* This number is used when no interrupt has been assigned */ diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h index c65b9294376e..c9f698a994be 100644 --- a/arch/powerpc/include/asm/mpic.h +++ b/arch/powerpc/include/asm/mpic.h @@ -275,9 +275,6 @@ struct mpic unsigned int isu_mask; /* Number of sources */ unsigned int num_sources; - /* default senses array */ - unsigned char *senses; - unsigned int senses_count; /* vector numbers used for internal sources (ipi/timers) */ unsigned int ipi_vecs[4]; @@ -415,21 +412,6 @@ extern struct mpic *mpic_alloc(struct device_node *node, extern void mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, phys_addr_t phys_addr); -/* Set default sense codes - * - * @mpic: controller - * @senses: array of sense codes - * @count: size of above array - * - * Optionally provide an array (indexed on hardware interrupt numbers - * for this MPIC) of default sense codes for the chip. Those are linux - * sense codes IRQ_TYPE_* - * - * The driver gets ownership of the pointer, don't dispose of it or - * anything like that. __init only. - */ -extern void mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count); - /* Initialize the controller. After this has been called, none of the above * should be called again for this mpic diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h index 3ec37dc9003e..326d33ca55cd 100644 --- a/arch/powerpc/include/asm/mpic_msgr.h +++ b/arch/powerpc/include/asm/mpic_msgr.h @@ -13,6 +13,7 @@ #include #include +#include struct mpic_msgr { u32 __iomem *base; diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index b86faa9107da..8a97aa7289d3 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -15,11 +15,6 @@ #ifndef __ASM_POWERPC_REG_BOOKE_H__ #define __ASM_POWERPC_REG_BOOKE_H__ -#ifdef CONFIG_BOOKE_WDT -extern u32 booke_wdt_enabled; -extern u32 booke_wdt_period; -#endif /* CONFIG_BOOKE_WDT */ - /* Machine State Register (MSR) Fields */ #define MSR_GS (1<<28) /* Guest state */ #define MSR_UCLE (1<<26) /* User-mode cache lock enable */ diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index f8a7a1a1a9f4..ef2074c3e906 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -588,23 +588,19 @@ _GLOBAL(ret_from_except_lite) fast_exc_return_irq: restore: /* - * This is the main kernel exit path, we first check if we - * have to change our interrupt state. + * This is the main kernel exit path. First we check if we + * are about to re-enable interrupts */ ld r5,SOFTE(r1) lbz r6,PACASOFTIRQEN(r13) - cmpwi cr1,r5,0 - cmpw cr0,r5,r6 - beq cr0,4f + cmpwi cr0,r5,0 + beq restore_irq_off - /* We do, handle disable first, which is easy */ - bne cr1,3f; - li r0,0 - stb r0,PACASOFTIRQEN(r13); - TRACE_DISABLE_INTS - b 4f + /* We are enabling, were we already enabled ? Yes, just return */ + cmpwi cr0,r6,1 + beq cr0,do_restore -3: /* + /* * We are about to soft-enable interrupts (we are hard disabled * at this point). We check if there's anything that needs to * be replayed first. @@ -626,7 +622,7 @@ restore_no_replay: /* * Final return path. 
BookE is handled in a different file */ -4: +do_restore: #ifdef CONFIG_PPC_BOOK3E b .exception_return_book3e #else @@ -699,6 +695,25 @@ fast_exception_return: #endif /* CONFIG_PPC_BOOK3E */ + /* + * We are returning to a context with interrupts soft disabled. + * + * However, we may also about to hard enable, so we need to + * make sure that in this case, we also clear PACA_IRQ_HARD_DIS + * or that bit can get out of sync and bad things will happen + */ +restore_irq_off: + ld r3,_MSR(r1) + lbz r7,PACAIRQHAPPENED(r13) + andi. r0,r3,MSR_EE + beq 1f + rlwinm r7,r7,0,~PACA_IRQ_HARD_DIS + stb r7,PACAIRQHAPPENED(r13) +1: li r0,0 + stb r0,PACASOFTIRQEN(r13); + TRACE_DISABLE_INTS + b do_restore + /* * Something did happen, check if a re-emit is needed * (this also clears paca->irq_happened) @@ -748,6 +763,9 @@ restore_check_irq_replay: #endif /* CONFIG_PPC_BOOK3E */ 1: b .ret_from_except /* What else to do here ? */ + + +3: do_work: #ifdef CONFIG_PREEMPT andi. r0,r3,MSR_PR /* Returning to user mode? */ @@ -767,16 +785,6 @@ do_work: SOFT_DISABLE_INTS(r3,r4) 1: bl .preempt_schedule_irq - /* Hard-disable interrupts again (and update PACA) */ -#ifdef CONFIG_PPC_BOOK3E - wrteei 0 -#else - ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */ - mtmsrd r10,1 -#endif /* CONFIG_PPC_BOOK3E */ - li r0,PACA_IRQ_HARD_DIS - stb r0,PACAIRQHAPPENED(r13) - /* Re-test flags and eventually loop */ clrrdi r9,r1,THREAD_SHIFT ld r4,TI_FLAGS(r9) @@ -787,14 +795,6 @@ do_work: user_work: #endif /* CONFIG_PREEMPT */ - /* Enable interrupts */ -#ifdef CONFIG_PPC_BOOK3E - wrteei 1 -#else - ori r10,r10,MSR_EE - mtmsrd r10,1 -#endif /* CONFIG_PPC_BOOK3E */ - andi. r0,r4,_TIF_NEED_RESCHED beq 1f bl .restore_interrupts diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index cb705fdbb458..8f880bc77c56 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -768,8 +768,8 @@ alignment_common: std r3,_DAR(r1) std r4,_DSISR(r1) bl .save_nvgprs + DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - ENABLE_INTS bl .alignment_exception b .ret_from_except diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5ec1b2354ca6..641da9e868ce 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -229,6 +229,19 @@ notrace void arch_local_irq_restore(unsigned long en) */ if (unlikely(irq_happened != PACA_IRQ_HARD_DIS)) __hard_irq_disable(); +#ifdef CONFIG_TRACE_IRQFLAG + else { + /* + * We should already be hard disabled here. We had bugs + * where that wasn't the case so let's dbl check it and + * warn if we are wrong. Only do that when IRQ tracing + * is enabled as mfmsr() can be costly. + */ + if (WARN_ON(mfmsr() & MSR_EE)) + __hard_irq_disable(); + } +#endif /* CONFIG_TRACE_IRQFLAG */ + set_soft_enabled(0); /* @@ -260,11 +273,17 @@ EXPORT_SYMBOL(arch_local_irq_restore); * if they are currently disabled. This is typically called before * schedule() or do_signal() when returning to userspace. We do it * in C to avoid the burden of dealing with lockdep etc... + * + * NOTE: This is called with interrupts hard disabled but not marked + * as such in paca->irq_happened, so we need to resync this. 
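For reference, the bookkeeping performed by the restore_irq_off path added above can be written in C roughly as follows (a simplified sketch using the paca fields named in this series, not an excerpt from the patch): if the interrupted context had MSR_EE set, the return from interrupt will hard-enable, so PACA_IRQ_HARD_DIS must be cleared to keep paca->irq_happened in sync with the hardware before the context is marked soft-disabled.

	/* Sketch only: C rendering of the restore_irq_off logic above. */
	static void restore_irq_off_sketch(struct pt_regs *regs)
	{
		if (regs->msr & MSR_EE)
			local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
		local_paca->soft_enabled = 0;	/* return with interrupts soft-disabled */
	}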
*/ void restore_interrupts(void) { - if (irqs_disabled()) + if (irqs_disabled()) { + local_paca->irq_happened |= PACA_IRQ_HARD_DIS; local_irq_enable(); + } else + __hard_irq_enable(); } #endif /* CONFIG_PPC64 */ @@ -330,14 +349,10 @@ void migrate_irqs(void) alloc_cpumask_var(&mask, GFP_KERNEL); - for_each_irq(irq) { + for_each_irq_desc(irq, desc) { struct irq_data *data; struct irq_chip *chip; - desc = irq_to_desc(irq); - if (!desc) - continue; - data = irq_desc_get_irq_data(desc); if (irqd_is_per_cpu(data)) continue; diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index c957b1202bdc..5df777794403 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -23,14 +23,11 @@ void machine_kexec_mask_interrupts(void) { unsigned int i; + struct irq_desc *desc; - for_each_irq(i) { - struct irq_desc *desc = irq_to_desc(i); + for_each_irq_desc(i, desc) { struct irq_chip *chip; - if (!desc) - continue; - chip = irq_desc_get_chip(desc); if (!chip) continue; diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 9825f29d1faf..ec8a53fa9e8f 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -150,6 +150,9 @@ notrace void __init machine_init(u64 dt_ptr) } #ifdef CONFIG_BOOKE_WDT +extern u32 booke_wdt_enabled; +extern u32 booke_wdt_period; + /* Checks wdt=x and wdt_period=xx command-line option */ notrace int __init early_parse_wdt(char *p) { diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 6aa0c663e247..158972341a2d 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) addr, regs->nip, regs->link, code); } - if (!arch_irq_disabled_regs(regs)) + if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs)) local_irq_enable(); memset(&info, 0, sizeof(info)); @@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs) return; } - local_irq_enable(); + /* We restore the interrupt state now */ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); #ifdef CONFIG_MATH_EMULATION /* (reason & REASON_ILLEGAL) would be the obvious thing here, @@ -1069,6 +1071,10 @@ void alignment_exception(struct pt_regs *regs) { int sig, code, fixed = 0; + /* We restore the interrupt state now */ + if (!arch_irq_disabled_regs(regs)) + local_irq_enable(); + /* we don't implement logging of alignment exceptions */ if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) fixed = fix_alignment(regs); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index ddc485a529f2..c3beaeef3f60 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -258,6 +258,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, !(memslot->userspace_addr & (s - 1))) { start &= ~(s - 1); pgsize = s; + get_page(hpage); + put_page(page); page = hpage; } } @@ -281,11 +283,8 @@ static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn, err = 0; out: - if (got) { - if (PageHuge(page)) - page = compound_head(page); + if (got) put_page(page); - } return err; up_err: @@ -678,8 +677,15 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, SetPageDirty(page); out_put: - if (page) - put_page(page); + if (page) { + /* + * We drop pages[0] here, not page because page might + * have been set to the head page of a compound, but + * we have to drop the 
reference on the correct tail + * page to match the get inside gup() + */ + put_page(pages[0]); + } return ret; out_unlock: @@ -979,6 +985,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, pa = *physp; } page = pfn_to_page(pa >> PAGE_SHIFT); + get_page(page); } else { hva = gfn_to_hva_memslot(memslot, gfn); npages = get_user_pages_fast(hva, 1, 1, pages); @@ -991,8 +998,6 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, page = compound_head(page); psize <<= compound_order(page); } - if (!kvm->arch.using_mmu_notifiers) - get_page(page); offset = gpa & (psize - 1); if (nb_ret) *nb_ret = psize - offset; @@ -1003,7 +1008,6 @@ void kvmppc_unpin_guest_page(struct kvm *kvm, void *va) { struct page *page = virt_to_page(va); - page = compound_head(page); put_page(page); } diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 01294a5099dd..108d1f580177 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -1192,8 +1192,6 @@ static void unpin_slot(struct kvm *kvm, int slot_id) continue; pfn = physp[j] >> PAGE_SHIFT; page = pfn_to_page(pfn); - if (PageHuge(page)) - page = compound_head(page); SetPageDirty(page); put_page(page); } diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h index af1ab5e9a691..5c3cf2d04e41 100644 --- a/arch/powerpc/net/bpf_jit.h +++ b/arch/powerpc/net/bpf_jit.h @@ -48,7 +48,13 @@ /* * Assembly helpers from arch/powerpc/net/bpf_jit.S: */ -extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; +#define DECLARE_LOAD_FUNC(func) \ + extern u8 func[], func##_negative_offset[], func##_positive_offset[] + +DECLARE_LOAD_FUNC(sk_load_word); +DECLARE_LOAD_FUNC(sk_load_half); +DECLARE_LOAD_FUNC(sk_load_byte); +DECLARE_LOAD_FUNC(sk_load_byte_msh); #define FUNCTION_DESCR_SIZE 24 diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S index ff4506e85cce..55ba3855a97f 100644 --- a/arch/powerpc/net/bpf_jit_64.S +++ b/arch/powerpc/net/bpf_jit_64.S @@ -31,14 +31,13 @@ * then branch directly to slow_path_XXX if required. (In fact, could * load a spare GPR with the address of slow_path_generic and pass size * as an argument, making the call site a mtlr, li and bllr.) - * - * Technically, the "is addr < 0" check is unnecessary & slowing down - * the ABS path, as it's statically checked on generation. */ .globl sk_load_word sk_load_word: cmpdi r_addr, 0 - blt bpf_error + blt bpf_slow_path_word_neg + .globl sk_load_word_positive_offset +sk_load_word_positive_offset: /* Are we accessing past headlen? 
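The "accessing past headlen" test that follows compares the (already known non-negative) offset against the length of the skb's linear data, which the JIT keeps in r_HL; the condition for staying on the fast path is roughly this sketch (skb_headlen() is the generic helper for that length; the generated code uses the cached register instead):

	#include <linux/skbuff.h>

	/* Sketch only: fast-path bounds test; size is 4, 2 or 1 per load type. */
	static bool bpf_fast_path_ok(const struct sk_buff *skb, int offset, int size)
	{
		return offset >= 0 && offset + size <= skb_headlen(skb);
	}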
*/ subi r_scratch1, r_HL, 4 cmpd r_scratch1, r_addr @@ -51,7 +50,9 @@ sk_load_word: .globl sk_load_half sk_load_half: cmpdi r_addr, 0 - blt bpf_error + blt bpf_slow_path_half_neg + .globl sk_load_half_positive_offset +sk_load_half_positive_offset: subi r_scratch1, r_HL, 2 cmpd r_scratch1, r_addr blt bpf_slow_path_half @@ -61,7 +62,9 @@ sk_load_half: .globl sk_load_byte sk_load_byte: cmpdi r_addr, 0 - blt bpf_error + blt bpf_slow_path_byte_neg + .globl sk_load_byte_positive_offset +sk_load_byte_positive_offset: cmpd r_HL, r_addr ble bpf_slow_path_byte lbzx r_A, r_D, r_addr @@ -69,22 +72,20 @@ sk_load_byte: /* * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf) - * r_addr is the offset value, already known positive + * r_addr is the offset value */ .globl sk_load_byte_msh sk_load_byte_msh: + cmpdi r_addr, 0 + blt bpf_slow_path_byte_msh_neg + .globl sk_load_byte_msh_positive_offset +sk_load_byte_msh_positive_offset: cmpd r_HL, r_addr ble bpf_slow_path_byte_msh lbzx r_X, r_D, r_addr rlwinm r_X, r_X, 2, 32-4-2, 31-2 blr -bpf_error: - /* Entered with cr0 = lt */ - li r3, 0 - /* Generated code will 'blt epilogue', returning 0. */ - blr - /* Call out to skb_copy_bits: * We'll need to back up our volatile regs first; we have * local variable space at r1+(BPF_PPC_STACK_BASIC). @@ -136,3 +137,84 @@ bpf_slow_path_byte_msh: lbz r_X, BPF_PPC_STACK_BASIC+(2*8)(r1) rlwinm r_X, r_X, 2, 32-4-2, 31-2 blr + +/* Call out to bpf_internal_load_pointer_neg_helper: + * We'll need to back up our volatile regs first; we have + * local variable space at r1+(BPF_PPC_STACK_BASIC). + * Allocate a new stack frame here to remain ABI-compliant in + * stashing LR. + */ +#define sk_negative_common(SIZE) \ + mflr r0; \ + std r0, 16(r1); \ + /* R3 goes in parameter space of caller's frame */ \ + std r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ + std r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ + std r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ + stdu r1, -BPF_PPC_SLOWPATH_FRAME(r1); \ + /* R3 = r_skb, as passed */ \ + mr r4, r_addr; \ + li r5, SIZE; \ + bl bpf_internal_load_pointer_neg_helper; \ + /* R3 != 0 on success */ \ + addi r1, r1, BPF_PPC_SLOWPATH_FRAME; \ + ld r0, 16(r1); \ + ld r_A, (BPF_PPC_STACK_BASIC+(0*8))(r1); \ + ld r_X, (BPF_PPC_STACK_BASIC+(1*8))(r1); \ + mtlr r0; \ + cmpldi r3, 0; \ + beq bpf_error_slow; /* cr0 = EQ */ \ + mr r_addr, r3; \ + ld r_skb, (BPF_PPC_STACKFRAME+48)(r1); \ + /* Great success! 
*/ + +bpf_slow_path_word_neg: + lis r_scratch1,-32 /* SKF_LL_OFF */ + cmpd r_addr, r_scratch1 /* addr < SKF_* */ + blt bpf_error /* cr0 = LT */ + .globl sk_load_word_negative_offset +sk_load_word_negative_offset: + sk_negative_common(4) + lwz r_A, 0(r_addr) + blr + +bpf_slow_path_half_neg: + lis r_scratch1,-32 /* SKF_LL_OFF */ + cmpd r_addr, r_scratch1 /* addr < SKF_* */ + blt bpf_error /* cr0 = LT */ + .globl sk_load_half_negative_offset +sk_load_half_negative_offset: + sk_negative_common(2) + lhz r_A, 0(r_addr) + blr + +bpf_slow_path_byte_neg: + lis r_scratch1,-32 /* SKF_LL_OFF */ + cmpd r_addr, r_scratch1 /* addr < SKF_* */ + blt bpf_error /* cr0 = LT */ + .globl sk_load_byte_negative_offset +sk_load_byte_negative_offset: + sk_negative_common(1) + lbz r_A, 0(r_addr) + blr + +bpf_slow_path_byte_msh_neg: + lis r_scratch1,-32 /* SKF_LL_OFF */ + cmpd r_addr, r_scratch1 /* addr < SKF_* */ + blt bpf_error /* cr0 = LT */ + .globl sk_load_byte_msh_negative_offset +sk_load_byte_msh_negative_offset: + sk_negative_common(1) + lbz r_X, 0(r_addr) + rlwinm r_X, r_X, 2, 32-4-2, 31-2 + blr + +bpf_error_slow: + /* fabricate a cr0 = lt */ + li r_scratch1, -1 + cmpdi r_scratch1, 0 +bpf_error: + /* Entered with cr0 = lt */ + li r3, 0 + /* Generated code will 'blt epilogue', returning 0. */ + blr diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 73619d3aeb6c..2dc8b1484845 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -127,6 +127,9 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) PPC_BLR(); } +#define CHOOSE_LOAD_FUNC(K, func) \ + ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) + /* Assemble the body code between the prologue & epilogue. */ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, struct codegen_context *ctx, @@ -391,21 +394,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, /*** Absolute loads from packet header/data ***/ case BPF_S_LD_W_ABS: - func = sk_load_word; + func = CHOOSE_LOAD_FUNC(K, sk_load_word); goto common_load; case BPF_S_LD_H_ABS: - func = sk_load_half; + func = CHOOSE_LOAD_FUNC(K, sk_load_half); goto common_load; case BPF_S_LD_B_ABS: - func = sk_load_byte; + func = CHOOSE_LOAD_FUNC(K, sk_load_byte); common_load: - /* - * Load from [K]. Reference with the (negative) - * SKF_NET_OFF/SKF_LL_OFF offsets is unsupported. - */ + /* Load from [K]. */ ctx->seen |= SEEN_DATAREF; - if ((int)K < 0) - return -ENOTSUPP; PPC_LI64(r_scratch1, func); PPC_MTLR(r_scratch1); PPC_LI32(r_addr, K); @@ -429,7 +427,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, common_load_ind: /* * Load from [X + K]. Negative offsets are tested for - * in the helper functions, and result in a 'ret 0'. + * in the helper functions. */ ctx->seen |= SEEN_DATAREF | SEEN_XREG; PPC_LI64(r_scratch1, func); @@ -443,13 +441,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, break; case BPF_S_LDX_B_MSH: - /* - * x86 version drops packet (RET 0) when K<0, whereas - * interpreter does allow K<0 (__load_pointer, special - * ancillary data). common_load returns ENOTSUPP if K<0, - * so we fall back to interpreter & filter works. 
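The CHOOSE_LOAD_FUNC() macro added to bpf_jit_comp.c above picks, at code-generation time, which helper entry the emitted call will target for an absolute load at constant offset K; its selection logic is equivalent to the following sketch (SKF_LL_OFF, from linux/filter.h, is the most negative ancillary offset):

	/* Sketch only: mirrors CHOOSE_LOAD_FUNC(K, sk_load_word). */
	static void *choose_load_func(int k, void *pos, void *neg, void *full)
	{
		if (k >= 0)
			return pos;	/* sk_load_word_positive_offset: skip the sign test */
		if (k >= SKF_LL_OFF)
			return neg;	/* sk_load_word_negative_offset: ancillary areas */
		return full;		/* sk_load_word: runtime check fails, filter returns 0 */
	}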
- */ - func = sk_load_byte_msh; + func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); goto common_load; break; diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c index 9fef5302adc1..67dac22b4363 100644 --- a/arch/powerpc/platforms/85xx/common.c +++ b/arch/powerpc/platforms/85xx/common.c @@ -21,6 +21,12 @@ static struct of_device_id __initdata mpc85xx_common_ids[] = { { .compatible = "fsl,qe", }, { .compatible = "fsl,cpm2", }, { .compatible = "fsl,srio", }, + /* So that the DMA channel nodes can be probed individually: */ + { .compatible = "fsl,eloplus-dma", }, + /* For the PMC driver */ + { .compatible = "fsl,mpc8548-guts", }, + /* Probably unnecessary? */ + { .compatible = "gpio-leds", }, {}, }; diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index 9a6f04406e0d..d208ebccb91c 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c @@ -399,12 +399,6 @@ static int __init board_fixups(void) machine_arch_initcall(mpc8568_mds, board_fixups); machine_arch_initcall(mpc8569_mds, board_fixups); -static struct of_device_id mpc85xx_ids[] = { - { .compatible = "fsl,mpc8548-guts", }, - { .compatible = "gpio-leds", }, - {}, -}; - static int __init mpc85xx_publish_devices(void) { if (machine_is(mpc8568_mds)) @@ -412,10 +406,7 @@ static int __init mpc85xx_publish_devices(void) if (machine_is(mpc8569_mds)) simple_gpiochip_init("fsl,mpc8569mds-bcsr-gpio"); - mpc85xx_common_publish_devices(); - of_platform_bus_probe(NULL, mpc85xx_ids, NULL); - - return 0; + return mpc85xx_common_publish_devices(); } machine_device_initcall(mpc8568_mds, mpc85xx_publish_devices); diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c index e74b7cde9aee..f700c81a1321 100644 --- a/arch/powerpc/platforms/85xx/p1022_ds.c +++ b/arch/powerpc/platforms/85xx/p1022_ds.c @@ -460,18 +460,7 @@ static void __init p1022_ds_setup_arch(void) pr_info("Freescale P1022 DS reference board\n"); } -static struct of_device_id __initdata p1022_ds_ids[] = { - /* So that the DMA channel nodes can be probed individually: */ - { .compatible = "fsl,eloplus-dma", }, - {}, -}; - -static int __init p1022_ds_publish_devices(void) -{ - mpc85xx_common_publish_devices(); - return of_platform_bus_probe(NULL, p1022_ds_ids, NULL); -} -machine_device_initcall(p1022_ds, p1022_ds_publish_devices); +machine_device_initcall(p1022_ds, mpc85xx_common_publish_devices); machine_arch_initcall(p1022_ds, swiotlb_setup_bus_notifier); diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index d09f3e8e6867..85825b5401e5 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c @@ -114,7 +114,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) pr_devel("axon_msi: woff %x roff %x msi %x\n", write_offset, msic->read_offset, msi); - if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) { + if (msi < nr_irqs && irq_get_chip_data(msi) == msic) { generic_handle_irq(msi); msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); } else { @@ -276,9 +276,6 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) if (rc) return rc; - /* We rely on being able to stash a virq in a u16 */ - BUILD_BUG_ON(NR_IRQS > 65536); - list_for_each_entry(entry, &dev->msi_list, list) { virq = irq_create_direct_mapping(msic->irq_domain); if (virq == NO_IRQ) { @@ -392,7 +389,8 @@ static int axon_msi_probe(struct platform_device 
*device) } memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); - msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic); + /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */ + msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic); if (!msic->irq_domain) { printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n", dn->full_name); diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c index f9a48af335cb..8c6dc42ecf65 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.c +++ b/arch/powerpc/platforms/cell/beat_interrupt.c @@ -248,6 +248,6 @@ void beatic_deinit_IRQ(void) { int i; - for (i = 1; i < NR_IRQS; i++) + for (i = 1; i < nr_irqs; i++) beat_destruct_irq_plug(i); } diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index 996c5ff7824b..03685a329d7d 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c @@ -366,11 +366,20 @@ static void kw_i2c_timeout(unsigned long data) unsigned long flags; spin_lock_irqsave(&host->lock, flags); + + /* + * If the timer is pending, that means we raced with the + * irq, in which case we just return + */ + if (timer_pending(&host->timeout_timer)) + goto skip; + kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr)); if (host->state != state_idle) { host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; add_timer(&host->timeout_timer); } + skip: spin_unlock_irqrestore(&host->lock, flags); } diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 66ad93de1d55..c4e630576ff2 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c @@ -57,9 +57,9 @@ static int max_real_irqs; static DEFINE_RAW_SPINLOCK(pmac_pic_lock); -#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) -static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; -static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; +/* The max irq number this driver deals with is 128; see max_irqs */ +static DECLARE_BITMAP(ppc_lost_interrupts, 128); +static DECLARE_BITMAP(ppc_cached_irq_mask, 128); static int pmac_irq_cascade = -1; static struct irq_domain *pmac_pic_host; diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index aadbe4f6d537..178a5f300bc9 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -30,9 +30,9 @@ config PPC_SPLPAR two or more partitions. 
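The powermac PIC change above replaces the NR_IRQS-sized mask arrays with fixed 128-bit bitmaps; as a reminder of what that declares (a sketch of the expansion, not new code in the patch):

	/* Sketch only: DECLARE_BITMAP(ppc_lost_interrupts, 128) expands to roughly */
	unsigned long ppc_lost_interrupts[BITS_TO_LONGS(128)];	/* 2 longs on 64-bit, 4 on 32-bit */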
config EEH - bool "PCI Extended Error Handling (EEH)" if EXPERT + bool depends on PPC_PSERIES && PCI - default y if !EXPERT + default y config PSERIES_MSI bool diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 309d38ef7322..a75e37dc41aa 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c @@ -1076,7 +1076,7 @@ static void eeh_add_device_late(struct pci_dev *dev) pr_debug("EEH: Adding device %s\n", pci_name(dev)); dn = pci_device_to_OF_node(dev); - edev = pci_dev_to_eeh_dev(dev); + edev = of_node_to_eeh_dev(dn); if (edev->pdev == dev) { pr_debug("EEH: Already referenced !\n"); return; diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index d3be961e2ae7..10386b676d87 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c @@ -51,8 +51,7 @@ static intctl_cpm2_t __iomem *cpm2_intctl; static struct irq_domain *cpm2_pic_host; -#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) -static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; +static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */ static const u_char irq_to_siureg[] = { 1, 1, 1, 1, 1, 1, 1, 1, diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c index d5f5416be310..b724622c3a0b 100644 --- a/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/arch/powerpc/sysdev/mpc8xx_pic.c @@ -18,69 +18,45 @@ extern int cpm_get_irq(struct pt_regs *regs); static struct irq_domain *mpc8xx_pic_host; -#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) -static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; +static unsigned long mpc8xx_cached_irq_mask; static sysconf8xx_t __iomem *siu_reg; -int cpm_get_irq(struct pt_regs *regs); +static inline unsigned long mpc8xx_irqd_to_bit(struct irq_data *d) +{ + return 0x80000000 >> irqd_to_hwirq(d); +} static void mpc8xx_unmask_irq(struct irq_data *d) { - int bit, word; - unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); - - bit = irq_nr & 0x1f; - word = irq_nr >> 5; - - ppc_cached_irq_mask[word] |= (1 << (31-bit)); - out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); + mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d); + out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask); } static void mpc8xx_mask_irq(struct irq_data *d) { - int bit, word; - unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); - - bit = irq_nr & 0x1f; - word = irq_nr >> 5; - - ppc_cached_irq_mask[word] &= ~(1 << (31-bit)); - out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); + mpc8xx_cached_irq_mask &= ~mpc8xx_irqd_to_bit(d); + out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask); } static void mpc8xx_ack(struct irq_data *d) { - int bit; - unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); - - bit = irq_nr & 0x1f; - out_be32(&siu_reg->sc_sipend, 1 << (31-bit)); + out_be32(&siu_reg->sc_sipend, mpc8xx_irqd_to_bit(d)); } static void mpc8xx_end_irq(struct irq_data *d) { - int bit, word; - unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); - - bit = irq_nr & 0x1f; - word = irq_nr >> 5; - - ppc_cached_irq_mask[word] |= (1 << (31-bit)); - out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); + mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d); + out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask); } static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) { - if (flow_type & IRQ_TYPE_EDGE_FALLING) { - irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d); + /* only external IRQ senses are programmable */ + if ((flow_type & IRQ_TYPE_EDGE_FALLING) && 
!(irqd_to_hwirq(d) & 1)) { unsigned int siel = in_be32(&siu_reg->sc_siel); - - /* only external IRQ senses are programmable */ - if ((hw & 1) == 0) { - siel |= (0x80000000 >> hw); - out_be32(&siu_reg->sc_siel, siel); - __irq_set_handler_locked(d->irq, handle_edge_irq); - } + siel |= mpc8xx_irqd_to_bit(d); + out_be32(&siu_reg->sc_siel, siel); + __irq_set_handler_locked(d->irq, handle_edge_irq); } return 0; } @@ -132,6 +108,9 @@ static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct, IRQ_TYPE_EDGE_FALLING, }; + if (intspec[0] > 0x1f) + return 0; + *out_hwirq = intspec[0]; if (intsize > 1 && intspec[1] < 4) *out_flags = map_pic_senses[intspec[1]]; diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 9ac71ebd2c40..395af1347749 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -604,18 +604,14 @@ static struct mpic *mpic_find(unsigned int irq) } /* Determine if the linux irq is an IPI */ -static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq) +static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int src) { - unsigned int src = virq_to_hw(irq); - return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]); } /* Determine if the linux irq is a timer */ -static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq) +static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int src) { - unsigned int src = virq_to_hw(irq); - return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]); } @@ -876,21 +872,45 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) if (src >= mpic->num_sources) return -EINVAL; - if (flow_type == IRQ_TYPE_NONE) - if (mpic->senses && src < mpic->senses_count) - flow_type = mpic->senses[src]; - if (flow_type == IRQ_TYPE_NONE) - flow_type = IRQ_TYPE_LEVEL_LOW; + vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); + /* We don't support "none" type */ + if (flow_type == IRQ_TYPE_NONE) + flow_type = IRQ_TYPE_DEFAULT; + + /* Default: read HW settings */ + if (flow_type == IRQ_TYPE_DEFAULT) { + switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) | + MPIC_INFO(VECPRI_SENSE_MASK))) { + case MPIC_INFO(VECPRI_SENSE_EDGE) | + MPIC_INFO(VECPRI_POLARITY_POSITIVE): + flow_type = IRQ_TYPE_EDGE_RISING; + break; + case MPIC_INFO(VECPRI_SENSE_EDGE) | + MPIC_INFO(VECPRI_POLARITY_NEGATIVE): + flow_type = IRQ_TYPE_EDGE_FALLING; + break; + case MPIC_INFO(VECPRI_SENSE_LEVEL) | + MPIC_INFO(VECPRI_POLARITY_POSITIVE): + flow_type = IRQ_TYPE_LEVEL_HIGH; + break; + case MPIC_INFO(VECPRI_SENSE_LEVEL) | + MPIC_INFO(VECPRI_POLARITY_NEGATIVE): + flow_type = IRQ_TYPE_LEVEL_LOW; + break; + } + } + + /* Apply to irq desc */ irqd_set_trigger_type(d, flow_type); + /* Apply to HW */ if (mpic_is_ht_interrupt(mpic, src)) vecpri = MPIC_VECPRI_POLARITY_POSITIVE | MPIC_VECPRI_SENSE_EDGE; else vecpri = mpic_type_to_vecpri(mpic, flow_type); - vold = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); vnew = vold & ~(MPIC_INFO(VECPRI_POLARITY_MASK) | MPIC_INFO(VECPRI_SENSE_MASK)); vnew |= vecpri; @@ -1026,7 +1046,7 @@ static int mpic_host_map(struct irq_domain *h, unsigned int virq, irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq); /* Set default irq type */ - irq_set_irq_type(virq, IRQ_TYPE_NONE); + irq_set_irq_type(virq, IRQ_TYPE_DEFAULT); /* If the MPIC was reset, then all vectors have already been * initialized. 
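The mpc8xx_irqd_to_bit() helper introduced in the 8xx PIC hunks above captures the SIU register convention that bit 0 is the most significant bit, so a hardware irq number maps to a mask bit as in this sketch:

	/* Sketch only: MSB-first bit numbering used by SIMASK/SIPEND/SIEL. */
	static inline u32 siu_bit(unsigned int hwirq)
	{
		return 0x80000000u >> hwirq;	/* hwirq 0 selects the most significant bit */
	}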
Otherwise, a per source lazy initialization @@ -1417,12 +1437,6 @@ void __init mpic_assign_isu(struct mpic *mpic, unsigned int isu_num, mpic->num_sources = isu_first + mpic->isu_size; } -void __init mpic_set_default_senses(struct mpic *mpic, u8 *senses, int count) -{ - mpic->senses = senses; - mpic->senses_count = count; -} - void __init mpic_init(struct mpic *mpic) { int i, cpu; @@ -1555,12 +1569,12 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri) return; raw_spin_lock_irqsave(&mpic_lock, flags); - if (mpic_is_ipi(mpic, irq)) { + if (mpic_is_ipi(mpic, src)) { reg = mpic_ipi_read(src - mpic->ipi_vecs[0]) & ~MPIC_VECPRI_PRIORITY_MASK; mpic_ipi_write(src - mpic->ipi_vecs[0], reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); - } else if (mpic_is_tm(mpic, irq)) { + } else if (mpic_is_tm(mpic, src)) { reg = mpic_tm_read(src - mpic->timer_vecs[0]) & ~MPIC_VECPRI_PRIORITY_MASK; mpic_tm_write(src - mpic->timer_vecs[0], diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c index 6e7fa386e76a..483d8fa72e8b 100644 --- a/arch/powerpc/sysdev/mpic_msgr.c +++ b/arch/powerpc/sysdev/mpic_msgr.c @@ -27,6 +27,7 @@ static struct mpic_msgr **mpic_msgrs; static unsigned int mpic_msgr_count; +static DEFINE_RAW_SPINLOCK(msgrs_lock); static inline void _mpic_msgr_mer_write(struct mpic_msgr *msgr, u32 value) { @@ -56,12 +57,11 @@ struct mpic_msgr *mpic_msgr_get(unsigned int reg_num) if (reg_num >= mpic_msgr_count) return ERR_PTR(-ENODEV); - raw_spin_lock_irqsave(&msgr->lock, flags); - if (mpic_msgrs[reg_num]->in_use == MSGR_FREE) { - msgr = mpic_msgrs[reg_num]; + raw_spin_lock_irqsave(&msgrs_lock, flags); + msgr = mpic_msgrs[reg_num]; + if (msgr->in_use == MSGR_FREE) msgr->in_use = MSGR_INUSE; - } - raw_spin_unlock_irqrestore(&msgr->lock, flags); + raw_spin_unlock_irqrestore(&msgrs_lock, flags); return msgr; } @@ -228,7 +228,7 @@ static __devinit int mpic_msgr_probe(struct platform_device *dev) reg_number = block_number * MPIC_MSGR_REGISTERS_PER_BLOCK + i; msgr->base = msgr_block_addr + i * MPIC_MSGR_STRIDE; - msgr->mer = msgr->base + MPIC_MSGR_MER_OFFSET; + msgr->mer = (u32 *)((u8 *)msgr->base + MPIC_MSGR_MER_OFFSET); msgr->in_use = MSGR_FREE; msgr->num = i; raw_spin_lock_init(&msgr->lock); diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c index 49a3ece1c6b3..702256a1ca11 100644 --- a/arch/powerpc/sysdev/scom.c +++ b/arch/powerpc/sysdev/scom.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index ea5e204e3450..cd1d18db92c6 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c @@ -188,6 +188,7 @@ void xics_migrate_irqs_away(void) { int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); unsigned int irq, virq; + struct irq_desc *desc; /* If we used to be the default server, move to the new "boot_cpuid" */ if (hw_cpu == xics_default_server) @@ -202,8 +203,7 @@ void xics_migrate_irqs_away(void) /* Allow IPIs again... 
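The xics loop that follows, like the migrate_irqs() and kexec conversions earlier in this series, replaces the removed for_each_irq() iterator with for_each_irq_desc(), which visits only allocated descriptors and therefore no longer needs the NULL check; the shape of the conversion is:

	/* Before: walk every possible irq number and skip the holes. */
	for (i = 0; i < NR_IRQS; ++i) {
		struct irq_desc *desc = irq_to_desc(i);
		if (!desc)
			continue;
		/* ... */
	}

	/* After: walk only the descriptors that actually exist (sparse-IRQ friendly). */
	for_each_irq_desc(i, desc) {
		/* ... */
	}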
*/ icp_ops->set_priority(DEFAULT_PRIORITY); - for_each_irq(virq) { - struct irq_desc *desc; + for_each_irq_desc(virq, desc) { struct irq_chip *chip; long server; unsigned long flags; @@ -212,9 +212,8 @@ void xics_migrate_irqs_away(void) /* We can't set affinity on ISA interrupts */ if (virq < NUM_ISA_INTERRUPTS) continue; - desc = irq_to_desc(virq); /* We only need to migrate enabled IRQS */ - if (!desc || !desc->action) + if (!desc->action) continue; if (desc->irq_data.domain != xics_host) continue; diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h index 37f2f4a55231..f4c1c20bcdf6 100644 --- a/arch/sh/include/asm/atomic.h +++ b/arch/sh/include/asm/atomic.h @@ -11,7 +11,7 @@ #include #include -#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) +#define ATOMIC_INIT(i) { (i) } #define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic_set(v,i) ((v)->counter = (i)) diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c index 324eef93c900..e99b104d967a 100644 --- a/arch/sh/mm/fault_32.c +++ b/arch/sh/mm/fault_32.c @@ -86,7 +86,7 @@ static noinline int vmalloc_fault(unsigned long address) pte_t *pte_k; /* Make sure we are in vmalloc/module/P3 area: */ - if (!(address >= VMALLOC_START && address < P3_ADDR_MAX)) + if (!(address >= P3SEG && address < P3_ADDR_MAX)) return -1; /* diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index 38d48a59879c..9708851a8b9f 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c @@ -269,4 +269,4 @@ static int __init sunfire_init(void) return 0; } -subsys_initcall(sunfire_init); +fs_initcall(sunfire_init); diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index 1210fde18740..160cac9c4036 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -78,6 +79,8 @@ void __cpuinit leon_callin(void) local_flush_tlb_all(); leon_configure_cache_smp(); + notify_cpu_starting(cpuid); + /* Get our local ticker going. */ smp_setup_percpu_timer(); diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 232df9949530..3ee51f189a55 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -566,15 +566,10 @@ out: SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len) { - long ret; - if (invalid_64bit_range(addr, len)) return -EINVAL; - down_write(¤t->mm->mmap_sem); - ret = do_munmap(current->mm, addr, len); - up_write(¤t->mm->mmap_sem); - return ret; + return vm_munmap(addr, len); } extern unsigned long do_mremap(unsigned long addr, diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index b57a5942ba64..874162a11ceb 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -495,11 +495,11 @@ xcall_fetch_glob_regs: stx %o7, [%g1 + GR_SNAP_O7] stx %i7, [%g1 + GR_SNAP_I7] /* Don't try this at home kids... 
*/ - rdpr %cwp, %g2 - sub %g2, 1, %g7 + rdpr %cwp, %g3 + sub %g3, 1, %g7 wrpr %g7, %cwp mov %i7, %g7 - wrpr %g2, %cwp + wrpr %g3, %cwp stx %g7, [%g1 + GR_SNAP_RPC] sethi %hi(trap_block), %g7 or %g7, %lo(trap_block), %g7 diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h index 5d5a635530bd..32e6cbe8dff3 100644 --- a/arch/tile/include/asm/pci.h +++ b/arch/tile/include/asm/pci.h @@ -47,8 +47,8 @@ struct pci_controller { */ #define PCI_DMA_BUS_IS_PHYS 1 -int __devinit tile_pci_init(void); -int __devinit pcibios_init(void); +int __init tile_pci_init(void); +int __init pcibios_init(void); static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c index a1bb59eecc18..b56d12bf5900 100644 --- a/arch/tile/kernel/pci.c +++ b/arch/tile/kernel/pci.c @@ -141,7 +141,7 @@ static int __devinit tile_init_irqs(int controller_id, * * Returns the number of controllers discovered. */ -int __devinit tile_pci_init(void) +int __init tile_pci_init(void) { int i; @@ -287,7 +287,7 @@ static void __devinit fixup_read_and_payload_sizes(void) * The controllers have been set up by the time we get here, by a call to * tile_pci_init. */ -int __devinit pcibios_init(void) +int __init pcibios_init(void) { int i; diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c index 9efbc1391b3c..89529c9f0605 100644 --- a/arch/tile/kernel/single_step.c +++ b/arch/tile/kernel/single_step.c @@ -346,12 +346,10 @@ void single_step_once(struct pt_regs *regs) } /* allocate a cache line of writable, executable memory */ - down_write(¤t->mm->mmap_sem); - buffer = (void __user *) do_mmap(NULL, 0, 64, + buffer = (void __user *) vm_mmap(NULL, 0, 64, PROT_EXEC | PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0); - up_write(¤t->mm->mmap_sem); if (IS_ERR((void __force *)buffer)) { kfree(state); diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 1d14cc6b79ad..c9866b0b77d8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -81,7 +81,7 @@ config X86 select CLKEVT_I8253 select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_IOMAP - select DCACHE_WORD_ACCESS if !DEBUG_PAGEALLOC + select DCACHE_WORD_ACCESS config INSTRUCTION_DECODER def_bool (KPROBES || PERF_EVENTS) diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index a0559930a180..c85e3ac99bba 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -33,6 +33,9 @@ __HEAD ENTRY(startup_32) #ifdef CONFIG_EFI_STUB + jmp preferred_addr + + .balign 0x10 /* * We don't need the return address, so set up the stack so * efi_main() can find its arugments. @@ -41,12 +44,17 @@ ENTRY(startup_32) call efi_main cmpl $0, %eax - je preferred_addr movl %eax, %esi - call 1f + jne 2f 1: + /* EFI init failed, so hang. */ + hlt + jmp 1b +2: + call 3f +3: popl %eax - subl $1b, %eax + subl $3b, %eax subl BP_pref_address(%esi), %eax add BP_code32_start(%esi), %eax leal preferred_addr(%eax), %eax diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 558d76ce23bc..87e03a13d8e3 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -200,18 +200,28 @@ ENTRY(startup_64) * entire text+data+bss and hopefully all of memory. */ #ifdef CONFIG_EFI_STUB - pushq %rsi + /* + * The entry point for the PE/COFF executable is 0x210, so only + * legacy boot loaders will execute this jmp. 
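The EFI stub rework in the surrounding x86 boot hunks moves the stub entry points to fixed offsets; the PE AddressOfEntryPoint values written by build.c work out as follows (a summary derived from the hunks, not stated in any one of them):

	/* Sketch only: entry offsets relative to the start of .text. */
	pe_entry_32bit = text_start + 16;		/* startup_32, then .balign 0x10 to the stub */
	pe_entry_64bit = text_start + 512 + 16;	/* startup_64 at +512, stub at .org 0x210 */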
+ */ + jmp preferred_addr + + .org 0x210 mov %rcx, %rdi mov %rdx, %rsi call efi_main - popq %rsi - cmpq $0,%rax - je preferred_addr movq %rax,%rsi - call 1f + cmpq $0,%rax + jne 2f 1: + /* EFI init failed, so hang. */ + hlt + jmp 1b +2: + call 3f +3: popq %rax - subq $1b, %rax + subq $3b, %rax subq BP_pref_address(%rsi), %rax add BP_code32_start(%esi), %eax leaq preferred_addr(%rax), %rax diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c index d3c0b0277666..fb7117a4ade1 100644 --- a/arch/x86/boot/compressed/relocs.c +++ b/arch/x86/boot/compressed/relocs.c @@ -403,13 +403,11 @@ static void print_absolute_symbols(void) for (i = 0; i < ehdr.e_shnum; i++) { struct section *sec = &secs[i]; char *sym_strtab; - Elf32_Sym *sh_symtab; int j; if (sec->shdr.sh_type != SHT_SYMTAB) { continue; } - sh_symtab = sec->symtab; sym_strtab = sec->link->strtab; for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Sym); j++) { Elf32_Sym *sym; diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c index ed549767a231..24443a332083 100644 --- a/arch/x86/boot/tools/build.c +++ b/arch/x86/boot/tools/build.c @@ -205,8 +205,13 @@ int main(int argc, char ** argv) put_unaligned_le32(file_sz, &buf[pe_header + 0x50]); #ifdef CONFIG_X86_32 - /* Address of entry point */ - put_unaligned_le32(i, &buf[pe_header + 0x28]); + /* + * Address of entry point. + * + * The EFI stub entry point is +16 bytes from the start of + * the .text section. + */ + put_unaligned_le32(i + 16, &buf[pe_header + 0x28]); /* .text size */ put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]); @@ -217,9 +222,11 @@ int main(int argc, char ** argv) /* * Address of entry point. startup_32 is at the beginning and * the 64-bit entry point (startup_64) is always 512 bytes - * after. + * after. 
The EFI stub entry point is 16 bytes after that, as + * the first instruction allows legacy loaders to jump over + * the EFI stub initialisation */ - put_unaligned_le32(i + 512, &buf[pe_header + 0x28]); + put_unaligned_le32(i + 528, &buf[pe_header + 0x28]); /* .text size */ put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]); diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index d511d951a052..07b3a68d2d29 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c @@ -119,9 +119,7 @@ static void set_brk(unsigned long start, unsigned long end) end = PAGE_ALIGN(end); if (end <= start) return; - down_write(¤t->mm->mmap_sem); - do_brk(start, end - start); - up_write(¤t->mm->mmap_sem); + vm_brk(start, end - start); } #ifdef CORE_DUMP @@ -296,8 +294,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) /* OK, This is the point of no return */ set_personality(PER_LINUX); - set_thread_flag(TIF_IA32); - current->mm->context.ia32_compat = 1; + set_personality_ia32(false); setup_new_exec(bprm); @@ -332,9 +329,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) pos = 32; map_size = ex.a_text+ex.a_data; - down_write(¤t->mm->mmap_sem); - error = do_brk(text_addr & PAGE_MASK, map_size); - up_write(¤t->mm->mmap_sem); + error = vm_brk(text_addr & PAGE_MASK, map_size); if (error != (text_addr & PAGE_MASK)) { send_sig(SIGKILL, current, 0); @@ -373,9 +368,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) if (!bprm->file->f_op->mmap || (fd_offset & ~PAGE_MASK) != 0) { loff_t pos = fd_offset; - down_write(¤t->mm->mmap_sem); - do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); - up_write(¤t->mm->mmap_sem); + vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex), ex.a_text+ex.a_data, &pos); @@ -385,26 +378,22 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) goto beyond_if; } - down_write(¤t->mm->mmap_sem); - error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, + error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE | MAP_32BIT, fd_offset); - up_write(¤t->mm->mmap_sem); if (error != N_TXTADDR(ex)) { send_sig(SIGKILL, current, 0); return error; } - down_write(¤t->mm->mmap_sem); - error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data, + error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE | MAP_32BIT, fd_offset + ex.a_text); - up_write(¤t->mm->mmap_sem); if (error != N_DATADDR(ex)) { send_sig(SIGKILL, current, 0); return error; @@ -476,9 +465,7 @@ static int load_aout_library(struct file *file) error_time = jiffies; } #endif - down_write(¤t->mm->mmap_sem); - do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); - up_write(¤t->mm->mmap_sem); + vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); file->f_op->read(file, (char __user *)start_addr, ex.a_text + ex.a_data, &pos); @@ -490,12 +477,10 @@ static int load_aout_library(struct file *file) goto out; } /* Now use mmap to map the library into memory. 
*/ - down_write(¤t->mm->mmap_sem); - error = do_mmap(file, start_addr, ex.a_text + ex.a_data, + error = vm_mmap(file, start_addr, ex.a_text + ex.a_data, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_32BIT, N_TXTOFF(ex)); - up_write(¤t->mm->mmap_sem); retval = error; if (error != start_addr) goto out; @@ -503,9 +488,7 @@ static int load_aout_library(struct file *file) len = PAGE_ALIGN(ex.a_text + ex.a_data); bss = ex.a_text + ex.a_data + ex.a_bss; if (bss > len) { - down_write(¤t->mm->mmap_sem); - error = do_brk(start_addr + len, bss - len); - up_write(¤t->mm->mmap_sem); + error = vm_brk(start_addr + len, bss - len); retval = error; if (error != start_addr + len) goto out; diff --git a/arch/x86/include/asm/posix_types.h b/arch/x86/include/asm/posix_types.h index 3427b7798dbc..7ef7c3020e5c 100644 --- a/arch/x86/include/asm/posix_types.h +++ b/arch/x86/include/asm/posix_types.h @@ -7,9 +7,9 @@ #else # ifdef __i386__ # include "posix_types_32.h" -# elif defined(__LP64__) -# include "posix_types_64.h" -# else +# elif defined(__ILP32__) # include "posix_types_x32.h" +# else +# include "posix_types_64.h" # endif #endif diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h index 4a085383af27..5ca71c065eef 100644 --- a/arch/x86/include/asm/sigcontext.h +++ b/arch/x86/include/asm/sigcontext.h @@ -257,7 +257,7 @@ struct sigcontext { __u64 oldmask; __u64 cr2; struct _fpstate __user *fpstate; /* zero when no FPU context */ -#ifndef __LP64__ +#ifdef __ILP32__ __u32 __fpstate_pad; #endif __u64 reserved1[8]; diff --git a/arch/x86/include/asm/siginfo.h b/arch/x86/include/asm/siginfo.h index fc1aa5535646..34c47b3341c0 100644 --- a/arch/x86/include/asm/siginfo.h +++ b/arch/x86/include/asm/siginfo.h @@ -2,7 +2,13 @@ #define _ASM_X86_SIGINFO_H #ifdef __x86_64__ -# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) +# ifdef __ILP32__ /* x32 */ +typedef long long __kernel_si_clock_t __attribute__((aligned(4))); +# define __ARCH_SI_CLOCK_T __kernel_si_clock_t +# define __ARCH_SI_ATTRIBUTES __attribute__((aligned(8))) +# else /* x86-64 */ +# define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) +# endif #endif #include diff --git a/arch/x86/include/asm/unistd.h b/arch/x86/include/asm/unistd.h index 37cdc9d99bb1..4437001d8e3d 100644 --- a/arch/x86/include/asm/unistd.h +++ b/arch/x86/include/asm/unistd.h @@ -63,10 +63,10 @@ #else # ifdef __i386__ # include -# elif defined(__LP64__) -# include -# else +# elif defined(__ILP32__) # include +# else +# include # endif #endif diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h index 6fe6767b7124..e58f03b206c3 100644 --- a/arch/x86/include/asm/word-at-a-time.h +++ b/arch/x86/include/asm/word-at-a-time.h @@ -43,4 +43,37 @@ static inline unsigned long has_zero(unsigned long a) return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80); } +/* + * Load an unaligned word from kernel space. + * + * In the (very unlikely) case of the word being a page-crosser + * and the next page not being mapped, take the exception and + * return zeroes in the non-existing part. 
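The comment above describes load_unaligned_zeropad() (defined just below); its effect, expressed portably rather than via the x86 exception fixup, is that only the mapped bytes are read and the missing high-order bytes come back as zero, roughly as in this sketch:

	#include <linux/string.h>
	#include <linux/types.h>

	/* Sketch only: value returned (little-endian) when just 'valid' bytes at addr
	 * are addressable; the real implementation loads the full word and only takes
	 * the fixup path if the access faults. */
	static unsigned long zeropad_sketch(const void *addr, size_t valid)
	{
		unsigned long ret = 0;

		memcpy(&ret, addr, valid < sizeof(ret) ? valid : sizeof(ret));
		return ret;
	}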
+ */ +static inline unsigned long load_unaligned_zeropad(const void *addr) +{ + unsigned long ret, dummy; + + asm( + "1:\tmov %2,%0\n" + "2:\n" + ".section .fixup,\"ax\"\n" + "3:\t" + "lea %2,%1\n\t" + "and %3,%1\n\t" + "mov (%1),%0\n\t" + "leal %2,%%ecx\n\t" + "andl %4,%%ecx\n\t" + "shll $3,%%ecx\n\t" + "shr %%cl,%0\n\t" + "jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b, 3b) + :"=&r" (ret),"=&c" (dummy) + :"m" (*(unsigned long *)addr), + "i" (-sizeof(unsigned long)), + "i" (sizeof(unsigned long)-1)); + return ret; +} + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index baaca8defec8..764b66a4cf89 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -195,6 +195,5 @@ extern struct x86_msi_ops x86_msi; extern void x86_init_noop(void); extern void x86_init_uint_noop(unsigned int unused); -extern void x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node); #endif diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index 103b6ab368d3..146a49c763a4 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c @@ -24,6 +24,10 @@ unsigned long acpi_realmode_flags; static char temp_stack[4096]; #endif +asmlinkage void acpi_enter_s3(void) +{ + acpi_enter_sleep_state(3, wake_sleep_flags); +} /** * acpi_suspend_lowlevel - save kernel state * diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h index 416d4be13fef..d68677a2a010 100644 --- a/arch/x86/kernel/acpi/sleep.h +++ b/arch/x86/kernel/acpi/sleep.h @@ -3,12 +3,16 @@ */ #include +#include extern unsigned long saved_video_mode; extern long saved_magic; extern int wakeup_pmode_return; +extern u8 wake_sleep_flags; +extern asmlinkage void acpi_enter_s3(void); + extern unsigned long acpi_copy_wakeup_routine(unsigned long); extern void wakeup_long64(void); diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S index 13ab720573e3..72610839f03b 100644 --- a/arch/x86/kernel/acpi/wakeup_32.S +++ b/arch/x86/kernel/acpi/wakeup_32.S @@ -74,9 +74,7 @@ restore_registers: ENTRY(do_suspend_lowlevel) call save_processor_state call save_registers - pushl $3 - call acpi_enter_sleep_state - addl $4, %esp + call acpi_enter_s3 # In case of S3 failure, we'll emerge here. Jump # to ret_point to recover diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 8ea5164cbd04..014d1d28c397 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -71,9 +71,7 @@ ENTRY(do_suspend_lowlevel) movq %rsi, saved_rsi addq $8, %rsp - movl $3, %edi - xorl %eax, %eax - call acpi_enter_sleep_state + call acpi_enter_s3 /* in case something went wrong, restore the machine status and go on */ jmp resume_point diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 11544d8f1e97..edc24480469f 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1637,9 +1637,11 @@ static int __init apic_verify(void) mp_lapic_addr = APIC_DEFAULT_PHYS_BASE; /* The BIOS may have set up the APIC at some other address */ - rdmsr(MSR_IA32_APICBASE, l, h); - if (l & MSR_IA32_APICBASE_ENABLE) - mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; + if (boot_cpu_data.x86 >= 6) { + rdmsr(MSR_IA32_APICBASE, l, h); + if (l & MSR_IA32_APICBASE_ENABLE) + mp_lapic_addr = l & MSR_IA32_APICBASE_BASE; + } pr_info("Found and enabled local APIC!\n"); return 0; @@ -1657,13 +1659,15 @@ int __init apic_force_enable(unsigned long addr) * MSR. 
This can only be done in software for Intel P6 or later * and AMD K7 (Model > 1) or later. */ - rdmsr(MSR_IA32_APICBASE, l, h); - if (!(l & MSR_IA32_APICBASE_ENABLE)) { - pr_info("Local APIC disabled by BIOS -- reenabling.\n"); - l &= ~MSR_IA32_APICBASE_BASE; - l |= MSR_IA32_APICBASE_ENABLE | addr; - wrmsr(MSR_IA32_APICBASE, l, h); - enabled_via_apicbase = 1; + if (boot_cpu_data.x86 >= 6) { + rdmsr(MSR_IA32_APICBASE, l, h); + if (!(l & MSR_IA32_APICBASE_ENABLE)) { + pr_info("Local APIC disabled by BIOS -- reenabling.\n"); + l &= ~MSR_IA32_APICBASE_BASE; + l |= MSR_IA32_APICBASE_ENABLE | addr; + wrmsr(MSR_IA32_APICBASE, l, h); + enabled_via_apicbase = 1; + } } return apic_verify(); } @@ -2209,10 +2213,12 @@ static void lapic_resume(void) * FIXME! This will be wrong if we ever support suspend on * SMP! We'll need to do this as part of the CPU restore! */ - rdmsr(MSR_IA32_APICBASE, l, h); - l &= ~MSR_IA32_APICBASE_BASE; - l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; - wrmsr(MSR_IA32_APICBASE, l, h); + if (boot_cpu_data.x86 >= 6) { + rdmsr(MSR_IA32_APICBASE, l, h); + l &= ~MSR_IA32_APICBASE_BASE; + l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr; + wrmsr(MSR_IA32_APICBASE, l, h); + } } maxlvt = lapic_get_maxlvt(); diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c index 899803e03214..23e75422e013 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c @@ -207,8 +207,11 @@ static void __init map_csrs(void) static void fixup_cpu_id(struct cpuinfo_x86 *c, int node) { - c->phys_proc_id = node; - per_cpu(cpu_llc_id, smp_processor_id()) = node; + + if (c->phys_proc_id != node) { + c->phys_proc_id = node; + per_cpu(cpu_llc_id, smp_processor_id()) = node; + } } static int __init numachip_system_init(void) diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 8a778db45e3a..991e315f4227 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -24,6 +24,12 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id) { if (x2apic_phys) return x2apic_enabled(); + else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) && + (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) && + x2apic_enabled()) { + printk(KERN_DEBUG "System requires x2apic physical mode\n"); + return 1; + } else return 0; } diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 0a44b90602b0..146bb6218eec 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -26,7 +26,8 @@ * contact AMD for precise details and a CPU swap. * * See http://www.multimania.com/poulot/k6bug.html - * http://www.amd.com/K6/k6docs/revgd.html + * and section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6" + * (Publication # 21266 Issue Date: August 1998) * * The following test is erm.. interesting. AMD neglected to up * the chip setting when fixing the bug but they also tweaked some @@ -94,7 +95,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) "system stability may be impaired when more than 32 MB are used.\n"); else printk(KERN_CONT "probably OK (after B9730xxxx).\n"); - printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n"); } /* K6 with old style WHCR */ @@ -353,10 +353,11 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) node = per_cpu(cpu_llc_id, cpu); /* - * If core numbers are inconsistent, it's likely a multi-fabric platform, - * so invoke platform-specific handler + * On multi-fabric platform (e.g. 
Numascale NumaChip) a + * platform-specific handler needs to be called to fixup some + * IDs of the CPU. */ - if (c->phys_proc_id != node) + if (x86_cpuinit.fixup_cpu_id) x86_cpuinit.fixup_cpu_id(c, node); if (!node_online(node)) { @@ -579,6 +580,24 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } + /* re-enable TopologyExtensions if switched off by BIOS */ + if ((c->x86 == 0x15) && + (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && + !cpu_has(c, X86_FEATURE_TOPOEXT)) { + u64 val; + + if (!rdmsrl_amd_safe(0xc0011005, &val)) { + val |= 1ULL << 54; + wrmsrl_amd_safe(0xc0011005, val); + rdmsrl(0xc0011005, val); + if (val & (1ULL << 54)) { + set_cpu_cap(c, X86_FEATURE_TOPOEXT); + printk(KERN_INFO FW_INFO "CPU: Re-enabling " + "disabled Topology Extensions Support\n"); + } + } + } + cpu_detect_cache_sizes(c); /* Multi core CPU? */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 67e258362a3d..cf79302198a6 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1162,15 +1162,6 @@ static void dbg_restore_debug_regs(void) #define dbg_restore_debug_regs() #endif /* ! CONFIG_KGDB */ -/* - * Prints an error where the NUMA and configured core-number mismatch and the - * platform didn't override this to fix it up - */ -void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node) -{ - pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id); -} - /* * cpu_init() initializes state that is per-CPU. Some data is already * initialized (naturally) in the bootstrap process, such as the GDT diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 73d08ed98a64..b8f3653dddbc 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c @@ -433,14 +433,14 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot, /* check if @slot is already used or the index is already disabled */ ret = amd_get_l3_disable_slot(nb, slot); if (ret >= 0) - return -EINVAL; + return -EEXIST; if (index > nb->l3_cache.indices) return -EINVAL; /* check whether the other slot has disabled the same index already */ if (index == amd_get_l3_disable_slot(nb, !slot)) - return -EINVAL; + return -EEXIST; amd_l3_disable_index(nb, cpu, slot, index); @@ -468,8 +468,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf, err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val); if (err) { if (err == -EEXIST) - printk(KERN_WARNING "L3 disable slot %d in use!\n", - slot); + pr_warning("L3 slot %d in use/index already disabled!\n", + slot); return err; } return count; diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 7734bcbb5a3a..2d6e6498c176 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -235,6 +235,7 @@ int init_fpu(struct task_struct *tsk) if (tsk_used_math(tsk)) { if (HAVE_HWFP && tsk == current) unlazy_fpu(tsk); + tsk->thread.fpu.last_cpu = ~0; return 0; } diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index b8ba6e4a27e4..e554e5ad2fe8 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -79,7 +79,6 @@ struct kvm_task_sleep_node { u32 token; int cpu; bool halted; - struct mm_struct *mm; }; static struct kvm_task_sleep_head { @@ -126,9 +125,7 @@ void kvm_async_pf_task_wait(u32 token) n.token = token; n.cpu = smp_processor_id(); - n.mm = current->active_mm; n.halted = idle || preempt_count() > 1; - atomic_inc(&n.mm->mm_count); 
init_waitqueue_head(&n.wq); hlist_add_head(&n.link, &b->list); spin_unlock(&b->lock); @@ -161,9 +158,6 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait); static void apf_task_wake_one(struct kvm_task_sleep_node *n) { hlist_del_init(&n->link); - if (!n->mm) - return; - mmdrop(n->mm); if (n->halted) smp_send_reschedule(n->cpu); else if (waitqueue_active(&n->wq)) @@ -207,7 +201,7 @@ again: * async PF was not yet handled. * Add dummy entry for the token. */ - n = kmalloc(sizeof(*n), GFP_ATOMIC); + n = kzalloc(sizeof(*n), GFP_ATOMIC); if (!n) { /* * Allocation failed! Busy wait while other cpu @@ -219,7 +213,6 @@ again: } n->token = token; n->cpu = smp_processor_id(); - n->mm = NULL; init_waitqueue_head(&n->wq); hlist_add_head(&n->link, &b->list); } else diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 73465aab28f8..8a2ce8fd41c0 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c @@ -82,11 +82,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) { struct cpuinfo_x86 *c = &cpu_data(cpu); - if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { - pr_warning("CPU%d: family %d not supported\n", cpu, c->x86); - return -1; - } - csig->rev = c->microcode; pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev); @@ -380,6 +375,13 @@ static struct microcode_ops microcode_amd_ops = { struct microcode_ops * __init init_amd_microcode(void) { + struct cpuinfo_x86 *c = &cpu_data(0); + + if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) { + pr_warning("AMD CPU family 0x%x not supported\n", c->x86); + return NULL; + } + patch = (void *)get_zeroed_page(GFP_KERNEL); if (!patch) return NULL; diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 87a0f8688301..c9bda6d6035c 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c @@ -419,10 +419,8 @@ static int mc_device_add(struct device *dev, struct subsys_interface *sif) if (err) return err; - if (microcode_init_cpu(cpu) == UCODE_ERROR) { - sysfs_remove_group(&dev->kobj, &mc_attr_group); + if (microcode_init_cpu(cpu) == UCODE_ERROR) return -EINVAL; - } return err; } @@ -528,11 +526,11 @@ static int __init microcode_init(void) microcode_ops = init_intel_microcode(); else if (c->x86_vendor == X86_VENDOR_AMD) microcode_ops = init_amd_microcode(); - - if (!microcode_ops) { + else pr_err("no support for this CPU vendor\n"); + + if (!microcode_ops) return -ENODEV; - } microcode_pdev = platform_device_register_simple("microcode", -1, NULL, 0); diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 733ca39f367e..43d8b48b23e6 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -423,6 +423,7 @@ void set_personality_ia32(bool x32) current_thread_info()->status |= TS_COMPAT; } } +EXPORT_SYMBOL_GPL(set_personality_ia32); unsigned long get_wchan(struct task_struct *p) { diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c index 71f4727da373..5a98aa272184 100644 --- a/arch/x86/kernel/setup_percpu.c +++ b/arch/x86/kernel/setup_percpu.c @@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void) #endif rc = -EINVAL; if (pcpu_chosen_fc != PCPU_FC_PAGE) { - const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE; const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE; + size_t atom_size; + /* + * On 64bit, use PMD_SIZE for atom_size so that embedded + * percpu areas are aligned to PMD. 
This, in the future, + * can also allow using PMD mappings in vmalloc area. Use + * PAGE_SIZE on 32bit as vmalloc space is highly contended + * and large vmalloc area allocs can easily fail. + */ +#ifdef CONFIG_X86_64 + atom_size = PMD_SIZE; +#else + atom_size = PAGE_SIZE; +#endif rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE, dyn_size, atom_size, pcpu_cpu_distance, diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index e9f265fd79ae..9cf71d0b2d37 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -93,7 +93,6 @@ struct x86_init_ops x86_init __initdata = { struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { .early_percpu_clock_init = x86_init_noop, .setup_percpu_clockev = setup_secondary_APIC_clock, - .fixup_cpu_id = x86_default_fixup_cpu_id, }; static void default_nmi_init(void) { }; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 4044ce0bf7c1..185a2b823a2d 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -6336,13 +6336,11 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, if (npages && !old.rmap) { unsigned long userspace_addr; - down_write(&current->mm->mmap_sem); - userspace_addr = do_mmap(NULL, 0, + userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE, PROT_READ | PROT_WRITE, map_flags, 0); - up_write(&current->mm->mmap_sem); if (IS_ERR((void *)userspace_addr)) return PTR_ERR((void *)userspace_addr); @@ -6366,10 +6364,8 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, if (!user_alloc && !old.user_alloc && old.rmap && !npages) { int ret; - down_write(&current->mm->mmap_sem); - ret = do_munmap(current->mm, old.userspace_addr, + ret = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); - up_write(&current->mm->mmap_sem); if (ret < 0) printk(KERN_WARNING "kvm_vm_ioctl_set_memory_region: " @@ -6585,6 +6581,7 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, kvm_inject_page_fault(vcpu, &fault); } vcpu->arch.apf.halted = false; + vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; } bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu) diff --git a/arch/x86/platform/geode/net5501.c b/arch/x86/platform/geode/net5501.c index 66d377e334f7..646e3b5b4bb6 100644 --- a/arch/x86/platform/geode/net5501.c +++ b/arch/x86/platform/geode/net5501.c @@ -63,7 +63,7 @@ static struct gpio_led net5501_leds[] = { .name = "net5501:1", .gpio = 6, .default_trigger = "default-on", - .active_low = 1, + .active_low = 0, }, }; diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index e0a37233c0af..e31bcd8f2eee 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c @@ -805,7 +805,7 @@ void intel_scu_devices_create(void) } else i2c_register_board_info(i2c_bus[i], i2c_devs[i], 1); } - intel_scu_notifier_post(SCU_AVAILABLE, 0L); + intel_scu_notifier_post(SCU_AVAILABLE, NULL); } EXPORT_SYMBOL_GPL(intel_scu_devices_create); @@ -814,7 +814,7 @@ void intel_scu_devices_destroy(void) { int i; - intel_scu_notifier_post(SCU_DOWN, 0L); + intel_scu_notifier_post(SCU_DOWN, NULL); for (i = 0; i < ipc_next_dev; i++) platform_device_del(ipc_devs[i]); diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 4f51bebac02c..95dccce8e979 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -63,6 +63,7 @@ #include #include #include +#include #ifdef CONFIG_ACPI #include @@ -261,7 +262,8 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx, static bool __init xen_check_mwait(void) { -#ifdef CONFIG_ACPI +#if defined(CONFIG_ACPI) && !defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) && \ +
!defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE) struct xen_platform_op op = { .cmd = XENPF_set_processor_pminfo, .u.set_pminfo.id = -1, @@ -349,7 +351,6 @@ static void __init xen_init_cpuid_mask(void) /* Xen will set CR4.OSXSAVE if supported and not disabled by force */ if ((cx & xsave_mask) != xsave_mask) cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */ - if (xen_check_mwait()) cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32)); } @@ -809,9 +810,40 @@ static void xen_io_delay(void) } #ifdef CONFIG_X86_LOCAL_APIC +static unsigned long xen_set_apic_id(unsigned int x) +{ + WARN_ON(1); + return x; +} +static unsigned int xen_get_apic_id(unsigned long x) +{ + return ((x)>>24) & 0xFFu; +} static u32 xen_apic_read(u32 reg) { - return 0; + struct xen_platform_op op = { + .cmd = XENPF_get_cpuinfo, + .interface_version = XENPF_INTERFACE_VERSION, + .u.pcpu_info.xen_cpuid = 0, + }; + int ret = 0; + + /* Shouldn't need this as APIC is turned off for PV, and we only + * get called on the bootup processor. But just in case. */ + if (!xen_initial_domain() || smp_processor_id()) + return 0; + + if (reg == APIC_LVR) + return 0x10; + + if (reg != APIC_ID) + return 0; + + ret = HYPERVISOR_dom0_op(&op); + if (ret) + return 0; + + return op.u.pcpu_info.apic_id << 24; } static void xen_apic_write(u32 reg, u32 val) @@ -849,6 +881,8 @@ static void set_xen_basic_apic_ops(void) apic->icr_write = xen_apic_icr_write; apic->wait_icr_idle = xen_apic_wait_icr_idle; apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle; + apic->set_apic_id = xen_set_apic_id; + apic->get_apic_id = xen_get_apic_id; } #endif @@ -1365,8 +1399,10 @@ asmlinkage void __init xen_start_kernel(void) /* Make sure ACS will be enabled */ pci_request_acs(); } - - +#ifdef CONFIG_PCI + /* PCI BIOS service won't work from a PV guest. */ + pci_probe &= ~PCI_PROBE_BIOS; +#endif xen_raw_console_write("about to get started...\n"); xen_setup_runstate_info(0); diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index b8e279479a6b..69f5857660ac 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -353,8 +353,13 @@ static pteval_t pte_mfn_to_pfn(pteval_t val) { if (val & _PAGE_PRESENT) { unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; + unsigned long pfn = mfn_to_pfn(mfn); + pteval_t flags = val & PTE_FLAGS_MASK; - val = ((pteval_t)mfn_to_pfn(mfn) << PAGE_SHIFT) | flags; + if (unlikely(pfn == ~0)) + val = flags & ~_PAGE_PRESENT; + else + val = ((pteval_t)pfn << PAGE_SHIFT) | flags; } return val; diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 5fac6919b957..0503c0c493a9 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -178,6 +178,7 @@ static void __init xen_fill_possible_map(void) static void __init xen_filter_cpu_maps(void) { int i, rc; + unsigned int subtract = 0; if (!xen_initial_domain()) return; @@ -192,8 +193,22 @@ static void __init xen_filter_cpu_maps(void) } else { set_cpu_possible(i, false); set_cpu_present(i, false); + subtract++; } } +#ifdef CONFIG_HOTPLUG_CPU + /* This is akin to using 'nr_cpus' on the Linux command line. + * Which is OK as when we use 'dom0_max_vcpus=X' we can only + * have up to X, while nr_cpu_ids is greater than X. This + * normally is not a problem, except when CPU hotplugging + * is involved and then there might be more than X CPUs + * in the guest - which will not work as there is no + * hypercall to expand the max number of VCPUs an already + * running guest has. So cap it up to X. 
*/ + if (subtract) + nr_cpu_ids = nr_cpu_ids - subtract; +#endif + } static void __init xen_smp_prepare_boot_cpu(void) diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S index 79d7362ad6d1..3e45aa000718 100644 --- a/arch/x86/xen/xen-asm.S +++ b/arch/x86/xen/xen-asm.S @@ -96,7 +96,7 @@ ENTRY(xen_restore_fl_direct) /* check for unmasked and pending */ cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending - jz 1f + jnz 1f 2: call check_events 1: ENDPATCH(xen_restore_fl_direct) diff --git a/arch/xtensa/include/asm/hardirq.h b/arch/xtensa/include/asm/hardirq.h index 26664cef8f11..91695a135498 100644 --- a/arch/xtensa/include/asm/hardirq.h +++ b/arch/xtensa/include/asm/hardirq.h @@ -11,9 +11,6 @@ #ifndef _XTENSA_HARDIRQ_H #define _XTENSA_HARDIRQ_H -void ack_bad_irq(unsigned int irq); -#define ack_bad_irq ack_bad_irq - #include #endif /* _XTENSA_HARDIRQ_H */ diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h index d04cd3a625fa..4beb43c087d3 100644 --- a/arch/xtensa/include/asm/io.h +++ b/arch/xtensa/include/asm/io.h @@ -14,6 +14,7 @@ #ifdef __KERNEL__ #include #include +#include #include #include diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c index b69b000349fc..d78869a00b11 100644 --- a/arch/xtensa/kernel/signal.c +++ b/arch/xtensa/kernel/signal.c @@ -496,6 +496,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset) signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { + int ret; /* Are we from a system call? */ diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c index 107f6f7be5e1..dd30f40af9f5 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c @@ -174,7 +174,7 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len) index = sctx->count[0] & 0x7f; /* Update number of bytes */ - if (!(sctx->count[0] += len)) + if ((sctx->count[0] += len) < len) sctx->count[1]++; part_len = 128 - index; diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index ab513a972c95..a716fede4f25 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -74,7 +74,8 @@ acpi_status acpi_reset(void) /* Check if the reset register is supported */ - if (!reset_reg->address) { + if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) || + !reset_reg->address) { return_ACPI_STATUS(AE_NOT_EXIST); } diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 7049a7d27c4f..330bb4d75852 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state) * We know a device's inferred power state when all the resources * required for a given D-state are 'on'. */ - for (i = ACPI_STATE_D0; i < ACPI_STATE_D3; i++) { + for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) { list = &device->power.states[i].resources; if (list->count < 1) continue; diff --git a/drivers/acpi/reboot.c b/drivers/acpi/reboot.c index c1d612435939..a6c77e8b37bd 100644 --- a/drivers/acpi/reboot.c +++ b/drivers/acpi/reboot.c @@ -23,7 +23,8 @@ void acpi_reboot(void) /* Is the reset register supported? 
The spec says we should be * checking the bit width and bit offset, but Windows ignores * these fields */ - /* Ignore also acpi_gbl_FADT.flags.ACPI_FADT_RESET_REGISTER */ + if (!(acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)) + return; reset_value = acpi_gbl_FADT.reset_value; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 767e2dcb9616..7417267e88fa 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -869,7 +869,7 @@ static int acpi_bus_get_power_flags(struct acpi_device *device) /* * Enumerate supported power management states */ - for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3; i++) { + for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { struct acpi_device_power_state *ps = &device->power.states[i]; char object_name[5] = { '_', 'P', 'R', '0' + i, '\0' }; @@ -884,21 +884,18 @@ static int acpi_bus_get_power_flags(struct acpi_device *device) acpi_bus_add_power_resource(ps->resources.handles[j]); } - /* The exist of _PR3 indicates D3Cold support */ - if (i == ACPI_STATE_D3) { - status = acpi_get_handle(device->handle, object_name, &handle); - if (ACPI_SUCCESS(status)) - device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1; - } - /* Evaluate "_PSx" to see if we can do explicit sets */ object_name[2] = 'S'; status = acpi_get_handle(device->handle, object_name, &handle); if (ACPI_SUCCESS(status)) ps->flags.explicit_set = 1; - /* State is valid if we have some power control */ - if (ps->resources.count || ps->flags.explicit_set) + /* + * State is valid if there are means to put the device into it. + * D3hot is only valid if _PR3 present. + */ + if (ps->resources.count || + (ps->flags.explicit_set && i < ACPI_STATE_D3_HOT)) ps->flags.valid = 1; ps->power = -1; /* Unknown - driver assigned */ diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 1d661b5c3287..eb6fd233764b 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -28,24 +28,34 @@ #include "internal.h" #include "sleep.h" +u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS; static unsigned int gts, bfs; -module_param(gts, uint, 0644); -module_param(bfs, uint, 0644); +static int set_param_wake_flag(const char *val, struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + + if (ret) + return ret; + + if (kp->arg == (const char *)&gts) { + if (gts) + wake_sleep_flags |= ACPI_EXECUTE_GTS; + else + wake_sleep_flags &= ~ACPI_EXECUTE_GTS; + } + if (kp->arg == (const char *)&bfs) { + if (bfs) + wake_sleep_flags |= ACPI_EXECUTE_BFS; + else + wake_sleep_flags &= ~ACPI_EXECUTE_BFS; + } + return ret; +} +module_param_call(gts, set_param_wake_flag, param_get_int, &gts, 0644); +module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644); MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend."); MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); -static u8 wake_sleep_flags(void) -{ - u8 flags = ACPI_NO_OPTIONAL_METHODS; - - if (gts) - flags |= ACPI_EXECUTE_GTS; - if (bfs) - flags |= ACPI_EXECUTE_BFS; - - return flags; -} - static u8 sleep_states[ACPI_S_STATE_COUNT]; static void acpi_sleep_tts_switch(u32 acpi_state) @@ -263,7 +273,6 @@ static int acpi_suspend_enter(suspend_state_t pm_state) { acpi_status status = AE_OK; u32 acpi_state = acpi_target_sleep_state; - u8 flags = wake_sleep_flags(); int error; ACPI_FLUSH_CPU_CACHE(); @@ -271,7 +280,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state) switch (acpi_state) { case ACPI_STATE_S1: barrier(); - status = acpi_enter_sleep_state(acpi_state, flags); + status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags); break;
case ACPI_STATE_S3: @@ -286,7 +295,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state) acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); /* Reprogram control registers and execute _BFS */ - acpi_leave_sleep_state_prep(acpi_state, flags); + acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags); /* ACPI 3.0 specs (P62) says that it's the responsibility * of the OSPM to clear the status bit [ implying that the @@ -550,30 +559,27 @@ static int acpi_hibernation_begin(void) static int acpi_hibernation_enter(void) { - u8 flags = wake_sleep_flags(); acpi_status status = AE_OK; ACPI_FLUSH_CPU_CACHE(); /* This shouldn't return. If it returns, we have a problem */ - status = acpi_enter_sleep_state(ACPI_STATE_S4, flags); + status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags); /* Reprogram control registers and execute _BFS */ - acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags); + acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); return ACPI_SUCCESS(status) ? 0 : -EFAULT; } static void acpi_hibernation_leave(void) { - u8 flags = wake_sleep_flags(); - /* * If ACPI is not enabled by the BIOS and the boot kernel, we need to * enable it here. */ acpi_enable(); /* Reprogram control registers and execute _BFS */ - acpi_leave_sleep_state_prep(ACPI_STATE_S4, flags); + acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); /* Check the hardware signature */ if (facs && s4_hardware_signature != facs->hardware_signature) { printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " @@ -828,12 +834,10 @@ static void acpi_power_off_prepare(void) static void acpi_power_off(void) { - u8 flags = wake_sleep_flags(); - /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ printk(KERN_DEBUG "%s called\n", __func__); local_irq_disable(); - acpi_enter_sleep_state(ACPI_STATE_S5, flags); + acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags); } /* diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 79a1e9dd56d9..ebaf67e4b2bc 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -394,6 +394,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ { PCI_DEVICE(0x1b4b, 0x9125), .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ + { PCI_DEVICE(0x1b4b, 0x917a), + .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ { PCI_DEVICE(0x1b4b, 0x91a3), .driver_data = board_ahci_yes_fbs }, diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c index 0c86c77764bc..9e419e1c2006 100644 --- a/drivers/ata/ahci_platform.c +++ b/drivers/ata/ahci_platform.c @@ -280,6 +280,7 @@ static struct dev_pm_ops ahci_pm_ops = { static const struct of_device_id ahci_of_match[] = { { .compatible = "calxeda,hb-ahci", }, + { .compatible = "snps,spear-ahci", }, {}, }; MODULE_DEVICE_TABLE(of, ahci_of_match); diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 28db50b57b91..23763a1ec570 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -95,7 +95,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev); static void ata_dev_xfermask(struct ata_device *dev); static unsigned long ata_dev_blacklisted(const struct ata_device *dev); -atomic_t ata_print_id = ATOMIC_INIT(1); +atomic_t ata_print_id = ATOMIC_INIT(0); struct ata_force_param { const char *name; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index c61316e9d2f7..d1fbd59ead16 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -3501,7 +3501,8 @@ static int 
ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg u64 now = get_jiffies_64(); int *trials = void_arg; - if (ent->timestamp < now - min(now, interval)) + if ((ent->eflags & ATA_EFLAG_OLD_ER) || + (ent->timestamp < now - min(now, interval))) return -1; (*trials)++; diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 93dabdcd2cbe..22226350cd0c 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -3399,7 +3399,8 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) */ shost->max_host_blocked = 1; - rc = scsi_add_host(ap->scsi_host, &ap->tdev); + rc = scsi_add_host_with_dma(ap->scsi_host, + &ap->tdev, ap->host->dev); if (rc) goto err_add; } @@ -3838,18 +3839,25 @@ void ata_sas_port_stop(struct ata_port *ap) } EXPORT_SYMBOL_GPL(ata_sas_port_stop); -int ata_sas_async_port_init(struct ata_port *ap) +/** + * ata_sas_async_probe - simply schedule probing and return + * @ap: Port to probe + * + * For batch scheduling of probe for sas attached ata devices, assumes + * the port has already been through ata_sas_port_init() + */ +void ata_sas_async_probe(struct ata_port *ap) { - int rc = ap->ops->port_start(ap); - - if (!rc) { - ap->print_id = atomic_inc_return(&ata_print_id); - __ata_port_probe(ap); - } - - return rc; + __ata_port_probe(ap); } -EXPORT_SYMBOL_GPL(ata_sas_async_port_init); +EXPORT_SYMBOL_GPL(ata_sas_async_probe); + +int ata_sas_sync_probe(struct ata_port *ap) +{ + return ata_port_probe(ap); +} +EXPORT_SYMBOL_GPL(ata_sas_sync_probe); + /** * ata_sas_port_init - Initialize a SATA device @@ -3866,12 +3874,10 @@ int ata_sas_port_init(struct ata_port *ap) { int rc = ap->ops->port_start(ap); - if (!rc) { - ap->print_id = atomic_inc_return(&ata_print_id); - rc = ata_port_probe(ap); - } - - return rc; + if (rc) + return rc; + ap->print_id = atomic_inc_return(&ata_print_id); + return 0; } EXPORT_SYMBOL_GPL(ata_sas_port_init); diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c index fc2db2a89a6b..3239517f4d90 100644 --- a/drivers/ata/pata_arasan_cf.c +++ b/drivers/ata/pata_arasan_cf.c @@ -943,9 +943,9 @@ static int arasan_cf_resume(struct device *dev) return 0; } +#endif static SIMPLE_DEV_PM_OPS(arasan_cf_pm_ops, arasan_cf_suspend, arasan_cf_resume); -#endif static struct platform_driver arasan_cf_driver = { .probe = arasan_cf_probe, @@ -953,9 +953,7 @@ static struct platform_driver arasan_cf_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, -#ifdef CONFIG_PM .pm = &arasan_cf_pm_ops, -#endif }, }; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 7a3f535e481c..bb80853ff27a 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -775,9 +775,11 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, map->format.parse_val(val + i); } else { for (i = 0; i < val_count; i++) { - ret = regmap_read(map, reg + i, val + (i * val_bytes)); + unsigned int ival; + ret = regmap_read(map, reg + i, &ival); if (ret != 0) return ret; + memcpy(val + (i * val_bytes), &ival, val_bytes); } } diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c index cdcf75c0954f..3e2a6002aae6 100644 --- a/drivers/bcma/sprom.c +++ b/drivers/bcma/sprom.c @@ -404,16 +404,19 @@ int bcma_sprom_get(struct bcma_bus *bus) return -EOPNOTSUPP; if (!bcma_sprom_ext_available(bus)) { + bool sprom_onchip; + /* * External SPROM takes precedence so check * on-chip OTP only when no external SPROM * is present. 
*/ - if (bcma_sprom_onchip_available(bus)) { + sprom_onchip = bcma_sprom_onchip_available(bus); + if (sprom_onchip) { /* determine offset */ offset = bcma_sprom_onchip_offset(bus); } - if (!offset) { + if (!offset || !sprom_onchip) { /* * Maybe there is no SPROM on the device? * Now we ask the arch code if there is some sprom diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index abfaacaaf346..946166e13953 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -2297,7 +2297,7 @@ static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms return; } - if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) { + if (!capable(CAP_SYS_ADMIN)) { retcode = ERR_PERM; goto fail; } diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c index 89860f34a7ec..4f66171c6683 100644 --- a/drivers/block/xen-blkback/xenbus.c +++ b/drivers/block/xen-blkback/xenbus.c @@ -416,7 +416,7 @@ static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info "discard-secure", "%d", blkif->vbd.discard_secure); if (err) { - dev_warn(dev-dev, "writing discard-secure (%d)", err); + dev_warn(&dev->dev, "writing discard-secure (%d)", err); return; } } diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index ae9edca7b56d..57fd867553d7 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -75,6 +75,8 @@ static struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x0CF3, 0x311D) }, { USB_DEVICE(0x13d3, 0x3375) }, { USB_DEVICE(0x04CA, 0x3005) }, + { USB_DEVICE(0x13d3, 0x3362) }, + { USB_DEVICE(0x0CF3, 0xE004) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, @@ -94,6 +96,8 @@ static struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { } /* Terminating entry */ }; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 3311b812a0c6..9217121362e1 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -101,12 +101,16 @@ static struct usb_device_id btusb_table[] = { { USB_DEVICE(0x0c10, 0x0000) }, /* Broadcom BCM20702A0 */ + { USB_DEVICE(0x0489, 0xe042) }, { USB_DEVICE(0x0a5c, 0x21e3) }, { USB_DEVICE(0x0a5c, 0x21e6) }, { USB_DEVICE(0x0a5c, 0x21e8) }, { USB_DEVICE(0x0a5c, 0x21f3) }, { USB_DEVICE(0x413c, 0x8197) }, + /* Foxconn - Hon Hai */ + { USB_DEVICE(0x0489, 0xe033) }, + { } /* Terminating entry */ }; @@ -133,6 +137,8 @@ static struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c index 0053d7ebb5ca..8f3f74ce8c7f 100644 --- a/drivers/crypto/ixp4xx_crypto.c +++ b/drivers/crypto/ixp4xx_crypto.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c index 
dc641c796526..921039e56f87 100644 --- a/drivers/crypto/talitos.c +++ b/drivers/crypto/talitos.c @@ -124,6 +124,9 @@ struct talitos_private { void __iomem *reg; int irq[2]; + /* SEC global registers lock */ + spinlock_t reg_lock ____cacheline_aligned; + /* SEC version geometry (from device tree node) */ unsigned int num_channels; unsigned int chfifo_len; @@ -412,6 +415,7 @@ static void talitos_done_##name(unsigned long data) \ { \ struct device *dev = (struct device *)data; \ struct talitos_private *priv = dev_get_drvdata(dev); \ + unsigned long flags; \ \ if (ch_done_mask & 1) \ flush_channel(dev, 0, 0, 0); \ @@ -427,8 +431,10 @@ static void talitos_done_##name(unsigned long data) \ out: \ /* At this point, all completed channels have been processed */ \ /* Unmask done interrupts for channels completed later on. */ \ + spin_lock_irqsave(&priv->reg_lock, flags); \ setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ } DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE) DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE) @@ -619,22 +625,28 @@ static irqreturn_t talitos_interrupt_##name(int irq, void *data) \ struct device *dev = data; \ struct talitos_private *priv = dev_get_drvdata(dev); \ u32 isr, isr_lo; \ + unsigned long flags; \ \ + spin_lock_irqsave(&priv->reg_lock, flags); \ isr = in_be32(priv->reg + TALITOS_ISR); \ isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \ /* Acknowledge interrupt */ \ out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \ out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \ \ - if (unlikely((isr & ~TALITOS_ISR_4CHDONE) & ch_err_mask || isr_lo)) \ - talitos_error(dev, isr, isr_lo); \ - else \ + if (unlikely(isr & ch_err_mask || isr_lo)) { \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ + talitos_error(dev, isr & ch_err_mask, isr_lo); \ + } \ + else { \ if (likely(isr & ch_done_mask)) { \ /* mask further done interrupts. */ \ clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ /* done_task will unmask done interrupts at exit */ \ tasklet_schedule(&priv->done_task[tlet]); \ } \ + spin_unlock_irqrestore(&priv->reg_lock, flags); \ + } \ \ return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \ IRQ_NONE; \ @@ -2719,6 +2731,8 @@ static int talitos_probe(struct platform_device *ofdev) priv->ofdev = ofdev; + spin_lock_init(&priv->reg_lock); + err = talitos_probe_irq(ofdev); if (err) goto err_out; diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index cf9da362d64f..ef378b5b17e4 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -91,11 +91,10 @@ config DW_DMAC config AT_HDMAC tristate "Atmel AHB DMA support" - depends on ARCH_AT91SAM9RL || ARCH_AT91SAM9G45 + depends on ARCH_AT91 select DMA_ENGINE help - Support the Atmel AHB DMA controller. This can be integrated in - chips such as the Atmel AT91SAM9RL. + Support the Atmel AHB DMA controller. 
config FSL_DMA tristate "Freescale Elo and Elo Plus DMA support" diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index c301a8ec31aa..3d704abd7912 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1429,6 +1429,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, * signal */ release_phy_channel(plchan); + plchan->phychan_hold = 0; } /* Dequeue jobs and free LLIs */ if (plchan->at) { diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 7aa58d204892..445fdf811695 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -221,10 +221,6 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) vdbg_dump_regs(atchan); - /* clear any pending interrupt */ - while (dma_readl(atdma, EBCISR)) - cpu_relax(); - channel_writel(atchan, SADDR, 0); channel_writel(atchan, DADDR, 0); channel_writel(atchan, CTRLA, 0); diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index a45b5d2a5987..bb787d8e1529 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -571,11 +571,14 @@ static void imxdma_tasklet(unsigned long data) if (desc->desc.callback) desc->desc.callback(desc->desc.callback_param); - dma_cookie_complete(&desc->desc); - - /* If we are dealing with a cyclic descriptor keep it on ld_active */ + /* If we are dealing with a cyclic descriptor keep it on ld_active + * and dont mark the descripor as complete. + * Only in non-cyclic cases it would be marked as complete + */ if (imxdma_chan_is_doing_cyclic(imxdmac)) goto out; + else + dma_cookie_complete(&desc->desc); /* Free 2D slot if it was an interleaved transfer */ if (imxdmac->enabled_2d) { diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index c81ef7e10e08..655d4ce6ed0d 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -201,10 +201,6 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) { - struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan); - - mxs_dma_enable_chan(mxs_chan); - return dma_cookie_assign(tx); } @@ -558,9 +554,9 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, static void mxs_dma_issue_pending(struct dma_chan *chan) { - /* - * Nothing to do. We only have a single descriptor. 
- */ + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + + mxs_dma_enable_chan(mxs_chan); } static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 282caf118be8..2ee6e23930ad 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2225,12 +2225,9 @@ static inline void free_desc_list(struct list_head *list) { struct dma_pl330_dmac *pdmac; struct dma_pl330_desc *desc; - struct dma_pl330_chan *pch; + struct dma_pl330_chan *pch = NULL; unsigned long flags; - if (list_empty(list)) - return; - /* Finish off the work list */ list_for_each_entry(desc, list, node) { dma_async_tx_callback callback; @@ -2247,6 +2244,10 @@ static inline void free_desc_list(struct list_head *list) desc->pchan = NULL; } + /* pch will be unset if list was empty */ + if (!pch) + return; + pdmac = pch->dmac; spin_lock_irqsave(&pdmac->pool_lock, flags); @@ -2257,12 +2258,9 @@ static inline void free_desc_list(struct list_head *list) static inline void handle_cyclic_desc_list(struct list_head *list) { struct dma_pl330_desc *desc; - struct dma_pl330_chan *pch; + struct dma_pl330_chan *pch = NULL; unsigned long flags; - if (list_empty(list)) - return; - list_for_each_entry(desc, list, node) { dma_async_tx_callback callback; @@ -2274,6 +2272,10 @@ static inline void handle_cyclic_desc_list(struct list_head *list) callback(desc->txd.callback_param); } + /* pch will be unset if list was empty */ + if (!pch) + return; + spin_lock_irqsave(&pch->lock, flags); list_splice_tail_init(list, &pch->work_list); spin_unlock_irqrestore(&pch->lock, flags); @@ -2926,8 +2928,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) INIT_LIST_HEAD(&pd->channels); /* Initialize channel parameters */ - num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri, - (u8)pi->pcfg.num_chan); + if (pdat) + num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan); + else + num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan); + pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); for (i = 0; i < num_chan; i++) { diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index bdd41d4bfa8d..2ed1ac3513f3 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -18,6 +18,7 @@ #include #include #include +#include #include @@ -68,6 +69,22 @@ enum d40_command { D40_DMA_SUSPENDED = 3 }; +/* + * enum d40_events - The different Event Enables for the event lines. + * + * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan. + * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan. + * @D40_SUSPEND_REQ_EVENTLINE: Requesting for suspending a event line. + * @D40_ROUND_EVENTLINE: Status check for event line. + */ + +enum d40_events { + D40_DEACTIVATE_EVENTLINE = 0, + D40_ACTIVATE_EVENTLINE = 1, + D40_SUSPEND_REQ_EVENTLINE = 2, + D40_ROUND_EVENTLINE = 3 +}; + /* * These are the registers that has to be saved and later restored * when the DMA hw is powered off. 
@@ -870,8 +887,8 @@ static void d40_save_restore_registers(struct d40_base *base, bool save) } #endif -static int d40_channel_execute_command(struct d40_chan *d40c, - enum d40_command command) +static int __d40_execute_command_phy(struct d40_chan *d40c, + enum d40_command command) { u32 status; int i; @@ -880,6 +897,12 @@ static int d40_channel_execute_command(struct d40_chan *d40c, unsigned long flags; u32 wmask; + if (command == D40_DMA_STOP) { + ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ); + if (ret) + return ret; + } + spin_lock_irqsave(&d40c->base->execmd_lock, flags); if (d40c->phy_chan->num % 2 == 0) @@ -973,67 +996,109 @@ static void d40_term_all(struct d40_chan *d40c) } d40c->pending_tx = 0; - d40c->busy = false; } -static void __d40_config_set_event(struct d40_chan *d40c, bool enable, - u32 event, int reg) +static void __d40_config_set_event(struct d40_chan *d40c, + enum d40_events event_type, u32 event, + int reg) { void __iomem *addr = chan_base(d40c) + reg; int tries; + u32 status; + + switch (event_type) { + + case D40_DEACTIVATE_EVENTLINE: - if (!enable) { writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) | ~D40_EVENTLINE_MASK(event), addr); - return; - } + break; + case D40_SUSPEND_REQ_EVENTLINE: + status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> + D40_EVENTLINE_POS(event); + + if (status == D40_DEACTIVATE_EVENTLINE || + status == D40_SUSPEND_REQ_EVENTLINE) + break; + + writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event)) + | ~D40_EVENTLINE_MASK(event), addr); + + for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) { + + status = (readl(addr) & D40_EVENTLINE_MASK(event)) >> + D40_EVENTLINE_POS(event); + + cpu_relax(); + /* + * Reduce the number of bus accesses while + * waiting for the DMA to suspend. + */ + udelay(3); + + if (status == D40_DEACTIVATE_EVENTLINE) + break; + } + + if (tries == D40_SUSPEND_MAX_IT) { + chan_err(d40c, + "unable to stop the event_line chl %d (log: %d)" + "status %x\n", d40c->phy_chan->num, + d40c->log_num, status); + } + break; + + case D40_ACTIVATE_EVENTLINE: /* * The hardware sometimes doesn't register the enable when src and dst * event lines are active on the same logical channel. Retry to ensure * it does. Usually only one retry is sufficient. */ - tries = 100; - while (--tries) { - writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) - | ~D40_EVENTLINE_MASK(event), addr); + tries = 100; + while (--tries) { + writel((D40_ACTIVATE_EVENTLINE << + D40_EVENTLINE_POS(event)) | + ~D40_EVENTLINE_MASK(event), addr); + + if (readl(addr) & D40_EVENTLINE_MASK(event)) + break; + } + + if (tries != 99) + dev_dbg(chan2dev(d40c), + "[%s] workaround enable S%cLNK (%d tries)\n", + __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', + 100 - tries); + + WARN_ON(!tries); + break; + + case D40_ROUND_EVENTLINE: + BUG(); + break; - if (readl(addr) & D40_EVENTLINE_MASK(event)) - break; } - - if (tries != 99) - dev_dbg(chan2dev(d40c), - "[%s] workaround enable S%cLNK (%d tries)\n", - __func__, reg == D40_CHAN_REG_SSLNK ? 
'S' : 'D', - 100 - tries); - - WARN_ON(!tries); } -static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) +static void d40_config_set_event(struct d40_chan *d40c, + enum d40_events event_type) { - unsigned long flags; - - spin_lock_irqsave(&d40c->phy_chan->lock, flags); - /* Enable event line connected to device (or memcpy) */ if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); - __d40_config_set_event(d40c, do_enable, event, + __d40_config_set_event(d40c, event_type, event, D40_CHAN_REG_SSLNK); } if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); - __d40_config_set_event(d40c, do_enable, event, + __d40_config_set_event(d40c, event_type, event, D40_CHAN_REG_SDLNK); } - - spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); } static u32 d40_chan_has_events(struct d40_chan *d40c) @@ -1047,6 +1112,64 @@ static u32 d40_chan_has_events(struct d40_chan *d40c) return val; } +static int +__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command) +{ + unsigned long flags; + int ret = 0; + u32 active_status; + void __iomem *active_reg; + + if (d40c->phy_chan->num % 2 == 0) + active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; + else + active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; + + + spin_lock_irqsave(&d40c->phy_chan->lock, flags); + + switch (command) { + case D40_DMA_STOP: + case D40_DMA_SUSPEND_REQ: + + active_status = (readl(active_reg) & + D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> + D40_CHAN_POS(d40c->phy_chan->num); + + if (active_status == D40_DMA_RUN) + d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE); + else + d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE); + + if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP)) + ret = __d40_execute_command_phy(d40c, command); + + break; + + case D40_DMA_RUN: + + d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE); + ret = __d40_execute_command_phy(d40c, command); + break; + + case D40_DMA_SUSPENDED: + BUG(); + break; + } + + spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); + return ret; +} + +static int d40_channel_execute_command(struct d40_chan *d40c, + enum d40_command command) +{ + if (chan_is_logical(d40c)) + return __d40_execute_command_log(d40c, command); + else + return __d40_execute_command_phy(d40c, command); +} + static u32 d40_get_prmo(struct d40_chan *d40c) { static const unsigned int phy_map[] = { @@ -1149,15 +1272,7 @@ static int d40_pause(struct d40_chan *d40c) spin_lock_irqsave(&d40c->lock, flags); res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); - if (res == 0) { - if (chan_is_logical(d40c)) { - d40_config_set_event(d40c, false); - /* Resume the other logical channels if any */ - if (d40_chan_has_events(d40c)) - res = d40_channel_execute_command(d40c, - D40_DMA_RUN); - } - } + pm_runtime_mark_last_busy(d40c->base->dev); pm_runtime_put_autosuspend(d40c->base->dev); spin_unlock_irqrestore(&d40c->lock, flags); @@ -1174,45 +1289,17 @@ static int d40_resume(struct d40_chan *d40c) spin_lock_irqsave(&d40c->lock, flags); pm_runtime_get_sync(d40c->base->dev); - if (d40c->base->rev == 0) - if (chan_is_logical(d40c)) { - res = d40_channel_execute_command(d40c, - D40_DMA_SUSPEND_REQ); - goto no_suspend; - } /* If bytes left to transfer or linked tx resume job */ - if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { - - if (chan_is_logical(d40c)) - d40_config_set_event(d40c, true); - + if 
(d40_residue(d40c) || d40_tx_is_linked(d40c)) res = d40_channel_execute_command(d40c, D40_DMA_RUN); - } -no_suspend: pm_runtime_mark_last_busy(d40c->base->dev); pm_runtime_put_autosuspend(d40c->base->dev); spin_unlock_irqrestore(&d40c->lock, flags); return res; } -static int d40_terminate_all(struct d40_chan *chan) -{ - unsigned long flags; - int ret = 0; - - ret = d40_pause(chan); - if (!ret && chan_is_physical(chan)) - ret = d40_channel_execute_command(chan, D40_DMA_STOP); - - spin_lock_irqsave(&chan->lock, flags); - d40_term_all(chan); - spin_unlock_irqrestore(&chan->lock, flags); - - return ret; -} - static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) { struct d40_chan *d40c = container_of(tx->chan, @@ -1232,20 +1319,6 @@ static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) static int d40_start(struct d40_chan *d40c) { - if (d40c->base->rev == 0) { - int err; - - if (chan_is_logical(d40c)) { - err = d40_channel_execute_command(d40c, - D40_DMA_SUSPEND_REQ); - if (err) - return err; - } - } - - if (chan_is_logical(d40c)) - d40_config_set_event(d40c, true); - return d40_channel_execute_command(d40c, D40_DMA_RUN); } @@ -1258,10 +1331,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c) d40d = d40_first_queued(d40c); if (d40d != NULL) { - if (!d40c->busy) + if (!d40c->busy) { d40c->busy = true; - - pm_runtime_get_sync(d40c->base->dev); + pm_runtime_get_sync(d40c->base->dev); + } /* Remove from queue */ d40_desc_remove(d40d); @@ -1388,8 +1461,8 @@ static void dma_tasklet(unsigned long data) return; - err: - /* Rescue manoeuvre if receiving double interrupts */ +err: + /* Rescue manouver if receiving double interrupts */ if (d40c->pending_tx > 0) d40c->pending_tx--; spin_unlock_irqrestore(&d40c->lock, flags); @@ -1770,7 +1843,6 @@ static int d40_config_memcpy(struct d40_chan *d40c) return 0; } - static int d40_free_dma(struct d40_chan *d40c) { @@ -1806,44 +1878,19 @@ static int d40_free_dma(struct d40_chan *d40c) } pm_runtime_get_sync(d40c->base->dev); - res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); - if (res) { - chan_err(d40c, "suspend failed\n"); - goto out; - } - - if (chan_is_logical(d40c)) { - /* Release logical channel, deactivate the event line */ - - d40_config_set_event(d40c, false); - d40c->base->lookup_log_chans[d40c->log_num] = NULL; - - /* - * Check if there are more logical allocation - * on this phy channel. - */ - if (!d40_alloc_mask_free(phy, is_src, event)) { - /* Resume the other logical channels if any */ - if (d40_chan_has_events(d40c)) { - res = d40_channel_execute_command(d40c, - D40_DMA_RUN); - if (res) - chan_err(d40c, - "Executing RUN command\n"); - } - goto out; - } - } else { - (void) d40_alloc_mask_free(phy, is_src, 0); - } - - /* Release physical channel */ res = d40_channel_execute_command(d40c, D40_DMA_STOP); if (res) { - chan_err(d40c, "Failed to stop channel\n"); + chan_err(d40c, "stop failed\n"); goto out; } + d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? 
event : 0); + + if (chan_is_logical(d40c)) + d40c->base->lookup_log_chans[d40c->log_num] = NULL; + else + d40c->base->lookup_phy_chans[phy->num] = NULL; + if (d40c->busy) { pm_runtime_mark_last_busy(d40c->base->dev); pm_runtime_put_autosuspend(d40c->base->dev); @@ -1852,7 +1899,6 @@ static int d40_free_dma(struct d40_chan *d40c) d40c->busy = false; d40c->phy_chan = NULL; d40c->configured = false; - d40c->base->lookup_phy_chans[phy->num] = NULL; out: pm_runtime_mark_last_busy(d40c->base->dev); @@ -2070,7 +2116,7 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, if (sg_next(&sg_src[sg_len - 1]) == sg_src) desc->cyclic = true; - if (direction != DMA_NONE) { + if (direction != DMA_TRANS_NONE) { dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); if (direction == DMA_DEV_TO_MEM) @@ -2371,6 +2417,31 @@ static void d40_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&d40c->lock, flags); } +static void d40_terminate_all(struct dma_chan *chan) +{ + unsigned long flags; + struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); + int ret; + + spin_lock_irqsave(&d40c->lock, flags); + + pm_runtime_get_sync(d40c->base->dev); + ret = d40_channel_execute_command(d40c, D40_DMA_STOP); + if (ret) + chan_err(d40c, "Failed to stop channel\n"); + + d40_term_all(d40c); + pm_runtime_mark_last_busy(d40c->base->dev); + pm_runtime_put_autosuspend(d40c->base->dev); + if (d40c->busy) { + pm_runtime_mark_last_busy(d40c->base->dev); + pm_runtime_put_autosuspend(d40c->base->dev); + } + d40c->busy = false; + + spin_unlock_irqrestore(&d40c->lock, flags); +} + static int dma40_config_to_halfchannel(struct d40_chan *d40c, struct stedma40_half_channel_info *info, @@ -2551,7 +2622,8 @@ static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, switch (cmd) { case DMA_TERMINATE_ALL: - return d40_terminate_all(d40c); + d40_terminate_all(chan); + return 0; case DMA_PAUSE: return d40_pause(d40c); case DMA_RESUME: @@ -2908,6 +2980,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", rev, res->start); + if (rev < 2) { + d40_err(&pdev->dev, "hardware revision: %d is not supported", + rev); + goto failure; + } + plat_data = pdev->dev.platform_data; /* Count the number of logical channels in use */ @@ -2998,6 +3076,7 @@ failure: if (base) { kfree(base->lcla_pool.alloc_map); + kfree(base->reg_val_backup_chan); kfree(base->lookup_log_chans); kfree(base->lookup_phy_chans); kfree(base->phy_res); diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h index 8d3d490968a3..51e8e5396e9b 100644 --- a/drivers/dma/ste_dma40_ll.h +++ b/drivers/dma/ste_dma40_ll.h @@ -62,8 +62,6 @@ #define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS) /* Link register */ -#define D40_DEACTIVATE_EVENTLINE 0x0 -#define D40_ACTIVATE_EVENTLINE 0x1 #define D40_EVENTLINE_POS(i) (2 * i) #define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i)) diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index d25599f2a3f8..47408e802ab6 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c @@ -191,6 +191,190 @@ utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len) } } +static bool +validate_device_path(struct efi_variable *var, int match, u8 *buffer, + unsigned long len) +{ + struct efi_generic_dev_path *node; + int offset = 0; + + node = (struct efi_generic_dev_path *)buffer; + + if (len < sizeof(*node)) + return false; + + while (offset <= len - 
sizeof(*node) && + node->length >= sizeof(*node) && + node->length <= len - offset) { + offset += node->length; + + if ((node->type == EFI_DEV_END_PATH || + node->type == EFI_DEV_END_PATH2) && + node->sub_type == EFI_DEV_END_ENTIRE) + return true; + + node = (struct efi_generic_dev_path *)(buffer + offset); + } + + /* + * If we're here then either node->length pointed past the end + * of the buffer or we reached the end of the buffer without + * finding a device path end node. + */ + return false; +} + +static bool +validate_boot_order(struct efi_variable *var, int match, u8 *buffer, + unsigned long len) +{ + /* An array of 16-bit integers */ + if ((len % 2) != 0) + return false; + + return true; +} + +static bool +validate_load_option(struct efi_variable *var, int match, u8 *buffer, + unsigned long len) +{ + u16 filepathlength; + int i, desclength = 0, namelen; + + namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName)); + + /* Either "Boot" or "Driver" followed by four digits of hex */ + for (i = match; i < match+4; i++) { + if (var->VariableName[i] > 127 || + hex_to_bin(var->VariableName[i] & 0xff) < 0) + return true; + } + + /* Reject it if there's 4 digits of hex and then further content */ + if (namelen > match + 4) + return false; + + /* A valid entry must be at least 8 bytes */ + if (len < 8) + return false; + + filepathlength = buffer[4] | buffer[5] << 8; + + /* + * There's no stored length for the description, so it has to be + * found by hand + */ + desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2; + + /* Each boot entry must have a descriptor */ + if (!desclength) + return false; + + /* + * If the sum of the length of the description, the claimed filepath + * length and the original header are greater than the length of the + * variable, it's malformed + */ + if ((desclength + filepathlength + 6) > len) + return false; + + /* + * And, finally, check the filepath + */ + return validate_device_path(var, match, buffer + desclength + 6, + filepathlength); +} + +static bool +validate_uint16(struct efi_variable *var, int match, u8 *buffer, + unsigned long len) +{ + /* A single 16-bit integer */ + if (len != 2) + return false; + + return true; +} + +static bool +validate_ascii_string(struct efi_variable *var, int match, u8 *buffer, + unsigned long len) +{ + int i; + + for (i = 0; i < len; i++) { + if (buffer[i] > 127) + return false; + + if (buffer[i] == 0) + return true; + } + + return false; +} + +struct variable_validate { + char *name; + bool (*validate)(struct efi_variable *var, int match, u8 *data, + unsigned long len); +}; + +static const struct variable_validate variable_validate[] = { + { "BootNext", validate_uint16 }, + { "BootOrder", validate_boot_order }, + { "DriverOrder", validate_boot_order }, + { "Boot*", validate_load_option }, + { "Driver*", validate_load_option }, + { "ConIn", validate_device_path }, + { "ConInDev", validate_device_path }, + { "ConOut", validate_device_path }, + { "ConOutDev", validate_device_path }, + { "ErrOut", validate_device_path }, + { "ErrOutDev", validate_device_path }, + { "Timeout", validate_uint16 }, + { "Lang", validate_ascii_string }, + { "PlatformLang", validate_ascii_string }, + { "", NULL }, +}; + +static bool +validate_var(struct efi_variable *var, u8 *data, unsigned long len) +{ + int i; + u16 *unicode_name = var->VariableName; + + for (i = 0; variable_validate[i].validate != NULL; i++) { + const char *name = variable_validate[i].name; + int match; + + for (match = 0; ; match++) { + char c = 
name[match]; + u16 u = unicode_name[match]; + + /* All special variables are plain ascii */ + if (u > 127) + return true; + + /* Wildcard in the matching name means we've matched */ + if (c == '*') + return variable_validate[i].validate(var, + match, data, len); + + /* Case sensitive match */ + if (c != u) + break; + + /* Reached the end of the string while matching */ + if (!c) + return variable_validate[i].validate(var, + match, data, len); + } + } + + return true; +} + static efi_status_t get_var_data_locked(struct efivars *efivars, struct efi_variable *var) { @@ -324,6 +508,12 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count) return -EINVAL; } + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || + validate_var(new_var, new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed variable content\n"); + return -EINVAL; + } + spin_lock(&efivars->lock); status = efivars->ops->set_variable(new_var->VariableName, &new_var->VendorGuid, @@ -626,6 +816,12 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN)) return -EACCES; + if ((new_var->Attributes & ~EFI_VARIABLE_MASK) != 0 || + validate_var(new_var, new_var->Data, new_var->DataSize) == false) { + printk(KERN_ERR "efivars: Malformed variable content\n"); + return -EINVAL; + } + spin_lock(&efivars->lock); /* diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 1adc2ec1e383..4461540653a8 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -965,18 +965,15 @@ static void omap_gpio_mod_init(struct gpio_bank *bank) } _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv); - _gpio_rmw(base, bank->regs->irqstatus, l, - bank->regs->irqenable_inv == false); - _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->debounce_en != 0); - _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->ctrl != 0); + _gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv); if (bank->regs->debounce_en) - _gpio_rmw(base, bank->regs->debounce_en, 0, 1); + __raw_writel(0, base + bank->regs->debounce_en); /* Save OE default value (0xffffffff) in the context */ bank->context.oe = __raw_readl(bank->base + bank->regs->direction); /* Initialize interface clk ungated, module enabled */ if (bank->regs->ctrl) - _gpio_rmw(base, bank->regs->ctrl, 0, 1); + __raw_writel(0, base + bank->regs->ctrl); } static __devinit void diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c index e8729cc2ba2b..2cd958e0b822 100644 --- a/drivers/gpio/gpio-pch.c +++ b/drivers/gpio/gpio-pch.c @@ -230,16 +230,12 @@ static void pch_gpio_setup(struct pch_gpio *chip) static int pch_irq_type(struct irq_data *d, unsigned int type) { - u32 im; - u32 __iomem *im_reg; - u32 ien; - u32 im_pos; - int ch; - unsigned long flags; - u32 val; - int irq = d->irq; struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); struct pch_gpio *chip = gc->private; + u32 im, im_pos, val; + u32 __iomem *im_reg; + unsigned long flags; + int ch, irq = d->irq; ch = irq - chip->irq_base; if (irq <= chip->irq_base + 7) { @@ -270,30 +266,22 @@ static int pch_irq_type(struct irq_data *d, unsigned int type) case IRQ_TYPE_LEVEL_LOW: val = PCH_LEVEL_L; break; - case IRQ_TYPE_PROBE: - goto end; default: - dev_warn(chip->dev, "%s: unknown type(%dd)", - __func__, type); - goto end; + goto unlock; } /* Set interrupt mode */ im = ioread32(im_reg) & ~(PCH_IM_MASK << (im_pos * 4)); iowrite32(im | (val << (im_pos * 4)), im_reg); - /* iclr */ - iowrite32(BIT(ch), 
&chip->reg->iclr); + /* And the handler */ + if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) + __irq_set_handler_locked(d->irq, handle_level_irq); + else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) + __irq_set_handler_locked(d->irq, handle_edge_irq); - /* IMASKCLR */ - iowrite32(BIT(ch), &chip->reg->imaskclr); - - /* Enable interrupt */ - ien = ioread32(&chip->reg->ien); - iowrite32(ien | BIT(ch), &chip->reg->ien); -end: +unlock: spin_unlock_irqrestore(&chip->spinlock, flags); - return 0; } @@ -313,18 +301,24 @@ static void pch_irq_mask(struct irq_data *d) iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->imask); } +static void pch_irq_ack(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct pch_gpio *chip = gc->private; + + iowrite32(1 << (d->irq - chip->irq_base), &chip->reg->iclr); +} + static irqreturn_t pch_gpio_handler(int irq, void *dev_id) { struct pch_gpio *chip = dev_id; u32 reg_val = ioread32(&chip->reg->istatus); - int i; - int ret = IRQ_NONE; + int i, ret = IRQ_NONE; for (i = 0; i < gpio_pins[chip->ioh]; i++) { if (reg_val & BIT(i)) { dev_dbg(chip->dev, "%s:[%d]:irq=%d status=0x%x\n", __func__, i, irq, reg_val); - iowrite32(BIT(i), &chip->reg->iclr); generic_handle_irq(chip->irq_base + i); ret = IRQ_HANDLED; } @@ -343,6 +337,7 @@ static __devinit void pch_gpio_alloc_generic_chip(struct pch_gpio *chip, gc->private = chip; ct = gc->chip_types; + ct->chip.irq_ack = pch_irq_ack; ct->chip.irq_mask = pch_irq_mask; ct->chip.irq_unmask = pch_irq_unmask; ct->chip.irq_set_type = pch_irq_type; @@ -357,6 +352,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev, s32 ret; struct pch_gpio *chip; int irq_base; + u32 msk; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) @@ -408,8 +404,13 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev, } chip->irq_base = irq_base; + /* Mask all interrupts, but enable them */ + msk = (1 << gpio_pins[chip->ioh]) - 1; + iowrite32(msk, &chip->reg->imask); + iowrite32(msk, &chip->reg->ien); + ret = request_irq(pdev->irq, pch_gpio_handler, - IRQF_SHARED, KBUILD_MODNAME, chip); + IRQF_SHARED, KBUILD_MODNAME, chip); if (ret != 0) { dev_err(&pdev->dev, "%s request_irq failed\n", __func__); @@ -418,8 +419,6 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev, pch_gpio_alloc_generic_chip(chip, irq_base, gpio_pins[chip->ioh]); - /* Initialize interrupt ien register */ - iowrite32(0, &chip->reg->ien); end: return 0; diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 5689ce62fd81..fc3ace3fd4cb 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -64,6 +64,7 @@ struct pxa_gpio_chip { unsigned long irq_mask; unsigned long irq_edge_rise; unsigned long irq_edge_fall; + int (*set_wake)(unsigned int gpio, unsigned int on); #ifdef CONFIG_PM unsigned long saved_gplr; @@ -269,7 +270,8 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value) (value ? 
GPSR_OFFSET : GPCR_OFFSET)); } -static int __devinit pxa_init_gpio_chip(int gpio_end) +static int __devinit pxa_init_gpio_chip(int gpio_end, + int (*set_wake)(unsigned int, unsigned int)) { int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1; struct pxa_gpio_chip *chips; @@ -285,6 +287,7 @@ static int __devinit pxa_init_gpio_chip(int gpio_end) sprintf(chips[i].label, "gpio-%d", i); chips[i].regbase = gpio_reg_base + BANK_OFF(i); + chips[i].set_wake = set_wake; c->base = gpio; c->label = chips[i].label; @@ -412,6 +415,17 @@ static void pxa_mask_muxed_gpio(struct irq_data *d) writel_relaxed(gfer, c->regbase + GFER_OFFSET); } +static int pxa_gpio_set_wake(struct irq_data *d, unsigned int on) +{ + int gpio = pxa_irq_to_gpio(d->irq); + struct pxa_gpio_chip *c = gpio_to_pxachip(gpio); + + if (c->set_wake) + return c->set_wake(gpio, on); + else + return 0; +} + static void pxa_unmask_muxed_gpio(struct irq_data *d) { int gpio = pxa_irq_to_gpio(d->irq); @@ -427,6 +441,7 @@ static struct irq_chip pxa_muxed_gpio_chip = { .irq_mask = pxa_mask_muxed_gpio, .irq_unmask = pxa_unmask_muxed_gpio, .irq_set_type = pxa_gpio_irq_type, + .irq_set_wake = pxa_gpio_set_wake, }; static int pxa_gpio_nums(void) @@ -471,6 +486,7 @@ static int __devinit pxa_gpio_probe(struct platform_device *pdev) struct pxa_gpio_chip *c; struct resource *res; struct clk *clk; + struct pxa_gpio_platform_data *info; int gpio, irq, ret; int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0; @@ -516,7 +532,8 @@ static int __devinit pxa_gpio_probe(struct platform_device *pdev) } /* Initialize GPIO chips */ - pxa_init_gpio_chip(pxa_last_gpio); + info = dev_get_platdata(&pdev->dev); + pxa_init_gpio_chip(pxa_last_gpio, info ? info->gpio_set_wake : NULL); /* clear all GPIO edge detects */ for_each_gpio_chip(gpio, c) { diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c index 19d6fc0229c3..e991d9171961 100644 --- a/drivers/gpio/gpio-samsung.c +++ b/drivers/gpio/gpio-samsung.c @@ -452,12 +452,14 @@ static struct samsung_gpio_cfg s3c24xx_gpiocfg_banka = { }; #endif +#if defined(CONFIG_ARCH_EXYNOS4) || defined(CONFIG_ARCH_EXYNOS5) static struct samsung_gpio_cfg exynos_gpio_cfg = { .set_pull = exynos_gpio_setpull, .get_pull = exynos_gpio_getpull, .set_config = samsung_gpio_setcfg_4bit, .get_config = samsung_gpio_getcfg_4bit, }; +#endif #if defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450) static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = { @@ -2123,8 +2125,8 @@ static struct samsung_gpio_chip s5pv210_gpios_4bit[] = { * uses the above macro and depends on the banks being listed in order here. 
*/ -static struct samsung_gpio_chip exynos4_gpios_1[] = { #ifdef CONFIG_ARCH_EXYNOS4 +static struct samsung_gpio_chip exynos4_gpios_1[] = { { .chip = { .base = EXYNOS4_GPA0(0), @@ -2222,11 +2224,11 @@ static struct samsung_gpio_chip exynos4_gpios_1[] = { .label = "GPF3", }, }, -#endif }; +#endif -static struct samsung_gpio_chip exynos4_gpios_2[] = { #ifdef CONFIG_ARCH_EXYNOS4 +static struct samsung_gpio_chip exynos4_gpios_2[] = { { .chip = { .base = EXYNOS4_GPJ0(0), @@ -2367,11 +2369,11 @@ static struct samsung_gpio_chip exynos4_gpios_2[] = { .to_irq = samsung_gpiolib_to_irq, }, }, -#endif }; +#endif -static struct samsung_gpio_chip exynos4_gpios_3[] = { #ifdef CONFIG_ARCH_EXYNOS4 +static struct samsung_gpio_chip exynos4_gpios_3[] = { { .chip = { .base = EXYNOS4_GPZ(0), @@ -2379,8 +2381,8 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = { .label = "GPZ", }, }, -#endif }; +#endif #ifdef CONFIG_ARCH_EXYNOS5 static struct samsung_gpio_chip exynos5_gpios_1[] = { @@ -2719,7 +2721,9 @@ static __init int samsung_gpiolib_init(void) { struct samsung_gpio_chip *chip; int i, nr_chips; +#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250) void __iomem *gpio_base1, *gpio_base2, *gpio_base3, *gpio_base4; +#endif int group = 0; samsung_gpiolib_set_cfg(samsung_gpio_cfgs, ARRAY_SIZE(samsung_gpio_cfgs)); @@ -2971,6 +2975,7 @@ static __init int samsung_gpiolib_init(void) return 0; +#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS5250) err_ioremap4: iounmap(gpio_base3); err_ioremap3: @@ -2979,6 +2984,7 @@ err_ioremap2: iounmap(gpio_base1); err_ioremap1: return -ENOMEM; +#endif } core_initcall(samsung_gpiolib_init); diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 30372f7b2d45..348b367debeb 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -1510,8 +1510,8 @@ int drm_freebufs(struct drm_device *dev, void *data, * \param arg pointer to a drm_buf_map structure. * \return zero on success or a negative number on failure. * - * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information - * about each buffer into user space. For PCI buffers, it calls do_mmap() with + * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information + * about each buffer into user space. For PCI buffers, it calls vm_mmap() with * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls * drm_mmap_dma(). 
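The drm_bufs, exynos, i810 and i915 hunks around here all make the same transformation: the open-coded mmap_sem locking around do_mmap()/do_munmap() collapses into a single vm_mmap()/vm_munmap() call, which takes the semaphore internally. In outline (kernel-style fragment for orientation only, not a buildable example):

	/* before */
	down_write(&current->mm->mmap_sem);
	addr = do_mmap(filp, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, offset);
	up_write(&current->mm->mmap_sem);

	/* after: identical arguments, locking handled inside the helper */
	addr = vm_mmap(filp, 0, size, PROT_READ | PROT_WRITE, MAP_SHARED, offset);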
*/ @@ -1553,18 +1553,14 @@ int drm_mapbufs(struct drm_device *dev, void *data, retcode = -EINVAL; goto done; } - down_write(&current->mm->mmap_sem); - virtual = do_mmap(file_priv->filp, 0, map->size, + virtual = vm_mmap(file_priv->filp, 0, map->size, PROT_READ | PROT_WRITE, MAP_SHARED, token); - up_write(&current->mm->mmap_sem); } else { - down_write(&current->mm->mmap_sem); - virtual = do_mmap(file_priv->filp, 0, dma->byte_count, + virtual = vm_mmap(file_priv->filp, 0, dma->byte_count, PROT_READ | PROT_WRITE, MAP_SHARED, 0); - up_write(&current->mm->mmap_sem); } if (virtual > -1024UL) { /* Real error */ diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 26d51979116b..1dffa8359f88 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -149,22 +149,12 @@ static int exynos_drm_gem_map_pages(struct drm_gem_object *obj, unsigned long pfn; if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { - unsigned long usize = buf->size; - if (!buf->pages) return -EINTR; - while (usize > 0) { - pfn = page_to_pfn(buf->pages[page_offset++]); - vm_insert_mixed(vma, f_vaddr, pfn); - f_vaddr += PAGE_SIZE; - usize -= PAGE_SIZE; - } - - return 0; - } - - pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset; + pfn = page_to_pfn(buf->pages[page_offset++]); + } else + pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset; return vm_insert_mixed(vma, f_vaddr, pfn); } @@ -524,6 +514,8 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp, if (!buffer->pages) return -EINVAL; + vma->vm_flags |= VM_MIXEDMAP; + do { ret = vm_insert_page(vma, uaddr, buffer->pages[i++]); if (ret) { @@ -581,10 +573,8 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data, obj->filp->f_op = &exynos_drm_gem_fops; obj->filp->private_data = obj; - down_write(&current->mm->mmap_sem); - addr = do_mmap(obj->filp, 0, args->size, + addr = vm_mmap(obj->filp, 0, args->size, PROT_READ | PROT_WRITE, MAP_SHARED, 0); - up_write(&current->mm->mmap_sem); drm_gem_object_unreference_unlocked(obj); @@ -712,7 +702,6 @@ int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv, int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_gem_object *obj = vma->vm_private_data; - struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); struct drm_device *dev = obj->dev; unsigned long f_vaddr; pgoff_t page_offset; @@ -724,21 +713,10 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) mutex_lock(&dev->struct_mutex); - /* - * allocate all pages as desired size if user wants to allocate - * physically non-continuous memory. 
- */ - if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) { - ret = exynos_drm_gem_get_pages(obj); - if (ret < 0) - goto err; - } - ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset); if (ret < 0) DRM_ERROR("failed to map pages.\n"); -err: mutex_unlock(&dev->struct_mutex); return convert_to_vm_err_msg(ret); diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 2c8a60c3b98e..f920fb5e42b6 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -129,6 +129,7 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv) if (buf_priv->currently_mapped == I810_BUF_MAPPED) return -EINVAL; + /* This is all entirely broken */ down_write(&current->mm->mmap_sem); old_fops = file_priv->filp->f_op; file_priv->filp->f_op = &i810_buffer_fops; @@ -157,11 +158,8 @@ static int i810_unmap_buffer(struct drm_buf *buf) if (buf_priv->currently_mapped != I810_BUF_MAPPED) return -EINVAL; - down_write(&current->mm->mmap_sem); - retcode = do_munmap(current->mm, - (unsigned long)buf_priv->virtual, + retcode = vm_munmap((unsigned long)buf_priv->virtual, (size_t) buf->total); - up_write(&current->mm->mmap_sem); buf_priv->currently_mapped = I810_BUF_UNMAPPED; buf_priv->virtual = NULL; diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b505b70dba05..e6162a1681f0 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1224,6 +1224,9 @@ static int i915_emon_status(struct seq_file *m, void *unused) unsigned long temp, chipset, gfx; int ret; + if (!IS_GEN5(dev)) + return -ENODEV; + ret = mutex_lock_interruptible(&dev->struct_mutex); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 785f67f963ef..ba60f3c8f911 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1701,6 +1701,9 @@ void i915_update_gfx_val(struct drm_i915_private *dev_priv) unsigned long diffms; u32 count; + if (dev_priv->info->gen != 5) + return; + getrawmonotonic(&now); diff1 = timespec_sub(now, dev_priv->last_time2); @@ -2121,12 +2124,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed, (unsigned long) dev); - spin_lock(&mchdev_lock); - i915_mch_dev = dev_priv; - dev_priv->mchdev_lock = &mchdev_lock; - spin_unlock(&mchdev_lock); + if (IS_GEN5(dev)) { + spin_lock(&mchdev_lock); + i915_mch_dev = dev_priv; + dev_priv->mchdev_lock = &mchdev_lock; + spin_unlock(&mchdev_lock); - ips_ping_for_i915_load(); + ips_ping_for_i915_load(); + } return 0; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0e3c6acde955..0d1e4b7b4b99 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1087,11 +1087,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, if (obj == NULL) return -ENOENT; - down_write(&current->mm->mmap_sem); - addr = do_mmap(obj->filp, 0, args->size, + addr = vm_mmap(obj->filp, 0, args->size, PROT_READ | PROT_WRITE, MAP_SHARED, args->offset); - up_write(&current->mm->mmap_sem); drm_gem_object_unreference_unlocked(obj); if (IS_ERR((void *)addr)) return addr; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index f51a696486cb..de431942ded4 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1133,6 +1133,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, return 
-EINVAL; } + if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) { + DRM_DEBUG("execbuf with %u cliprects\n", + args->num_cliprects); + return -EINVAL; + } cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects), GFP_KERNEL); if (cliprects == NULL) { @@ -1404,7 +1409,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, struct drm_i915_gem_exec_object2 *exec2_list = NULL; int ret; - if (args->buffer_count < 1) { + if (args->buffer_count < 1 || + args->buffer_count > UINT_MAX / sizeof(*exec2_list)) { DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count); return -EINVAL; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b4bb1ef77ddc..9d24d65f0c3e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -568,6 +568,7 @@ #define CM0_MASK_SHIFT 16 #define CM0_IZ_OPT_DISABLE (1<<6) #define CM0_ZR_OPT_DISABLE (1<<5) +#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5) #define CM0_DEPTH_EVICT_DISABLE (1<<4) #define CM0_COLOR_EVICT_DISABLE (1<<3) #define CM0_DEPTH_WRITE_DISABLE (1<<1) diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 4d3d736a4f56..90b9793fd5da 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -430,8 +430,8 @@ intel_crt_detect(struct drm_connector *connector, bool force) { struct drm_device *dev = connector->dev; struct intel_crt *crt = intel_attached_crt(connector); - struct drm_crtc *crtc; enum drm_connector_status status; + struct intel_load_detect_pipe tmp; if (I915_HAS_HOTPLUG(dev)) { if (intel_crt_detect_hotplug(connector)) { @@ -450,23 +450,16 @@ intel_crt_detect(struct drm_connector *connector, bool force) return connector->status; /* for pre-945g platforms use load detect */ - crtc = crt->base.base.crtc; - if (crtc && crtc->enabled) { - status = intel_crt_load_detect(crt); - } else { - struct intel_load_detect_pipe tmp; - - if (intel_get_load_detect_pipe(&crt->base, connector, NULL, - &tmp)) { - if (intel_crt_detect_ddc(connector)) - status = connector_status_connected; - else - status = intel_crt_load_detect(crt); - intel_release_load_detect_pipe(&crt->base, connector, - &tmp); - } else - status = connector_status_unknown; - } + if (intel_get_load_detect_pipe(&crt->base, connector, NULL, + &tmp)) { + if (intel_crt_detect_ddc(connector)) + status = connector_status_connected; + else + status = intel_crt_load_detect(crt); + intel_release_load_detect_pipe(&crt->base, connector, + &tmp); + } else + status = connector_status_unknown; return status; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5908cd563400..1b1cf3b3ff51 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7072,9 +7072,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; drm_i915_private_t *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; - int dpll_reg = DPLL(pipe); - int dpll = I915_READ(dpll_reg); if (HAS_PCH_SPLIT(dev)) return; @@ -7087,10 +7084,15 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) * the manual case. 
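The UINT_MAX / sizeof() guards added to i915_gem_do_execbuffer() and i915_gem_execbuffer2() above bound the user-supplied counts before they are multiplied for kmalloc(), so a huge count cannot wrap the size computation and under-allocate. A stand-alone sketch of the same guard (hypothetical names):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct cliprect { uint32_t x1, y1, x2, y2; };

/* Refuse element counts whose total size would wrap before allocating;
 * same idea as the UINT_MAX / sizeof(*...) tests in the hunks above. */
static void *alloc_cliprects(size_t count)
{
	if (count > SIZE_MAX / sizeof(struct cliprect))
		return NULL;
	return malloc(count * sizeof(struct cliprect));
}

int main(void)
{
	void *ok = alloc_cliprects(16);

	printf("16 cliprects: %s\n", ok ? "allocated" : "rejected");
	printf("huge count:   %s\n",
	       alloc_cliprects((size_t)-1) ? "allocated" : "rejected");
	free(ok);
	return 0;
}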
*/ if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { + int pipe = intel_crtc->pipe; + int dpll_reg = DPLL(pipe); + u32 dpll; + DRM_DEBUG_DRIVER("downclocking LVDS\n"); assert_panel_unlocked(dev_priv, pipe); + dpll = I915_READ(dpll_reg); dpll |= DISPLAY_RATE_SELECT_FPA1; I915_WRITE(dpll_reg, dpll); intel_wait_for_vblank(dev, pipe); @@ -7098,7 +7100,6 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc) if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); } - } /** diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index cae3e5f17a49..2d7f47b56b6a 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -136,7 +136,7 @@ static void i9xx_write_infoframe(struct drm_encoder *encoder, val &= ~VIDEO_DIP_SELECT_MASK; - I915_WRITE(VIDEO_DIP_CTL, val | port | flags); + I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | val | port | flags); for (i = 0; i < len; i += 4) { I915_WRITE(VIDEO_DIP_DATA, *data); diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 30e2c82101de..9c71183629c2 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -750,7 +750,7 @@ static const struct dmi_system_id intel_no_lvds[] = { .ident = "Hewlett-Packard t5745", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_BOARD_NAME, "hp t5745"), + DMI_MATCH(DMI_PRODUCT_NAME, "hp t5745"), }, }, { @@ -758,7 +758,7 @@ static const struct dmi_system_id intel_no_lvds[] = { .ident = "Hewlett-Packard st5747", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_BOARD_NAME, "hp st5747"), + DMI_MATCH(DMI_PRODUCT_NAME, "hp st5747"), }, }, { diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index f75806e5bff5..62892a826ede 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -398,6 +398,17 @@ static int init_render_ring(struct intel_ring_buffer *ring) return ret; } + + if (IS_GEN6(dev)) { + /* From the Sandybridge PRM, volume 1 part 3, page 24: + * "If this bit is set, STCunit will have LRA as replacement + * policy. [...] This bit must be reset. LRA replacement + * policy is not supported." 
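The CACHE_MODE_0 write that follows (and the existing INSTPM write near it) use the usual i915 masked-register convention: bits 31:16 are a per-bit write enable for bits 15:0, so writing only CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT clears the bit as the PRM requires, while bit << 16 | bit would set it. A small stand-alone model of that convention (names are stand-ins):

#include <stdint.h>
#include <stdio.h>

/* Model of a masked register: on write, bits 31:16 are a per-bit write
 * enable for bits 15:0, and unmasked bits keep their previous value. */
static uint32_t masked_write(uint32_t old, uint32_t val)
{
	uint32_t mask = val >> 16;

	return (old & ~mask) | (val & mask);
}

#define STC_EVICT_DISABLE_LRA	(1u << 5)	/* stand-in for CM0_STC_EVICT_DISABLE_LRA_SNB */

int main(void)
{
	uint32_t reg = 0x0000ffff;

	/* mask bit set, value bit clear: the bit is cleared ("must be reset") */
	reg = masked_write(reg, STC_EVICT_DISABLE_LRA << 16);
	printf("after clear: %#x\n", (unsigned)reg);

	/* mask and value both set: the bit is set (INSTPM_FORCE_ORDERING style) */
	reg = masked_write(reg, STC_EVICT_DISABLE_LRA << 16 | STC_EVICT_DISABLE_LRA);
	printf("after set:   %#x\n", (unsigned)reg);
	return 0;
}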
+ */ + I915_WRITE(CACHE_MODE_0, + CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT); + } + if (INTEL_INFO(dev)->gen >= 6) { I915_WRITE(INSTPM, INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index e36b171c1e7d..ae5e748f39bb 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -731,6 +731,7 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, uint16_t width, height; uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len; uint16_t h_sync_offset, v_sync_offset; + int mode_clock; width = mode->crtc_hdisplay; height = mode->crtc_vdisplay; @@ -745,7 +746,11 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd, h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; - dtd->part1.clock = mode->clock / 10; + mode_clock = mode->clock; + mode_clock /= intel_mode_get_pixel_multiplier(mode) ?: 1; + mode_clock /= 10; + dtd->part1.clock = mode_clock; + dtd->part1.h_active = width & 0xff; dtd->part1.h_blank = h_blank_len & 0xff; dtd->part1.h_high = (((width >> 8) & 0xf) << 4) | @@ -996,7 +1001,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder); u32 sdvox; struct intel_sdvo_in_out_map in_out; - struct intel_sdvo_dtd input_dtd; + struct intel_sdvo_dtd input_dtd, output_dtd; int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); int rate; @@ -1021,20 +1026,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, intel_sdvo->attached_output)) return; - /* We have tried to get input timing in mode_fixup, and filled into - * adjusted_mode. - */ - if (intel_sdvo->is_tv || intel_sdvo->is_lvds) { - input_dtd = intel_sdvo->input_dtd; - } else { - /* Set the output timing to the screen */ - if (!intel_sdvo_set_target_output(intel_sdvo, - intel_sdvo->attached_output)) - return; - - intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); - (void) intel_sdvo_set_output_timing(intel_sdvo, &input_dtd); - } + /* lvds has a special fixed output timing. */ + if (intel_sdvo->is_lvds) + intel_sdvo_get_dtd_from_mode(&output_dtd, + intel_sdvo->sdvo_lvds_fixed_mode); + else + intel_sdvo_get_dtd_from_mode(&output_dtd, mode); + (void) intel_sdvo_set_output_timing(intel_sdvo, &output_dtd); /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) @@ -1052,6 +1050,10 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, !intel_sdvo_set_tv_format(intel_sdvo)) return; + /* We have tried to get input timing in mode_fixup, and filled into + * adjusted_mode. + */ + intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); (void) intel_sdvo_set_input_timing(intel_sdvo, &input_dtd); switch (pixel_multiplier) { @@ -1218,8 +1220,14 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) { + struct drm_device *dev = intel_sdvo->base.base.dev; u8 response[2]; + /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise + * on the line. 
*/ + if (IS_I945G(dev) || IS_I945GM(dev)) + return false; + return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, &response, 2) && response[0]; } diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 7814a760c164..284bd25d5d21 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -270,7 +270,7 @@ static bool nouveau_dsm_detect(void) struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; struct pci_dev *pdev = NULL; int has_dsm = 0; - int has_optimus; + int has_optimus = 0; int vga_count = 0; bool guid_valid; int retval; diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 80963d05b54a..0be4a815e706 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -6156,10 +6156,14 @@ dcb_fake_connectors(struct nvbios *bios) /* heuristic: if we ever get a non-zero connector field, assume * that all the indices are valid and we don't need fake them. + * + * and, as usual, a blacklist of boards with bad bios data.. */ - for (i = 0; i < dcbt->entries; i++) { - if (dcbt->entry[i].connector) - return; + if (!nv_match_device(bios->dev, 0x0392, 0x107d, 0x20a2)) { + for (i = 0; i < dcbt->entries; i++) { + if (dcbt->entry[i].connector) + return; + } } /* no useful connector info available, we need to make it up diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c index 59ea1c14eca0..c3de36384522 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hdmi.c +++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c @@ -32,7 +32,9 @@ static bool hdmi_sor(struct drm_encoder *encoder) { struct drm_nouveau_private *dev_priv = encoder->dev->dev_private; - if (dev_priv->chipset < 0xa3) + if (dev_priv->chipset < 0xa3 || + dev_priv->chipset == 0xaa || + dev_priv->chipset == 0xac) return false; return true; } diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c index e2be95af2e52..77e564667b5c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -29,10 +29,6 @@ #include "nouveau_i2c.h" #include "nouveau_hw.h" -#define T_TIMEOUT 2200000 -#define T_RISEFALL 1000 -#define T_HOLD 5000 - static void i2c_drive_scl(void *data, int state) { @@ -113,175 +109,6 @@ i2c_sense_sda(void *data) return 0; } -static void -i2c_delay(struct nouveau_i2c_chan *port, u32 nsec) -{ - udelay((nsec + 500) / 1000); -} - -static bool -i2c_raise_scl(struct nouveau_i2c_chan *port) -{ - u32 timeout = T_TIMEOUT / T_RISEFALL; - - i2c_drive_scl(port, 1); - do { - i2c_delay(port, T_RISEFALL); - } while (!i2c_sense_scl(port) && --timeout); - - return timeout != 0; -} - -static int -i2c_start(struct nouveau_i2c_chan *port) -{ - int ret = 0; - - port->state = i2c_sense_scl(port); - port->state |= i2c_sense_sda(port) << 1; - if (port->state != 3) { - i2c_drive_scl(port, 0); - i2c_drive_sda(port, 1); - if (!i2c_raise_scl(port)) - ret = -EBUSY; - } - - i2c_drive_sda(port, 0); - i2c_delay(port, T_HOLD); - i2c_drive_scl(port, 0); - i2c_delay(port, T_HOLD); - return ret; -} - -static void -i2c_stop(struct nouveau_i2c_chan *port) -{ - i2c_drive_scl(port, 0); - i2c_drive_sda(port, 0); - i2c_delay(port, T_RISEFALL); - - i2c_drive_scl(port, 1); - i2c_delay(port, T_HOLD); - i2c_drive_sda(port, 1); - i2c_delay(port, T_HOLD); -} - -static int -i2c_bitw(struct nouveau_i2c_chan *port, int sda) -{ - i2c_drive_sda(port, sda); - i2c_delay(port, T_RISEFALL); - - if 
(!i2c_raise_scl(port)) - return -ETIMEDOUT; - i2c_delay(port, T_HOLD); - - i2c_drive_scl(port, 0); - i2c_delay(port, T_HOLD); - return 0; -} - -static int -i2c_bitr(struct nouveau_i2c_chan *port) -{ - int sda; - - i2c_drive_sda(port, 1); - i2c_delay(port, T_RISEFALL); - - if (!i2c_raise_scl(port)) - return -ETIMEDOUT; - i2c_delay(port, T_HOLD); - - sda = i2c_sense_sda(port); - - i2c_drive_scl(port, 0); - i2c_delay(port, T_HOLD); - return sda; -} - -static int -i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last) -{ - int i, bit; - - *byte = 0; - for (i = 7; i >= 0; i--) { - bit = i2c_bitr(port); - if (bit < 0) - return bit; - *byte |= bit << i; - } - - return i2c_bitw(port, last ? 1 : 0); -} - -static int -i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte) -{ - int i, ret; - for (i = 7; i >= 0; i--) { - ret = i2c_bitw(port, !!(byte & (1 << i))); - if (ret < 0) - return ret; - } - - ret = i2c_bitr(port); - if (ret == 1) /* nack */ - ret = -EIO; - return ret; -} - -static int -i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg) -{ - u32 addr = msg->addr << 1; - if (msg->flags & I2C_M_RD) - addr |= 1; - return i2c_put_byte(port, addr); -} - -static int -i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) -{ - struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap; - struct i2c_msg *msg = msgs; - int ret = 0, mcnt = num; - - while (!ret && mcnt--) { - u8 remaining = msg->len; - u8 *ptr = msg->buf; - - ret = i2c_start(port); - if (ret == 0) - ret = i2c_addr(port, msg); - - if (msg->flags & I2C_M_RD) { - while (!ret && remaining--) - ret = i2c_get_byte(port, ptr++, !remaining); - } else { - while (!ret && remaining--) - ret = i2c_put_byte(port, *ptr++); - } - - msg++; - } - - i2c_stop(port); - return (ret < 0) ? 
ret : num; -} - -static u32 -i2c_bit_func(struct i2c_adapter *adap) -{ - return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; -} - -const struct i2c_algorithm nouveau_i2c_bit_algo = { - .master_xfer = i2c_bit_xfer, - .functionality = i2c_bit_func -}; - static const uint32_t nv50_i2c_port[] = { 0x00e138, 0x00e150, 0x00e168, 0x00e180, 0x00e254, 0x00e274, 0x00e764, 0x00e780, @@ -384,12 +211,10 @@ nouveau_i2c_init(struct drm_device *dev) case 0: /* NV04:NV50 */ port->drive = entry[0]; port->sense = entry[1]; - port->adapter.algo = &nouveau_i2c_bit_algo; break; case 4: /* NV4E */ port->drive = 0x600800 + entry[1]; port->sense = port->drive; - port->adapter.algo = &nouveau_i2c_bit_algo; break; case 5: /* NV50- */ port->drive = entry[0] & 0x0f; @@ -402,7 +227,6 @@ nouveau_i2c_init(struct drm_device *dev) port->drive = 0x00d014 + (port->drive * 0x20); port->sense = port->drive; } - port->adapter.algo = &nouveau_i2c_bit_algo; break; case 6: /* NV50- DP AUX */ port->drive = entry[0]; @@ -413,7 +237,7 @@ nouveau_i2c_init(struct drm_device *dev) break; } - if (!port->adapter.algo) { + if (!port->adapter.algo && !port->drive) { NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n", i, port->type, port->drive, port->sense); kfree(port); @@ -429,7 +253,26 @@ nouveau_i2c_init(struct drm_device *dev) port->dcb = ROM32(entry[0]); i2c_set_adapdata(&port->adapter, i2c); - ret = i2c_add_adapter(&port->adapter); + if (port->adapter.algo != &nouveau_dp_i2c_algo) { + port->adapter.algo_data = &port->bit; + port->bit.udelay = 10; + port->bit.timeout = usecs_to_jiffies(2200); + port->bit.data = port; + port->bit.setsda = i2c_drive_sda; + port->bit.setscl = i2c_drive_scl; + port->bit.getsda = i2c_sense_sda; + port->bit.getscl = i2c_sense_scl; + + i2c_drive_scl(port, 0); + i2c_drive_sda(port, 1); + i2c_drive_scl(port, 1); + + ret = i2c_bit_add_bus(&port->adapter); + } else { + port->adapter.algo = &nouveau_dp_i2c_algo; + ret = i2c_add_adapter(&port->adapter); + } + if (ret) { NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret); kfree(port); diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h index 4d2e4e9031be..1d083893a4d7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_i2c.h +++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h @@ -34,6 +34,7 @@ struct nouveau_i2c_chan { struct i2c_adapter adapter; struct drm_device *dev; + struct i2c_algo_bit_data bit; struct list_head head; u8 index; u8 type; diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c index 550ad3fcf0af..9d79180069df 100644 --- a/drivers/gpu/drm/nouveau/nv10_gpio.c +++ b/drivers/gpu/drm/nouveau/nv10_gpio.c @@ -65,7 +65,7 @@ nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out) if (line < 10) { line = (line - 2) * 4; reg = NV_PCRTC_GPIO_EXT; - mask = 0x00000003 << ((line - 2) * 4); + mask = 0x00000003; data = (dir << 1) | out; } else if (line < 14) { diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c index 5bf55038fd92..f704e942372e 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fb.c +++ b/drivers/gpu/drm/nouveau/nvc0_fb.c @@ -54,6 +54,11 @@ nvc0_mfb_isr(struct drm_device *dev) nvc0_mfb_subp_isr(dev, unit, subp); units &= ~(1 << unit); } + + /* we do something horribly wrong and upset PMFB a lot, so mask off + * interrupts from it after the first one until it's fixed + */ + nv_mask(dev, 0x000640, 0x02000000, 0x00000000); } static void diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index b5ff1f7b6f7e..af1054f8202a 
100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -575,6 +575,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (rdev->family < CHIP_RV770) pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; + /* use frac fb div on APUs */ + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) + pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; } else { pll->flags |= RADEON_PLL_LEGACY; @@ -955,8 +958,8 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode break; } - if (radeon_encoder->active_device & - (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) { + if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) || + (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) { struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index ea7df16e2f84..5992502a3448 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -241,8 +241,8 @@ int radeon_wb_init(struct radeon_device *rdev) rdev->wb.use_event = true; } } - /* always use writeback/events on NI */ - if (ASIC_IS_DCE5(rdev)) { + /* always use writeback/events on NI, APUs */ + if (rdev->family >= CHIP_PALM) { rdev->wb.enabled = true; rdev->wb.use_event = true; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 8086c96e0b06..0a1d4bd65edc 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -533,7 +533,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index) radeon_legacy_init_crtc(dev, radeon_crtc); } -static const char *encoder_names[36] = { +static const char *encoder_names[37] = { "NONE", "INTERNAL_LVDS", "INTERNAL_TMDS1", @@ -570,6 +570,7 @@ static const char *encoder_names[36] = { "INTERNAL_UNIPHY2", "NUTMEG", "TRAVIS", + "INTERNAL_VCE" }; static const char *connector_names[15] = { diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index a3d033252995..ffddcba32af6 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -34,7 +34,7 @@ config HID config HID_BATTERY_STRENGTH bool depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY - default y + default n config HIDRAW bool "/dev/hidraw raw HID device support" diff --git a/drivers/hid/hid-tivo.c b/drivers/hid/hid-tivo.c index de47039c708c..9f85f827607f 100644 --- a/drivers/hid/hid-tivo.c +++ b/drivers/hid/hid-tivo.c @@ -62,7 +62,7 @@ static int tivo_input_mapping(struct hid_device *hdev, struct hid_input *hi, static const struct hid_device_id tivo_devices[] = { /* TiVo Slide Bluetooth remote, pairs with a Broadcom dongle */ - { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, { } }; diff --git a/drivers/hsi/clients/hsi_char.c b/drivers/hsi/clients/hsi_char.c index 88a050df2389..3ad91f6447d8 100644 --- a/drivers/hsi/clients/hsi_char.c +++ b/drivers/hsi/clients/hsi_char.c @@ -123,7 +123,7 @@ struct hsc_client_data { static unsigned int hsc_major; /* Maximum buffer size that hsi_char will accept from userspace */ static unsigned int max_data_size = 0x1000; -module_param(max_data_size, uint, S_IRUSR | S_IWUSR); +module_param(max_data_size, uint, 0); MODULE_PARM_DESC(max_data_size, 
"max read/write data size [4,8..65536] (^2)"); static void hsc_add_tail(struct hsc_channel *channel, struct hsi_msg *msg, diff --git a/drivers/hsi/hsi.c b/drivers/hsi/hsi.c index 4e2d79b79334..2d58f939d27f 100644 --- a/drivers/hsi/hsi.c +++ b/drivers/hsi/hsi.c @@ -21,26 +21,13 @@ */ #include #include -#include #include -#include #include #include #include +#include #include "hsi_core.h" -static struct device_type hsi_ctrl = { - .name = "hsi_controller", -}; - -static struct device_type hsi_cl = { - .name = "hsi_client", -}; - -static struct device_type hsi_port = { - .name = "hsi_port", -}; - static ssize_t modalias_show(struct device *dev, struct device_attribute *a __maybe_unused, char *buf) { @@ -54,8 +41,7 @@ static struct device_attribute hsi_bus_dev_attrs[] = { static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) { - if (dev->type == &hsi_cl) - add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev)); + add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev)); return 0; } @@ -80,12 +66,10 @@ static void hsi_client_release(struct device *dev) static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) { struct hsi_client *cl; - unsigned long flags; cl = kzalloc(sizeof(*cl), GFP_KERNEL); if (!cl) return; - cl->device.type = &hsi_cl; cl->tx_cfg = info->tx_cfg; cl->rx_cfg = info->rx_cfg; cl->device.bus = &hsi_bus_type; @@ -93,14 +77,11 @@ static void hsi_new_client(struct hsi_port *port, struct hsi_board_info *info) cl->device.release = hsi_client_release; dev_set_name(&cl->device, info->name); cl->device.platform_data = info->platform_data; - spin_lock_irqsave(&port->clock, flags); - list_add_tail(&cl->link, &port->clients); - spin_unlock_irqrestore(&port->clock, flags); if (info->archdata) cl->device.archdata = *info->archdata; if (device_register(&cl->device) < 0) { pr_err("hsi: failed to register client: %s\n", info->name); - kfree(cl); + put_device(&cl->device); } } @@ -120,13 +101,6 @@ static void hsi_scan_board_info(struct hsi_controller *hsi) static int hsi_remove_client(struct device *dev, void *data __maybe_unused) { - struct hsi_client *cl = to_hsi_client(dev); - struct hsi_port *port = to_hsi_port(dev->parent); - unsigned long flags; - - spin_lock_irqsave(&port->clock, flags); - list_del(&cl->link); - spin_unlock_irqrestore(&port->clock, flags); device_unregister(dev); return 0; @@ -140,12 +114,17 @@ static int hsi_remove_port(struct device *dev, void *data __maybe_unused) return 0; } -static void hsi_controller_release(struct device *dev __maybe_unused) +static void hsi_controller_release(struct device *dev) { + struct hsi_controller *hsi = to_hsi_controller(dev); + + kfree(hsi->port); + kfree(hsi); } -static void hsi_port_release(struct device *dev __maybe_unused) +static void hsi_port_release(struct device *dev) { + kfree(to_hsi_port(dev)); } /** @@ -170,20 +149,12 @@ int hsi_register_controller(struct hsi_controller *hsi) unsigned int i; int err; - hsi->device.type = &hsi_ctrl; - hsi->device.bus = &hsi_bus_type; - hsi->device.release = hsi_controller_release; - err = device_register(&hsi->device); + err = device_add(&hsi->device); if (err < 0) return err; for (i = 0; i < hsi->num_ports; i++) { - hsi->port[i].device.parent = &hsi->device; - hsi->port[i].device.bus = &hsi_bus_type; - hsi->port[i].device.release = hsi_port_release; - hsi->port[i].device.type = &hsi_port; - INIT_LIST_HEAD(&hsi->port[i].clients); - spin_lock_init(&hsi->port[i].clock); - err = device_register(&hsi->port[i].device); + hsi->port[i]->device.parent = &hsi->device; 
+ err = device_add(&hsi->port[i]->device); if (err < 0) goto out; } @@ -192,7 +163,9 @@ int hsi_register_controller(struct hsi_controller *hsi) return 0; out: - hsi_unregister_controller(hsi); + while (i-- > 0) + device_del(&hsi->port[i]->device); + device_del(&hsi->device); return err; } @@ -222,6 +195,29 @@ static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused) return 0; } +/** + * hsi_put_controller - Free an HSI controller + * + * @hsi: Pointer to the HSI controller to freed + * + * HSI controller drivers should only use this function if they need + * to free their allocated hsi_controller structures before a successful + * call to hsi_register_controller. Other use is not allowed. + */ +void hsi_put_controller(struct hsi_controller *hsi) +{ + unsigned int i; + + if (!hsi) + return; + + for (i = 0; i < hsi->num_ports; i++) + if (hsi->port && hsi->port[i]) + put_device(&hsi->port[i]->device); + put_device(&hsi->device); +} +EXPORT_SYMBOL_GPL(hsi_put_controller); + /** * hsi_alloc_controller - Allocate an HSI controller and its ports * @n_ports: Number of ports on the HSI controller @@ -232,54 +228,51 @@ static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused) struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags) { struct hsi_controller *hsi; - struct hsi_port *port; + struct hsi_port **port; unsigned int i; if (!n_ports) return NULL; - port = kzalloc(sizeof(*port)*n_ports, flags); - if (!port) - return NULL; hsi = kzalloc(sizeof(*hsi), flags); if (!hsi) - goto out; - for (i = 0; i < n_ports; i++) { - dev_set_name(&port[i].device, "port%d", i); - port[i].num = i; - port[i].async = hsi_dummy_msg; - port[i].setup = hsi_dummy_cl; - port[i].flush = hsi_dummy_cl; - port[i].start_tx = hsi_dummy_cl; - port[i].stop_tx = hsi_dummy_cl; - port[i].release = hsi_dummy_cl; - mutex_init(&port[i].lock); + return NULL; + port = kzalloc(sizeof(*port)*n_ports, flags); + if (!port) { + kfree(hsi); + return NULL; } hsi->num_ports = n_ports; hsi->port = port; + hsi->device.release = hsi_controller_release; + device_initialize(&hsi->device); + + for (i = 0; i < n_ports; i++) { + port[i] = kzalloc(sizeof(**port), flags); + if (port[i] == NULL) + goto out; + port[i]->num = i; + port[i]->async = hsi_dummy_msg; + port[i]->setup = hsi_dummy_cl; + port[i]->flush = hsi_dummy_cl; + port[i]->start_tx = hsi_dummy_cl; + port[i]->stop_tx = hsi_dummy_cl; + port[i]->release = hsi_dummy_cl; + mutex_init(&port[i]->lock); + ATOMIC_INIT_NOTIFIER_HEAD(&port[i]->n_head); + dev_set_name(&port[i]->device, "port%d", i); + hsi->port[i]->device.release = hsi_port_release; + device_initialize(&hsi->port[i]->device); + } return hsi; out: - kfree(port); + hsi_put_controller(hsi); return NULL; } EXPORT_SYMBOL_GPL(hsi_alloc_controller); -/** - * hsi_free_controller - Free an HSI controller - * @hsi: Pointer to HSI controller - */ -void hsi_free_controller(struct hsi_controller *hsi) -{ - if (!hsi) - return; - - kfree(hsi->port); - kfree(hsi); -} -EXPORT_SYMBOL_GPL(hsi_free_controller); - /** * hsi_free_msg - Free an HSI message * @msg: Pointer to the HSI message @@ -414,37 +407,67 @@ void hsi_release_port(struct hsi_client *cl) } EXPORT_SYMBOL_GPL(hsi_release_port); -static int hsi_start_rx(struct hsi_client *cl, void *data __maybe_unused) +static int hsi_event_notifier_call(struct notifier_block *nb, + unsigned long event, void *data __maybe_unused) { - if (cl->hsi_start_rx) - (*cl->hsi_start_rx)(cl); + struct hsi_client *cl = container_of(nb, struct hsi_client, nb); + + 
(*cl->ehandler)(cl, event); return 0; } -static int hsi_stop_rx(struct hsi_client *cl, void *data __maybe_unused) +/** + * hsi_register_port_event - Register a client to receive port events + * @cl: HSI client that wants to receive port events + * @cb: Event handler callback + * + * Clients should register a callback to be able to receive + * events from the ports. Registration should happen after + * claiming the port. + * The handler can be called in interrupt context. + * + * Returns -errno on error, or 0 on success. + */ +int hsi_register_port_event(struct hsi_client *cl, + void (*handler)(struct hsi_client *, unsigned long)) { - if (cl->hsi_stop_rx) - (*cl->hsi_stop_rx)(cl); + struct hsi_port *port = hsi_get_port(cl); - return 0; + if (!handler || cl->ehandler) + return -EINVAL; + if (!hsi_port_claimed(cl)) + return -EACCES; + cl->ehandler = handler; + cl->nb.notifier_call = hsi_event_notifier_call; + + return atomic_notifier_chain_register(&port->n_head, &cl->nb); } +EXPORT_SYMBOL_GPL(hsi_register_port_event); -static int hsi_port_for_each_client(struct hsi_port *port, void *data, - int (*fn)(struct hsi_client *cl, void *data)) +/** + * hsi_unregister_port_event - Stop receiving port events for a client + * @cl: HSI client that wants to stop receiving port events + * + * Clients should call this function before releasing their associated + * port. + * + * Returns -errno on error, or 0 on success. + */ +int hsi_unregister_port_event(struct hsi_client *cl) { - struct hsi_client *cl; + struct hsi_port *port = hsi_get_port(cl); + int err; - spin_lock(&port->clock); - list_for_each_entry(cl, &port->clients, link) { - spin_unlock(&port->clock); - (*fn)(cl, data); - spin_lock(&port->clock); - } - spin_unlock(&port->clock); + WARN_ON(!hsi_port_claimed(cl)); - return 0; + err = atomic_notifier_chain_unregister(&port->n_head, &cl->nb); + if (!err) + cl->ehandler = NULL; + + return err; } +EXPORT_SYMBOL_GPL(hsi_unregister_port_event); /** * hsi_event -Notifies clients about port events @@ -458,22 +481,12 @@ static int hsi_port_for_each_client(struct hsi_port *port, void *data, * Events: * HSI_EVENT_START_RX - Incoming wake line high * HSI_EVENT_STOP_RX - Incoming wake line down + * + * Returns -errno on error, or 0 on success. 
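The hsi_register_port_event()/hsi_event() pair above replaces the old per-port client list walk with a notifier chain: each client hangs a callback on port->n_head and hsi_event() simply fires the chain. A simplified, single-threaded userspace model of that pattern (hypothetical names; the kernel version uses atomic_notifier_chain_* and handles locking and return codes):

#include <stdio.h>

/* Listener registered on a port's event chain. */
struct port_listener {
	int (*call)(struct port_listener *l, unsigned long event);
	struct port_listener *next;
};

/* Stand-in for the per-port notifier head (port->n_head in the patch). */
struct port {
	struct port_listener *head;
};

static void port_register_listener(struct port *p, struct port_listener *l)
{
	l->next = p->head;
	p->head = l;
}

/* Counterpart of hsi_event(): walk the chain and invoke every handler. */
static void port_event(struct port *p, unsigned long event)
{
	struct port_listener *l;

	for (l = p->head; l; l = l->next)
		l->call(l, event);
}

static int client_handler(struct port_listener *l, unsigned long event)
{
	printf("client saw event %lu\n", event);
	return 0;
}

int main(void)
{
	struct port port = { .head = NULL };
	struct port_listener client = { .call = client_handler, .next = NULL };

	port_register_listener(&port, &client);
	port_event(&port, 1UL);	/* e.g. a START_RX-style wake event */
	return 0;
}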
*/ -void hsi_event(struct hsi_port *port, unsigned int event) +int hsi_event(struct hsi_port *port, unsigned long event) { - int (*fn)(struct hsi_client *cl, void *data); - - switch (event) { - case HSI_EVENT_START_RX: - fn = hsi_start_rx; - break; - case HSI_EVENT_STOP_RX: - fn = hsi_stop_rx; - break; - default: - return; - } - hsi_port_for_each_client(port, NULL, fn); + return atomic_notifier_call_chain(&port->n_head, event, NULL); } EXPORT_SYMBOL_GPL(hsi_event); diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c index ce43642ef03e..f85ce70d9677 100644 --- a/drivers/hwmon/ad7314.c +++ b/drivers/hwmon/ad7314.c @@ -47,7 +47,7 @@ struct ad7314_data { u16 rx ____cacheline_aligned; }; -static int ad7314_spi_read(struct ad7314_data *chip, s16 *data) +static int ad7314_spi_read(struct ad7314_data *chip) { int ret; @@ -57,9 +57,7 @@ static int ad7314_spi_read(struct ad7314_data *chip, s16 *data) return ret; } - *data = be16_to_cpu(chip->rx); - - return ret; + return be16_to_cpu(chip->rx); } static ssize_t ad7314_show_temperature(struct device *dev, @@ -70,12 +68,12 @@ static ssize_t ad7314_show_temperature(struct device *dev, s16 data; int ret; - ret = ad7314_spi_read(chip, &data); + ret = ad7314_spi_read(chip); if (ret < 0) return ret; switch (spi_get_device_id(chip->spi_dev)->driver_data) { case ad7314: - data = (data & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET; + data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_OFFSET; data = (data << 6) >> 6; return sprintf(buf, "%d\n", 250 * data); @@ -86,7 +84,7 @@ static ssize_t ad7314_show_temperature(struct device *dev, * with a sign bit - which is a 14 bit 2's complement * register. 1lsb - 31.25 milli degrees centigrade */ - data &= ADT7301_TEMP_MASK; + data = ret & ADT7301_TEMP_MASK; data = (data << 2) >> 2; return sprintf(buf, "%d\n", diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c index 7765e4f74ec5..1958f03efd7a 100644 --- a/drivers/hwmon/ads1015.c +++ b/drivers/hwmon/ads1015.c @@ -59,14 +59,11 @@ struct ads1015_data { struct ads1015_channel_data channel_data[ADS1015_CHANNELS]; }; -static int ads1015_read_value(struct i2c_client *client, unsigned int channel, - int *value) +static int ads1015_read_adc(struct i2c_client *client, unsigned int channel) { u16 config; - s16 conversion; struct ads1015_data *data = i2c_get_clientdata(client); unsigned int pga = data->channel_data[channel].pga; - int fullscale; unsigned int data_rate = data->channel_data[channel].data_rate; unsigned int conversion_time_ms; int res; @@ -78,7 +75,6 @@ static int ads1015_read_value(struct i2c_client *client, unsigned int channel, if (res < 0) goto err_unlock; config = res; - fullscale = fullscale_table[pga]; conversion_time_ms = DIV_ROUND_UP(1000, data_rate_table[data_rate]); /* setup and start single conversion */ @@ -105,33 +101,36 @@ static int ads1015_read_value(struct i2c_client *client, unsigned int channel, } res = i2c_smbus_read_word_swapped(client, ADS1015_CONVERSION); - if (res < 0) - goto err_unlock; - conversion = res; - - mutex_unlock(&data->update_lock); - - *value = DIV_ROUND_CLOSEST(conversion * fullscale, 0x7ff0); - - return 0; err_unlock: mutex_unlock(&data->update_lock); return res; } +static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel, + s16 reg) +{ + struct ads1015_data *data = i2c_get_clientdata(client); + unsigned int pga = data->channel_data[channel].pga; + int fullscale = fullscale_table[pga]; + + return DIV_ROUND_CLOSEST(reg * fullscale, 0x7ff0); +} + /* sysfs callback function */ static ssize_t 
show_in(struct device *dev, struct device_attribute *da, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(da); struct i2c_client *client = to_i2c_client(dev); - int in; int res; + int index = attr->index; - res = ads1015_read_value(client, attr->index, &in); + res = ads1015_read_adc(client, index); + if (res < 0) + return res; - return (res < 0) ? res : sprintf(buf, "%d\n", in); + return sprintf(buf, "%d\n", ads1015_reg_to_mv(client, index, res)); } static const struct sensor_device_attribute ads1015_in[] = { diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 0d3141fbbc20..b9d512331ed4 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -52,7 +52,7 @@ module_param_named(tjmax, force_tjmax, int, 0444); MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ -#define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ +#define NUM_REAL_CORES 32 /* Number of Real cores per cpu */ #define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) @@ -709,6 +709,10 @@ static void __cpuinit put_core_offline(unsigned int cpu) indx = TO_ATTR_NO(cpu); + /* The core id is too big, just return */ + if (indx > MAX_CORE_DATA - 1) + return; + if (pdata->core_data[indx] && pdata->core_data[indx]->cpu == cpu) coretemp_remove_core(pdata, &pdev->dev, indx); diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index b7494af1e4a9..e8e18cab1fb8 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c @@ -122,6 +122,41 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4) return true; } +/* + * Newer BKDG versions have an updated recommendation on how to properly + * initialize the running average range (was: 0xE, now: 0x9). This avoids + * counter saturations resulting in bogus power readings. + * We correct this value ourselves to cope with older BIOSes. 
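The quirk above only touches the low nibble of REG_TDP_RUNNING_AVERAGE, and only when the BIOS left it at the old 0xE recommendation. The read-modify-write itself is plain nibble masking; a stand-alone sketch of just that update (the PCI config read/write around it is omitted):

#include <stdint.h>
#include <stdio.h>

/* Apply the newer BKDG running-average range (0x9) only when the old
 * recommendation (0xE) is still in the low nibble of the register value. */
static uint32_t fixup_runavg_range(uint32_t val)
{
	if ((val & 0xf) != 0xe)
		return val;		/* nothing to correct */

	val &= ~0xfu;			/* clear the range field ... */
	val |= 0x9;			/* ... and set the recommended value */
	return val;
}

int main(void)
{
	printf("%#x -> %#x\n", 0x1234567eu, (unsigned)fixup_runavg_range(0x1234567e));
	printf("%#x -> %#x\n", 0x12345679u, (unsigned)fixup_runavg_range(0x12345679));
	return 0;
}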
+ */ +static DEFINE_PCI_DEVICE_TABLE(affected_device) = { + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, + { 0 } +}; + +static void __devinit tweak_runavg_range(struct pci_dev *pdev) +{ + u32 val; + + /* + * let this quirk apply only to the current version of the + * northbridge, since future versions may change the behavior + */ + if (!pci_match_id(affected_device, pdev)) + return; + + pci_bus_read_config_dword(pdev->bus, + PCI_DEVFN(PCI_SLOT(pdev->devfn), 5), + REG_TDP_RUNNING_AVERAGE, &val); + if ((val & 0xf) != 0xe) + return; + + val &= ~0xf; + val |= 0x9; + pci_bus_write_config_dword(pdev->bus, + PCI_DEVFN(PCI_SLOT(pdev->devfn), 5), + REG_TDP_RUNNING_AVERAGE, val); +} + static void __devinit fam15h_power_init_data(struct pci_dev *f4, struct fam15h_power_data *data) { @@ -155,6 +190,13 @@ static int __devinit fam15h_power_probe(struct pci_dev *pdev, struct device *dev; int err; + /* + * though we ignore every other northbridge, we still have to + * do the tweaking on _each_ node in MCM processors as the counters + * are working hand-in-hand + */ + tweak_runavg_range(pdev); + if (!fam15h_power_is_internal_node0(pdev)) { err = -ENODEV; goto exit; diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index f086131cb1c7..c811289b61e2 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -324,7 +324,7 @@ static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap) { long ret; ret = wait_event_timeout(pch_event, - (adap->pch_event_flag != 0), msecs_to_jiffies(50)); + (adap->pch_event_flag != 0), msecs_to_jiffies(1000)); if (ret == 0) { pch_err(adap, "timeout: %x\n", adap->pch_event_flag); @@ -1063,6 +1063,6 @@ module_exit(pch_pci_exit); MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C"); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Tomoya MORINAGA. "); +MODULE_AUTHOR("Tomoya MORINAGA. "); module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR)); module_param(pch_clk, int, (S_IRUSR | S_IWUSR)); diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 3d471d56bf15..76b8af44f634 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -227,6 +227,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, return -EINVAL; init_completion(&i2c->cmd_complete); + i2c->cmd_err = 0; flags = stop ? 
MXS_I2C_CTRL0_POST_SEND_STOP : 0; @@ -252,6 +253,9 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, if (i2c->cmd_err == -ENXIO) mxs_i2c_reset(i2c); + else + writel(MXS_I2C_QUEUECTRL_QUEUE_RUN, + i2c->regs + MXS_I2C_QUEUECTRL_CLR); dev_dbg(i2c->dev, "Done with err=%d\n", i2c->cmd_err); @@ -299,8 +303,6 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id) MXS_I2C_CTRL1_SLAVE_STOP_IRQ | MXS_I2C_CTRL1_SLAVE_IRQ)) /* MXS_I2C_CTRL1_OVERSIZE_XFER_TERM_IRQ is only for slaves */ i2c->cmd_err = -EIO; - else - i2c->cmd_err = 0; is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) & MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0; @@ -384,8 +386,6 @@ static int __devexit mxs_i2c_remove(struct platform_device *pdev) if (ret) return -EBUSY; - writel(MXS_I2C_QUEUECTRL_QUEUE_RUN, - i2c->regs + MXS_I2C_QUEUECTRL_CLR); writel(MXS_I2C_CTRL0_SFTRST, i2c->regs + MXS_I2C_CTRL0_SET); platform_set_drvdata(pdev, NULL); diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 04be9f82e14b..eb8ad538c79f 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -546,8 +546,7 @@ static int i2c_pnx_controller_suspend(struct platform_device *pdev, { struct i2c_pnx_algo_data *alg_data = platform_get_drvdata(pdev); - /* FIXME: shouldn't this be clk_disable? */ - clk_enable(alg_data->clk); + clk_disable(alg_data->clk); return 0; } diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index e978635e60f0..55e5ea62ccee 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -516,6 +516,14 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, if (likely(i2c_dev->msg_err == I2C_ERR_NONE)) return 0; + /* + * NACK interrupt is generated before the I2C controller generates the + * STOP condition on the bus. So wait for 2 clock periods before resetting + * the controller so that STOP condition has been delivered properly. 
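The delay added above is simply two bus-clock periods rounded up to whole microseconds, e.g. 20 us on a 100 kHz bus. A quick stand-alone check of the arithmetic, using the same DIV_ROUND_UP definition as the kernel macro:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Microseconds covering two clock periods at the given bus rate (Hz). */
static unsigned int two_periods_us(unsigned int bus_clk_rate)
{
	return DIV_ROUND_UP(2u * 1000000u, bus_clk_rate);
}

int main(void)
{
	printf("100 kHz bus -> %u us\n", two_periods_us(100000));	/* 20 */
	printf("400 kHz bus -> %u us\n", two_periods_us(400000));	/* 5 */
	return 0;
}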
+ */ + if (i2c_dev->msg_err == I2C_ERR_NO_ACK) + udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate)); + tegra_i2c_init(i2c_dev); if (i2c_dev->msg_err == I2C_ERR_NO_ACK) { if (msg->flags & I2C_M_IGNORE_NAK) diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 426bb7617ec6..b0d0bc8a6fb6 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -1854,6 +1854,8 @@ static bool generate_unmatched_resp(struct ib_mad_private *recv, response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP; response->mad.mad.mad_hdr.status = cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB); + if (recv->mad.mad.mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) + response->mad.mad.mad_hdr.status |= IB_SMP_DIRECTION; return true; } else { @@ -1869,6 +1871,7 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, struct ib_mad_list_head *mad_list; struct ib_mad_agent_private *mad_agent; int port_num; + int ret = IB_MAD_RESULT_SUCCESS; mad_list = (struct ib_mad_list_head *)(unsigned long)wc->wr_id; qp_info = mad_list->mad_queue->qp_info; @@ -1952,8 +1955,6 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv, local: /* Give driver "right of first refusal" on incoming MAD */ if (port_priv->device->process_mad) { - int ret; - ret = port_priv->device->process_mad(port_priv->device, 0, port_priv->port_num, wc, &recv->grh, @@ -1981,7 +1982,8 @@ local: * or via recv_handler in ib_mad_complete_recv() */ recv = NULL; - } else if (generate_unmatched_resp(recv, response)) { + } else if ((ret & IB_MAD_RESULT_SUCCESS) && + generate_unmatched_resp(recv, response)) { agent_send_response(&response->mad.mad, &recv->grh, wc, port_priv->device, port_num, qp_info->qp->qp_num); } diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 669673e81439..b948b6dd5d55 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -247,7 +247,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port, err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad); if (err) - return err; + goto out; /* Checking LinkSpeedActive for FDR-10 */ if (out_mad->data[15] & 0x1) diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index 2d787796bf50..7faf4a7fcaa9 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig @@ -380,8 +380,7 @@ config INPUT_TWL4030_VIBRA config INPUT_TWL6040_VIBRA tristate "Support for TWL6040 Vibrator" - depends on TWL4030_CORE - select TWL6040_CORE + depends on TWL6040_CORE select INPUT_FF_MEMLESS help This option enables support for TWL6040 Vibrator Driver. 
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c index 45874fed523a..14e94f56cb7d 100644 --- a/drivers/input/misc/twl6040-vibra.c +++ b/drivers/input/misc/twl6040-vibra.c @@ -28,7 +28,7 @@ #include #include #include -#include +#include #include #include #include @@ -257,7 +257,7 @@ static SIMPLE_DEV_PM_OPS(twl6040_vibra_pm_ops, twl6040_vibra_suspend, NULL); static int __devinit twl6040_vibra_probe(struct platform_device *pdev) { - struct twl4030_vibra_data *pdata = pdev->dev.platform_data; + struct twl6040_vibra_data *pdata = pdev->dev.platform_data; struct vibra_info *info; int ret; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 8081a0a5d602..a4b14a41cbf4 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -274,7 +274,8 @@ static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse) static unsigned char param = 0xc8; struct synaptics_data *priv = psmouse->private; - if (!SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) + if (!(SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) || + SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c))) return 0; if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL)) diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c index 800243b6037e..64ad702a2ecc 100644 --- a/drivers/leds/leds-atmel-pwm.c +++ b/drivers/leds/leds-atmel-pwm.c @@ -35,7 +35,7 @@ static void pwmled_brightness(struct led_classdev *cdev, enum led_brightness b) * NOTE: we reuse the platform_data structure of GPIO leds, * but repurpose its "gpio" number as a PWM channel number. */ -static int __init pwmled_probe(struct platform_device *pdev) +static int __devinit pwmled_probe(struct platform_device *pdev) { const struct gpio_led_platform_data *pdata; struct pwmled *leds; diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c index d8433f2d53bc..73973fdbd8be 100644 --- a/drivers/leds/leds-netxbig.c +++ b/drivers/leds/leds-netxbig.c @@ -112,7 +112,7 @@ err_free_addr: return err; } -static void __devexit gpio_ext_free(struct netxbig_gpio_ext *gpio_ext) +static void gpio_ext_free(struct netxbig_gpio_ext *gpio_ext) { int i; @@ -294,7 +294,7 @@ static ssize_t netxbig_led_sata_show(struct device *dev, static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store); -static void __devexit delete_netxbig_led(struct netxbig_led_data *led_dat) +static void delete_netxbig_led(struct netxbig_led_data *led_dat) { if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) device_remove_file(led_dat->cdev.dev, &dev_attr_sata); diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c index 2f0a14421a73..01cf89ec6944 100644 --- a/drivers/leds/leds-ns2.c +++ b/drivers/leds/leds-ns2.c @@ -255,7 +255,7 @@ err_free_cmd: return ret; } -static void __devexit delete_ns2_led(struct ns2_led_data *led_dat) +static void delete_ns2_led(struct ns2_led_data *led_dat) { device_remove_file(led_dat->cdev.dev, &dev_attr_sata); led_classdev_unregister(&led_dat->cdev); diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 97e73e555d11..17e2b472e16d 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1727,8 +1727,7 @@ int bitmap_create(struct mddev *mddev) bitmap->chunkshift = (ffz(~mddev->bitmap_info.chunksize) - BITMAP_BLOCK_SHIFT); - /* now that chunksize and chunkshift are set, we can use these macros */ - chunks = (blocks + bitmap->chunkshift - 1) >> + chunks = (blocks + (1 << bitmap->chunkshift) - 1) >> bitmap->chunkshift; pages = (chunks + PAGE_COUNTER_RATIO - 1) 
/ PAGE_COUNTER_RATIO; diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h index 55ca5aec84e4..b44b0aba2d47 100644 --- a/drivers/md/bitmap.h +++ b/drivers/md/bitmap.h @@ -101,9 +101,6 @@ typedef __u16 bitmap_counter_t; #define BITMAP_BLOCK_SHIFT 9 -/* how many blocks per chunk? (this is variable) */ -#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->mddev->bitmap_info.chunksize >> BITMAP_BLOCK_SHIFT) - #endif /* diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c index 1f23e048f077..08d9a207259a 100644 --- a/drivers/md/dm-log-userspace-transfer.c +++ b/drivers/md/dm-log-userspace-transfer.c @@ -134,7 +134,7 @@ static void cn_ulog_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) { struct dm_ulog_request *tfr = (struct dm_ulog_request *)(msg + 1); - if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_ADMIN)) return; spin_lock(&receiving_list_lock); diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 922a3385eead..754f38f8a692 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -718,8 +718,8 @@ static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) return 0; m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL); - request_module("scsi_dh_%s", m->hw_handler_name); - if (scsi_dh_handler_exist(m->hw_handler_name) == 0) { + if (!try_then_request_module(scsi_dh_handler_exist(m->hw_handler_name), + "scsi_dh_%s", m->hw_handler_name)) { ti->error = "unknown hardware handler type"; ret = -EINVAL; goto fail; diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index b0ba52459ed7..68965e663248 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -859,7 +859,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) int ret; unsigned redundancy = 0; struct raid_dev *dev; - struct md_rdev *rdev, *freshest; + struct md_rdev *rdev, *tmp, *freshest; struct mddev *mddev = &rs->md; switch (rs->raid_type->level) { @@ -877,7 +877,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) } freshest = NULL; - rdev_for_each(rdev, mddev) { + rdev_for_each_safe(rdev, tmp, mddev) { if (!rdev->meta_bdev) continue; diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 213ae32a0fc4..2fd87b544a93 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -279,8 +279,10 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates) hlist_del(&cell->list); - bio_list_add(inmates, cell->holder); - bio_list_merge(inmates, &cell->bios); + if (inmates) { + bio_list_add(inmates, cell->holder); + bio_list_merge(inmates, &cell->bios); + } mempool_free(cell, prison->cell_pool); } @@ -303,9 +305,10 @@ static void cell_release(struct cell *cell, struct bio_list *bios) */ static void __cell_release_singleton(struct cell *cell, struct bio *bio) { - hlist_del(&cell->list); BUG_ON(cell->holder != bio); BUG_ON(!bio_list_empty(&cell->bios)); + + __cell_release(cell, NULL); } static void cell_release_singleton(struct cell *cell, struct bio *bio) @@ -1177,6 +1180,7 @@ static void no_space(struct cell *cell) static void process_discard(struct thin_c *tc, struct bio *bio) { int r; + unsigned long flags; struct pool *pool = tc->pool; struct cell *cell, *cell2; struct cell_key key, key2; @@ -1218,7 +1222,9 @@ static void process_discard(struct thin_c *tc, struct bio *bio) m->bio = bio; if (!ds_add_work(&pool->all_io_ds, &m->list)) { + spin_lock_irqsave(&pool->lock, flags); list_add(&m->list, &pool->prepared_discards); + 
spin_unlock_irqrestore(&pool->lock, flags); wake_worker(pool); } } else { @@ -2626,8 +2632,10 @@ static int thin_endio(struct dm_target *ti, if (h->all_io_entry) { INIT_LIST_HEAD(&work); ds_dec(h->all_io_entry, &work); + spin_lock_irqsave(&pool->lock, flags); list_for_each_entry_safe(m, tmp, &work, list) list_add(&m->list, &pool->prepared_discards); + spin_unlock_irqrestore(&pool->lock, flags); } mempool_free(h, pool->endio_hook_pool); @@ -2759,6 +2767,6 @@ static void dm_thin_exit(void) module_init(dm_thin_init); module_exit(dm_thin_exit); -MODULE_DESCRIPTION(DM_NAME "device-mapper thin provisioning target"); +MODULE_DESCRIPTION(DM_NAME " thin provisioning target"); MODULE_AUTHOR("Joe Thornber "); MODULE_LICENSE("GPL"); diff --git a/drivers/md/md.c b/drivers/md/md.c index b572e1e386ce..477eb2e180c0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -7560,14 +7560,14 @@ void md_check_recovery(struct mddev *mddev) * any transients in the value of "sync_action". */ set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); - clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); /* Clear some bits that don't mean anything, but * might be left set */ clear_bit(MD_RECOVERY_INTR, &mddev->recovery); clear_bit(MD_RECOVERY_DONE, &mddev->recovery); - if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) + if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || + test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) goto unlock; /* no recovery is running. * remove any failed drives, then @@ -8140,7 +8140,8 @@ static int md_notify_reboot(struct notifier_block *this, for_each_mddev(mddev, tmp) { if (mddev_trylock(mddev)) { - __md_stop_writes(mddev); + if (mddev->pers) + __md_stop_writes(mddev); mddev->safemode = 2; mddev_unlock(mddev); } diff --git a/drivers/media/common/tuners/xc5000.c b/drivers/media/common/tuners/xc5000.c index 7f98984e4fad..eab2ea424200 100644 --- a/drivers/media/common/tuners/xc5000.c +++ b/drivers/media/common/tuners/xc5000.c @@ -54,6 +54,7 @@ struct xc5000_priv { struct list_head hybrid_tuner_instance_list; u32 if_khz; + u32 xtal_khz; u32 freq_hz; u32 bandwidth; u8 video_standard; @@ -214,9 +215,9 @@ static const struct xc5000_fw_cfg xc5000a_1_6_114 = { .size = 12401, }; -static const struct xc5000_fw_cfg xc5000c_41_024_5_31875 = { - .name = "dvb-fe-xc5000c-41.024.5-31875.fw", - .size = 16503, +static const struct xc5000_fw_cfg xc5000c_41_024_5 = { + .name = "dvb-fe-xc5000c-41.024.5.fw", + .size = 16497, }; static inline const struct xc5000_fw_cfg *xc5000_assign_firmware(int chip_id) @@ -226,7 +227,7 @@ static inline const struct xc5000_fw_cfg *xc5000_assign_firmware(int chip_id) case XC5000A: return &xc5000a_1_6_114; case XC5000C: - return &xc5000c_41_024_5_31875; + return &xc5000c_41_024_5; } } @@ -572,6 +573,31 @@ static int xc_tune_channel(struct xc5000_priv *priv, u32 freq_hz, int mode) return found; } +static int xc_set_xtal(struct dvb_frontend *fe) +{ + struct xc5000_priv *priv = fe->tuner_priv; + int ret = XC_RESULT_SUCCESS; + + switch (priv->chip_id) { + default: + case XC5000A: + /* 32.000 MHz xtal is default */ + break; + case XC5000C: + switch (priv->xtal_khz) { + default: + case 32000: + /* 32.000 MHz xtal is default */ + break; + case 31875: + /* 31.875 MHz xtal configuration */ + ret = xc_write_reg(priv, 0x000f, 0x8081); + break; + } + break; + } + return ret; +} static int xc5000_fwupload(struct dvb_frontend *fe) { @@ -603,6 +629,8 @@ static int xc5000_fwupload(struct dvb_frontend *fe) } else { printk(KERN_INFO "xc5000: firmware uploading...\n"); ret = 
xc_load_i2c_sequence(fe, fw->data); + if (XC_RESULT_SUCCESS == ret) + ret = xc_set_xtal(fe); printk(KERN_INFO "xc5000: firmware upload complete...\n"); } @@ -1164,6 +1192,9 @@ struct dvb_frontend *xc5000_attach(struct dvb_frontend *fe, priv->if_khz = cfg->if_khz; } + if (priv->xtal_khz == 0) + priv->xtal_khz = cfg->xtal_khz; + if (priv->radio_input == 0) priv->radio_input = cfg->radio_input; diff --git a/drivers/media/common/tuners/xc5000.h b/drivers/media/common/tuners/xc5000.h index 3396f8e02b40..39a73bf01406 100644 --- a/drivers/media/common/tuners/xc5000.h +++ b/drivers/media/common/tuners/xc5000.h @@ -34,6 +34,7 @@ struct xc5000_config { u8 i2c_address; u32 if_khz; u8 radio_input; + u32 xtal_khz; int chip_id; }; diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c index 39696c6a4ed7..0f64d7182657 100644 --- a/drivers/media/dvb/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb/dvb-core/dvb_frontend.c @@ -1446,6 +1446,28 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system) __func__); return -EINVAL; } + /* + * Get a delivery system that is compatible with DVBv3 + * NOTE: in order for this to work with applications like Kaffeine that + * use a DVBv5 call for DVB-S2 and a DVBv3 call to go back to + * DVB-S, drivers that support both should put the SYS_DVBS entry + * before the SYS_DVBS2, otherwise it won't switch back to DVB-S. + * The real fix is that userspace applications should not use DVBv3 + * and should not rely on calling FE_SET_FRONTEND to switch the delivery + * system. + */ + ncaps = 0; + while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) { + if (fe->ops.delsys[ncaps] == desired_system) { + delsys = desired_system; + break; + } + ncaps++; + } + if (delsys == SYS_UNDEFINED) { + dprintk("%s() Couldn't find a delivery system that matches %d\n", + __func__, desired_system); + } } else { /* * This is a DVBv5 call. So, it likely knows the supported @@ -1494,9 +1516,10 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system) __func__); return -EINVAL; } - c->delivery_system = delsys; } + c->delivery_system = delsys; + /* * The DVBv3 or DVBv5 call is requesting a different system. So, * emulation is needed.
diff --git a/drivers/media/dvb/frontends/drxk_hard.c b/drivers/media/dvb/frontends/drxk_hard.c index 36d11756492f..a414b1f2b6a5 100644 --- a/drivers/media/dvb/frontends/drxk_hard.c +++ b/drivers/media/dvb/frontends/drxk_hard.c @@ -1520,8 +1520,10 @@ static int scu_command(struct drxk_state *state, dprintk(1, "\n"); if ((cmd == 0) || ((parameterLen > 0) && (parameter == NULL)) || - ((resultLen > 0) && (result == NULL))) - goto error; + ((resultLen > 0) && (result == NULL))) { + printk(KERN_ERR "drxk: Error %d on %s\n", status, __func__); + return status; + } mutex_lock(&state->mutex); diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c index b09c5fae489b..af526586fa26 100644 --- a/drivers/media/rc/winbond-cir.c +++ b/drivers/media/rc/winbond-cir.c @@ -1046,6 +1046,7 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id) goto exit_unregister_led; } + data->dev->driver_type = RC_DRIVER_IR_RAW; data->dev->driver_name = WBCIR_NAME; data->dev->input_name = WBCIR_NAME; data->dev->input_phys = "wbcir/cir0"; diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index f2479c5c0eb2..ce1e7ba940f6 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig @@ -492,7 +492,7 @@ config VIDEO_VS6624 config VIDEO_MT9M032 tristate "MT9M032 camera sensor support" - depends on I2C && VIDEO_V4L2 + depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API select VIDEO_APTINA_PLL ---help--- This driver supports MT9M032 camera sensors from Aptina, monochrome diff --git a/drivers/media/video/mt9m032.c b/drivers/media/video/mt9m032.c index 7636672c3548..645973c5feb0 100644 --- a/drivers/media/video/mt9m032.c +++ b/drivers/media/video/mt9m032.c @@ -392,10 +392,11 @@ static int mt9m032_set_pad_format(struct v4l2_subdev *subdev, } /* Scaling is not supported, the format is thus fixed. */ - ret = mt9m032_get_pad_format(subdev, fh, fmt); + fmt->format = *__mt9m032_get_pad_format(sensor, fh, fmt->which); + ret = 0; done: - mutex_lock(&sensor->lock); + mutex_unlock(&sensor->lock); return ret; } diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 29f463cc09cb..11e44386fa9b 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -268,10 +268,17 @@ config TWL6030_PWM This is used to control charging LED brightness. config TWL6040_CORE - bool - depends on TWL4030_CORE && GENERIC_HARDIRQS + bool "Support for TWL6040 audio codec" + depends on I2C=y && GENERIC_HARDIRQS select MFD_CORE + select REGMAP_I2C default n + help + Say yes here if you want support for Texas Instruments TWL6040 audio + codec. + This driver provides common support for accessing the device, + additional drivers must be enabled in order to use the + functionality of the device (audio, vibra). config MFD_STMPE bool "Support STMicroelectronics STMPE" diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 1895cf9fab8c..1582c3d95257 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c @@ -527,7 +527,9 @@ static void asic3_gpio_set(struct gpio_chip *chip, static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { - return (offset < ASIC3_NUM_GPIOS) ? IRQ_BOARD_START + offset : -ENXIO; + struct asic3 *asic = container_of(chip, struct asic3, gpio); + + return (offset < ASIC3_NUM_GPIOS) ? 
asic->irq_base + offset : -ENXIO; } static __init int asic3_gpio_probe(struct platform_device *pdev, diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 95a2e546a489..7e96bb229724 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include @@ -502,19 +502,6 @@ static void omap_usbhs_init(struct device *dev) pm_runtime_get_sync(dev); spin_lock_irqsave(&omap->lock, flags); - if (pdata->ehci_data->phy_reset) { - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) - gpio_request_one(pdata->ehci_data->reset_gpio_port[0], - GPIOF_OUT_INIT_LOW, "USB1 PHY reset"); - - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) - gpio_request_one(pdata->ehci_data->reset_gpio_port[1], - GPIOF_OUT_INIT_LOW, "USB2 PHY reset"); - - /* Hold the PHY in RESET for enough time till DIR is high */ - udelay(10); - } - omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION); dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev); @@ -593,39 +580,10 @@ static void omap_usbhs_init(struct device *dev) usbhs_omap_tll_init(dev, OMAP_TLL_CHANNEL_COUNT); } - if (pdata->ehci_data->phy_reset) { - /* Hold the PHY in RESET for enough time till - * PHY is settled and ready - */ - udelay(10); - - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) - gpio_set_value - (pdata->ehci_data->reset_gpio_port[0], 1); - - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) - gpio_set_value - (pdata->ehci_data->reset_gpio_port[1], 1); - } - spin_unlock_irqrestore(&omap->lock, flags); pm_runtime_put_sync(dev); } -static void omap_usbhs_deinit(struct device *dev) -{ - struct usbhs_hcd_omap *omap = dev_get_drvdata(dev); - struct usbhs_omap_platform_data *pdata = &omap->platdata; - - if (pdata->ehci_data->phy_reset) { - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) - gpio_free(pdata->ehci_data->reset_gpio_port[0]); - - if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) - gpio_free(pdata->ehci_data->reset_gpio_port[1]); - } -} - /** * usbhs_omap_probe - initialize TI-based HCDs @@ -860,7 +818,6 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev) { struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev); - omap_usbhs_deinit(&pdev->dev); iounmap(omap->tll_base); iounmap(omap->uhh_base); clk_put(omap->init_60m_fclk); diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c index 99ef944c621d..44afae0a69ce 100644 --- a/drivers/mfd/rc5t583.c +++ b/drivers/mfd/rc5t583.c @@ -80,44 +80,6 @@ static struct mfd_cell rc5t583_subdevs[] = { {.name = "rc5t583-key", } }; -int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val) -{ - struct rc5t583 *rc5t583 = dev_get_drvdata(dev); - return regmap_write(rc5t583->regmap, reg, val); -} - -int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val) -{ - struct rc5t583 *rc5t583 = dev_get_drvdata(dev); - unsigned int ival; - int ret; - ret = regmap_read(rc5t583->regmap, reg, &ival); - if (!ret) - *val = (uint8_t)ival; - return ret; -} - -int rc5t583_set_bits(struct device *dev, unsigned int reg, - unsigned int bit_mask) -{ - struct rc5t583 *rc5t583 = dev_get_drvdata(dev); - return regmap_update_bits(rc5t583->regmap, reg, bit_mask, bit_mask); -} - -int rc5t583_clear_bits(struct device *dev, unsigned int reg, - unsigned int bit_mask) -{ - struct rc5t583 *rc5t583 = dev_get_drvdata(dev); - return regmap_update_bits(rc5t583->regmap, reg, bit_mask, 0); -} - -int rc5t583_update(struct device *dev, unsigned int reg, - 
unsigned int val, unsigned int mask) -{ - struct rc5t583 *rc5t583 = dev_get_drvdata(dev); - return regmap_update_bits(rc5t583->regmap, reg, mask, val); -} - static int __rc5t583_set_ext_pwrreq1_control(struct device *dev, int id, int ext_pwr, int slots) { @@ -197,6 +159,7 @@ int rc5t583_ext_power_req_config(struct device *dev, int ds_id, ds_id, ext_pwr_req); return 0; } +EXPORT_SYMBOL(rc5t583_ext_power_req_config); static int rc5t583_clear_ext_power_req(struct rc5t583 *rc5t583, struct rc5t583_platform_data *pdata) diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c index b2d8e512d3cb..2d6bedadca09 100644 --- a/drivers/mfd/twl6040-core.c +++ b/drivers/mfd/twl6040-core.c @@ -30,7 +30,9 @@ #include #include #include -#include +#include +#include +#include #include #include @@ -39,7 +41,7 @@ int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg) { int ret; - u8 val = 0; + unsigned int val; mutex_lock(&twl6040->io_mutex); /* Vibra control registers from cache */ @@ -47,7 +49,7 @@ int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg) reg == TWL6040_REG_VIBCTLR)) { val = twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)]; } else { - ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg); + ret = regmap_read(twl6040->regmap, reg, &val); if (ret < 0) { mutex_unlock(&twl6040->io_mutex); return ret; @@ -64,7 +66,7 @@ int twl6040_reg_write(struct twl6040 *twl6040, unsigned int reg, u8 val) int ret; mutex_lock(&twl6040->io_mutex); - ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg); + ret = regmap_write(twl6040->regmap, reg, val); /* Cache the vibra control registers */ if (reg == TWL6040_REG_VIBCTLL || reg == TWL6040_REG_VIBCTLR) twl6040->vibra_ctrl_cache[VIBRACTRL_MEMBER(reg)] = val; @@ -77,16 +79,9 @@ EXPORT_SYMBOL(twl6040_reg_write); int twl6040_set_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask) { int ret; - u8 val; mutex_lock(&twl6040->io_mutex); - ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg); - if (ret) - goto out; - - val |= mask; - ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg); -out: + ret = regmap_update_bits(twl6040->regmap, reg, mask, mask); mutex_unlock(&twl6040->io_mutex); return ret; } @@ -95,16 +90,9 @@ EXPORT_SYMBOL(twl6040_set_bits); int twl6040_clear_bits(struct twl6040 *twl6040, unsigned int reg, u8 mask) { int ret; - u8 val; mutex_lock(&twl6040->io_mutex); - ret = twl_i2c_read_u8(TWL_MODULE_AUDIO_VOICE, &val, reg); - if (ret) - goto out; - - val &= ~mask; - ret = twl_i2c_write_u8(TWL_MODULE_AUDIO_VOICE, val, reg); -out: + ret = regmap_update_bits(twl6040->regmap, reg, mask, 0); mutex_unlock(&twl6040->io_mutex); return ret; } @@ -494,32 +482,58 @@ static struct resource twl6040_codec_rsrc[] = { }, }; -static int __devinit twl6040_probe(struct platform_device *pdev) +static bool twl6040_readable_reg(struct device *dev, unsigned int reg) { - struct twl4030_audio_data *pdata = pdev->dev.platform_data; + /* Register 0 is not readable */ + if (!reg) + return false; + return true; +} + +static struct regmap_config twl6040_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = TWL6040_REG_STATUS, /* 0x2e */ + + .readable_reg = twl6040_readable_reg, +}; + +static int __devinit twl6040_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct twl6040_platform_data *pdata = client->dev.platform_data; struct twl6040 *twl6040; struct mfd_cell *cell = NULL; int ret, children = 0; if (!pdata) { - dev_err(&pdev->dev, "Platform data is missing\n"); + dev_err(&client->dev, "Platform data is 
missing\n"); return -EINVAL; } /* In order to operate correctly we need valid interrupt config */ - if (!pdata->naudint_irq || !pdata->irq_base) { - dev_err(&pdev->dev, "Invalid IRQ configuration\n"); + if (!client->irq || !pdata->irq_base) { + dev_err(&client->dev, "Invalid IRQ configuration\n"); return -EINVAL; } - twl6040 = kzalloc(sizeof(struct twl6040), GFP_KERNEL); - if (!twl6040) - return -ENOMEM; + twl6040 = devm_kzalloc(&client->dev, sizeof(struct twl6040), + GFP_KERNEL); + if (!twl6040) { + ret = -ENOMEM; + goto err; + } - platform_set_drvdata(pdev, twl6040); + twl6040->regmap = regmap_init_i2c(client, &twl6040_regmap_config); + if (IS_ERR(twl6040->regmap)) { + ret = PTR_ERR(twl6040->regmap); + goto err; + } - twl6040->dev = &pdev->dev; - twl6040->irq = pdata->naudint_irq; + i2c_set_clientdata(client, twl6040); + + twl6040->dev = &client->dev; + twl6040->irq = client->irq; twl6040->irq_base = pdata->irq_base; mutex_init(&twl6040->mutex); @@ -588,12 +602,12 @@ static int __devinit twl6040_probe(struct platform_device *pdev) } if (children) { - ret = mfd_add_devices(&pdev->dev, pdev->id, twl6040->cells, + ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children, NULL, 0); if (ret) goto mfd_err; } else { - dev_err(&pdev->dev, "No platform data found for children\n"); + dev_err(&client->dev, "No platform data found for children\n"); ret = -ENODEV; goto mfd_err; } @@ -608,14 +622,15 @@ gpio2_err: if (gpio_is_valid(twl6040->audpwron)) gpio_free(twl6040->audpwron); gpio1_err: - platform_set_drvdata(pdev, NULL); - kfree(twl6040); + i2c_set_clientdata(client, NULL); + regmap_exit(twl6040->regmap); +err: return ret; } -static int __devexit twl6040_remove(struct platform_device *pdev) +static int __devexit twl6040_remove(struct i2c_client *client) { - struct twl6040 *twl6040 = platform_get_drvdata(pdev); + struct twl6040 *twl6040 = i2c_get_clientdata(client); if (twl6040->power_count) twl6040_power(twl6040, 0); @@ -626,23 +641,30 @@ static int __devexit twl6040_remove(struct platform_device *pdev) free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040); twl6040_irq_exit(twl6040); - mfd_remove_devices(&pdev->dev); - platform_set_drvdata(pdev, NULL); - kfree(twl6040); + mfd_remove_devices(&client->dev); + i2c_set_clientdata(client, NULL); + regmap_exit(twl6040->regmap); return 0; } -static struct platform_driver twl6040_driver = { +static const struct i2c_device_id twl6040_i2c_id[] = { + { "twl6040", 0, }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, twl6040_i2c_id); + +static struct i2c_driver twl6040_driver = { + .driver = { + .name = "twl6040", + .owner = THIS_MODULE, + }, .probe = twl6040_probe, .remove = __devexit_p(twl6040_remove), - .driver = { - .owner = THIS_MODULE, - .name = "twl6040", - }, + .id_table = twl6040_i2c_id, }; -module_platform_driver(twl6040_driver); +module_i2c_driver(twl6040_driver); MODULE_DESCRIPTION("TWL6040 MFD"); MODULE_AUTHOR("Misael Lopez Cruz "); diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index b1809650b7aa..dabec556ebb8 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -873,7 +873,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; - unsigned int from, nr, arg; + unsigned int from, nr, arg, trim_arg, erase_arg; int err = 0, type = MMC_BLK_SECDISCARD; if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) { @@ -881,20 +881,26 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, goto out; } - /* The 
sanitize operation is supported at v4.5 only */ - if (mmc_can_sanitize(card)) { - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_SANITIZE_START, 1, 0); - goto out; - } - from = blk_rq_pos(req); nr = blk_rq_sectors(req); - if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) - arg = MMC_SECURE_TRIM1_ARG; - else - arg = MMC_SECURE_ERASE_ARG; + /* The sanitize operation is supported at v4.5 only */ + if (mmc_can_sanitize(card)) { + erase_arg = MMC_ERASE_ARG; + trim_arg = MMC_TRIM_ARG; + } else { + erase_arg = MMC_SECURE_ERASE_ARG; + trim_arg = MMC_SECURE_TRIM1_ARG; + } + + if (mmc_erase_group_aligned(card, from, nr)) + arg = erase_arg; + else if (mmc_can_trim(card)) + arg = trim_arg; + else { + err = -EINVAL; + goto out; + } retry: if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, @@ -904,25 +910,41 @@ retry: INAND_CMD38_ARG_SECERASE, 0); if (err) - goto out; + goto out_retry; } + err = mmc_erase(card, from, nr, arg); - if (!err && arg == MMC_SECURE_TRIM1_ARG) { + if (err == -EIO) + goto out_retry; + if (err) + goto out; + + if (arg == MMC_SECURE_TRIM1_ARG) { if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, INAND_CMD38_ARG_SECTRIM2, 0); if (err) - goto out; + goto out_retry; } + err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); + if (err == -EIO) + goto out_retry; + if (err) + goto out; } -out: - if (err == -EIO && !mmc_blk_reset(md, card->host, type)) + + if (mmc_can_sanitize(card)) + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_SANITIZE_START, 1, 0); +out_retry: + if (err && !mmc_blk_reset(md, card->host, type)) goto retry; if (!err) mmc_blk_reset_success(md, type); +out: spin_lock_irq(&md->lock); __blk_end_request(req, err, blk_rq_bytes(req)); spin_unlock_irq(&md->lock); @@ -1802,7 +1824,7 @@ static void mmc_blk_remove(struct mmc_card *card) } #ifdef CONFIG_PM -static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) +static int mmc_blk_suspend(struct mmc_card *card) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = mmc_get_drvdata(card); diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 2517547b4366..996f8e36e23d 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -139,7 +139,7 @@ static void mmc_queue_setup_discard(struct request_queue *q, queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); q->limits.max_discard_sectors = max_discard; - if (card->erased_byte == 0) + if (card->erased_byte == 0 && !mmc_can_discard(card)) q->limits.discard_zeroes_data = 1; q->limits.discard_granularity = card->pref_erase << 9; /* granularity must not be greater than max. 
discard */ diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 3f606068d552..c60cee92a2b2 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -122,14 +122,14 @@ static int mmc_bus_remove(struct device *dev) return 0; } -static int mmc_bus_suspend(struct device *dev, pm_message_t state) +static int mmc_bus_suspend(struct device *dev) { struct mmc_driver *drv = to_mmc_driver(dev->driver); struct mmc_card *card = mmc_dev_to_card(dev); int ret = 0; if (dev->driver && drv->suspend) - ret = drv->suspend(card, state); + ret = drv->suspend(card); return ret; } @@ -165,20 +165,14 @@ static int mmc_runtime_idle(struct device *dev) return pm_runtime_suspend(dev); } -static const struct dev_pm_ops mmc_bus_pm_ops = { - .runtime_suspend = mmc_runtime_suspend, - .runtime_resume = mmc_runtime_resume, - .runtime_idle = mmc_runtime_idle, -}; - -#define MMC_PM_OPS_PTR (&mmc_bus_pm_ops) - -#else /* !CONFIG_PM_RUNTIME */ - -#define MMC_PM_OPS_PTR NULL - #endif /* !CONFIG_PM_RUNTIME */ +static const struct dev_pm_ops mmc_bus_pm_ops = { + SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, + mmc_runtime_idle) + SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume) +}; + static struct bus_type mmc_bus_type = { .name = "mmc", .dev_attrs = mmc_dev_attrs, @@ -186,9 +180,7 @@ static struct bus_type mmc_bus_type = { .uevent = mmc_bus_uevent, .probe = mmc_bus_probe, .remove = mmc_bus_remove, - .suspend = mmc_bus_suspend, - .resume = mmc_bus_resume, - .pm = MMC_PM_OPS_PTR, + .pm = &mmc_bus_pm_ops, }; int mmc_register_bus(void) diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c index 29de31e260dd..2c14be73254c 100644 --- a/drivers/mmc/core/cd-gpio.c +++ b/drivers/mmc/core/cd-gpio.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 7474c47b9c08..ba821fe70bca 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1409,7 +1409,10 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, { unsigned int erase_timeout; - if (card->ext_csd.erase_group_def & 1) { + if (arg == MMC_DISCARD_ARG || + (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) { + erase_timeout = card->ext_csd.trim_timeout; + } else if (card->ext_csd.erase_group_def & 1) { /* High Capacity Erase Group Size uses HC timeouts */ if (arg == MMC_TRIM_ARG) erase_timeout = card->ext_csd.trim_timeout; @@ -1681,8 +1684,6 @@ int mmc_can_trim(struct mmc_card *card) { if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) return 1; - if (mmc_can_discard(card)) - return 1; return 0; } EXPORT_SYMBOL(mmc_can_trim); @@ -1701,6 +1702,8 @@ EXPORT_SYMBOL(mmc_can_discard); int mmc_can_sanitize(struct mmc_card *card) { + if (!mmc_can_trim(card) && !mmc_can_erase(card)) + return 0; if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE) return 1; return 0; @@ -2235,6 +2238,7 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable) mmc_card_is_removable(host)) return err; + mmc_claim_host(host); if (card && mmc_card_mmc(card) && (card->ext_csd.cache_size > 0)) { enable = !!enable; @@ -2252,6 +2256,7 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable) card->ext_csd.cache_ctrl = enable; } } + mmc_release_host(host); return err; } @@ -2269,49 +2274,32 @@ int mmc_suspend_host(struct mmc_host *host) cancel_delayed_work(&host->detect); mmc_flush_scheduled_work(); - if (mmc_try_claim_host(host)) { - err = mmc_cache_ctrl(host, 0); - mmc_release_host(host); - } else { - err = -EBUSY; - } + err 
= mmc_cache_ctrl(host, 0); if (err) goto out; mmc_bus_get(host); if (host->bus_ops && !host->bus_dead) { - /* - * A long response time is not acceptable for device drivers - * when doing suspend. Prevent mmc_claim_host in the suspend - * sequence, to potentially wait "forever" by trying to - * pre-claim the host. - */ - if (mmc_try_claim_host(host)) { - if (host->bus_ops->suspend) { - err = host->bus_ops->suspend(host); - } - mmc_release_host(host); + if (host->bus_ops->suspend) + err = host->bus_ops->suspend(host); - if (err == -ENOSYS || !host->bus_ops->resume) { - /* - * We simply "remove" the card in this case. - * It will be redetected on resume. (Calling - * bus_ops->remove() with a claimed host can - * deadlock.) - */ - if (host->bus_ops->remove) - host->bus_ops->remove(host); - mmc_claim_host(host); - mmc_detach_bus(host); - mmc_power_off(host); - mmc_release_host(host); - host->pm_flags = 0; - err = 0; - } - } else { - err = -EBUSY; + if (err == -ENOSYS || !host->bus_ops->resume) { + /* + * We simply "remove" the card in this case. + * It will be redetected on resume. (Calling + * bus_ops->remove() with a claimed host can + * deadlock.) + */ + if (host->bus_ops->remove) + host->bus_ops->remove(host); + mmc_claim_host(host); + mmc_detach_bus(host); + mmc_power_off(host); + mmc_release_host(host); + host->pm_flags = 0; + err = 0; } } mmc_bus_put(host); diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index bf3c9b456aaf..ab3fc4617107 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -526,8 +526,10 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) return -ENODEV; sg_len = dw_mci_pre_dma_transfer(host, data, 0); - if (sg_len < 0) + if (sg_len < 0) { + host->dma_ops->stop(host); return sg_len; + } host->using_dma = 1; @@ -1879,7 +1881,8 @@ static void dw_mci_init_dma(struct dw_mci *host) if (!host->dma_ops) goto no_dma; - if (host->dma_ops->init) { + if (host->dma_ops->init && host->dma_ops->start && + host->dma_ops->stop && host->dma_ops->cleanup) { if (host->dma_ops->init(host)) { dev_err(&host->dev, "%s: Unable to initialize " "DMA Controller.\n", __func__); diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index b0f2ef988188..e3f5af96ab87 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -363,6 +363,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host) goto out; dmaengine_submit(desc); + dma_async_issue_pending(host->dmach); return; out: @@ -403,6 +404,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) goto out; dmaengine_submit(desc); + dma_async_issue_pending(host->dmach); return; out: @@ -531,6 +533,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) goto out; dmaengine_submit(desc); + dma_async_issue_pending(host->dmach); return; out: dev_warn(mmc_dev(host->mmc), diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 5c2b1c10af9c..56d4499d4388 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -249,7 +249,7 @@ static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on, * the pbias cell programming support is still missing when * booting with Device tree */ - if (of_have_populated_dt() && !vdd) + if (dev->of_node && !vdd) return 0; if (mmc_slot(host).before_set_reg) @@ -1549,7 +1549,7 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) * can't be allowed when booting with device * tree. 
*/ - (!of_have_populated_dt())) { + !host->dev->of_node) { /* * The mmc_select_voltage fn of the core does * not seem to set the power_mode to @@ -1741,7 +1741,7 @@ static const struct of_device_id omap_mmc_of_match[] = { .data = &omap4_reg_offset, }, {}, -} +}; MODULE_DEVICE_TABLE(of, omap_mmc_of_match); static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev) diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 6193a0d7bde5..8abdaf6697a8 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -467,8 +467,7 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev) clk_prepare_enable(clk); pltfm_host->clk = clk; - if (!is_imx25_esdhc(imx_data)) - host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data)) /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 9aa77f3f04a8..ccefdebeff14 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -147,7 +147,7 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) u32 present, irqs; if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || - !mmc_card_is_removable(host->mmc)) + (host->mmc->caps & MMC_CAP_NONREMOVABLE)) return; present = sdhci_readl(host, SDHCI_PRESENT_STATE) & diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 75b1dde16358..9ec51cec2e14 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -266,6 +266,7 @@ int start_dma_without_bch_irq(struct gpmi_nand_data *this, desc->callback = dma_irq_callback; desc->callback_param = this; dmaengine_submit(desc); + dma_async_issue_pending(get_dma_chan(this)); /* Wait for the interrupt from the DMA block. */ err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000)); diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index 25197b698dd6..b8b4c7ba884f 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c @@ -89,16 +89,16 @@ static int __init arcrimi_probe(struct net_device *dev) BUGLVL(D_NORMAL) printk(VERSION); BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n"); - BUGMSG(D_NORMAL, "Given: node %02Xh, shmem %lXh, irq %d\n", + BUGLVL(D_NORMAL) printk("Given: node %02Xh, shmem %lXh, irq %d\n", dev->dev_addr[0], dev->mem_start, dev->irq); if (dev->mem_start <= 0 || dev->irq <= 0) { - BUGMSG(D_NORMAL, "No autoprobe for RIM I; you " + BUGLVL(D_NORMAL) printk("No autoprobe for RIM I; you " "must specify the shmem and irq!\n"); return -ENODEV; } if (dev->dev_addr[0] == 0) { - BUGMSG(D_NORMAL, "You need to specify your card's station " + BUGLVL(D_NORMAL) printk("You need to specify your card's station " "ID!\n"); return -ENODEV; } @@ -109,7 +109,7 @@ static int __init arcrimi_probe(struct net_device *dev) * will be taken. */ if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) { - BUGMSG(D_NORMAL, "Card memory already allocated\n"); + BUGLVL(D_NORMAL) printk("Card memory already allocated\n"); return -ENODEV; } return arcrimi_found(dev); diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 793b00138275..3463b469e657 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c @@ -2173,9 +2173,10 @@ re_arm: * received frames (loopback). 
Since only the payload is given to this * function, it check for loopback. */ -static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length) +static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u16 length) { struct port *port; + int ret = RX_HANDLER_ANOTHER; if (length >= sizeof(struct lacpdu)) { @@ -2184,11 +2185,12 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u if (!port->slave) { pr_warning("%s: Warning: port of slave %s is uninitialized\n", slave->dev->name, slave->dev->master->name); - return; + return ret; } switch (lacpdu->subtype) { case AD_TYPE_LACPDU: + ret = RX_HANDLER_CONSUMED; pr_debug("Received LACPDU on port %d\n", port->actor_port_number); /* Protect against concurrent state machines */ @@ -2198,6 +2200,7 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u break; case AD_TYPE_MARKER: + ret = RX_HANDLER_CONSUMED; // No need to convert fields to Little Endian since we don't use the marker's fields. switch (((struct bond_marker *)lacpdu)->tlv_type) { @@ -2219,6 +2222,7 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u } } } + return ret; } /** @@ -2456,18 +2460,20 @@ out: return NETDEV_TX_OK; } -void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, +int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, struct slave *slave) { + int ret = RX_HANDLER_ANOTHER; if (skb->protocol != PKT_TYPE_LACPDU) - return; + return ret; if (!pskb_may_pull(skb, sizeof(struct lacpdu))) - return; + return ret; read_lock(&bond->lock); - bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); + ret = bond_3ad_rx_indication((struct lacpdu *) skb->data, slave, skb->len); read_unlock(&bond->lock); + return ret; } /* diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index 235b2cc58b28..5ee7e3c45db7 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h @@ -274,7 +274,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave); void bond_3ad_handle_link_change(struct slave *slave, char link); int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); -void bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, +int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct bonding *bond, struct slave *slave); int bond_3ad_set_carrier(struct bonding *bond); void bond_3ad_update_lacp_rate(struct bonding *bond); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 62d2409bb293..bc13b3d77432 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1444,8 +1444,9 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) struct sk_buff *skb = *pskb; struct slave *slave; struct bonding *bond; - void (*recv_probe)(struct sk_buff *, struct bonding *, + int (*recv_probe)(struct sk_buff *, struct bonding *, struct slave *); + int ret = RX_HANDLER_ANOTHER; skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) @@ -1464,8 +1465,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); if (likely(nskb)) { - recv_probe(nskb, bond, slave); + ret = recv_probe(nskb, bond, slave); dev_kfree_skb(nskb); + if (ret == RX_HANDLER_CONSUMED) { + consume_skb(skb); + return ret; + } } } @@ -1487,7 +1492,7 @@ static rx_handler_result_t 
bond_handle_frame(struct sk_buff **pskb) memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN); } - return RX_HANDLER_ANOTHER; + return ret; } /* enslave device to bond device */ @@ -2723,7 +2728,7 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 } } -static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond, +static int bond_arp_rcv(struct sk_buff *skb, struct bonding *bond, struct slave *slave) { struct arphdr *arp; @@ -2731,7 +2736,7 @@ static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond, __be32 sip, tip; if (skb->protocol != __cpu_to_be16(ETH_P_ARP)) - return; + return RX_HANDLER_ANOTHER; read_lock(&bond->lock); @@ -2776,6 +2781,7 @@ static void bond_arp_rcv(struct sk_buff *skb, struct bonding *bond, out_unlock: read_unlock(&bond->lock); + return RX_HANDLER_ANOTHER; } /* diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c index 9a66e2a910ae..9c1c8cd5223f 100644 --- a/drivers/net/caif/caif_hsi.c +++ b/drivers/net/caif/caif_hsi.c @@ -744,14 +744,14 @@ static void cfhsi_wake_up(struct work_struct *work) size_t fifo_occupancy = 0; /* Wakeup timeout */ - dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n", + dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n", __func__); /* Check FIFO to check if modem has sent something. */ WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev, &fifo_occupancy)); - dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n", + dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n", __func__, (unsigned) fifo_occupancy); /* Check if we misssed the interrupt. */ @@ -1210,7 +1210,7 @@ int cfhsi_probe(struct platform_device *pdev) static void cfhsi_shutdown(struct cfhsi *cfhsi) { - u8 *tx_buf, *rx_buf; + u8 *tx_buf, *rx_buf, *flip_buf; /* Stop TXing */ netif_tx_stop_all_queues(cfhsi->ndev); @@ -1234,7 +1234,7 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi) /* Store bufferes: will be freed later. */ tx_buf = cfhsi->tx_buf; rx_buf = cfhsi->rx_buf; - + flip_buf = cfhsi->rx_flip_buf; /* Flush transmit queues. */ cfhsi_abort_tx(cfhsi); @@ -1247,6 +1247,7 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi) /* Free buffers. 
*/ kfree(tx_buf); kfree(rx_buf); + kfree(flip_buf); } int cfhsi_remove(struct platform_device *pdev) diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index 5234586dff15..629c4ba5d49d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c @@ -875,6 +875,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev) PCAN_USBPRO_INFO_FW, &fi, sizeof(fi)); if (err) { + kfree(usb_if); dev_err(dev->netdev->dev.parent, "unable to read %s firmware info (err %d)\n", pcan_usb_pro.name, err); @@ -885,6 +886,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev) PCAN_USBPRO_INFO_BL, &bi, sizeof(bi)); if (err) { + kfree(usb_if); dev_err(dev->netdev->dev.parent, "unable to read %s bootloader info (err %d)\n", pcan_usb_pro.name, err); diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index d5c6d92f1ee7..442d91a2747b 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -107,14 +107,14 @@ static int dummy_dev_init(struct net_device *dev) return 0; } -static void dummy_dev_free(struct net_device *dev) +static void dummy_dev_uninit(struct net_device *dev) { free_percpu(dev->dstats); - free_netdev(dev); } static const struct net_device_ops dummy_netdev_ops = { .ndo_init = dummy_dev_init, + .ndo_uninit = dummy_dev_uninit, .ndo_start_xmit = dummy_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = set_multicast_list, @@ -128,7 +128,7 @@ static void dummy_setup(struct net_device *dev) /* Initialize the device structure. */ dev->netdev_ops = &dummy_netdev_ops; - dev->destructor = dummy_dev_free; + dev->destructor = free_netdev; /* Fill in device structure with ethernet-generic values. */ dev->tx_queue_len = 0; diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 40ac41436549..c926857e8205 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2476,7 +2476,7 @@ static irqreturn_t atl1_intr(int irq, void *data) "pcie phy link down %x\n", status); if (netif_running(adapter->netdev)) { /* reset MAC */ iowrite32(0, adapter->hw.hw_addr + REG_IMR); - schedule_work(&adapter->pcie_dma_to_rst_task); + schedule_work(&adapter->reset_dev_task); return IRQ_HANDLED; } } @@ -2488,7 +2488,7 @@ static irqreturn_t atl1_intr(int irq, void *data) "pcie DMA r/w error (status = 0x%x)\n", status); iowrite32(0, adapter->hw.hw_addr + REG_IMR); - schedule_work(&adapter->pcie_dma_to_rst_task); + schedule_work(&adapter->reset_dev_task); return IRQ_HANDLED; } @@ -2633,10 +2633,10 @@ static void atl1_down(struct atl1_adapter *adapter) atl1_clean_rx_ring(adapter); } -static void atl1_tx_timeout_task(struct work_struct *work) +static void atl1_reset_dev_task(struct work_struct *work) { struct atl1_adapter *adapter = - container_of(work, struct atl1_adapter, tx_timeout_task); + container_of(work, struct atl1_adapter, reset_dev_task); struct net_device *netdev = adapter->netdev; netif_device_detach(netdev); @@ -3038,12 +3038,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev, (unsigned long)adapter); adapter->phy_timer_pending = false; - INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); + INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task); INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task); - INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task); - err = register_netdev(netdev); if (err) goto err_common; diff --git a/drivers/net/ethernet/atheros/atlx/atl1.h 
b/drivers/net/ethernet/atheros/atlx/atl1.h index 109d6da8be97..e04bf4d71e46 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.h +++ b/drivers/net/ethernet/atheros/atlx/atl1.h @@ -758,9 +758,8 @@ struct atl1_adapter { u16 link_speed; u16 link_duplex; spinlock_t lock; - struct work_struct tx_timeout_task; + struct work_struct reset_dev_task; struct work_struct link_chg_task; - struct work_struct pcie_dma_to_rst_task; struct timer_list phy_config_timer; bool phy_timer_pending; diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c index 3cd8837236dc..c9e9dc57986c 100644 --- a/drivers/net/ethernet/atheros/atlx/atlx.c +++ b/drivers/net/ethernet/atheros/atlx/atlx.c @@ -194,7 +194,7 @@ static void atlx_tx_timeout(struct net_device *netdev) { struct atlx_adapter *adapter = netdev_priv(netdev); /* Do the reset outside of interrupt context */ - schedule_work(&adapter->tx_timeout_task); + schedule_work(&adapter->reset_dev_task); } /* diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index ad95324dc042..64392ec410a3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -942,6 +942,12 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 : DCBX_E3B0_MAX_NUM_COS_PORT0; + if (pri >= max_num_of_cos) { + DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " + "parameter Illegal strict priority\n"); + return -EINVAL; + } + if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) { DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " "parameter There can't be two COS's with " @@ -949,12 +955,6 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params, return -EINVAL; } - if (pri > max_num_of_cos) { - DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid " - "parameter Illegal strict priority\n"); - return -EINVAL; - } - sp_pri_to_cos[pri] = cos_entry; return 0; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e077d2508727..6af310195bae 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9122,13 +9122,34 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp) return bnx2x_prev_mcp_done(bp); } +/* previous driver DMAE transaction may have occurred when pre-boot stage ended + * and boot began, or when kdump kernel was loaded. Either case would invalidate + * the addresses of the transaction, resulting in was-error bit set in the pci + * causing all hw-to-host pcie transactions to timeout. If this happened we want + * to clear the interrupt which detected this from the pglueb and the was done + * bit + */ +static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp) +{ + u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { + BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing"); + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp)); + } +} + static int __devinit bnx2x_prev_unload(struct bnx2x *bp) { int time_counter = 10; u32 rc, fw, hw_lock_reg, hw_lock_val; BNX2X_DEV_INFO("Entering Previous Unload Flow\n"); - /* Release previously held locks */ + /* clear hw from errors which may have resulted from an interrupted + * dmae transaction. 
+ */ + bnx2x_prev_interrupted_dmae(bp); + + /* Release previously held locks */ hw_lock_reg = (BP_FUNC(bp) <= 5) ? (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) : (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 062ac333fde6..ceeab8e852ef 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -879,8 +879,13 @@ static inline unsigned int tg3_has_work(struct tg3_napi *tnapi) if (sblk->status & SD_STATUS_LINK_CHG) work_exists = 1; } - /* check for RX/TX work to do */ - if (sblk->idx[0].tx_consumer != tnapi->tx_cons || + + /* check for TX work to do */ + if (sblk->idx[0].tx_consumer != tnapi->tx_cons) + work_exists = 1; + + /* check for RX work to do */ + if (tnapi->rx_rcb_prod_idx && *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) work_exists = 1; @@ -6124,6 +6129,9 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) return work_done; } + if (!tnapi->rx_rcb_prod_idx) + return work_done; + /* run RX thread, within the bounds set by NAPI. * All RX "locking" is done by ensuring outside * code synchronizes with tg3->napi.poll() @@ -7567,6 +7575,12 @@ static int tg3_alloc_consistent(struct tg3 *tp) */ switch (i) { default: + if (tg3_flag(tp, ENABLE_RSS)) { + tnapi->rx_rcb_prod_idx = NULL; + break; + } + /* Fall through */ + case 1: tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer; break; case 2: diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 63bfdd10bd6d..abb6ce7c1b7e 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -1149,6 +1149,48 @@ release_tpsram: return ret; } +/** + * t3_synchronize_rx - wait for current Rx processing on a port to complete + * @adap: the adapter + * @p: the port + * + * Ensures that current Rx processing on any of the queues associated with + * the given port completes before returning. We do this by acquiring and + * releasing the locks of the response queues associated with the port. 
+ */ +static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p) +{ + int i; + + for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { + struct sge_rspq *q = &adap->sge.qs[i].rspq; + + spin_lock_irq(&q->lock); + spin_unlock_irq(&q->lock); + } +} + +static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features) +{ + struct port_info *pi = netdev_priv(dev); + struct adapter *adapter = pi->adapter; + + if (adapter->params.rev > 0) { + t3_set_vlan_accel(adapter, 1 << pi->port_id, + features & NETIF_F_HW_VLAN_RX); + } else { + /* single control for all ports */ + unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX; + + for_each_port(adapter, i) + have_vlans |= + adapter->port[i]->features & NETIF_F_HW_VLAN_RX; + + t3_set_vlan_accel(adapter, 1, have_vlans); + } + t3_synchronize_rx(adapter, pi); +} + /** * cxgb_up - enable the adapter * @adapter: adapter being enabled @@ -1161,7 +1203,7 @@ release_tpsram: */ static int cxgb_up(struct adapter *adap) { - int err; + int i, err; if (!(adap->flags & FULL_INIT_DONE)) { err = t3_check_fw_version(adap); @@ -1198,6 +1240,9 @@ static int cxgb_up(struct adapter *adap) if (err) goto out; + for_each_port(adap, i) + cxgb_vlan_mode(adap->port[i], adap->port[i]->features); + setup_rss(adap); if (!(adap->flags & NAPI_INIT)) init_napi(adap); @@ -2508,48 +2553,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p) return 0; } -/** - * t3_synchronize_rx - wait for current Rx processing on a port to complete - * @adap: the adapter - * @p: the port - * - * Ensures that current Rx processing on any of the queues associated with - * the given port completes before returning. We do this by acquiring and - * releasing the locks of the response queues associated with the port. 
- */ -static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p) -{ - int i; - - for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { - struct sge_rspq *q = &adap->sge.qs[i].rspq; - - spin_lock_irq(&q->lock); - spin_unlock_irq(&q->lock); - } -} - -static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features) -{ - struct port_info *pi = netdev_priv(dev); - struct adapter *adapter = pi->adapter; - - if (adapter->params.rev > 0) { - t3_set_vlan_accel(adapter, 1 << pi->port_id, - features & NETIF_F_HW_VLAN_RX); - } else { - /* single control for all ports */ - unsigned int i, have_vlans = features & NETIF_F_HW_VLAN_RX; - - for_each_port(adapter, i) - have_vlans |= - adapter->port[i]->features & NETIF_F_HW_VLAN_RX; - - t3_set_vlan_accel(adapter, 1, have_vlans); - } - t3_synchronize_rx(adapter, pi); -} - static netdev_features_t cxgb_fix_features(struct net_device *dev, netdev_features_t features) { @@ -3353,9 +3356,6 @@ static int __devinit init_one(struct pci_dev *pdev, err = sysfs_create_group(&adapter->port[0]->dev.kobj, &cxgb3_attr_group); - for_each_port(adapter, i) - cxgb_vlan_mode(adapter->port[i], adapter->port[i]->features); - print_port_info(adapter, ai); return 0; diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index b2dc2c81a147..2e09edb9cdf8 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -1259,55 +1259,21 @@ rio_ioctl (struct net_device *dev, struct ifreq *rq, int cmd) { int phy_addr; struct netdev_private *np = netdev_priv(dev); - struct mii_data *miidata = (struct mii_data *) &rq->ifr_ifru; - - struct netdev_desc *desc; - int i; + struct mii_ioctl_data *miidata = if_mii(rq); phy_addr = np->phy_addr; switch (cmd) { - case SIOCDEVPRIVATE: + case SIOCGMIIPHY: + miidata->phy_id = phy_addr; break; - - case SIOCDEVPRIVATE + 1: - miidata->out_value = mii_read (dev, phy_addr, miidata->reg_num); + case SIOCGMIIREG: + miidata->val_out = mii_read (dev, phy_addr, miidata->reg_num); break; - case SIOCDEVPRIVATE + 2: - mii_write (dev, phy_addr, miidata->reg_num, miidata->in_value); + case SIOCSMIIREG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + mii_write (dev, phy_addr, miidata->reg_num, miidata->val_in); break; - case SIOCDEVPRIVATE + 3: - break; - case SIOCDEVPRIVATE + 4: - break; - case SIOCDEVPRIVATE + 5: - netif_stop_queue (dev); - break; - case SIOCDEVPRIVATE + 6: - netif_wake_queue (dev); - break; - case SIOCDEVPRIVATE + 7: - printk - ("tx_full=%x cur_tx=%lx old_tx=%lx cur_rx=%lx old_rx=%lx\n", - netif_queue_stopped(dev), np->cur_tx, np->old_tx, np->cur_rx, - np->old_rx); - break; - case SIOCDEVPRIVATE + 8: - printk("TX ring:\n"); - for (i = 0; i < TX_RING_SIZE; i++) { - desc = &np->tx_ring[i]; - printk - ("%02x:cur:%08x next:%08x status:%08x frag1:%08x frag0:%08x", - i, - (u32) (np->tx_ring_dma + i * sizeof (*desc)), - (u32)le64_to_cpu(desc->next_desc), - (u32)le64_to_cpu(desc->status), - (u32)(le64_to_cpu(desc->fraginfo) >> 32), - (u32)le64_to_cpu(desc->fraginfo)); - printk ("\n"); - } - printk ("\n"); - break; - default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h index ba0adcafa55a..30c2da3de548 100644 --- a/drivers/net/ethernet/dlink/dl2k.h +++ b/drivers/net/ethernet/dlink/dl2k.h @@ -365,13 +365,6 @@ struct ioctl_data { char *data; }; -struct mii_data { - __u16 reserved; - __u16 reg_num; - __u16 in_value; - __u16 out_value; -}; - /* The Rx and Tx buffer descriptors. 
*/ struct netdev_desc { __le64 next_desc; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 17a46e76123f..9ac14f804851 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -116,10 +116,10 @@ static struct ucc_geth_info ugeth_primary_info = { .maxGroupAddrInHash = 4, .maxIndAddrInHash = 4, .prel = 7, - .maxFrameLength = 1518, + .maxFrameLength = 1518+16, /* Add extra bytes for VLANs etc. */ .minFrameLength = 64, - .maxD1Length = 1520, - .maxD2Length = 1520, + .maxD1Length = 1520+16, /* Add extra bytes for VLANs etc. */ + .maxD2Length = 1520+16, /* Add extra bytes for VLANs etc. */ .vlantype = 0x8100, .ecamptr = ((uint32_t) NULL), .eventRegMask = UCCE_OTHER, diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h index 2e395a2566b8..f71b3e7b12de 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.h +++ b/drivers/net/ethernet/freescale/ucc_geth.h @@ -877,7 +877,7 @@ struct ucc_geth_hardware_statistics { /* Driver definitions */ #define TX_BD_RING_LEN 0x10 -#define RX_BD_RING_LEN 0x10 +#define RX_BD_RING_LEN 0x20 #define TX_RING_MOD_MASK(size) (size-1) #define RX_RING_MOD_MASK(size) (size-1) diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 3516e17a399d..f4d2da0db1b1 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -290,16 +290,18 @@ static void ehea_update_bcmc_registrations(void) arr[i].adh = adapter->handle; arr[i].port_id = port->logical_port_id; - arr[i].reg_type = EHEA_BCMC_SCOPE_ALL | - EHEA_BCMC_MULTICAST | + arr[i].reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED; + if (mc_entry->macaddr == 0) + arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL; arr[i++].macaddr = mc_entry->macaddr; arr[i].adh = adapter->handle; arr[i].port_id = port->logical_port_id; - arr[i].reg_type = EHEA_BCMC_SCOPE_ALL | - EHEA_BCMC_MULTICAST | + arr[i].reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL; + if (mc_entry->macaddr == 0) + arr[i].reg_type |= EHEA_BCMC_SCOPE_ALL; arr[i++].macaddr = mc_entry->macaddr; num_registrations -= 2; } @@ -1838,8 +1840,9 @@ static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr, u64 hret; u8 reg_type; - reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST - | EHEA_BCMC_UNTAGGED; + reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_UNTAGGED; + if (mc_mac_addr == 0) + reg_type |= EHEA_BCMC_SCOPE_ALL; hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, port->logical_port_id, @@ -1847,8 +1850,9 @@ static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr, if (hret) goto out; - reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST - | EHEA_BCMC_VLANID_ALL; + reg_type = EHEA_BCMC_MULTICAST | EHEA_BCMC_VLANID_ALL; + if (mc_mac_addr == 0) + reg_type |= EHEA_BCMC_SCOPE_ALL; hret = ehea_h_reg_dereg_bcmc(port->adapter->handle, port->logical_port_id, @@ -1898,7 +1902,7 @@ static void ehea_allmulti(struct net_device *dev, int enable) netdev_err(dev, "failed enabling IFF_ALLMULTI\n"); } - } else + } else { if (!enable) { /* Disable ALLMULTI */ hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC); @@ -1908,6 +1912,7 @@ static void ehea_allmulti(struct net_device *dev, int enable) netdev_err(dev, "failed disabling IFF_ALLMULTI\n"); } + } } static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr) @@ -1941,11 +1946,7 @@ static void ehea_set_multicast_list(struct net_device *dev) struct 
netdev_hw_addr *ha; int ret; - if (port->promisc) { - ehea_promiscuous(dev, 1); - return; - } - ehea_promiscuous(dev, 0); + ehea_promiscuous(dev, !!(dev->flags & IFF_PROMISC)); if (dev->flags & IFF_ALLMULTI) { ehea_allmulti(dev, 1); @@ -2463,6 +2464,7 @@ static int ehea_down(struct net_device *dev) return 0; ehea_drop_multicast_list(dev); + ehea_allmulti(dev, 0); ehea_broadcast_reg_helper(port, H_DEREG_BCMC); ehea_free_interrupts(dev); @@ -3261,6 +3263,7 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev, struct ehea_adapter *adapter; const u64 *adapter_handle; int ret; + int i; if (!dev || !dev->dev.of_node) { pr_err("Invalid ibmebus device probed\n"); @@ -3314,17 +3317,9 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev, tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet, (unsigned long)adapter); - ret = ibmebus_request_irq(adapter->neq->attr.ist1, - ehea_interrupt_neq, IRQF_DISABLED, - "ehea_neq", adapter); - if (ret) { - dev_err(&dev->dev, "requesting NEQ IRQ failed\n"); - goto out_kill_eq; - } - ret = ehea_create_device_sysfs(dev); if (ret) - goto out_free_irq; + goto out_kill_eq; ret = ehea_setup_ports(adapter); if (ret) { @@ -3332,15 +3327,30 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev, goto out_rem_dev_sysfs; } + ret = ibmebus_request_irq(adapter->neq->attr.ist1, + ehea_interrupt_neq, IRQF_DISABLED, + "ehea_neq", adapter); + if (ret) { + dev_err(&dev->dev, "requesting NEQ IRQ failed\n"); + goto out_shutdown_ports; + } + + /* Handle any events that might be pending. */ + tasklet_hi_schedule(&adapter->neq_tasklet); + ret = 0; goto out; +out_shutdown_ports: + for (i = 0; i < EHEA_MAX_PORTS; i++) + if (adapter->port[i]) { + ehea_shutdown_single_port(adapter->port[i]); + adapter->port[i] = NULL; + } + out_rem_dev_sysfs: ehea_remove_device_sysfs(dev); -out_free_irq: - ibmebus_free_irq(adapter->neq->attr.ist1, adapter); - out_kill_eq: ehea_destroy_eq(adapter->neq); diff --git a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h index 52c456ec4d6c..8364815c32ff 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_phyp.h +++ b/drivers/net/ethernet/ibm/ehea/ehea_phyp.h @@ -450,7 +450,7 @@ u64 ehea_h_modify_ehea_port(const u64 adapter_handle, const u16 port_num, void *cb_addr); #define H_REGBCMC_PN EHEA_BMASK_IBM(48, 63) -#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(61, 63) +#define H_REGBCMC_REGTYPE EHEA_BMASK_IBM(60, 63) #define H_REGBCMC_MACADDR EHEA_BMASK_IBM(16, 63) #define H_REGBCMC_VLANID EHEA_BMASK_IBM(52, 63) diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 4348b6fd44fa..37caa8885c2a 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3380,7 +3380,7 @@ static void e1000_dump(struct e1000_adapter *adapter) for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i]; - struct my_u { u64 a; u64 b; }; + struct my_u { __le64 a; __le64 b; }; struct my_u *u = (struct my_u *)tx_desc; const char *type; @@ -3424,7 +3424,7 @@ rx_ring_summary: for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i]; - struct my_u { u64 a; u64 b; }; + struct my_u { __le64 a; __le64 b; }; struct my_u *u = (struct my_u *)rx_desc; const char *type; diff --git 
a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 64c76443a7aa..b461c24945e3 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -1310,10 +1310,6 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) oem_reg |= HV_OEM_BITS_LPLU; - - /* Set Restart auto-neg to activate the bits */ - if (!hw->phy.ops.check_reset_block(hw)) - oem_reg |= HV_OEM_BITS_RESTART_AN; } else { if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) @@ -1324,6 +1320,11 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) oem_reg |= HV_OEM_BITS_LPLU; } + /* Set Restart auto-neg to activate the bits */ + if ((d0_state || (hw->mac.type != e1000_pchlan)) && + !hw->phy.ops.check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg); release: @@ -3682,7 +3683,11 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) if (hw->mac.type >= e1000_pchlan) { e1000_oem_bits_config_ich8lan(hw, false); - e1000_phy_hw_reset_ich8lan(hw); + + /* Reset PHY to activate OEM bits on 82577/8 */ + if (hw->mac.type == e1000_pchlan) + e1000e_phy_hw_reset_generic(hw); + ret_val = hw->phy.ops.acquire(hw); if (ret_val) return; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 19ab2154802c..9520a6ac1f30 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3799,7 +3799,7 @@ static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) /* fire an unusual interrupt on the test handler */ ew32(ICS, E1000_ICS_RXSEQ); e1e_flush(); - msleep(50); + msleep(100); e1000_irq_disable(adapter); diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c index ff796e42c3eb..16adeb9418a8 100644 --- a/drivers/net/ethernet/intel/e1000e/param.c +++ b/drivers/net/ethernet/intel/e1000e/param.c @@ -106,7 +106,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); /* * Interrupt Throttle Rate (interrupts/sec) * - * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative */ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); #define DEFAULT_ITR 3 @@ -344,53 +344,60 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter) if (num_InterruptThrottleRate > bd) { adapter->itr = InterruptThrottleRate[bd]; - switch (adapter->itr) { - case 0: - e_info("%s turned off\n", opt.name); - break; - case 1: - e_info("%s set to dynamic mode\n", opt.name); - adapter->itr_setting = adapter->itr; - adapter->itr = 20000; - break; - case 3: - e_info("%s set to dynamic conservative mode\n", - opt.name); - adapter->itr_setting = adapter->itr; - adapter->itr = 20000; - break; - case 4: - e_info("%s set to simplified (2000-8000 ints) " - "mode\n", opt.name); - adapter->itr_setting = 4; - break; - default: - /* - * Save the setting, because the dynamic bits - * change itr. - */ - if (e1000_validate_option(&adapter->itr, &opt, - adapter) && - (adapter->itr == 3)) { - /* - * In case of invalid user value, - * default to conservative mode. - */ - adapter->itr_setting = adapter->itr; - adapter->itr = 20000; - } else { - /* - * Clear the lower two bits because - * they are used as control. 
- */ - adapter->itr_setting = - adapter->itr & ~3; - } - break; - } + + /* + * Make sure a message is printed for non-special + * values. And in case of an invalid option, display + * warning, use default and go through itr/itr_setting + * adjustment logic below + */ + if ((adapter->itr > 4) && + e1000_validate_option(&adapter->itr, &opt, adapter)) + adapter->itr = opt.def; } else { - adapter->itr_setting = opt.def; + /* + * If no option specified, use default value and go + * through the logic below to adjust itr/itr_setting + */ + adapter->itr = opt.def; + + /* + * Make sure a message is printed for non-special + * default values + */ + if (adapter->itr > 40) + e_info("%s set to default %d\n", opt.name, + adapter->itr); + } + + adapter->itr_setting = adapter->itr; + switch (adapter->itr) { + case 0: + e_info("%s turned off\n", opt.name); + break; + case 1: + e_info("%s set to dynamic mode\n", opt.name); adapter->itr = 20000; + break; + case 3: + e_info("%s set to dynamic conservative mode\n", + opt.name); + adapter->itr = 20000; + break; + case 4: + e_info("%s set to simplified (2000-8000 ints) mode\n", + opt.name); + break; + default: + /* + * Save the setting, because the dynamic bits + * change itr. + * + * Clear the lower two bits because + * they are used as control. + */ + adapter->itr_setting &= ~3; + break; } } { /* Interrupt Mode */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 5ec31598ee47..8683ca4748c8 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1111,9 +1111,12 @@ msi_only: adapter->flags |= IGB_FLAG_HAS_MSI; out: /* Notify the stack of the (possibly) reduced queue counts. */ + rtnl_lock(); netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues); - return netif_set_real_num_rx_queues(adapter->netdev, - adapter->num_rx_queues); + err = netif_set_real_num_rx_queues(adapter->netdev, - adapter->num_rx_queues); + rtnl_unlock(); + return err; } /** @@ -2771,8 +2774,6 @@ void igb_configure_tx_ring(struct igb_adapter *adapter, txdctl |= E1000_TXDCTL_QUEUE_ENABLE; wr32(E1000_TXDCTL(reg_idx), txdctl); - - netdev_tx_reset_queue(txring_txq(ring)); } /** @@ -3282,6 +3283,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) igb_unmap_and_free_tx_resource(tx_ring, buffer_info); } + netdev_tx_reset_queue(txring_txq(tx_ring)); + size = sizeof(struct igb_tx_buffer) * tx_ring->count; memset(tx_ring->tx_buffer_info, 0, size); @@ -6796,18 +6799,7 @@ static int igb_resume(struct device *dev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); - if (!rtnl_is_locked()) { - /* - * shut up ASSERT_RTNL() warning in - * netif_set_real_num_tx/rx_queues.
- */ - rtnl_lock(); - err = igb_init_interrupt_scheme(adapter); - rtnl_unlock(); - } else { - err = igb_init_interrupt_scheme(adapter); - } - if (err) { + if (igb_init_interrupt_scheme(adapter)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); return -ENOMEM; } diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index d61ca2a732f0..8ec74b07f940 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2731,14 +2731,14 @@ static int __devinit igbvf_probe(struct pci_dev *pdev, netdev->addr_len); } - if (!is_valid_ether_addr(netdev->perm_addr)) { + if (!is_valid_ether_addr(netdev->dev_addr)) { dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", netdev->dev_addr); err = -EIO; goto err_hw_init; } - memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len); + memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len); setup_timer(&adapter->watchdog_timer, &igbvf_watchdog, (unsigned long) adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 74e192107f9a..81b155589532 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -574,9 +574,6 @@ extern struct ixgbe_info ixgbe_82599_info; extern struct ixgbe_info ixgbe_X540_info; #ifdef CONFIG_IXGBE_DCB extern const struct dcbnl_rtnl_ops dcbnl_ops; -extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, - struct ixgbe_dcb_config *dst_dcb_cfg, - int tc_max); #endif extern char ixgbe_driver_name[]; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c index 652e4b09546d..32e5c02ff6d0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c @@ -44,18 +44,26 @@ #define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ #define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ -int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *scfg, - struct ixgbe_dcb_config *dcfg, int tc_max) +static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) { + struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; + struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg; struct tc_configuration *src = NULL; struct tc_configuration *dst = NULL; int i, j; int tx = DCB_TX_CONFIG; int rx = DCB_RX_CONFIG; int changes = 0; +#ifdef IXGBE_FCOE + struct dcb_app app = { + .selector = DCB_APP_IDTYPE_ETHTYPE, + .protocol = ETH_P_FCOE, + }; + u8 up = dcb_getapp(adapter->netdev, &app); - if (!scfg || !dcfg) - return changes; + if (up && !(up & (1 << adapter->fcoe.up))) + changes |= BIT_APP_UPCHG; +#endif for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; @@ -332,28 +340,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) struct ixgbe_adapter *adapter = netdev_priv(netdev); int ret = DCB_NO_HW_CHG; int i; -#ifdef IXGBE_FCOE - struct dcb_app app = { - .selector = DCB_APP_IDTYPE_ETHTYPE, - .protocol = ETH_P_FCOE, - }; - u8 up; - - /* In IEEE mode, use the IEEE Ethertype selector value */ - if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) { - app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE; - up = dcb_ieee_getapp_mask(netdev, &app); - } else { - up = dcb_getapp(netdev, &app); - } -#endif /* Fail command if not in CEE mode */ if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return ret; - adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, - 
&adapter->dcb_cfg, + adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter, MAX_TRAFFIC_CLASS); if (!adapter->dcb_set_bitmap) return ret; @@ -440,8 +432,13 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) * FCoE is using changes. This happens if the APP info * changes or the up2tc mapping is updated. */ - if ((up && !(up & (1 << adapter->fcoe.up))) || - (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) { + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { + struct dcb_app app = { + .selector = DCB_APP_IDTYPE_ETHTYPE, + .protocol = ETH_P_FCOE, + }; + u8 up = dcb_getapp(netdev, &app); + adapter->fcoe.up = ffs(up) - 1; ixgbe_dcbnl_devreset(netdev); ret = DCB_HW_CHG_RST; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 31a2bf76a346..cfe7d269590c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -1780,6 +1780,8 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); } + netdev_tx_reset_queue(txring_txq(tx_ring)); + /* re-map buffers to ring, store next to clean values */ ixgbe_alloc_rx_buffers(rx_ring, count); rx_ring->next_to_clean = rx_ntc; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 77ea4b716535..bc07933d67da 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -437,6 +437,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, */ if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && (fctl & FC_FC_END_SEQ)) { + skb_linearize(skb); crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); crc->fcoe_eof = FC_EOF_T; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 027d7a75be39..ed1b47dc0834 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -622,6 +622,16 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx, if (adapter->hw.mac.type == ixgbe_mac_82599EB) set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); +#ifdef IXGBE_FCOE + if (adapter->netdev->features & NETIF_F_FCOE_MTU) { + struct ixgbe_ring_feature *f; + f = &adapter->ring_feature[RING_F_FCOE]; + if ((rxr_idx >= f->mask) && + (rxr_idx < f->mask + f->indices)) + set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state); + } + +#endif /* IXGBE_FCOE */ /* apply Rx specific ring traits */ ring->count = adapter->rx_ring_count; ring->queue_index = rxr_idx; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 3e26b1f9ac75..467948e9ecd9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -2671,8 +2671,6 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, /* enable queue */ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), txdctl); - netdev_tx_reset_queue(txring_txq(ring)); - /* TXDCTL.EN will return 0 on 82598 if link is down, so skip it */ if (hw->mac.type == ixgbe_mac_82598EB && !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP)) @@ -3154,14 +3152,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) set_ring_rsc_enabled(rx_ring); else clear_ring_rsc_enabled(rx_ring); -#ifdef IXGBE_FCOE - if (netdev->features & NETIF_F_FCOE_MTU) { - struct ixgbe_ring_feature *f; - f = &adapter->ring_feature[RING_F_FCOE]; - if ((i >= f->mask) && (i < f->mask + f->indices)) - 
set_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state); - } -#endif /* IXGBE_FCOE */ } } @@ -4175,6 +4165,8 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info); } + netdev_tx_reset_queue(txring_txq(tx_ring)); + size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; memset(tx_ring->tx_buffer_info, 0, size); @@ -4426,8 +4418,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->dcb_cfg.pfc_mode_enable = false; adapter->dcb_set_bitmap = 0x00; adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; - ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg, - MAX_TRAFFIC_CLASS); + memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, + sizeof(adapter->temp_dcb_cfg)); #endif @@ -4836,7 +4828,9 @@ static int ixgbe_resume(struct pci_dev *pdev) pci_wake_from_d3(pdev, false); + rtnl_lock(); err = ixgbe_init_interrupt_scheme(adapter); + rtnl_unlock(); if (err) { e_dev_err("Cannot initialize interrupts for device\n"); return err; @@ -4872,17 +4866,15 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) netif_device_detach(netdev); if (netif_running(netdev)) { + rtnl_lock(); ixgbe_down(adapter); ixgbe_free_irq(adapter); ixgbe_free_all_tx_resources(adapter); ixgbe_free_all_rx_resources(adapter); + rtnl_unlock(); } ixgbe_clear_interrupt_scheme(adapter); -#ifdef CONFIG_DCB - kfree(adapter->ixgbe_ieee_pfc); - kfree(adapter->ixgbe_ieee_ets); -#endif #ifdef CONFIG_PM retval = pci_save_state(pdev); @@ -4893,6 +4885,16 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) if (wufc) { ixgbe_set_rx_mode(netdev); + /* + * enable the optics for both multispeed fiber and + * 82599 SFP+ fiber as we can use WoL. + */ + if (hw->mac.ops.enable_tx_laser && + (hw->phy.multispeed_fiber || + (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber && + hw->mac.type == ixgbe_mac_82599EB))) + hw->mac.ops.enable_tx_laser(hw); + /* turn on all-multi mode if wake on multicast is enabled */ if (wufc & IXGBE_WUFC_MC) { fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); @@ -7220,6 +7222,11 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev) ixgbe_release_hw_control(adapter); +#ifdef CONFIG_DCB + kfree(adapter->ixgbe_ieee_pfc); + kfree(adapter->ixgbe_ieee_ets); + +#endif iounmap(adapter->hw.hw_addr); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index c9b504e2dfc3..487a6c8bd4ec 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2494,8 +2494,13 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2, skb_copy_from_linear_data(re->skb, skb->data, length); skb->ip_summed = re->skb->ip_summed; skb->csum = re->skb->csum; + skb->rxhash = re->skb->rxhash; + skb->vlan_tci = re->skb->vlan_tci; + pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, length, PCI_DMA_FROMDEVICE); + re->skb->vlan_tci = 0; + re->skb->rxhash = 0; re->skb->ip_summed = CHECKSUM_NONE; skb_put(skb, length); } @@ -2580,9 +2585,6 @@ static struct sk_buff *sky2_receive(struct net_device *dev, struct sk_buff *skb = NULL; u16 count = (status & GMR_FS_LEN) >> 16; - if (status & GMR_FS_VLAN) - count -= VLAN_HLEN; /* Account for vlan tag */ - netif_printk(sky2, rx_status, KERN_DEBUG, dev, "rx slot %u status 0x%x len %d\n", sky2->rx_next, status, length); @@ -2590,6 +2592,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev, sky2->rx_next = (sky2->rx_next + 1) %
sky2->rx_pending; prefetch(sky2->rx_ring + sky2->rx_next); + if (vlan_tx_tag_present(re->skb)) + count -= VLAN_HLEN; /* Account for vlan tag */ + /* This chip has hardware problems that generates bogus status. * So do only marginal checking and expect higher level protocols * to handle crap frames. @@ -2647,11 +2652,8 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last) } static inline void sky2_skb_rx(const struct sky2_port *sky2, - u32 status, struct sk_buff *skb) + struct sk_buff *skb) { - if (status & GMR_FS_VLAN) - __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag)); - if (skb->ip_summed == CHECKSUM_NONE) netif_receive_skb(skb); else @@ -2705,6 +2707,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status) } } +static void sky2_rx_tag(struct sky2_port *sky2, u16 length) +{ + struct sk_buff *skb; + + skb = sky2->rx_ring[sky2->rx_next].skb; + __vlan_hwaccel_put_tag(skb, be16_to_cpu(length)); +} + static void sky2_rx_hash(struct sky2_port *sky2, u32 status) { struct sk_buff *skb; @@ -2763,8 +2773,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) } skb->protocol = eth_type_trans(skb, dev); - - sky2_skb_rx(sky2, status, skb); + sky2_skb_rx(sky2, skb); /* Stop after net poll weight */ if (++work_done >= to_do) @@ -2772,11 +2781,11 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) break; case OP_RXVLAN: - sky2->rx_tag = length; + sky2_rx_tag(sky2, length); break; case OP_RXCHKSVLAN: - sky2->rx_tag = length; + sky2_rx_tag(sky2, length); /* fall through */ case OP_RXCHKS: if (likely(dev->features & NETIF_F_RXCSUM)) diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h index ff6f58bf822a..3c896ce80b71 100644 --- a/drivers/net/ethernet/marvell/sky2.h +++ b/drivers/net/ethernet/marvell/sky2.h @@ -2241,7 +2241,6 @@ struct sky2_port { u16 rx_pending; u16 rx_data_size; u16 rx_nfrags; - u16 rx_tag; struct { unsigned long last; diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c index c722aa607d07..5e313e9a252f 100644 --- a/drivers/net/ethernet/micrel/ks8851.c +++ b/drivers/net/ethernet/micrel/ks8851.c @@ -618,10 +618,8 @@ static void ks8851_irq_work(struct work_struct *work) netif_dbg(ks, intr, ks->netdev, "%s: status 0x%04x\n", __func__, status); - if (status & IRQ_LCI) { - /* should do something about checking link status */ + if (status & IRQ_LCI) handled |= IRQ_LCI; - } if (status & IRQ_LDI) { u16 pmecr = ks8851_rdreg16(ks, KS_PMECR); @@ -684,6 +682,9 @@ static void ks8851_irq_work(struct work_struct *work) mutex_unlock(&ks->lock); + if (status & IRQ_LCI) + mii_check_link(&ks->mii); + if (status & IRQ_TXI) netif_wake_queue(ks->netdev); @@ -889,16 +890,17 @@ static int ks8851_net_stop(struct net_device *dev) netif_stop_queue(dev); mutex_lock(&ks->lock); + /* turn off the IRQs and ack any outstanding */ + ks8851_wrreg16(ks, KS_IER, 0x0000); + ks8851_wrreg16(ks, KS_ISR, 0xffff); + mutex_unlock(&ks->lock); /* stop any outstanding work */ flush_work(&ks->irq_work); flush_work(&ks->tx_work); flush_work(&ks->rxctrl_work); - /* turn off the IRQs and ack any outstanding */ - ks8851_wrreg16(ks, KS_IER, 0x0000); - ks8851_wrreg16(ks, KS_ISR, 0xffff); - + mutex_lock(&ks->lock); /* shutdown RX process */ ks8851_wrreg16(ks, KS_RXCR1, 0x0000); @@ -907,6 +909,7 @@ static int ks8851_net_stop(struct net_device *dev) /* set powermode to soft power down to save power */ ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN); + mutex_unlock(&ks->lock); /* ensure any queued tx buffers 
are dumped */ while (!skb_queue_empty(&ks->txq)) { @@ -918,7 +921,6 @@ static int ks8851_net_stop(struct net_device *dev) dev_kfree_skb(txb); } - mutex_unlock(&ks->lock); return 0; } @@ -1418,6 +1420,7 @@ static int __devinit ks8851_probe(struct spi_device *spi) struct net_device *ndev; struct ks8851_net *ks; int ret; + unsigned cider; ndev = alloc_etherdev(sizeof(struct ks8851_net)); if (!ndev) @@ -1484,8 +1487,8 @@ static int __devinit ks8851_probe(struct spi_device *spi) ks8851_soft_reset(ks, GRR_GSR); /* simple check for a valid chip being connected to the bus */ - - if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { + cider = ks8851_rdreg16(ks, KS_CIDER); + if ((cider & ~CIDER_REV_MASK) != CIDER_ID) { dev_err(&spi->dev, "failed to read device ID\n"); ret = -ENODEV; goto err_id; @@ -1516,15 +1519,14 @@ static int __devinit ks8851_probe(struct spi_device *spi) } netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n", - CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), - ndev->dev_addr, ndev->irq, + CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq, ks->rc_ccr & CCR_EEPROM ? "has" : "no"); return 0; err_netdev: - free_irq(ndev->irq, ndev); + free_irq(ndev->irq, ks); err_id: err_irq: diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c index b8104d9f4081..5ffde23ac8fb 100644 --- a/drivers/net/ethernet/micrel/ks8851_mll.c +++ b/drivers/net/ethernet/micrel/ks8851_mll.c @@ -40,7 +40,7 @@ #define DRV_NAME "ks8851_mll" static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; -#define MAX_RECV_FRAMES 32 +#define MAX_RECV_FRAMES 255 #define MAX_BUF_SIZE 2048 #define TX_BUF_SIZE 2000 #define RX_BUF_SIZE 2000 diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index ef723b185d85..eaf9ff0262a9 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -5675,7 +5675,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr) memcpy(hw->override_addr, mac->sa_data, ETH_ALEN); } - memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN); + memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN); interrupt = hw_block_intr(hw); diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index abc79076f867..b3287c0fe279 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -958,6 +958,11 @@ static inline void cp_start_hw (struct cp_private *cp) cpw8(Cmd, RxOn | TxOn); } +static void cp_enable_irq(struct cp_private *cp) +{ + cpw16_f(IntrMask, cp_intr_mask); +} + static void cp_init_hw (struct cp_private *cp) { struct net_device *dev = cp->dev; @@ -997,8 +1002,6 @@ static void cp_init_hw (struct cp_private *cp) cpw16(MultiIntr, 0); - cpw16_f(IntrMask, cp_intr_mask); - cpw8_f(Cfg9346, Cfg9346_Lock); } @@ -1130,6 +1133,8 @@ static int cp_open (struct net_device *dev) if (rc) goto err_out_hw; + cp_enable_irq(cp); + netif_carrier_off(dev); mii_check_media(&cp->mii_if, netif_msg_link(cp), true); netif_start_queue(dev); @@ -2031,6 +2036,7 @@ static int cp_resume (struct pci_dev *pdev) /* FIXME: sh*t may happen if the Rx ring buffer is depleted */ cp_init_rings_index (cp); cp_init_hw (cp); + cp_enable_irq(cp); netif_start_queue (dev); spin_lock_irqsave (&cp->lock, flags); diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index f54509377efa..ce6b44d1f252 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ 
-61,8 +61,12 @@ #define R8169_MSG_DEFAULT \ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) -#define TX_BUFFS_AVAIL(tp) \ - (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1) +#define TX_SLOTS_AVAIL(tp) \ + (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx) + +/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ +#define TX_FRAGS_READY_FOR(tp,nr_frags) \ + (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1)) /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ @@ -5115,7 +5119,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, u32 opts[2]; int frags; - if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) { + if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); goto err_stop_0; } @@ -5169,7 +5173,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, mmiowb(); - if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) { + if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must * not miss a ring update when it notices a stopped queue. */ @@ -5183,7 +5187,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, * can't. */ smp_mb(); - if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS) + if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) netif_wake_queue(dev); } @@ -5306,7 +5310,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) */ smp_mb(); if (netif_queue_stopped(dev) && - (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) { + TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { netif_wake_queue(dev); } /* diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 3cbfbffe3f00..4a0005342e65 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1349,7 +1349,7 @@ static int efx_probe_interrupts(struct efx_nic *efx) } /* RSS might be usable on VFs even if it is disabled on the PF */ - efx->rss_spread = (efx->n_rx_channels > 1 ? + efx->rss_spread = ((efx->n_rx_channels > 1 || !efx_sriov_wanted(efx)) ? 
efx->n_rx_channels : efx_vf_size(efx)); return 0; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 4a6971027076..cd3defb11ffb 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -1166,10 +1166,8 @@ smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat) /* Quickly dumps bad packets */ static void -smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes) +smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords) { - unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2; - if (likely(pktwords >= 4)) { unsigned int timeout = 500; unsigned int val; @@ -1233,7 +1231,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget) continue; } - skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN); + skb = netdev_alloc_skb(dev, pktwords << 2); if (unlikely(!skb)) { SMSC_WARN(pdata, rx_err, "Unable to allocate skb for rx packet"); @@ -1243,14 +1241,12 @@ static int smsc911x_poll(struct napi_struct *napi, int budget) break; } - skb->data = skb->head; - skb_reset_tail_pointer(skb); + pdata->ops->rx_readfifo(pdata, + (unsigned int *)skb->data, pktwords); /* Align IP on 16B boundary */ skb_reserve(skb, NET_IP_ALIGN); skb_put(skb, pktlength - 4); - pdata->ops->rx_readfifo(pdata, - (unsigned int *)skb->head, pktwords); skb->protocol = eth_type_trans(skb, dev); skb_checksum_none_assert(skb); netif_receive_skb(skb); @@ -1565,7 +1561,7 @@ static int smsc911x_open(struct net_device *dev) smsc911x_reg_write(pdata, FIFO_INT, temp); /* set RX Data offset to 2 bytes for alignment */ - smsc911x_reg_write(pdata, RX_CFG, (2 << 8)); + smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8)); /* enable NAPI polling before enabling RX interrupts */ napi_enable(&pdata->napi); @@ -2382,7 +2378,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); pdata = netdev_priv(dev); - dev->irq = irq_res->start; irq_flags = irq_res->flags & IRQF_TRIGGER_MASK; pdata->ioaddr = ioremap_nocache(res->start, res_size); @@ -2446,7 +2441,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) if (retval) { SMSC_WARN(pdata, probe, "Unable to claim requested irq: %d", dev->irq); - goto out_free_irq; + goto out_disable_resources; } retval = register_netdev(dev); diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index 558409ff4058..4ba969096717 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -2339,7 +2339,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state) netif_device_detach(dev); /* Switch off chip, remember WOL setting */ - gp->asleep_wol = gp->wake_on_lan; + gp->asleep_wol = !!gp->wake_on_lan; gem_do_stop(dev, gp->asleep_wol); /* Unlock the network stack */ diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 174a3348f676..08aff1a2087c 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1511,7 +1511,7 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd) static int match_first_device(struct device *dev, void *data) { - return 1; + return !strncmp(dev_name(dev), "davinci_mdio", 12); } /** diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 2757c7d6e633..e4e47088e26b 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ 
-181,6 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data) __davinci_mdio_reset(data); return -EAGAIN; } + + reg = __raw_readl(®s->user[0].access); + if ((reg & USERACCESS_GO) == 0) + return 0; + dev_err(data->dev, "timed out waiting for user access\n"); return -ETIMEDOUT; } diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 817ad3bc4957..efd36691ce54 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -228,7 +228,7 @@ tlan_get_skb(const struct tlan_list *tag) unsigned long addr; addr = tag->buffer[9].address; - addr |= (tag->buffer[8].address << 16) << 16; + addr |= ((unsigned long) tag->buffer[8].address << 16) << 16; return (struct sk_buff *) addr; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h index cc83af083fd7..44b8d2bad8c3 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -2,9 +2,7 @@ * Definitions for Xilinx Axi Ethernet device driver. * * Copyright (c) 2009 Secret Lab Technologies, Ltd. - * Copyright (c) 2010 Xilinx, Inc. All rights reserved. - * Copyright (c) 2012 Daniel Borkmann, - * Copyright (c) 2012 Ariane Keller, + * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. */ #ifndef XILINX_AXIENET_H diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 2fcbeba6814b..9c365e192a31 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -4,9 +4,9 @@ * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. - * Copyright (c) 2010 Xilinx, Inc. All rights reserved. - * Copyright (c) 2012 Daniel Borkmann, - * Copyright (c) 2012 Ariane Keller, + * Copyright (c) 2010 - 2011 Michal Simek + * Copyright (c) 2010 - 2011 PetaLogix + * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. * * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 * and Spartan6. diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c index d70b6e79f6c0..e90e1f46121e 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c @@ -2,9 +2,9 @@ * MDIO bus driver for the Xilinx Axi Ethernet device * * Copyright (c) 2009 Secret Lab Technologies, Ltd. - * Copyright (c) 2010 Xilinx, Inc. All rights reserved. - * Copyright (c) 2012 Daniel Borkmann, - * Copyright (c) 2012 Ariane Keller, + * Copyright (c) 2010 - 2011 Michal Simek + * Copyright (c) 2010 - 2011 PetaLogix + * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved. 
*/ #include diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index dd294783b5c5..2d59138db7f3 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -44,6 +44,7 @@ struct net_device_context { /* point back to our device context */ struct hv_device *device_ctx; struct delayed_work dwork; + struct work_struct work; }; @@ -51,30 +52,22 @@ static int ring_size = 128; module_param(ring_size, int, S_IRUGO); MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)"); -struct set_multicast_work { - struct work_struct work; - struct net_device *net; -}; - static void do_set_multicast(struct work_struct *w) { - struct set_multicast_work *swk = - container_of(w, struct set_multicast_work, work); - struct net_device *net = swk->net; - - struct net_device_context *ndevctx = netdev_priv(net); + struct net_device_context *ndevctx = + container_of(w, struct net_device_context, work); struct netvsc_device *nvdev; struct rndis_device *rdev; nvdev = hv_get_drvdata(ndevctx->device_ctx); - if (nvdev == NULL) - goto out; + if (nvdev == NULL || nvdev->ndev == NULL) + return; rdev = nvdev->extension; if (rdev == NULL) - goto out; + return; - if (net->flags & IFF_PROMISC) + if (nvdev->ndev->flags & IFF_PROMISC) rndis_filter_set_packet_filter(rdev, NDIS_PACKET_TYPE_PROMISCUOUS); else @@ -82,21 +75,13 @@ static void do_set_multicast(struct work_struct *w) NDIS_PACKET_TYPE_BROADCAST | NDIS_PACKET_TYPE_ALL_MULTICAST | NDIS_PACKET_TYPE_DIRECTED); - -out: - kfree(w); } static void netvsc_set_multicast_list(struct net_device *net) { - struct set_multicast_work *swk = - kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC); - if (swk == NULL) - return; + struct net_device_context *net_device_ctx = netdev_priv(net); - swk->net = net; - INIT_WORK(&swk->work, do_set_multicast); - schedule_work(&swk->work); + schedule_work(&net_device_ctx->work); } static int netvsc_open(struct net_device *net) @@ -125,6 +110,8 @@ static int netvsc_close(struct net_device *net) netif_tx_disable(net); + /* Make sure netvsc_set_multicast_list doesn't re-enable filter! 
*/ + cancel_work_sync(&net_device_ctx->work); ret = rndis_filter_close(device_obj); if (ret != 0) netdev_err(net, "unable to close device (ret %d).\n", ret); @@ -335,6 +322,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu) nvdev->start_remove = true; cancel_delayed_work_sync(&ndevctx->dwork); + cancel_work_sync(&ndevctx->work); netif_tx_disable(ndev); rndis_filter_device_remove(hdev); @@ -403,6 +391,7 @@ static int netvsc_probe(struct hv_device *dev, net_device_ctx->device_ctx = dev; hv_set_drvdata(dev, net); INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp); + INIT_WORK(&net_device_ctx->work, do_set_multicast); net->netdev_ops = &device_ops; @@ -456,6 +445,7 @@ static int netvsc_remove(struct hv_device *dev) ndev_ctx = netdev_priv(net); cancel_delayed_work_sync(&ndev_ctx->dwork); + cancel_work_sync(&ndev_ctx->work); /* Stop outbound asap */ netif_tx_disable(net); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index f975afdc315c..025367a94add 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -259,7 +259,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) xmit_world: skb->ip_summed = ip_summed; - skb_set_dev(skb, vlan->lowerdev); + skb->dev = vlan->lowerdev; return dev_queue_xmit(skb); } diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 0427c6561c84..cb8fd5069dbe 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -1,5 +1,6 @@ #include #include +#include #include #include #include @@ -759,6 +760,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, struct macvlan_dev *vlan; int ret; int vnet_hdr_len = 0; + int vlan_offset = 0; + int copied; if (q->flags & IFF_VNET_HDR) { struct virtio_net_hdr vnet_hdr; @@ -773,18 +776,48 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr))) return -EFAULT; } + copied = vnet_hdr_len; - len = min_t(int, skb->len, len); + if (!vlan_tx_tag_present(skb)) + len = min_t(int, skb->len, len); + else { + int copy; + struct { + __be16 h_vlan_proto; + __be16 h_vlan_TCI; + } veth; + veth.h_vlan_proto = htons(ETH_P_8021Q); + veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); - ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len); + vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); + len = min_t(int, skb->len + VLAN_HLEN, len); + copy = min_t(int, vlan_offset, len); + ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); + len -= copy; + copied += copy; + if (ret || !len) + goto done; + + copy = min_t(int, sizeof(veth), len); + ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy); + len -= copy; + copied += copy; + if (ret || !len) + goto done; + } + + ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len); + copied += len; + +done: rcu_read_lock_bh(); vlan = rcu_dereference_bh(q->vlan); if (vlan) - macvlan_count_rx(vlan, len, ret == 0, 0); + macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); rcu_read_unlock_bh(); - return ret ? ret : (len + vnet_hdr_len); + return ret ? 
ret : copied; } static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb, diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index f08c85acf761..5ac46f5226f3 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -40,6 +40,7 @@ MODULE_LICENSE("GPL"); #define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */ #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ #define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ +#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ static int ip175c_config_init(struct phy_device *phydev) { @@ -185,6 +186,15 @@ static int ip175c_config_aneg(struct phy_device *phydev) return 0; } +static int ip101a_g_ack_interrupt(struct phy_device *phydev) +{ + int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS); + if (err < 0) + return err; + + return 0; +} + static struct phy_driver ip175c_driver = { .phy_id = 0x02430d80, .name = "ICPlus IP175C", @@ -204,7 +214,6 @@ static struct phy_driver ip1001_driver = { .phy_id_mask = 0x0ffffff0, .features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, - .flags = PHY_HAS_INTERRUPT, .config_init = &ip1001_config_init, .config_aneg = &genphy_config_aneg, .read_status = &genphy_read_status, @@ -220,6 +229,7 @@ static struct phy_driver ip101a_g_driver = { .features = PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause, .flags = PHY_HAS_INTERRUPT, + .ack_interrupt = ip101a_g_ack_interrupt, .config_init = &ip101a_g_config_init, .config_aneg = &genphy_config_aneg, .read_status = &genphy_read_status, diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 33f8c51968b6..21d7151fb0ab 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -235,7 +235,7 @@ struct ppp_net { /* Prototypes. */ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, struct file *file, unsigned int cmd, unsigned long arg); -static int ppp_xmit_process(struct ppp *ppp); +static void ppp_xmit_process(struct ppp *ppp); static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); static void ppp_push(struct ppp *ppp); static void ppp_channel_push(struct channel *pch); @@ -969,8 +969,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev) put_unaligned_be16(proto, pp); skb_queue_tail(&ppp->file.xq, skb); - if (!ppp_xmit_process(ppp)) - netif_stop_queue(dev); + ppp_xmit_process(ppp); return NETDEV_TX_OK; outf: @@ -1048,11 +1047,10 @@ static void ppp_setup(struct net_device *dev) * Called to do any work queued up on the transmit side * that can now be done. */ -static int +static void ppp_xmit_process(struct ppp *ppp) { struct sk_buff *skb; - int ret = 0; ppp_xmit_lock(ppp); if (!ppp->closing) { @@ -1062,13 +1060,12 @@ ppp_xmit_process(struct ppp *ppp) ppp_send_frame(ppp, skb); /* If there's no work left to do, tell the core net code that we can accept some more. */ - if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) { + if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) netif_wake_queue(ppp->dev); - ret = 1; - } + else + netif_stop_queue(ppp->dev); } ppp_xmit_unlock(ppp); - return ret; } static inline struct sk_buff * diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index 5ee032cafade..42b5151aa78a 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -355,7 +355,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, u32 packet_len; u32 padbytes = 0xffff0000; - padlen = ((skb->len + 4) % 512) ? 
0 : 4; + padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4; if ((!skb_cloned(skb)) && ((headroom + tailroom) >= (4 + padlen))) { @@ -377,7 +377,7 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, cpu_to_le32s(&packet_len); skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len)); - if ((skb->len % 512) == 0) { + if (padlen) { cpu_to_le32s(&padbytes); memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); skb_put(skb, sizeof(padbytes)); diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 90a30026a931..00880edba048 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -83,6 +83,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) struct cdc_state *info = (void *) &dev->data; int status; int rndis; + bool android_rndis_quirk = false; struct usb_driver *driver = driver_of(intf); struct usb_cdc_mdlm_desc *desc = NULL; struct usb_cdc_mdlm_detail_desc *detail = NULL; @@ -195,6 +196,11 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) info->control, info->u->bSlaveInterface0, info->data); + /* fall back to hard-wiring for RNDIS */ + if (rndis) { + android_rndis_quirk = true; + goto next_desc; + } goto bad_desc; } if (info->control != intf) { @@ -271,11 +277,15 @@ next_desc: /* Microsoft ActiveSync based and some regular RNDIS devices lack the * CDC descriptors, so we'll hard-wire the interfaces and not check * for descriptors. + * + * Some Android RNDIS devices have a CDC Union descriptor pointing + * to non-existing interfaces. Ignore that and attempt the same + * hard-wired 0 and 1 interfaces. */ - if (rndis && !info->u) { + if (rndis && (!info->u || android_rndis_quirk)) { info->control = usb_ifnum_to_if(dev->udev, 0); info->data = usb_ifnum_to_if(dev->udev, 1); - if (!info->control || !info->data) { + if (!info->control || !info->data || info->control != intf) { dev_dbg(&intf->dev, "rndis: master #0/%p slave #1/%p\n", info->control, diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 552d24bf862e..d316503b35d4 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -365,6 +365,27 @@ static const struct driver_info qmi_wwan_force_int4 = { .data = BIT(4), /* interface whitelist bitmap */ }; +/* Sierra Wireless provide equally useless interface descriptors + * Devices in QMI mode can be switched between two different + * configurations: + * a) USB interface #8 is QMI/wwan + * b) USB interfaces #8, #19 and #20 are QMI/wwan + * + * Both configurations provide a number of other interfaces (serial++), + * some of which have the same endpoint configuration as we expect, so + * a whitelist or blacklist is necessary. + * + * FIXME: The below whitelist should include BIT(20). It does not + * because I cannot get it to work... 
+ */ +static const struct driver_info qmi_wwan_sierra = { + .description = "Sierra Wireless wwan/QMI device", + .flags = FLAG_WWAN, + .bind = qmi_wwan_bind_gobi, + .unbind = qmi_wwan_unbind_shared, + .manage_power = qmi_wwan_manage_power, + .data = BIT(8) | BIT(19), /* interface whitelist bitmap */ +}; #define HUAWEI_VENDOR_ID 0x12D1 #define QMI_GOBI_DEVICE(vend, prod) \ @@ -445,6 +466,15 @@ static const struct usb_device_id products[] = { .bInterfaceProtocol = 0xff, .driver_info = (unsigned long)&qmi_wwan_force_int4, }, + { /* Sierra Wireless MC77xx in QMI mode */ + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, + .idVendor = 0x1199, + .idProduct = 0x68a2, + .bInterfaceClass = 0xff, + .bInterfaceSubClass = 0xff, + .bInterfaceProtocol = 0xff, + .driver_info = (unsigned long)&qmi_wwan_sierra, + }, {QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ {QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 187d01ccb973..00103a8c5e04 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -98,7 +98,7 @@ static int __must_check smsc75xx_read_reg(struct usbnet *dev, u32 index, if (unlikely(ret < 0)) netdev_warn(dev->net, - "Failed to read register index 0x%08x", index); + "Failed to read reg index 0x%08x: %d", index, ret); le32_to_cpus(buf); *data = *buf; @@ -128,7 +128,7 @@ static int __must_check smsc75xx_write_reg(struct usbnet *dev, u32 index, if (unlikely(ret < 0)) netdev_warn(dev->net, - "Failed to write register index 0x%08x", index); + "Failed to write reg index 0x%08x: %d", index, ret); kfree(buf); @@ -171,7 +171,7 @@ static int smsc75xx_mdio_read(struct net_device *netdev, int phy_id, int idx) idx &= dev->mii.reg_num_mask; addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) - | MII_ACCESS_READ; + | MII_ACCESS_READ | MII_ACCESS_BUSY; ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); check_warn_goto_done(ret, "Error writing MII_ACCESS"); @@ -210,7 +210,7 @@ static void smsc75xx_mdio_write(struct net_device *netdev, int phy_id, int idx, idx &= dev->mii.reg_num_mask; addr = ((phy_id << MII_ACCESS_PHY_ADDR_SHIFT) & MII_ACCESS_PHY_ADDR) | ((idx << MII_ACCESS_REG_ADDR_SHIFT) & MII_ACCESS_REG_ADDR) - | MII_ACCESS_WRITE; + | MII_ACCESS_WRITE | MII_ACCESS_BUSY; ret = smsc75xx_write_reg(dev, MII_ACCESS, addr); check_warn_goto_done(ret, "Error writing MII_ACCESS"); @@ -508,9 +508,10 @@ static int smsc75xx_link_reset(struct usbnet *dev) u16 lcladv, rmtadv; int ret; - /* clear interrupt status */ + /* read and write to clear phy interrupt status */ ret = smsc75xx_mdio_read(dev->net, mii->phy_id, PHY_INT_SRC); check_warn_return(ret, "Error reading PHY_INT_SRC"); + smsc75xx_mdio_write(dev->net, mii->phy_id, PHY_INT_SRC, 0xffff); ret = smsc75xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL); check_warn_return(ret, "Error writing INT_STS"); @@ -643,7 +644,7 @@ static int smsc75xx_set_mac_address(struct usbnet *dev) static int smsc75xx_phy_initialize(struct usbnet *dev) { - int bmcr, timeout = 0; + int bmcr, ret, timeout = 0; /* Initialize MII structure */ dev->mii.dev = dev->net; @@ -651,6 +652,7 @@ static int smsc75xx_phy_initialize(struct usbnet *dev) dev->mii.mdio_write = smsc75xx_mdio_write; dev->mii.phy_id_mask = 0x1f; dev->mii.reg_num_mask = 0x1f; + dev->mii.supports_gmii = 1; dev->mii.phy_id = 
SMSC75XX_INTERNAL_PHY_ID; /* reset phy and wait for reset to complete */ @@ -661,7 +663,7 @@ static int smsc75xx_phy_initialize(struct usbnet *dev) bmcr = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMCR); check_warn_return(bmcr, "Error reading MII_BMCR"); timeout++; - } while ((bmcr & MII_BMCR) && (timeout < 100)); + } while ((bmcr & BMCR_RESET) && (timeout < 100)); if (timeout >= 100) { netdev_warn(dev->net, "timeout on PHY Reset"); @@ -671,10 +673,13 @@ static int smsc75xx_phy_initialize(struct usbnet *dev) smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_CTRL1000, + ADVERTISE_1000FULL); - /* read to clear */ - smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); - check_warn_return(bmcr, "Error reading PHY_INT_SRC"); + /* read and write to clear phy interrupt status */ + ret = smsc75xx_mdio_read(dev->net, dev->mii.phy_id, PHY_INT_SRC); + check_warn_return(ret, "Error reading PHY_INT_SRC"); + smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_SRC, 0xffff); smsc75xx_mdio_write(dev->net, dev->mii.phy_id, PHY_INT_MASK, PHY_INT_MASK_DEFAULT); @@ -946,6 +951,14 @@ static int smsc75xx_reset(struct usbnet *dev) ret = smsc75xx_write_reg(dev, INT_EP_CTL, buf); check_warn_return(ret, "Failed to write INT_EP_CTL: %d", ret); + /* allow mac to detect speed and duplex from phy */ + ret = smsc75xx_read_reg(dev, MAC_CR, &buf); + check_warn_return(ret, "Failed to read MAC_CR: %d", ret); + + buf |= (MAC_CR_ADD | MAC_CR_ASD); + ret = smsc75xx_write_reg(dev, MAC_CR, buf); + check_warn_return(ret, "Failed to write MAC_CR: %d", ret); + ret = smsc75xx_read_reg(dev, MAC_TX, &buf); check_warn_return(ret, "Failed to read MAC_TX: %d", ret); @@ -1051,6 +1064,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->ethtool_ops = &smsc75xx_ethtool_ops; dev->net->flags |= IFF_MULTICAST; dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD; + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; return 0; } @@ -1211,7 +1225,7 @@ static const struct driver_info smsc75xx_info = { .rx_fixup = smsc75xx_rx_fixup, .tx_fixup = smsc75xx_tx_fixup, .status = smsc75xx_status, - .flags = FLAG_ETHER | FLAG_SEND_ZLP, + .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR, }; static const struct usb_device_id products[] = { diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 5f19f84d3494..94ae66999f59 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -1017,6 +1017,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->ethtool_ops = &smsc95xx_ethtool_ops; dev->net->flags |= IFF_MULTICAST; dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM; + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; return 0; } @@ -1191,7 +1192,7 @@ static const struct driver_info smsc95xx_info = { .rx_fixup = smsc95xx_rx_fixup, .tx_fixup = smsc95xx_tx_fixup, .status = smsc95xx_status, - .flags = FLAG_ETHER | FLAG_SEND_ZLP, + .flags = FLAG_ETHER | FLAG_SEND_ZLP | FLAG_LINK_INTR, }; static const struct usb_device_id products[] = { diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index b7b3f5b0d406..2d927fb4adf4 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -210,6 +210,7 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf) } else { usb_fill_int_urb(dev->interrupt, dev->udev, pipe, buf, maxp, intr_complete, dev, 
period); + dev->interrupt->transfer_flags |= URB_FREE_BUFFER; dev_dbg(&intf->dev, "status ep%din, %d bytes period %d\n", usb_pipeendpoint(pipe), maxp, period); @@ -1443,7 +1444,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) status = register_netdev (net); if (status) - goto out3; + goto out4; netif_info(dev, probe, dev->net, "register '%s' at usb-%s-%s, %s, %pM\n", udev->dev.driver->name, @@ -1461,6 +1462,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) return 0; +out4: + usb_free_urb(dev->interrupt); out3: if (info->unbind) info->unbind (dev, udev); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 4de2760c5937..af8acc85f4bb 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -626,16 +626,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) /* This can happen with OOM and indirect buffers. */ if (unlikely(capacity < 0)) { if (likely(capacity == -ENOMEM)) { - if (net_ratelimit()) { + if (net_ratelimit()) dev_warn(&dev->dev, "TX queue failure: out of memory\n"); - } else { + } else { dev->stats.tx_fifo_errors++; if (net_ratelimit()) dev_warn(&dev->dev, "Unexpected TX queue failure: %d\n", capacity); - } } dev->stats.tx_dropped++; kfree_skb(skb); diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c index ebb9f24eefb5..1a623183cbe5 100644 --- a/drivers/net/wan/farsync.c +++ b/drivers/net/wan/farsync.c @@ -2483,6 +2483,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent) pr_err("Control memory remap failed\n"); pci_release_regions(pdev); pci_disable_device(pdev); + iounmap(card->mem); kfree(card); return -ENODEV; } diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c index 8faa129da5a0..aec33cc207fd 100644 --- a/drivers/net/wireless/ath/ath5k/ahb.c +++ b/drivers/net/wireless/ath/ath5k/ahb.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "ath5k.h" #include "debug.h" @@ -119,7 +120,7 @@ static int ath_ahb_probe(struct platform_device *pdev) if (res == NULL) { dev_err(&pdev->dev, "no IRQ resource found\n"); ret = -ENXIO; - goto err_out; + goto err_iounmap; } irq = res->start; @@ -128,7 +129,7 @@ static int ath_ahb_probe(struct platform_device *pdev) if (hw == NULL) { dev_err(&pdev->dev, "no memory for ieee80211_hw\n"); ret = -ENOMEM; - goto err_out; + goto err_iounmap; } ah = hw->priv; @@ -185,6 +186,8 @@ static int ath_ahb_probe(struct platform_device *pdev) err_free_hw: ieee80211_free_hw(hw); platform_set_drvdata(pdev, NULL); + err_iounmap: + iounmap(mem); err_out: return ret; } @@ -217,6 +220,7 @@ static int ath_ahb_remove(struct platform_device *pdev) } ath5k_deinit_ah(ah); + iounmap(ah->iobase); platform_set_drvdata(pdev, NULL); ieee80211_free_hw(hw); diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index d7d8e9199140..aba088005b22 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -869,7 +869,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah, ar5008_hw_set_channel_regs(ah, chan); ar5008_hw_init_chain_masks(ah); ath9k_olc_init(ah); - ath9k_hw_apply_txpower(ah, chan); + ath9k_hw_apply_txpower(ah, chan, false); /* Write analog registers */ if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) { diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c index 59647a3ceb7f..3d400e8d6535 100644 --- 
a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c @@ -54,7 +54,7 @@ void ar9003_paprd_enable(struct ath_hw *ah, bool val) if (val) { ah->paprd_table_write_done = true; - ath9k_hw_apply_txpower(ah, chan); + ath9k_hw_apply_txpower(ah, chan, false); } REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0, diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index bc992b237ae5..600aca9fe6b1 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -373,7 +373,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah, else spur_subchannel_sd = 0; - spur_freq_sd = (freq_offset << 9) / 11; + spur_freq_sd = ((freq_offset + 10) << 9) / 11; } else { if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL, @@ -382,7 +382,7 @@ static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah, else spur_subchannel_sd = 1; - spur_freq_sd = (freq_offset << 9) / 11; + spur_freq_sd = ((freq_offset - 10) << 9) / 11; } @@ -694,7 +694,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah, ar9003_hw_override_ini(ah); ar9003_hw_set_channel_regs(ah, chan); ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); - ath9k_hw_apply_txpower(ah, chan); + ath9k_hw_apply_txpower(ah, chan, false); if (AR_SREV_9462(ah)) { if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0, diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c index f272236d8053..b34e8b2990b1 100644 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c @@ -824,6 +824,8 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah, regulatory->max_power_level = ratesArray[i]; } + ath9k_hw_update_regulatory_maxpower(ah); + if (test) return; diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 6c69e4e8b1cb..fa84e37bf091 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -1454,7 +1454,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah, return false; } ath9k_hw_set_clockrate(ah); - ath9k_hw_apply_txpower(ah, chan); + ath9k_hw_apply_txpower(ah, chan, false); ath9k_hw_rfbus_done(ah); if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) @@ -2652,7 +2652,8 @@ static int get_antenna_gain(struct ath_hw *ah, struct ath9k_channel *chan) return ah->eep_ops->get_eeprom(ah, gain_param); } -void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan) +void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan, + bool test) { struct ath_regulatory *reg = ath9k_hw_regulatory(ah); struct ieee80211_channel *channel; @@ -2673,7 +2674,7 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan) ah->eep_ops->set_txpower(ah, chan, ath9k_regd_get_ctl(reg, chan), - ant_reduction, new_pwr, false); + ant_reduction, new_pwr, test); } void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test) @@ -2686,7 +2687,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test) if (test) channel->max_power = MAX_RATE_POWER / 2; - ath9k_hw_apply_txpower(ah, chan); + ath9k_hw_apply_txpower(ah, chan, test); if (test) channel->max_power = DIV_ROUND_UP(reg->max_power_level, 2); diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h index aa1680a0c7fd..e88f182ff45c 100644 --- a/drivers/net/wireless/ath/ath9k/hw.h +++ b/drivers/net/wireless/ath/ath9k/hw.h @@ -985,7 +985,8 @@ void ath9k_hw_name(struct 
ath_hw *ah, char *hw_name, size_t len); /* PHY */ void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled, u32 *coef_mantissa, u32 *coef_exponent); -void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan); +void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan, + bool test); /* * Code Specific to AR5008, AR9001 or AR9002, diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 2504ab005589..798ea57252b4 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1548,6 +1548,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_conf *conf = &hw->conf; + bool reset_channel = false; ath9k_ps_wakeup(sc); mutex_lock(&sc->mutex); @@ -1556,6 +1557,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE); if (sc->ps_idle) ath_cancel_work(sc); + else + /* + * The chip needs a reset to properly wake up from + * full sleep + */ + reset_channel = ah->chip_fullsleep; } /* @@ -1584,7 +1591,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) } } - if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { + if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) { struct ieee80211_channel *curchan = hw->conf.channel; int pos = curchan->hw_value; int old_pos = -1; diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 834e6bc45e8b..23eaa1b26ebe 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -1820,6 +1820,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, struct ath_frame_info *fi = get_frame_info(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ath_buf *bf; + int fragno; u16 seqno; bf = ath_tx_get_buffer(sc); @@ -1831,9 +1832,16 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, ATH_TXBUF_RESET(bf); if (tid) { + fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG; seqno = tid->seq_next; hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); - INCR(tid->seq_next, IEEE80211_SEQ_MAX); + + if (fragno) + hdr->seq_ctrl |= cpu_to_le16(fragno); + + if (!ieee80211_has_morefrags(hdr->frame_control)) + INCR(tid->seq_next, IEEE80211_SEQ_MAX); + bf->bf_state.seqno = seqno; } diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index c79e6638c88d..e4d6dc2e37d1 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c @@ -4827,8 +4827,14 @@ static int b43_op_start(struct ieee80211_hw *hw) out_mutex_unlock: mutex_unlock(&wl->mutex); - /* reload configuration */ - b43_op_config(hw, ~0); + /* + * Configuration may have been overwritten during initialization. + * Reload the configuration, but only if initialization was + * successful. Reloading the configuration after a failed init + * may hang the system. 
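The ath9k xmit change a few hunks up leans on the layout of the 802.11 sequence-control field: the low four bits carry the fragment number, the upper twelve the sequence number, and the per-TID counter may only advance once the last fragment has been queued so that every fragment of one MSDU shares a sequence number. A minimal sketch of that packing, with illustrative names and byte-order handling left out:

#include <stdbool.h>
#include <stdint.h>

#define SEQ_SEQ_SHIFT   4
#define SEQ_MAX         0x1000          /* 12-bit sequence space */
#define SCTL_FRAG       0x000F          /* fragment-number mask */

/* Pack sequence and fragment numbers; advance the per-TID counter
 * only when this is the last fragment of the MSDU. */
static uint16_t assign_seq_ctrl(uint16_t *tid_seq_next,
                                uint16_t fragno, bool more_frags)
{
        uint16_t seq_ctrl = (uint16_t)(*tid_seq_next << SEQ_SEQ_SHIFT) |
                            (fragno & SCTL_FRAG);

        if (!more_frags)
                *tid_seq_next = (*tid_seq_next + 1) % SEQ_MAX;

        return seq_ctrl;
}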
+ */ + if (!err) + b43_op_config(hw, ~0); return err; } diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 4688904908ec..758c115b556e 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@ -108,9 +108,15 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev, sdio_release_host(sdfunc); } } else if (regaddr == SDIO_CCCR_ABORT) { + sdfunc = kmemdup(sdiodev->func[0], sizeof(struct sdio_func), + GFP_KERNEL); + if (!sdfunc) + return -ENOMEM; + sdfunc->num = 0; sdio_claim_host(sdfunc); sdio_writeb(sdfunc, *byte, regaddr, &err_ret); sdio_release_host(sdfunc); + kfree(sdfunc); } else if (regaddr < 0xF0) { brcmf_dbg(ERROR, "F0 Wr:0x%02x: write disallowed\n", regaddr); err_ret = -EPERM; @@ -486,7 +492,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, kfree(bus_if); return -ENOMEM; } - sdiodev->func[0] = func->card->sdio_func[0]; + sdiodev->func[0] = func; sdiodev->func[1] = func; sdiodev->bus_if = bus_if; bus_if->bus_priv.sdio = sdiodev; diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 2bf5dda29291..e2b34e1563f4 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -574,6 +574,8 @@ struct brcmf_sdio { struct task_struct *dpc_tsk; struct completion dpc_wait; + struct list_head dpc_tsklst; + spinlock_t dpc_tl_lock; struct semaphore sdsem; @@ -2594,29 +2596,59 @@ clkwait: return resched; } +static inline void brcmf_sdbrcm_adddpctsk(struct brcmf_sdio *bus) +{ + struct list_head *new_hd; + unsigned long flags; + + if (in_interrupt()) + new_hd = kzalloc(sizeof(struct list_head), GFP_ATOMIC); + else + new_hd = kzalloc(sizeof(struct list_head), GFP_KERNEL); + if (new_hd == NULL) + return; + + spin_lock_irqsave(&bus->dpc_tl_lock, flags); + list_add_tail(new_hd, &bus->dpc_tsklst); + spin_unlock_irqrestore(&bus->dpc_tl_lock, flags); +} + static int brcmf_sdbrcm_dpc_thread(void *data) { struct brcmf_sdio *bus = (struct brcmf_sdio *) data; + struct list_head *cur_hd, *tmp_hd; + unsigned long flags; allow_signal(SIGTERM); /* Run until signal received */ while (1) { if (kthread_should_stop()) break; - if (!wait_for_completion_interruptible(&bus->dpc_wait)) { - /* Call bus dpc unless it indicated down - (then clean stop) */ - if (bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN) { - if (brcmf_sdbrcm_dpc(bus)) - complete(&bus->dpc_wait); - } else { + + if (list_empty(&bus->dpc_tsklst)) + if (wait_for_completion_interruptible(&bus->dpc_wait)) + break; + + spin_lock_irqsave(&bus->dpc_tl_lock, flags); + list_for_each_safe(cur_hd, tmp_hd, &bus->dpc_tsklst) { + spin_unlock_irqrestore(&bus->dpc_tl_lock, flags); + + if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) { /* after stopping the bus, exit thread */ brcmf_sdbrcm_bus_stop(bus->sdiodev->dev); bus->dpc_tsk = NULL; + spin_lock_irqsave(&bus->dpc_tl_lock, flags); break; } - } else - break; + + if (brcmf_sdbrcm_dpc(bus)) + brcmf_sdbrcm_adddpctsk(bus); + + spin_lock_irqsave(&bus->dpc_tl_lock, flags); + list_del(cur_hd); + kfree(cur_hd); + } + spin_unlock_irqrestore(&bus->dpc_tl_lock, flags); } return 0; } @@ -2669,8 +2701,10 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) /* Schedule DPC if needed to send queued packet(s) */ if (!bus->dpc_sched) { bus->dpc_sched = true; - if (bus->dpc_tsk) + if (bus->dpc_tsk) { + 
brcmf_sdbrcm_adddpctsk(bus); complete(&bus->dpc_wait); + } } return ret; @@ -3514,8 +3548,10 @@ void brcmf_sdbrcm_isr(void *arg) brcmf_dbg(ERROR, "isr w/o interrupt configured!\n"); bus->dpc_sched = true; - if (bus->dpc_tsk) + if (bus->dpc_tsk) { + brcmf_sdbrcm_adddpctsk(bus); complete(&bus->dpc_wait); + } } static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) @@ -3559,8 +3595,10 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) bus->ipend = true; bus->dpc_sched = true; - if (bus->dpc_tsk) + if (bus->dpc_tsk) { + brcmf_sdbrcm_adddpctsk(bus); complete(&bus->dpc_wait); + } } } @@ -3897,6 +3935,8 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) } /* Initialize DPC thread */ init_completion(&bus->dpc_wait); + INIT_LIST_HEAD(&bus->dpc_tsklst); + spin_lock_init(&bus->dpc_tl_lock); bus->dpc_tsk = kthread_run(brcmf_sdbrcm_dpc_thread, bus, "brcmf_dpc"); if (IS_ERR(bus->dpc_tsk)) { diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c index 231ddf4a674f..b4d92792c502 100644 --- a/drivers/net/wireless/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c @@ -847,8 +847,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs) */ if (!(txs->status & TX_STATUS_AMPDU) && (txs->status & TX_STATUS_INTERMEDIATE)) { - wiphy_err(wlc->wiphy, "%s: INTERMEDIATE but not AMPDU\n", - __func__); + BCMMSG(wlc->wiphy, "INTERMEDIATE but not AMPDU\n"); return false; } @@ -7614,6 +7613,7 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh, { int len_mpdu; struct ieee80211_rx_status rx_status; + struct ieee80211_hdr *hdr; memset(&rx_status, 0, sizeof(rx_status)); prep_mac80211_status(wlc, rxh, p, &rx_status); @@ -7623,6 +7623,13 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh, skb_pull(p, D11_PHY_HDR_LEN); __skb_trim(p, len_mpdu); + /* unmute transmit */ + if (wlc->hw->suspended_fifos) { + hdr = (struct ieee80211_hdr *)p->data; + if (ieee80211_is_beacon(hdr->frame_control)) + brcms_b_mute(wlc->hw, false); + } + memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status)); ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p); } diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 2b022571a859..1779db3aa2b0 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c @@ -2191,6 +2191,7 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) { int rc = 0; unsigned long flags; + unsigned long now, end; spin_lock_irqsave(&priv->lock, flags); if (priv->status & STATUS_HCMD_ACTIVE) { @@ -2232,10 +2233,20 @@ static int __ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd) } spin_unlock_irqrestore(&priv->lock, flags); + now = jiffies; + end = now + HOST_COMPLETE_TIMEOUT; +again: rc = wait_event_interruptible_timeout(priv->wait_command_queue, !(priv-> status & STATUS_HCMD_ACTIVE), - HOST_COMPLETE_TIMEOUT); + end - now); + if (rc < 0) { + now = jiffies; + if (time_before(now, end)) + goto again; + rc = 0; + } + if (rc == 0) { spin_lock_irqsave(&priv->lock, flags); if (priv->status & STATUS_HCMD_ACTIVE) { diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c index 5b0d888f746b..8d80e233bc7a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-1000.c +++ b/drivers/net/wireless/iwlwifi/iwl-1000.c @@ -46,8 +46,8 @@ #include "iwl-prph.h" /* Highest firmware API version supported */ -#define IWL1000_UCODE_API_MAX 6 -#define 
IWL100_UCODE_API_MAX 6 +#define IWL1000_UCODE_API_MAX 5 +#define IWL100_UCODE_API_MAX 5 /* Oldest version we won't warn about */ #define IWL1000_UCODE_API_OK 5 @@ -226,5 +226,5 @@ const struct iwl_cfg iwl100_bg_cfg = { IWL_DEVICE_100, }; -MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK)); +MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c index 5635b9e2c69e..ea108622e0bd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-2000.c +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c @@ -51,10 +51,10 @@ #define IWL135_UCODE_API_MAX 6 /* Oldest version we won't warn about */ -#define IWL2030_UCODE_API_OK 5 -#define IWL2000_UCODE_API_OK 5 -#define IWL105_UCODE_API_OK 5 -#define IWL135_UCODE_API_OK 5 +#define IWL2030_UCODE_API_OK 6 +#define IWL2000_UCODE_API_OK 6 +#define IWL105_UCODE_API_OK 6 +#define IWL135_UCODE_API_OK 6 /* Lowest firmware API version supported */ #define IWL2030_UCODE_API_MIN 5 @@ -328,7 +328,7 @@ const struct iwl_cfg iwl135_bgn_cfg = { .ht_params = &iwl2000_ht_params, }; -MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL2000_MODULE_FIRMWARE(IWL2000_UCODE_API_OK)); +MODULE_FIRMWARE(IWL2030_MODULE_FIRMWARE(IWL2030_UCODE_API_OK)); +MODULE_FIRMWARE(IWL105_MODULE_FIRMWARE(IWL105_UCODE_API_OK)); +MODULE_FIRMWARE(IWL135_MODULE_FIRMWARE(IWL135_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index a805e97b89af..de0920c74cdd 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c @@ -51,6 +51,10 @@ #define IWL5000_UCODE_API_MAX 5 #define IWL5150_UCODE_API_MAX 2 +/* Oldest version we won't warn about */ +#define IWL5000_UCODE_API_OK 5 +#define IWL5150_UCODE_API_OK 2 + /* Lowest firmware API version supported */ #define IWL5000_UCODE_API_MIN 1 #define IWL5150_UCODE_API_MIN 1 @@ -326,6 +330,7 @@ static const struct iwl_ht_params iwl5000_ht_params = { #define IWL_DEVICE_5000 \ .fw_name_pre = IWL5000_FW_PRE, \ .ucode_api_max = IWL5000_UCODE_API_MAX, \ + .ucode_api_ok = IWL5000_UCODE_API_OK, \ .ucode_api_min = IWL5000_UCODE_API_MIN, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ @@ -371,6 +376,7 @@ const struct iwl_cfg iwl5350_agn_cfg = { .name = "Intel(R) WiMAX/WiFi Link 5350 AGN", .fw_name_pre = IWL5000_FW_PRE, .ucode_api_max = IWL5000_UCODE_API_MAX, + .ucode_api_ok = IWL5000_UCODE_API_OK, .ucode_api_min = IWL5000_UCODE_API_MIN, .max_inst_size = IWLAGN_RTC_INST_SIZE, .max_data_size = IWLAGN_RTC_DATA_SIZE, @@ -386,6 +392,7 @@ const struct iwl_cfg iwl5350_agn_cfg = { #define IWL_DEVICE_5150 \ .fw_name_pre = IWL5150_FW_PRE, \ .ucode_api_max = IWL5150_UCODE_API_MAX, \ + .ucode_api_ok = IWL5150_UCODE_API_OK, \ .ucode_api_min = IWL5150_UCODE_API_MIN, \ .max_inst_size = IWLAGN_RTC_INST_SIZE, \ .max_data_size = IWLAGN_RTC_DATA_SIZE, \ @@ -409,5 +416,5 @@ const struct iwl_cfg iwl5150_abg_cfg = { IWL_DEVICE_5150, }; -MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX)); 
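The iwlwifi firmware hunks above move the MODULE_FIRMWARE() annotations from the newest API level (_MAX) to the known-good level (_OK), so module tooling pulls in a ucode file the driver has actually been validated against. A hypothetical illustration of how such a firmware name is assembled from a prefix and an API number; the real prefixes and helper macros live in the iwlwifi headers:

#include <stdio.h>

#define FW_PRE          "iwlwifi-5000-"         /* illustrative prefix */
#define FW_NAME(api)    FW_PRE #api ".ucode"    /* stringify the API level */

int main(void)
{
        /* Advertising the _OK level requests a firmware the driver is
         * known to run well with, even when a newer _MAX API exists. */
        puts(FW_NAME(5));       /* prints iwlwifi-5000-5.ucode */
        return 0;
}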
+MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_OK)); +MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c index 64060cd738b5..f0c91505a7f7 100644 --- a/drivers/net/wireless/iwlwifi/iwl-6000.c +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c @@ -53,6 +53,8 @@ /* Oldest version we won't warn about */ #define IWL6000_UCODE_API_OK 4 #define IWL6000G2_UCODE_API_OK 5 +#define IWL6050_UCODE_API_OK 5 +#define IWL6000G2B_UCODE_API_OK 6 /* Lowest firmware API version supported */ #define IWL6000_UCODE_API_MIN 4 @@ -388,7 +390,7 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = { #define IWL_DEVICE_6030 \ .fw_name_pre = IWL6030_FW_PRE, \ .ucode_api_max = IWL6000G2_UCODE_API_MAX, \ - .ucode_api_ok = IWL6000G2_UCODE_API_OK, \ + .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \ .ucode_api_min = IWL6000G2_UCODE_API_MIN, \ .max_inst_size = IWL60_RTC_INST_SIZE, \ .max_data_size = IWL60_RTC_DATA_SIZE, \ @@ -557,6 +559,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = { }; MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK)); -MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_OK)); +MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_OK)); +MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2B_UCODE_API_OK)); diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c index f4b84d1596e3..22474608a70b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c @@ -773,8 +773,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, struct sk_buff *skb; __le16 fc = hdr->frame_control; struct iwl_rxon_context *ctx; - struct page *p; - int offset; + unsigned int hdrlen, fraglen; /* We only process data packets if the interface is open */ if (unlikely(!priv->is_open)) { @@ -788,16 +787,24 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv, iwlagn_set_decrypted_flag(priv, hdr, ampdu_status, stats)) return; - skb = dev_alloc_skb(128); + /* Dont use dev_alloc_skb(), we'll have enough headroom once + * ieee80211_hdr pulled. 
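The iwl-agn-rx.c change here splits each received frame: the 802.11 header is copied into the skb's small linear area and the remaining payload stays in the receive page, attached as a fragment sized by the page's truesize. A simplified, non-kernel model of that split, using stand-in buffer types rather than struct sk_buff:

#include <stddef.h>
#include <string.h>

struct fake_skb {
        unsigned char linear[128];      /* small, cache-hot header area */
        size_t linear_len;
        const void *frag;               /* points into the receive page */
        size_t frag_len;
};

/* Copy as much as fits (the 802.11 header in practice) into the linear
 * area; whatever is left stays in the original buffer as a fragment. */
static void rx_split(struct fake_skb *skb, const unsigned char *pkt, size_t len)
{
        size_t hdrlen = len < sizeof(skb->linear) ? len : sizeof(skb->linear);

        memcpy(skb->linear, pkt, hdrlen);
        skb->linear_len = hdrlen;

        skb->frag = pkt + hdrlen;
        skb->frag_len = len - hdrlen;
}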
+ */ + skb = alloc_skb(128, GFP_ATOMIC); if (!skb) { - IWL_ERR(priv, "dev_alloc_skb failed\n"); + IWL_ERR(priv, "alloc_skb failed\n"); return; } + hdrlen = min_t(unsigned int, len, skb_tailroom(skb)); + memcpy(skb_put(skb, hdrlen), hdr, hdrlen); + fraglen = len - hdrlen; - offset = (void *)hdr - rxb_addr(rxb); - p = rxb_steal_page(rxb); - skb_add_rx_frag(skb, 0, p, offset, len, len); + if (fraglen) { + int offset = (void *)hdr + hdrlen - rxb_addr(rxb); + skb_add_rx_frag(skb, 0, rxb_steal_page(rxb), offset, + fraglen, rxb->truesize); + } iwl_update_stats(priv, false, fc, len); /* diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c index f1226dbf789d..2a9a16f901c3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c @@ -863,7 +863,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work) void iwlagn_prepare_restart(struct iwl_priv *priv) { - struct iwl_rxon_context *ctx; bool bt_full_concurrent; u8 bt_ci_compliance; u8 bt_load; @@ -872,8 +871,6 @@ void iwlagn_prepare_restart(struct iwl_priv *priv) lockdep_assert_held(&priv->mutex); - for_each_context(priv, ctx) - ctx->vif = NULL; priv->is_open = 0; /* diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h index 90208094b8eb..74bce97a8600 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -104,15 +104,29 @@ * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04 * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte * aligned (address bits 0-7 must be 0). + * Later devices have 20 (5000 series) or 30 (higher) queues, but the registers + * for them are in different places. * * Bit fields in each pointer register: * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned */ -#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) -#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) +#define FH_MEM_CBBC_0_15_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0) +#define FH_MEM_CBBC_0_15_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10) +#define FH_MEM_CBBC_16_19_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBF0) +#define FH_MEM_CBBC_16_19_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) +#define FH_MEM_CBBC_20_31_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xB20) +#define FH_MEM_CBBC_20_31_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xB80) -/* Find TFD CB base pointer for given queue (range 0-15). 
*/ -#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4) +/* Find TFD CB base pointer for given queue */ +static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) +{ + if (chnl < 16) + return FH_MEM_CBBC_0_15_LOWER_BOUND + 4 * chnl; + if (chnl < 20) + return FH_MEM_CBBC_16_19_LOWER_BOUND + 4 * (chnl - 16); + WARN_ON_ONCE(chnl >= 32); + return FH_MEM_CBBC_20_31_LOWER_BOUND + 4 * (chnl - 20); +} /** diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c index b6805f8e9a01..c24a7134a6f9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c +++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c @@ -1244,6 +1244,7 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, struct iwl_rxon_context *tmp, *ctx = NULL; int err; enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif); + bool reset = false; IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", viftype, vif->addr); @@ -1265,6 +1266,13 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, tmp->interface_modes | tmp->exclusive_interface_modes; if (tmp->vif) { + /* On reset we need to add the same interface again */ + if (tmp->vif == vif) { + reset = true; + ctx = tmp; + break; + } + /* check if this busy context is exclusive */ if (tmp->exclusive_interface_modes & BIT(tmp->vif->type)) { @@ -1291,7 +1299,7 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw, ctx->vif = vif; err = iwl_setup_interface(priv, ctx); - if (!err) + if (!err || reset) goto out; ctx->vif = NULL; diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h index 75dc20bd965b..3b1069290fa9 100644 --- a/drivers/net/wireless/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/iwlwifi/iwl-prph.h @@ -223,12 +223,33 @@ #define SCD_AIT (SCD_BASE + 0x0c) #define SCD_TXFACT (SCD_BASE + 0x10) #define SCD_ACTIVE (SCD_BASE + 0x14) -#define SCD_QUEUE_WRPTR(x) (SCD_BASE + 0x18 + (x) * 4) -#define SCD_QUEUE_RDPTR(x) (SCD_BASE + 0x68 + (x) * 4) #define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) #define SCD_AGGR_SEL (SCD_BASE + 0x248) #define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) -#define SCD_QUEUE_STATUS_BITS(x) (SCD_BASE + 0x10c + (x) * 4) + +static inline unsigned int SCD_QUEUE_WRPTR(unsigned int chnl) +{ + if (chnl < 20) + return SCD_BASE + 0x18 + chnl * 4; + WARN_ON_ONCE(chnl >= 32); + return SCD_BASE + 0x284 + (chnl - 20) * 4; +} + +static inline unsigned int SCD_QUEUE_RDPTR(unsigned int chnl) +{ + if (chnl < 20) + return SCD_BASE + 0x68 + chnl * 4; + WARN_ON_ONCE(chnl >= 32); + return SCD_BASE + 0x2B4 + (chnl - 20) * 4; +} + +static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl) +{ + if (chnl < 20) + return SCD_BASE + 0x10c + chnl * 4; + WARN_ON_ONCE(chnl >= 32); + return SCD_BASE + 0x384 + (chnl - 20) * 4; +} /*********************** END TX SCHEDULER *************************************/ diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c index 8b1a7988e176..aa7aea168138 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c +++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c @@ -374,8 +374,9 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans, if (WARN_ON(!rxb)) return; + rxcb.truesize = PAGE_SIZE << hw_params(trans).rx_page_order; dma_unmap_page(trans->dev, rxb->page_dma, - PAGE_SIZE << hw_params(trans).rx_page_order, + rxcb.truesize, DMA_FROM_DEVICE); rxcb._page = rxb->page; diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h 
b/drivers/net/wireless/iwlwifi/iwl-trans.h index 0c81cbaa8088..fdf97886a5e4 100644 --- a/drivers/net/wireless/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/iwlwifi/iwl-trans.h @@ -260,6 +260,7 @@ static inline void iwl_free_resp(struct iwl_host_cmd *cmd) struct iwl_rx_cmd_buffer { struct page *_page; + unsigned int truesize; }; static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r) diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c index 3fa1ecebadfd..2fa879b015b6 100644 --- a/drivers/net/wireless/libertas/cfg.c +++ b/drivers/net/wireless/libertas/cfg.c @@ -103,7 +103,7 @@ static const u32 cipher_suites[] = { * Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1 * in the firmware spec */ -static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type) +static int lbs_auth_to_authtype(enum nl80211_auth_type auth_type) { int ret = -ENOTSUPP; @@ -1411,7 +1411,12 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev, goto done; } - lbs_set_authtype(priv, sme); + ret = lbs_set_authtype(priv, sme); + if (ret == -ENOTSUPP) { + wiphy_err(wiphy, "unsupported authtype 0x%x\n", sme->auth_type); + goto done; + } + lbs_set_radio(priv, preamble, 1); /* Do the actual association */ diff --git a/drivers/net/wireless/mwifiex/pcie.h b/drivers/net/wireless/mwifiex/pcie.h index 445ff21772e2..2f218f9a3fd3 100644 --- a/drivers/net/wireless/mwifiex/pcie.h +++ b/drivers/net/wireless/mwifiex/pcie.h @@ -48,15 +48,15 @@ #define PCIE_HOST_INT_STATUS_MASK 0xC3C #define PCIE_SCRATCH_2_REG 0xC40 #define PCIE_SCRATCH_3_REG 0xC44 -#define PCIE_SCRATCH_4_REG 0xCC0 -#define PCIE_SCRATCH_5_REG 0xCC4 -#define PCIE_SCRATCH_6_REG 0xCC8 -#define PCIE_SCRATCH_7_REG 0xCCC -#define PCIE_SCRATCH_8_REG 0xCD0 -#define PCIE_SCRATCH_9_REG 0xCD4 -#define PCIE_SCRATCH_10_REG 0xCD8 -#define PCIE_SCRATCH_11_REG 0xCDC -#define PCIE_SCRATCH_12_REG 0xCE0 +#define PCIE_SCRATCH_4_REG 0xCD0 +#define PCIE_SCRATCH_5_REG 0xCD4 +#define PCIE_SCRATCH_6_REG 0xCD8 +#define PCIE_SCRATCH_7_REG 0xCDC +#define PCIE_SCRATCH_8_REG 0xCE0 +#define PCIE_SCRATCH_9_REG 0xCE4 +#define PCIE_SCRATCH_10_REG 0xCE8 +#define PCIE_SCRATCH_11_REG 0xCEC +#define PCIE_SCRATCH_12_REG 0xCF0 #define CPU_INTR_DNLD_RDY BIT(0) #define CPU_INTR_DOOR_BELL BIT(1) diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 288b035a3579..cc15fdb36060 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -1941,6 +1941,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev) rtl_deinit_deferred_work(hw); rtlpriv->intf_ops->adapter_stop(hw); } + rtlpriv->cfg->ops->disable_interrupt(hw); /*deinit rfkill */ rtl_deinit_rfkill(hw); diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c index 41302c7b1ad0..d1afb8e3b2ef 100644 --- a/drivers/net/wireless/wl1251/main.c +++ b/drivers/net/wireless/wl1251/main.c @@ -479,6 +479,7 @@ static void wl1251_op_stop(struct ieee80211_hw *hw) cancel_work_sync(&wl->irq_work); cancel_work_sync(&wl->tx_work); cancel_work_sync(&wl->filter_work); + cancel_delayed_work_sync(&wl->elp_work); mutex_lock(&wl->mutex); diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c index f78694295c39..1b851f650e07 100644 --- a/drivers/net/wireless/wl1251/sdio.c +++ b/drivers/net/wireless/wl1251/sdio.c @@ -315,8 +315,8 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func) if (wl->irq) free_irq(wl->irq, wl); - kfree(wl_sdio); wl1251_free_hw(wl); + kfree(wl_sdio); 
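The wl1251 remove path above reorders the teardown so the SDIO glue structure is freed only after wl1251_free_hw() has run, presumably because the core can still reach it while unwinding. The same ordering rule, reduced to a generic sketch with hypothetical names:

#include <stdlib.h>

struct glue { int irq; };                       /* bus-glue private data */
struct hw   { struct glue *glue; };

static void hw_free(struct hw *hw)
{
        (void)hw->glue->irq;    /* teardown may still look at the glue */
        free(hw);
}

static void remove_device(struct hw *hw)
{
        struct glue *glue = hw->glue;

        hw_free(hw);            /* last user of the glue data */
        free(glue);             /* only now is it safe to drop it */
}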
sdio_claim_host(func); sdio_release_irq(func); diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 8644d5372e7f..42cfcd9eb9aa 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -44,6 +44,7 @@ #include #include /* for proc_mckinley_root */ #include /* for proc_runway_root */ +#include /* for PAGE0 */ #include /* for PDC_MODEL_* */ #include /* for is_pdc_pat() */ #include diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 083a49fee56a..165274c064bc 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o obj-$(CONFIG_PARISC) += setup-bus.o obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o obj-$(CONFIG_PPC) += setup-bus.o +obj-$(CONFIG_FRV) += setup-bus.o obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o obj-$(CONFIG_X86_VISWS) += setup-irq.o obj-$(CONFIG_MN10300) += setup-bus.o diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 0f150f271c2a..1929c0c63b75 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -200,7 +200,7 @@ static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev) return PCI_D1; case ACPI_STATE_D2: return PCI_D2; - case ACPI_STATE_D3: + case ACPI_STATE_D3_HOT: return PCI_D3hot; case ACPI_STATE_D3_COLD: return PCI_D3cold; @@ -223,7 +223,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) [PCI_D0] = ACPI_STATE_D0, [PCI_D1] = ACPI_STATE_D1, [PCI_D2] = ACPI_STATE_D2, - [PCI_D3hot] = ACPI_STATE_D3, + [PCI_D3hot] = ACPI_STATE_D3_HOT, [PCI_D3cold] = ACPI_STATE_D3 }; int error = -EINVAL; diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index ec3b8cc188af..df6296c5f47b 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -908,10 +908,6 @@ static int pinctrl_groups_show(struct seq_file *s, void *what) const struct pinctrl_ops *ops = pctldev->desc->pctlops; unsigned selector = 0; - /* No grouping */ - if (!ops) - return 0; - mutex_lock(&pinctrl_mutex); seq_puts(s, "registered pin groups:\n"); @@ -1225,6 +1221,19 @@ static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev) #endif +static int pinctrl_check_ops(struct pinctrl_dev *pctldev) +{ + const struct pinctrl_ops *ops = pctldev->desc->pctlops; + + if (!ops || + !ops->list_groups || + !ops->get_group_name || + !ops->get_group_pins) + return -EINVAL; + + return 0; +} + /** * pinctrl_register() - register a pin controller device * @pctldesc: descriptor for this pin controller @@ -1256,6 +1265,14 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, INIT_LIST_HEAD(&pctldev->gpio_ranges); pctldev->dev = dev; + /* check core ops for sanity */ + ret = pinctrl_check_ops(pctldev); + if (ret) { + pr_err("%s pinctrl ops lacks necessary functions\n", + pctldesc->name); + goto out_err; + } + /* If we're implementing pinmuxing, check the ops for sanity */ if (pctldesc->pmxops) { ret = pinmux_check_ops(pctldev); diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c index bc8384c6f3eb..639db4d0aa76 100644 --- a/drivers/platform/x86/acerhdf.c +++ b/drivers/platform/x86/acerhdf.c @@ -50,7 +50,7 @@ */ #undef START_IN_KERNEL_MODE -#define DRV_VER "0.5.24" +#define DRV_VER "0.5.26" /* * According to the Atom N270 datasheet, @@ -83,8 +83,8 @@ static int kernelmode; #endif static unsigned int interval = 10; -static unsigned int fanon = 63000; -static unsigned int fanoff = 58000; +static unsigned int fanon = 60000; +static unsigned int fanoff = 53000; static unsigned int verbose; static 
unsigned int fanstate = ACERHDF_FAN_AUTO; static char force_bios[16]; @@ -150,6 +150,8 @@ static const struct bios_settings_t bios_tbl[] = { {"Acer", "AOA150", "v0.3308", 0x55, 0x58, {0x20, 0x00} }, {"Acer", "AOA150", "v0.3309", 0x55, 0x58, {0x20, 0x00} }, {"Acer", "AOA150", "v0.3310", 0x55, 0x58, {0x20, 0x00} }, + /* LT1005u */ + {"Acer", "LT-10Q", "v0.3310", 0x55, 0x58, {0x20, 0x00} }, /* Acer 1410 */ {"Acer", "Aspire 1410", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, {"Acer", "Aspire 1410", "v0.3113", 0x55, 0x58, {0x9e, 0x00} }, @@ -161,6 +163,7 @@ static const struct bios_settings_t bios_tbl[] = { {"Acer", "Aspire 1410", "v1.3303", 0x55, 0x58, {0x9e, 0x00} }, {"Acer", "Aspire 1410", "v1.3308", 0x55, 0x58, {0x9e, 0x00} }, {"Acer", "Aspire 1410", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, + {"Acer", "Aspire 1410", "v1.3314", 0x55, 0x58, {0x9e, 0x00} }, /* Acer 1810xx */ {"Acer", "Aspire 1810TZ", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, {"Acer", "Aspire 1810T", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, @@ -183,29 +186,44 @@ static const struct bios_settings_t bios_tbl[] = { {"Acer", "Aspire 1810TZ", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, {"Acer", "Aspire 1810T", "v1.3310", 0x55, 0x58, {0x9e, 0x00} }, {"Acer", "Aspire 1810TZ", "v1.3314", 0x55, 0x58, {0x9e, 0x00} }, + {"Acer", "Aspire 1810T", "v1.3314", 0x55, 0x58, {0x9e, 0x00} }, /* Acer 531 */ + {"Acer", "AO531h", "v0.3104", 0x55, 0x58, {0x20, 0x00} }, {"Acer", "AO531h", "v0.3201", 0x55, 0x58, {0x20, 0x00} }, + {"Acer", "AO531h", "v0.3304", 0x55, 0x58, {0x20, 0x00} }, + /* Acer 751 */ + {"Acer", "AO751h", "V0.3212", 0x55, 0x58, {0x21, 0x00} }, + /* Acer 1825 */ + {"Acer", "Aspire 1825PTZ", "V1.3118", 0x55, 0x58, {0x9e, 0x00} }, + {"Acer", "Aspire 1825PTZ", "V1.3127", 0x55, 0x58, {0x9e, 0x00} }, + /* Acer TravelMate 7730 */ + {"Acer", "TravelMate 7730G", "v0.3509", 0x55, 0x58, {0xaf, 0x00} }, /* Gateway */ - {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} }, - {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} }, - {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00} }, - {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00} }, - {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00} }, + {"Gateway", "AOA110", "v0.3103", 0x55, 0x58, {0x21, 0x00} }, + {"Gateway", "AOA150", "v0.3103", 0x55, 0x58, {0x20, 0x00} }, + {"Gateway", "LT31", "v1.3103", 0x55, 0x58, {0x9e, 0x00} }, + {"Gateway", "LT31", "v1.3201", 0x55, 0x58, {0x9e, 0x00} }, + {"Gateway", "LT31", "v1.3302", 0x55, 0x58, {0x9e, 0x00} }, + {"Gateway", "LT31", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} }, /* Packard Bell */ - {"Packard Bell", "DOA150", "v0.3104", 0x55, 0x58, {0x21, 0x00} }, - {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} }, - {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} }, - {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} }, - {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00} }, - {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00} }, - {"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, - {"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00} }, - {"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00} }, - {"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00} }, - {"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00} }, - {"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00} }, - {"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00} }, - {"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "DOA150", 
"v0.3104", 0x55, 0x58, {0x21, 0x00} }, + {"Packard Bell", "DOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} }, + {"Packard Bell", "AOA110", "v0.3105", 0x55, 0x58, {0x21, 0x00} }, + {"Packard Bell", "AOA150", "v0.3105", 0x55, 0x58, {0x20, 0x00} }, + {"Packard Bell", "ENBFT", "V1.3118", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "ENBFT", "V1.3127", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "DOTMU", "v1.3303", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "DOTMU", "v0.3120", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "DOTMU", "v0.3108", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "DOTMU", "v0.3113", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "DOTMU", "v0.3115", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "DOTMU", "v0.3117", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "DOTMU", "v0.3119", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "DOTMU", "v1.3204", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "DOTMA", "v1.3201", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "DOTMA", "v1.3302", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "DOTMA", "v1.3303t", 0x55, 0x58, {0x9e, 0x00} }, + {"Packard Bell", "DOTVR46", "v1.3308", 0x55, 0x58, {0x9e, 0x00} }, /* pewpew-terminator */ {"", "", "", 0, 0, {0, 0} } }; @@ -701,15 +719,20 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Peter Feuerer"); MODULE_DESCRIPTION("Aspire One temperature and fan driver"); MODULE_ALIAS("dmi:*:*Acer*:pnAOA*:"); +MODULE_ALIAS("dmi:*:*Acer*:pnAO751h*:"); MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1410*:"); MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1810*:"); +MODULE_ALIAS("dmi:*:*Acer*:pnAspire*1825PTZ:"); MODULE_ALIAS("dmi:*:*Acer*:pnAO531*:"); +MODULE_ALIAS("dmi:*:*Acer*:TravelMate*7730G:"); MODULE_ALIAS("dmi:*:*Gateway*:pnAOA*:"); MODULE_ALIAS("dmi:*:*Gateway*:pnLT31*:"); MODULE_ALIAS("dmi:*:*Packard*Bell*:pnAOA*:"); MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOA*:"); MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMU*:"); +MODULE_ALIAS("dmi:*:*Packard*Bell*:pnENBFT*:"); MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTMA*:"); +MODULE_ALIAS("dmi:*:*Packard*Bell*:pnDOTVR46*:"); module_init(acerhdf_init); module_exit(acerhdf_exit); diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index a05fc9c955d8..e6c08ee8d46c 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c @@ -212,6 +212,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { }, .driver_data = &quirk_dell_vostro_v130, }, + { } }; static struct calling_interface_buffer *buffer; diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c index f7ba316e0ed6..0ffdb3cde2bb 100644 --- a/drivers/platform/x86/intel_ips.c +++ b/drivers/platform/x86/intel_ips.c @@ -1565,7 +1565,7 @@ static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id) ips->poll_turbo_status = true; if (!ips_get_i915_syms(ips)) { - dev_err(&dev->dev, "failed to get i915 symbols, graphics turbo disabled\n"); + dev_info(&dev->dev, "failed to get i915 symbols, graphics turbo disabled until i915 loads\n"); ips->gpu_turbo_enabled = false; } else { dev_dbg(&dev->dev, "graphics turbo enabled\n"); diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index 0a3594c7e912..bcbad8452a6f 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -78,7 +78,7 @@ static int __devinit mfld_pb_probe(struct platform_device *pdev) input_set_capability(input, EV_KEY, KEY_POWER); - error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0, + 
error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_NO_SUSPEND, DRIVER_NAME, input); if (error) { dev_err(&pdev->dev, "Unable to request irq %d for mfld power" diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index e70dd382a009..046fb1bd8619 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1431,7 +1431,10 @@ void devm_regulator_put(struct regulator *regulator) rc = devres_destroy(regulator->dev, devm_regulator_release, devm_regulator_match, regulator); - WARN_ON(rc); + if (rc == 0) + regulator_put(regulator); + else + WARN_ON(rc); } EXPORT_SYMBOL_GPL(devm_regulator_put); diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c index 96579296f04d..17a58c56eebf 100644 --- a/drivers/regulator/max8997.c +++ b/drivers/regulator/max8997.c @@ -684,7 +684,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev, } new_val++; - } while (desc->min + desc->step + new_val <= desc->max); + } while (desc->min + desc->step * new_val <= desc->max); new_idx = tmp_idx; new_val = tmp_val; diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index cd188ab72f79..c293d0cdb104 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -902,6 +902,7 @@ read_rtc: } ds1307->nvram->attr.name = "nvram"; ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR; + sysfs_bin_attr_init(ds1307->nvram); ds1307->nvram->read = ds1307_nvram_read, ds1307->nvram->write = ds1307_nvram_write, ds1307->nvram->size = chip->nvram_size; diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c index 42f5f829b3ee..029e421baaed 100644 --- a/drivers/rtc/rtc-mpc5121.c +++ b/drivers/rtc/rtc-mpc5121.c @@ -360,12 +360,11 @@ static int __devinit mpc5121_rtc_probe(struct platform_device *op) &mpc5200_rtc_ops, THIS_MODULE); } - rtc->rtc->uie_unsupported = 1; - if (IS_ERR(rtc->rtc)) { err = PTR_ERR(rtc->rtc); goto out_free_irq; } + rtc->rtc->uie_unsupported = 1; return 0; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 120955c66410..8334dadc681d 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -1672,7 +1672,8 @@ static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd) { QETH_DBF_TEXT(SETUP, 2, "cfgblkt"); - if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) { + if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && + (prcd[76] == 0xF5 || prcd[76] == 0xF6)) { card->info.blkt.time_total = 250; card->info.blkt.inter_packet = 5; card->info.blkt.inter_packet_jumbo = 15; @@ -4540,7 +4541,8 @@ static void qeth_determine_capabilities(struct qeth_card *card) goto out_offline; } qeth_configure_unitaddr(card, prcd); - qeth_configure_blkt_default(card, prcd); + if (ddev_offline) + qeth_configure_blkt_default(card, prcd); kfree(prcd); rc = qdio_get_ssqd_desc(ddev, &card->ssqd); diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 351dc0b86fab..a3a056a9db67 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -218,6 +218,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev, if (!shost->shost_gendev.parent) shost->shost_gendev.parent = dev ? 
dev : &platform_bus; + if (!dma_dev) + dma_dev = shost->shost_gendev.parent; + shost->dma_dev = dma_dev; error = device_add(&shost->shost_gendev); diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index e002cd466e9a..467dc38246f9 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -4549,8 +4549,12 @@ static int ipr_ata_slave_alloc(struct scsi_device *sdev) ENTER; if (sdev->sdev_target) sata_port = sdev->sdev_target->hostdata; - if (sata_port) + if (sata_port) { rc = ata_sas_port_init(sata_port->ap); + if (rc == 0) + rc = ata_sas_sync_probe(sata_port->ap); + } + if (rc) ipr_slave_destroy(sdev); diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index d4bf9c12ecd4..45385f531649 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -192,22 +192,27 @@ static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost) static bool sci_controller_isr(struct isci_host *ihost) { - if (sci_controller_completion_queue_has_entries(ihost)) { + if (sci_controller_completion_queue_has_entries(ihost)) return true; - } else { - /* - * we have a spurious interrupt it could be that we have already - * emptied the completion queue from a previous interrupt */ - writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); - /* - * There is a race in the hardware that could cause us not to be notified - * of an interrupt completion if we do not take this step. We will mask - * then unmask the interrupts so if there is another interrupt pending - * the clearing of the interrupt source we get the next interrupt message. */ + /* we have a spurious interrupt it could be that we have already + * emptied the completion queue from a previous interrupt + * FIXME: really!? + */ + writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); + + /* There is a race in the hardware that could cause us not to be + * notified of an interrupt completion if we do not take this + * step. We will mask then unmask the interrupts so if there is + * another interrupt pending the clearing of the interrupt + * source we get the next interrupt message. 
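The isci interrupt-service change here wraps the spurious-interrupt workaround (mask, then unmask, so anything still pending is re-latched) in the controller lock and gates it on the IHOST_IRQ_ENABLED flag, so a concurrent disable cannot be silently undone. A stripped-down sketch of that guard, with register access and locking reduced to placeholders:

#include <stdbool.h>

struct ctrl {
        volatile unsigned int *int_mask;        /* stand-in for the SMU mask register */
        bool irq_enabled;                       /* protected by the controller lock */
};

static void kick_pending_irq(struct ctrl *c)
{
        /* take the controller lock here in the real driver */
        if (c->irq_enabled) {
                *c->int_mask = 0xFF000000u;     /* mask */
                *c->int_mask = 0;               /* unmask: re-latches anything pending */
        }
        /* release the controller lock */
}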
+ */ + spin_lock(&ihost->scic_lock); + if (test_bit(IHOST_IRQ_ENABLED, &ihost->flags)) { writel(0xFF000000, &ihost->smu_registers->interrupt_mask); writel(0, &ihost->smu_registers->interrupt_mask); } + spin_unlock(&ihost->scic_lock); return false; } @@ -642,7 +647,6 @@ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status co if (completion_status != SCI_SUCCESS) dev_info(&ihost->pdev->dev, "controller start timed out, continuing...\n"); - isci_host_change_state(ihost, isci_ready); clear_bit(IHOST_START_PENDING, &ihost->flags); wake_up(&ihost->eventq); } @@ -657,12 +661,7 @@ int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) sas_drain_work(ha); - dev_dbg(&ihost->pdev->dev, - "%s: ihost->status = %d, time = %ld\n", - __func__, isci_host_get_state(ihost), time); - return 1; - } /** @@ -704,14 +703,15 @@ static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost) static void sci_controller_enable_interrupts(struct isci_host *ihost) { - BUG_ON(ihost->smu_registers == NULL); + set_bit(IHOST_IRQ_ENABLED, &ihost->flags); writel(0, &ihost->smu_registers->interrupt_mask); } void sci_controller_disable_interrupts(struct isci_host *ihost) { - BUG_ON(ihost->smu_registers == NULL); + clear_bit(IHOST_IRQ_ENABLED, &ihost->flags); writel(0xffffffff, &ihost->smu_registers->interrupt_mask); + readl(&ihost->smu_registers->interrupt_mask); /* flush */ } static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost) @@ -822,7 +822,7 @@ static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host * &ihost->scu_registers->sdma.unsolicited_frame_put_pointer); } -static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status) +void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status) { if (ihost->sm.current_state_id == SCIC_STARTING) { /* @@ -849,6 +849,7 @@ static bool is_phy_starting(struct isci_phy *iphy) case SCI_PHY_SUB_AWAIT_SATA_POWER: case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN: + case SCI_PHY_SUB_AWAIT_OSSP_EN: case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: case SCI_PHY_SUB_FINAL: return true; @@ -857,6 +858,39 @@ static bool is_phy_starting(struct isci_phy *iphy) } } +bool is_controller_start_complete(struct isci_host *ihost) +{ + int i; + + for (i = 0; i < SCI_MAX_PHYS; i++) { + struct isci_phy *iphy = &ihost->phys[i]; + u32 state = iphy->sm.current_state_id; + + /* in apc mode we need to check every phy, in + * mpc mode we only need to check phys that have + * been configured into a port + */ + if (is_port_config_apc(ihost)) + /* pass */; + else if (!phy_get_non_dummy_port(iphy)) + continue; + + /* The controller start operation is complete iff: + * - all links have been given an opportunity to start + * - have no indication of a connected device + * - have an indication of a connected device and it has + * finished the link training process. 
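is_controller_start_complete() above boils down to a predicate over the phys: every phy that matters must either show nothing connected or have finished link training, and the ready mask must match the configured mask. A reduced model of that test, with the APC-versus-MPC distinction and the exact phy states omitted:

#include <stdbool.h>

#define MAX_PHYS 4

struct phy {
        bool configured;        /* belongs to a non-dummy port */
        bool in_training;       /* link negotiation still running */
};

static bool start_complete(const struct phy *phys,
                           unsigned int ready_mask,
                           unsigned int configured_mask)
{
        int i;

        for (i = 0; i < MAX_PHYS; i++) {
                if (!phys[i].configured)
                        continue;               /* nothing expected on this phy */
                if (phys[i].in_training)
                        return false;           /* still negotiating */
        }

        return ready_mask == configured_mask;
}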
+ */ + if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) || + (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) || + (iphy->is_in_link_training == true && is_phy_starting(iphy)) || + (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) + return false; + } + + return true; +} + /** * sci_controller_start_next_phy - start phy * @scic: controller @@ -877,36 +911,7 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost) return status; if (ihost->next_phy_to_start >= SCI_MAX_PHYS) { - bool is_controller_start_complete = true; - u32 state; - u8 index; - - for (index = 0; index < SCI_MAX_PHYS; index++) { - iphy = &ihost->phys[index]; - state = iphy->sm.current_state_id; - - if (!phy_get_non_dummy_port(iphy)) - continue; - - /* The controller start operation is complete iff: - * - all links have been given an opportunity to start - * - have no indication of a connected device - * - have an indication of a connected device and it has - * finished the link training process. - */ - if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) || - (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) || - (iphy->is_in_link_training == true && is_phy_starting(iphy)) || - (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) { - is_controller_start_complete = false; - break; - } - } - - /* - * The controller has successfully finished the start process. - * Inform the SCI Core user and transition to the READY state. */ - if (is_controller_start_complete == true) { + if (is_controller_start_complete(ihost)) { sci_controller_transition_to_ready(ihost, SCI_SUCCESS); sci_del_timer(&ihost->phy_timer); ihost->phy_startup_timer_pending = false; @@ -987,9 +992,8 @@ static enum sci_status sci_controller_start(struct isci_host *ihost, u16 index; if (ihost->sm.current_state_id != SCIC_INITIALIZED) { - dev_warn(&ihost->pdev->dev, - "SCIC Controller start operation requested in " - "invalid state\n"); + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } @@ -1053,9 +1057,8 @@ void isci_host_scan_start(struct Scsi_Host *shost) spin_unlock_irq(&ihost->scic_lock); } -static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status) +static void isci_host_stop_complete(struct isci_host *ihost) { - isci_host_change_state(ihost, isci_stopped); sci_controller_disable_interrupts(ihost); clear_bit(IHOST_STOP_PENDING, &ihost->flags); wake_up(&ihost->eventq); @@ -1074,6 +1077,32 @@ static void sci_controller_completion_handler(struct isci_host *ihost) writel(0, &ihost->smu_registers->interrupt_mask); } +void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task) +{ + task->lldd_task = NULL; + if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags) && + !(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + if (test_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags)) { + /* Normal notification (task_done) */ + dev_dbg(&ihost->pdev->dev, + "%s: Normal - ireq/task = %p/%p\n", + __func__, ireq, task); + + task->task_done(task); + } else { + dev_dbg(&ihost->pdev->dev, + "%s: Error - ireq/task = %p/%p\n", + __func__, ireq, task); + + sas_task_abort(task); + } + } + if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) + wake_up_all(&ihost->eventq); + + if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags)) + isci_free_tag(ihost, ireq->io_tag); +} /** * 
isci_host_completion_routine() - This function is the delayed service * routine that calls the sci core library's completion handler. It's @@ -1082,107 +1111,15 @@ static void sci_controller_completion_handler(struct isci_host *ihost) * @data: This parameter specifies the ISCI host object * */ -static void isci_host_completion_routine(unsigned long data) +void isci_host_completion_routine(unsigned long data) { struct isci_host *ihost = (struct isci_host *)data; - struct list_head completed_request_list; - struct list_head errored_request_list; - struct list_head *current_position; - struct list_head *next_position; - struct isci_request *request; - struct isci_request *next_request; - struct sas_task *task; u16 active; - INIT_LIST_HEAD(&completed_request_list); - INIT_LIST_HEAD(&errored_request_list); - spin_lock_irq(&ihost->scic_lock); - sci_controller_completion_handler(ihost); - - /* Take the lists of completed I/Os from the host. */ - - list_splice_init(&ihost->requests_to_complete, - &completed_request_list); - - /* Take the list of errored I/Os from the host. */ - list_splice_init(&ihost->requests_to_errorback, - &errored_request_list); - spin_unlock_irq(&ihost->scic_lock); - /* Process any completions in the lists. */ - list_for_each_safe(current_position, next_position, - &completed_request_list) { - - request = list_entry(current_position, struct isci_request, - completed_node); - task = isci_request_access_task(request); - - /* Normal notification (task_done) */ - dev_dbg(&ihost->pdev->dev, - "%s: Normal - request/task = %p/%p\n", - __func__, - request, - task); - - /* Return the task to libsas */ - if (task != NULL) { - - task->lldd_task = NULL; - if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) { - - /* If the task is already in the abort path, - * the task_done callback cannot be called. - */ - task->task_done(task); - } - } - - spin_lock_irq(&ihost->scic_lock); - isci_free_tag(ihost, request->io_tag); - spin_unlock_irq(&ihost->scic_lock); - } - list_for_each_entry_safe(request, next_request, &errored_request_list, - completed_node) { - - task = isci_request_access_task(request); - - /* Use sas_task_abort */ - dev_warn(&ihost->pdev->dev, - "%s: Error - request/task = %p/%p\n", - __func__, - request, - task); - - if (task != NULL) { - - /* Put the task into the abort path if it's not there - * already. - */ - if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) - sas_task_abort(task); - - } else { - /* This is a case where the request has completed with a - * status such that it needed further target servicing, - * but the sas_task reference has already been removed - * from the request. Since it was errored, it was not - * being aborted, so there is nothing to do except free - * it. - */ - - spin_lock_irq(&ihost->scic_lock); - /* Remove the request from the remote device's list - * of pending requests. 
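The old completion routine being removed here spliced completed and errored requests onto local lists and walked them; the replacement ireq_done() decides per request whether to call task_done() or hand the task to the abort path. A compact sketch of that decision, with flag and callback names chosen for illustration only:

#include <stdbool.h>

struct task {
        void (*done)(struct task *);
        void (*abort)(struct task *);
};

struct req {
        bool completed_in_target;       /* target finished the I/O */
        bool abort_active;              /* abort path already owns it */
};

static void request_done(struct req *rq, struct task *t)
{
        if (rq->abort_active)
                return;                 /* the abort path will finish it */

        if (rq->completed_in_target)
                t->done(t);             /* normal completion */
        else
                t->abort(t);            /* error: push onto the abort path */
}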
- */ - list_del_init(&request->dev_node); - isci_free_tag(ihost, request->io_tag); - spin_unlock_irq(&ihost->scic_lock); - } - } - /* the coalesence timeout doubles at each encoding step, so * update it based on the ilog2 value of the outstanding requests */ @@ -1213,9 +1150,8 @@ static void isci_host_completion_routine(unsigned long data) static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout) { if (ihost->sm.current_state_id != SCIC_READY) { - dev_warn(&ihost->pdev->dev, - "SCIC Controller stop operation requested in " - "invalid state\n"); + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } @@ -1241,7 +1177,7 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost) switch (ihost->sm.current_state_id) { case SCIC_RESET: case SCIC_READY: - case SCIC_STOPPED: + case SCIC_STOPPING: case SCIC_FAILED: /* * The reset operation is not a graceful cleanup, just @@ -1250,13 +1186,50 @@ static enum sci_status sci_controller_reset(struct isci_host *ihost) sci_change_state(&ihost->sm, SCIC_RESETTING); return SCI_SUCCESS; default: - dev_warn(&ihost->pdev->dev, - "SCIC Controller reset operation requested in " - "invalid state\n"); + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } } +static enum sci_status sci_controller_stop_phys(struct isci_host *ihost) +{ + u32 index; + enum sci_status status; + enum sci_status phy_status; + + status = SCI_SUCCESS; + + for (index = 0; index < SCI_MAX_PHYS; index++) { + phy_status = sci_phy_stop(&ihost->phys[index]); + + if (phy_status != SCI_SUCCESS && + phy_status != SCI_FAILURE_INVALID_STATE) { + status = SCI_FAILURE; + + dev_warn(&ihost->pdev->dev, + "%s: Controller stop operation failed to stop " + "phy %d because of status %d.\n", + __func__, + ihost->phys[index].phy_index, phy_status); + } + } + + return status; +} + + +/** + * isci_host_deinit - shutdown frame reception and dma + * @ihost: host to take down + * + * This is called in either the driver shutdown or the suspend path. In + * the shutdown case libsas went through port teardown and normal device + * removal (i.e. physical links stayed up to service scsi_device removal + * commands). In the suspend case we disable the hardware without + * notifying libsas of the link down events since we want libsas to + * remember the domain across the suspend/resume cycle + */ void isci_host_deinit(struct isci_host *ihost) { int i; @@ -1265,17 +1238,6 @@ void isci_host_deinit(struct isci_host *ihost) for (i = 0; i < isci_gpio_count(ihost); i++) writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); - isci_host_change_state(ihost, isci_stopping); - for (i = 0; i < SCI_MAX_PORTS; i++) { - struct isci_port *iport = &ihost->ports[i]; - struct isci_remote_device *idev, *d; - - list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) { - if (test_bit(IDEV_ALLOCATED, &idev->flags)) - isci_remote_device_stop(ihost, idev); - } - } - set_bit(IHOST_STOP_PENDING, &ihost->flags); spin_lock_irq(&ihost->scic_lock); @@ -1284,12 +1246,21 @@ void isci_host_deinit(struct isci_host *ihost) wait_for_stop(ihost); + /* phy stop is after controller stop to allow port and device to + * go idle before shutting down the phys, but the expectation is + * that i/o has been shut off well before we reach this + * function. 
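The rewritten isci_host_deinit() comment above spells out an ordering: quiesce the controller and wait for it to stop before shutting the phys down, and only then reset, under the controller lock. A bare-bones sketch of that sequence, with every callee stubbed out as a placeholder:

/* every step is a stub standing in for the corresponding isci call */
static void controller_stop(void)   { /* quiesce devices, then ports */ }
static void wait_for_stop(void)     { /* block until the state machine settles */ }
static void stop_phys(void)         { /* links drop only once everything is idle */ }
static void controller_lock(void)   { }
static void controller_unlock(void) { }
static void controller_reset(void)  { /* not graceful, so it comes last */ }

static void host_deinit_order(void)
{
        controller_stop();
        wait_for_stop();
        stop_phys();

        controller_lock();
        controller_reset();
        controller_unlock();
}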
+ */ + sci_controller_stop_phys(ihost); + /* disable sgpio: where the above wait should give time for the * enclosure to sample the gpios going inactive */ writel(0, &ihost->scu_registers->peg0.sgpio.interface_control); + spin_lock_irq(&ihost->scic_lock); sci_controller_reset(ihost); + spin_unlock_irq(&ihost->scic_lock); /* Cancel any/all outstanding port timers */ for (i = 0; i < ihost->logical_port_entries; i++) { @@ -1328,29 +1299,6 @@ static void __iomem *smu_base(struct isci_host *isci_host) return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id; } -static void isci_user_parameters_get(struct sci_user_parameters *u) -{ - int i; - - for (i = 0; i < SCI_MAX_PHYS; i++) { - struct sci_phy_user_params *u_phy = &u->phys[i]; - - u_phy->max_speed_generation = phy_gen; - - /* we are not exporting these for now */ - u_phy->align_insertion_frequency = 0x7f; - u_phy->in_connection_align_insertion_frequency = 0xff; - u_phy->notify_enable_spin_up_insertion_frequency = 0x33; - } - - u->stp_inactivity_timeout = stp_inactive_to; - u->ssp_inactivity_timeout = ssp_inactive_to; - u->stp_max_occupancy_timeout = stp_max_occ_to; - u->ssp_max_occupancy_timeout = ssp_max_occ_to; - u->no_outbound_task_timeout = no_outbound_task_to; - u->max_concurr_spinup = max_concurr_spinup; -} - static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm) { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); @@ -1510,32 +1458,6 @@ static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm) sci_controller_set_interrupt_coalescence(ihost, 0, 0); } -static enum sci_status sci_controller_stop_phys(struct isci_host *ihost) -{ - u32 index; - enum sci_status status; - enum sci_status phy_status; - - status = SCI_SUCCESS; - - for (index = 0; index < SCI_MAX_PHYS; index++) { - phy_status = sci_phy_stop(&ihost->phys[index]); - - if (phy_status != SCI_SUCCESS && - phy_status != SCI_FAILURE_INVALID_STATE) { - status = SCI_FAILURE; - - dev_warn(&ihost->pdev->dev, - "%s: Controller stop operation failed to stop " - "phy %d because of status %d.\n", - __func__, - ihost->phys[index].phy_index, phy_status); - } - } - - return status; -} - static enum sci_status sci_controller_stop_ports(struct isci_host *ihost) { u32 index; @@ -1595,10 +1517,11 @@ static void sci_controller_stopping_state_enter(struct sci_base_state_machine *s { struct isci_host *ihost = container_of(sm, typeof(*ihost), sm); - /* Stop all of the components for this controller */ - sci_controller_stop_phys(ihost); - sci_controller_stop_ports(ihost); sci_controller_stop_devices(ihost); + sci_controller_stop_ports(ihost); + + if (!sci_controller_has_remote_devices_stopping(ihost)) + isci_host_stop_complete(ihost); } static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm) @@ -1624,6 +1547,9 @@ static void sci_controller_reset_hardware(struct isci_host *ihost) /* The write to the UFQGP clears the UFQPR */ writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer); + + /* clear all interrupts */ + writel(~SMU_INTERRUPT_STATUS_RESERVED_MASK, &ihost->smu_registers->interrupt_status); } static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm) @@ -1655,59 +1581,9 @@ static const struct sci_base_state sci_controller_state_table[] = { .enter_state = sci_controller_stopping_state_enter, .exit_state = sci_controller_stopping_state_exit, }, - [SCIC_STOPPED] = {}, [SCIC_FAILED] = {} }; -static void sci_controller_set_default_config_parameters(struct isci_host 
*ihost) -{ - /* these defaults are overridden by the platform / firmware */ - u16 index; - - /* Default to APC mode. */ - ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; - - /* Default to APC mode. */ - ihost->oem_parameters.controller.max_concurr_spin_up = 1; - - /* Default to no SSC operation. */ - ihost->oem_parameters.controller.do_enable_ssc = false; - - /* Default to short cables on all phys. */ - ihost->oem_parameters.controller.cable_selection_mask = 0; - - /* Initialize all of the port parameter information to narrow ports. */ - for (index = 0; index < SCI_MAX_PORTS; index++) { - ihost->oem_parameters.ports[index].phy_mask = 0; - } - - /* Initialize all of the phy parameter information. */ - for (index = 0; index < SCI_MAX_PHYS; index++) { - /* Default to 3G (i.e. Gen 2). */ - ihost->user_parameters.phys[index].max_speed_generation = - SCIC_SDS_PARM_GEN2_SPEED; - - /* the frequencies cannot be 0 */ - ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f; - ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff; - ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33; - - /* - * Previous Vitesse based expanders had a arbitration issue that - * is worked around by having the upper 32-bits of SAS address - * with a value greater then the Vitesse company identifier. - * Hence, usage of 0x5FCFFFFF. */ - ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id; - ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF; - } - - ihost->user_parameters.stp_inactivity_timeout = 5; - ihost->user_parameters.ssp_inactivity_timeout = 5; - ihost->user_parameters.stp_max_occupancy_timeout = 5; - ihost->user_parameters.ssp_max_occupancy_timeout = 20; - ihost->user_parameters.no_outbound_task_timeout = 2; -} - static void controller_timeout(unsigned long data) { struct sci_timer *tmr = (struct sci_timer *)data; @@ -1724,7 +1600,7 @@ static void controller_timeout(unsigned long data) sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT); else if (sm->current_state_id == SCIC_STOPPING) { sci_change_state(sm, SCIC_FAILED); - isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT); + isci_host_stop_complete(ihost); } else /* / @todo Now what do we want to do in this case? */ dev_err(&ihost->pdev->dev, "%s: Controller timer fired when controller was not " @@ -1764,9 +1640,6 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost, sci_init_timer(&ihost->timer, controller_timeout); - /* Initialize the User and OEM parameters to default values. */ - sci_controller_set_default_config_parameters(ihost); - return sci_controller_reset(ihost); } @@ -1846,27 +1719,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version) return 0; } -static enum sci_status sci_oem_parameters_set(struct isci_host *ihost) -{ - u32 state = ihost->sm.current_state_id; - struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); - - if (state == SCIC_RESET || - state == SCIC_INITIALIZING || - state == SCIC_INITIALIZED) { - u8 oem_version = pci_info->orom ? 
pci_info->orom->hdr.version : - ISCI_ROM_VER_1_0; - - if (sci_oem_parameters_validate(&ihost->oem_parameters, - oem_version)) - return SCI_FAILURE_INVALID_PARAMETER_VALUE; - - return SCI_SUCCESS; - } - - return SCI_FAILURE_INVALID_STATE; -} - static u8 max_spin_up(struct isci_host *ihost) { if (ihost->user_parameters.max_concurr_spinup) @@ -1914,7 +1766,7 @@ static void power_control_timeout(unsigned long data) ihost->power_control.phys_granted_power++; sci_phy_consume_power_handler(iphy); - if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { + if (iphy->protocol == SAS_PROTOCOL_SSP) { u8 j; for (j = 0; j < SCI_MAX_PHYS; j++) { @@ -1988,7 +1840,7 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost, sizeof(current_phy->frame_rcvd.iaf.sas_addr)); if (current_phy->sm.current_state_id == SCI_PHY_READY && - current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS && + current_phy->protocol == SAS_PROTOCOL_SSP && other == 0) { sci_phy_consume_power_handler(iphy); break; @@ -2279,9 +2131,8 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost) unsigned long i, state, val; if (ihost->sm.current_state_id != SCIC_RESET) { - dev_warn(&ihost->pdev->dev, - "SCIC Controller initialize operation requested " - "in invalid state\n"); + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } @@ -2384,96 +2235,76 @@ static enum sci_status sci_controller_initialize(struct isci_host *ihost) return result; } -static enum sci_status sci_user_parameters_set(struct isci_host *ihost, - struct sci_user_parameters *sci_parms) +static int sci_controller_dma_alloc(struct isci_host *ihost) { - u32 state = ihost->sm.current_state_id; + struct device *dev = &ihost->pdev->dev; + size_t size; + int i; - if (state == SCIC_RESET || - state == SCIC_INITIALIZING || - state == SCIC_INITIALIZED) { - u16 index; + /* detect re-initialization */ + if (ihost->completion_queue) + return 0; - /* - * Validate the user parameters. If they are not legal, then - * return a failure. 
- */ - for (index = 0; index < SCI_MAX_PHYS; index++) { - struct sci_phy_user_params *user_phy; + size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); + ihost->completion_queue = dmam_alloc_coherent(dev, size, &ihost->cq_dma, + GFP_KERNEL); + if (!ihost->completion_queue) + return -ENOMEM; - user_phy = &sci_parms->phys[index]; + size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); + ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &ihost->rnc_dma, + GFP_KERNEL); - if (!((user_phy->max_speed_generation <= - SCIC_SDS_PARM_MAX_SPEED) && - (user_phy->max_speed_generation > - SCIC_SDS_PARM_NO_SPEED))) - return SCI_FAILURE_INVALID_PARAMETER_VALUE; + if (!ihost->remote_node_context_table) + return -ENOMEM; - if (user_phy->in_connection_align_insertion_frequency < - 3) - return SCI_FAILURE_INVALID_PARAMETER_VALUE; + size = ihost->task_context_entries * sizeof(struct scu_task_context), + ihost->task_context_table = dmam_alloc_coherent(dev, size, &ihost->tc_dma, + GFP_KERNEL); + if (!ihost->task_context_table) + return -ENOMEM; - if ((user_phy->in_connection_align_insertion_frequency < - 3) || - (user_phy->align_insertion_frequency == 0) || - (user_phy-> - notify_enable_spin_up_insertion_frequency == - 0)) - return SCI_FAILURE_INVALID_PARAMETER_VALUE; - } + size = SCI_UFI_TOTAL_SIZE; + ihost->ufi_buf = dmam_alloc_coherent(dev, size, &ihost->ufi_dma, GFP_KERNEL); + if (!ihost->ufi_buf) + return -ENOMEM; - if ((sci_parms->stp_inactivity_timeout == 0) || - (sci_parms->ssp_inactivity_timeout == 0) || - (sci_parms->stp_max_occupancy_timeout == 0) || - (sci_parms->ssp_max_occupancy_timeout == 0) || - (sci_parms->no_outbound_task_timeout == 0)) - return SCI_FAILURE_INVALID_PARAMETER_VALUE; + for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { + struct isci_request *ireq; + dma_addr_t dma; - memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); + ireq = dmam_alloc_coherent(dev, sizeof(*ireq), &dma, GFP_KERNEL); + if (!ireq) + return -ENOMEM; - return SCI_SUCCESS; + ireq->tc = &ihost->task_context_table[i]; + ireq->owning_controller = ihost; + ireq->request_daddr = dma; + ireq->isci_host = ihost; + ihost->reqs[i] = ireq; } - return SCI_FAILURE_INVALID_STATE; + return 0; } static int sci_controller_mem_init(struct isci_host *ihost) { - struct device *dev = &ihost->pdev->dev; - dma_addr_t dma; - size_t size; - int err; + int err = sci_controller_dma_alloc(ihost); - size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); - ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); - if (!ihost->completion_queue) - return -ENOMEM; - - writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower); - writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper); - - size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); - ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, - GFP_KERNEL); - if (!ihost->remote_node_context_table) - return -ENOMEM; - - writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower); - writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper); - - size = ihost->task_context_entries * sizeof(struct scu_task_context), - ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); - if (!ihost->task_context_table) - return -ENOMEM; - - ihost->task_context_dma = dma; - writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); - writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); - - 
err = sci_unsolicited_frame_control_construct(ihost); if (err) return err; + writel(lower_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_lower); + writel(upper_32_bits(ihost->cq_dma), &ihost->smu_registers->completion_queue_upper); + + writel(lower_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_lower); + writel(upper_32_bits(ihost->rnc_dma), &ihost->smu_registers->remote_node_context_upper); + + writel(lower_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_lower); + writel(upper_32_bits(ihost->tc_dma), &ihost->smu_registers->host_task_table_upper); + + sci_unsolicited_frame_control_construct(ihost); + /* * Inform the silicon as to the location of the UF headers and * address table. @@ -2491,22 +2322,22 @@ static int sci_controller_mem_init(struct isci_host *ihost) return 0; } +/** + * isci_host_init - (re-)initialize hardware and internal (private) state + * @ihost: host to init + * + * Any public facing objects (like asd_sas_port, and asd_sas_phys), or + * one-time initialization objects like locks and waitqueues, are + * not touched (they are initialized in isci_host_alloc) + */ int isci_host_init(struct isci_host *ihost) { - int err = 0, i; + int i, err; enum sci_status status; - struct sci_user_parameters sci_user_params; - struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); - - spin_lock_init(&ihost->state_lock); - spin_lock_init(&ihost->scic_lock); - init_waitqueue_head(&ihost->eventq); - - isci_host_change_state(ihost, isci_starting); - - status = sci_controller_construct(ihost, scu_base(ihost), - smu_base(ihost)); + spin_lock_irq(&ihost->scic_lock); + status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost)); + spin_unlock_irq(&ihost->scic_lock); if (status != SCI_SUCCESS) { dev_err(&ihost->pdev->dev, "%s: sci_controller_construct failed - status = %x\n", @@ -2515,48 +2346,6 @@ int isci_host_init(struct isci_host *ihost) return -ENODEV; } - ihost->sas_ha.dev = &ihost->pdev->dev; - ihost->sas_ha.lldd_ha = ihost; - - /* - * grab initial values stored in the controller object for OEM and USER - * parameters - */ - isci_user_parameters_get(&sci_user_params); - status = sci_user_parameters_set(ihost, &sci_user_params); - if (status != SCI_SUCCESS) { - dev_warn(&ihost->pdev->dev, - "%s: sci_user_parameters_set failed\n", - __func__); - return -ENODEV; - } - - /* grab any OEM parameters specified in orom */ - if (pci_info->orom) { - status = isci_parse_oem_parameters(&ihost->oem_parameters, - pci_info->orom, - ihost->id); - if (status != SCI_SUCCESS) { - dev_warn(&ihost->pdev->dev, - "parsing firmware oem parameters failed\n"); - return -EINVAL; - } - } - - status = sci_oem_parameters_set(ihost); - if (status != SCI_SUCCESS) { - dev_warn(&ihost->pdev->dev, - "%s: sci_oem_parameters_set failed\n", - __func__); - return -ENODEV; - } - - tasklet_init(&ihost->completion_tasklet, - isci_host_completion_routine, (unsigned long)ihost); - - INIT_LIST_HEAD(&ihost->requests_to_complete); - INIT_LIST_HEAD(&ihost->requests_to_errorback); - spin_lock_irq(&ihost->scic_lock); status = sci_controller_initialize(ihost); spin_unlock_irq(&ihost->scic_lock); @@ -2572,43 +2361,12 @@ int isci_host_init(struct isci_host *ihost) if (err) return err; - for (i = 0; i < SCI_MAX_PORTS; i++) - isci_port_init(&ihost->ports[i], ihost, i); - - for (i = 0; i < SCI_MAX_PHYS; i++) - isci_phy_init(&ihost->phys[i], ihost, i); - /* enable sgpio */ writel(1, &ihost->scu_registers->peg0.sgpio.interface_control); for (i = 0; i < isci_gpio_count(ihost); 
i++) writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code); - for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { - struct isci_remote_device *idev = &ihost->devices[i]; - - INIT_LIST_HEAD(&idev->reqs_in_process); - INIT_LIST_HEAD(&idev->node); - } - - for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { - struct isci_request *ireq; - dma_addr_t dma; - - ireq = dmam_alloc_coherent(&ihost->pdev->dev, - sizeof(struct isci_request), &dma, - GFP_KERNEL); - if (!ireq) - return -ENOMEM; - - ireq->tc = &ihost->task_context_table[i]; - ireq->owning_controller = ihost; - spin_lock_init(&ireq->state_lock); - ireq->request_daddr = dma; - ireq->isci_host = ihost; - ihost->reqs[i] = ireq; - } - return 0; } @@ -2654,7 +2412,7 @@ void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, } } -static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) +bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost) { u32 index; @@ -2680,7 +2438,7 @@ void sci_controller_remote_device_stopped(struct isci_host *ihost, } if (!sci_controller_has_remote_devices_stopping(ihost)) - sci_change_state(&ihost->sm, SCIC_STOPPED); + isci_host_stop_complete(ihost); } void sci_controller_post_request(struct isci_host *ihost, u32 request) @@ -2842,7 +2600,8 @@ enum sci_status sci_controller_start_io(struct isci_host *ihost, enum sci_status status; if (ihost->sm.current_state_id != SCIC_READY) { - dev_warn(&ihost->pdev->dev, "invalid state to start I/O"); + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } @@ -2866,22 +2625,26 @@ enum sci_status sci_controller_terminate_request(struct isci_host *ihost, enum sci_status status; if (ihost->sm.current_state_id != SCIC_READY) { - dev_warn(&ihost->pdev->dev, - "invalid state to terminate request\n"); + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } - status = sci_io_request_terminate(ireq); - if (status != SCI_SUCCESS) - return status; - /* - * Utilize the original post context command and or in the POST_TC_ABORT - * request sub-type. - */ - sci_controller_post_request(ihost, - ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); - return SCI_SUCCESS; + dev_dbg(&ihost->pdev->dev, "%s: status=%d; ireq=%p; flags=%lx\n", + __func__, status, ireq, ireq->flags); + + if ((status == SCI_SUCCESS) && + !test_bit(IREQ_PENDING_ABORT, &ireq->flags) && + !test_and_set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags)) { + /* Utilize the original post context command and or in the + * POST_TC_ABORT request sub-type. 
+ */ + sci_controller_post_request( + ihost, ireq->post_context | + SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT); + } + return status; } /** @@ -2915,7 +2678,8 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost, clear_bit(IREQ_ACTIVE, &ireq->flags); return SCI_SUCCESS; default: - dev_warn(&ihost->pdev->dev, "invalid state to complete I/O"); + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } @@ -2926,7 +2690,8 @@ enum sci_status sci_controller_continue_io(struct isci_request *ireq) struct isci_host *ihost = ireq->owning_controller; if (ihost->sm.current_state_id != SCIC_READY) { - dev_warn(&ihost->pdev->dev, "invalid state to continue I/O"); + dev_warn(&ihost->pdev->dev, "%s invalid state: %d\n", + __func__, ihost->sm.current_state_id); return SCI_FAILURE_INVALID_STATE; } diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index adbad69d1069..9ab58e0540e7 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -55,6 +55,7 @@ #ifndef _SCI_HOST_H_ #define _SCI_HOST_H_ +#include #include "remote_device.h" #include "phy.h" #include "isci.h" @@ -108,6 +109,8 @@ struct sci_port_configuration_agent; typedef void (*port_config_fn)(struct isci_host *, struct sci_port_configuration_agent *, struct isci_port *, struct isci_phy *); +bool is_port_config_apc(struct isci_host *ihost); +bool is_controller_start_complete(struct isci_host *ihost); struct sci_port_configuration_agent { u16 phy_configured_mask; @@ -157,13 +160,17 @@ struct isci_host { struct sci_power_control power_control; u8 io_request_sequence[SCI_MAX_IO_REQUESTS]; struct scu_task_context *task_context_table; - dma_addr_t task_context_dma; + dma_addr_t tc_dma; union scu_remote_node_context *remote_node_context_table; + dma_addr_t rnc_dma; u32 *completion_queue; + dma_addr_t cq_dma; u32 completion_queue_get; u32 logical_port_entries; u32 remote_node_entries; u32 task_context_entries; + void *ufi_buf; + dma_addr_t ufi_dma; struct sci_unsolicited_frame_control uf_control; /* phy startup */ @@ -190,17 +197,13 @@ struct isci_host { struct asd_sas_port sas_ports[SCI_MAX_PORTS]; struct sas_ha_struct sas_ha; - spinlock_t state_lock; struct pci_dev *pdev; - enum isci_status status; #define IHOST_START_PENDING 0 #define IHOST_STOP_PENDING 1 + #define IHOST_IRQ_ENABLED 2 unsigned long flags; wait_queue_head_t eventq; - struct Scsi_Host *shost; struct tasklet_struct completion_tasklet; - struct list_head requests_to_complete; - struct list_head requests_to_errorback; spinlock_t scic_lock; struct isci_request *reqs[SCI_MAX_IO_REQUESTS]; struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES]; @@ -273,13 +276,6 @@ enum sci_controller_states { */ SCIC_STOPPING, - /** - * This state indicates that the controller has successfully been stopped. - * In this state no new IO operations are permitted. - * This state is entered from the STOPPING state. - */ - SCIC_STOPPED, - /** * This state indicates that the controller could not successfully be * initialized. In this state no new IO operations are permitted. 
@@ -309,32 +305,16 @@ static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev) return pci_get_drvdata(pdev); } +static inline struct Scsi_Host *to_shost(struct isci_host *ihost) +{ + return ihost->sas_ha.core.shost; +} + #define for_each_isci_host(id, ihost, pdev) \ for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \ id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \ ihost = to_pci_info(pdev)->hosts[++id]) -static inline enum isci_status isci_host_get_state(struct isci_host *isci_host) -{ - return isci_host->status; -} - -static inline void isci_host_change_state(struct isci_host *isci_host, - enum isci_status status) -{ - unsigned long flags; - - dev_dbg(&isci_host->pdev->dev, - "%s: isci_host = %p, state = 0x%x", - __func__, - isci_host, - status); - spin_lock_irqsave(&isci_host->state_lock, flags); - isci_host->status = status; - spin_unlock_irqrestore(&isci_host->state_lock, flags); - -} - static inline void wait_for_start(struct isci_host *ihost) { wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags)); @@ -360,6 +340,11 @@ static inline struct isci_host *dev_to_ihost(struct domain_device *dev) return dev->port->ha->lldd_ha; } +static inline struct isci_host *idev_to_ihost(struct isci_remote_device *idev) +{ + return dev_to_ihost(idev->domain_dev); +} + /* we always use protocol engine group zero */ #define ISCI_PEG 0 @@ -378,8 +363,7 @@ static inline int sci_remote_device_node_count(struct isci_remote_device *idev) { struct domain_device *dev = idev->domain_dev; - if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && - !idev->is_direct_attached) + if (dev_is_sata(dev) && dev->parent) return SCU_STP_REMOTE_NODE_COUNT; return SCU_SSP_REMOTE_NODE_COUNT; } @@ -475,36 +459,17 @@ void sci_controller_free_remote_node_context( struct isci_remote_device *idev, u16 node_id); -struct isci_request *sci_request_by_tag(struct isci_host *ihost, - u16 io_tag); - -void sci_controller_power_control_queue_insert( - struct isci_host *ihost, - struct isci_phy *iphy); - -void sci_controller_power_control_queue_remove( - struct isci_host *ihost, - struct isci_phy *iphy); - -void sci_controller_link_up( - struct isci_host *ihost, - struct isci_port *iport, - struct isci_phy *iphy); - -void sci_controller_link_down( - struct isci_host *ihost, - struct isci_port *iport, - struct isci_phy *iphy); - -void sci_controller_remote_device_stopped( - struct isci_host *ihost, - struct isci_remote_device *idev); - -void sci_controller_copy_task_context( - struct isci_host *ihost, - struct isci_request *ireq); - -void sci_controller_register_setup(struct isci_host *ihost); +struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag); +void sci_controller_power_control_queue_insert(struct isci_host *ihost, + struct isci_phy *iphy); +void sci_controller_power_control_queue_remove(struct isci_host *ihost, + struct isci_phy *iphy); +void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy); +void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport, + struct isci_phy *iphy); +void sci_controller_remote_device_stopped(struct isci_host *ihost, + struct isci_remote_device *idev); enum sci_status sci_controller_continue_io(struct isci_request *ireq); int isci_host_scan_finished(struct Scsi_Host *, unsigned long); @@ -512,29 +477,14 @@ void isci_host_scan_start(struct Scsi_Host *); u16 isci_alloc_tag(struct isci_host *ihost); enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag); 
void isci_tci_free(struct isci_host *ihost, u16 tci); +void ireq_done(struct isci_host *ihost, struct isci_request *ireq, struct sas_task *task); int isci_host_init(struct isci_host *); - -void isci_host_init_controller_names( - struct isci_host *isci_host, - unsigned int controller_idx); - -void isci_host_deinit( - struct isci_host *); - -void isci_host_port_link_up( - struct isci_host *, - struct isci_port *, - struct isci_phy *); -int isci_host_dev_found(struct domain_device *); - -void isci_host_remote_device_start_complete( - struct isci_host *, - struct isci_remote_device *, - enum sci_status); - -void sci_controller_disable_interrupts( - struct isci_host *ihost); +void isci_host_completion_routine(unsigned long data); +void isci_host_deinit(struct isci_host *); +void sci_controller_disable_interrupts(struct isci_host *ihost); +bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost); +void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status); enum sci_status sci_controller_start_io( struct isci_host *ihost, diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 5137db5a5d85..47e28b555029 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -271,13 +271,12 @@ static void isci_unregister(struct isci_host *isci_host) if (!isci_host) return; - shost = isci_host->shost; - sas_unregister_ha(&isci_host->sas_ha); - sas_remove_host(isci_host->shost); - scsi_remove_host(isci_host->shost); - scsi_host_put(isci_host->shost); + shost = to_shost(isci_host); + sas_remove_host(shost); + scsi_remove_host(shost); + scsi_host_put(shost); } static int __devinit isci_pci_init(struct pci_dev *pdev) @@ -397,38 +396,199 @@ static int isci_setup_interrupts(struct pci_dev *pdev) return err; } +static void isci_user_parameters_get(struct sci_user_parameters *u) +{ + int i; + + for (i = 0; i < SCI_MAX_PHYS; i++) { + struct sci_phy_user_params *u_phy = &u->phys[i]; + + u_phy->max_speed_generation = phy_gen; + + /* we are not exporting these for now */ + u_phy->align_insertion_frequency = 0x7f; + u_phy->in_connection_align_insertion_frequency = 0xff; + u_phy->notify_enable_spin_up_insertion_frequency = 0x33; + } + + u->stp_inactivity_timeout = stp_inactive_to; + u->ssp_inactivity_timeout = ssp_inactive_to; + u->stp_max_occupancy_timeout = stp_max_occ_to; + u->ssp_max_occupancy_timeout = ssp_max_occ_to; + u->no_outbound_task_timeout = no_outbound_task_to; + u->max_concurr_spinup = max_concurr_spinup; +} + +static enum sci_status sci_user_parameters_set(struct isci_host *ihost, + struct sci_user_parameters *sci_parms) +{ + u16 index; + + /* + * Validate the user parameters. If they are not legal, then + * return a failure. 
+ */ + for (index = 0; index < SCI_MAX_PHYS; index++) { + struct sci_phy_user_params *u; + + u = &sci_parms->phys[index]; + + if (!((u->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) && + (u->max_speed_generation > SCIC_SDS_PARM_NO_SPEED))) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + + if (u->in_connection_align_insertion_frequency < 3) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + + if ((u->in_connection_align_insertion_frequency < 3) || + (u->align_insertion_frequency == 0) || + (u->notify_enable_spin_up_insertion_frequency == 0)) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + } + + if ((sci_parms->stp_inactivity_timeout == 0) || + (sci_parms->ssp_inactivity_timeout == 0) || + (sci_parms->stp_max_occupancy_timeout == 0) || + (sci_parms->ssp_max_occupancy_timeout == 0) || + (sci_parms->no_outbound_task_timeout == 0)) + return SCI_FAILURE_INVALID_PARAMETER_VALUE; + + memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); + + return SCI_SUCCESS; +} + +static void sci_oem_defaults(struct isci_host *ihost) +{ + /* these defaults are overridden by the platform / firmware */ + struct sci_user_parameters *user = &ihost->user_parameters; + struct sci_oem_params *oem = &ihost->oem_parameters; + int i; + + /* Default to APC mode. */ + oem->controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE; + + /* Default to APC mode. */ + oem->controller.max_concurr_spin_up = 1; + + /* Default to no SSC operation. */ + oem->controller.do_enable_ssc = false; + + /* Default to short cables on all phys. */ + oem->controller.cable_selection_mask = 0; + + /* Initialize all of the port parameter information to narrow ports. */ + for (i = 0; i < SCI_MAX_PORTS; i++) + oem->ports[i].phy_mask = 0; + + /* Initialize all of the phy parameter information. */ + for (i = 0; i < SCI_MAX_PHYS; i++) { + /* Default to 3G (i.e. Gen 2). */ + user->phys[i].max_speed_generation = SCIC_SDS_PARM_GEN2_SPEED; + + /* the frequencies cannot be 0 */ + user->phys[i].align_insertion_frequency = 0x7f; + user->phys[i].in_connection_align_insertion_frequency = 0xff; + user->phys[i].notify_enable_spin_up_insertion_frequency = 0x33; + + /* Previous Vitesse based expanders had an arbitration issue that + * is worked around by having the upper 32-bits of SAS address + * with a value greater than the Vitesse company identifier. + * Hence, usage of 0x5FCFFFFF. 
+ */ + oem->phys[i].sas_address.low = 0x1 + ihost->id; + oem->phys[i].sas_address.high = 0x5FCFFFFF; + } + + user->stp_inactivity_timeout = 5; + user->ssp_inactivity_timeout = 5; + user->stp_max_occupancy_timeout = 5; + user->ssp_max_occupancy_timeout = 20; + user->no_outbound_task_timeout = 2; +} + static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) { - struct isci_host *isci_host; + struct isci_orom *orom = to_pci_info(pdev)->orom; + struct sci_user_parameters sci_user_params; + u8 oem_version = ISCI_ROM_VER_1_0; + struct isci_host *ihost; struct Scsi_Host *shost; - int err; + int err, i; - isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL); - if (!isci_host) + ihost = devm_kzalloc(&pdev->dev, sizeof(*ihost), GFP_KERNEL); + if (!ihost) return NULL; - isci_host->pdev = pdev; - isci_host->id = id; + ihost->pdev = pdev; + ihost->id = id; + spin_lock_init(&ihost->scic_lock); + init_waitqueue_head(&ihost->eventq); + ihost->sas_ha.dev = &ihost->pdev->dev; + ihost->sas_ha.lldd_ha = ihost; + tasklet_init(&ihost->completion_tasklet, + isci_host_completion_routine, (unsigned long)ihost); + + /* validate module parameters */ + /* TODO: kill struct sci_user_parameters and reference directly */ + sci_oem_defaults(ihost); + isci_user_parameters_get(&sci_user_params); + if (sci_user_parameters_set(ihost, &sci_user_params)) { + dev_warn(&pdev->dev, + "%s: sci_user_parameters_set failed\n", __func__); + return NULL; + } + + /* sanity check platform (or 'firmware') oem parameters */ + if (orom) { + if (id < 0 || id >= SCI_MAX_CONTROLLERS || id > orom->hdr.num_elements) { + dev_warn(&pdev->dev, "parsing firmware oem parameters failed\n"); + return NULL; + } + ihost->oem_parameters = orom->ctrl[id]; + oem_version = orom->hdr.version; + } + + /* validate oem parameters (platform, firmware, or built-in defaults) */ + if (sci_oem_parameters_validate(&ihost->oem_parameters, oem_version)) { + dev_warn(&pdev->dev, "oem parameter validation failed\n"); + return NULL; + } + + for (i = 0; i < SCI_MAX_PORTS; i++) { + struct isci_port *iport = &ihost->ports[i]; + + INIT_LIST_HEAD(&iport->remote_dev_list); + iport->isci_host = ihost; + } + + for (i = 0; i < SCI_MAX_PHYS; i++) + isci_phy_init(&ihost->phys[i], ihost, i); + + for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { + struct isci_remote_device *idev = &ihost->devices[i]; + + INIT_LIST_HEAD(&idev->node); + } shost = scsi_host_alloc(&isci_sht, sizeof(void *)); if (!shost) return NULL; - isci_host->shost = shost; dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: " "{%s, %s, %s, %s}\n", - (is_cable_select_overridden() ? "* " : ""), isci_host->id, - lookup_cable_names(decode_cable_selection(isci_host, 3)), - lookup_cable_names(decode_cable_selection(isci_host, 2)), - lookup_cable_names(decode_cable_selection(isci_host, 1)), - lookup_cable_names(decode_cable_selection(isci_host, 0))); + (is_cable_select_overridden() ? 
"* " : ""), ihost->id, + lookup_cable_names(decode_cable_selection(ihost, 3)), + lookup_cable_names(decode_cable_selection(ihost, 2)), + lookup_cable_names(decode_cable_selection(ihost, 1)), + lookup_cable_names(decode_cable_selection(ihost, 0))); - err = isci_host_init(isci_host); + err = isci_host_init(ihost); if (err) goto err_shost; - SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha; - isci_host->sas_ha.core.shost = shost; + SHOST_TO_SAS_HA(shost) = &ihost->sas_ha; + ihost->sas_ha.core.shost = shost; shost->transportt = isci_transport_template; shost->max_id = ~0; @@ -439,11 +599,11 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id) if (err) goto err_shost; - err = isci_register_sas_ha(isci_host); + err = isci_register_sas_ha(ihost); if (err) goto err_shost_remove; - return isci_host; + return ihost; err_shost_remove: scsi_remove_host(shost); @@ -476,7 +636,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic if (!orom) orom = isci_request_oprom(pdev); - for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) { + for (i = 0; orom && i < num_controllers(pdev); i++) { if (sci_oem_parameters_validate(&orom->ctrl[i], orom->hdr.version)) { dev_warn(&pdev->dev, @@ -525,11 +685,11 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic pci_info->hosts[i] = h; /* turn on DIF support */ - scsi_host_set_prot(h->shost, + scsi_host_set_prot(to_shost(h), SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION); - scsi_host_set_guard(h->shost, SHOST_DIX_GUARD_CRC); + scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC); } err = isci_setup_interrupts(pdev); @@ -537,7 +697,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic goto err_host_alloc; for_each_isci_host(i, isci_host, pdev) - scsi_scan_host(isci_host->shost); + scsi_scan_host(to_shost(isci_host)); return 0; diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index fab3586840b5..18f43d4c30ba 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -580,7 +580,7 @@ static void sci_phy_start_sas_link_training(struct isci_phy *iphy) sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN); - iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS; + iphy->protocol = SAS_PROTOCOL_SSP; } static void sci_phy_start_sata_link_training(struct isci_phy *iphy) @@ -591,7 +591,7 @@ static void sci_phy_start_sata_link_training(struct isci_phy *iphy) */ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER); - iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; + iphy->protocol = SAS_PROTOCOL_SATA; } /** @@ -668,6 +668,19 @@ static const char *phy_event_name(u32 event_code) phy_to_host(iphy)->id, iphy->phy_index, \ phy_state_name(state), phy_event_name(code), code) + +void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout) +{ + u32 val; + + /* Extend timeout */ + val = readl(&iphy->link_layer_registers->transmit_comsas_signal); + val &= ~SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK); + val |= SCU_SAS_LLTXCOMSAS_GEN_VAL(NEGTIME, timeout); + + writel(val, &iphy->link_layer_registers->transmit_comsas_signal); +} + enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) { enum sci_phy_states state = iphy->sm.current_state_id; @@ -683,6 +696,13 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) sci_phy_start_sata_link_training(iphy); iphy->is_in_link_training = true; break; + case 
SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: + /* Extend timeout value */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); + + /* Start the oob/sn state machine over again */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; default: phy_event_dbg(iphy, state, event_code); return SCI_FAILURE; @@ -717,9 +737,19 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) sci_phy_start_sata_link_training(iphy); break; case SCU_EVENT_LINK_FAILURE: + /* Change the timeout value to default */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; + case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: + /* Extend the timeout value */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); + + /* Start the oob/sn state machine over again */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE; @@ -740,7 +770,14 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) sci_phy_start_sata_link_training(iphy); break; case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT: + /* Extend the timeout value */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED); + + /* Start the oob/sn state machine over again */ + sci_change_state(&iphy->sm, SCI_PHY_STARTING); + break; case SCU_EVENT_LINK_FAILURE: + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); case SCU_EVENT_HARD_RESET_RECEIVED: /* Start the oob/sn state machine over again */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); @@ -753,6 +790,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) case SCI_PHY_SUB_AWAIT_SAS_POWER: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: + /* Change the timeout value to default */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; @@ -764,6 +804,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) case SCI_PHY_SUB_AWAIT_SATA_POWER: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: + /* Change the timeout value to default */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; @@ -788,6 +831,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) case SCI_PHY_SUB_AWAIT_SATA_PHY_EN: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: + /* Change the timeout value to default */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; @@ -797,7 +843,7 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) */ break; case SCU_EVENT_SATA_PHY_DETECTED: - iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA; + iphy->protocol = SAS_PROTOCOL_SATA; /* We have received the SATA PHY notification change state */ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN); @@ -836,6 +882,9 @@ enum sci_status sci_phy_event_handler(struct 
isci_phy *iphy, u32 event_code) SCI_PHY_SUB_AWAIT_SIG_FIS_UF); break; case SCU_EVENT_LINK_FAILURE: + /* Change the timeout value to default */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; @@ -859,6 +908,9 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) break; case SCU_EVENT_LINK_FAILURE: + /* Change the timeout value to default */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; @@ -871,16 +923,26 @@ enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code) case SCI_PHY_READY: switch (scu_get_event_code(event_code)) { case SCU_EVENT_LINK_FAILURE: + /* Set default timeout */ + scu_link_layer_set_txcomsas_timeout(iphy, SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT); + /* Link failure change state back to the starting state */ sci_change_state(&iphy->sm, SCI_PHY_STARTING); break; case SCU_EVENT_BROADCAST_CHANGE: + case SCU_EVENT_BROADCAST_SES: + case SCU_EVENT_BROADCAST_RESERVED0: + case SCU_EVENT_BROADCAST_RESERVED1: + case SCU_EVENT_BROADCAST_EXPANDER: + case SCU_EVENT_BROADCAST_AEN: /* Broadcast change received. Notify the port. */ if (phy_get_non_dummy_port(iphy) != NULL) sci_port_broadcast_change_received(iphy->owning_port, iphy); else iphy->bcn_received_while_port_unassigned = true; break; + case SCU_EVENT_BROADCAST_RESERVED3: + case SCU_EVENT_BROADCAST_RESERVED4: default: phy_event_warn(iphy, state, event_code); return SCI_FAILURE_INVALID_STATE; @@ -1215,7 +1277,7 @@ static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm) scu_link_layer_start_oob(iphy); /* We don't know what kind of phy we are going to be just yet */ - iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; + iphy->protocol = SAS_PROTOCOL_NONE; iphy->bcn_received_while_port_unassigned = false; if (iphy->sm.previous_state_id == SCI_PHY_READY) @@ -1250,7 +1312,7 @@ static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm) */ sci_port_deactivate_phy(iphy->owning_port, iphy, false); - if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { + if (iphy->protocol == SAS_PROTOCOL_SSP) { scu_link_layer_tx_hard_reset(iphy); } else { /* The SCU does not need to have a discrete reset state so @@ -1316,7 +1378,7 @@ void sci_phy_construct(struct isci_phy *iphy, iphy->owning_port = iport; iphy->phy_index = phy_index; iphy->bcn_received_while_port_unassigned = false; - iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN; + iphy->protocol = SAS_PROTOCOL_NONE; iphy->link_layer_registers = NULL; iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN; @@ -1380,12 +1442,14 @@ int isci_phy_control(struct asd_sas_phy *sas_phy, switch (func) { case PHY_FUNC_DISABLE: spin_lock_irqsave(&ihost->scic_lock, flags); + scu_link_layer_start_oob(iphy); sci_phy_stop(iphy); spin_unlock_irqrestore(&ihost->scic_lock, flags); break; case PHY_FUNC_LINK_RESET: spin_lock_irqsave(&ihost->scic_lock, flags); + scu_link_layer_start_oob(iphy); sci_phy_stop(iphy); sci_phy_start(iphy); spin_unlock_irqrestore(&ihost->scic_lock, flags); diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h index 0e45833ba06d..45fecfa36a98 100644 --- a/drivers/scsi/isci/phy.h +++ b/drivers/scsi/isci/phy.h @@ -76,13 +76,6 @@ */ #define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250 -enum sci_phy_protocol { - 
SCIC_SDS_PHY_PROTOCOL_UNKNOWN, - SCIC_SDS_PHY_PROTOCOL_SAS, - SCIC_SDS_PHY_PROTOCOL_SATA, - SCIC_SDS_MAX_PHY_PROTOCOLS -}; - /** * isci_phy - hba local phy infrastructure * @sm: @@ -95,7 +88,7 @@ struct isci_phy { struct sci_base_state_machine sm; struct isci_port *owning_port; enum sas_linkrate max_negotiated_speed; - enum sci_phy_protocol protocol; + enum sas_protocol protocol; u8 phy_index; bool bcn_received_while_port_unassigned; bool is_in_link_training; diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index 5fada73b71ff..2fb85bf75449 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -184,7 +184,7 @@ static void isci_port_link_up(struct isci_host *isci_host, sci_port_get_properties(iport, &properties); - if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) { + if (iphy->protocol == SAS_PROTOCOL_SATA) { u64 attached_sas_address; iphy->sas_phy.oob_mode = SATA_OOB_MODE; @@ -204,7 +204,7 @@ static void isci_port_link_up(struct isci_host *isci_host, memcpy(&iphy->sas_phy.attached_sas_addr, &attached_sas_address, sizeof(attached_sas_address)); - } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) { + } else if (iphy->protocol == SAS_PROTOCOL_SSP) { iphy->sas_phy.oob_mode = SAS_OOB_MODE; iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame); @@ -251,10 +251,10 @@ static void isci_port_link_down(struct isci_host *isci_host, if (isci_phy->sas_phy.port && isci_phy->sas_phy.port->num_phys == 1) { /* change the state for all devices on this port. The - * next task sent to this device will be returned as - * SAS_TASK_UNDELIVERED, and the scsi mid layer will - * remove the target - */ + * next task sent to this device will be returned as + * SAS_TASK_UNDELIVERED, and the scsi mid layer will + * remove the target + */ list_for_each_entry(isci_device, &isci_port->remote_dev_list, node) { @@ -517,7 +517,7 @@ void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_a */ iphy = sci_port_get_a_connected_phy(iport); if (iphy) { - if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) { + if (iphy->protocol != SAS_PROTOCOL_SATA) { sci_phy_get_attached_sas_address(iphy, sas); } else { sci_phy_get_sas_address(iphy, sas); @@ -624,7 +624,7 @@ static void sci_port_activate_phy(struct isci_port *iport, { struct isci_host *ihost = iport->owning_controller; - if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME)) + if (iphy->protocol != SAS_PROTOCOL_SATA && (flags & PF_RESUME)) sci_phy_resume(iphy); iport->active_phy_mask |= 1 << iphy->phy_index; @@ -751,12 +751,10 @@ static bool sci_port_is_wide(struct isci_port *iport) * wide ports and direct attached phys. Since there are no wide ported SATA * devices this could become an invalid port configuration. 
*/ -bool sci_port_link_detected( - struct isci_port *iport, - struct isci_phy *iphy) +bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy) { if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) && - (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) { + (iphy->protocol == SAS_PROTOCOL_SATA)) { if (sci_port_is_wide(iport)) { sci_port_invalid_link_up(iport, iphy); return false; @@ -1201,6 +1199,8 @@ enum sci_status sci_port_add_phy(struct isci_port *iport, enum sci_status status; enum sci_port_states state; + sci_port_bcn_enable(iport); + state = iport->sm.current_state_id; switch (state) { case SCI_PORT_STOPPED: { @@ -1548,6 +1548,29 @@ static void sci_port_failed_state_enter(struct sci_base_state_machine *sm) isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT); } +void sci_port_set_hang_detection_timeout(struct isci_port *iport, u32 timeout) +{ + int phy_index; + u32 phy_mask = iport->active_phy_mask; + + if (timeout) + ++iport->hang_detect_users; + else if (iport->hang_detect_users > 1) + --iport->hang_detect_users; + else + iport->hang_detect_users = 0; + + if (timeout || (iport->hang_detect_users == 0)) { + for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) { + if ((phy_mask >> phy_index) & 1) { + writel(timeout, + &iport->phy_table[phy_index] + ->link_layer_registers + ->link_layer_hang_detection_timeout); + } + } + } +} /* --------------------------------------------------------------------------- */ static const struct sci_base_state sci_port_state_table[] = { @@ -1596,6 +1619,7 @@ void sci_port_construct(struct isci_port *iport, u8 index, iport->started_request_count = 0; iport->assigned_device_count = 0; + iport->hang_detect_users = 0; iport->reserved_rni = SCU_DUMMY_INDEX; iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG; @@ -1608,13 +1632,6 @@ void sci_port_construct(struct isci_port *iport, u8 index, iport->phy_table[index] = NULL; } -void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index) -{ - INIT_LIST_HEAD(&iport->remote_dev_list); - INIT_LIST_HEAD(&iport->domain_dev_list); - iport->isci_host = ihost; -} - void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy) { struct isci_host *ihost = iport->owning_controller; @@ -1671,17 +1688,6 @@ int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *ipor __func__, iport, status); } - - /* If the hard reset for the port has failed, consider this - * the same as link failures on all phys in the port. 
- */ - if (ret != TMF_RESP_FUNC_COMPLETE) { - - dev_err(&ihost->pdev->dev, - "%s: iport = %p; hard reset failed " - "(0x%x) - driving explicit link fail for all phys\n", - __func__, iport, iport->hard_reset_status); - } return ret; } @@ -1740,7 +1746,7 @@ void isci_port_formed(struct asd_sas_phy *phy) struct isci_host *ihost = phy->ha->lldd_ha; struct isci_phy *iphy = to_iphy(phy); struct asd_sas_port *port = phy->port; - struct isci_port *iport; + struct isci_port *iport = NULL; unsigned long flags; int i; diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h index 6b56240c2051..861e8f72811b 100644 --- a/drivers/scsi/isci/port.h +++ b/drivers/scsi/isci/port.h @@ -97,7 +97,6 @@ enum isci_status { struct isci_port { struct isci_host *isci_host; struct list_head remote_dev_list; - struct list_head domain_dev_list; #define IPORT_RESET_PENDING 0 unsigned long state; enum sci_status hard_reset_status; @@ -112,6 +111,7 @@ struct isci_port { u16 reserved_tag; u32 started_request_count; u32 assigned_device_count; + u32 hang_detect_users; u32 not_ready_reason; struct isci_phy *phy_table[SCI_MAX_PHYS]; struct isci_host *owning_controller; @@ -270,14 +270,13 @@ void sci_port_get_attached_sas_address( struct isci_port *iport, struct sci_sas_address *sas_address); +void sci_port_set_hang_detection_timeout( + struct isci_port *isci_port, + u32 timeout); + void isci_port_formed(struct asd_sas_phy *); void isci_port_deformed(struct asd_sas_phy *); -void isci_port_init( - struct isci_port *port, - struct isci_host *host, - int index); - int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport, struct isci_phy *iphy); int isci_ata_check_ready(struct domain_device *dev); diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index 6d1e9544cbe5..cd962da4a57a 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -57,7 +57,7 @@ #define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10) #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10) -#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (250) +#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (1000) enum SCIC_SDS_APC_ACTIVITY { SCIC_SDS_APC_SKIP_PHY, @@ -472,13 +472,9 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost, * down event or a link up event where we can not yet tell to which a phy * belongs. 
*/ -static void sci_apc_agent_start_timer( - struct sci_port_configuration_agent *port_agent, - u32 timeout) +static void sci_apc_agent_start_timer(struct sci_port_configuration_agent *port_agent, + u32 timeout) { - if (port_agent->timer_pending) - sci_del_timer(&port_agent->timer); - port_agent->timer_pending = true; sci_mod_timer(&port_agent->timer, timeout); } @@ -697,6 +693,9 @@ static void apc_agent_timeout(unsigned long data) &ihost->phys[index], false); } + if (is_controller_start_complete(ihost)) + sci_controller_transition_to_ready(ihost, SCI_SUCCESS); + done: spin_unlock_irqrestore(&ihost->scic_lock, flags); } @@ -732,6 +731,11 @@ void sci_port_configuration_agent_construct( } } +bool is_port_config_apc(struct isci_host *ihost) +{ + return ihost->port_agent.link_up_handler == sci_apc_agent_link_up; +} + enum sci_status sci_port_configuration_agent_initialize( struct isci_host *ihost, struct sci_port_configuration_agent *port_agent) diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c index 9b8117b9d756..4d95654c3fd4 100644 --- a/drivers/scsi/isci/probe_roms.c +++ b/drivers/scsi/isci/probe_roms.c @@ -112,18 +112,6 @@ struct isci_orom *isci_request_oprom(struct pci_dev *pdev) return rom; } -enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, - struct isci_orom *orom, int scu_index) -{ - /* check for valid inputs */ - if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS || - scu_index > orom->hdr.num_elements || !oem) - return -EINVAL; - - *oem = orom->ctrl[scu_index]; - return 0; -} - struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw) { struct isci_orom *orom = NULL, *data; diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h index bb0e9d4d97c9..e08b578241f8 100644 --- a/drivers/scsi/isci/probe_roms.h +++ b/drivers/scsi/isci/probe_roms.h @@ -156,8 +156,6 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version); struct isci_orom; struct isci_orom *isci_request_oprom(struct pci_dev *pdev); -enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem, - struct isci_orom *orom, int scu_index); struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw); struct isci_orom *isci_get_efi_var(struct pci_dev *pdev); diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h index 7eb0ccd45fe6..97f3ceb8d724 100644 --- a/drivers/scsi/isci/registers.h +++ b/drivers/scsi/isci/registers.h @@ -1239,6 +1239,14 @@ struct scu_transport_layer_registers { #define SCU_SAS_LLCTL_GEN_BIT(name) \ SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name) +#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_DEFAULT (0xF0) +#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_EXTENDED (0x1FF) +#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_SHIFT (0) +#define SCU_SAS_LINK_LAYER_TXCOMSAS_NEGTIME_MASK (0x3FF) + +#define SCU_SAS_LLTXCOMSAS_GEN_VAL(name, value) \ + SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_TXCOMSAS_ ## name, value) + /* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */ #define SCU_PSZGCR_OFFSET 0x00E4 diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 8f501b0a81d6..c3aa6c5457b9 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -72,46 +72,11 @@ const char *dev_state_name(enum sci_remote_device_states state) } #undef C -/** - * isci_remote_device_not_ready() - This function is called by the ihost when - * the remote device is not ready. 
We mark the isci device as ready (not - * "ready_for_io") and signal the waiting proccess. - * @isci_host: This parameter specifies the isci host object. - * @isci_device: This parameter specifies the remote device - * - * sci_lock is held on entrance to this function. - */ -static void isci_remote_device_not_ready(struct isci_host *ihost, - struct isci_remote_device *idev, u32 reason) +enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, + enum sci_remote_node_suspension_reasons reason) { - struct isci_request *ireq; - - dev_dbg(&ihost->pdev->dev, - "%s: isci_device = %p\n", __func__, idev); - - switch (reason) { - case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED: - set_bit(IDEV_GONE, &idev->flags); - break; - case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED: - set_bit(IDEV_IO_NCQERROR, &idev->flags); - - /* Kill all outstanding requests for the device. */ - list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) { - - dev_dbg(&ihost->pdev->dev, - "%s: isci_device = %p request = %p\n", - __func__, idev, ireq); - - sci_controller_terminate_request(ihost, - idev, - ireq); - } - /* Fall through into the default case... */ - default: - clear_bit(IDEV_IO_READY, &idev->flags); - break; - } + return sci_remote_node_context_suspend(&idev->rnc, reason, + SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT); } /** @@ -133,6 +98,225 @@ static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote wake_up(&ihost->eventq); } +static enum sci_status sci_remote_device_terminate_req( + struct isci_host *ihost, + struct isci_remote_device *idev, + int check_abort, + struct isci_request *ireq) +{ + if (!test_bit(IREQ_ACTIVE, &ireq->flags) || + (ireq->target_device != idev) || + (check_abort && !test_bit(IREQ_PENDING_ABORT, &ireq->flags))) + return SCI_SUCCESS; + + dev_dbg(&ihost->pdev->dev, + "%s: idev=%p; flags=%lx; req=%p; req target=%p\n", + __func__, idev, idev->flags, ireq, ireq->target_device); + + set_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags); + + return sci_controller_terminate_request(ihost, idev, ireq); +} + +static enum sci_status sci_remote_device_terminate_reqs_checkabort( + struct isci_remote_device *idev, + int chk) +{ + struct isci_host *ihost = idev->owning_port->owning_controller; + enum sci_status status = SCI_SUCCESS; + u32 i; + + for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { + struct isci_request *ireq = ihost->reqs[i]; + enum sci_status s; + + s = sci_remote_device_terminate_req(ihost, idev, chk, ireq); + if (s != SCI_SUCCESS) + status = s; + } + return status; +} + +static bool isci_compare_suspendcount( + struct isci_remote_device *idev, + u32 localcount) +{ + smp_rmb(); + + /* Check for a change in the suspend count, or the RNC + * being destroyed. 
+ */ + return (localcount != idev->rnc.suspend_count) + || sci_remote_node_context_is_being_destroyed(&idev->rnc); +} + +static bool isci_check_reqterm( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq, + u32 localcount) +{ + unsigned long flags; + bool res; + + spin_lock_irqsave(&ihost->scic_lock, flags); + res = isci_compare_suspendcount(idev, localcount) + && !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + return res; +} + +static bool isci_check_devempty( + struct isci_host *ihost, + struct isci_remote_device *idev, + u32 localcount) +{ + unsigned long flags; + bool res; + + spin_lock_irqsave(&ihost->scic_lock, flags); + res = isci_compare_suspendcount(idev, localcount) + && idev->started_request_count == 0; + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + return res; +} + +enum sci_status isci_remote_device_terminate_requests( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + enum sci_status status = SCI_SUCCESS; + unsigned long flags; + u32 rnc_suspend_count; + + spin_lock_irqsave(&ihost->scic_lock, flags); + + if (isci_get_device(idev) == NULL) { + dev_dbg(&ihost->pdev->dev, "%s: failed isci_get_device(idev=%p)\n", + __func__, idev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + status = SCI_FAILURE; + } else { + /* If already suspended, don't wait for another suspension. */ + smp_rmb(); + rnc_suspend_count + = sci_remote_node_context_is_suspended(&idev->rnc) + ? 0 : idev->rnc.suspend_count; + + dev_dbg(&ihost->pdev->dev, + "%s: idev=%p, ireq=%p; started_request_count=%d, " + "rnc_suspend_count=%d, rnc.suspend_count=%d" + "about to wait\n", + __func__, idev, ireq, idev->started_request_count, + rnc_suspend_count, idev->rnc.suspend_count); + + #define MAX_SUSPEND_MSECS 10000 + if (ireq) { + /* Terminate a specific TC. */ + set_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags); + sci_remote_device_terminate_req(ihost, idev, 0, ireq); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + if (!wait_event_timeout(ihost->eventq, + isci_check_reqterm(ihost, idev, ireq, + rnc_suspend_count), + msecs_to_jiffies(MAX_SUSPEND_MSECS))) { + + dev_warn(&ihost->pdev->dev, "%s host%d timeout single\n", + __func__, ihost->id); + dev_dbg(&ihost->pdev->dev, + "%s: ******* Timeout waiting for " + "suspend; idev=%p, current state %s; " + "started_request_count=%d, flags=%lx\n\t" + "rnc_suspend_count=%d, rnc.suspend_count=%d " + "RNC: current state %s, current " + "suspend_type %x dest state %d;\n" + "ireq=%p, ireq->flags = %lx\n", + __func__, idev, + dev_state_name(idev->sm.current_state_id), + idev->started_request_count, idev->flags, + rnc_suspend_count, idev->rnc.suspend_count, + rnc_state_name(idev->rnc.sm.current_state_id), + idev->rnc.suspend_type, + idev->rnc.destination_state, + ireq, ireq->flags); + } + spin_lock_irqsave(&ihost->scic_lock, flags); + clear_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags); + if (!test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) + isci_free_tag(ihost, ireq->io_tag); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + } else { + /* Terminate all TCs. 
*/ + sci_remote_device_terminate_requests(idev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + if (!wait_event_timeout(ihost->eventq, + isci_check_devempty(ihost, idev, + rnc_suspend_count), + msecs_to_jiffies(MAX_SUSPEND_MSECS))) { + + dev_warn(&ihost->pdev->dev, "%s host%d timeout all\n", + __func__, ihost->id); + dev_dbg(&ihost->pdev->dev, + "%s: ******* Timeout waiting for " + "suspend; idev=%p, current state %s; " + "started_request_count=%d, flags=%lx\n\t" + "rnc_suspend_count=%d, " + "RNC: current state %s, " + "rnc.suspend_count=%d, current " + "suspend_type %x dest state %d\n", + __func__, idev, + dev_state_name(idev->sm.current_state_id), + idev->started_request_count, idev->flags, + rnc_suspend_count, + rnc_state_name(idev->rnc.sm.current_state_id), + idev->rnc.suspend_count, + idev->rnc.suspend_type, + idev->rnc.destination_state); + } + } + dev_dbg(&ihost->pdev->dev, "%s: idev=%p, wait done\n", + __func__, idev); + isci_put_device(idev); + } + return status; +} + +/** +* isci_remote_device_not_ready() - This function is called by the ihost when +* the remote device is not ready. We mark the isci device as ready (not +* "ready_for_io") and signal the waiting proccess. +* @isci_host: This parameter specifies the isci host object. +* @isci_device: This parameter specifies the remote device +* +* sci_lock is held on entrance to this function. +*/ +static void isci_remote_device_not_ready(struct isci_host *ihost, + struct isci_remote_device *idev, + u32 reason) +{ + dev_dbg(&ihost->pdev->dev, + "%s: isci_device = %p; reason = %d\n", __func__, idev, reason); + + switch (reason) { + case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED: + set_bit(IDEV_IO_NCQERROR, &idev->flags); + + /* Suspend the remote device so the I/O can be terminated. */ + sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL); + + /* Kill all outstanding requests for the device. */ + sci_remote_device_terminate_requests(idev); + + /* Fall through into the default case... */ + default: + clear_bit(IDEV_IO_READY, &idev->flags); + break; + } +} + /* called once the remote node context is ready to be freed. * The remote device can now report that its stop operation is complete. 
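A minimal sketch of the terminate-then-wait idiom that isci_remote_device_terminate_requests() is built around: mark the work for termination while the lock is held, drop the lock, and sleep on a wait queue whose predicate re-acquires the lock before inspecting shared state. All names here (my_ctrl, my_all_terminated, my_wait_for_termination, my_one_terminated) are hypothetical stand-ins, not isci functions:

#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

struct my_ctrl {
	spinlock_t lock;		/* spin_lock_init() at setup time */
	wait_queue_head_t eventq;	/* init_waitqueue_head() at setup time */
	unsigned int pending;		/* work items not yet terminated */
};

/* Predicate for wait_event_timeout(): it runs without the lock held,
 * so it must take the lock itself before looking at shared state.
 */
static bool my_all_terminated(struct my_ctrl *c)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&c->lock, flags);
	done = (c->pending == 0);
	spin_unlock_irqrestore(&c->lock, flags);
	return done;
}

/* Waiter: returns true if everything terminated before the timeout. */
static bool my_wait_for_termination(struct my_ctrl *c, unsigned int msecs)
{
	return wait_event_timeout(c->eventq, my_all_terminated(c),
				  msecs_to_jiffies(msecs)) != 0;
}

/* Completion side: update the count under the lock, then wake waiters. */
static void my_one_terminated(struct my_ctrl *c)
{
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	if (c->pending)
		c->pending--;
	spin_unlock_irqrestore(&c->lock, flags);
	wake_up(&c->eventq);
}

The driver additionally brackets the whole sequence with isci_get_device()/isci_put_device(), so the remote device cannot be freed while the caller sleeps outside scic_lock; the kref_get()-based helper for that is added to remote_device.h further down in this patch.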
none */ @@ -144,26 +328,10 @@ static void rnc_destruct_done(void *_dev) sci_change_state(&idev->sm, SCI_DEV_STOPPED); } -static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev) +enum sci_status sci_remote_device_terminate_requests( + struct isci_remote_device *idev) { - struct isci_host *ihost = idev->owning_port->owning_controller; - enum sci_status status = SCI_SUCCESS; - u32 i; - - for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { - struct isci_request *ireq = ihost->reqs[i]; - enum sci_status s; - - if (!test_bit(IREQ_ACTIVE, &ireq->flags) || - ireq->target_device != idev) - continue; - - s = sci_controller_terminate_request(ihost, idev, ireq); - if (s != SCI_SUCCESS) - status = s; - } - - return status; + return sci_remote_device_terminate_reqs_checkabort(idev, 0); } enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, @@ -201,13 +369,16 @@ enum sci_status sci_remote_device_stop(struct isci_remote_device *idev, case SCI_SMP_DEV_IDLE: case SCI_SMP_DEV_CMD: sci_change_state(sm, SCI_DEV_STOPPING); - if (idev->started_request_count == 0) { + if (idev->started_request_count == 0) sci_remote_node_context_destruct(&idev->rnc, - rnc_destruct_done, idev); - return SCI_SUCCESS; - } else - return sci_remote_device_terminate_requests(idev); - break; + rnc_destruct_done, + idev); + else { + sci_remote_device_suspend( + idev, SCI_SW_SUSPEND_LINKHANG_DETECT); + sci_remote_device_terminate_requests(idev); + } + return SCI_SUCCESS; case SCI_DEV_STOPPING: /* All requests should have been terminated, but if there is an * attempt to stop a device already in the stopping state, then @@ -265,22 +436,6 @@ enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev return SCI_SUCCESS; } -enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, - u32 suspend_type) -{ - struct sci_base_state_machine *sm = &idev->sm; - enum sci_remote_device_states state = sm->current_state_id; - - if (state != SCI_STP_DEV_CMD) { - dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %s\n", - __func__, dev_state_name(state)); - return SCI_FAILURE_INVALID_STATE; - } - - return sci_remote_node_context_suspend(&idev->rnc, - suspend_type, NULL, NULL); -} - enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev, u32 frame_index) { @@ -412,9 +567,9 @@ static void atapi_remote_device_resume_done(void *_dev) enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, u32 event_code) { + enum sci_status status; struct sci_base_state_machine *sm = &idev->sm; enum sci_remote_device_states state = sm->current_state_id; - enum sci_status status; switch (scu_get_event_type(event_code)) { case SCU_EVENT_TYPE_RNC_OPS_MISC: @@ -427,9 +582,7 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, status = SCI_SUCCESS; /* Suspend the associated RNC */ - sci_remote_node_context_suspend(&idev->rnc, - SCI_SOFTWARE_SUSPENSION, - NULL, NULL); + sci_remote_device_suspend(idev, SCI_SW_SUSPEND_NORMAL); dev_dbg(scirdev_to_dev(idev), "%s: device: %p event code: %x: %s\n", @@ -455,6 +608,10 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev, if (status != SCI_SUCCESS) return status; + /* Decode device-specific states that may require an RNC resume during + * normal operation. When the abort path is active, these resumes are + * managed when the abort path exits. + */ if (state == SCI_STP_DEV_ATAPI_ERROR) { /* For ATAPI error state resume the RNC right away. 
*/ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX || @@ -743,10 +900,6 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, if (status != SCI_SUCCESS) return status; - status = sci_remote_node_context_start_task(&idev->rnc, ireq); - if (status != SCI_SUCCESS) - goto out; - status = sci_request_start(ireq); if (status != SCI_SUCCESS) goto out; @@ -765,11 +918,11 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, * the correct action when the remote node context is suspended * and later resumed. */ - sci_remote_node_context_suspend(&idev->rnc, - SCI_SOFTWARE_SUSPENSION, NULL, NULL); - sci_remote_node_context_resume(&idev->rnc, - sci_remote_device_continue_request, - idev); + sci_remote_device_suspend(idev, + SCI_SW_SUSPEND_LINKHANG_DETECT); + + status = sci_remote_node_context_start_task(&idev->rnc, ireq, + sci_remote_device_continue_request, idev); out: sci_remote_device_start_request(idev, ireq, status); @@ -783,7 +936,9 @@ enum sci_status sci_remote_device_start_task(struct isci_host *ihost, if (status != SCI_SUCCESS) return status; - status = sci_remote_node_context_start_task(&idev->rnc, ireq); + /* Resume the RNC as needed: */ + status = sci_remote_node_context_start_task(&idev->rnc, ireq, + NULL, NULL); if (status != SCI_SUCCESS) break; @@ -892,7 +1047,7 @@ static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_ * here should go through isci_remote_device_nuke_requests. * If we hit this condition, we will need a way to complete * io requests in process */ - BUG_ON(!list_empty(&idev->reqs_in_process)); + BUG_ON(idev->started_request_count > 0); sci_remote_device_destruct(idev); list_del_init(&idev->node); @@ -954,14 +1109,21 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; - sci_remote_node_context_suspend( - &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL); + dev_dbg(&ihost->pdev->dev, + "%s: isci_device = %p\n", __func__, idev); + + sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); } static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm) { struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); + struct isci_host *ihost = idev->owning_port->owning_controller; + + dev_dbg(&ihost->pdev->dev, + "%s: isci_device = %p\n", __func__, idev); sci_remote_node_context_resume(&idev->rnc, NULL, NULL); } @@ -1113,33 +1275,20 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport, { enum sci_status status; struct sci_port_properties properties; - struct domain_device *dev = idev->domain_dev; sci_remote_device_construct(iport, idev); - /* - * This information is request to determine how many remote node context - * entries will be needed to store the remote node. - */ - idev->is_direct_attached = true; - sci_port_get_properties(iport, &properties); /* Get accurate port width from port's phy mask for a DA device. 
*/ idev->device_port_width = hweight32(properties.phy_mask); status = sci_controller_allocate_remote_node_context(iport->owning_controller, - idev, - &idev->rnc.remote_node_index); + idev, + &idev->rnc.remote_node_index); if (status != SCI_SUCCESS) return status; - if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || - (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev)) - /* pass */; - else - return SCI_FAILURE_UNSUPPORTED_PROTOCOL; - idev->connection_rate = sci_port_get_max_allowed_speed(iport); return SCI_SUCCESS; @@ -1171,19 +1320,13 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, if (status != SCI_SUCCESS) return status; - if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV || - (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev)) - /* pass */; - else - return SCI_FAILURE_UNSUPPORTED_PROTOCOL; - - /* - * For SAS-2 the physical link rate is actually a logical link + /* For SAS-2 the physical link rate is actually a logical link * rate that incorporates multiplexing. The SCU doesn't * incorporate multiplexing and for the purposes of the * connection the logical link rate is that same as the * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay - * one another, so this code works for both situations. */ + * one another, so this code works for both situations. + */ idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport), dev->linkrate); @@ -1193,6 +1336,105 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, return SCI_SUCCESS; } +enum sci_status sci_remote_device_resume( + struct isci_remote_device *idev, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p) +{ + enum sci_status status; + + status = sci_remote_node_context_resume(&idev->rnc, cb_fn, cb_p); + if (status != SCI_SUCCESS) + dev_dbg(scirdev_to_dev(idev), "%s: failed to resume: %d\n", + __func__, status); + return status; +} + +static void isci_remote_device_resume_from_abort_complete(void *cbparam) +{ + struct isci_remote_device *idev = cbparam; + struct isci_host *ihost = idev->owning_port->owning_controller; + scics_sds_remote_node_context_callback abort_resume_cb = + idev->abort_resume_cb; + + dev_dbg(scirdev_to_dev(idev), "%s: passing-along resume: %p\n", + __func__, abort_resume_cb); + + if (abort_resume_cb != NULL) { + idev->abort_resume_cb = NULL; + abort_resume_cb(idev->abort_resume_cbparam); + } + clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); + wake_up(&ihost->eventq); +} + +static bool isci_remote_device_test_resume_done( + struct isci_host *ihost, + struct isci_remote_device *idev) +{ + unsigned long flags; + bool done; + + spin_lock_irqsave(&ihost->scic_lock, flags); + done = !test_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags) + || test_bit(IDEV_STOP_PENDING, &idev->flags) + || sci_remote_node_context_is_being_destroyed(&idev->rnc); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + return done; +} + +void isci_remote_device_wait_for_resume_from_abort( + struct isci_host *ihost, + struct isci_remote_device *idev) +{ + dev_dbg(&ihost->pdev->dev, "%s: starting resume wait: %p\n", + __func__, idev); + + #define MAX_RESUME_MSECS 10000 + if (!wait_event_timeout(ihost->eventq, + isci_remote_device_test_resume_done(ihost, idev), + msecs_to_jiffies(MAX_RESUME_MSECS))) { + + dev_warn(&ihost->pdev->dev, "%s: #### Timeout waiting for " + "resume: %p\n", __func__, idev); + } + clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); + + dev_dbg(&ihost->pdev->dev, "%s: resume wait 
done: %p\n", + __func__, idev); +} + +enum sci_status isci_remote_device_resume_from_abort( + struct isci_host *ihost, + struct isci_remote_device *idev) +{ + unsigned long flags; + enum sci_status status = SCI_SUCCESS; + int destroyed; + + spin_lock_irqsave(&ihost->scic_lock, flags); + /* Preserve any current resume callbacks, for instance from other + * resumptions. + */ + idev->abort_resume_cb = idev->rnc.user_callback; + idev->abort_resume_cbparam = idev->rnc.user_cookie; + set_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); + clear_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); + destroyed = sci_remote_node_context_is_being_destroyed(&idev->rnc); + if (!destroyed) + status = sci_remote_device_resume( + idev, isci_remote_device_resume_from_abort_complete, + idev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + if (!destroyed && (status == SCI_SUCCESS)) + isci_remote_device_wait_for_resume_from_abort(ihost, idev); + else + clear_bit(IDEV_ABORT_PATH_RESUME_PENDING, &idev->flags); + + return status; +} + /** * sci_remote_device_start() - This method will start the supplied remote * device. This method enables normal IO requests to flow through to the @@ -1207,7 +1449,7 @@ static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport, * the device when there have been no phys added to it. */ static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, - u32 timeout) + u32 timeout) { struct sci_base_state_machine *sm = &idev->sm; enum sci_remote_device_states state = sm->current_state_id; @@ -1219,9 +1461,8 @@ static enum sci_status sci_remote_device_start(struct isci_remote_device *idev, return SCI_FAILURE_INVALID_STATE; } - status = sci_remote_node_context_resume(&idev->rnc, - remote_device_resume_done, - idev); + status = sci_remote_device_resume(idev, remote_device_resume_done, + idev); if (status != SCI_SUCCESS) return status; @@ -1259,20 +1500,6 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport, return status; } -void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev) -{ - DECLARE_COMPLETION_ONSTACK(aborted_task_completion); - - dev_dbg(&ihost->pdev->dev, - "%s: idev = %p\n", __func__, idev); - - /* Cleanup all requests pending for this device. */ - isci_terminate_pending_requests(ihost, idev); - - dev_dbg(&ihost->pdev->dev, - "%s: idev = %p, done\n", __func__, idev); -} - /** * This function builds the isci_remote_device when a libsas dev_found message * is received. @@ -1297,10 +1524,6 @@ isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport) dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__); return NULL; } - - if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n")) - return NULL; - if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n")) return NULL; @@ -1342,14 +1565,8 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem spin_lock_irqsave(&ihost->scic_lock, flags); idev->domain_dev->lldd_dev = NULL; /* disable new lookups */ set_bit(IDEV_GONE, &idev->flags); - spin_unlock_irqrestore(&ihost->scic_lock, flags); - - /* Kill all outstanding requests. 
*/ - isci_remote_device_nuke_requests(ihost, idev); set_bit(IDEV_STOP_PENDING, &idev->flags); - - spin_lock_irqsave(&ihost->scic_lock, flags); status = sci_remote_device_stop(idev, 50); spin_unlock_irqrestore(&ihost->scic_lock, flags); @@ -1359,6 +1576,9 @@ enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_rem else wait_for_device_stop(ihost, idev); + dev_dbg(&ihost->pdev->dev, + "%s: isci_device = %p, waiting done.\n", __func__, idev); + return status; } @@ -1434,3 +1654,73 @@ int isci_remote_device_found(struct domain_device *dev) return status == SCI_SUCCESS ? 0 : -ENODEV; } + +enum sci_status isci_remote_device_suspend_terminate( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq) +{ + unsigned long flags; + enum sci_status status; + + /* Put the device into suspension. */ + spin_lock_irqsave(&ihost->scic_lock, flags); + set_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags); + sci_remote_device_suspend(idev, SCI_SW_SUSPEND_LINKHANG_DETECT); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + /* Terminate and wait for the completions. */ + status = isci_remote_device_terminate_requests(ihost, idev, ireq); + if (status != SCI_SUCCESS) + dev_dbg(&ihost->pdev->dev, + "%s: isci_remote_device_terminate_requests(%p) " + "returned %d!\n", + __func__, idev, status); + + /* NOTE: RNC resumption is left to the caller! */ + return status; +} + +int isci_remote_device_is_safe_to_abort( + struct isci_remote_device *idev) +{ + return sci_remote_node_context_is_safe_to_abort(&idev->rnc); +} + +enum sci_status sci_remote_device_abort_requests_pending_abort( + struct isci_remote_device *idev) +{ + return sci_remote_device_terminate_reqs_checkabort(idev, 1); +} + +enum sci_status isci_remote_device_reset_complete( + struct isci_host *ihost, + struct isci_remote_device *idev) +{ + unsigned long flags; + enum sci_status status; + + spin_lock_irqsave(&ihost->scic_lock, flags); + status = sci_remote_device_reset_complete(idev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + + return status; +} + +void isci_dev_set_hang_detection_timeout( + struct isci_remote_device *idev, + u32 timeout) +{ + if (dev_is_sata(idev->domain_dev)) { + if (timeout) { + if (test_and_set_bit(IDEV_RNC_LLHANG_ENABLED, + &idev->flags)) + return; /* Already enabled. */ + } else if (!test_and_clear_bit(IDEV_RNC_LLHANG_ENABLED, + &idev->flags)) + return; /* Not enabled. 
*/ + + sci_port_set_hang_detection_timeout(idev->owning_port, + timeout); + } +} diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h index 58637ee08f55..7674caae1d88 100644 --- a/drivers/scsi/isci/remote_device.h +++ b/drivers/scsi/isci/remote_device.h @@ -85,27 +85,38 @@ struct isci_remote_device { #define IDEV_GONE 3 #define IDEV_IO_READY 4 #define IDEV_IO_NCQERROR 5 + #define IDEV_RNC_LLHANG_ENABLED 6 + #define IDEV_ABORT_PATH_ACTIVE 7 + #define IDEV_ABORT_PATH_RESUME_PENDING 8 unsigned long flags; struct kref kref; struct isci_port *isci_port; struct domain_device *domain_dev; struct list_head node; - struct list_head reqs_in_process; struct sci_base_state_machine sm; u32 device_port_width; enum sas_linkrate connection_rate; - bool is_direct_attached; struct isci_port *owning_port; struct sci_remote_node_context rnc; /* XXX unify with device reference counting and delete */ u32 started_request_count; struct isci_request *working_request; u32 not_ready_reason; + scics_sds_remote_node_context_callback abort_resume_cb; + void *abort_resume_cbparam; }; #define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000 /* device reference routines must be called under sci_lock */ +static inline struct isci_remote_device *isci_get_device( + struct isci_remote_device *idev) +{ + if (idev) + kref_get(&idev->kref); + return idev; +} + static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev) { struct isci_remote_device *idev = dev->lldd_dev; @@ -302,6 +313,8 @@ static inline void sci_remote_device_decrement_request_count(struct isci_remote_ idev->started_request_count--; } +void isci_dev_set_hang_detection_timeout(struct isci_remote_device *idev, u32 timeout); + enum sci_status sci_remote_device_frame_handler( struct isci_remote_device *idev, u32 frame_index); @@ -325,12 +338,50 @@ enum sci_status sci_remote_device_complete_io( struct isci_remote_device *idev, struct isci_request *ireq); -enum sci_status sci_remote_device_suspend( - struct isci_remote_device *idev, - u32 suspend_type); - void sci_remote_device_post_request( struct isci_remote_device *idev, u32 request); +enum sci_status sci_remote_device_terminate_requests( + struct isci_remote_device *idev); + +int isci_remote_device_is_safe_to_abort( + struct isci_remote_device *idev); + +enum sci_status +sci_remote_device_abort_requests_pending_abort( + struct isci_remote_device *idev); + +enum sci_status isci_remote_device_suspend( + struct isci_host *ihost, + struct isci_remote_device *idev); + +enum sci_status sci_remote_device_resume( + struct isci_remote_device *idev, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p); + +enum sci_status isci_remote_device_resume_from_abort( + struct isci_host *ihost, + struct isci_remote_device *idev); + +enum sci_status isci_remote_device_reset( + struct isci_host *ihost, + struct isci_remote_device *idev); + +enum sci_status isci_remote_device_reset_complete( + struct isci_host *ihost, + struct isci_remote_device *idev); + +enum sci_status isci_remote_device_suspend_terminate( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); + +enum sci_status isci_remote_device_terminate_requests( + struct isci_host *ihost, + struct isci_remote_device *idev, + struct isci_request *ireq); +enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev, + enum sci_remote_node_suspension_reasons reason); #endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */ diff --git a/drivers/scsi/isci/remote_node_context.c 
b/drivers/scsi/isci/remote_node_context.c index 3a9463481f38..1910100638a2 100644 --- a/drivers/scsi/isci/remote_node_context.c +++ b/drivers/scsi/isci/remote_node_context.c @@ -52,7 +52,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ - +#include #include "host.h" #include "isci.h" #include "remote_device.h" @@ -90,6 +90,15 @@ bool sci_remote_node_context_is_ready( return false; } +bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc) +{ + u32 current_state = sci_rnc->sm.current_state_id; + + if (current_state == SCI_RNC_TX_RX_SUSPENDED) + return true; + return false; +} + static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) { if (id < ihost->remote_node_entries && @@ -131,7 +140,7 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont rnc->ssp.arbitration_wait_time = 0; - if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { + if (dev_is_sata(dev)) { rnc->ssp.connection_occupancy_timeout = ihost->user_parameters.stp_max_occupancy_timeout; rnc->ssp.connection_inactivity_timeout = @@ -151,7 +160,6 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont rnc->ssp.oaf_source_zone_group = 0; rnc->ssp.oaf_more_compatibility_features = 0; } - /** * * @sci_rnc: @@ -165,23 +173,30 @@ static void sci_remote_node_context_construct_buffer(struct sci_remote_node_cont static void sci_remote_node_context_setup_to_resume( struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, - void *callback_parameter) + void *callback_parameter, + enum sci_remote_node_context_destination_state dest_param) { - if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) { - sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY; - sci_rnc->user_callback = callback; - sci_rnc->user_cookie = callback_parameter; + if (sci_rnc->destination_state != RNC_DEST_FINAL) { + sci_rnc->destination_state = dest_param; + if (callback != NULL) { + sci_rnc->user_callback = callback; + sci_rnc->user_cookie = callback_parameter; + } } } -static void sci_remote_node_context_setup_to_destory( +static void sci_remote_node_context_setup_to_destroy( struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, void *callback_parameter) { - sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL; + struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc)); + + sci_rnc->destination_state = RNC_DEST_FINAL; sci_rnc->user_callback = callback; sci_rnc->user_cookie = callback_parameter; + + wake_up(&ihost->eventq); } /** @@ -203,9 +218,19 @@ static void sci_remote_node_context_notify_user( static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) { - if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) + switch (rnc->destination_state) { + case RNC_DEST_READY: + case RNC_DEST_SUSPENDED_RESUME: + rnc->destination_state = RNC_DEST_READY; + /* Fall through... 
*/ + case RNC_DEST_FINAL: sci_remote_node_context_resume(rnc, rnc->user_callback, - rnc->user_cookie); + rnc->user_cookie); + break; + default: + rnc->destination_state = RNC_DEST_UNSPECIFIED; + break; + } } static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) @@ -219,13 +244,12 @@ static void sci_remote_node_context_validate_context_buffer(struct sci_remote_no rnc_buffer->ssp.is_valid = true; - if (!idev->is_direct_attached && - (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) { + if (dev_is_sata(dev) && dev->parent) { sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); } else { sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); - if (idev->is_direct_attached) + if (!dev->parent) sci_port_setup_transports(idev->owning_port, sci_rnc->remote_node_index); } @@ -248,13 +272,18 @@ static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_ static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct isci_remote_device *idev = rnc_to_dev(rnc); + struct isci_host *ihost = idev->owning_port->owning_controller; /* Check to see if we have gotten back to the initial state because * someone requested to destroy the remote node context object. */ if (sm->previous_state_id == SCI_RNC_INVALIDATING) { - rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; + rnc->destination_state = RNC_DEST_UNSPECIFIED; sci_remote_node_context_notify_user(rnc); + + smp_wmb(); + wake_up(&ihost->eventq); } } @@ -269,6 +298,8 @@ static void sci_remote_node_context_invalidating_state_enter(struct sci_base_sta { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + /* Terminate all outstanding requests. */ + sci_remote_device_terminate_requests(rnc_to_dev(rnc)); sci_remote_node_context_invalidate_context_buffer(rnc); } @@ -287,10 +318,8 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m * resume because of a target reset we also need to update * the STPTLDARNI register with the RNi of the device */ - if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && - idev->is_direct_attached) - sci_port_setup_transports(idev->owning_port, - rnc->remote_node_index); + if (dev_is_sata(dev) && !dev->parent) + sci_port_setup_transports(idev->owning_port, rnc->remote_node_index); sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); } @@ -298,10 +327,22 @@ static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_m static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + enum sci_remote_node_context_destination_state dest_select; + int tell_user = 1; - rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; + dest_select = rnc->destination_state; + rnc->destination_state = RNC_DEST_UNSPECIFIED; - if (rnc->user_callback) + if ((dest_select == RNC_DEST_SUSPENDED) || + (dest_select == RNC_DEST_SUSPENDED_RESUME)) { + sci_remote_node_context_suspend( + rnc, rnc->suspend_reason, + SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT); + + if (dest_select == RNC_DEST_SUSPENDED_RESUME) + tell_user = 0; /* Wait until ready again. 
*/ + } + if (tell_user) sci_remote_node_context_notify_user(rnc); } @@ -315,10 +356,34 @@ static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_sta static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); + struct isci_remote_device *idev = rnc_to_dev(rnc); + struct isci_host *ihost = idev->owning_port->owning_controller; + u32 new_count = rnc->suspend_count + 1; + if (new_count == 0) + rnc->suspend_count = 1; + else + rnc->suspend_count = new_count; + smp_wmb(); + + /* Terminate outstanding requests pending abort. */ + sci_remote_device_abort_requests_pending_abort(idev); + + wake_up(&ihost->eventq); sci_remote_node_context_continue_state_transitions(rnc); } +static void sci_remote_node_context_await_suspend_state_exit( + struct sci_base_state_machine *sm) +{ + struct sci_remote_node_context *rnc + = container_of(sm, typeof(*rnc), sm); + struct isci_remote_device *idev = rnc_to_dev(rnc); + + if (dev_is_sata(idev->domain_dev)) + isci_dev_set_hang_detection_timeout(idev, 0); +} + static const struct sci_base_state sci_remote_node_context_state_table[] = { [SCI_RNC_INITIAL] = { .enter_state = sci_remote_node_context_initial_state_enter, @@ -341,7 +406,9 @@ static const struct sci_base_state sci_remote_node_context_state_table[] = { [SCI_RNC_TX_RX_SUSPENDED] = { .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, }, - [SCI_RNC_AWAIT_SUSPENSION] = { }, + [SCI_RNC_AWAIT_SUSPENSION] = { + .exit_state = sci_remote_node_context_await_suspend_state_exit, + }, }; void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, @@ -350,7 +417,7 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, memset(rnc, 0, sizeof(struct sci_remote_node_context)); rnc->remote_node_index = remote_node_index; - rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; + rnc->destination_state = RNC_DEST_UNSPECIFIED; sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); } @@ -359,6 +426,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con u32 event_code) { enum scis_sds_remote_node_context_states state; + u32 next_state; state = sci_rnc->sm.current_state_id; switch (state) { @@ -373,18 +441,18 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con break; case SCI_RNC_INVALIDATING: if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { - if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) - state = SCI_RNC_INITIAL; + if (sci_rnc->destination_state == RNC_DEST_FINAL) + next_state = SCI_RNC_INITIAL; else - state = SCI_RNC_POSTING; - sci_change_state(&sci_rnc->sm, state); + next_state = SCI_RNC_POSTING; + sci_change_state(&sci_rnc->sm, next_state); } else { switch (scu_get_event_type(event_code)) { case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: /* We really dont care if the hardware is going to suspend * the device since it's being invalidated anyway */ - dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: SCIC Remote Node Context 0x%p was " "suspeneded by hardware while being " "invalidated.\n", __func__, sci_rnc); @@ -403,7 +471,7 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: /* We really dont care if the hardware is going to 
suspend * the device since it's being resumed anyway */ - dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: SCIC Remote Node Context 0x%p was " "suspeneded by hardware while being resumed.\n", __func__, sci_rnc); @@ -417,11 +485,11 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con switch (scu_get_event_type(event_code)) { case SCU_EVENT_TL_RNC_SUSPEND_TX: sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); - sci_rnc->suspension_code = scu_get_event_specifier(event_code); + sci_rnc->suspend_type = scu_get_event_type(event_code); break; case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); - sci_rnc->suspension_code = scu_get_event_specifier(event_code); + sci_rnc->suspend_type = scu_get_event_type(event_code); break; default: goto out; @@ -430,27 +498,29 @@ enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_con case SCI_RNC_AWAIT_SUSPENSION: switch (scu_get_event_type(event_code)) { case SCU_EVENT_TL_RNC_SUSPEND_TX: - sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); - sci_rnc->suspension_code = scu_get_event_specifier(event_code); + next_state = SCI_RNC_TX_SUSPENDED; break; case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: - sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); - sci_rnc->suspension_code = scu_get_event_specifier(event_code); + next_state = SCI_RNC_TX_RX_SUSPENDED; break; default: goto out; } + if (sci_rnc->suspend_type == scu_get_event_type(event_code)) + sci_change_state(&sci_rnc->sm, next_state); break; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), - "%s: invalid state %d\n", __func__, state); + "%s: invalid state: %s\n", __func__, + rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } return SCI_SUCCESS; out: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), - "%s: code: %#x state: %d\n", __func__, event_code, state); + "%s: code: %#x state: %s\n", __func__, event_code, + rnc_state_name(state)); return SCI_FAILURE; } @@ -464,20 +534,23 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_INVALIDATING: - sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); return SCI_SUCCESS; case SCI_RNC_POSTING: case SCI_RNC_RESUMING: case SCI_RNC_READY: case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: - case SCI_RNC_AWAIT_SUSPENSION: - sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); return SCI_SUCCESS; + case SCI_RNC_AWAIT_SUSPENSION: + sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); + return SCI_SUCCESS; case SCI_RNC_INITIAL: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), - "%s: invalid state %d\n", __func__, state); + "%s: invalid state: %s\n", __func__, + rnc_state_name(state)); /* We have decided that the destruct request on the remote node context * can not fail since it is either in the initial/destroyed state or is * can be destroyed. 
@@ -485,35 +558,101 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context return SCI_SUCCESS; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), - "%s: invalid state %d\n", __func__, state); + "%s: invalid state %s\n", __func__, + rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } -enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, - u32 suspend_type, - scics_sds_remote_node_context_callback cb_fn, - void *cb_p) +enum sci_status sci_remote_node_context_suspend( + struct sci_remote_node_context *sci_rnc, + enum sci_remote_node_suspension_reasons suspend_reason, + u32 suspend_type) { - enum scis_sds_remote_node_context_states state; + enum scis_sds_remote_node_context_states state + = sci_rnc->sm.current_state_id; + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); + enum sci_status status = SCI_FAILURE_INVALID_STATE; + enum sci_remote_node_context_destination_state dest_param = + RNC_DEST_UNSPECIFIED; - state = sci_rnc->sm.current_state_id; - if (state != SCI_RNC_READY) { + dev_dbg(scirdev_to_dev(idev), + "%s: current state %s, current suspend_type %x dest state %d," + " arg suspend_reason %d, arg suspend_type %x", + __func__, rnc_state_name(state), sci_rnc->suspend_type, + sci_rnc->destination_state, suspend_reason, + suspend_type); + + /* Disable automatic state continuations if explicitly suspending. */ + if ((suspend_reason == SCI_HW_SUSPEND) || + (sci_rnc->destination_state == RNC_DEST_FINAL)) + dest_param = sci_rnc->destination_state; + + switch (state) { + case SCI_RNC_READY: + break; + case SCI_RNC_INVALIDATING: + if (sci_rnc->destination_state == RNC_DEST_FINAL) { + dev_warn(scirdev_to_dev(idev), + "%s: already destroying %p\n", + __func__, sci_rnc); + return SCI_FAILURE_INVALID_STATE; + } + /* Fall through and handle like SCI_RNC_POSTING */ + case SCI_RNC_RESUMING: + /* Fall through and handle like SCI_RNC_POSTING */ + case SCI_RNC_POSTING: + /* Set the destination state to AWAIT - this signals the + * entry into the SCI_RNC_READY state that a suspension + * needs to be done immediately. + */ + if (sci_rnc->destination_state != RNC_DEST_FINAL) + sci_rnc->destination_state = RNC_DEST_SUSPENDED; + sci_rnc->suspend_type = suspend_type; + sci_rnc->suspend_reason = suspend_reason; + return SCI_SUCCESS; + + case SCI_RNC_TX_SUSPENDED: + if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX) + status = SCI_SUCCESS; + break; + case SCI_RNC_TX_RX_SUSPENDED: + if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) + status = SCI_SUCCESS; + break; + case SCI_RNC_AWAIT_SUSPENSION: + if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) + || (suspend_type == sci_rnc->suspend_type)) + return SCI_SUCCESS; + break; + default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), - "%s: invalid state %d\n", __func__, state); + "%s: invalid state %s\n", __func__, + rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } + sci_rnc->destination_state = dest_param; + sci_rnc->suspend_type = suspend_type; + sci_rnc->suspend_reason = suspend_reason; - sci_rnc->user_callback = cb_fn; - sci_rnc->user_cookie = cb_p; - sci_rnc->suspension_code = suspend_type; + if (status == SCI_SUCCESS) { /* Already in the destination state? */ + struct isci_host *ihost = idev->owning_port->owning_controller; - if (suspend_type == SCI_SOFTWARE_SUSPENSION) { - sci_remote_device_post_request(rnc_to_dev(sci_rnc), - SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX); + wake_up_all(&ihost->eventq); /* Let observers look. 
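sci_remote_node_context_suspend() in this hunk no longer fails when the RNC is mid-transition (posting, resuming, invalidating); it records the desired end state in destination_state and lets the READY entry handler act on it. A toy sketch of that "record now, apply on arrival" pattern, using made-up names (my_sm, MY_DEST_*) and none of the SCI types:

enum my_dest { MY_DEST_NONE, MY_DEST_READY, MY_DEST_SUSPENDED };
enum my_state { MY_POSTING, MY_READY, MY_SUSPENDED_STATE };

struct my_sm {
	enum my_state state;
	enum my_dest dest;	/* what to do once READY is reached */
};

/* Entry handler for READY: consume any destination recorded while a
 * transition was in flight.
 */
static void my_enter_ready(struct my_sm *sm)
{
	enum my_dest dest = sm->dest;

	sm->dest = MY_DEST_NONE;
	sm->state = MY_READY;
	if (dest == MY_DEST_SUSPENDED)
		sm->state = MY_SUSPENDED_STATE;	/* deferred suspend runs now */
}

/* A suspend request made mid-transition is remembered, not rejected. */
static void my_request_suspend(struct my_sm *sm)
{
	if (sm->state == MY_POSTING) {
		sm->dest = MY_DEST_SUSPENDED;	/* defer until READY */
		return;
	}
	sm->state = MY_SUSPENDED_STATE;		/* suspend immediately */
}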
*/ + return SCI_SUCCESS; } + if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) || + (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) { + + if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT) + isci_dev_set_hang_detection_timeout(idev, 0x00000001); + + sci_remote_device_post_request( + idev, SCI_SOFTWARE_SUSPEND_CMD); + } + if (state != SCI_RNC_AWAIT_SUSPENSION) + sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION); - sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION); return SCI_SUCCESS; } @@ -522,56 +661,86 @@ enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *s void *cb_p) { enum scis_sds_remote_node_context_states state; + struct isci_remote_device *idev = rnc_to_dev(sci_rnc); state = sci_rnc->sm.current_state_id; + dev_dbg(scirdev_to_dev(idev), + "%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; " + "dev resume path %s\n", + __func__, rnc_state_name(state), cb_fn, cb_p, + sci_rnc->destination_state, + test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags) + ? "" : ""); + switch (state) { case SCI_RNC_INITIAL: if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_STATE; - sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); - sci_remote_node_context_construct_buffer(sci_rnc); - sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); + sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p, + RNC_DEST_READY); + if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) { + sci_remote_node_context_construct_buffer(sci_rnc); + sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); + } return SCI_SUCCESS; + case SCI_RNC_POSTING: case SCI_RNC_INVALIDATING: case SCI_RNC_RESUMING: - if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) - return SCI_FAILURE_INVALID_STATE; - - sci_rnc->user_callback = cb_fn; - sci_rnc->user_cookie = cb_p; + /* We are still waiting to post when a resume was + * requested. + */ + switch (sci_rnc->destination_state) { + case RNC_DEST_SUSPENDED: + case RNC_DEST_SUSPENDED_RESUME: + /* Previously waiting to suspend after posting. + * Now continue onto resumption. + */ + sci_remote_node_context_setup_to_resume( + sci_rnc, cb_fn, cb_p, + RNC_DEST_SUSPENDED_RESUME); + break; + default: + sci_remote_node_context_setup_to_resume( + sci_rnc, cb_fn, cb_p, + RNC_DEST_READY); + break; + } return SCI_SUCCESS; - case SCI_RNC_TX_SUSPENDED: { - struct isci_remote_device *idev = rnc_to_dev(sci_rnc); - struct domain_device *dev = idev->domain_dev; - sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); - - /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */ - if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) - sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); - else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { - if (idev->is_direct_attached) { - /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. 
*/ - sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); - } else { - sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); - } - } else - return SCI_FAILURE; - return SCI_SUCCESS; - } + case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: - sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); - sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); - return SCI_FAILURE_INVALID_STATE; + { + struct domain_device *dev = idev->domain_dev; + /* If this is an expander attached SATA device we must + * invalidate and repost the RNC since this is the only + * way to clear the TCi to NCQ tag mapping table for + * the RNi. All other device types we can just resume. + */ + sci_remote_node_context_setup_to_resume( + sci_rnc, cb_fn, cb_p, RNC_DEST_READY); + + if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) { + if ((dev_is_sata(dev) && dev->parent) || + (sci_rnc->destination_state == RNC_DEST_FINAL)) + sci_change_state(&sci_rnc->sm, + SCI_RNC_INVALIDATING); + else + sci_change_state(&sci_rnc->sm, + SCI_RNC_RESUMING); + } + } + return SCI_SUCCESS; + case SCI_RNC_AWAIT_SUSPENSION: - sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); + sci_remote_node_context_setup_to_resume( + sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME); return SCI_SUCCESS; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), - "%s: invalid state %d\n", __func__, state); + "%s: invalid state %s\n", __func__, + rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } @@ -590,35 +759,51 @@ enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context case SCI_RNC_TX_RX_SUSPENDED: case SCI_RNC_AWAIT_SUSPENSION: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), - "%s: invalid state %d\n", __func__, state); + "%s: invalid state %s\n", __func__, + rnc_state_name(state)); return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; default: - break; + dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: invalid state %s\n", __func__, + rnc_state_name(state)); + return SCI_FAILURE_INVALID_STATE; } - dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), - "%s: requested to start IO while still resuming, %d\n", - __func__, state); - return SCI_FAILURE_INVALID_STATE; } -enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, - struct isci_request *ireq) +enum sci_status sci_remote_node_context_start_task( + struct sci_remote_node_context *sci_rnc, + struct isci_request *ireq, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p) +{ + enum sci_status status = sci_remote_node_context_resume(sci_rnc, + cb_fn, cb_p); + if (status != SCI_SUCCESS) + dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), + "%s: resume failed: %d\n", __func__, status); + return status; +} + +int sci_remote_node_context_is_safe_to_abort( + struct sci_remote_node_context *sci_rnc) { enum scis_sds_remote_node_context_states state; state = sci_rnc->sm.current_state_id; switch (state) { + case SCI_RNC_INVALIDATING: + case SCI_RNC_TX_RX_SUSPENDED: + return 1; + case SCI_RNC_POSTING: case SCI_RNC_RESUMING: case SCI_RNC_READY: - case SCI_RNC_AWAIT_SUSPENSION: - return SCI_SUCCESS; case SCI_RNC_TX_SUSPENDED: - case SCI_RNC_TX_RX_SUSPENDED: - sci_remote_node_context_resume(sci_rnc, NULL, NULL); - return SCI_SUCCESS; + case SCI_RNC_AWAIT_SUSPENSION: + case SCI_RNC_INITIAL: + return 0; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %d\n", __func__, state); - return SCI_FAILURE_INVALID_STATE; + return 0; } } diff --git a/drivers/scsi/isci/remote_node_context.h 
b/drivers/scsi/isci/remote_node_context.h index a241e0f4c865..a703b9ce0c2c 100644 --- a/drivers/scsi/isci/remote_node_context.h +++ b/drivers/scsi/isci/remote_node_context.h @@ -75,8 +75,13 @@ */ #define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF -#define SCU_HARDWARE_SUSPENSION (0) -#define SCI_SOFTWARE_SUSPENSION (1) +enum sci_remote_node_suspension_reasons { + SCI_HW_SUSPEND, + SCI_SW_SUSPEND_NORMAL, + SCI_SW_SUSPEND_LINKHANG_DETECT +}; +#define SCI_SOFTWARE_SUSPEND_CMD SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX +#define SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT SCU_EVENT_TL_RNC_SUSPEND_TX_RX struct isci_request; struct isci_remote_device; @@ -137,9 +142,13 @@ const char *rnc_state_name(enum scis_sds_remote_node_context_states state); * node context. */ enum sci_remote_node_context_destination_state { - SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED, - SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY, - SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL + RNC_DEST_UNSPECIFIED, + RNC_DEST_READY, + RNC_DEST_FINAL, + RNC_DEST_SUSPENDED, /* Set when suspend during post/invalidate */ + RNC_DEST_SUSPENDED_RESUME /* Set when a resume was done during posting + * or invalidating and already suspending. + */ }; /** @@ -156,10 +165,12 @@ struct sci_remote_node_context { u16 remote_node_index; /** - * This field is the recored suspension code or the reason for the remote node + * This field is the recored suspension type of the remote node * context suspension. */ - u32 suspension_code; + u32 suspend_type; + enum sci_remote_node_suspension_reasons suspend_reason; + u32 suspend_count; /** * This field is true if the remote node context is resuming from its current @@ -193,6 +204,8 @@ void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, bool sci_remote_node_context_is_ready( struct sci_remote_node_context *sci_rnc); +bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc); + enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, u32 event_code); enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, @@ -200,14 +213,24 @@ enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context void *callback_parameter); enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, u32 suspend_type, - scics_sds_remote_node_context_callback cb_fn, - void *cb_p); + u32 suspension_code); enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback cb_fn, void *cb_p); enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, - struct isci_request *ireq); + struct isci_request *ireq, + scics_sds_remote_node_context_callback cb_fn, + void *cb_p); enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, struct isci_request *ireq); +int sci_remote_node_context_is_safe_to_abort( + struct sci_remote_node_context *sci_rnc); +static inline bool sci_remote_node_context_is_being_destroyed( + struct sci_remote_node_context *sci_rnc) +{ + return (sci_rnc->destination_state == RNC_DEST_FINAL) + || ((sci_rnc->sm.current_state_id == SCI_RNC_INITIAL) + && (sci_rnc->destination_state == RNC_DEST_UNSPECIFIED)); +} #endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */ diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 2def1e3960f6..7a0431c73493 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ 
-92,11 +92,11 @@ static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost, if (idx == 0) { offset = (void *) &ireq->tc->sgl_pair_ab - (void *) &ihost->task_context_table[0]; - return ihost->task_context_dma + offset; + return ihost->tc_dma + offset; } else if (idx == 1) { offset = (void *) &ireq->tc->sgl_pair_cd - (void *) &ihost->task_context_table[0]; - return ihost->task_context_dma + offset; + return ihost->tc_dma + offset; } return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); @@ -730,7 +730,7 @@ static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *i { struct sas_task *task = isci_request_access_task(ireq); - ireq->protocol = SCIC_SSP_PROTOCOL; + ireq->protocol = SAS_PROTOCOL_SSP; scu_ssp_io_request_construct_task_context(ireq, task->data_dir, @@ -763,7 +763,7 @@ static enum sci_status sci_io_request_construct_basic_sata(struct isci_request * bool copy = false; struct sas_task *task = isci_request_access_task(ireq); - ireq->protocol = SCIC_STP_PROTOCOL; + ireq->protocol = SAS_PROTOCOL_STP; copy = (task->data_dir == DMA_NONE) ? false : true; @@ -863,6 +863,8 @@ sci_io_request_terminate(struct isci_request *ireq) switch (state) { case SCI_REQ_CONSTRUCTED: + /* Set to make sure no HW terminate posting is done: */ + set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags); ireq->scu_status = SCU_TASK_DONE_TASK_ABORT; ireq->sci_status = SCI_FAILURE_IO_TERMINATED; sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); @@ -883,8 +885,7 @@ sci_io_request_terminate(struct isci_request *ireq) case SCI_REQ_ATAPI_WAIT_PIO_SETUP: case SCI_REQ_ATAPI_WAIT_D2H: case SCI_REQ_ATAPI_WAIT_TC_COMP: - sci_change_state(&ireq->sm, SCI_REQ_ABORTING); - return SCI_SUCCESS; + /* Fall through and change state to ABORTING... */ case SCI_REQ_TASK_WAIT_TC_RESP: /* The task frame was already confirmed to have been * sent by the SCU HW. Since the state machine is @@ -893,20 +894,21 @@ sci_io_request_terminate(struct isci_request *ireq) * and don't wait for the task response. */ sci_change_state(&ireq->sm, SCI_REQ_ABORTING); - sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); - return SCI_SUCCESS; + /* Fall through and handle like ABORTING... */ case SCI_REQ_ABORTING: - /* If a request has a termination requested twice, return - * a failure indication, since HW confirmation of the first - * abort is still outstanding. + if (!isci_remote_device_is_safe_to_abort(ireq->target_device)) + set_bit(IREQ_PENDING_ABORT, &ireq->flags); + else + clear_bit(IREQ_PENDING_ABORT, &ireq->flags); + /* If the request is only waiting on the remote device + * suspension, return SUCCESS so the caller will wait too. 
*/ + return SCI_SUCCESS; case SCI_REQ_COMPLETED: default: dev_warn(&ireq->owning_controller->pdev->dev, "%s: SCIC IO Request requested to abort while in wrong " - "state %d\n", - __func__, - ireq->sm.current_state_id); + "state %d\n", __func__, ireq->sm.current_state_id); break; } @@ -1070,7 +1072,7 @@ request_started_state_tc_event(struct isci_request *ireq, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): - if (ireq->protocol == SCIC_STP_PROTOCOL) { + if (ireq->protocol == SAS_PROTOCOL_STP) { ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT; ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; @@ -2117,7 +2119,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq */ if (ireq->stp.rsp.fis_type == FIS_REGD2H) { sci_remote_device_suspend(ireq->target_device, - SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); + SCI_SW_SUSPEND_NORMAL); ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; @@ -2138,13 +2140,6 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR * - this comes only for B0 */ - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR): - case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR): - sci_remote_device_suspend(ireq->target_device, - SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))); - /* Fall through to the default case */ default: /* All other completion status cause the IO to be complete. */ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); @@ -2262,15 +2257,151 @@ static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ire return status; } +static int sci_request_smp_completion_status_is_tx_suspend( + unsigned int completion_status) +{ + switch (completion_status) { + case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: + case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: + case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: + return 1; + } + return 0; +} + +static int sci_request_smp_completion_status_is_tx_rx_suspend( + unsigned int completion_status) +{ + return 0; /* There are no Tx/Rx SMP suspend conditions. */ +} + +static int sci_request_ssp_completion_status_is_tx_suspend( + unsigned int completion_status) +{ + switch (completion_status) { + case SCU_TASK_DONE_TX_RAW_CMD_ERR: + case SCU_TASK_DONE_LF_ERR: + case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: + case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: + case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: + case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: + case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: + case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: + return 1; + } + return 0; +} + +static int sci_request_ssp_completion_status_is_tx_rx_suspend( + unsigned int completion_status) +{ + return 0; /* There are no Tx/Rx SSP suspend conditions. 
*/ +} + +static int sci_request_stpsata_completion_status_is_tx_suspend( + unsigned int completion_status) +{ + switch (completion_status) { + case SCU_TASK_DONE_TX_RAW_CMD_ERR: + case SCU_TASK_DONE_LL_R_ERR: + case SCU_TASK_DONE_LL_PERR: + case SCU_TASK_DONE_REG_ERR: + case SCU_TASK_DONE_SDB_ERR: + case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: + case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: + case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: + case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: + case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: + case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: + case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: + return 1; + } + return 0; +} + + +static int sci_request_stpsata_completion_status_is_tx_rx_suspend( + unsigned int completion_status) +{ + switch (completion_status) { + case SCU_TASK_DONE_LF_ERR: + case SCU_TASK_DONE_LL_SY_TERM: + case SCU_TASK_DONE_LL_LF_TERM: + case SCU_TASK_DONE_BREAK_RCVD: + case SCU_TASK_DONE_INV_FIS_LEN: + case SCU_TASK_DONE_UNEXP_FIS: + case SCU_TASK_DONE_UNEXP_SDBFIS: + case SCU_TASK_DONE_MAX_PLD_ERR: + return 1; + } + return 0; +} + +static void sci_request_handle_suspending_completions( + struct isci_request *ireq, + u32 completion_code) +{ + int is_tx = 0; + int is_tx_rx = 0; + + switch (ireq->protocol) { + case SAS_PROTOCOL_SMP: + is_tx = sci_request_smp_completion_status_is_tx_suspend( + completion_code); + is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend( + completion_code); + break; + case SAS_PROTOCOL_SSP: + is_tx = sci_request_ssp_completion_status_is_tx_suspend( + completion_code); + is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend( + completion_code); + break; + case SAS_PROTOCOL_STP: + is_tx = sci_request_stpsata_completion_status_is_tx_suspend( + completion_code); + is_tx_rx = + sci_request_stpsata_completion_status_is_tx_rx_suspend( + completion_code); + break; + default: + dev_warn(&ireq->isci_host->pdev->dev, + "%s: request %p has no valid protocol\n", + __func__, ireq); + break; + } + if (is_tx || is_tx_rx) { + BUG_ON(is_tx && is_tx_rx); + + sci_remote_node_context_suspend( + &ireq->target_device->rnc, + SCI_HW_SUSPEND, + (is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX + : SCU_EVENT_TL_RNC_SUSPEND_TX); + } +} + enum sci_status sci_io_request_tc_completion(struct isci_request *ireq, - u32 completion_code) + u32 completion_code) { enum sci_base_request_states state; struct isci_host *ihost = ireq->owning_controller; state = ireq->sm.current_state_id; + /* Decode those completions that signal upcoming suspension events. */ + sci_request_handle_suspending_completions( + ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code)); + switch (state) { case SCI_REQ_STARTED: return request_started_state_tc_event(ireq, completion_code); @@ -2362,9 +2493,6 @@ static void isci_request_process_response_iu( * @request: This parameter is the completed isci_request object. * @response_ptr: This parameter specifies the service response for the I/O. * @status_ptr: This parameter specifies the exec status for the I/O. - * @complete_to_host_ptr: This parameter specifies the action to be taken by - * the LLDD with respect to completing this request or forcing an abort - * condition on the I/O. * @open_rej_reason: This parameter specifies the encoded reason for the * abandon-class reject. 
* @@ -2375,14 +2503,12 @@ static void isci_request_set_open_reject_status( struct sas_task *task, enum service_response *response_ptr, enum exec_status *status_ptr, - enum isci_completion_selection *complete_to_host_ptr, enum sas_open_rej_reason open_rej_reason) { /* Task in the target is done. */ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); *response_ptr = SAS_TASK_UNDELIVERED; *status_ptr = SAS_OPEN_REJECT; - *complete_to_host_ptr = isci_perform_normal_io_completion; task->task_status.open_rej_reason = open_rej_reason; } @@ -2392,9 +2518,6 @@ static void isci_request_set_open_reject_status( * @request: This parameter is the completed isci_request object. * @response_ptr: This parameter specifies the service response for the I/O. * @status_ptr: This parameter specifies the exec status for the I/O. - * @complete_to_host_ptr: This parameter specifies the action to be taken by - * the LLDD with respect to completing this request or forcing an abort - * condition on the I/O. * * none. */ @@ -2403,8 +2526,7 @@ static void isci_request_handle_controller_specific_errors( struct isci_request *request, struct sas_task *task, enum service_response *response_ptr, - enum exec_status *status_ptr, - enum isci_completion_selection *complete_to_host_ptr) + enum exec_status *status_ptr) { unsigned int cstatus; @@ -2445,9 +2567,6 @@ static void isci_request_handle_controller_specific_errors( *status_ptr = SAS_ABORTED_TASK; set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - - *complete_to_host_ptr = - isci_perform_normal_io_completion; } else { /* Task in the target is not done. */ *response_ptr = SAS_TASK_UNDELIVERED; @@ -2458,9 +2577,6 @@ static void isci_request_handle_controller_specific_errors( *status_ptr = SAM_STAT_TASK_ABORTED; clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - - *complete_to_host_ptr = - isci_perform_error_io_completion; } break; @@ -2489,8 +2605,6 @@ static void isci_request_handle_controller_specific_errors( *status_ptr = SAS_ABORTED_TASK; set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - - *complete_to_host_ptr = isci_perform_normal_io_completion; break; @@ -2501,7 +2615,7 @@ static void isci_request_handle_controller_specific_errors( isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_WRONG_DEST); + SAS_OREJ_WRONG_DEST); break; case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: @@ -2511,56 +2625,56 @@ static void isci_request_handle_controller_specific_errors( */ isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_RESV_AB0); + SAS_OREJ_RESV_AB0); break; case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_RESV_AB1); + SAS_OREJ_RESV_AB1); break; case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_RESV_AB2); + SAS_OREJ_RESV_AB2); break; case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_RESV_AB3); + SAS_OREJ_RESV_AB3); break; case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_BAD_DEST); + SAS_OREJ_BAD_DEST); break; case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, - 
complete_to_host_ptr, SAS_OREJ_STP_NORES); + SAS_OREJ_STP_NORES); break; case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_EPROTO); + SAS_OREJ_EPROTO); break; case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: isci_request_set_open_reject_status( request, task, response_ptr, status_ptr, - complete_to_host_ptr, SAS_OREJ_CONN_RATE); + SAS_OREJ_CONN_RATE); break; case SCU_TASK_DONE_LL_R_ERR: @@ -2592,97 +2706,14 @@ static void isci_request_handle_controller_specific_errors( *response_ptr = SAS_TASK_UNDELIVERED; *status_ptr = SAM_STAT_TASK_ABORTED; - if (task->task_proto == SAS_PROTOCOL_SMP) { + if (task->task_proto == SAS_PROTOCOL_SMP) set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - - *complete_to_host_ptr = isci_perform_normal_io_completion; - } else { + else clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - - *complete_to_host_ptr = isci_perform_error_io_completion; - } break; } } -/** - * isci_task_save_for_upper_layer_completion() - This function saves the - * request for later completion to the upper layer driver. - * @host: This parameter is a pointer to the host on which the the request - * should be queued (either as an error or success). - * @request: This parameter is the completed request. - * @response: This parameter is the response code for the completed task. - * @status: This parameter is the status code for the completed task. - * - * none. - */ -static void isci_task_save_for_upper_layer_completion( - struct isci_host *host, - struct isci_request *request, - enum service_response response, - enum exec_status status, - enum isci_completion_selection task_notification_selection) -{ - struct sas_task *task = isci_request_access_task(request); - - task_notification_selection - = isci_task_set_completion_status(task, response, status, - task_notification_selection); - - /* Tasks aborted specifically by a call to the lldd_abort_task - * function should not be completed to the host in the regular path. - */ - switch (task_notification_selection) { - - case isci_perform_normal_io_completion: - /* Normal notification (task_done) */ - - /* Add to the completed list. */ - list_add(&request->completed_node, - &host->requests_to_complete); - - /* Take the request off the device's pending request list. */ - list_del_init(&request->dev_node); - break; - - case isci_perform_aborted_io_completion: - /* No notification to libsas because this request is - * already in the abort path. - */ - /* Wake up whatever process was waiting for this - * request to complete. - */ - WARN_ON(request->io_request_completion == NULL); - - if (request->io_request_completion != NULL) { - - /* Signal whoever is waiting that this - * request is complete. - */ - complete(request->io_request_completion); - } - break; - - case isci_perform_error_io_completion: - /* Use sas_task_abort */ - /* Add to the aborted list. */ - list_add(&request->completed_node, - &host->requests_to_errorback); - break; - - default: - /* Add to the error to libsas list. */ - list_add(&request->completed_node, - &host->requests_to_errorback); - break; - } - dev_dbg(&host->pdev->dev, - "%s: %d - task = %p, response=%d (%d), status=%d (%d)\n", - __func__, task_notification_selection, task, - (task) ? task->task_status.resp : 0, response, - (task) ? 
task->task_status.stat : 0, status); -} - static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) { struct task_status_struct *ts = &task->task_status; @@ -2715,295 +2746,164 @@ static void isci_request_io_request_complete(struct isci_host *ihost, struct isci_remote_device *idev = request->target_device; enum service_response response = SAS_TASK_UNDELIVERED; enum exec_status status = SAS_ABORTED_TASK; - enum isci_request_status request_status; - enum isci_completion_selection complete_to_host - = isci_perform_normal_io_completion; dev_dbg(&ihost->pdev->dev, - "%s: request = %p, task = %p,\n" + "%s: request = %p, task = %p, " "task->data_dir = %d completion_status = 0x%x\n", - __func__, - request, - task, - task->data_dir, - completion_status); + __func__, request, task, task->data_dir, completion_status); - spin_lock(&request->state_lock); - request_status = request->status; + /* The request is done from an SCU HW perspective. */ - /* Decode the request status. Note that if the request has been - * aborted by a task management function, we don't care - * what the status is. - */ - switch (request_status) { + /* This is an active request being completed from the core. */ + switch (completion_status) { - case aborted: - /* "aborted" indicates that the request was aborted by a task - * management function, since once a task management request is - * perfomed by the device, the request only completes because - * of the subsequent driver terminate. - * - * Aborted also means an external thread is explicitly managing - * this request, so that we do not complete it up the stack. - * - * The target is still there (since the TMF was successful). - */ + case SCI_IO_FAILURE_RESPONSE_VALID: + dev_dbg(&ihost->pdev->dev, + "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", + __func__, request, task); + + if (sas_protocol_ata(task->task_proto)) { + isci_process_stp_response(task, &request->stp.rsp); + } else if (SAS_PROTOCOL_SSP == task->task_proto) { + + /* crack the iu response buffer. */ + resp_iu = &request->ssp.rsp; + isci_request_process_response_iu(task, resp_iu, + &ihost->pdev->dev); + + } else if (SAS_PROTOCOL_SMP == task->task_proto) { + + dev_err(&ihost->pdev->dev, + "%s: SCI_IO_FAILURE_RESPONSE_VALID: " + "SAS_PROTOCOL_SMP protocol\n", + __func__); + + } else + dev_err(&ihost->pdev->dev, + "%s: unknown protocol\n", __func__); + + /* use the task status set in the task struct by the + * isci_request_process_response_iu call. + */ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + response = task->task_status.resp; + status = task->task_status.stat; + break; + + case SCI_IO_SUCCESS: + case SCI_IO_SUCCESS_IO_DONE_EARLY: + response = SAS_TASK_COMPLETE; + status = SAM_STAT_GOOD; + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + + if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { + + /* This was an SSP / STP / SATA transfer. + * There is a possibility that less data than + * the maximum was transferred. + */ + u32 transferred_length = sci_req_tx_bytes(request); + + task->task_status.residual + = task->total_xfer_len - transferred_length; + + /* If there were residual bytes, call this an + * underrun. 
+ */ + if (task->task_status.residual != 0) + status = SAS_DATA_UNDERRUN; + + dev_dbg(&ihost->pdev->dev, + "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", + __func__, status); + + } else + dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n", + __func__); + break; + + case SCI_IO_FAILURE_TERMINATED: + + dev_dbg(&ihost->pdev->dev, + "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", + __func__, request, task); + + /* The request was terminated explicitly. */ + set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + response = SAS_TASK_UNDELIVERED; /* See if the device has been/is being stopped. Note - * that we ignore the quiesce state, since we are - * concerned about the actual device state. - */ + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if (!idev) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; + break; + + case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: + + isci_request_handle_controller_specific_errors(idev, request, + task, &response, + &status); + break; + + case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: + /* This is a special case, in that the I/O completion + * is telling us that the device needs a reset. + * In order for the device reset condition to be + * noticed, the I/O has to be handled in the error + * handler. Set the reset flag and cause the + * SCSI error thread to be scheduled. + */ + spin_lock_irqsave(&task->task_state_lock, task_flags); + task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; + spin_unlock_irqrestore(&task->task_state_lock, task_flags); + + /* Fail the I/O. */ + response = SAS_TASK_UNDELIVERED; + status = SAM_STAT_TASK_ABORTED; + + clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); + break; + + case SCI_FAILURE_RETRY_REQUIRED: + + /* Fail the I/O so it can be retried. */ + response = SAS_TASK_UNDELIVERED; if (!idev) status = SAS_DEVICE_UNKNOWN; else status = SAS_ABORTED_TASK; - complete_to_host = isci_perform_aborted_io_completion; - /* This was an aborted request. */ - - spin_unlock(&request->state_lock); - break; - - case aborting: - /* aborting means that the task management function tried and - * failed to abort the request. We need to note the request - * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the - * target as down. - * - * Aborting also means an external thread is explicitly managing - * this request, so that we do not complete it up the stack. - */ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - response = SAS_TASK_UNDELIVERED; - - if (!idev) - /* The device has been /is being stopped. Note that - * we ignore the quiesce state, since we are - * concerned about the actual device state. - */ - status = SAS_DEVICE_UNKNOWN; - else - status = SAS_PHY_DOWN; - - complete_to_host = isci_perform_aborted_io_completion; - - /* This was an aborted request. */ - - spin_unlock(&request->state_lock); break; - case terminating: - - /* This was an terminated request. This happens when - * the I/O is being terminated because of an action on - * the device (reset, tear down, etc.), and the I/O needs - * to be completed up the stack. - */ - set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - response = SAS_TASK_UNDELIVERED; - - /* See if the device has been/is being stopped. Note - * that we ignore the quiesce state, since we are - * concerned about the actual device state. - */ - if (!idev) - status = SAS_DEVICE_UNKNOWN; - else - status = SAS_ABORTED_TASK; - - complete_to_host = isci_perform_aborted_io_completion; - - /* This was a terminated request. 
*/ - - spin_unlock(&request->state_lock); - break; - - case dead: - /* This was a terminated request that timed-out during the - * termination process. There is no task to complete to - * libsas. - */ - complete_to_host = isci_perform_normal_io_completion; - spin_unlock(&request->state_lock); - break; default: + /* Catch any otherwise unhandled error codes here. */ + dev_dbg(&ihost->pdev->dev, + "%s: invalid completion code: 0x%x - " + "isci_request = %p\n", + __func__, completion_status, request); - /* The request is done from an SCU HW perspective. */ - request->status = completed; + response = SAS_TASK_UNDELIVERED; - spin_unlock(&request->state_lock); + /* See if the device has been/is being stopped. Note + * that we ignore the quiesce state, since we are + * concerned about the actual device state. + */ + if (!idev) + status = SAS_DEVICE_UNKNOWN; + else + status = SAS_ABORTED_TASK; - /* This is an active request being completed from the core. */ - switch (completion_status) { - - case SCI_IO_FAILURE_RESPONSE_VALID: - dev_dbg(&ihost->pdev->dev, - "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n", - __func__, - request, - task); - - if (sas_protocol_ata(task->task_proto)) { - isci_process_stp_response(task, &request->stp.rsp); - } else if (SAS_PROTOCOL_SSP == task->task_proto) { - - /* crack the iu response buffer. */ - resp_iu = &request->ssp.rsp; - isci_request_process_response_iu(task, resp_iu, - &ihost->pdev->dev); - - } else if (SAS_PROTOCOL_SMP == task->task_proto) { - - dev_err(&ihost->pdev->dev, - "%s: SCI_IO_FAILURE_RESPONSE_VALID: " - "SAS_PROTOCOL_SMP protocol\n", - __func__); - - } else - dev_err(&ihost->pdev->dev, - "%s: unknown protocol\n", __func__); - - /* use the task status set in the task struct by the - * isci_request_process_response_iu call. - */ + if (SAS_PROTOCOL_SMP == task->task_proto) set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - response = task->task_status.resp; - status = task->task_status.stat; - break; - - case SCI_IO_SUCCESS: - case SCI_IO_SUCCESS_IO_DONE_EARLY: - - response = SAS_TASK_COMPLETE; - status = SAM_STAT_GOOD; - set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - - if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { - - /* This was an SSP / STP / SATA transfer. - * There is a possibility that less data than - * the maximum was transferred. - */ - u32 transferred_length = sci_req_tx_bytes(request); - - task->task_status.residual - = task->total_xfer_len - transferred_length; - - /* If there were residual bytes, call this an - * underrun. - */ - if (task->task_status.residual != 0) - status = SAS_DATA_UNDERRUN; - - dev_dbg(&ihost->pdev->dev, - "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n", - __func__, - status); - - } else - dev_dbg(&ihost->pdev->dev, - "%s: SCI_IO_SUCCESS\n", - __func__); - - break; - - case SCI_IO_FAILURE_TERMINATED: - dev_dbg(&ihost->pdev->dev, - "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n", - __func__, - request, - task); - - /* The request was terminated explicitly. No handling - * is needed in the SCSI error handler path. - */ - set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - response = SAS_TASK_UNDELIVERED; - - /* See if the device has been/is being stopped. Note - * that we ignore the quiesce state, since we are - * concerned about the actual device state. 
- */ - if (!idev) - status = SAS_DEVICE_UNKNOWN; - else - status = SAS_ABORTED_TASK; - - complete_to_host = isci_perform_normal_io_completion; - break; - - case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: - - isci_request_handle_controller_specific_errors( - idev, request, task, &response, &status, - &complete_to_host); - - break; - - case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: - /* This is a special case, in that the I/O completion - * is telling us that the device needs a reset. - * In order for the device reset condition to be - * noticed, the I/O has to be handled in the error - * handler. Set the reset flag and cause the - * SCSI error thread to be scheduled. - */ - spin_lock_irqsave(&task->task_state_lock, task_flags); - task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; - spin_unlock_irqrestore(&task->task_state_lock, task_flags); - - /* Fail the I/O. */ - response = SAS_TASK_UNDELIVERED; - status = SAM_STAT_TASK_ABORTED; - - complete_to_host = isci_perform_error_io_completion; + else clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - break; - - case SCI_FAILURE_RETRY_REQUIRED: - - /* Fail the I/O so it can be retried. */ - response = SAS_TASK_UNDELIVERED; - if (!idev) - status = SAS_DEVICE_UNKNOWN; - else - status = SAS_ABORTED_TASK; - - complete_to_host = isci_perform_normal_io_completion; - set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - break; - - - default: - /* Catch any otherwise unhandled error codes here. */ - dev_dbg(&ihost->pdev->dev, - "%s: invalid completion code: 0x%x - " - "isci_request = %p\n", - __func__, completion_status, request); - - response = SAS_TASK_UNDELIVERED; - - /* See if the device has been/is being stopped. Note - * that we ignore the quiesce state, since we are - * concerned about the actual device state. - */ - if (!idev) - status = SAS_DEVICE_UNKNOWN; - else - status = SAS_ABORTED_TASK; - - if (SAS_PROTOCOL_SMP == task->task_proto) { - set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - complete_to_host = isci_perform_normal_io_completion; - } else { - clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - complete_to_host = isci_perform_error_io_completion; - } - break; - } break; } @@ -3038,10 +2938,18 @@ static void isci_request_io_request_complete(struct isci_host *ihost, break; } - /* Put the completed request on the correct list */ - isci_task_save_for_upper_layer_completion(ihost, request, response, - status, complete_to_host - ); + spin_lock_irqsave(&task->task_state_lock, task_flags); + + task->task_status.resp = response; + task->task_status.stat = status; + + if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) { + /* Normal notification (task_done) */ + task->task_state_flags |= SAS_TASK_STATE_DONE; + task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | + SAS_TASK_STATE_PENDING); + } + spin_unlock_irqrestore(&task->task_state_lock, task_flags); /* complete the io request to the core. */ sci_controller_complete_io(ihost, request->target_device, request); @@ -3051,6 +2959,8 @@ static void isci_request_io_request_complete(struct isci_host *ihost, * task to recognize the already completed case. 
*/ set_bit(IREQ_TERMINATED, &request->flags); + + ireq_done(ihost, request, task); } static void sci_request_started_state_enter(struct sci_base_state_machine *sm) @@ -3169,7 +3079,7 @@ sci_general_request_construct(struct isci_host *ihost, sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT); ireq->target_device = idev; - ireq->protocol = SCIC_NO_PROTOCOL; + ireq->protocol = SAS_PROTOCOL_NONE; ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX; ireq->sci_status = SCI_SUCCESS; @@ -3193,7 +3103,7 @@ sci_io_request_construct(struct isci_host *ihost, if (dev->dev_type == SAS_END_DEV) /* pass */; - else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) + else if (dev_is_sata(dev)) memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); else if (dev_is_expander(dev)) /* pass */; @@ -3215,10 +3125,15 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost, /* Build the common part of the request */ sci_general_request_construct(ihost, idev, ireq); - if (dev->dev_type == SAS_END_DEV || - dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { + if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) { set_bit(IREQ_TMF, &ireq->flags); memset(ireq->tc, 0, sizeof(struct scu_task_context)); + + /* Set the protocol indicator. */ + if (dev_is_sata(dev)) + ireq->protocol = SAS_PROTOCOL_STP; + else + ireq->protocol = SAS_PROTOCOL_SSP; } else status = SCI_FAILURE_UNSUPPORTED_PROTOCOL; @@ -3311,7 +3226,7 @@ sci_io_request_construct_smp(struct device *dev, if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) return SCI_FAILURE; - ireq->protocol = SCIC_SMP_PROTOCOL; + ireq->protocol = SAS_PROTOCOL_SMP; /* byte swap the smp request. */ @@ -3496,9 +3411,6 @@ static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 t ireq->io_request_completion = NULL; ireq->flags = 0; ireq->num_sg_entries = 0; - INIT_LIST_HEAD(&ireq->completed_node); - INIT_LIST_HEAD(&ireq->dev_node); - isci_request_change_state(ireq, allocated); return ireq; } @@ -3582,26 +3494,15 @@ int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *ide spin_unlock_irqrestore(&ihost->scic_lock, flags); return status; } - /* Either I/O started OK, or the core has signaled that * the device needs a target reset. - * - * In either case, hold onto the I/O for later. - * - * Update it's status and add it to the list in the - * remote device object. */ - list_add(&ireq->dev_node, &idev->reqs_in_process); - - if (status == SCI_SUCCESS) { - isci_request_change_state(ireq, started); - } else { + if (status != SCI_SUCCESS) { /* The request did not really start in the * hardware, so clear the request handle * here so no terminations will be done. */ set_bit(IREQ_TERMINATED, &ireq->flags); - isci_request_change_state(ireq, completed); } spin_unlock_irqrestore(&ihost->scic_lock, flags); diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 057f2378452d..aff95317fcf4 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -60,30 +60,6 @@ #include "host.h" #include "scu_task_context.h" -/** - * struct isci_request_status - This enum defines the possible states of an I/O - * request. 
- * - * - */ -enum isci_request_status { - unallocated = 0x00, - allocated = 0x01, - started = 0x02, - completed = 0x03, - aborting = 0x04, - aborted = 0x05, - terminating = 0x06, - dead = 0x07 -}; - -enum sci_request_protocol { - SCIC_NO_PROTOCOL, - SCIC_SMP_PROTOCOL, - SCIC_SSP_PROTOCOL, - SCIC_STP_PROTOCOL -}; /* XXX remove me, use sas_task.{dev|task_proto} instead */; - /** * isci_stp_request - extra request infrastructure to handle pio/atapi protocol * @pio_len - number of bytes requested at PIO setup @@ -104,11 +80,14 @@ struct isci_stp_request { }; struct isci_request { - enum isci_request_status status; #define IREQ_COMPLETE_IN_TARGET 0 #define IREQ_TERMINATED 1 #define IREQ_TMF 2 #define IREQ_ACTIVE 3 + #define IREQ_PENDING_ABORT 4 /* Set == device was not suspended yet */ + #define IREQ_TC_ABORT_POSTED 5 + #define IREQ_ABORT_PATH_ACTIVE 6 + #define IREQ_NO_AUTO_FREE_TAG 7 /* Set when being explicitly managed */ unsigned long flags; /* XXX kill ttype and ttype_ptr, allocate full sas_task */ union ttype_ptr_union { @@ -116,11 +95,6 @@ struct isci_request { struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */ } ttype_ptr; struct isci_host *isci_host; - /* For use in the requests_to_{complete|abort} lists: */ - struct list_head completed_node; - /* For use in the reqs_in_process list: */ - struct list_head dev_node; - spinlock_t state_lock; dma_addr_t request_daddr; dma_addr_t zero_scatter_daddr; unsigned int num_sg_entries; @@ -140,7 +114,7 @@ struct isci_request { struct isci_host *owning_controller; struct isci_remote_device *target_device; u16 io_tag; - enum sci_request_protocol protocol; + enum sas_protocol protocol; u32 scu_status; /* hardware result */ u32 sci_status; /* upper layer disposition */ u32 post_context; @@ -309,92 +283,6 @@ sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr) return ireq->request_daddr + (requested_addr - base_addr); } -/** - * isci_request_change_state() - This function sets the status of the request - * object. - * @request: This parameter points to the isci_request object - * @status: This Parameter is the new status of the object - * - */ -static inline enum isci_request_status -isci_request_change_state(struct isci_request *isci_request, - enum isci_request_status status) -{ - enum isci_request_status old_state; - unsigned long flags; - - dev_dbg(&isci_request->isci_host->pdev->dev, - "%s: isci_request = %p, state = 0x%x\n", - __func__, - isci_request, - status); - - BUG_ON(isci_request == NULL); - - spin_lock_irqsave(&isci_request->state_lock, flags); - old_state = isci_request->status; - isci_request->status = status; - spin_unlock_irqrestore(&isci_request->state_lock, flags); - - return old_state; -} - -/** - * isci_request_change_started_to_newstate() - This function sets the status of - * the request object. - * @request: This parameter points to the isci_request object - * @status: This Parameter is the new status of the object - * - * state previous to any change. 
- */ -static inline enum isci_request_status -isci_request_change_started_to_newstate(struct isci_request *isci_request, - struct completion *completion_ptr, - enum isci_request_status newstate) -{ - enum isci_request_status old_state; - unsigned long flags; - - spin_lock_irqsave(&isci_request->state_lock, flags); - - old_state = isci_request->status; - - if (old_state == started || old_state == aborting) { - BUG_ON(isci_request->io_request_completion != NULL); - - isci_request->io_request_completion = completion_ptr; - isci_request->status = newstate; - } - - spin_unlock_irqrestore(&isci_request->state_lock, flags); - - dev_dbg(&isci_request->isci_host->pdev->dev, - "%s: isci_request = %p, old_state = 0x%x\n", - __func__, - isci_request, - old_state); - - return old_state; -} - -/** - * isci_request_change_started_to_aborted() - This function sets the status of - * the request object. - * @request: This parameter points to the isci_request object - * @completion_ptr: This parameter is saved as the kernel completion structure - * signalled when the old request completes. - * - * state previous to any change. - */ -static inline enum isci_request_status -isci_request_change_started_to_aborted(struct isci_request *isci_request, - struct completion *completion_ptr) -{ - return isci_request_change_started_to_newstate(isci_request, - completion_ptr, - aborted); -} - #define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr) #define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr) @@ -404,8 +292,6 @@ struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost, u16 tag); int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev, struct sas_task *task, u16 tag); -void isci_terminate_pending_requests(struct isci_host *ihost, - struct isci_remote_device *idev); enum sci_status sci_task_request_construct(struct isci_host *ihost, struct isci_remote_device *idev, @@ -421,5 +307,4 @@ static inline int isci_task_is_ncq_recovery(struct sas_task *task) task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ); } - #endif /* !defined(_ISCI_REQUEST_H_) */ diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h index c8b329c695f9..071cb74a211c 100644 --- a/drivers/scsi/isci/scu_completion_codes.h +++ b/drivers/scsi/isci/scu_completion_codes.h @@ -224,6 +224,7 @@ * 32-bit value like we want, each immediate value must be cast to a u32. 
*/ #define SCU_TASK_DONE_GOOD ((u32)0x00) +#define SCU_TASK_DONE_TX_RAW_CMD_ERR ((u32)0x08) #define SCU_TASK_DONE_CRC_ERR ((u32)0x14) #define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14) #define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15) @@ -237,6 +238,7 @@ #define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A) #define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A) #define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B) +#define SCU_TASK_DONE_BREAK_RCVD ((u32)0x1B) #define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B) #define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C) #define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C) diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c index 374254ede9d4..6bc74eb012c9 100644 --- a/drivers/scsi/isci/task.c +++ b/drivers/scsi/isci/task.c @@ -78,54 +78,25 @@ static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task, enum exec_status status) { - enum isci_completion_selection disposition; + unsigned long flags; - disposition = isci_perform_normal_io_completion; - disposition = isci_task_set_completion_status(task, response, status, - disposition); + /* Normal notification (task_done) */ + dev_dbg(&ihost->pdev->dev, "%s: task = %p, response=%d, status=%d\n", + __func__, task, response, status); - /* Tasks aborted specifically by a call to the lldd_abort_task - * function should not be completed to the host in the regular path. - */ - switch (disposition) { - case isci_perform_normal_io_completion: - /* Normal notification (task_done) */ - dev_dbg(&ihost->pdev->dev, - "%s: Normal - task = %p, response=%d, " - "status=%d\n", - __func__, task, response, status); + spin_lock_irqsave(&task->task_state_lock, flags); - task->lldd_task = NULL; - task->task_done(task); - break; + task->task_status.resp = response; + task->task_status.stat = status; - case isci_perform_aborted_io_completion: - /* - * No notification because this request is already in the - * abort path. - */ - dev_dbg(&ihost->pdev->dev, - "%s: Aborted - task = %p, response=%d, " - "status=%d\n", - __func__, task, response, status); - break; + /* Normal notification (task_done) */ + task->task_state_flags |= SAS_TASK_STATE_DONE; + task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | + SAS_TASK_STATE_PENDING); + task->lldd_task = NULL; + spin_unlock_irqrestore(&task->task_state_lock, flags); - case isci_perform_error_io_completion: - /* Use sas_task_abort */ - dev_dbg(&ihost->pdev->dev, - "%s: Error - task = %p, response=%d, " - "status=%d\n", - __func__, task, response, status); - sas_task_abort(task); - break; - - default: - dev_dbg(&ihost->pdev->dev, - "%s: isci task notification default case!", - __func__); - sas_task_abort(task); - break; - } + task->task_done(task); } #define for_each_sas_task(num, task) \ @@ -289,60 +260,6 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, return ireq; } -/** -* isci_request_mark_zombie() - This function must be called with scic_lock held. -*/ -static void isci_request_mark_zombie(struct isci_host *ihost, struct isci_request *ireq) -{ - struct completion *tmf_completion = NULL; - struct completion *req_completion; - - /* Set the request state to "dead". */ - ireq->status = dead; - - req_completion = ireq->io_request_completion; - ireq->io_request_completion = NULL; - - if (test_bit(IREQ_TMF, &ireq->flags)) { - /* Break links with the TMF request. */ - struct isci_tmf *tmf = isci_request_access_tmf(ireq); - - /* In the case where a task request is dying, - * the thread waiting on the complete will sit and - * timeout unless we wake it now. 
Since the TMF - * has a default error status, complete it here - * to wake the waiting thread. - */ - if (tmf) { - tmf_completion = tmf->complete; - tmf->complete = NULL; - } - ireq->ttype_ptr.tmf_task_ptr = NULL; - dev_dbg(&ihost->pdev->dev, "%s: tmf_code %d, managed tag %#x\n", - __func__, tmf->tmf_code, tmf->io_tag); - } else { - /* Break links with the sas_task - the callback is done - * elsewhere. - */ - struct sas_task *task = isci_request_access_task(ireq); - - if (task) - task->lldd_task = NULL; - - ireq->ttype_ptr.io_task_ptr = NULL; - } - - dev_warn(&ihost->pdev->dev, "task context unrecoverable (tag: %#x)\n", - ireq->io_tag); - - /* Don't force waiting threads to timeout. */ - if (req_completion) - complete(req_completion); - - if (tmf_completion != NULL) - complete(tmf_completion); -} - static int isci_task_execute_tmf(struct isci_host *ihost, struct isci_remote_device *idev, struct isci_tmf *tmf, unsigned long timeout_ms) @@ -400,17 +317,11 @@ static int isci_task_execute_tmf(struct isci_host *ihost, spin_unlock_irqrestore(&ihost->scic_lock, flags); goto err_tci; } - - if (tmf->cb_state_func != NULL) - tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data); - - isci_request_change_state(ireq, started); - - /* add the request to the remote device request list. */ - list_add(&ireq->dev_node, &idev->reqs_in_process); - spin_unlock_irqrestore(&ihost->scic_lock, flags); + /* The RNC must be unsuspended before the TMF can get a response. */ + isci_remote_device_resume_from_abort(ihost, idev); + /* Wait for the TMF to complete, or a timeout. */ timeleft = wait_for_completion_timeout(&completion, msecs_to_jiffies(timeout_ms)); @@ -419,32 +330,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost, /* The TMF did not complete - this could be because * of an unplug. Terminate the TMF request now. */ - spin_lock_irqsave(&ihost->scic_lock, flags); - - if (tmf->cb_state_func != NULL) - tmf->cb_state_func(isci_tmf_timed_out, tmf, - tmf->cb_data); - - sci_controller_terminate_request(ihost, idev, ireq); - - spin_unlock_irqrestore(&ihost->scic_lock, flags); - - timeleft = wait_for_completion_timeout( - &completion, - msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC)); - - if (!timeleft) { - /* Strange condition - the termination of the TMF - * request timed-out. - */ - spin_lock_irqsave(&ihost->scic_lock, flags); - - /* If the TMF status has not changed, kill it. 
*/ - if (tmf->status == SCI_FAILURE_TIMEOUT) - isci_request_mark_zombie(ihost, ireq); - - spin_unlock_irqrestore(&ihost->scic_lock, flags); - } + isci_remote_device_suspend_terminate(ihost, idev, ireq); } isci_print_tmf(ihost, tmf); @@ -476,314 +362,20 @@ static int isci_task_execute_tmf(struct isci_host *ihost, } static void isci_task_build_tmf(struct isci_tmf *tmf, - enum isci_tmf_function_codes code, - void (*tmf_sent_cb)(enum isci_tmf_cb_state, - struct isci_tmf *, - void *), - void *cb_data) + enum isci_tmf_function_codes code) { memset(tmf, 0, sizeof(*tmf)); - - tmf->tmf_code = code; - tmf->cb_state_func = tmf_sent_cb; - tmf->cb_data = cb_data; + tmf->tmf_code = code; } static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf, enum isci_tmf_function_codes code, - void (*tmf_sent_cb)(enum isci_tmf_cb_state, - struct isci_tmf *, - void *), struct isci_request *old_request) { - isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request); + isci_task_build_tmf(tmf, code); tmf->io_tag = old_request->io_tag; } -/** - * isci_task_validate_request_to_abort() - This function checks the given I/O - * against the "started" state. If the request is still "started", it's - * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD - * BEFORE CALLING THIS FUNCTION. - * @isci_request: This parameter specifies the request object to control. - * @isci_host: This parameter specifies the ISCI host object - * @isci_device: This is the device to which the request is pending. - * @aborted_io_completion: This is a completion structure that will be added to - * the request in case it is changed to aborting; this completion is - * triggered when the request is fully completed. - * - * Either "started" on successful change of the task status to "aborted", or - * "unallocated" if the task cannot be controlled. - */ -static enum isci_request_status isci_task_validate_request_to_abort( - struct isci_request *isci_request, - struct isci_host *isci_host, - struct isci_remote_device *isci_device, - struct completion *aborted_io_completion) -{ - enum isci_request_status old_state = unallocated; - - /* Only abort the task if it's in the - * device's request_in_process list - */ - if (isci_request && !list_empty(&isci_request->dev_node)) { - old_state = isci_request_change_started_to_aborted( - isci_request, aborted_io_completion); - - } - - return old_state; -} - -static int isci_request_is_dealloc_managed(enum isci_request_status stat) -{ - switch (stat) { - case aborted: - case aborting: - case terminating: - case completed: - case dead: - return true; - default: - return false; - } -} - -/** - * isci_terminate_request_core() - This function will terminate the given - * request, and wait for it to complete. This function must only be called - * from a thread that can wait. Note that the request is terminated and - * completed (back to the host, if started there). - * @ihost: This SCU. - * @idev: The target. - * @isci_request: The I/O request to be terminated. 
- * - */ -static void isci_terminate_request_core(struct isci_host *ihost, - struct isci_remote_device *idev, - struct isci_request *isci_request) -{ - enum sci_status status = SCI_SUCCESS; - bool was_terminated = false; - bool needs_cleanup_handling = false; - unsigned long flags; - unsigned long termination_completed = 1; - struct completion *io_request_completion; - - dev_dbg(&ihost->pdev->dev, - "%s: device = %p; request = %p\n", - __func__, idev, isci_request); - - spin_lock_irqsave(&ihost->scic_lock, flags); - - io_request_completion = isci_request->io_request_completion; - - /* Note that we are not going to control - * the target to abort the request. - */ - set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags); - - /* Make sure the request wasn't just sitting around signalling - * device condition (if the request handle is NULL, then the - * request completed but needed additional handling here). - */ - if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) { - was_terminated = true; - needs_cleanup_handling = true; - status = sci_controller_terminate_request(ihost, - idev, - isci_request); - } - spin_unlock_irqrestore(&ihost->scic_lock, flags); - - /* - * The only time the request to terminate will - * fail is when the io request is completed and - * being aborted. - */ - if (status != SCI_SUCCESS) { - dev_dbg(&ihost->pdev->dev, - "%s: sci_controller_terminate_request" - " returned = 0x%x\n", - __func__, status); - - isci_request->io_request_completion = NULL; - - } else { - if (was_terminated) { - dev_dbg(&ihost->pdev->dev, - "%s: before completion wait (%p/%p)\n", - __func__, isci_request, io_request_completion); - - /* Wait here for the request to complete. */ - termination_completed - = wait_for_completion_timeout( - io_request_completion, - msecs_to_jiffies(ISCI_TERMINATION_TIMEOUT_MSEC)); - - if (!termination_completed) { - - /* The request to terminate has timed out. */ - spin_lock_irqsave(&ihost->scic_lock, flags); - - /* Check for state changes. */ - if (!test_bit(IREQ_TERMINATED, - &isci_request->flags)) { - - /* The best we can do is to have the - * request die a silent death if it - * ever really completes. - */ - isci_request_mark_zombie(ihost, - isci_request); - needs_cleanup_handling = true; - } else - termination_completed = 1; - - spin_unlock_irqrestore(&ihost->scic_lock, - flags); - - if (!termination_completed) { - - dev_dbg(&ihost->pdev->dev, - "%s: *** Timeout waiting for " - "termination(%p/%p)\n", - __func__, io_request_completion, - isci_request); - - /* The request can no longer be referenced - * safely since it may go away if the - * termination every really does complete. - */ - isci_request = NULL; - } - } - if (termination_completed) - dev_dbg(&ihost->pdev->dev, - "%s: after completion wait (%p/%p)\n", - __func__, isci_request, io_request_completion); - } - - if (termination_completed) { - - isci_request->io_request_completion = NULL; - - /* Peek at the status of the request. This will tell - * us if there was special handling on the request such that it - * needs to be detached and freed here. 
- */ - spin_lock_irqsave(&isci_request->state_lock, flags); - - needs_cleanup_handling - = isci_request_is_dealloc_managed( - isci_request->status); - - spin_unlock_irqrestore(&isci_request->state_lock, flags); - - } - if (needs_cleanup_handling) { - - dev_dbg(&ihost->pdev->dev, - "%s: cleanup isci_device=%p, request=%p\n", - __func__, idev, isci_request); - - if (isci_request != NULL) { - spin_lock_irqsave(&ihost->scic_lock, flags); - isci_free_tag(ihost, isci_request->io_tag); - isci_request_change_state(isci_request, unallocated); - list_del_init(&isci_request->dev_node); - spin_unlock_irqrestore(&ihost->scic_lock, flags); - } - } - } -} - -/** - * isci_terminate_pending_requests() - This function will change the all of the - * requests on the given device's state to "aborting", will terminate the - * requests, and wait for them to complete. This function must only be - * called from a thread that can wait. Note that the requests are all - * terminated and completed (back to the host, if started there). - * @isci_host: This parameter specifies SCU. - * @idev: This parameter specifies the target. - * - */ -void isci_terminate_pending_requests(struct isci_host *ihost, - struct isci_remote_device *idev) -{ - struct completion request_completion; - enum isci_request_status old_state; - unsigned long flags; - LIST_HEAD(list); - - spin_lock_irqsave(&ihost->scic_lock, flags); - list_splice_init(&idev->reqs_in_process, &list); - - /* assumes that isci_terminate_request_core deletes from the list */ - while (!list_empty(&list)) { - struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node); - - /* Change state to "terminating" if it is currently - * "started". - */ - old_state = isci_request_change_started_to_newstate(ireq, - &request_completion, - terminating); - switch (old_state) { - case started: - case completed: - case aborting: - break; - default: - /* termination in progress, or otherwise dispositioned. - * We know the request was on 'list' so should be safe - * to move it back to reqs_in_process - */ - list_move(&ireq->dev_node, &idev->reqs_in_process); - ireq = NULL; - break; - } - - if (!ireq) - continue; - spin_unlock_irqrestore(&ihost->scic_lock, flags); - - init_completion(&request_completion); - - dev_dbg(&ihost->pdev->dev, - "%s: idev=%p request=%p; task=%p old_state=%d\n", - __func__, idev, ireq, - (!test_bit(IREQ_TMF, &ireq->flags) - ? isci_request_access_task(ireq) - : NULL), - old_state); - - /* If the old_state is started: - * This request was not already being aborted. If it had been, - * then the aborting I/O (ie. the TMF request) would not be in - * the aborting state, and thus would be terminated here. Note - * that since the TMF completion's call to the kernel function - * "complete()" does not happen until the pending I/O request - * terminate fully completes, we do not have to implement a - * special wait here for already aborting requests - the - * termination of the TMF request will force the request - * to finish it's already started terminate. - * - * If old_state == completed: - * This request completed from the SCU hardware perspective - * and now just needs cleaning up in terms of freeing the - * request and potentially calling up to libsas. - * - * If old_state == aborting: - * This request has already gone through a TMF timeout, but may - * not have been terminated; needs cleaning up at least. 
- */ - isci_terminate_request_core(ihost, idev, ireq); - spin_lock_irqsave(&ihost->scic_lock, flags); - } - spin_unlock_irqrestore(&ihost->scic_lock, flags); -} - /** * isci_task_send_lu_reset_sas() - This function is called by of the SAS Domain * Template functions. @@ -807,7 +399,7 @@ static int isci_task_send_lu_reset_sas( * value is "TMF_RESP_FUNC_COMPLETE", or the request timed-out (or * was otherwise unable to be executed ("TMF_RESP_FUNC_FAILED"). */ - isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL); + isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset); #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */ ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS); @@ -826,42 +418,44 @@ static int isci_task_send_lu_reset_sas( int isci_task_lu_reset(struct domain_device *dev, u8 *lun) { - struct isci_host *isci_host = dev_to_ihost(dev); - struct isci_remote_device *isci_device; + struct isci_host *ihost = dev_to_ihost(dev); + struct isci_remote_device *idev; unsigned long flags; - int ret; + int ret = TMF_RESP_FUNC_COMPLETE; - spin_lock_irqsave(&isci_host->scic_lock, flags); - isci_device = isci_lookup_device(dev); - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + spin_lock_irqsave(&ihost->scic_lock, flags); + idev = isci_get_device(dev->lldd_dev); + spin_unlock_irqrestore(&ihost->scic_lock, flags); - dev_dbg(&isci_host->pdev->dev, + dev_dbg(&ihost->pdev->dev, "%s: domain_device=%p, isci_host=%p; isci_device=%p\n", - __func__, dev, isci_host, isci_device); + __func__, dev, ihost, idev); - if (!isci_device) { - /* If the device is gone, stop the escalations. */ - dev_dbg(&isci_host->pdev->dev, "%s: No dev\n", __func__); + if (!idev) { + /* If the device is gone, escalate to I_T_Nexus_Reset. */ + dev_dbg(&ihost->pdev->dev, "%s: No dev\n", __func__); - ret = TMF_RESP_FUNC_COMPLETE; + ret = TMF_RESP_FUNC_FAILED; goto out; } - /* Send the task management part of the reset. */ - if (dev_is_sata(dev)) { - sas_ata_schedule_reset(dev); - ret = TMF_RESP_FUNC_COMPLETE; - } else - ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun); - - /* If the LUN reset worked, all the I/O can now be terminated. */ - if (ret == TMF_RESP_FUNC_COMPLETE) - /* Terminate all I/O now. */ - isci_terminate_pending_requests(isci_host, - isci_device); - + /* Suspend the RNC, kill all TCs */ + if (isci_remote_device_suspend_terminate(ihost, idev, NULL) + != SCI_SUCCESS) { + /* The suspend/terminate only fails if isci_get_device fails */ + ret = TMF_RESP_FUNC_FAILED; + goto out; + } + /* All pending I/Os have been terminated and cleaned up. */ + if (!test_bit(IDEV_GONE, &idev->flags)) { + if (dev_is_sata(dev)) + sas_ata_schedule_reset(dev); + else + /* Send the task management part of the reset. */ + ret = isci_task_send_lu_reset_sas(ihost, idev, lun); + } out: - isci_put_device(isci_device); + isci_put_device(idev); return ret; } @@ -881,63 +475,6 @@ int isci_task_clear_nexus_ha(struct sas_ha_struct *ha) /* Task Management Functions. Must be called from process context. */ -/** - * isci_abort_task_process_cb() - This is a helper function for the abort task - * TMF command. It manages the request state with respect to the successful - * transmission / completion of the abort task request. - * @cb_state: This parameter specifies when this function was called - after - * the TMF request has been started and after it has timed-out. - * @tmf: This parameter specifies the TMF in progress. 
- * - * - */ -static void isci_abort_task_process_cb( - enum isci_tmf_cb_state cb_state, - struct isci_tmf *tmf, - void *cb_data) -{ - struct isci_request *old_request; - - old_request = (struct isci_request *)cb_data; - - dev_dbg(&old_request->isci_host->pdev->dev, - "%s: tmf=%p, old_request=%p\n", - __func__, tmf, old_request); - - switch (cb_state) { - - case isci_tmf_started: - /* The TMF has been started. Nothing to do here, since the - * request state was already set to "aborted" by the abort - * task function. - */ - if ((old_request->status != aborted) - && (old_request->status != completed)) - dev_dbg(&old_request->isci_host->pdev->dev, - "%s: Bad request status (%d): tmf=%p, old_request=%p\n", - __func__, old_request->status, tmf, old_request); - break; - - case isci_tmf_timed_out: - - /* Set the task's state to "aborting", since the abort task - * function thread set it to "aborted" (above) in anticipation - * of the task management request working correctly. Since the - * timeout has now fired, the TMF request failed. We set the - * state such that the request completion will indicate the - * device is no longer present. - */ - isci_request_change_state(old_request, aborting); - break; - - default: - dev_dbg(&old_request->isci_host->pdev->dev, - "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n", - __func__, cb_state, tmf, old_request); - break; - } -} - /** * isci_task_abort_task() - This function is one of the SAS Domain Template * functions. This function is called by libsas to abort a specified task. @@ -947,22 +484,20 @@ static void isci_abort_task_process_cb( */ int isci_task_abort_task(struct sas_task *task) { - struct isci_host *isci_host = dev_to_ihost(task->dev); + struct isci_host *ihost = dev_to_ihost(task->dev); DECLARE_COMPLETION_ONSTACK(aborted_io_completion); struct isci_request *old_request = NULL; - enum isci_request_status old_state; - struct isci_remote_device *isci_device = NULL; + struct isci_remote_device *idev = NULL; struct isci_tmf tmf; int ret = TMF_RESP_FUNC_FAILED; unsigned long flags; - int perform_termination = 0; /* Get the isci_request reference from the task. Note that * this check does not depend on the pending request list * in the device, because tasks driving resets may land here * after completion in the core. */ - spin_lock_irqsave(&isci_host->scic_lock, flags); + spin_lock_irqsave(&ihost->scic_lock, flags); spin_lock(&task->task_state_lock); old_request = task->lldd_task; @@ -971,20 +506,29 @@ int isci_task_abort_task(struct sas_task *task) if (!(task->task_state_flags & SAS_TASK_STATE_DONE) && (task->task_state_flags & SAS_TASK_AT_INITIATOR) && old_request) - isci_device = isci_lookup_device(task->dev); + idev = isci_get_device(task->dev->lldd_dev); spin_unlock(&task->task_state_lock); - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + spin_unlock_irqrestore(&ihost->scic_lock, flags); - dev_dbg(&isci_host->pdev->dev, - "%s: dev = %p, task = %p, old_request == %p\n", - __func__, isci_device, task, old_request); + dev_warn(&ihost->pdev->dev, + "%s: dev = %p (%s%s), task = %p, old_request == %p\n", + __func__, idev, + (dev_is_sata(task->dev) ? "STP/SATA" + : ((dev_is_expander(task->dev)) + ? "SMP" + : "SSP")), + ((idev) ? ((test_bit(IDEV_GONE, &idev->flags)) + ? " IDEV_GONE" + : "") + : " "), + task, old_request); /* Device reset conditions signalled in task_state_flags are the * responsbility of libsas to observe at the start of the error * handler thread. 
*/ - if (!isci_device || !old_request) { + if (!idev || !old_request) { /* The request has already completed and there * is nothing to do here other than to set the task * done bit, and indicate that the task abort function @@ -998,108 +542,72 @@ int isci_task_abort_task(struct sas_task *task) ret = TMF_RESP_FUNC_COMPLETE; - dev_dbg(&isci_host->pdev->dev, - "%s: abort task not needed for %p\n", - __func__, task); + dev_warn(&ihost->pdev->dev, + "%s: abort task not needed for %p\n", + __func__, task); goto out; } - - spin_lock_irqsave(&isci_host->scic_lock, flags); - - /* Check the request status and change to "aborted" if currently - * "starting"; if true then set the I/O kernel completion - * struct that will be triggered when the request completes. - */ - old_state = isci_task_validate_request_to_abort( - old_request, isci_host, isci_device, - &aborted_io_completion); - if ((old_state != started) && - (old_state != completed) && - (old_state != aborting)) { - - spin_unlock_irqrestore(&isci_host->scic_lock, flags); - - /* The request was already being handled by someone else (because - * they got to set the state away from started). - */ - dev_dbg(&isci_host->pdev->dev, - "%s: device = %p; old_request %p already being aborted\n", - __func__, - isci_device, old_request); - ret = TMF_RESP_FUNC_COMPLETE; + /* Suspend the RNC, kill the TC */ + if (isci_remote_device_suspend_terminate(ihost, idev, old_request) + != SCI_SUCCESS) { + dev_warn(&ihost->pdev->dev, + "%s: isci_remote_device_reset_terminate(dev=%p, " + "req=%p, task=%p) failed\n", + __func__, idev, old_request, task); + ret = TMF_RESP_FUNC_FAILED; goto out; } + spin_lock_irqsave(&ihost->scic_lock, flags); + if (task->task_proto == SAS_PROTOCOL_SMP || sas_protocol_ata(task->task_proto) || - test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { + test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) || + test_bit(IDEV_GONE, &idev->flags)) { - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + spin_unlock_irqrestore(&ihost->scic_lock, flags); - dev_dbg(&isci_host->pdev->dev, - "%s: %s request" - " or complete_in_target (%d), thus no TMF\n", - __func__, - ((task->task_proto == SAS_PROTOCOL_SMP) - ? "SMP" - : (sas_protocol_ata(task->task_proto) - ? "SATA/STP" - : "") - ), - test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)); + /* No task to send, so explicitly resume the device here */ + isci_remote_device_resume_from_abort(ihost, idev); - if (test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) { - spin_lock_irqsave(&task->task_state_lock, flags); - task->task_state_flags |= SAS_TASK_STATE_DONE; - task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | - SAS_TASK_STATE_PENDING); - spin_unlock_irqrestore(&task->task_state_lock, flags); - ret = TMF_RESP_FUNC_COMPLETE; - } else { - spin_lock_irqsave(&task->task_state_lock, flags); - task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | - SAS_TASK_STATE_PENDING); - spin_unlock_irqrestore(&task->task_state_lock, flags); - } + dev_warn(&ihost->pdev->dev, + "%s: %s request" + " or complete_in_target (%d), " + "or IDEV_GONE (%d), thus no TMF\n", + __func__, + ((task->task_proto == SAS_PROTOCOL_SMP) + ? "SMP" + : (sas_protocol_ata(task->task_proto) + ? "SATA/STP" + : "") + ), + test_bit(IREQ_COMPLETE_IN_TARGET, + &old_request->flags), + test_bit(IDEV_GONE, &idev->flags)); - /* STP and SMP devices are not sent a TMF, but the - * outstanding I/O request is terminated below. 
This is - * because SATA/STP and SMP discovery path timeouts directly - * call the abort task interface for cleanup. - */ - perform_termination = 1; + spin_lock_irqsave(&task->task_state_lock, flags); + task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | + SAS_TASK_STATE_PENDING); + task->task_state_flags |= SAS_TASK_STATE_DONE; + spin_unlock_irqrestore(&task->task_state_lock, flags); + ret = TMF_RESP_FUNC_COMPLETE; } else { /* Fill in the tmf stucture */ isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, - isci_abort_task_process_cb, old_request); - spin_unlock_irqrestore(&isci_host->scic_lock, flags); + spin_unlock_irqrestore(&ihost->scic_lock, flags); + /* Send the task management request. */ #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* 1/2 second timeout */ - ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, + ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_ABORT_TASK_TIMEOUT_MS); - - if (ret == TMF_RESP_FUNC_COMPLETE) - perform_termination = 1; - else - dev_dbg(&isci_host->pdev->dev, - "%s: isci_task_send_tmf failed\n", __func__); } - if (perform_termination) { - set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags); - - /* Clean up the request on our side, and wait for the aborted - * I/O to complete. - */ - isci_terminate_request_core(isci_host, isci_device, - old_request); - } - - /* Make sure we do not leave a reference to aborted_io_completion */ - old_request->io_request_completion = NULL; - out: - isci_put_device(isci_device); +out: + dev_warn(&ihost->pdev->dev, + "%s: Done; dev = %p, task = %p , old_request == %p\n", + __func__, idev, task, old_request); + isci_put_device(idev); return ret; } @@ -1195,14 +703,11 @@ isci_task_request_complete(struct isci_host *ihost, { struct isci_tmf *tmf = isci_request_access_tmf(ireq); struct completion *tmf_complete = NULL; - struct completion *request_complete = ireq->io_request_completion; dev_dbg(&ihost->pdev->dev, "%s: request = %p, status=%d\n", __func__, ireq, completion_status); - isci_request_change_state(ireq, completed); - set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags); if (tmf) { @@ -1226,20 +731,11 @@ isci_task_request_complete(struct isci_host *ihost, */ set_bit(IREQ_TERMINATED, &ireq->flags); - /* As soon as something is in the terminate path, deallocation is - * managed there. Note that the final non-managed state of a task - * request is "completed". - */ - if ((ireq->status == completed) || - !isci_request_is_dealloc_managed(ireq->status)) { - isci_request_change_state(ireq, unallocated); - isci_free_tag(ihost, ireq->io_tag); - list_del_init(&ireq->dev_node); - } + if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags)) + wake_up_all(&ihost->eventq); - /* "request_complete" is set if the task was being terminated. */ - if (request_complete) - complete(request_complete); + if (!test_bit(IREQ_NO_AUTO_FREE_TAG, &ireq->flags)) + isci_free_tag(ihost, ireq->io_tag); /* The task management part completes last. 
*/ if (tmf_complete) @@ -1250,48 +746,38 @@ static int isci_reset_device(struct isci_host *ihost, struct domain_device *dev, struct isci_remote_device *idev) { - int rc; - unsigned long flags; - enum sci_status status; + int rc = TMF_RESP_FUNC_COMPLETE, reset_stat = -1; struct sas_phy *phy = sas_get_local_phy(dev); struct isci_port *iport = dev->port->lldd_port; dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev); - spin_lock_irqsave(&ihost->scic_lock, flags); - status = sci_remote_device_reset(idev); - spin_unlock_irqrestore(&ihost->scic_lock, flags); - - if (status != SCI_SUCCESS) { - dev_dbg(&ihost->pdev->dev, - "%s: sci_remote_device_reset(%p) returned %d!\n", - __func__, idev, status); + /* Suspend the RNC, terminate all outstanding TCs. */ + if (isci_remote_device_suspend_terminate(ihost, idev, NULL) + != SCI_SUCCESS) { rc = TMF_RESP_FUNC_FAILED; goto out; } + /* Note that since the termination for outstanding requests succeeded, + * this function will return success. This is because the resets will + * only fail if the device has been removed (ie. hotplug), and the + * primary duty of this function is to cleanup tasks, so that is the + * relevant status. + */ + if (!test_bit(IDEV_GONE, &idev->flags)) { + if (scsi_is_sas_phy_local(phy)) { + struct isci_phy *iphy = &ihost->phys[phy->number]; - if (scsi_is_sas_phy_local(phy)) { - struct isci_phy *iphy = &ihost->phys[phy->number]; - - rc = isci_port_perform_hard_reset(ihost, iport, iphy); - } else - rc = sas_phy_reset(phy, !dev_is_sata(dev)); - - /* Terminate in-progress I/O now. */ - isci_remote_device_nuke_requests(ihost, idev); - - /* Since all pending TCs have been cleaned, resume the RNC. */ - spin_lock_irqsave(&ihost->scic_lock, flags); - status = sci_remote_device_reset_complete(idev); - spin_unlock_irqrestore(&ihost->scic_lock, flags); - - if (status != SCI_SUCCESS) { - dev_dbg(&ihost->pdev->dev, - "%s: sci_remote_device_reset_complete(%p) " - "returned %d!\n", __func__, idev, status); + reset_stat = isci_port_perform_hard_reset(ihost, iport, + iphy); + } else + reset_stat = sas_phy_reset(phy, !dev_is_sata(dev)); } + /* Explicitly resume the RNC here, since there was no task sent. */ + isci_remote_device_resume_from_abort(ihost, idev); - dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev); + dev_dbg(&ihost->pdev->dev, "%s: idev %p complete, reset_stat=%d.\n", + __func__, idev, reset_stat); out: sas_put_local_phy(phy); return rc; @@ -1305,7 +791,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev) int ret; spin_lock_irqsave(&ihost->scic_lock, flags); - idev = isci_lookup_device(dev); + idev = isci_get_device(dev->lldd_dev); spin_unlock_irqrestore(&ihost->scic_lock, flags); if (!idev) { diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h index 7b6d0e32fd9b..9c06cbad1d26 100644 --- a/drivers/scsi/isci/task.h +++ b/drivers/scsi/isci/task.h @@ -62,19 +62,6 @@ struct isci_request; -/** - * enum isci_tmf_cb_state - This enum defines the possible states in which the - * TMF callback function is invoked during the TMF execution process. - * - * - */ -enum isci_tmf_cb_state { - - isci_tmf_init_state = 0, - isci_tmf_started, - isci_tmf_timed_out -}; - /** * enum isci_tmf_function_codes - This enum defines the possible preparations * of task management requests. 
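Note on the completion signalling in the isci_task_request_complete() hunk above: the old io_request_completion pointer is replaced by a flag-plus-waitqueue handshake, where the terminate path clears IREQ_ABORT_PATH_ACTIVE and wakes ihost->eventq, and whoever initiated the abort sleeps until the flag drops. The waiter side is not part of this hunk, so the sketch below pairs the two halves purely for illustration; the helper names are invented, and only the flag, the wait queue and the bit helpers come from the code above.

#include <linux/wait.h>
#include <linux/bitops.h>

/* Producer side, mirroring isci_task_request_complete() above. */
static void sketch_signal_abort_done(struct isci_host *ihost,
				     struct isci_request *ireq)
{
	/* drop the flag first, then wake any sleeper that checks it */
	if (test_and_clear_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags))
		wake_up_all(&ihost->eventq);
}

/* Consumer side (assumed, lives outside this hunk): sleep until the
 * terminate path has cleared the flag for this request.
 */
static void sketch_wait_abort_done(struct isci_host *ihost,
				   struct isci_request *ireq)
{
	wait_event(ihost->eventq,
		   !test_bit(IREQ_ABORT_PATH_ACTIVE, &ireq->flags));
}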
@@ -87,6 +74,7 @@ enum isci_tmf_function_codes { isci_tmf_ssp_task_abort = TMF_ABORT_TASK, isci_tmf_ssp_lun_reset = TMF_LU_RESET, }; + /** * struct isci_tmf - This class represents the task management object which * acts as an interface to libsas for processing task management requests @@ -106,15 +94,6 @@ struct isci_tmf { u16 io_tag; enum isci_tmf_function_codes tmf_code; int status; - - /* The optional callback function allows the user process to - * track the TMF transmit / timeout conditions. - */ - void (*cb_state_func)( - enum isci_tmf_cb_state, - struct isci_tmf *, void *); - void *cb_data; - }; static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf) @@ -208,113 +187,4 @@ int isci_queuecommand( struct scsi_cmnd *scsi_cmd, void (*donefunc)(struct scsi_cmnd *)); -/** - * enum isci_completion_selection - This enum defines the possible actions to - * take with respect to a given request's notification back to libsas. - * - * - */ -enum isci_completion_selection { - - isci_perform_normal_io_completion, /* Normal notify (task_done) */ - isci_perform_aborted_io_completion, /* No notification. */ - isci_perform_error_io_completion /* Use sas_task_abort */ -}; - -/** - * isci_task_set_completion_status() - This function sets the completion status - * for the request. - * @task: This parameter is the completed request. - * @response: This parameter is the response code for the completed task. - * @status: This parameter is the status code for the completed task. - * -* @return The new notification mode for the request. -*/ -static inline enum isci_completion_selection -isci_task_set_completion_status( - struct sas_task *task, - enum service_response response, - enum exec_status status, - enum isci_completion_selection task_notification_selection) -{ - unsigned long flags; - - spin_lock_irqsave(&task->task_state_lock, flags); - - /* If a device reset is being indicated, make sure the I/O - * is in the error path. - */ - if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) { - /* Fail the I/O to make sure it goes into the error path. */ - response = SAS_TASK_UNDELIVERED; - status = SAM_STAT_TASK_ABORTED; - - task_notification_selection = isci_perform_error_io_completion; - } - task->task_status.resp = response; - task->task_status.stat = status; - - switch (task->task_proto) { - - case SAS_PROTOCOL_SATA: - case SAS_PROTOCOL_STP: - case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: - - if (task_notification_selection - == isci_perform_error_io_completion) { - /* SATA/STP I/O has it's own means of scheduling device - * error handling on the normal path. - */ - task_notification_selection - = isci_perform_normal_io_completion; - } - break; - default: - break; - } - - switch (task_notification_selection) { - - case isci_perform_error_io_completion: - - if (task->task_proto == SAS_PROTOCOL_SMP) { - /* There is no error escalation in the SMP case. - * Convert to a normal completion to avoid the - * timeout in the discovery path and to let the - * next action take place quickly. - */ - task_notification_selection - = isci_perform_normal_io_completion; - - /* Fall through to the normal case... */ - } else { - /* Use sas_task_abort */ - /* Leave SAS_TASK_STATE_DONE clear - * Leave SAS_TASK_AT_INITIATOR set. - */ - break; - } - - case isci_perform_aborted_io_completion: - /* This path can occur with task-managed requests as well as - * requests terminated because of LUN or device resets. - */ - /* Fall through to the normal case... 
*/ - case isci_perform_normal_io_completion: - /* Normal notification (task_done) */ - task->task_state_flags |= SAS_TASK_STATE_DONE; - task->task_state_flags &= ~(SAS_TASK_AT_INITIATOR | - SAS_TASK_STATE_PENDING); - break; - default: - WARN_ONCE(1, "unknown task_notification_selection: %d\n", - task_notification_selection); - break; - } - - spin_unlock_irqrestore(&task->task_state_lock, flags); - - return task_notification_selection; - -} #endif /* !defined(_SCI_TASK_H_) */ diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c index 16f88ab939c8..04a6d0d59a22 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.c +++ b/drivers/scsi/isci/unsolicited_frame_control.c @@ -57,31 +57,19 @@ #include "unsolicited_frame_control.h" #include "registers.h" -int sci_unsolicited_frame_control_construct(struct isci_host *ihost) +void sci_unsolicited_frame_control_construct(struct isci_host *ihost) { struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control; struct sci_unsolicited_frame *uf; - u32 buf_len, header_len, i; - dma_addr_t dma; - size_t size; - void *virt; - - /* - * Prepare all of the memory sizes for the UF headers, UF address - * table, and UF buffers themselves. - */ - buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE; - header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header); - size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(uf_control->address_table.array[0]); + dma_addr_t dma = ihost->ufi_dma; + void *virt = ihost->ufi_buf; + int i; /* * The Unsolicited Frame buffers are set at the start of the UF * memory descriptor entry. The headers and address table will be * placed after the buffers. */ - virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL); - if (!virt) - return -ENOMEM; /* * Program the location of the UF header table into the SCU. @@ -93,8 +81,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost) * headers, since we program the UF address table pointers to * NULL. */ - uf_control->headers.physical_address = dma + buf_len; - uf_control->headers.array = virt + buf_len; + uf_control->headers.physical_address = dma + SCI_UFI_BUF_SIZE; + uf_control->headers.array = virt + SCI_UFI_BUF_SIZE; /* * Program the location of the UF address table into the SCU. @@ -103,8 +91,8 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost) * byte boundary already due to above programming headers being on a * 64-bit boundary and headers are on a 64-bytes in size. 
*/ - uf_control->address_table.physical_address = dma + buf_len + header_len; - uf_control->address_table.array = virt + buf_len + header_len; + uf_control->address_table.physical_address = dma + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE; + uf_control->address_table.array = virt + SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE; uf_control->get = 0; /* @@ -135,8 +123,6 @@ int sci_unsolicited_frame_control_construct(struct isci_host *ihost) virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE; } - - return 0; } enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control, diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h index 75d896686f5a..1bc551ec611f 100644 --- a/drivers/scsi/isci/unsolicited_frame_control.h +++ b/drivers/scsi/isci/unsolicited_frame_control.h @@ -257,9 +257,13 @@ struct sci_unsolicited_frame_control { }; +#define SCI_UFI_BUF_SIZE (SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE) +#define SCI_UFI_HDR_SIZE (SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header)) +#define SCI_UFI_TOTAL_SIZE (SCI_UFI_BUF_SIZE + SCI_UFI_HDR_SIZE + SCU_MAX_UNSOLICITED_FRAMES * sizeof(u64)) + struct isci_host; -int sci_unsolicited_frame_control_construct(struct isci_host *ihost); +void sci_unsolicited_frame_control_construct(struct isci_host *ihost); enum sci_status sci_unsolicited_frame_control_get_header( struct sci_unsolicited_frame_control *uf_control, diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index fb6610b249e1..c1402fb499ab 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c @@ -1742,17 +1742,19 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK; - if (mfs >= FC_SP_MIN_MAX_PAYLOAD && - mfs <= lport->mfs) { - lport->mfs = mfs; - fc_host_maxframe_size(lport->host) = mfs; - } else { + + if (mfs < FC_SP_MIN_MAX_PAYLOAD || mfs > FC_SP_MAX_MAX_PAYLOAD) { FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " "lport->mfs:%hu\n", mfs, lport->mfs); fc_lport_error(lport, fp); goto err; } + if (mfs <= lport->mfs) { + lport->mfs = mfs; + fc_host_maxframe_size(lport->host) = mfs; + } + csp_flags = ntohs(flp->fl_csp.sp_features); r_a_tov = ntohl(flp->fl_csp.sp_r_a_tov); e_d_tov = ntohl(flp->fl_csp.sp_e_d_tov); diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index bc0cecc6ad62..441d88ad99a7 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -546,11 +546,12 @@ static struct ata_port_info sata_port_info = { .port_ops = &sas_sata_ops }; -int sas_ata_init_host_and_port(struct domain_device *found_dev) +int sas_ata_init(struct domain_device *found_dev) { struct sas_ha_struct *ha = found_dev->port->ha; struct Scsi_Host *shost = ha->core.shost; struct ata_port *ap; + int rc; ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, @@ -567,8 +568,11 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev) ap->private_data = found_dev; ap->cbl = ATA_CBL_SATA; ap->scsi_host = shost; - /* publish initialized ata port */ - smp_wmb(); + rc = ata_sas_port_init(ap); + if (rc) { + ata_sas_port_destroy(ap); + return rc; + } found_dev->sata_dev.ap = ap; return 0; @@ -648,18 +652,13 @@ static void sas_get_ata_command_set(struct domain_device *dev) void sas_probe_sata(struct asd_sas_port *port) { struct domain_device *dev, *n; - int err; mutex_lock(&port->ha->disco_mutex); - 
list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) { + list_for_each_entry(dev, &port->disco_list, disco_list_node) { if (!dev_is_sata(dev)) continue; - err = sas_ata_init_host_and_port(dev); - if (err) - sas_fail_probe(dev, __func__, err); - else - ata_sas_async_port_init(dev->sata_dev.ap); + ata_sas_async_probe(dev->sata_dev.ap); } mutex_unlock(&port->ha->disco_mutex); @@ -718,18 +717,6 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie) sas_put_device(dev); } -static bool sas_ata_dev_eh_valid(struct domain_device *dev) -{ - struct ata_port *ap; - - if (!dev_is_sata(dev)) - return false; - ap = dev->sata_dev.ap; - /* consume fully initialized ata ports */ - smp_rmb(); - return !!ap; -} - void sas_ata_strategy_handler(struct Scsi_Host *shost) { struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); @@ -753,7 +740,7 @@ void sas_ata_strategy_handler(struct Scsi_Host *shost) spin_lock(&port->dev_list_lock); list_for_each_entry(dev, &port->dev_list, dev_list_node) { - if (!sas_ata_dev_eh_valid(dev)) + if (!dev_is_sata(dev)) continue; async_schedule_domain(async_sas_ata_eh, dev, &async); } diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 364679675602..629a0865b130 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -72,6 +72,7 @@ static int sas_get_port_device(struct asd_sas_port *port) struct asd_sas_phy *phy; struct sas_rphy *rphy; struct domain_device *dev; + int rc = -ENODEV; dev = sas_alloc_device(); if (!dev) @@ -110,9 +111,16 @@ static int sas_get_port_device(struct asd_sas_port *port) sas_init_dev(dev); + dev->port = port; switch (dev->dev_type) { - case SAS_END_DEV: case SATA_DEV: + rc = sas_ata_init(dev); + if (rc) { + rphy = NULL; + break; + } + /* fall through */ + case SAS_END_DEV: rphy = sas_end_device_alloc(port->port); break; case EDGE_DEV: @@ -131,19 +139,14 @@ static int sas_get_port_device(struct asd_sas_port *port) if (!rphy) { sas_put_device(dev); - return -ENODEV; + return rc; } - spin_lock_irq(&port->phy_list_lock); - list_for_each_entry(phy, &port->phy_list, port_phy_el) - sas_phy_set_target(phy, dev); - spin_unlock_irq(&port->phy_list_lock); rphy->identify.phy_identifier = phy->phy->identify.phy_identifier; memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE); sas_fill_in_rphy(dev, rphy); sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); port->port_dev = dev; - dev->port = port; dev->linkrate = port->linkrate; dev->min_linkrate = port->linkrate; dev->max_linkrate = port->linkrate; @@ -155,6 +158,7 @@ static int sas_get_port_device(struct asd_sas_port *port) sas_device_set_phy(dev, port->port); dev->rphy = rphy; + get_device(&dev->rphy->dev); if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) list_add_tail(&dev->disco_list_node, &port->disco_list); @@ -164,6 +168,11 @@ static int sas_get_port_device(struct asd_sas_port *port) spin_unlock_irq(&port->dev_list_lock); } + spin_lock_irq(&port->phy_list_lock); + list_for_each_entry(phy, &port->phy_list, port_phy_el) + sas_phy_set_target(phy, dev); + spin_unlock_irq(&port->phy_list_lock); + return 0; } @@ -205,8 +214,7 @@ void sas_notify_lldd_dev_gone(struct domain_device *dev) static void sas_probe_devices(struct work_struct *work) { struct domain_device *dev, *n; - struct sas_discovery_event *ev = - container_of(work, struct sas_discovery_event, work); + struct sas_discovery_event *ev = to_sas_discovery_event(work); struct asd_sas_port *port = ev->port; clear_bit(DISCE_PROBE, &port->disc.pending); 
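The to_sas_discovery_event() calls introduced in this file (and the matching to_asd_sas_event()/to_sas_ha_event() conversions later in the patch) replace open-coded container_of() because the embedded work member is now a struct sas_work carrying its own drain_node list rather than a bare work_struct. One plausible shape for those helpers, assuming the sas_work definition this series adds to the libsas headers; treat it as a sketch, not the header's literal text:

#include <linux/list.h>
#include <linux/workqueue.h>

/* Assumed layout: a work item that can also be parked on ha->defer_q
 * while the ha is draining (see sas_queue_work() in sas_event.c below).
 */
struct sas_work {
	struct list_head drain_node;
	struct work_struct work;
};

static inline void INIT_SAS_WORK(struct sas_work *sw,
				 void (*fn)(struct work_struct *))
{
	INIT_WORK(&sw->work, fn);
	INIT_LIST_HEAD(&sw->drain_node);	/* empty == not deferred */
}

static inline struct sas_discovery_event *
to_sas_discovery_event(struct work_struct *work)
{
	/* the work_struct is embedded in sas_work, which in turn is
	 * embedded in the event, so unwind two layers of container_of()
	 */
	struct sas_work *sw = container_of(work, struct sas_work, work);

	return container_of(sw, struct sas_discovery_event, work);
}

The drain_node list head is what lets sas_queue_work() defer work onto ha->defer_q without double-adding it (the list_empty() check), which is exactly the change visible in sas_event.c further down.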
@@ -255,6 +263,9 @@ void sas_free_device(struct kref *kref) { struct domain_device *dev = container_of(kref, typeof(*dev), kref); + put_device(&dev->rphy->dev); + dev->rphy = NULL; + if (dev->parent) sas_put_device(dev->parent); @@ -291,8 +302,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d static void sas_destruct_devices(struct work_struct *work) { struct domain_device *dev, *n; - struct sas_discovery_event *ev = - container_of(work, struct sas_discovery_event, work); + struct sas_discovery_event *ev = to_sas_discovery_event(work); struct asd_sas_port *port = ev->port; clear_bit(DISCE_DESTRUCT, &port->disc.pending); @@ -302,7 +312,6 @@ static void sas_destruct_devices(struct work_struct *work) sas_remove_children(&dev->rphy->dev); sas_rphy_delete(dev->rphy); - dev->rphy = NULL; sas_unregister_common_dev(port, dev); } } @@ -314,11 +323,11 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev) /* this rphy never saw sas_rphy_add */ list_del_init(&dev->disco_list_node); sas_rphy_free(dev->rphy); - dev->rphy = NULL; sas_unregister_common_dev(port, dev); + return; } - if (dev->rphy && !test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) { + if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) { sas_rphy_unlink(dev->rphy); list_move_tail(&dev->disco_list_node, &port->destroy_list); sas_discover_event(dev->port, DISCE_DESTRUCT); @@ -377,8 +386,7 @@ static void sas_discover_domain(struct work_struct *work) { struct domain_device *dev; int error = 0; - struct sas_discovery_event *ev = - container_of(work, struct sas_discovery_event, work); + struct sas_discovery_event *ev = to_sas_discovery_event(work); struct asd_sas_port *port = ev->port; clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending); @@ -419,8 +427,6 @@ static void sas_discover_domain(struct work_struct *work) if (error) { sas_rphy_free(dev->rphy); - dev->rphy = NULL; - list_del_init(&dev->disco_list_node); spin_lock_irq(&port->dev_list_lock); list_del_init(&dev->dev_list_node); @@ -437,8 +443,7 @@ static void sas_discover_domain(struct work_struct *work) static void sas_revalidate_domain(struct work_struct *work) { int res = 0; - struct sas_discovery_event *ev = - container_of(work, struct sas_discovery_event, work); + struct sas_discovery_event *ev = to_sas_discovery_event(work); struct asd_sas_port *port = ev->port; struct sas_ha_struct *ha = port->ha; @@ -466,21 +471,25 @@ static void sas_revalidate_domain(struct work_struct *work) /* ---------- Events ---------- */ -static void sas_chain_work(struct sas_ha_struct *ha, struct work_struct *work) +static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw) { - /* chained work is not subject to SA_HA_DRAINING or SAS_HA_REGISTERED */ - scsi_queue_work(ha->core.shost, work); + /* chained work is not subject to SA_HA_DRAINING or + * SAS_HA_REGISTERED, because it is either submitted in the + * workqueue, or known to be submitted from a context that is + * not racing against draining + */ + scsi_queue_work(ha->core.shost, &sw->work); } static void sas_chain_event(int event, unsigned long *pending, - struct work_struct *work, + struct sas_work *sw, struct sas_ha_struct *ha) { if (!test_and_set_bit(event, pending)) { unsigned long flags; spin_lock_irqsave(&ha->state_lock, flags); - sas_chain_work(ha, work); + sas_chain_work(ha, sw); spin_unlock_irqrestore(&ha->state_lock, flags); } } @@ -519,7 +528,7 @@ void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port) disc->pending = 0; for (i = 0; i < 
DISC_NUM_EVENTS; i++) { - INIT_WORK(&disc->disc_work[i].work, sas_event_fns[i]); + INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]); disc->disc_work[i].port = port; } } diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c index 16639bbae629..4e4292d210c1 100644 --- a/drivers/scsi/libsas/sas_event.c +++ b/drivers/scsi/libsas/sas_event.c @@ -27,19 +27,21 @@ #include "sas_internal.h" #include "sas_dump.h" -void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work) +void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw) { if (!test_bit(SAS_HA_REGISTERED, &ha->state)) return; - if (test_bit(SAS_HA_DRAINING, &ha->state)) - list_add(&work->entry, &ha->defer_q); - else - scsi_queue_work(ha->core.shost, work); + if (test_bit(SAS_HA_DRAINING, &ha->state)) { + /* add it to the defer list, if not already pending */ + if (list_empty(&sw->drain_node)) + list_add(&sw->drain_node, &ha->defer_q); + } else + scsi_queue_work(ha->core.shost, &sw->work); } static void sas_queue_event(int event, unsigned long *pending, - struct work_struct *work, + struct sas_work *work, struct sas_ha_struct *ha) { if (!test_and_set_bit(event, pending)) { @@ -55,7 +57,7 @@ static void sas_queue_event(int event, unsigned long *pending, void __sas_drain_work(struct sas_ha_struct *ha) { struct workqueue_struct *wq = ha->core.shost->work_q; - struct work_struct *w, *_w; + struct sas_work *sw, *_sw; set_bit(SAS_HA_DRAINING, &ha->state); /* flush submitters */ @@ -66,9 +68,9 @@ void __sas_drain_work(struct sas_ha_struct *ha) spin_lock_irq(&ha->state_lock); clear_bit(SAS_HA_DRAINING, &ha->state); - list_for_each_entry_safe(w, _w, &ha->defer_q, entry) { - list_del_init(&w->entry); - sas_queue_work(ha, w); + list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) { + list_del_init(&sw->drain_node); + sas_queue_work(ha, sw); } spin_unlock_irq(&ha->state_lock); } @@ -151,7 +153,7 @@ int sas_init_events(struct sas_ha_struct *sas_ha) int i; for (i = 0; i < HA_NUM_EVENTS; i++) { - INIT_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]); + INIT_SAS_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]); sas_ha->ha_events[i].ha = sas_ha; } diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 05acd9e35fc4..caa0525d2523 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -202,6 +202,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) u8 sas_addr[SAS_ADDR_SIZE]; struct smp_resp *resp = rsp; struct discover_resp *dr = &resp->disc; + struct sas_ha_struct *ha = dev->port->ha; struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; struct sas_rphy *rphy = dev->rphy; @@ -209,6 +210,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) char *type; if (new_phy) { + if (WARN_ON_ONCE(test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))) + return; phy->phy = sas_phy_alloc(&rphy->dev, phy_id); /* FIXME: error_handling */ @@ -233,6 +236,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE); phy->attached_dev_type = to_dev_type(dr); + if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) + goto out; phy->phy_id = phy_id; phy->linkrate = dr->linkrate; phy->attached_sata_host = dr->attached_sata_host; @@ -240,7 +245,14 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) phy->attached_sata_ps = dr->attached_sata_ps; phy->attached_iproto = 
dr->iproto << 1; phy->attached_tproto = dr->tproto << 1; - memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); + /* help some expanders that fail to zero sas_address in the 'no + * device' case + */ + if (phy->attached_dev_type == NO_DEVICE || + phy->linkrate < SAS_LINK_RATE_1_5_GBPS) + memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); + else + memcpy(phy->attached_sas_addr, dr->attached_sas_addr, SAS_ADDR_SIZE); phy->attached_phy_id = dr->attached_phy_id; phy->phy_change_count = dr->change_count; phy->routing_attr = dr->routing_attr; @@ -266,6 +278,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) return; } + out: switch (phy->attached_dev_type) { case SATA_PENDING: type = "stp pending"; @@ -304,7 +317,15 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) else return; - SAS_DPRINTK("ex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", + /* if the attached device type changed and ata_eh is active, + * make sure we run revalidation when eh completes (see: + * sas_enable_revalidation) + */ + if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) + set_bit(DISCE_REVALIDATE_DOMAIN, &dev->port->disc.pending); + + SAS_DPRINTK("%sex %016llx phy%02d:%c:%X attached: %016llx (%s)\n", + test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state) ? "ata: " : "", SAS_ADDR(dev->sas_addr), phy->phy_id, sas_route_char(dev, phy), phy->linkrate, SAS_ADDR(phy->attached_sas_addr), type); @@ -776,13 +797,16 @@ static struct domain_device *sas_ex_discover_end_dev( if (res) goto out_free; + sas_init_dev(child); + res = sas_ata_init(child); + if (res) + goto out_free; rphy = sas_end_device_alloc(phy->port); - if (unlikely(!rphy)) + if (!rphy) goto out_free; - sas_init_dev(child); - child->rphy = rphy; + get_device(&rphy->dev); list_add_tail(&child->disco_list_node, &parent->port->disco_list); @@ -806,6 +830,7 @@ static struct domain_device *sas_ex_discover_end_dev( sas_init_dev(child); child->rphy = rphy; + get_device(&rphy->dev); sas_fill_in_rphy(child, rphy); list_add_tail(&child->disco_list_node, &parent->port->disco_list); @@ -830,8 +855,6 @@ static struct domain_device *sas_ex_discover_end_dev( out_list_del: sas_rphy_free(child->rphy); - child->rphy = NULL; - list_del(&child->disco_list_node); spin_lock_irq(&parent->port->dev_list_lock); list_del(&child->dev_list_node); @@ -911,6 +934,7 @@ static struct domain_device *sas_ex_discover_expander( } port = parent->port; child->rphy = rphy; + get_device(&rphy->dev); edev = rphy_to_expander_device(rphy); child->dev_type = phy->attached_dev_type; kref_get(&parent->kref); @@ -934,6 +958,7 @@ static struct domain_device *sas_ex_discover_expander( res = sas_discover_expander(child); if (res) { + sas_rphy_delete(rphy); spin_lock_irq(&parent->port->dev_list_lock); list_del(&child->dev_list_node); spin_unlock_irq(&parent->port->dev_list_lock); @@ -1718,9 +1743,17 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, int phy_change_count = 0; res = sas_get_phy_change_count(dev, i, &phy_change_count); - if (res) - goto out; - else if (phy_change_count != ex->ex_phy[i].phy_change_count) { + switch (res) { + case SMP_RESP_PHY_VACANT: + case SMP_RESP_NO_PHY: + continue; + case SMP_RESP_FUNC_ACC: + break; + default: + return res; + } + + if (phy_change_count != ex->ex_phy[i].phy_change_count) { if (update) ex->ex_phy[i].phy_change_count = phy_change_count; @@ -1728,8 +1761,7 @@ static int sas_find_bcast_phy(struct domain_device *dev, int *phy_id, return 0; } } -out: - return res; + return 0; } static int 
sas_get_ex_change_count(struct domain_device *dev, int *ecc) diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 120bff64be30..10cb5ae30977 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -94,8 +94,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr) void sas_hae_reset(struct work_struct *work) { - struct sas_ha_event *ev = - container_of(work, struct sas_ha_event, work); + struct sas_ha_event *ev = to_sas_ha_event(work); struct sas_ha_struct *ha = ev->ha; clear_bit(HAE_RESET, &ha->pending); @@ -369,14 +368,14 @@ static void sas_phy_release(struct sas_phy *phy) static void phy_reset_work(struct work_struct *work) { - struct sas_phy_data *d = container_of(work, typeof(*d), reset_work); + struct sas_phy_data *d = container_of(work, typeof(*d), reset_work.work); d->reset_result = transport_sas_phy_reset(d->phy, d->hard_reset); } static void phy_enable_work(struct work_struct *work) { - struct sas_phy_data *d = container_of(work, typeof(*d), enable_work); + struct sas_phy_data *d = container_of(work, typeof(*d), enable_work.work); d->enable_result = sas_phy_enable(d->phy, d->enable); } @@ -389,8 +388,8 @@ static int sas_phy_setup(struct sas_phy *phy) return -ENOMEM; mutex_init(&d->event_lock); - INIT_WORK(&d->reset_work, phy_reset_work); - INIT_WORK(&d->enable_work, phy_enable_work); + INIT_SAS_WORK(&d->reset_work, phy_reset_work); + INIT_SAS_WORK(&d->enable_work, phy_enable_work); d->phy = phy; phy->hostdata = d; diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index f05c63879949..507e4cf12e56 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -45,10 +45,10 @@ struct sas_phy_data { struct mutex event_lock; int hard_reset; int reset_result; - struct work_struct reset_work; + struct sas_work reset_work; int enable; int enable_result; - struct work_struct enable_work; + struct sas_work enable_work; }; void sas_scsi_recover_host(struct Scsi_Host *shost); @@ -80,7 +80,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work); void sas_porte_link_reset_err(struct work_struct *work); void sas_porte_timer_event(struct work_struct *work); void sas_porte_hard_reset(struct work_struct *work); -void sas_queue_work(struct sas_ha_struct *ha, struct work_struct *work); +void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw); int sas_notify_lldd_dev_found(struct domain_device *); void sas_notify_lldd_dev_gone(struct domain_device *); diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c index dcfd4a9105c5..521422e857ab 100644 --- a/drivers/scsi/libsas/sas_phy.c +++ b/drivers/scsi/libsas/sas_phy.c @@ -32,8 +32,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); + struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; clear_bit(PHYE_LOSS_OF_SIGNAL, &phy->phy_events_pending); @@ -43,8 +42,7 @@ static void sas_phye_loss_of_signal(struct work_struct *work) static void sas_phye_oob_done(struct work_struct *work) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); + struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; clear_bit(PHYE_OOB_DONE, &phy->phy_events_pending); @@ -53,8 +51,7 @@ static void sas_phye_oob_done(struct work_struct *work) static void sas_phye_oob_error(struct work_struct *work) { - struct asd_sas_event *ev = - container_of(work, 
struct asd_sas_event, work); + struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; struct sas_ha_struct *sas_ha = phy->ha; struct asd_sas_port *port = phy->port; @@ -85,8 +82,7 @@ static void sas_phye_oob_error(struct work_struct *work) static void sas_phye_spinup_hold(struct work_struct *work) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); + struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; struct sas_ha_struct *sas_ha = phy->ha; struct sas_internal *i = @@ -127,14 +123,12 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) phy->error = 0; INIT_LIST_HEAD(&phy->port_phy_el); for (k = 0; k < PORT_NUM_EVENTS; k++) { - INIT_WORK(&phy->port_events[k].work, - sas_port_event_fns[k]); + INIT_SAS_WORK(&phy->port_events[k].work, sas_port_event_fns[k]); phy->port_events[k].phy = phy; } for (k = 0; k < PHY_NUM_EVENTS; k++) { - INIT_WORK(&phy->phy_events[k].work, - sas_phy_event_fns[k]); + INIT_SAS_WORK(&phy->phy_events[k].work, sas_phy_event_fns[k]); phy->phy_events[k].phy = phy; } @@ -144,8 +138,7 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) spin_lock_init(&phy->sas_prim_lock); phy->frame_rcvd_size = 0; - phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, - i); + phy->phy = sas_phy_alloc(&sas_ha->core.shost->shost_gendev, i); if (!phy->phy) return -ENOMEM; diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index eb19c016d500..e884a8c58a0c 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -123,7 +123,7 @@ static void sas_form_port(struct asd_sas_phy *phy) spin_unlock_irqrestore(&sas_ha->phy_port_lock, flags); if (!port->port) { - port->port = sas_port_alloc(phy->phy->dev.parent, phy->id); + port->port = sas_port_alloc(phy->phy->dev.parent, port->id); BUG_ON(!port->port); sas_port_add(port->port); } @@ -208,8 +208,7 @@ void sas_deform_port(struct asd_sas_phy *phy, int gone) void sas_porte_bytes_dmaed(struct work_struct *work) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); + struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; clear_bit(PORTE_BYTES_DMAED, &phy->port_events_pending); @@ -219,8 +218,7 @@ void sas_porte_bytes_dmaed(struct work_struct *work) void sas_porte_broadcast_rcvd(struct work_struct *work) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); + struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; unsigned long flags; u32 prim; @@ -237,8 +235,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work) void sas_porte_link_reset_err(struct work_struct *work) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); + struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; clear_bit(PORTE_LINK_RESET_ERR, &phy->port_events_pending); @@ -248,8 +245,7 @@ void sas_porte_link_reset_err(struct work_struct *work) void sas_porte_timer_event(struct work_struct *work) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); + struct asd_sas_event *ev = to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; clear_bit(PORTE_TIMER_EVENT, &phy->port_events_pending); @@ -259,8 +255,7 @@ void sas_porte_timer_event(struct work_struct *work) void sas_porte_hard_reset(struct work_struct *work) { - struct asd_sas_event *ev = - container_of(work, struct asd_sas_event, work); + struct asd_sas_event *ev = 
to_asd_sas_event(work); struct asd_sas_phy *phy = ev->phy; clear_bit(PORTE_HARD_RESET, &phy->port_events_pending); diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index f74cc0602f3b..bc3cc6d91117 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -1367,6 +1367,9 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job) struct qla_hw_data *ha = vha->hw; int rval = 0; + if (ha->flags.isp82xx_reset_hdlr_active) + return -EBUSY; + rval = qla2x00_optrom_setup(bsg_job, vha, 0); if (rval) return rval; diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 897731b93df2..62324a1d5573 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -15,7 +15,7 @@ * | Mailbox commands | 0x113e | 0x112c-0x112e | * | | | 0x113a | * | Device Discovery | 0x2086 | 0x2020-0x2022 | - * | Queue Command and IO tracing | 0x302f | 0x3006,0x3008 | + * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 | * | | | 0x302d-0x302e | * | DPC Thread | 0x401c | | * | Async Events | 0x505d | 0x502b-0x502f | diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index f79844ce7122..ce42288049b5 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -1715,13 +1715,24 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) res = DID_ERROR << 16; break; } - } else { + } else if (lscsi_status != SAM_STAT_TASK_SET_FULL && + lscsi_status != SAM_STAT_BUSY) { + /* + * scsi status of task set and busy are considered to be + * task not completed. + */ + ql_dbg(ql_dbg_io, fcport->vha, 0x301f, "Dropped frame(s) detected (0x%x " - "of 0x%x bytes).\n", resid, scsi_bufflen(cp)); + "of 0x%x bytes).\n", resid, + scsi_bufflen(cp)); res = DID_ERROR << 16 | lscsi_status; goto check_scsi_status; + } else { + ql_dbg(ql_dbg_io, fcport->vha, 0x3030, + "scsi_status: 0x%x, lscsi_status: 0x%x\n", + scsi_status, lscsi_status); } res = DID_OK << 16 | lscsi_status; diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index f0528539bbbc..de722a933438 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -3125,6 +3125,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha) ql_log(ql_log_info, vha, 0x00b7, "HW State: COLD/RE-INIT.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); + qla82xx_set_rst_ready(ha); if (ql2xmdenable) { if (qla82xx_md_collect(vha)) ql_log(ql_log_warn, vha, 0xb02c, diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index a2f999273a5f..7db803377c64 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -3577,9 +3577,25 @@ void qla2x00_relogin(struct scsi_qla_host *vha) continue; /* Attempt a retry. 
*/ status = 1; - } else + } else { status = qla2x00_fabric_login(vha, fcport, &next_loopid); + if (status == QLA_SUCCESS) { + int status2; + uint8_t opts; + + opts = 0; + if (fcport->flags & + FCF_FCP2_DEVICE) + opts |= BIT_1; + status2 = + qla2x00_get_port_database( + vha, fcport, + opts); + if (status2 != QLA_SUCCESS) + status = 1; + } + } } else status = qla2x00_local_device_login(vha, fcport); diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 3c13c0a6be63..a683e766d1ae 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -1017,6 +1017,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha) !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha)) return; + if (ha->flags.isp82xx_reset_hdlr_active) + return; + ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr, ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header)); if (hdr.version == __constant_cpu_to_le16(0xffff)) diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 29d780c38040..f5fdb16bec9b 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,9 +7,9 @@ /* * Driver version */ -#define QLA2XXX_VERSION "8.03.07.13-k" +#define QLA2XXX_VERSION "8.04.00.03-k" #define QLA_DRIVER_MAJOR_VER 8 -#define QLA_DRIVER_MINOR_VER 3 -#define QLA_DRIVER_PATCH_VER 7 +#define QLA_DRIVER_MINOR_VER 4 +#define QLA_DRIVER_PATCH_VER 0 #define QLA_DRIVER_BETA_VER 3 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index dbe43924d7ae..62ddfd31d4ce 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -1638,7 +1638,7 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, request_fn_proc *request_fn) { struct request_queue *q; - struct device *dev = shost->shost_gendev.parent; + struct device *dev = shost->dma_dev; q = blk_init_queue(request_fn, NULL); if (!q) diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index efccd72c4a3e..1b3843117268 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -175,7 +175,8 @@ static void virtscsi_complete_free(void *buf) if (cmd->comp) complete_all(cmd->comp); - mempool_free(cmd, virtscsi_cmd_pool); + else + mempool_free(cmd, virtscsi_cmd_pool); } static void virtscsi_ctrl_done(struct virtqueue *vq) @@ -311,21 +312,22 @@ out: static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) { DECLARE_COMPLETION_ONSTACK(comp); - int ret; + int ret = FAILED; cmd->comp = ∁ - ret = virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd, - sizeof cmd->req.tmf, sizeof cmd->resp.tmf, - GFP_NOIO); - if (ret < 0) - return FAILED; + if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd, + sizeof cmd->req.tmf, sizeof cmd->resp.tmf, + GFP_NOIO) < 0) + goto out; wait_for_completion(&comp); - if (cmd->resp.tmf.response != VIRTIO_SCSI_S_OK && - cmd->resp.tmf.response != VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) - return FAILED; + if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK || + cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) + ret = SUCCESS; - return SUCCESS; +out: + mempool_free(cmd, virtscsi_cmd_pool); + return ret; } static int virtscsi_device_reset(struct scsi_cmnd *sc) diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 3ed748355b98..00c024039c97 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -74,7 +74,7 @@ config SPI_ATMEL This selects a driver for the Atmel SPI Controller, present on many AT32 (AVR32) and AT91 (ARM) chips. 
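Stepping back from the +/- noise, the reworked virtscsi_tmf() in the virtio_scsi hunk a little further up reduces to a pessimistic-default, single-exit flow: assume FAILED, kick the request, wait on an on-stack completion, upgrade to SUCCESS only for VIRTIO_SCSI_S_OK or VIRTIO_SCSI_S_FUNCTION_SUCCEEDED, and return the command to the mempool in exactly one place. A consolidated reading of that hunk, reconstructed here for readability rather than as the file's literal contents:

static int virtscsi_tmf_sketch(struct virtio_scsi *vscsi,
			       struct virtio_scsi_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	int ret = FAILED;			/* pessimistic default */

	cmd->comp = &comp;
	if (virtscsi_kick_cmd(vscsi, vscsi->ctrl_vq, cmd,
			      sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
			      GFP_NOIO) < 0)
		goto out;

	wait_for_completion(&comp);	/* virtscsi_complete_free() fires this */

	if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
	    cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
		ret = SUCCESS;
out:
	/* single exit: the submitter owns the free, which is why
	 * virtscsi_complete_free() now frees only when no completion
	 * is attached to the command
	 */
	mempool_free(cmd, virtscsi_cmd_pool);
	return ret;
}

The pairing with the virtscsi_complete_free() change matters here: the submitter still reads cmd->resp after the wait, so the completion callback must not have already returned the buffer to the pool.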
-config SPI_BFIN +config SPI_BFIN5XX tristate "SPI controller driver for ADI Blackfin5xx" depends on BLACKFIN help diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index a1d48e0ba3dc..9d75d2198ff5 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -15,7 +15,7 @@ obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o obj-$(CONFIG_SPI_ATH79) += spi-ath79.o obj-$(CONFIG_SPI_AU1550) += spi-au1550.o obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o -obj-$(CONFIG_SPI_BFIN) += spi-bfin5xx.o +obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index f01b2648452e..7491971139a6 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c @@ -1,7 +1,7 @@ /* * Broadcom BCM63xx SPI controller support * - * Copyright (C) 2009-2011 Florian Fainelli + * Copyright (C) 2009-2012 Florian Fainelli * Copyright (C) 2010 Tanguy Bouzeloc * * This program is free software; you can redistribute it and/or @@ -30,6 +30,8 @@ #include #include #include +#include +#include #include @@ -37,8 +39,6 @@ #define DRV_VER "0.1.2" struct bcm63xx_spi { - spinlock_t lock; - int stopping; struct completion done; void __iomem *regs; @@ -96,17 +96,12 @@ static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = { { 391000, SPI_CLK_0_391MHZ } }; -static int bcm63xx_spi_setup_transfer(struct spi_device *spi, - struct spi_transfer *t) +static int bcm63xx_spi_check_transfer(struct spi_device *spi, + struct spi_transfer *t) { - struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); u8 bits_per_word; - u8 clk_cfg, reg; - u32 hz; - int i; bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; - hz = (t) ? t->speed_hz : spi->max_speed_hz; if (bits_per_word != 8) { dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", __func__, bits_per_word); @@ -119,6 +114,19 @@ static int bcm63xx_spi_setup_transfer(struct spi_device *spi, return -EINVAL; } + return 0; +} + +static void bcm63xx_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); + u32 hz; + u8 clk_cfg, reg; + int i; + + hz = (t) ? 
t->speed_hz : spi->max_speed_hz; + /* Find the closest clock configuration */ for (i = 0; i < SPI_CLK_MASK; i++) { if (hz <= bcm63xx_spi_freq_table[i][0]) { @@ -139,8 +147,6 @@ static int bcm63xx_spi_setup_transfer(struct spi_device *spi, bcm_spi_writeb(bs, reg, SPI_CLK_CFG); dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n", clk_cfg, hz); - - return 0; } /* the spi->mode bits understood by this driver: */ @@ -153,9 +159,6 @@ static int bcm63xx_spi_setup(struct spi_device *spi) bs = spi_master_get_devdata(spi->master); - if (bs->stopping) - return -ESHUTDOWN; - if (!spi->bits_per_word) spi->bits_per_word = 8; @@ -165,7 +168,7 @@ static int bcm63xx_spi_setup(struct spi_device *spi) return -EINVAL; } - ret = bcm63xx_spi_setup_transfer(spi, NULL); + ret = bcm63xx_spi_check_transfer(spi, NULL); if (ret < 0) { dev_err(&spi->dev, "setup: unsupported mode bits %x\n", spi->mode & ~MODEBITS); @@ -190,28 +193,29 @@ static void bcm63xx_spi_fill_tx_fifo(struct bcm63xx_spi *bs) bs->remaining_bytes -= size; } -static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) +static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi, + struct spi_transfer *t) { struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); u16 msg_ctl; u16 cmd; + /* Disable the CMD_DONE interrupt */ + bcm_spi_writeb(bs, 0, SPI_INT_MASK); + dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", t->tx_buf, t->rx_buf, t->len); /* Transmitter is inhibited */ bs->tx_ptr = t->tx_buf; bs->rx_ptr = t->rx_buf; - init_completion(&bs->done); if (t->tx_buf) { bs->remaining_bytes = t->len; bcm63xx_spi_fill_tx_fifo(bs); } - /* Enable the command done interrupt which - * we use to determine completion of a command */ - bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK); + init_completion(&bs->done); /* Fill in the Message control register */ msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT); @@ -230,33 +234,76 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT); cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT); bcm_spi_writew(bs, cmd, SPI_CMD); - wait_for_completion(&bs->done); - /* Disable the CMD_DONE interrupt */ - bcm_spi_writeb(bs, 0, SPI_INT_MASK); + /* Enable the CMD_DONE interrupt */ + bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK); return t->len - bs->remaining_bytes; } -static int bcm63xx_transfer(struct spi_device *spi, struct spi_message *m) +static int bcm63xx_spi_prepare_transfer(struct spi_master *master) { - struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); + struct bcm63xx_spi *bs = spi_master_get_devdata(master); + + pm_runtime_get_sync(&bs->pdev->dev); + + return 0; +} + +static int bcm63xx_spi_unprepare_transfer(struct spi_master *master) +{ + struct bcm63xx_spi *bs = spi_master_get_devdata(master); + + pm_runtime_put(&bs->pdev->dev); + + return 0; +} + +static int bcm63xx_spi_transfer_one(struct spi_master *master, + struct spi_message *m) +{ + struct bcm63xx_spi *bs = spi_master_get_devdata(master); struct spi_transfer *t; - int ret = 0; - - if (unlikely(list_empty(&m->transfers))) - return -EINVAL; - - if (bs->stopping) - return -ESHUTDOWN; + struct spi_device *spi = m->spi; + int status = 0; + unsigned int timeout = 0; list_for_each_entry(t, &m->transfers, transfer_list) { - ret += bcm63xx_txrx_bufs(spi, t); + unsigned int len = t->len; + u8 rx_tail; + + status = bcm63xx_spi_check_transfer(spi, t); + if (status < 0) + goto exit; + + /* configure adapter for a new transfer */ + 
bcm63xx_spi_setup_transfer(spi, t); + + while (len) { + /* send the data */ + len -= bcm63xx_txrx_bufs(spi, t); + + timeout = wait_for_completion_timeout(&bs->done, HZ); + if (!timeout) { + status = -ETIMEDOUT; + goto exit; + } + + /* read out all data */ + rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL); + + /* Read out all the data */ + if (rx_tail) + memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail); + } + + m->actual_length += t->len; } +exit: + m->status = status; + spi_finalize_current_message(master); - m->complete(m->context); - - return ret; + return 0; } /* This driver supports single master mode only. Hence @@ -267,39 +314,15 @@ static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id) struct spi_master *master = (struct spi_master *)dev_id; struct bcm63xx_spi *bs = spi_master_get_devdata(master); u8 intr; - u16 cmd; /* Read interupts and clear them immediately */ intr = bcm_spi_readb(bs, SPI_INT_STATUS); bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); bcm_spi_writeb(bs, 0, SPI_INT_MASK); - /* A tansfer completed */ - if (intr & SPI_INTR_CMD_DONE) { - u8 rx_tail; - - rx_tail = bcm_spi_readb(bs, SPI_RX_TAIL); - - /* Read out all the data */ - if (rx_tail) - memcpy_fromio(bs->rx_ptr, bs->rx_io, rx_tail); - - /* See if there is more data to send */ - if (bs->remaining_bytes > 0) { - bcm63xx_spi_fill_tx_fifo(bs); - - /* Start the transfer */ - bcm_spi_writew(bs, SPI_HD_W << SPI_MSG_TYPE_SHIFT, - SPI_MSG_CTL); - cmd = bcm_spi_readw(bs, SPI_CMD); - cmd |= SPI_CMD_START_IMMEDIATE; - cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT); - bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK); - bcm_spi_writew(bs, cmd, SPI_CMD); - } else { - complete(&bs->done); - } - } + /* A transfer completed */ + if (intr & SPI_INTR_CMD_DONE) + complete(&bs->done); return IRQ_HANDLED; } @@ -345,7 +368,6 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev) } bs = spi_master_get_devdata(master); - init_completion(&bs->done); platform_set_drvdata(pdev, master); bs->pdev = pdev; @@ -379,12 +401,13 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev) master->bus_num = pdata->bus_num; master->num_chipselect = pdata->num_chipselect; master->setup = bcm63xx_spi_setup; - master->transfer = bcm63xx_transfer; + master->prepare_transfer_hardware = bcm63xx_spi_prepare_transfer; + master->unprepare_transfer_hardware = bcm63xx_spi_unprepare_transfer; + master->transfer_one_message = bcm63xx_spi_transfer_one; + master->mode_bits = MODEBITS; bs->speed_hz = pdata->speed_hz; - bs->stopping = 0; bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA)); bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA)); - spin_lock_init(&bs->lock); /* Initialize hardware */ clk_enable(bs->clk); @@ -418,18 +441,16 @@ static int __devexit bcm63xx_spi_remove(struct platform_device *pdev) struct spi_master *master = platform_get_drvdata(pdev); struct bcm63xx_spi *bs = spi_master_get_devdata(master); + spi_unregister_master(master); + /* reset spi block */ bcm_spi_writeb(bs, 0, SPI_INT_MASK); - spin_lock(&bs->lock); - bs->stopping = 1; /* HW shutdown */ clk_disable(bs->clk); clk_put(bs->clk); - spin_unlock(&bs->lock); platform_set_drvdata(pdev, 0); - spi_unregister_master(master); return 0; } diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c index 248a2cc671a9..1fe51198a622 100644 --- a/drivers/spi/spi-bfin-sport.c +++ b/drivers/spi/spi-bfin-sport.c @@ -252,19 +252,15 @@ static void bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data) { struct 
bfin_sport_spi_slave_data *chip = drv_data->cur_chip; - unsigned int bits = (drv_data->ops == &bfin_sport_transfer_ops_u8 ? 7 : 15); bfin_sport_spi_disable(drv_data); dev_dbg(drv_data->dev, "restoring spi ctl state\n"); bfin_write(&drv_data->regs->tcr1, chip->ctl_reg); - bfin_write(&drv_data->regs->tcr2, bits); bfin_write(&drv_data->regs->tclkdiv, chip->baud); - bfin_write(&drv_data->regs->tfsdiv, bits); SSYNC(); bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS)); - bfin_write(&drv_data->regs->rcr2, bits); SSYNC(); bfin_sport_spi_cs_active(chip); @@ -420,11 +416,15 @@ bfin_sport_spi_pump_transfers(unsigned long data) drv_data->cs_change = transfer->cs_change; /* Bits per word setup */ - bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; - if (bits_per_word == 8) - drv_data->ops = &bfin_sport_transfer_ops_u8; - else + bits_per_word = transfer->bits_per_word ? : + message->spi->bits_per_word ? : 8; + if (bits_per_word % 16 == 0) drv_data->ops = &bfin_sport_transfer_ops_u16; + else + drv_data->ops = &bfin_sport_transfer_ops_u8; + bfin_write(&drv_data->regs->tcr2, bits_per_word - 1); + bfin_write(&drv_data->regs->tfsdiv, bits_per_word - 1); + bfin_write(&drv_data->regs->rcr2, bits_per_word - 1); drv_data->state = RUNNING_STATE; @@ -598,11 +598,12 @@ bfin_sport_spi_setup(struct spi_device *spi) } chip->cs_chg_udelay = chip_info->cs_chg_udelay; chip->idle_tx_val = chip_info->idle_tx_val; - spi->bits_per_word = chip_info->bits_per_word; } } - if (spi->bits_per_word != 8 && spi->bits_per_word != 16) { + if (spi->bits_per_word % 8) { + dev_err(&spi->dev, "%d bits_per_word is not supported\n", + spi->bits_per_word); ret = -EINVAL; goto error; } diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c index 3b83ff8b1e2b..9bb4d4af8547 100644 --- a/drivers/spi/spi-bfin5xx.c +++ b/drivers/spi/spi-bfin5xx.c @@ -396,7 +396,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) /* last read */ if (drv_data->rx) { dev_dbg(&drv_data->pdev->dev, "last read\n"); - if (n_bytes % 2) { + if (!(n_bytes % 2)) { u16 *buf = (u16 *)drv_data->rx; for (loop = 0; loop < n_bytes / 2; loop++) *buf++ = bfin_read(&drv_data->regs->rdbr); @@ -424,7 +424,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) if (drv_data->rx && drv_data->tx) { /* duplex */ dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n"); - if (n_bytes % 2) { + if (!(n_bytes % 2)) { u16 *buf = (u16 *)drv_data->rx; u16 *buf2 = (u16 *)drv_data->tx; for (loop = 0; loop < n_bytes / 2; loop++) { @@ -442,7 +442,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) } else if (drv_data->rx) { /* read */ dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n"); - if (n_bytes % 2) { + if (!(n_bytes % 2)) { u16 *buf = (u16 *)drv_data->rx; for (loop = 0; loop < n_bytes / 2; loop++) { *buf++ = bfin_read(&drv_data->regs->rdbr); @@ -458,7 +458,7 @@ static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id) } else if (drv_data->tx) { /* write */ dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n"); - if (n_bytes % 2) { + if (!(n_bytes % 2)) { u16 *buf = (u16 *)drv_data->tx; for (loop = 0; loop < n_bytes / 2; loop++) { bfin_read(&drv_data->regs->rdbr); @@ -587,6 +587,7 @@ static void bfin_spi_pump_transfers(unsigned long data) if (message->state == DONE_STATE) { dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n"); message->status = 0; + bfin_spi_flush(drv_data); bfin_spi_giveback(drv_data); return; } @@ -870,8 +871,10 @@ static void bfin_spi_pump_transfers(unsigned 
long data) message->actual_length += drv_data->len_in_bytes; /* Move to next transfer of this msg */ message->state = bfin_spi_next_transfer(drv_data); - if (drv_data->cs_change) + if (drv_data->cs_change && message->state != DONE_STATE) { + bfin_spi_flush(drv_data); bfin_spi_cs_deactive(drv_data, chip); + } } /* Schedule next transfer tasklet */ @@ -1026,7 +1029,6 @@ static int bfin_spi_setup(struct spi_device *spi) chip->cs_chg_udelay = chip_info->cs_chg_udelay; chip->idle_tx_val = chip_info->idle_tx_val; chip->pio_interrupt = chip_info->pio_interrupt; - spi->bits_per_word = chip_info->bits_per_word; } else { /* force a default base state */ chip->ctl_reg &= bfin_ctl_reg; diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c index 6db2887852d6..e8055073e84d 100644 --- a/drivers/spi/spi-ep93xx.c +++ b/drivers/spi/spi-ep93xx.c @@ -545,13 +545,12 @@ static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi) * in case of failure. */ static struct dma_async_tx_descriptor * -ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) +ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir) { struct spi_transfer *t = espi->current_msg->state; struct dma_async_tx_descriptor *txd; enum dma_slave_buswidth buswidth; struct dma_slave_config conf; - enum dma_transfer_direction slave_dirn; struct scatterlist *sg; struct sg_table *sgt; struct dma_chan *chan; @@ -567,14 +566,13 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) memset(&conf, 0, sizeof(conf)); conf.direction = dir; - if (dir == DMA_FROM_DEVICE) { + if (dir == DMA_DEV_TO_MEM) { chan = espi->dma_rx; buf = t->rx_buf; sgt = &espi->rx_sgt; conf.src_addr = espi->sspdr_phys; conf.src_addr_width = buswidth; - slave_dirn = DMA_DEV_TO_MEM; } else { chan = espi->dma_tx; buf = t->tx_buf; @@ -582,7 +580,6 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) conf.dst_addr = espi->sspdr_phys; conf.dst_addr_width = buswidth; - slave_dirn = DMA_MEM_TO_DEV; } ret = dmaengine_slave_config(chan, &conf); @@ -633,8 +630,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) if (!nents) return ERR_PTR(-ENOMEM); - txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, - slave_dirn, DMA_CTRL_ACK); + txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK); if (!txd) { dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir); return ERR_PTR(-ENOMEM); @@ -651,12 +647,12 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir) * unmapped. 
*/ static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi, - enum dma_data_direction dir) + enum dma_transfer_direction dir) { struct dma_chan *chan; struct sg_table *sgt; - if (dir == DMA_FROM_DEVICE) { + if (dir == DMA_DEV_TO_MEM) { chan = espi->dma_rx; sgt = &espi->rx_sgt; } else { @@ -677,16 +673,16 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi) struct spi_message *msg = espi->current_msg; struct dma_async_tx_descriptor *rxd, *txd; - rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE); + rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM); if (IS_ERR(rxd)) { dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd)); msg->status = PTR_ERR(rxd); return; } - txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE); + txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV); if (IS_ERR(txd)) { - ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); + ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM); dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(rxd)); msg->status = PTR_ERR(txd); return; @@ -705,8 +701,8 @@ static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi) wait_for_completion(&espi->wait); - ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE); - ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE); + ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV); + ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM); } /** diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 09c925aaf320..400ae2121a2a 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -1667,9 +1667,15 @@ static int calculate_effective_freq(struct pl022 *pl022, int freq, struct /* cpsdvsr = 254 & scr = 255 */ min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX); - if (!((freq <= max_tclk) && (freq >= min_tclk))) { + if (freq > max_tclk) + dev_warn(&pl022->adev->dev, + "Max speed that can be programmed is %d Hz, you requested %d\n", + max_tclk, freq); + + if (freq < min_tclk) { dev_err(&pl022->adev->dev, - "controller data is incorrect: out of range frequency"); + "Requested frequency: %d Hz is less than minimum possible %d Hz\n", + freq, min_tclk); return -EINVAL; } @@ -1681,26 +1687,37 @@ static int calculate_effective_freq(struct pl022 *pl022, int freq, struct while (scr <= SCR_MAX) { tmp = spi_rate(rate, cpsdvsr, scr); - if (tmp > freq) + if (tmp > freq) { + /* we need lower freq */ scr++; + continue; + } + /* - * If found exact value, update and break. - * If found more closer value, update and continue. + * If found exact value, mark found and break. + * If found more closer value, update and break. 
*/ - else if ((tmp == freq) || (tmp > best_freq)) { + if (tmp > best_freq) { best_freq = tmp; best_cpsdvsr = cpsdvsr; best_scr = scr; if (tmp == freq) - break; + found = 1; } - scr++; + /* + * increased scr will give lower rates, which are not + * required + */ + break; } cpsdvsr += 2; scr = SCR_MIN; } + WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate \n", + freq); + clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF); clk_freq->scr = (u8) (best_scr & 0xFF); dev_dbg(&pl022->adev->dev, @@ -1823,9 +1840,12 @@ static int pl022_setup(struct spi_device *spi) } else chip->cs_control = chip_info->cs_control; - if (bits <= 3) { - /* PL022 doesn't support less than 4-bits */ + /* Check bits per word with vendor specific range */ + if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) { status = -ENOTSUPP; + dev_err(&spi->dev, "illegal data size for this controller!\n"); + dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n", + pl022->vendor->max_bpw); goto err_config_params; } else if (bits <= 8) { dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); @@ -1838,20 +1858,10 @@ static int pl022_setup(struct spi_device *spi) chip->read = READING_U16; chip->write = WRITING_U16; } else { - if (pl022->vendor->max_bpw >= 32) { - dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); - chip->n_bytes = 4; - chip->read = READING_U32; - chip->write = WRITING_U32; - } else { - dev_err(&spi->dev, - "illegal data size for this controller!\n"); - dev_err(&spi->dev, - "a standard pl022 can only handle " - "1 <= n <= 16 bit words\n"); - status = -ENOTSUPP; - goto err_config_params; - } + dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); + chip->n_bytes = 4; + chip->read = READING_U32; + chip->write = WRITING_U32; } /* Now Initialize all register settings required for this chip */ diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 400df8cbee53..d91751f9ffe8 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #ifdef CONFIG_XFRM #include diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c index 56d74dc2fbd5..91a97b3e45c6 100644 --- a/drivers/staging/octeon/ethernet-tx.c +++ b/drivers/staging/octeon/ethernet-tx.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #ifdef CONFIG_XFRM #include diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c index 9112cd882154..60cba8194de3 100644 --- a/drivers/staging/octeon/ethernet.c +++ b/drivers/staging/octeon/ethernet.c @@ -31,6 +31,7 @@ #include #include #include +#include #include diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c index 2b45d3d1800c..04cd57f2a6da 100644 --- a/drivers/staging/ozwpan/ozpd.c +++ b/drivers/staging/ozwpan/ozpd.c @@ -383,8 +383,6 @@ static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f) pd->tx_pool = &f->link; pd->tx_pool_count++; f = 0; - } else { - kfree(f); } spin_unlock_bh(&pd->tx_frame_lock); if (f) diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c index 7862513cc295..9cf29fcea11e 100644 --- a/drivers/staging/tidspbridge/core/tiomap3430.c +++ b/drivers/staging/tidspbridge/core/tiomap3430.c @@ -79,10 +79,6 @@ #define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190) #define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194) -#define OMAP343X_CTRL_REGADDR(reg) \ 
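
The reworked pl022 loop above is easier to follow in isolation. A standalone model of the same search, assuming the usual PL022 relation rate = clk / (cpsdvsr * (1 + scr)) with an even CPSDVSR in [2, 254] and SCR in [0, 255]; the clock and requested frequency below are made-up numbers, and the driver's found flag and WARN are omitted:

	#include <stdio.h>

	static unsigned int spi_rate(unsigned int clk, unsigned int cpsdvsr,
				     unsigned int scr)
	{
		return clk / (cpsdvsr * (1 + scr));
	}

	int main(void)
	{
		unsigned int clk = 48000000, freq = 1000000;	/* example values */
		unsigned int best = 0, best_cpsdvsr = 0, best_scr = 0;

		for (unsigned int cpsdvsr = 2; cpsdvsr <= 254; cpsdvsr += 2) {
			for (unsigned int scr = 0; scr <= 255; scr++) {
				unsigned int tmp = spi_rate(clk, cpsdvsr, scr);

				if (tmp > freq)		/* still too fast: raise scr */
					continue;
				if (tmp > best) {	/* closest rate not above freq */
					best = tmp;
					best_cpsdvsr = cpsdvsr;
					best_scr = scr;
				}
				break;	/* larger scr only lowers the rate further */
			}
		}
		printf("best %u Hz with cpsdvsr=%u scr=%u\n",
		       best, best_cpsdvsr, best_scr);
		return 0;
	}

The early break is the point of the rework: once the rate has dropped to or below the request, scanning larger SCR values for the same CPSDVSR can only move further away.
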
- OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg)) - - /* Forward Declarations: */ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt); static int bridge_brd_read(struct bridge_dev_context *dev_ctxt, @@ -418,19 +414,27 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, /* Assert RST1 i.e only the RST only for DSP megacell */ if (!status) { + /* + * XXX: ioremapping MUST be removed once ctrl + * function is made available. + */ + void __iomem *ctrl = ioremap(OMAP343X_CTRL_BASE, SZ_4K); + if (!ctrl) + return -ENOMEM; + (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); /* Mask address with 1K for compatibility */ __raw_writel(dsp_addr & OMAP3_IVA2_BOOTADDR_MASK, - OMAP343X_CTRL_REGADDR( - OMAP343X_CONTROL_IVA2_BOOTADDR)); + ctrl + OMAP343X_CONTROL_IVA2_BOOTADDR); /* * Set bootmode to self loop if dsp_debug flag is true */ __raw_writel((dsp_debug) ? OMAP3_IVA2_BOOTMOD_IDLE : 0, - OMAP343X_CTRL_REGADDR( - OMAP343X_CONTROL_IVA2_BOOTMOD)); + ctrl + OMAP343X_CONTROL_IVA2_BOOTMOD); + + iounmap(ctrl); } } if (!status) { diff --git a/drivers/staging/tidspbridge/core/wdt.c b/drivers/staging/tidspbridge/core/wdt.c index 70055c8111ed..870f934f4f3b 100644 --- a/drivers/staging/tidspbridge/core/wdt.c +++ b/drivers/staging/tidspbridge/core/wdt.c @@ -53,7 +53,10 @@ int dsp_wdt_init(void) int ret = 0; dsp_wdt.sm_wdt = NULL; - dsp_wdt.reg_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_WDT3_BASE); + dsp_wdt.reg_base = ioremap(OMAP34XX_WDT3_BASE, SZ_4K); + if (!dsp_wdt.reg_base) + return -ENOMEM; + tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0); dsp_wdt.fclk = clk_get(NULL, "wdt3_fck"); @@ -99,6 +102,9 @@ void dsp_wdt_exit(void) dsp_wdt.fclk = NULL; dsp_wdt.iclk = NULL; dsp_wdt.sm_wdt = NULL; + + if (dsp_wdt.reg_base) + iounmap(dsp_wdt.reg_base); dsp_wdt.reg_base = NULL; } diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig index 3ed2c8f656a5..7048e01f0817 100644 --- a/drivers/staging/zcache/Kconfig +++ b/drivers/staging/zcache/Kconfig @@ -2,7 +2,7 @@ config ZCACHE bool "Dynamic compression of swap pages and clean pagecache pages" # X86 dependency is because zsmalloc uses non-portable pte/tlb # functions - depends on (CLEANCACHE || FRONTSWAP) && CRYPTO && X86 + depends on (CLEANCACHE || FRONTSWAP) && CRYPTO=y && X86 select ZSMALLOC select CRYPTO_LZO default n diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 70c3ffb981e7..e320ec24aa1b 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -60,7 +60,6 @@ static void core_clear_initiator_node_from_tpg( int i; struct se_dev_entry *deve; struct se_lun *lun; - struct se_lun_acl *acl, *acl_tmp; spin_lock_irq(&nacl->device_list_lock); for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { @@ -81,28 +80,7 @@ static void core_clear_initiator_node_from_tpg( core_update_device_list_for_node(lun, NULL, deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0); - spin_lock(&lun->lun_acl_lock); - list_for_each_entry_safe(acl, acl_tmp, - &lun->lun_acl_list, lacl_list) { - if (!strcmp(acl->initiatorname, nacl->initiatorname) && - (acl->mapped_lun == deve->mapped_lun)) - break; - } - - if (!acl) { - pr_err("Unable to locate struct se_lun_acl for %s," - " mapped_lun: %u\n", nacl->initiatorname, - deve->mapped_lun); - spin_unlock(&lun->lun_acl_lock); - spin_lock_irq(&nacl->device_list_lock); - continue; - } - - list_del(&acl->lacl_list); - spin_unlock(&lun->lun_acl_lock); - 
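
Both tidspbridge hunks above replace a fixed virtual-address macro with an on-demand mapping. A minimal sketch of that pattern, with EXAMPLE_CTRL_BASE and EXAMPLE_BOOTADDR_OFF as placeholder constants rather than the real OMAP addresses:

	/* Map the register window only for the duration of the access,
	 * check the mapping, and always unmap again. */
	static int example_write_bootaddr(u32 dsp_addr)
	{
		void __iomem *ctrl = ioremap(EXAMPLE_CTRL_BASE, SZ_4K);

		if (!ctrl)
			return -ENOMEM;

		__raw_writel(dsp_addr, ctrl + EXAMPLE_BOOTADDR_OFF);
		iounmap(ctrl);
		return 0;
	}
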
spin_lock_irq(&nacl->device_list_lock); - kfree(acl); } spin_unlock_irq(&nacl->device_list_lock); } diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c index 24145c30c9b0..6cc4358f68c1 100644 --- a/drivers/tty/amiserial.c +++ b/drivers/tty/amiserial.c @@ -1073,8 +1073,10 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state, (new_serial.close_delay != port->close_delay) || (new_serial.xmit_fifo_size != state->xmit_fifo_size) || ((new_serial.flags & ~ASYNC_USR_MASK) != - (port->flags & ~ASYNC_USR_MASK))) + (port->flags & ~ASYNC_USR_MASK))) { + tty_unlock(); return -EPERM; + } port->flags = ((port->flags & ~ASYNC_USR_MASK) | (new_serial.flags & ASYNC_USR_MASK)); state->custom_divisor = new_serial.custom_divisor; diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c index e6c3dbd781d6..836fe2731234 100644 --- a/drivers/tty/serial/clps711x.c +++ b/drivers/tty/serial/clps711x.c @@ -154,10 +154,9 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id) port->x_char = 0; return IRQ_HANDLED; } - if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { - clps711xuart_stop_tx(port); - return IRQ_HANDLED; - } + + if (uart_circ_empty(xmit) || uart_tx_stopped(port)) + goto disable_tx_irq; count = port->fifosize >> 1; do { @@ -171,8 +170,11 @@ static irqreturn_t clps711xuart_int_tx(int irq, void *dev_id) if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); - if (uart_circ_empty(xmit)) - clps711xuart_stop_tx(port); + if (uart_circ_empty(xmit)) { + disable_tx_irq: + disable_irq_nosync(TX_IRQ(port)); + tx_enabled(port) = 0; + } return IRQ_HANDLED; } diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index bbbec4a74cfb..c2816f494807 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -1447,9 +1447,11 @@ static int pch_uart_verify_port(struct uart_port *port, __func__); return -EOPNOTSUPP; #endif - priv->use_dma = 1; priv->use_dma_flag = 1; dev_info(priv->port.dev, "PCH UART : Use DMA Mode\n"); + if (!priv->use_dma) + pch_request_dma(port); + priv->use_dma = 1; } return 0; diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c index 08ebe901bb59..654755a990df 100644 --- a/drivers/tty/serial/pmac_zilog.c +++ b/drivers/tty/serial/pmac_zilog.c @@ -469,7 +469,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id) tty = NULL; if (r3 & (CHAEXT | CHATxIP | CHARxIP)) { if (!ZS_IS_OPEN(uap_a)) { - pmz_debug("ChanA interrupt while open !\n"); + pmz_debug("ChanA interrupt while not open !\n"); goto skip_a; } write_zsreg(uap_a, R0, RES_H_IUS); @@ -493,8 +493,8 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id) spin_lock(&uap_b->port.lock); tty = NULL; if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) { - if (!ZS_IS_OPEN(uap_a)) { - pmz_debug("ChanB interrupt while open !\n"); + if (!ZS_IS_OPEN(uap_b)) { + pmz_debug("ChanB interrupt while not open !\n"); goto skip_b; } write_zsreg(uap_b, R0, RES_H_IUS); diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index 86dd1e302bb3..29ca20dbd335 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -1085,15 +1085,21 @@ void vt_set_led_state(int console, int leds) * * Handle console start. This is a wrapper for the VT layer * so that we can keep kbd knowledge internal + * + * FIXME: We eventually need to hold the kbd lock here to protect + * the LED updating. 
We can't do it yet because fn_hold calls stop_tty + * and start_tty under the kbd_event_lock, while normal tty paths + * don't hold the lock. We probably need to split out an LED lock + * but not during an -rc release! */ void vt_kbd_con_start(int console) { struct kbd_struct * kbd = kbd_table + console; - unsigned long flags; - spin_lock_irqsave(&kbd_event_lock, flags); +/* unsigned long flags; */ +/* spin_lock_irqsave(&kbd_event_lock, flags); */ clr_vc_kbd_led(kbd, VC_SCROLLOCK); set_leds(); - spin_unlock_irqrestore(&kbd_event_lock, flags); +/* spin_unlock_irqrestore(&kbd_event_lock, flags); */ } /** @@ -1102,22 +1108,28 @@ void vt_kbd_con_start(int console) * * Handle console stop. This is a wrapper for the VT layer * so that we can keep kbd knowledge internal + * + * FIXME: We eventually need to hold the kbd lock here to protect + * the LED updating. We can't do it yet because fn_hold calls stop_tty + * and start_tty under the kbd_event_lock, while normal tty paths + * don't hold the lock. We probably need to split out an LED lock + * but not during an -rc release! */ void vt_kbd_con_stop(int console) { struct kbd_struct * kbd = kbd_table + console; - unsigned long flags; - spin_lock_irqsave(&kbd_event_lock, flags); +/* unsigned long flags; */ +/* spin_lock_irqsave(&kbd_event_lock, flags); */ set_vc_kbd_led(kbd, VC_SCROLLOCK); set_leds(); - spin_unlock_irqrestore(&kbd_event_lock, flags); +/* spin_unlock_irqrestore(&kbd_event_lock, flags); */ } /* * This is the tasklet that updates LED state on all keyboards * attached to the box. The reason we use tasklet is that we * need to handle the scenario when keyboard handler is not - * registered yet but we already getting updates form VT to + * registered yet but we already getting updates from the VT to * update led state. */ static void kbd_bh(unsigned long dummy) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index c6f6560d436c..0bb2b3248dad 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -157,8 +157,9 @@ static void wdm_out_callback(struct urb *urb) spin_lock(&desc->iuspin); desc->werr = urb->status; spin_unlock(&desc->iuspin); - clear_bit(WDM_IN_USE, &desc->flags); kfree(desc->outbuf); + desc->outbuf = NULL; + clear_bit(WDM_IN_USE, &desc->flags); wake_up(&desc->wait); } @@ -338,7 +339,7 @@ static ssize_t wdm_write if (we < 0) return -EIO; - desc->outbuf = buf = kmalloc(count, GFP_KERNEL); + buf = kmalloc(count, GFP_KERNEL); if (!buf) { rv = -ENOMEM; goto outnl; @@ -406,10 +407,12 @@ static ssize_t wdm_write req->wIndex = desc->inum; req->wLength = cpu_to_le16(count); set_bit(WDM_IN_USE, &desc->flags); + desc->outbuf = buf; rv = usb_submit_urb(desc->command, GFP_KERNEL); if (rv < 0) { kfree(buf); + desc->outbuf = NULL; clear_bit(WDM_IN_USE, &desc->flags); dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv); } else { diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 622b4a48e732..57ed9e400c06 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -493,6 +493,15 @@ static int hcd_pci_suspend_noirq(struct device *dev) pci_save_state(pci_dev); + /* + * Some systems crash if an EHCI controller is in D3 during + * a sleep transition. We have to leave such controllers in D0. + */ + if (hcd->broken_pci_sleep) { + dev_dbg(dev, "Staying in PCI D0\n"); + return retval; + } + /* If the root hub is dead rather than suspended, disallow remote * wakeup. 
usb_hc_died() should ensure that both hosts are marked as * dying, so we only need to check the primary roothub. diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index a2aa9d652c67..ec6c97dadbe4 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -1667,7 +1667,6 @@ void usb_disconnect(struct usb_device **pdev) { struct usb_device *udev = *pdev; int i; - struct usb_hcd *hcd = bus_to_hcd(udev->bus); /* mark the device as inactive, so any further urb submissions for * this device (and any of its children) will fail immediately. @@ -1690,9 +1689,7 @@ void usb_disconnect(struct usb_device **pdev) * so that the hardware is now fully quiesced. */ dev_dbg (&udev->dev, "unregistering device\n"); - mutex_lock(hcd->bandwidth_mutex); usb_disable_device(udev, 0); - mutex_unlock(hcd->bandwidth_mutex); usb_hcd_synchronize_unlinks(udev); usb_remove_ep_devs(&udev->ep0); diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index aed3e07942d4..ca717da3be95 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -1136,8 +1136,6 @@ void usb_disable_interface(struct usb_device *dev, struct usb_interface *intf, * Deallocates hcd/hardware state for the endpoints (nuking all or most * pending urbs) and usbcore state for the interfaces, so that usbcore * must usb_set_configuration() before any interfaces could be used. - * - * Must be called with hcd->bandwidth_mutex held. */ void usb_disable_device(struct usb_device *dev, int skip_ep0) { @@ -1190,7 +1188,9 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0) usb_disable_endpoint(dev, i + USB_DIR_IN, false); } /* Remove endpoints from the host controller internal state */ + mutex_lock(hcd->bandwidth_mutex); usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); + mutex_unlock(hcd->bandwidth_mutex); /* Second pass: remove endpoint pointers */ } for (i = skip_ep0; i < 16; ++i) { @@ -1750,7 +1750,6 @@ free_interfaces: /* if it's already configured, clear out old state first. * getting rid of old interfaces means unbinding their drivers. */ - mutex_lock(hcd->bandwidth_mutex); if (dev->state != USB_STATE_ADDRESS) usb_disable_device(dev, 1); /* Skip ep0 */ @@ -1763,6 +1762,7 @@ free_interfaces: * host controller will not allow submissions to dropped endpoints. If * this call fails, the device state is unchanged. 
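
The message.c change above narrows the lock scope: instead of documenting that usb_disable_device() must be called with hcd->bandwidth_mutex held, the function takes the mutex itself around the one call that needs it. A generic sketch of that shape, with example_dev, bandwidth_mutex and release_bandwidth() as placeholders:

	static void disable_device(struct example_dev *dev)
	{
		/* ...teardown that needs no lock... */

		mutex_lock(&dev->bandwidth_mutex);
		release_bandwidth(dev);		/* the only section needing the lock */
		mutex_unlock(&dev->bandwidth_mutex);

		/* ...remaining teardown... */
	}

Callers such as usb_disconnect() and usb_set_configuration() then drop their own lock/unlock pairs around the call, as the hub.c hunk above does.
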
*/ + mutex_lock(hcd->bandwidth_mutex); ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL); if (ret < 0) { mutex_unlock(hcd->bandwidth_mutex); diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 7bd815a507e8..99b58d84553a 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -206,11 +206,11 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc) for (i = 0; i < dwc->num_event_buffers; i++) { evt = dwc->ev_buffs[i]; - if (evt) { + if (evt) dwc3_free_one_event_buffer(dwc, evt); - dwc->ev_buffs[i] = NULL; - } } + + kfree(dwc->ev_buffs); } /** diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 25910e251c04..3584a169886f 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -353,6 +353,9 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc, dwc->test_mode_nr = wIndex >> 8; dwc->test_mode = true; + break; + default: + return -EINVAL; } break; @@ -559,15 +562,20 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, length = trb->size & DWC3_TRB_SIZE_MASK; if (dwc->ep0_bounced) { + unsigned transfer_size = ur->length; + unsigned maxp = ep0->endpoint.maxpacket; + + transfer_size += (maxp - (transfer_size % maxp)); transferred = min_t(u32, ur->length, - ep0->endpoint.maxpacket - length); + transfer_size - length); memcpy(ur->buf, dwc->ep0_bounce, transferred); dwc->ep0_bounced = false; } else { transferred = ur->length - length; - ur->actual += transferred; } + ur->actual += transferred; + if ((epnum & 1) && ur->actual < ur->length) { /* for some reason we did not get everything out */ diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c index 0c935d7c65bd..9d7bcd910074 100644 --- a/drivers/usb/gadget/at91_udc.c +++ b/drivers/usb/gadget/at91_udc.c @@ -1863,8 +1863,8 @@ static int __devinit at91udc_probe(struct platform_device *pdev) mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT); } else { - if (request_irq(udc->board.vbus_pin, at91_vbus_irq, - 0, driver_name, udc)) { + if (request_irq(gpio_to_irq(udc->board.vbus_pin), + at91_vbus_irq, 0, driver_name, udc)) { DBG("request vbus irq %d failed\n", udc->board.vbus_pin); retval = -EBUSY; @@ -1886,7 +1886,7 @@ static int __devinit at91udc_probe(struct platform_device *pdev) return 0; fail4: if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled) - free_irq(udc->board.vbus_pin, udc); + free_irq(gpio_to_irq(udc->board.vbus_pin), udc); fail3: if (gpio_is_valid(udc->board.vbus_pin)) gpio_free(udc->board.vbus_pin); @@ -1924,7 +1924,7 @@ static int __exit at91udc_remove(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 0); remove_debug_file(udc); if (gpio_is_valid(udc->board.vbus_pin)) { - free_irq(udc->board.vbus_pin, udc); + free_irq(gpio_to_irq(udc->board.vbus_pin), udc); gpio_free(udc->board.vbus_pin); } free_irq(udc->udp_irq, udc); diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c index a6dfd2164166..170cbe89d9f8 100644 --- a/drivers/usb/gadget/dummy_hcd.c +++ b/drivers/usb/gadget/dummy_hcd.c @@ -927,7 +927,6 @@ static int dummy_udc_stop(struct usb_gadget *g, dum->driver = NULL; - dummy_pullup(&dum->gadget, 0); return 0; } diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c index 1cbba70836bc..f52cb1ae45d9 100644 --- a/drivers/usb/gadget/f_fs.c +++ b/drivers/usb/gadget/f_fs.c @@ -712,7 +712,7 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value) if (code == FUNCTIONFS_INTERFACE_REVMAP) { struct ffs_function *func = ffs->func; ret = func ? 
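
The dwc3 ep0 bounce-buffer hunk above rounds the bounced transfer up to a wMaxPacketSize boundary before working out how many bytes to copy back into the request. A small standalone model of that arithmetic, using invented example values:

	#include <stdio.h>

	static unsigned int min_u32(unsigned int a, unsigned int b)
	{
		return a < b ? a : b;
	}

	int main(void)
	{
		unsigned int req_len = 10;	/* ur->length */
		unsigned int maxp = 64;		/* ep0 wMaxPacketSize */
		unsigned int remaining = 54;	/* TRB size left after completion */
		unsigned int transfer_size = req_len;

		transfer_size += maxp - (transfer_size % maxp);	/* round up: 64 */
		printf("transferred = %u\n",
		       min_u32(req_len, transfer_size - remaining));	/* prints 10 */
		return 0;
	}

The cap at req_len matters because the bounce buffer is padded beyond what the caller asked for; only the requested bytes are handed back.
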
ffs_func_revmap_intf(func, value) : -ENODEV; - } else if (gadget->ops->ioctl) { + } else if (gadget && gadget->ops->ioctl) { ret = gadget->ops->ioctl(gadget, code, value); } else { ret = -ENOTTY; @@ -1382,6 +1382,7 @@ static void functionfs_unbind(struct ffs_data *ffs) ffs->ep0req = NULL; ffs->gadget = NULL; ffs_data_put(ffs); + clear_bit(FFS_FL_BOUND, &ffs->flags); } } diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index a371e966425f..cb8c162cae5a 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -2189,7 +2189,7 @@ unknown_cmnd: common->data_size_from_cmnd = 0; sprintf(unknown, "Unknown x%02x", common->cmnd[0]); reply = check_command(common, common->cmnd_size, - DATA_DIR_UNKNOWN, 0xff, 0, unknown); + DATA_DIR_UNKNOWN, ~0, 0, unknown); if (reply == 0) { common->curlun->sense_data = SS_INVALID_COMMAND; reply = -EINVAL; diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c index 7b1cf18df5e3..52343654f5df 100644 --- a/drivers/usb/gadget/f_rndis.c +++ b/drivers/usb/gadget/f_rndis.c @@ -500,6 +500,7 @@ rndis_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) if (buf) { memcpy(req->buf, buf, n); req->complete = rndis_response_complete; + req->context = rndis; rndis_free_response(rndis->config, buf); value = n; } diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c index 4fac56927741..a896d73f7a93 100644 --- a/drivers/usb/gadget/file_storage.c +++ b/drivers/usb/gadget/file_storage.c @@ -2579,7 +2579,7 @@ static int do_scsi_command(struct fsg_dev *fsg) fsg->data_size_from_cmnd = 0; sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]); if ((reply = check_command(fsg, fsg->cmnd_size, - DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) { + DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) { fsg->curlun->sense_data = SS_INVALID_COMMAND; reply = -EINVAL; } diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c index 5f94e79cd6b9..55abfb6bd612 100644 --- a/drivers/usb/gadget/fsl_udc_core.c +++ b/drivers/usb/gadget/fsl_udc_core.c @@ -730,7 +730,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req) : (1 << (ep_index(ep))); /* check if the pipe is empty */ - if (!(list_empty(&ep->queue))) { + if (!(list_empty(&ep->queue)) && !(ep_index(ep) == 0)) { /* Add td to the end */ struct fsl_req *lastreq; lastreq = list_entry(ep->queue.prev, struct fsl_req, queue); @@ -918,10 +918,6 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) return -ENOMEM; } - /* Update ep0 state */ - if ((ep_index(ep) == 0)) - udc->ep0_state = DATA_STATE_XMIT; - /* irq handler advances the queue */ if (req != NULL) list_add_tail(&req->queue, &ep->queue); @@ -1279,7 +1275,8 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction) udc->ep0_dir = USB_DIR_OUT; ep = &udc->eps[0]; - udc->ep0_state = WAIT_FOR_OUT_STATUS; + if (udc->ep0_state != DATA_STATE_XMIT) + udc->ep0_state = WAIT_FOR_OUT_STATUS; req->ep = ep; req->req.length = 0; @@ -1384,6 +1381,9 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value, list_add_tail(&req->queue, &ep->queue); udc->ep0_state = DATA_STATE_XMIT; + if (ep0_prime_status(udc, EP_DIR_OUT)) + ep0stall(udc); + return; stall: ep0stall(udc); @@ -1492,6 +1492,14 @@ static void setup_received_irq(struct fsl_udc *udc, spin_lock(&udc->lock); udc->ep0_state = (setup->bRequestType & USB_DIR_IN) ? DATA_STATE_XMIT : DATA_STATE_RECV; + /* + * If the data stage is IN, send status prime immediately. 
+ * See 2.0 Spec chapter 8.5.3.3 for detail. + */ + if (udc->ep0_state == DATA_STATE_XMIT) + if (ep0_prime_status(udc, EP_DIR_OUT)) + ep0stall(udc); + } else { /* No data phase, IN status from gadget */ udc->ep0_dir = USB_DIR_IN; @@ -1520,9 +1528,8 @@ static void ep0_req_complete(struct fsl_udc *udc, struct fsl_ep *ep0, switch (udc->ep0_state) { case DATA_STATE_XMIT: - /* receive status phase */ - if (ep0_prime_status(udc, EP_DIR_OUT)) - ep0stall(udc); + /* already primed at setup_received_irq */ + udc->ep0_state = WAIT_FOR_OUT_STATUS; break; case DATA_STATE_RECV: /* send status phase */ diff --git a/drivers/usb/gadget/g_ffs.c b/drivers/usb/gadget/g_ffs.c index 331cd6729d3c..a85eaf40b948 100644 --- a/drivers/usb/gadget/g_ffs.c +++ b/drivers/usb/gadget/g_ffs.c @@ -161,7 +161,7 @@ static struct usb_composite_driver gfs_driver = { static struct ffs_data *gfs_ffs_data; static unsigned long gfs_registered; -static int gfs_init(void) +static int __init gfs_init(void) { ENTER(); @@ -169,7 +169,7 @@ static int gfs_init(void) } module_init(gfs_init); -static void gfs_exit(void) +static void __exit gfs_exit(void) { ENTER(); diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c index 69295ba9d99a..105b206cd844 100644 --- a/drivers/usb/gadget/s3c-hsotg.c +++ b/drivers/usb/gadget/s3c-hsotg.c @@ -340,7 +340,7 @@ static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg) /* currently we allocate TX FIFOs for all possible endpoints, * and assume that they are all the same size. */ - for (ep = 0; ep <= 15; ep++) { + for (ep = 1; ep <= 15; ep++) { val = addr; val |= size << S3C_DPTXFSIZn_DPTxFSize_SHIFT; addr += size; @@ -741,7 +741,7 @@ static void s3c_hsotg_start_req(struct s3c_hsotg *hsotg, /* write size / packets */ writel(epsize, hsotg->regs + epsize_reg); - if (using_dma(hsotg)) { + if (using_dma(hsotg) && !continuing) { unsigned int dma_reg; /* write DMA address to control register, buffer already @@ -1696,10 +1696,12 @@ static void s3c_hsotg_set_ep_maxpacket(struct s3c_hsotg *hsotg, reg |= mpsval; writel(reg, regs + S3C_DIEPCTL(ep)); - reg = readl(regs + S3C_DOEPCTL(ep)); - reg &= ~S3C_DxEPCTL_MPS_MASK; - reg |= mpsval; - writel(reg, regs + S3C_DOEPCTL(ep)); + if (ep) { + reg = readl(regs + S3C_DOEPCTL(ep)); + reg &= ~S3C_DxEPCTL_MPS_MASK; + reg |= mpsval; + writel(reg, regs + S3C_DOEPCTL(ep)); + } return; @@ -1919,7 +1921,8 @@ static void s3c_hsotg_epint(struct s3c_hsotg *hsotg, unsigned int idx, ints & S3C_DIEPMSK_TxFIFOEmpty) { dev_dbg(hsotg->dev, "%s: ep%d: TxFIFOEmpty\n", __func__, idx); - s3c_hsotg_trytx(hsotg, hs_ep); + if (!using_dma(hsotg)) + s3c_hsotg_trytx(hsotg, hs_ep); } } } diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c index 56da49f31d6c..e5e44f8cde9a 100644 --- a/drivers/usb/gadget/udc-core.c +++ b/drivers/usb/gadget/udc-core.c @@ -263,9 +263,9 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) if (udc_is_newstyle(udc)) { udc->driver->disconnect(udc->gadget); + usb_gadget_disconnect(udc->gadget); udc->driver->unbind(udc->gadget); usb_gadget_udc_stop(udc->gadget, udc->driver); - usb_gadget_disconnect(udc->gadget); } else { usb_gadget_stop(udc->gadget, udc->driver); } @@ -411,9 +411,13 @@ static ssize_t usb_udc_softconn_store(struct device *dev, struct usb_udc *udc = container_of(dev, struct usb_udc, dev); if (sysfs_streq(buf, "connect")) { + if (udc_is_newstyle(udc)) + usb_gadget_udc_start(udc->gadget, udc->driver); usb_gadget_connect(udc->gadget); } else if (sysfs_streq(buf, "disconnect")) { 
usb_gadget_disconnect(udc->gadget); + if (udc_is_newstyle(udc)) + usb_gadget_udc_stop(udc->gadget, udc->driver); } else { dev_err(dev, "unsupported command '%s'\n", buf); return -EINVAL; diff --git a/drivers/usb/gadget/uvc.h b/drivers/usb/gadget/uvc.h index bc78c606c12b..ca4e03a1c73a 100644 --- a/drivers/usb/gadget/uvc.h +++ b/drivers/usb/gadget/uvc.h @@ -28,7 +28,7 @@ struct uvc_request_data { - unsigned int length; + __s32 length; __u8 data[60]; }; diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c index d776adb2da67..0cdf89d32a15 100644 --- a/drivers/usb/gadget/uvc_queue.c +++ b/drivers/usb/gadget/uvc_queue.c @@ -543,11 +543,11 @@ done: return ret; } +/* called with queue->irqlock held.. */ static struct uvc_buffer * uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf) { struct uvc_buffer *nextbuf; - unsigned long flags; if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) && buf->buf.length != buf->buf.bytesused) { @@ -556,14 +556,12 @@ uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf) return buf; } - spin_lock_irqsave(&queue->irqlock, flags); list_del(&buf->queue); if (!list_empty(&queue->irqqueue)) nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer, queue); else nextbuf = NULL; - spin_unlock_irqrestore(&queue->irqlock, flags); buf->buf.sequence = queue->sequence++; do_gettimeofday(&buf->buf.timestamp); diff --git a/drivers/usb/gadget/uvc_v4l2.c b/drivers/usb/gadget/uvc_v4l2.c index f6e083b50191..54d7ca559cb2 100644 --- a/drivers/usb/gadget/uvc_v4l2.c +++ b/drivers/usb/gadget/uvc_v4l2.c @@ -39,7 +39,7 @@ uvc_send_response(struct uvc_device *uvc, struct uvc_request_data *data) if (data->length < 0) return usb_ep_set_halt(cdev->gadget->ep0); - req->length = min(uvc->event_length, data->length); + req->length = min_t(unsigned int, uvc->event_length, data->length); req->zero = data->length < uvc->event_length; req->dma = DMA_ADDR_INVALID; diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c index 3e7345172e03..d0a84bd3f3eb 100644 --- a/drivers/usb/host/ehci-fsl.c +++ b/drivers/usb/host/ehci-fsl.c @@ -218,6 +218,9 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci, u32 portsc; struct usb_hcd *hcd = ehci_to_hcd(ehci); void __iomem *non_ehci = hcd->regs; + struct fsl_usb2_platform_data *pdata; + + pdata = hcd->self.controller->platform_data; portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]); portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW); @@ -234,7 +237,9 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci, /* fall through */ case FSL_USB2_PHY_UTMI: /* enable UTMI PHY */ - setbits32(non_ehci + FSL_SOC_USB_CTRL, CTRL_UTMI_PHY_EN); + if (pdata->have_sysif_regs) + setbits32(non_ehci + FSL_SOC_USB_CTRL, + CTRL_UTMI_PHY_EN); portsc |= PORT_PTS_UTMI; break; case FSL_USB2_PHY_NONE: diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index 806cc95317aa..4a3bc5b7a06f 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -858,8 +858,13 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) goto dead; } + /* + * We don't use STS_FLR, but some controllers don't like it to + * remain on, so mask it out along with the other status bits. + */ + masked_status = status & (INTR_MASK | STS_FLR); + /* Shared IRQ? 
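
On the uvc gadget hunks above: once data->length becomes a signed __s32, the driver checks length < 0 explicitly and then uses min_t(unsigned int, ...) for the comparison against the unsigned event_length. A short standalone demonstration of why the mixed-type comparison needs care, with made-up values:

	#include <limits.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int event_length = 64;
		int length = -1;	/* e.g. an error value from the caller */

		printf("(unsigned)-1 = %u (UINT_MAX = %u)\n",
		       (unsigned int)length, UINT_MAX);

		/* In a mixed signed/unsigned comparison the negative value is
		 * promoted to a huge unsigned number, so a plain min() would
		 * silently pick event_length and never notice the error. */
		unsigned int mixed = event_length < (unsigned int)length ?
				     event_length : (unsigned int)length;
		printf("min(event_length, -1) = %u\n", mixed);	/* prints 64 */
		return 0;
	}

Hence the explicit negative check before the min_t(), rather than relying on the comparison to flag anything.
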
*/ - masked_status = status & INTR_MASK; if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) { spin_unlock(&ehci->lock); return IRQ_NONE; @@ -910,7 +915,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd) pcd_status = status; /* resume root hub? */ - if (!(cmd & CMD_RUN)) + if (ehci->rh_state == EHCI_RH_SUSPENDED) usb_hcd_resume_root_hub(hcd); /* get per-port change detect bits */ diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c index bba9850f32f0..5c78f9e71466 100644 --- a/drivers/usb/host/ehci-omap.c +++ b/drivers/usb/host/ehci-omap.c @@ -42,6 +42,7 @@ #include #include #include +#include /* EHCI Register Set */ #define EHCI_INSNREG04 (0xA0) @@ -191,6 +192,19 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) } } + if (pdata->phy_reset) { + if (gpio_is_valid(pdata->reset_gpio_port[0])) + gpio_request_one(pdata->reset_gpio_port[0], + GPIOF_OUT_INIT_LOW, "USB1 PHY reset"); + + if (gpio_is_valid(pdata->reset_gpio_port[1])) + gpio_request_one(pdata->reset_gpio_port[1], + GPIOF_OUT_INIT_LOW, "USB2 PHY reset"); + + /* Hold the PHY in RESET for enough time till DIR is high */ + udelay(10); + } + pm_runtime_enable(dev); pm_runtime_get_sync(dev); @@ -237,6 +251,19 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) /* root ports should always stay powered */ ehci_port_power(omap_ehci, 1); + if (pdata->phy_reset) { + /* Hold the PHY in RESET for enough time till + * PHY is settled and ready + */ + udelay(10); + + if (gpio_is_valid(pdata->reset_gpio_port[0])) + gpio_set_value(pdata->reset_gpio_port[0], 1); + + if (gpio_is_valid(pdata->reset_gpio_port[1])) + gpio_set_value(pdata->reset_gpio_port[1], 1); + } + return 0; err_add_hcd: @@ -259,8 +286,9 @@ err_io: */ static int ehci_hcd_omap_remove(struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct usb_hcd *hcd = dev_get_drvdata(dev); + struct device *dev = &pdev->dev; + struct usb_hcd *hcd = dev_get_drvdata(dev); + struct ehci_hcd_omap_platform_data *pdata = dev->platform_data; usb_remove_hcd(hcd); disable_put_regulator(dev->platform_data); @@ -269,6 +297,13 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev) pm_runtime_put_sync(dev); pm_runtime_disable(dev); + if (pdata->phy_reset) { + if (gpio_is_valid(pdata->reset_gpio_port[0])) + gpio_free(pdata->reset_gpio_port[0]); + + if (gpio_is_valid(pdata->reset_gpio_port[1])) + gpio_free(pdata->reset_gpio_port[1]); + } return 0; } diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c index 01bb7241d6ef..fe8dc069164e 100644 --- a/drivers/usb/host/ehci-pci.c +++ b/drivers/usb/host/ehci-pci.c @@ -144,6 +144,14 @@ static int ehci_pci_setup(struct usb_hcd *hcd) hcd->has_tt = 1; tdi_reset(ehci); } + if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) { + /* EHCI #1 or #2 on 6 Series/C200 Series chipset */ + if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) { + ehci_info(ehci, "broken D3 during system sleep on ASUS\n"); + hcd->broken_pci_sleep = 1; + device_set_wakeup_capable(&pdev->dev, false); + } + } break; case PCI_VENDOR_ID_TDI: if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c index 73544bd440bd..f214a80cdee2 100644 --- a/drivers/usb/host/ehci-tegra.c +++ b/drivers/usb/host/ehci-tegra.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -37,9 +38,7 @@ struct tegra_ehci_hcd { struct clk *emc_clk; struct usb_phy *transceiver; int host_resumed; - int bus_suspended; int port_resuming; - int 
power_down_on_bus_suspend; enum tegra_usb_phy_port_speed port_speed; }; @@ -273,120 +272,6 @@ static void tegra_ehci_restart(struct usb_hcd *hcd) up_write(&ehci_cf_port_reset_rwsem); } -static int tegra_usb_suspend(struct usb_hcd *hcd) -{ - struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller); - struct ehci_regs __iomem *hw = tegra->ehci->regs; - unsigned long flags; - - spin_lock_irqsave(&tegra->ehci->lock, flags); - - tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3; - ehci_halt(tegra->ehci); - clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); - - spin_unlock_irqrestore(&tegra->ehci->lock, flags); - - tegra_ehci_power_down(hcd); - return 0; -} - -static int tegra_usb_resume(struct usb_hcd *hcd) -{ - struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller); - struct ehci_hcd *ehci = hcd_to_ehci(hcd); - struct ehci_regs __iomem *hw = ehci->regs; - unsigned long val; - - set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); - tegra_ehci_power_up(hcd); - - if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) { - /* Wait for the phy to detect new devices - * before we restart the controller */ - msleep(10); - goto restart; - } - - /* Force the phy to keep data lines in suspend state */ - tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed); - - /* Enable host mode */ - tdi_reset(ehci); - - /* Enable Port Power */ - val = readl(&hw->port_status[0]); - val |= PORT_POWER; - writel(val, &hw->port_status[0]); - udelay(10); - - /* Check if the phy resume from LP0. When the phy resume from LP0 - * USB register will be reset. */ - if (!readl(&hw->async_next)) { - /* Program the field PTC based on the saved speed mode */ - val = readl(&hw->port_status[0]); - val &= ~PORT_TEST(~0); - if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH) - val |= PORT_TEST_FORCE; - else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL) - val |= PORT_TEST(6); - else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW) - val |= PORT_TEST(7); - writel(val, &hw->port_status[0]); - udelay(10); - - /* Disable test mode by setting PTC field to NORMAL_OP */ - val = readl(&hw->port_status[0]); - val &= ~PORT_TEST(~0); - writel(val, &hw->port_status[0]); - udelay(10); - } - - /* Poll until CCS is enabled */ - if (handshake(ehci, &hw->port_status[0], PORT_CONNECT, - PORT_CONNECT, 2000)) { - pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__); - goto restart; - } - - /* Poll until PE is enabled */ - if (handshake(ehci, &hw->port_status[0], PORT_PE, - PORT_PE, 2000)) { - pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__); - goto restart; - } - - /* Clear the PCI status, to avoid an interrupt taken upon resume */ - val = readl(&hw->status); - val |= STS_PCD; - writel(val, &hw->status); - - /* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */ - val = readl(&hw->port_status[0]); - if ((val & PORT_POWER) && (val & PORT_PE)) { - val |= PORT_SUSPEND; - writel(val, &hw->port_status[0]); - - /* Wait until port suspend completes */ - if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND, - PORT_SUSPEND, 1000)) { - pr_err("%s: timeout waiting for PORT_SUSPEND\n", - __func__); - goto restart; - } - } - - tegra_ehci_phy_restore_end(tegra->phy); - return 0; - -restart: - if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH) - tegra_ehci_phy_restore_end(tegra->phy); - - tegra_ehci_restart(hcd); - return 0; -} - static void tegra_ehci_shutdown(struct usb_hcd *hcd) { struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller); @@ -434,36 +319,6 @@ static 
int tegra_ehci_setup(struct usb_hcd *hcd) return retval; } -#ifdef CONFIG_PM -static int tegra_ehci_bus_suspend(struct usb_hcd *hcd) -{ - struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller); - int error_status = 0; - - error_status = ehci_bus_suspend(hcd); - if (!error_status && tegra->power_down_on_bus_suspend) { - tegra_usb_suspend(hcd); - tegra->bus_suspended = 1; - } - - return error_status; -} - -static int tegra_ehci_bus_resume(struct usb_hcd *hcd) -{ - struct tegra_ehci_hcd *tegra = dev_get_drvdata(hcd->self.controller); - - if (tegra->bus_suspended && tegra->power_down_on_bus_suspend) { - tegra_usb_resume(hcd); - tegra->bus_suspended = 0; - } - - tegra_usb_phy_preresume(tegra->phy); - tegra->port_resuming = 1; - return ehci_bus_resume(hcd); -} -#endif - struct temp_buffer { void *kmalloc_ptr; void *old_xfer_buffer; @@ -574,8 +429,8 @@ static const struct hc_driver tegra_ehci_hc_driver = { .hub_control = tegra_ehci_hub_control, .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, #ifdef CONFIG_PM - .bus_suspend = tegra_ehci_bus_suspend, - .bus_resume = tegra_ehci_bus_resume, + .bus_suspend = ehci_bus_suspend, + .bus_resume = ehci_bus_resume, #endif .relinquish_port = ehci_relinquish_port, .port_handed_over = ehci_port_handed_over, @@ -603,11 +458,187 @@ static int setup_vbus_gpio(struct platform_device *pdev) dev_err(&pdev->dev, "can't enable vbus\n"); return err; } - gpio_set_value(gpio, 1); return err; } +#ifdef CONFIG_PM + +static int controller_suspend(struct device *dev) +{ + struct tegra_ehci_hcd *tegra = + platform_get_drvdata(to_platform_device(dev)); + struct ehci_hcd *ehci = tegra->ehci; + struct usb_hcd *hcd = ehci_to_hcd(ehci); + struct ehci_regs __iomem *hw = ehci->regs; + unsigned long flags; + + if (time_before(jiffies, ehci->next_statechange)) + msleep(10); + + spin_lock_irqsave(&ehci->lock, flags); + + tegra->port_speed = (readl(&hw->port_status[0]) >> 26) & 0x3; + ehci_halt(ehci); + clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + + spin_unlock_irqrestore(&ehci->lock, flags); + + tegra_ehci_power_down(hcd); + return 0; +} + +static int controller_resume(struct device *dev) +{ + struct tegra_ehci_hcd *tegra = + platform_get_drvdata(to_platform_device(dev)); + struct ehci_hcd *ehci = tegra->ehci; + struct usb_hcd *hcd = ehci_to_hcd(ehci); + struct ehci_regs __iomem *hw = ehci->regs; + unsigned long val; + + set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + tegra_ehci_power_up(hcd); + + if (tegra->port_speed > TEGRA_USB_PHY_PORT_SPEED_HIGH) { + /* Wait for the phy to detect new devices + * before we restart the controller */ + msleep(10); + goto restart; + } + + /* Force the phy to keep data lines in suspend state */ + tegra_ehci_phy_restore_start(tegra->phy, tegra->port_speed); + + /* Enable host mode */ + tdi_reset(ehci); + + /* Enable Port Power */ + val = readl(&hw->port_status[0]); + val |= PORT_POWER; + writel(val, &hw->port_status[0]); + udelay(10); + + /* Check if the phy resume from LP0. When the phy resume from LP0 + * USB register will be reset. 
*/ + if (!readl(&hw->async_next)) { + /* Program the field PTC based on the saved speed mode */ + val = readl(&hw->port_status[0]); + val &= ~PORT_TEST(~0); + if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_HIGH) + val |= PORT_TEST_FORCE; + else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_FULL) + val |= PORT_TEST(6); + else if (tegra->port_speed == TEGRA_USB_PHY_PORT_SPEED_LOW) + val |= PORT_TEST(7); + writel(val, &hw->port_status[0]); + udelay(10); + + /* Disable test mode by setting PTC field to NORMAL_OP */ + val = readl(&hw->port_status[0]); + val &= ~PORT_TEST(~0); + writel(val, &hw->port_status[0]); + udelay(10); + } + + /* Poll until CCS is enabled */ + if (handshake(ehci, &hw->port_status[0], PORT_CONNECT, + PORT_CONNECT, 2000)) { + pr_err("%s: timeout waiting for PORT_CONNECT\n", __func__); + goto restart; + } + + /* Poll until PE is enabled */ + if (handshake(ehci, &hw->port_status[0], PORT_PE, + PORT_PE, 2000)) { + pr_err("%s: timeout waiting for USB_PORTSC1_PE\n", __func__); + goto restart; + } + + /* Clear the PCI status, to avoid an interrupt taken upon resume */ + val = readl(&hw->status); + val |= STS_PCD; + writel(val, &hw->status); + + /* Put controller in suspend mode by writing 1 to SUSP bit of PORTSC */ + val = readl(&hw->port_status[0]); + if ((val & PORT_POWER) && (val & PORT_PE)) { + val |= PORT_SUSPEND; + writel(val, &hw->port_status[0]); + + /* Wait until port suspend completes */ + if (handshake(ehci, &hw->port_status[0], PORT_SUSPEND, + PORT_SUSPEND, 1000)) { + pr_err("%s: timeout waiting for PORT_SUSPEND\n", + __func__); + goto restart; + } + } + + tegra_ehci_phy_restore_end(tegra->phy); + goto done; + + restart: + if (tegra->port_speed <= TEGRA_USB_PHY_PORT_SPEED_HIGH) + tegra_ehci_phy_restore_end(tegra->phy); + + tegra_ehci_restart(hcd); + + done: + tegra_usb_phy_preresume(tegra->phy); + tegra->port_resuming = 1; + return 0; +} + +static int tegra_ehci_suspend(struct device *dev) +{ + struct tegra_ehci_hcd *tegra = + platform_get_drvdata(to_platform_device(dev)); + struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci); + int rc = 0; + + /* + * When system sleep is supported and USB controller wakeup is + * implemented: If the controller is runtime-suspended and the + * wakeup setting needs to be changed, call pm_runtime_resume(). 
+ */ + if (HCD_HW_ACCESSIBLE(hcd)) + rc = controller_suspend(dev); + return rc; +} + +static int tegra_ehci_resume(struct device *dev) +{ + int rc; + + rc = controller_resume(dev); + if (rc == 0) { + pm_runtime_disable(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + } + return rc; +} + +static int tegra_ehci_runtime_suspend(struct device *dev) +{ + return controller_suspend(dev); +} + +static int tegra_ehci_runtime_resume(struct device *dev) +{ + return controller_resume(dev); +} + +static const struct dev_pm_ops tegra_ehci_pm_ops = { + .suspend = tegra_ehci_suspend, + .resume = tegra_ehci_resume, + .runtime_suspend = tegra_ehci_runtime_suspend, + .runtime_resume = tegra_ehci_runtime_resume, +}; + +#endif + static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32); static int tegra_ehci_probe(struct platform_device *pdev) @@ -722,7 +753,6 @@ static int tegra_ehci_probe(struct platform_device *pdev) } tegra->host_resumed = 1; - tegra->power_down_on_bus_suspend = pdata->power_down_on_bus_suspend; tegra->ehci = hcd_to_ehci(hcd); irq = platform_get_irq(pdev, 0); @@ -731,7 +761,6 @@ static int tegra_ehci_probe(struct platform_device *pdev) err = -ENODEV; goto fail; } - set_irq_flags(irq, IRQF_VALID); #ifdef CONFIG_USB_OTG_UTILS if (pdata->operating_mode == TEGRA_USB_OTG) { @@ -747,6 +776,14 @@ static int tegra_ehci_probe(struct platform_device *pdev) goto fail; } + pm_runtime_set_active(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + + /* Don't skip the pm_runtime_forbid call if wakeup isn't working */ + /* if (!pdata->power_down_on_bus_suspend) */ + pm_runtime_forbid(&pdev->dev); + pm_runtime_enable(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); return err; fail: @@ -773,33 +810,6 @@ fail_hcd: return err; } -#ifdef CONFIG_PM -static int tegra_ehci_resume(struct platform_device *pdev) -{ - struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev); - struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci); - - if (tegra->bus_suspended) - return 0; - - return tegra_usb_resume(hcd); -} - -static int tegra_ehci_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev); - struct usb_hcd *hcd = ehci_to_hcd(tegra->ehci); - - if (tegra->bus_suspended) - return 0; - - if (time_before(jiffies, tegra->ehci->next_statechange)) - msleep(10); - - return tegra_usb_suspend(hcd); -} -#endif - static int tegra_ehci_remove(struct platform_device *pdev) { struct tegra_ehci_hcd *tegra = platform_get_drvdata(pdev); @@ -808,6 +818,10 @@ static int tegra_ehci_remove(struct platform_device *pdev) if (tegra == NULL || hcd == NULL) return -EINVAL; + pm_runtime_get_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + #ifdef CONFIG_USB_OTG_UTILS if (tegra->transceiver) { otg_set_host(tegra->transceiver->otg, NULL); @@ -848,13 +862,12 @@ static struct of_device_id tegra_ehci_of_match[] __devinitdata = { static struct platform_driver tegra_ehci_driver = { .probe = tegra_ehci_probe, .remove = tegra_ehci_remove, -#ifdef CONFIG_PM - .suspend = tegra_ehci_suspend, - .resume = tegra_ehci_resume, -#endif .shutdown = tegra_ehci_hcd_shutdown, .driver = { .name = "tegra-ehci", .of_match_table = tegra_ehci_of_match, +#ifdef CONFIG_PM + .pm = &tegra_ehci_pm_ops, +#endif } }; diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index 09f597ad6e00..13ebeca8e73e 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -94,7 +94,7 @@ static void at91_stop_hc(struct platform_device *pdev) 
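
The tegra probe hunk above adds a runtime-PM bring-up sequence; the calls are the ones in the diff, annotated here with what each step is doing:

	pm_runtime_set_active(&pdev->dev);	/* hardware is already powered up */
	pm_runtime_get_noresume(&pdev->dev);	/* take a usage ref without resuming */
	pm_runtime_forbid(&pdev->dev);		/* keep runtime suspend off for now */
	pm_runtime_enable(&pdev->dev);		/* hand the device to the PM core */
	pm_runtime_put_sync(&pdev->dev);	/* drop the probe-time reference */

With that in place the old bus_suspend/bus_resume wrappers and platform .suspend/.resume hooks become redundant, which is what the rest of the tegra diff removes.
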
/*-------------------------------------------------------------------------*/ -static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *); +static void __devexit usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *); /* configure so an HC device and id are always provided */ /* always called with process context; sleeping is OK */ @@ -108,7 +108,7 @@ static void usb_hcd_at91_remove (struct usb_hcd *, struct platform_device *); * then invokes the start() method for the HCD associated with it * through the hotplug entry's driver_data. */ -static int usb_hcd_at91_probe(const struct hc_driver *driver, +static int __devinit usb_hcd_at91_probe(const struct hc_driver *driver, struct platform_device *pdev) { int retval; @@ -203,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver, * context, "rmmod" or something similar. * */ -static void usb_hcd_at91_remove(struct usb_hcd *hcd, +static void __devexit usb_hcd_at91_remove(struct usb_hcd *hcd, struct platform_device *pdev) { usb_remove_hcd(hcd); @@ -545,7 +545,7 @@ static int __devinit ohci_at91_of_init(struct platform_device *pdev) /*-------------------------------------------------------------------------*/ -static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) +static int __devinit ohci_hcd_at91_drv_probe(struct platform_device *pdev) { struct at91_usbh_data *pdata; int i; @@ -620,7 +620,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev) return usb_hcd_at91_probe(&ohci_at91_hc_driver, pdev); } -static int ohci_hcd_at91_drv_remove(struct platform_device *pdev) +static int __devexit ohci_hcd_at91_drv_remove(struct platform_device *pdev) { struct at91_usbh_data *pdata = pdev->dev.platform_data; int i; @@ -696,7 +696,7 @@ MODULE_ALIAS("platform:at91_ohci"); static struct platform_driver ohci_hcd_at91_driver = { .probe = ohci_hcd_at91_drv_probe, - .remove = ohci_hcd_at91_drv_remove, + .remove = __devexit_p(ohci_hcd_at91_drv_remove), .shutdown = usb_hcd_platform_shutdown, .suspend = ohci_hcd_at91_drv_suspend, .resume = ohci_hcd_at91_drv_resume, diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index 959145baf3cf..9dcb68f04f03 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -423,7 +423,7 @@ alloc_sglist(int nents, int max, int vary) unsigned i; unsigned size = max; - sg = kmalloc(nents * sizeof *sg, GFP_KERNEL); + sg = kmalloc_array(nents, sizeof *sg, GFP_KERNEL); if (!sg) return NULL; sg_init_table(sg, nents); @@ -904,6 +904,9 @@ test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param) struct ctrl_ctx context; int i; + if (param->sglen == 0 || param->iterations > UINT_MAX / param->sglen) + return -EOPNOTSUPP; + spin_lock_init(&context.lock); context.dev = dev; init_completion(&context.complete); @@ -1981,8 +1984,6 @@ usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf) /* queued control messaging */ case 10: - if (param->sglen == 0) - break; retval = 0; dev_info(&intf->dev, "TEST 10: queue %d control calls, %d times\n", @@ -2276,6 +2277,8 @@ usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id) if (status < 0) { WARNING(dev, "couldn't get endpoints, %d\n", status); + kfree(dev->buf); + kfree(dev); return status; } /* may find bulk or ISO pipes */ diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index 897edda42270..70201462e19c 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -99,9 +99,7 @@ static void yurex_delete(struct kref *kref) 
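
The usbtest hunks above add two related guards: the control-queue test refuses parameter combinations whose product would overflow, and the scatterlist allocation switches to kmalloc_array(), which performs the same kind of check internally. A standalone model of both, with example parameter values:

	#include <limits.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Checked array allocation, roughly what kmalloc_array() guards against. */
	static void *alloc_array(size_t n, size_t size)
	{
		if (size && n > SIZE_MAX / size)
			return NULL;		/* n * size would overflow */
		return malloc(n * size);
	}

	int main(void)
	{
		unsigned int sglen = 32, iterations = UINT_MAX / 16;

		if (sglen == 0 || iterations > UINT_MAX / sglen) {
			printf("rejected: %u * %u would overflow\n",
			       iterations, sglen);
			return 0;
		}
		free(alloc_array(iterations, sglen));
		return 0;
	}
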
usb_put_dev(dev->udev); if (dev->cntl_urb) { usb_kill_urb(dev->cntl_urb); - if (dev->cntl_req) - usb_free_coherent(dev->udev, YUREX_BUF_SIZE, - dev->cntl_req, dev->cntl_urb->setup_dma); + kfree(dev->cntl_req); if (dev->cntl_buffer) usb_free_coherent(dev->udev, YUREX_BUF_SIZE, dev->cntl_buffer, dev->cntl_urb->transfer_dma); @@ -234,9 +232,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_ } /* allocate buffer for control req */ - dev->cntl_req = usb_alloc_coherent(dev->udev, YUREX_BUF_SIZE, - GFP_KERNEL, - &dev->cntl_urb->setup_dma); + dev->cntl_req = kmalloc(YUREX_BUF_SIZE, GFP_KERNEL); if (!dev->cntl_req) { err("Could not allocate cntl_req"); goto error; @@ -286,7 +282,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_ usb_rcvintpipe(dev->udev, dev->int_in_endpointAddr), dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt, dev, 1); - dev->cntl_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; if (usb_submit_urb(dev->urb, GFP_KERNEL)) { retval = -EIO; err("Could not submitting URB"); diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c index 97ab975fa442..768b4b55c816 100644 --- a/drivers/usb/musb/davinci.c +++ b/drivers/usb/musb/davinci.c @@ -386,7 +386,7 @@ static int davinci_musb_init(struct musb *musb) usb_nop_xceiv_register(); musb->xceiv = usb_get_transceiver(); if (!musb->xceiv) - return -ENODEV; + goto unregister; musb->mregs += DAVINCI_BASE_OFFSET; @@ -444,6 +444,7 @@ static int davinci_musb_init(struct musb *musb) fail: usb_put_transceiver(musb->xceiv); +unregister: usb_nop_xceiv_unregister(); return -ENODEV; } diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 0f8b82918a40..66aaccf04490 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c @@ -137,6 +137,9 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset) int i = 0; u8 r; u8 power; + int ret; + + pm_runtime_get_sync(phy->io_dev); /* Make sure the transceiver is not in low power mode */ power = musb_readb(addr, MUSB_POWER); @@ -154,15 +157,22 @@ static int musb_ulpi_read(struct usb_phy *phy, u32 offset) while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) & MUSB_ULPI_REG_CMPLT)) { i++; - if (i == 10000) - return -ETIMEDOUT; + if (i == 10000) { + ret = -ETIMEDOUT; + goto out; + } } r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); r &= ~MUSB_ULPI_REG_CMPLT; musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r); - return musb_readb(addr, MUSB_ULPI_REG_DATA); + ret = musb_readb(addr, MUSB_ULPI_REG_DATA); + +out: + pm_runtime_put(phy->io_dev); + + return ret; } static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) @@ -171,6 +181,9 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) int i = 0; u8 r = 0; u8 power; + int ret = 0; + + pm_runtime_get_sync(phy->io_dev); /* Make sure the transceiver is not in low power mode */ power = musb_readb(addr, MUSB_POWER); @@ -184,15 +197,20 @@ static int musb_ulpi_write(struct usb_phy *phy, u32 offset, u32 data) while (!(musb_readb(addr, MUSB_ULPI_REG_CONTROL) & MUSB_ULPI_REG_CMPLT)) { i++; - if (i == 10000) - return -ETIMEDOUT; + if (i == 10000) { + ret = -ETIMEDOUT; + goto out; + } } r = musb_readb(addr, MUSB_ULPI_REG_CONTROL); r &= ~MUSB_ULPI_REG_CMPLT; musb_writeb(addr, MUSB_ULPI_REG_CONTROL, r); - return 0; +out: + pm_runtime_put(phy->io_dev); + + return ret; } #else #define musb_ulpi_read NULL @@ -1904,14 +1922,17 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem 
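
The musb_ulpi_read()/musb_ulpi_write() hunks above wrap the register access in a runtime-PM reference and make the timeout path release it too. A compact sketch of that pattern, where read_reg() is a placeholder for the real register sequence:

	static int example_reg_read(struct device *dev, void __iomem *addr)
	{
		int ret;

		pm_runtime_get_sync(dev);	/* make sure the block is powered */

		ret = read_reg(addr);		/* may also return -ETIMEDOUT */

		pm_runtime_put(dev);		/* released on every exit path */
		return ret;
	}

Converting the early returns into a single exit path (the out: label in the diff) is what keeps the get/put calls balanced.
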
*ctrl) if (!musb->isr) { status = -ENODEV; - goto fail3; + goto fail2; } if (!musb->xceiv->io_ops) { + musb->xceiv->io_dev = musb->controller; musb->xceiv->io_priv = musb->mregs; musb->xceiv->io_ops = &musb_ulpi_access; } + pm_runtime_get_sync(musb->controller); + #ifndef CONFIG_MUSB_PIO_ONLY if (use_dma && dev->dma_mask) { struct dma_controller *c; @@ -2023,6 +2044,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) goto fail5; #endif + pm_runtime_put(musb->controller); + dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n", ({char *s; switch (musb->board_mode) { @@ -2047,6 +2070,9 @@ fail4: musb_gadget_cleanup(musb); fail3: + pm_runtime_put_sync(musb->controller); + +fail2: if (musb->irq_wake) device_init_wakeup(dev, 0); musb_platform_exit(musb); diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 93de517a32a0..f4a40f001c88 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h @@ -449,7 +449,7 @@ struct musb { * We added this flag to forcefully disable double * buffering until we get it working. */ - unsigned double_buffer_not_ok:1 __deprecated; + unsigned double_buffer_not_ok:1; struct musb_hdrc_config *config; diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 79cb0af779fa..ef8d744800ac 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -2098,7 +2098,7 @@ static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh) } /* turn off DMA requests, discard state, stop polling ... */ - if (is_in) { + if (ep->epnum && is_in) { /* giveback saves bulk toggle */ csr = musb_h_flush_rxfifo(ep, 0); diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 2ae0bb309994..c7785e81254c 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c @@ -282,7 +282,8 @@ static void musb_otg_notifier_work(struct work_struct *data_notifier_work) static int omap2430_musb_init(struct musb *musb) { - u32 l, status = 0; + u32 l; + int status = 0; struct device *dev = musb->controller; struct musb_hdrc_platform_data *plat = dev->platform_data; struct omap_musb_board_data *data = plat->board_data; @@ -301,7 +302,7 @@ static int omap2430_musb_init(struct musb *musb) status = pm_runtime_get_sync(dev); if (status < 0) { - dev_err(dev, "pm_runtime_get_sync FAILED"); + dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status); goto err1; } @@ -333,6 +334,7 @@ static int omap2430_musb_init(struct musb *musb) setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); + pm_runtime_put_noidle(musb->controller); return 0; err1: @@ -452,14 +454,14 @@ static int __devinit omap2430_probe(struct platform_device *pdev) goto err2; } + pm_runtime_enable(&pdev->dev); + ret = platform_device_add(musb); if (ret) { dev_err(&pdev->dev, "failed to register musb device\n"); goto err2; } - pm_runtime_enable(&pdev->dev); - return 0; err2: @@ -478,7 +480,6 @@ static int __devexit omap2430_remove(struct platform_device *pdev) platform_device_del(glue->musb); platform_device_put(glue->musb); - pm_runtime_put(&pdev->dev); kfree(glue); return 0; @@ -491,11 +492,13 @@ static int omap2430_runtime_suspend(struct device *dev) struct omap2430_glue *glue = dev_get_drvdata(dev); struct musb *musb = glue_to_musb(glue); - musb->context.otg_interfsel = musb_readl(musb->mregs, - OTG_INTERFSEL); + if (musb) { + musb->context.otg_interfsel = musb_readl(musb->mregs, + OTG_INTERFSEL); - omap2430_low_level_exit(musb); - usb_phy_set_suspend(musb->xceiv, 1); + 
omap2430_low_level_exit(musb); + usb_phy_set_suspend(musb->xceiv, 1); + } return 0; } @@ -505,11 +508,13 @@ static int omap2430_runtime_resume(struct device *dev) struct omap2430_glue *glue = dev_get_drvdata(dev); struct musb *musb = glue_to_musb(glue); - omap2430_low_level_init(musb); - musb_writel(musb->mregs, OTG_INTERFSEL, - musb->context.otg_interfsel); + if (musb) { + omap2430_low_level_init(musb); + musb_writel(musb->mregs, OTG_INTERFSEL, + musb->context.otg_interfsel); - usb_phy_set_suspend(musb->xceiv, 0); + usb_phy_set_suspend(musb->xceiv, 0); + } return 0; } diff --git a/drivers/usb/otg/gpio_vbus.c b/drivers/usb/otg/gpio_vbus.c index 3ece43a2e4c1..a0a2178974fe 100644 --- a/drivers/usb/otg/gpio_vbus.c +++ b/drivers/usb/otg/gpio_vbus.c @@ -96,7 +96,7 @@ static void gpio_vbus_work(struct work_struct *work) struct gpio_vbus_data *gpio_vbus = container_of(work, struct gpio_vbus_data, work); struct gpio_vbus_mach_info *pdata = gpio_vbus->dev->platform_data; - int gpio; + int gpio, status; if (!gpio_vbus->phy.otg->gadget) return; @@ -108,7 +108,9 @@ static void gpio_vbus_work(struct work_struct *work) */ gpio = pdata->gpio_pullup; if (is_vbus_powered(pdata)) { + status = USB_EVENT_VBUS; gpio_vbus->phy.state = OTG_STATE_B_PERIPHERAL; + gpio_vbus->phy.last_event = status; usb_gadget_vbus_connect(gpio_vbus->phy.otg->gadget); /* drawing a "unit load" is *always* OK, except for OTG */ @@ -117,6 +119,9 @@ static void gpio_vbus_work(struct work_struct *work) /* optionally enable D+ pullup */ if (gpio_is_valid(gpio)) gpio_set_value(gpio, !pdata->gpio_pullup_inverted); + + atomic_notifier_call_chain(&gpio_vbus->phy.notifier, + status, gpio_vbus->phy.otg->gadget); } else { /* optionally disable D+ pullup */ if (gpio_is_valid(gpio)) @@ -125,7 +130,12 @@ static void gpio_vbus_work(struct work_struct *work) set_vbus_draw(gpio_vbus, 0); usb_gadget_vbus_disconnect(gpio_vbus->phy.otg->gadget); + status = USB_EVENT_NONE; gpio_vbus->phy.state = OTG_STATE_B_IDLE; + gpio_vbus->phy.last_event = status; + + atomic_notifier_call_chain(&gpio_vbus->phy.notifier, + status, gpio_vbus->phy.otg->gadget); } } @@ -287,6 +297,9 @@ static int __init gpio_vbus_probe(struct platform_device *pdev) irq, err); goto err_irq; } + + ATOMIC_INIT_NOTIFIER_HEAD(&gpio_vbus->phy.notifier); + INIT_WORK(&gpio_vbus->work, gpio_vbus_work); gpio_vbus->vbus_draw = regulator_get(&pdev->dev, "vbus_draw"); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 0310e2df59f5..ec30f95ef399 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -287,7 +287,8 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request, /* Issue the request, attempting to read 'size' bytes */ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), request, REQTYPE_DEVICE_TO_HOST, 0x0000, - port_priv->bInterfaceNumber, buf, size, 300); + port_priv->bInterfaceNumber, buf, size, + USB_CTRL_GET_TIMEOUT); /* Convert data into an array of integers */ for (i = 0; i < length; i++) @@ -340,12 +341,14 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request, result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), request, REQTYPE_HOST_TO_DEVICE, 0x0000, - port_priv->bInterfaceNumber, buf, size, 300); + port_priv->bInterfaceNumber, buf, size, + USB_CTRL_SET_TIMEOUT); } else { result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), request, REQTYPE_HOST_TO_DEVICE, data[0], - port_priv->bInterfaceNumber, NULL, 0, 300); + port_priv->bInterfaceNumber, NULL, 0, 
+ USB_CTRL_SET_TIMEOUT); } kfree(buf); diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index fdd5aa2c8d82..8c8bf806f6fa 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c @@ -221,7 +221,7 @@ static const struct sierra_iface_info typeB_interface_list = { }; /* 'blacklist' of interfaces not served by this driver */ -static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 }; +static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11, 19, 20 }; static const struct sierra_iface_info direct_ip_interface_blacklist = { .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces), .ifaceinfo = direct_ip_non_serial_ifaces, @@ -289,7 +289,6 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */ { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */ { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */ - { USB_DEVICE(0x1199, 0x68A2) }, /* Sierra Wireless MC7710 */ /* Sierra Wireless C885 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)}, /* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */ @@ -299,6 +298,9 @@ static const struct usb_device_id id_table[] = { /* Sierra Wireless HSPA Non-Composite Device */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */ + { USB_DEVICE(0x1199, 0x68A2), /* Sierra Wireless MC77xx in QMI mode */ + .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist + }, { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 66797e9c5010..810c90ae2c55 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c @@ -645,7 +645,8 @@ void hwarc_neep_cb(struct urb *urb) dev_err(dev, "NEEP: URB error %d\n", urb->status); } result = usb_submit_urb(urb, GFP_ATOMIC); - if (result < 0) { + if (result < 0 && result != -ENODEV && result != -EPERM) { + /* ignoring unrecoverable errors */ dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", result); goto error; diff --git a/drivers/uwb/neh.c b/drivers/uwb/neh.c index a269937be1b8..8cb71bb333c2 100644 --- a/drivers/uwb/neh.c +++ b/drivers/uwb/neh.c @@ -107,6 +107,7 @@ struct uwb_rc_neh { u8 evt_type; __le16 evt; u8 context; + u8 completed; uwb_rc_cmd_cb_f cb; void *arg; @@ -409,6 +410,7 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size struct device *dev = &rc->uwb_dev.dev; struct uwb_rc_neh *neh; struct uwb_rceb *notif; + unsigned long flags; if (rceb->bEventContext == 0) { notif = kmalloc(size, GFP_ATOMIC); @@ -422,7 +424,11 @@ static void uwb_rc_neh_grok_event(struct uwb_rc *rc, struct uwb_rceb *rceb, size } else { neh = uwb_rc_neh_lookup(rc, rceb); if (neh) { - del_timer_sync(&neh->timer); + spin_lock_irqsave(&rc->neh_lock, flags); + /* to guard against a timeout */ + neh->completed = 1; + del_timer(&neh->timer); + spin_unlock_irqrestore(&rc->neh_lock, flags); uwb_rc_neh_cb(neh, rceb, size); } else dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", @@ -568,6 +574,10 @@ static void uwb_rc_neh_timer(unsigned long arg) unsigned long flags; spin_lock_irqsave(&rc->neh_lock, flags); + if (neh->completed) { + spin_unlock_irqrestore(&rc->neh_lock, flags); + return; + } if (neh->context) __uwb_rc_neh_rm(rc, neh); else diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 
f0da2c32fbde..5c170100de9c 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -24,6 +24,7 @@ #include #include #include +#include #include @@ -238,7 +239,7 @@ static void handle_tx(struct vhost_net *net) vq->heads[vq->upend_idx].len = len; ubuf->callback = vhost_zerocopy_callback; - ubuf->arg = vq->ubufs; + ubuf->ctx = vq->ubufs; ubuf->desc = vq->upend_idx; msg.msg_control = ubuf; msg.msg_controllen = sizeof(ubuf); @@ -283,8 +284,12 @@ static int peek_head_len(struct sock *sk) spin_lock_irqsave(&sk->sk_receive_queue.lock, flags); head = skb_peek(&sk->sk_receive_queue); - if (likely(head)) + if (likely(head)) { len = head->len; + if (vlan_tx_tag_present(head)) + len += VLAN_HLEN; + } + spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags); return len; } diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 947f00d8e091..51e4c1eeec4f 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -1598,10 +1598,9 @@ void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs) kfree(ubufs); } -void vhost_zerocopy_callback(void *arg) +void vhost_zerocopy_callback(struct ubuf_info *ubuf) { - struct ubuf_info *ubuf = arg; - struct vhost_ubuf_ref *ubufs = ubuf->arg; + struct vhost_ubuf_ref *ubufs = ubuf->ctx; struct vhost_virtqueue *vq = ubufs->vq; /* set len = 1 to mark this desc buffers done DMA */ diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 8dcf4cca6bf2..8de1fd5b8efb 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -188,7 +188,7 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *); int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log, unsigned int log_num, u64 len); -void vhost_zerocopy_callback(void *arg); +void vhost_zerocopy_callback(struct ubuf_info *); int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq); #define vq_err(vq, fmt, ...) 
do { \ diff --git a/drivers/video/bfin-lq035q1-fb.c b/drivers/video/bfin-lq035q1-fb.c index 86922ac84412..353c02fe8a95 100644 --- a/drivers/video/bfin-lq035q1-fb.c +++ b/drivers/video/bfin-lq035q1-fb.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c index 6468a297e341..39571f9e0162 100644 --- a/drivers/video/console/sticore.c +++ b/drivers/video/console/sticore.c @@ -22,7 +22,9 @@ #include #include +#include #include +#include #include #include diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index 26e83d7fdd6f..b0e2a4261afe 100644 --- a/drivers/video/uvesafb.c +++ b/drivers/video/uvesafb.c @@ -73,7 +73,7 @@ static void uvesafb_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *ns struct uvesafb_task *utask; struct uvesafb_ktask *task; - if (!cap_raised(current_cap(), CAP_SYS_ADMIN)) + if (!capable(CAP_SYS_ADMIN)) return; if (msg->seq >= UVESAFB_TASKS_MAX) diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c index cb4529c40d74..b7f5173ff9e9 100644 --- a/drivers/video/xen-fbfront.c +++ b/drivers/video/xen-fbfront.c @@ -365,7 +365,7 @@ static int __devinit xenfb_probe(struct xenbus_device *dev, struct fb_info *fb_info; int fb_size; int val; - int ret; + int ret = 0; info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) { @@ -458,26 +458,31 @@ static int __devinit xenfb_probe(struct xenbus_device *dev, xenfb_init_shared_page(info, fb_info); ret = xenfb_connect_backend(dev, info); - if (ret < 0) - goto error; + if (ret < 0) { + xenbus_dev_fatal(dev, ret, "xenfb_connect_backend"); + goto error_fb; + } ret = register_framebuffer(fb_info); if (ret) { - fb_deferred_io_cleanup(fb_info); - fb_dealloc_cmap(&fb_info->cmap); - framebuffer_release(fb_info); xenbus_dev_fatal(dev, ret, "register_framebuffer"); - goto error; + goto error_fb; } info->fb_info = fb_info; xenfb_make_preferred_console(); return 0; - error_nomem: - ret = -ENOMEM; - xenbus_dev_fatal(dev, ret, "allocating device memory"); - error: +error_fb: + fb_deferred_io_cleanup(fb_info); + fb_dealloc_cmap(&fb_info->cmap); + framebuffer_release(fb_info); +error_nomem: + if (!ret) { + ret = -ENOMEM; + xenbus_dev_fatal(dev, ret, "allocating device memory"); + } +error: xenfb_remove(dev); return ret; } diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index cbc7ceef2786..9f13b897fd64 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -435,16 +435,16 @@ static void hpwdt_start(void) { reload = SECS_TO_TICKS(soft_margin); iowrite16(reload, hpwdt_timer_reg); - iowrite16(0x85, hpwdt_timer_con); + iowrite8(0x85, hpwdt_timer_con); } static void hpwdt_stop(void) { unsigned long data; - data = ioread16(hpwdt_timer_con); + data = ioread8(hpwdt_timer_con); data &= 0xFE; - iowrite16(data, hpwdt_timer_con); + iowrite8(data, hpwdt_timer_con); } static void hpwdt_ping(void) diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 94243136f6bf..ea20c51d24c7 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -183,15 +183,17 @@ config XEN_ACPI_PROCESSOR depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ default m help - This ACPI processor uploads Power Management information to the Xen hypervisor. + This ACPI processor uploads Power Management information to the Xen + hypervisor. - To do that the driver parses the Power Management data and uploads said - information to the Xen hypervisor. Then the Xen hypervisor can select the - proper Cx and Pxx states. 
It also registers itslef as the SMM so that - other drivers (such as ACPI cpufreq scaling driver) will not load. + To do that the driver parses the Power Management data and uploads + said information to the Xen hypervisor. Then the Xen hypervisor can + select the proper Cx and Pxx states. It also registers itself as the + SMM so that other drivers (such as ACPI cpufreq scaling driver) will + not load. - To compile this driver as a module, choose M here: the - module will be called xen_acpi_processor If you do not know what to choose, - select M here. If the CPUFREQ drivers are built in, select Y here. + To compile this driver as a module, choose M here: the module will be + called xen_acpi_processor. If you do not know what to choose, select + M here. If the CPUFREQ drivers are built in, select Y here. endmenu diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 4b33acd8ed4e..0a8a17cd80be 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -274,7 +274,7 @@ static unsigned int cpu_from_evtchn(unsigned int evtchn) static bool pirq_check_eoi_map(unsigned irq) { - return test_bit(irq, pirq_eoi_map); + return test_bit(pirq_from_irq(irq), pirq_eoi_map); } static bool pirq_needs_eoi_flag(unsigned irq) diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 99d8151c824a..1ffd03bf8e10 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -722,7 +722,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND; if (use_ptemod) - vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP; + vma->vm_flags |= VM_DONTCOPY; vma->vm_private_data = map; diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index b4d4eac761db..f100ce20b16b 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -1029,6 +1029,7 @@ int gnttab_init(void) int i; unsigned int max_nr_glist_frames, nr_glist_frames; unsigned int nr_init_grefs; + int ret; nr_grant_frames = 1; boot_max_nr_grant_frames = __max_nr_grant_frames(); @@ -1047,12 +1048,16 @@ int gnttab_init(void) nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP; for (i = 0; i < nr_glist_frames; i++) { gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL); - if (gnttab_list[i] == NULL) + if (gnttab_list[i] == NULL) { + ret = -ENOMEM; goto ini_nomem; + } } - if (gnttab_resume() < 0) - return -ENODEV; + if (gnttab_resume() < 0) { + ret = -ENODEV; + goto ini_nomem; + } nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME; @@ -1070,7 +1075,7 @@ int gnttab_init(void) for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); - return -ENOMEM; + return ret; } EXPORT_SYMBOL_GPL(gnttab_init); diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 9e14ae6cd49c..412b96cc5305 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c @@ -132,6 +132,7 @@ static void do_suspend(void) err = dpm_suspend_end(PMSG_FREEZE); if (err) { printk(KERN_ERR "dpm_suspend_end failed: %d\n", err); + si.cancelled = 0; goto out_resume; } diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index 174b5653cd8a..0b48579a9cd6 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c @@ -128,7 +128,10 @@ static int push_cxx_to_hypervisor(struct acpi_processor *_pr) pr_debug(" C%d: %s %d uS\n", cx->type, cx->desc, (u32)cx->latency); } - } else + } else if (ret != -EINVAL) + /* EINVAL means the ACPI ID is incorrect - meaning the ACPI + * table is referencing a non-existing CPU -
which can happen + * with broken ACPI tables. */ pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n", ret, _pr->acpi_id); diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c index f20c5f178b40..a31b54d48839 100644 --- a/drivers/xen/xenbus/xenbus_probe_frontend.c +++ b/drivers/xen/xenbus/xenbus_probe_frontend.c @@ -135,7 +135,7 @@ static int read_backend_details(struct xenbus_device *xendev) return xenbus_read_otherend_details(xendev, "backend-id", "backend"); } -static int is_device_connecting(struct device *dev, void *data) +static int is_device_connecting(struct device *dev, void *data, bool ignore_nonessential) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; @@ -152,16 +152,41 @@ static int is_device_connecting(struct device *dev, void *data) if (drv && (dev->driver != drv)) return 0; + if (ignore_nonessential) { + /* With older QEMU, for PVonHVM guests the guest config files + * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0'] + * which is nonsensical as there is no PV FB (there can be + * a PVKB) running as HVM guest. */ + + if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0)) + return 0; + + if ((strncmp(xendev->nodename, "device/vfb", 10) == 0)) + return 0; + } xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } +static int essential_device_connecting(struct device *dev, void *data) +{ + return is_device_connecting(dev, data, true /* ignore PV[KBB+FB] */); +} +static int non_essential_device_connecting(struct device *dev, void *data) +{ + return is_device_connecting(dev, data, false); +} -static int exists_connecting_device(struct device_driver *drv) +static int exists_essential_connecting_device(struct device_driver *drv) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, - is_device_connecting); + essential_device_connecting); +} +static int exists_non_essential_connecting_device(struct device_driver *drv) +{ + return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, + non_essential_device_connecting); } static int print_device_status(struct device *dev, void *data) @@ -192,6 +217,23 @@ static int print_device_status(struct device *dev, void *data) /* We only wait for device setup after most initcalls have run. */ static int ready_to_wait_for_devices; +static bool wait_loop(unsigned long start, unsigned int max_delay, + unsigned int *seconds_waited) +{ + if (time_after(jiffies, start + (*seconds_waited+5)*HZ)) { + if (!*seconds_waited) + printk(KERN_WARNING "XENBUS: Waiting for " + "devices to initialise: "); + *seconds_waited += 5; + printk("%us...", max_delay - *seconds_waited); + if (*seconds_waited == max_delay) + return true; + } + + schedule_timeout_interruptible(HZ/10); + + return false; +} /* * On a 5-minute timeout, wait for all devices currently configured. 
We need * to do this to guarantee that the filesystems and / or network devices @@ -215,19 +257,14 @@ static void wait_for_devices(struct xenbus_driver *xendrv) if (!ready_to_wait_for_devices || !xen_domain()) return; - while (exists_connecting_device(drv)) { - if (time_after(jiffies, start + (seconds_waited+5)*HZ)) { - if (!seconds_waited) - printk(KERN_WARNING "XENBUS: Waiting for " - "devices to initialise: "); - seconds_waited += 5; - printk("%us...", 300 - seconds_waited); - if (seconds_waited == 300) - break; - } + while (exists_non_essential_connecting_device(drv)) + if (wait_loop(start, 30, &seconds_waited)) + break; - schedule_timeout_interruptible(HZ/10); - } + /* Skips PVKB and PVFB check.*/ + while (exists_essential_connecting_device(drv)) + if (wait_loop(start, 270, &seconds_waited)) + break; if (seconds_waited) printk("\n"); diff --git a/fs/aio.c b/fs/aio.c index da887604dfc5..67a6db3e1b6f 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -93,9 +93,8 @@ static void aio_free_ring(struct kioctx *ctx) put_page(info->ring_pages[i]); if (info->mmap_size) { - down_write(&ctx->mm->mmap_sem); - do_munmap(ctx->mm, info->mmap_base, info->mmap_size); - up_write(&ctx->mm->mmap_sem); + BUG_ON(ctx->mm != current->mm); + vm_munmap(info->mmap_base, info->mmap_size); } if (info->ring_pages && info->ring_pages != info->internal_pages) @@ -389,6 +388,17 @@ void exit_aio(struct mm_struct *mm) "exit_aio:ioctx still alive: %d %d %d\n", atomic_read(&ctx->users), ctx->dead, ctx->reqs_active); + /* + * We don't need to bother with munmap() here - + * exit_mmap(mm) is coming and it'll unmap everything. + * Since aio_free_ring() uses non-zero ->mmap_size + * as indicator that it needs to unmap the area, + * just set it to 0; aio_free_ring() is the only + * place that uses ->mmap_size, so it's safe. + * That way we get all munmap done to current->mm - + * all other callers have ctx->mm == current->mm. 
+ */ + ctx->ring_info.mmap_size = 0; put_ioctx(ctx); } } diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index eb1cc92cd67d..908e18455413 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -110,7 +110,6 @@ struct autofs_sb_info { int sub_version; int min_proto; int max_proto; - int compat_daemon; unsigned long exp_timeout; unsigned int type; int reghost_enabled; @@ -270,6 +269,17 @@ int autofs4_fill_super(struct super_block *, void *, int); struct autofs_info *autofs4_new_ino(struct autofs_sb_info *); void autofs4_clean_ino(struct autofs_info *); +static inline int autofs_prepare_pipe(struct file *pipe) +{ + if (!pipe->f_op || !pipe->f_op->write) + return -EINVAL; + if (!S_ISFIFO(pipe->f_dentry->d_inode->i_mode)) + return -EINVAL; + /* We want a packet pipe */ + pipe->f_flags |= O_DIRECT; + return 0; +} + /* Queue management functions */ int autofs4_wait(struct autofs_sb_info *,struct dentry *, enum autofs_notify); diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index 9dacb8586701..aa9103f8f01b 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -376,7 +376,7 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp, err = -EBADF; goto out; } - if (!pipe->f_op || !pipe->f_op->write) { + if (autofs_prepare_pipe(pipe) < 0) { err = -EPIPE; fput(pipe); goto out; @@ -385,7 +385,6 @@ static int autofs_dev_ioctl_setpipefd(struct file *fp, sbi->pipefd = pipefd; sbi->pipe = pipe; sbi->catatonic = 0; - sbi->compat_daemon = is_compat_task(); } out: mutex_unlock(&sbi->wq_mutex); diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c index d8dc002e9cc3..6e488ebe7784 100644 --- a/fs/autofs4/inode.c +++ b/fs/autofs4/inode.c @@ -19,7 +19,6 @@ #include #include #include -#include #include "autofs_i.h" #include @@ -225,7 +224,6 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) set_autofs_type_indirect(&sbi->type); sbi->min_proto = 0; sbi->max_proto = 0; - sbi->compat_daemon = is_compat_task(); mutex_init(&sbi->wq_mutex); mutex_init(&sbi->pipe_mutex); spin_lock_init(&sbi->fs_lock); @@ -292,7 +290,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent) printk("autofs: could not open pipe file descriptor\n"); goto fail_dput; } - if (!pipe->f_op || !pipe->f_op->write) + if (autofs_prepare_pipe(pipe) < 0) goto fail_fput; sbi->pipe = pipe; sbi->pipefd = pipefd; diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 9c098db43344..da8876d38a7b 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c @@ -91,24 +91,7 @@ static int autofs4_write(struct autofs_sb_info *sbi, return (bytes > 0); } - -/* - * The autofs_v5 packet was misdesigned. - * - * The packets are identical on x86-32 and x86-64, but have different - * alignment. Which means that 'sizeof()' will give different results. - * Fix it up for the case of running 32-bit user mode on a 64-bit kernel. 
- */ -static noinline size_t autofs_v5_packet_size(struct autofs_sb_info *sbi) -{ - size_t pktsz = sizeof(struct autofs_v5_packet); -#if defined(CONFIG_X86_64) && defined(CONFIG_COMPAT) - if (sbi->compat_daemon > 0) - pktsz -= 4; -#endif - return pktsz; -} - + static void autofs4_notify_daemon(struct autofs_sb_info *sbi, struct autofs_wait_queue *wq, int type) @@ -172,7 +155,8 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, { struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet; - pktsz = autofs_v5_packet_size(sbi); + pktsz = sizeof(*packet); + packet->wait_queue_token = wq->wait_queue_token; packet->len = wq->name.len; memcpy(packet->name, wq->name.name, wq->name.len); diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index 2eb12f13593d..d146e181d10d 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -50,9 +50,7 @@ static int set_brk(unsigned long start, unsigned long end) end = PAGE_ALIGN(end); if (end > start) { unsigned long addr; - down_write(&current->mm->mmap_sem); - addr = do_brk(start, end - start); - up_write(&current->mm->mmap_sem); + addr = vm_brk(start, end - start); if (BAD_ADDR(addr)) return addr; } @@ -280,9 +278,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) pos = 32; map_size = ex.a_text+ex.a_data; #endif - down_write(&current->mm->mmap_sem); - error = do_brk(text_addr & PAGE_MASK, map_size); - up_write(&current->mm->mmap_sem); + error = vm_brk(text_addr & PAGE_MASK, map_size); if (error != (text_addr & PAGE_MASK)) { send_sig(SIGKILL, current, 0); return error; @@ -313,9 +309,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) { loff_t pos = fd_offset; - down_write(&current->mm->mmap_sem); - do_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); - up_write(&current->mm->mmap_sem); + vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); bprm->file->f_op->read(bprm->file, (char __user *)N_TXTADDR(ex), ex.a_text+ex.a_data, &pos); @@ -325,24 +319,20 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) goto beyond_if; } - down_write(&current->mm->mmap_sem); - error = do_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, + error = vm_mmap(bprm->file, N_TXTADDR(ex), ex.a_text, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, fd_offset); - up_write(&current->mm->mmap_sem); if (error != N_TXTADDR(ex)) { send_sig(SIGKILL, current, 0); return error; } - down_write(&current->mm->mmap_sem); - error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data, + error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, fd_offset + ex.a_text); - up_write(&current->mm->mmap_sem); if (error != N_DATADDR(ex)) { send_sig(SIGKILL, current, 0); return error; @@ -412,9 +402,7 @@ static int load_aout_library(struct file *file) "N_TXTOFF is not page aligned. Please convert library: %s\n", file->f_path.dentry->d_name.name); } - down_write(&current->mm->mmap_sem); - do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); - up_write(&current->mm->mmap_sem); + vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); file->f_op->read(file, (char __user *)start_addr, ex.a_text + ex.a_data, &pos); @@ -425,12 +413,10 @@ static int load_aout_library(struct file *file) goto out; } /* Now use mmap to map the library into memory.
*/ - down_write(&current->mm->mmap_sem); - error = do_mmap(file, start_addr, ex.a_text + ex.a_data, + error = vm_mmap(file, start_addr, ex.a_text + ex.a_data, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE, N_TXTOFF(ex)); - up_write(&current->mm->mmap_sem); retval = error; if (error != start_addr) goto out; @@ -438,9 +424,7 @@ static int load_aout_library(struct file *file) len = PAGE_ALIGN(ex.a_text + ex.a_data); bss = ex.a_text + ex.a_data + ex.a_bss; if (bss > len) { - down_write(&current->mm->mmap_sem); - error = do_brk(start_addr + len, bss - len); - up_write(&current->mm->mmap_sem); + error = vm_brk(start_addr + len, bss - len); retval = error; if (error != start_addr + len) goto out; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 48ffb3dc610a..16f735417072 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -82,9 +82,7 @@ static int set_brk(unsigned long start, unsigned long end) end = ELF_PAGEALIGN(end); if (end > start) { unsigned long addr; - down_write(&current->mm->mmap_sem); - addr = do_brk(start, end - start); - up_write(&current->mm->mmap_sem); + addr = vm_brk(start, end - start); if (BAD_ADDR(addr)) return addr; } @@ -514,9 +512,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex, elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1); /* Map the last of the bss segment */ - down_write(&current->mm->mmap_sem); - error = do_brk(elf_bss, last_bss - elf_bss); - up_write(&current->mm->mmap_sem); + error = vm_brk(elf_bss, last_bss - elf_bss); if (BAD_ADDR(error)) goto out_close; } @@ -962,10 +958,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs) and some applications "depend" upon this behavior. Since we do not have the power to recompile these, we emulate the SVr4 behavior. Sigh. */ - down_write(&current->mm->mmap_sem); - error = do_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC, + error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC, MAP_FIXED | MAP_PRIVATE, 0); - up_write(&current->mm->mmap_sem); } #ifdef ELF_PLAT_INIT @@ -1050,8 +1044,7 @@ static int load_elf_library(struct file *file) eppnt++; /* Now use mmap to map the library into memory.
*/ - down_write(&current->mm->mmap_sem); - error = do_mmap(file, + error = vm_mmap(file, ELF_PAGESTART(eppnt->p_vaddr), (eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr)), @@ -1059,7 +1052,6 @@ static int load_elf_library(struct file *file) MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE, (eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr))); - up_write(&current->mm->mmap_sem); if (error != ELF_PAGESTART(eppnt->p_vaddr)) goto out_free_ph; @@ -1072,11 +1064,8 @@ static int load_elf_library(struct file *file) len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1); bss = eppnt->p_memsz + eppnt->p_vaddr; - if (bss > len) { - down_write(&current->mm->mmap_sem); - do_brk(len, bss - len); - up_write(&current->mm->mmap_sem); - } + if (bss > len) + vm_brk(len, bss - len); error = 0; out_free_ph: diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 9bd5612a8224..d390a0fffc65 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -390,21 +390,17 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC)) stack_prot |= PROT_EXEC; - down_write(&current->mm->mmap_sem); - current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot, + current->mm->start_brk = vm_mmap(NULL, 0, stack_size, stack_prot, MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED | MAP_GROWSDOWN, 0); if (IS_ERR_VALUE(current->mm->start_brk)) { - up_write(&current->mm->mmap_sem); retval = current->mm->start_brk; current->mm->start_brk = 0; goto error_kill; } - up_write(&current->mm->mmap_sem); - current->mm->brk = current->mm->start_brk; current->mm->context.end_brk = current->mm->start_brk; current->mm->context.end_brk += @@ -955,10 +951,8 @@ static int elf_fdpic_map_file_constdisp_on_uclinux( if (params->flags & ELF_FDPIC_FLAG_EXECUTABLE) mflags |= MAP_EXECUTABLE; - down_write(&mm->mmap_sem); - maddr = do_mmap(NULL, load_addr, top - base, + maddr = vm_mmap(NULL, load_addr, top - base, PROT_READ | PROT_WRITE | PROT_EXEC, mflags, 0); - up_write(&mm->mmap_sem); if (IS_ERR_VALUE(maddr)) return (int) maddr; @@ -1096,10 +1090,8 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params, /* create the mapping */ disp = phdr->p_vaddr & ~PAGE_MASK; - down_write(&mm->mmap_sem); - maddr = do_mmap(file, maddr, phdr->p_memsz + disp, prot, flags, + maddr = vm_mmap(file, maddr, phdr->p_memsz + disp, prot, flags, phdr->p_offset - disp); - up_write(&mm->mmap_sem); kdebug("mmap[%d] sz=%lx pr=%x fl=%x of=%lx --> %08lx", loop, phdr->p_memsz + disp, prot, flags, @@ -1143,10 +1135,8 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params, unsigned long xmaddr; flags |= MAP_FIXED | MAP_ANONYMOUS; - down_write(&mm->mmap_sem); - xmaddr = do_mmap(NULL, xaddr, excess - excess1, + xmaddr = vm_mmap(NULL, xaddr, excess - excess1, prot, flags, 0); - up_write(&mm->mmap_sem); kdebug("mmap[%d] " " ad=%lx sz=%lx pr=%x fl=%x of=0 --> %08lx", diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 024d20ee3ca3..6b2daf99fab8 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -542,10 +542,8 @@ static int load_flat_file(struct linux_binprm * bprm, */ DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n"); - down_write(&current->mm->mmap_sem); - textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, + textpos = vm_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_EXECUTABLE, 0); - up_write(&current->mm->mmap_sem); if (!textpos || IS_ERR_VALUE(textpos)) { if (!textpos) textpos = (unsigned long) -ENOMEM; @@ -556,10 +554,8 @@ static int
load_flat_file(struct linux_binprm * bprm, len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); len = PAGE_ALIGN(len); - down_write(&current->mm->mmap_sem); - realdatastart = do_mmap(0, 0, len, + realdatastart = vm_mmap(0, 0, len, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0); - up_write(&current->mm->mmap_sem); if (realdatastart == 0 || IS_ERR_VALUE(realdatastart)) { if (!realdatastart) @@ -603,10 +599,8 @@ static int load_flat_file(struct linux_binprm * bprm, len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); len = PAGE_ALIGN(len); - down_write(&current->mm->mmap_sem); - textpos = do_mmap(0, 0, len, + textpos = vm_mmap(0, 0, len, PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); - up_write(&current->mm->mmap_sem); if (!textpos || IS_ERR_VALUE(textpos)) { if (!textpos) diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c index e4fc746629a7..4517aaff61b4 100644 --- a/fs/binfmt_som.c +++ b/fs/binfmt_som.c @@ -147,10 +147,8 @@ static int map_som_binary(struct file *file, code_size = SOM_PAGEALIGN(hpuxhdr->exec_tsize); current->mm->start_code = code_start; current->mm->end_code = code_start + code_size; - down_write(&current->mm->mmap_sem); - retval = do_mmap(file, code_start, code_size, prot, + retval = vm_mmap(file, code_start, code_size, prot, flags, SOM_PAGESTART(hpuxhdr->exec_tfile)); - up_write(&current->mm->mmap_sem); if (retval < 0 && retval > -1024) goto out; @@ -158,20 +156,16 @@ static int map_som_binary(struct file *file, data_size = SOM_PAGEALIGN(hpuxhdr->exec_dsize); current->mm->start_data = data_start; current->mm->end_data = bss_start = data_start + data_size; - down_write(&current->mm->mmap_sem); - retval = do_mmap(file, data_start, data_size, + retval = vm_mmap(file, data_start, data_size, prot | PROT_WRITE, flags, SOM_PAGESTART(hpuxhdr->exec_dfile)); - up_write(&current->mm->mmap_sem); if (retval < 0 && retval > -1024) goto out; som_brk = bss_start + SOM_PAGEALIGN(hpuxhdr->exec_bsize); current->mm->start_brk = current->mm->brk = som_brk; - down_write(&current->mm->mmap_sem); - retval = do_mmap(NULL, bss_start, som_brk - bss_start, + retval = vm_mmap(NULL, bss_start, som_brk - bss_start, prot | PROT_WRITE, MAP_FIXED | MAP_PRIVATE, 0); - up_write(&current->mm->mmap_sem); if (retval > 0 || retval < -1024) retval = 0; out: diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index f4e90748940a..bcec06750232 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -22,6 +22,7 @@ #include "ulist.h" #include "transaction.h" #include "delayed-ref.h" +#include "locking.h" /* * this structure records all encountered refs on the way up to the root @@ -893,18 +894,22 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, s64 bytes_left = size - 1; struct extent_buffer *eb = eb_in; struct btrfs_key found_key; + int leave_spinning = path->leave_spinning; if (bytes_left >= 0) dest[bytes_left] = '\0'; + path->leave_spinning = 1; while (1) { len = btrfs_inode_ref_name_len(eb, iref); bytes_left -= len; if (bytes_left >= 0) read_extent_buffer(eb, dest + bytes_left, (unsigned long)(iref + 1), len); - if (eb != eb_in) + if (eb != eb_in) { + btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); + } ret = inode_ref_info(parent, 0, fs_root, path, &found_key); if (ret > 0) ret = -ENOENT; @@ -919,8 +924,11 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, slot = path->slots[0]; eb = path->nodes[0]; /* make sure we can use eb after releasing the path */ - if (eb != eb_in) + if (eb != eb_in) { atomic_inc(&eb->refs); + btrfs_tree_read_lock(eb); +
btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); + } btrfs_release_path(path); iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref); @@ -931,6 +939,7 @@ static char *iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, } btrfs_release_path(path); + path->leave_spinning = leave_spinning; if (ret) return ERR_PTR(ret); @@ -1247,7 +1256,7 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root, struct btrfs_path *path, iterate_irefs_t *iterate, void *ctx) { - int ret; + int ret = 0; int slot; u32 cur; u32 len; @@ -1259,7 +1268,8 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root, struct btrfs_inode_ref *iref; struct btrfs_key found_key; - while (1) { + while (!ret) { + path->leave_spinning = 1; ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path, &found_key); if (ret < 0) @@ -1275,6 +1285,8 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root, eb = path->nodes[0]; /* make sure we can use eb after releasing the path */ atomic_inc(&eb->refs); + btrfs_tree_read_lock(eb); + btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); btrfs_release_path(path); item = btrfs_item_nr(eb, slot); @@ -1288,13 +1300,12 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root, (unsigned long long)found_key.objectid, (unsigned long long)fs_root->objectid); ret = iterate(parent, iref, eb, ctx); - if (ret) { - free_extent_buffer(eb); + if (ret) break; - } len = sizeof(*iref) + name_len; iref = (struct btrfs_inode_ref *)((char *)iref + len); } + btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); } @@ -1414,6 +1425,8 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root, void free_ipath(struct inode_fs_paths *ipath) { + if (!ipath) + return; kfree(ipath->fspath); kfree(ipath); } diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index e801f226d7e0..4106264fbc65 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -220,10 +220,12 @@ struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root) */ static void add_root_to_dirty_list(struct btrfs_root *root) { + spin_lock(&root->fs_info->trans_lock); if (root->track_dirty && list_empty(&root->dirty_list)) { list_add(&root->dirty_list, &root->fs_info->dirty_cowonly_roots); } + spin_unlock(&root->fs_info->trans_lock); } /* @@ -723,7 +725,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, cur = btrfs_find_tree_block(root, blocknr, blocksize); if (cur) - uptodate = btrfs_buffer_uptodate(cur, gen); + uptodate = btrfs_buffer_uptodate(cur, gen, 0); else uptodate = 0; if (!cur || !uptodate) { @@ -1358,7 +1360,12 @@ static noinline int reada_for_balance(struct btrfs_root *root, block1 = btrfs_node_blockptr(parent, slot - 1); gen = btrfs_node_ptr_generation(parent, slot - 1); eb = btrfs_find_tree_block(root, block1, blocksize); - if (eb && btrfs_buffer_uptodate(eb, gen)) + /* + * if we get -eagain from btrfs_buffer_uptodate, we + * don't want to return eagain here. 
That will loop + * forever + */ + if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) block1 = 0; free_extent_buffer(eb); } @@ -1366,7 +1373,7 @@ static noinline int reada_for_balance(struct btrfs_root *root, block2 = btrfs_node_blockptr(parent, slot + 1); gen = btrfs_node_ptr_generation(parent, slot + 1); eb = btrfs_find_tree_block(root, block2, blocksize); - if (eb && btrfs_buffer_uptodate(eb, gen)) + if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0) block2 = 0; free_extent_buffer(eb); } @@ -1504,8 +1511,9 @@ read_block_for_search(struct btrfs_trans_handle *trans, tmp = btrfs_find_tree_block(root, blocknr, blocksize); if (tmp) { - if (btrfs_buffer_uptodate(tmp, 0)) { - if (btrfs_buffer_uptodate(tmp, gen)) { + /* first we do an atomic uptodate check */ + if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) { + if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) { /* * we found an up to date block without * sleeping, return @@ -1523,8 +1531,9 @@ read_block_for_search(struct btrfs_trans_handle *trans, free_extent_buffer(tmp); btrfs_set_path_blocking(p); + /* now we're allowed to do a blocking uptodate check */ tmp = read_tree_block(root, blocknr, blocksize, gen); - if (tmp && btrfs_buffer_uptodate(tmp, gen)) { + if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) { *eb_ret = tmp; return 0; } @@ -1559,7 +1568,7 @@ read_block_for_search(struct btrfs_trans_handle *trans, * and give up so that our caller doesn't loop forever * on our EAGAINs. */ - if (!btrfs_buffer_uptodate(tmp, 0)) + if (!btrfs_buffer_uptodate(tmp, 0, 0)) ret = -EIO; free_extent_buffer(tmp); } @@ -4043,7 +4052,7 @@ again: tmp = btrfs_find_tree_block(root, blockptr, btrfs_level_size(root, level - 1)); - if (tmp && btrfs_buffer_uptodate(tmp, gen)) { + if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) { free_extent_buffer(tmp); break; } @@ -4166,7 +4175,8 @@ next: struct extent_buffer *cur; cur = btrfs_find_tree_block(root, blockptr, btrfs_level_size(root, level - 1)); - if (!cur || !btrfs_buffer_uptodate(cur, gen)) { + if (!cur || + btrfs_buffer_uptodate(cur, gen, 1) <= 0) { slot++; if (cur) free_extent_buffer(cur); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 3f65a812e282..8fd72331d600 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1078,7 +1078,7 @@ struct btrfs_fs_info { * is required instead of the faster short fsync log commits */ u64 last_trans_log_full_commit; - unsigned long mount_opt:21; + unsigned long mount_opt; unsigned long compress_type:4; u64 max_inline; u64 alloc_start; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 20196f411206..a7ffc88a7dbe 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -323,7 +323,8 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, * in the wrong place. 
*/ static int verify_parent_transid(struct extent_io_tree *io_tree, - struct extent_buffer *eb, u64 parent_transid) + struct extent_buffer *eb, u64 parent_transid, + int atomic) { struct extent_state *cached_state = NULL; int ret; @@ -331,6 +332,9 @@ static int verify_parent_transid(struct extent_io_tree *io_tree, if (!parent_transid || btrfs_header_generation(eb) == parent_transid) return 0; + if (atomic) + return -EAGAIN; + lock_extent_bits(io_tree, eb->start, eb->start + eb->len - 1, 0, &cached_state); if (extent_buffer_uptodate(eb) && @@ -372,7 +376,8 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, ret = read_extent_buffer_pages(io_tree, eb, start, WAIT_COMPLETE, btree_get_extent, mirror_num); - if (!ret && !verify_parent_transid(io_tree, eb, parent_transid)) + if (!ret && !verify_parent_transid(io_tree, eb, + parent_transid, 0)) break; /* @@ -383,17 +388,16 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, if (test_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags)) break; - if (!failed_mirror) { - failed = 1; - printk(KERN_ERR "failed mirror was %d\n", eb->failed_mirror); - failed_mirror = eb->failed_mirror; - } - num_copies = btrfs_num_copies(&root->fs_info->mapping_tree, eb->start, eb->len); if (num_copies == 1) break; + if (!failed_mirror) { + failed = 1; + failed_mirror = eb->read_mirror; + } + mirror_num++; if (mirror_num == failed_mirror) mirror_num++; @@ -564,7 +568,7 @@ struct extent_buffer *find_eb_for_page(struct extent_io_tree *tree, } static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, - struct extent_state *state) + struct extent_state *state, int mirror) { struct extent_io_tree *tree; u64 found_start; @@ -589,6 +593,7 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, if (!reads_done) goto err; + eb->read_mirror = mirror; if (test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) { ret = -EIO; goto err; @@ -652,7 +657,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror) eb = (struct extent_buffer *)page->private; set_bit(EXTENT_BUFFER_IOERR, &eb->bflags); - eb->failed_mirror = failed_mirror; + eb->read_mirror = failed_mirror; if (test_and_clear_bit(EXTENT_BUFFER_READAHEAD, &eb->bflags)) btree_readahead_hook(root, eb, eb->start, -EIO); return -EIO; /* we fixed nothing */ @@ -1202,7 +1207,7 @@ static int __must_check find_and_setup_root(struct btrfs_root *tree_root, root->commit_root = NULL; root->node = read_tree_block(root, btrfs_root_bytenr(&root->root_item), blocksize, generation); - if (!root->node || !btrfs_buffer_uptodate(root->node, generation)) { + if (!root->node || !btrfs_buffer_uptodate(root->node, generation, 0)) { free_extent_buffer(root->node); root->node = NULL; return -EIO; @@ -2254,9 +2259,9 @@ int open_ctree(struct super_block *sb, goto fail_sb_buffer; } - if (sectorsize < PAGE_SIZE) { - printk(KERN_WARNING "btrfs: Incompatible sector size " - "found on %s\n", sb->s_id); + if (sectorsize != PAGE_SIZE) { + printk(KERN_WARNING "btrfs: Incompatible sector size(%lu) " + "found on %s\n", (unsigned long)sectorsize, sb->s_id); goto fail_sb_buffer; } @@ -3143,7 +3148,8 @@ int close_ctree(struct btrfs_root *root) return 0; } -int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid) +int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, + int atomic) { int ret; struct inode *btree_inode = buf->pages[0]->mapping->host; @@ -3153,7 +3159,9 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid) return ret; ret = 
verify_parent_transid(&BTRFS_I(btree_inode)->io_tree, buf, - parent_transid); + parent_transid, atomic); + if (ret == -EAGAIN) + return ret; return !ret; } diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index a7ace1a2dd12..ab1830aaf0ed 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -66,7 +66,8 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); void btrfs_mark_buffer_dirty(struct extent_buffer *buf); -int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid); +int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, + int atomic); int btrfs_set_buffer_uptodate(struct extent_buffer *buf); int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 2b35f8d14bb9..49fd7b66d57b 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2301,6 +2301,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, if (ret) { printk(KERN_DEBUG "btrfs: run_delayed_extent_op returned %d\n", ret); + spin_lock(&delayed_refs->lock); return ret; } @@ -2331,6 +2332,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, if (ret) { printk(KERN_DEBUG "btrfs: run_one_delayed_ref returned %d\n", ret); + spin_lock(&delayed_refs->lock); return ret; } @@ -3769,13 +3771,10 @@ again: */ if (current->journal_info) return -EAGAIN; - ret = wait_event_interruptible(space_info->wait, - !space_info->flush); - /* Must have been interrupted, return */ - if (ret) { - printk(KERN_DEBUG "btrfs: %s returning -EINTR\n", __func__); + ret = wait_event_killable(space_info->wait, !space_info->flush); + /* Must have been killed, return */ + if (ret) return -EINTR; - } spin_lock(&space_info->lock); } @@ -4215,8 +4214,8 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) num_bytes = calc_global_metadata_size(fs_info); - spin_lock(&block_rsv->lock); spin_lock(&sinfo->lock); + spin_lock(&block_rsv->lock); block_rsv->size = num_bytes; @@ -4242,8 +4241,8 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) block_rsv->full = 1; } - spin_unlock(&sinfo->lock); spin_unlock(&block_rsv->lock); + spin_unlock(&sinfo->lock); } static void init_global_block_rsv(struct btrfs_fs_info *fs_info) @@ -6569,7 +6568,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans, goto skip; } - if (!btrfs_buffer_uptodate(next, generation)) { + if (!btrfs_buffer_uptodate(next, generation, 0)) { btrfs_tree_unlock(next); free_extent_buffer(next); next = NULL; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index cd4b5e400221..c9018a05036e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -402,20 +402,28 @@ static int split_state(struct extent_io_tree *tree, struct extent_state *orig, return 0; } +static struct extent_state *next_state(struct extent_state *state) +{ + struct rb_node *next = rb_next(&state->rb_node); + if (next) + return rb_entry(next, struct extent_state, rb_node); + else + return NULL; +} + /* * utility function to clear some bits in an extent state struct. - * it will optionally wake up any one waiting on this state (wake == 1), or - * forcibly remove the state from the tree (delete == 1). 
+ * it will optionally wake up any one waiting on this state (wake == 1) * * If no bits are set on the state struct after clearing things, the * struct is freed and removed from the tree */ -static int clear_state_bit(struct extent_io_tree *tree, - struct extent_state *state, - int *bits, int wake) +static struct extent_state *clear_state_bit(struct extent_io_tree *tree, + struct extent_state *state, + int *bits, int wake) { + struct extent_state *next; int bits_to_clear = *bits & ~EXTENT_CTLBITS; - int ret = state->state & bits_to_clear; if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { u64 range = state->end - state->start + 1; @@ -427,6 +435,7 @@ static int clear_state_bit(struct extent_io_tree *tree, if (wake) wake_up(&state->wq); if (state->state == 0) { + next = next_state(state); if (state->tree) { rb_erase(&state->rb_node, &tree->state); state->tree = NULL; @@ -436,8 +445,9 @@ static int clear_state_bit(struct extent_io_tree *tree, } } else { merge_state(tree, state); + next = next_state(state); } - return ret; + return next; } static struct extent_state * @@ -476,7 +486,6 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state *state; struct extent_state *cached; struct extent_state *prealloc = NULL; - struct rb_node *next_node; struct rb_node *node; u64 last_end; int err; @@ -528,14 +537,11 @@ hit_next: WARN_ON(state->end < start); last_end = state->end; - if (state->end < end && !need_resched()) - next_node = rb_next(&state->rb_node); - else - next_node = NULL; - /* the state doesn't have the wanted bits, go ahead */ - if (!(state->state & bits)) + if (!(state->state & bits)) { + state = next_state(state); goto next; + } /* * | ---- desired range ---- | @@ -593,16 +599,13 @@ hit_next: goto out; } - clear_state_bit(tree, state, &bits, wake); + state = clear_state_bit(tree, state, &bits, wake); next: if (last_end == (u64)-1) goto out; start = last_end + 1; - if (start <= end && next_node) { - state = rb_entry(next_node, struct extent_state, - rb_node); + if (start <= end && state && !need_resched()) goto hit_next; - } goto search_again; out: @@ -2301,7 +2304,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) u64 start; u64 end; int whole_page; - int failed_mirror; + int mirror; int ret; if (err) @@ -2340,20 +2343,18 @@ static void end_bio_extent_readpage(struct bio *bio, int err) } spin_unlock(&tree->lock); + mirror = (int)(unsigned long)bio->bi_bdev; if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { ret = tree->ops->readpage_end_io_hook(page, start, end, - state); + state, mirror); if (ret) uptodate = 0; else clean_io_failure(start, page); } - if (!uptodate) - failed_mirror = (int)(unsigned long)bio->bi_bdev; - if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) { - ret = tree->ops->readpage_io_failed_hook(page, failed_mirror); + ret = tree->ops->readpage_io_failed_hook(page, mirror); if (!ret && !err && test_bit(BIO_UPTODATE, &bio->bi_flags)) uptodate = 1; @@ -2368,8 +2369,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) * can't handle the error it will return -EIO and we * remain responsible for that page. 
*/ - ret = bio_readpage_error(bio, page, start, end, - failed_mirror, NULL); + ret = bio_readpage_error(bio, page, start, end, mirror, NULL); if (ret == 0) { uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); @@ -4120,6 +4120,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, if (atomic_inc_not_zero(&exists->refs)) { spin_unlock(&mapping->private_lock); unlock_page(p); + page_cache_release(p); mark_extent_buffer_accessed(exists); goto free_eb; } @@ -4199,8 +4200,7 @@ free_eb: unlock_page(eb->pages[i]); } - if (!atomic_dec_and_test(&eb->refs)) - return exists; + WARN_ON(!atomic_dec_and_test(&eb->refs)); btrfs_release_extent_buffer(eb); return exists; } @@ -4462,7 +4462,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, } clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags); - eb->failed_mirror = 0; + eb->read_mirror = 0; atomic_set(&eb->io_pages, num_reads); for (i = start_i; i < num_pages; i++) { page = extent_buffer_page(eb, i); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index faf10eb57f75..b516c3b8dec6 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -79,7 +79,7 @@ struct extent_io_ops { u64 start, u64 end, struct extent_state *state); int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end, - struct extent_state *state); + struct extent_state *state, int mirror); int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate); void (*set_bit_hook)(struct inode *inode, struct extent_state *state, @@ -135,7 +135,7 @@ struct extent_buffer { spinlock_t refs_lock; atomic_t refs; atomic_t io_pages; - int failed_mirror; + int read_mirror; struct list_head leak_list; struct rcu_head rcu_head; pid_t lock_owner; diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index d83260d7498f..53bf2d764bbc 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -567,6 +567,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, int extent_type; int recow; int ret; + int modify_tree = -1; if (drop_cache) btrfs_drop_extent_cache(inode, start, end - 1, 0); @@ -575,10 +576,13 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, if (!path) return -ENOMEM; + if (start >= BTRFS_I(inode)->disk_i_size) + modify_tree = 0; + while (1) { recow = 0; ret = btrfs_lookup_file_extent(trans, root, path, ino, - search_start, -1); + search_start, modify_tree); if (ret < 0) break; if (ret > 0 && path->slots[0] > 0 && search_start == start) { @@ -634,7 +638,8 @@ next_slot: } search_start = max(key.offset, start); - if (recow) { + if (recow || !modify_tree) { + modify_tree = -1; btrfs_release_path(path); continue; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 115bc05e42b0..61b16c641ce0 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1947,7 +1947,7 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, * extent_io.c will try to find good copies for us. 
*/ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end, - struct extent_state *state) + struct extent_state *state, int mirror) { size_t offset = start - ((u64)page->index << PAGE_CACHE_SHIFT); struct inode *inode = page->mapping->host; @@ -4069,7 +4069,7 @@ static struct inode *new_simple_dir(struct super_block *s, BTRFS_I(inode)->dummy_inode = 1; inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; - inode->i_op = &simple_dir_inode_operations; + inode->i_op = &btrfs_dir_ro_inode_operations; inode->i_fop = &simple_dir_operations; inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; @@ -4140,14 +4140,18 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry) static int btrfs_dentry_delete(const struct dentry *dentry) { struct btrfs_root *root; + struct inode *inode = dentry->d_inode; - if (!dentry->d_inode && !IS_ROOT(dentry)) - dentry = dentry->d_parent; + if (!inode && !IS_ROOT(dentry)) + inode = dentry->d_parent->d_inode; - if (dentry->d_inode) { - root = BTRFS_I(dentry->d_inode)->root; + if (inode) { + root = BTRFS_I(inode)->root; if (btrfs_root_refs(&root->root_item) == 0) return 1; + + if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) + return 1; } return 0; } @@ -4188,7 +4192,6 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, struct btrfs_path *path; struct list_head ins_list; struct list_head del_list; - struct qstr q; int ret; struct extent_buffer *leaf; int slot; @@ -4279,7 +4282,6 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, while (di_cur < di_total) { struct btrfs_key location; - struct dentry *tmp; if (verify_dir_item(root, leaf, di)) break; @@ -4300,35 +4302,15 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; btrfs_dir_item_key_to_cpu(leaf, di, &location); - q.name = name_ptr; - q.len = name_len; - q.hash = full_name_hash(q.name, q.len); - tmp = d_lookup(filp->f_dentry, &q); - if (!tmp) { - struct btrfs_key *newkey; - newkey = kzalloc(sizeof(struct btrfs_key), - GFP_NOFS); - if (!newkey) - goto no_dentry; - tmp = d_alloc(filp->f_dentry, &q); - if (!tmp) { - kfree(newkey); - dput(tmp); - goto no_dentry; - } - memcpy(newkey, &location, - sizeof(struct btrfs_key)); - tmp->d_fsdata = newkey; - tmp->d_flags |= DCACHE_NEED_LOOKUP; - d_rehash(tmp); - dput(tmp); - } else { - dput(tmp); - } -no_dentry: /* is this a reference to our own snapshot? If so - * skip it + * skip it. + * + * In contrast to old kernels, we insert the snapshot's + * dir item and dir index after it has been created, so + * we won't find a reference to our own snapshot. We + * still keep the following code for backward + * compatibility. 
*/ if (location.type == BTRFS_ROOT_ITEM_KEY && location.objectid == root->root_key.objectid) { diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 18cc23d164a8..14f8e1faa46e 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -2262,7 +2262,10 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg) di_args->bytes_used = dev->bytes_used; di_args->total_bytes = dev->total_bytes; memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); - strncpy(di_args->path, dev->name, sizeof(di_args->path)); + if (dev->name) + strncpy(di_args->path, dev->name, sizeof(di_args->path)); + else + di_args->path[0] = '\0'; out: if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args))) diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 4f69028a68c4..086e6bdae1c4 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h @@ -252,7 +252,7 @@ struct btrfs_data_container { struct btrfs_ioctl_ino_path_args { __u64 inum; /* in */ - __u32 size; /* in */ + __u64 size; /* in */ __u64 reserved[4]; /* struct btrfs_data_container *fspath; out */ __u64 fspath; /* out */ @@ -260,7 +260,7 @@ struct btrfs_ioctl_ino_path_args { struct btrfs_ioctl_logical_ino_args { __u64 logical; /* in */ - __u32 size; /* in */ + __u64 size; /* in */ __u64 reserved[4]; /* struct btrfs_data_container *inodes; out */ __u64 inodes; diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index dc5d33146fdb..ac5d01085884 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -250,14 +250,12 @@ static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info, struct btrfs_bio *bbio) { int ret; - int looped = 0; struct reada_zone *zone; struct btrfs_block_group_cache *cache = NULL; u64 start; u64 end; int i; -again: zone = NULL; spin_lock(&fs_info->reada_lock); ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, @@ -274,9 +272,6 @@ again: spin_unlock(&fs_info->reada_lock); } - if (looped) - return NULL; - cache = btrfs_lookup_block_group(fs_info, logical); if (!cache) return NULL; @@ -307,13 +302,15 @@ again: ret = radix_tree_insert(&dev->reada_zones, (unsigned long)(zone->end >> PAGE_CACHE_SHIFT), zone); - spin_unlock(&fs_info->reada_lock); - if (ret) { + if (ret == -EEXIST) { kfree(zone); - looped = 1; - goto again; + ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone, + logical >> PAGE_CACHE_SHIFT, 1); + if (ret == 1) + kref_get(&zone->refcnt); } + spin_unlock(&fs_info->reada_lock); return zone; } @@ -323,26 +320,26 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root, struct btrfs_key *top, int level) { int ret; - int looped = 0; struct reada_extent *re = NULL; + struct reada_extent *re_exist = NULL; struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; struct btrfs_bio *bbio = NULL; struct btrfs_device *dev; + struct btrfs_device *prev_dev; u32 blocksize; u64 length; int nzones = 0; int i; unsigned long index = logical >> PAGE_CACHE_SHIFT; -again: spin_lock(&fs_info->reada_lock); re = radix_tree_lookup(&fs_info->reada_tree, index); if (re) kref_get(&re->refcnt); spin_unlock(&fs_info->reada_lock); - if (re || looped) + if (re) return re; re = kzalloc(sizeof(*re), GFP_NOFS); @@ -398,16 +395,31 @@ again: /* insert extent in reada_tree + all per-device trees, all or nothing */ spin_lock(&fs_info->reada_lock); ret = radix_tree_insert(&fs_info->reada_tree, index, re); - if (ret) { + if (ret == -EEXIST) { + re_exist = radix_tree_lookup(&fs_info->reada_tree, index); + BUG_ON(!re_exist); + kref_get(&re_exist->refcnt); 
spin_unlock(&fs_info->reada_lock); - if (ret != -ENOMEM) { - /* someone inserted the extent in the meantime */ - looped = 1; - } goto error; } + if (ret) { + spin_unlock(&fs_info->reada_lock); + goto error; + } + prev_dev = NULL; for (i = 0; i < nzones; ++i) { dev = bbio->stripes[i].dev; + if (dev == prev_dev) { + /* + * in case of DUP, just add the first zone. As both + * are on the same device, there's nothing to gain + * from adding both. + * Also, it wouldn't work, as the tree is per device + * and adding would fail with EEXIST + */ + continue; + } + prev_dev = dev; ret = radix_tree_insert(&dev->reada_extents, index, re); if (ret) { while (--i >= 0) { @@ -450,9 +462,7 @@ error: } kfree(bbio); kfree(re); - if (looped) - goto again; - return NULL; + return re_exist; } static void reada_kref_dummy(struct kref *kr) diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 017281dbb2a7..646ee21bb035 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1279,7 +1279,9 @@ static int __update_reloc_root(struct btrfs_root *root, int del) if (rb_node) backref_tree_panic(rb_node, -EEXIST, node->bytenr); } else { + spin_lock(&root->fs_info->trans_lock); list_del_init(&root->root_list); + spin_unlock(&root->fs_info->trans_lock); kfree(node); } return 0; @@ -3811,7 +3813,7 @@ restart: ret = btrfs_block_rsv_check(rc->extent_root, rc->block_rsv, 5); if (ret < 0) { - if (ret != -EAGAIN) { + if (ret != -ENOSPC) { err = ret; WARN_ON(1); break; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index bc015f77f3ea..2f3d6f917fb3 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -998,6 +998,7 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev, page = sblock->pagev + page_index; page->logical = logical; page->physical = bbio->stripes[mirror_index].physical; + /* for missing devices, bdev is NULL */ page->bdev = bbio->stripes[mirror_index].dev->bdev; page->mirror_num = mirror_index + 1; page->page = alloc_page(GFP_NOFS); @@ -1042,6 +1043,12 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info, struct scrub_page *page = sblock->pagev + page_num; DECLARE_COMPLETION_ONSTACK(complete); + if (page->bdev == NULL) { + page->io_error = 1; + sblock->no_io_error_seen = 0; + continue; + } + BUG_ON(!page->page); bio = bio_alloc(GFP_NOFS, 1); if (!bio) @@ -1257,12 +1264,6 @@ static int scrub_checksum_data(struct scrub_block *sblock) if (memcmp(csum, on_disk_csum, sdev->csum_size)) fail = 1; - if (fail) { - spin_lock(&sdev->stat_lock); - ++sdev->stat.csum_errors; - spin_unlock(&sdev->stat_lock); - } - return fail; } @@ -1335,15 +1336,6 @@ static int scrub_checksum_tree_block(struct scrub_block *sblock) if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size)) ++crc_fail; - if (crc_fail || fail) { - spin_lock(&sdev->stat_lock); - if (crc_fail) - ++sdev->stat.csum_errors; - if (fail) - ++sdev->stat.verify_errors; - spin_unlock(&sdev->stat_lock); - } - return fail || crc_fail; } diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 8d5d380f7bdb..c5f8fca4195f 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -815,7 +815,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait) return 0; } - btrfs_start_delalloc_inodes(root, 0); btrfs_wait_ordered_extents(root, 0, 0); trans = btrfs_start_transaction(root, 0); @@ -1148,13 +1147,15 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) if (ret) goto restore; } else { - if (fs_info->fs_devices->rw_devices == 0) + if (fs_info->fs_devices->rw_devices == 0) { ret = -EACCES; goto restore; + } - if 
(btrfs_super_log_root(fs_info->super_copy) != 0) + if (btrfs_super_log_root(fs_info->super_copy) != 0) { ret = -EINVAL; goto restore; + } ret = btrfs_cleanup_fs_roots(fs_info); if (ret) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 11b77a59db62..36422254ef67 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -73,8 +73,10 @@ loop: cur_trans = root->fs_info->running_transaction; if (cur_trans) { - if (cur_trans->aborted) + if (cur_trans->aborted) { + spin_unlock(&root->fs_info->trans_lock); return cur_trans->aborted; + } atomic_inc(&cur_trans->use_count); atomic_inc(&cur_trans->num_writers); cur_trans->num_joined++; @@ -1400,6 +1402,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, ret = commit_fs_roots(trans, root); if (ret) { mutex_unlock(&root->fs_info->tree_log_mutex); + mutex_unlock(&root->fs_info->reloc_mutex); goto cleanup_transaction; } @@ -1411,6 +1414,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, ret = commit_cowonly_roots(trans, root); if (ret) { mutex_unlock(&root->fs_info->tree_log_mutex); + mutex_unlock(&root->fs_info->reloc_mutex); goto cleanup_transaction; } diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index d017283ae6f5..eb1ae908582c 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -279,7 +279,7 @@ static int process_one_buffer(struct btrfs_root *log, log->fs_info->extent_root, eb->start, eb->len); - if (btrfs_buffer_uptodate(eb, gen)) { + if (btrfs_buffer_uptodate(eb, gen, 0)) { if (wc->write) btrfs_write_tree_block(eb); if (wc->wait) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 759d02486d7c..1411b99555a4 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -3324,12 +3324,14 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, stripe_size = devices_info[ndevs-1].max_avail; num_stripes = ndevs * dev_stripes; - if (stripe_size * num_stripes > max_chunk_size * ncopies) { + if (stripe_size * ndevs > max_chunk_size * ncopies) { stripe_size = max_chunk_size * ncopies; - do_div(stripe_size, num_stripes); + do_div(stripe_size, ndevs); } do_div(stripe_size, dev_stripes); + + /* align to BTRFS_STRIPE_LEN */ do_div(stripe_size, BTRFS_STRIPE_LEN); stripe_size *= BTRFS_STRIPE_LEN; @@ -3805,10 +3807,11 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw, else if (mirror_num) stripe_index += mirror_num - 1; else { + int old_stripe_index = stripe_index; stripe_index = find_live_mirror(map, stripe_index, map->sub_stripes, stripe_index + current->pid % map->sub_stripes); - mirror_num = stripe_index + 1; + mirror_num = stripe_index - old_stripe_index + 1; } } else { /* @@ -4350,8 +4353,10 @@ static int open_seed_devices(struct btrfs_root *root, u8 *fsid) ret = __btrfs_open_devices(fs_devices, FMODE_READ, root->fs_info->bdev_holder); - if (ret) + if (ret) { + free_fs_devices(fs_devices); goto out; + } if (!fs_devices->seeding) { __btrfs_close_devices(fs_devices); diff --git a/fs/buffer.c b/fs/buffer.c index 36d66653b931..351e18ea2e53 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -985,7 +985,6 @@ grow_dev_page(struct block_device *bdev, sector_t block, return page; failed: - BUG(); unlock_page(page); page_cache_release(page); return NULL; diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index d34212822444..541ef81f6ae8 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -370,13 +370,13 @@ cifs_show_options(struct seq_file *s, struct dentry *root) (int)(srcaddr->sa_family)); } - seq_printf(s, ",uid=%d", cifs_sb->mnt_uid); + 
seq_printf(s, ",uid=%u", cifs_sb->mnt_uid); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID) seq_printf(s, ",forceuid"); else seq_printf(s, ",noforceuid"); - seq_printf(s, ",gid=%d", cifs_sb->mnt_gid); + seq_printf(s, ",gid=%u", cifs_sb->mnt_gid); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) seq_printf(s, ",forcegid"); else @@ -434,11 +434,15 @@ cifs_show_options(struct seq_file *s, struct dentry *root) seq_printf(s, ",noperm"); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) seq_printf(s, ",strictcache"); + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) + seq_printf(s, ",backupuid=%u", cifs_sb->mnt_backupuid); + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) + seq_printf(s, ",backupgid=%u", cifs_sb->mnt_backupgid); - seq_printf(s, ",rsize=%d", cifs_sb->rsize); - seq_printf(s, ",wsize=%d", cifs_sb->wsize); + seq_printf(s, ",rsize=%u", cifs_sb->rsize); + seq_printf(s, ",wsize=%u", cifs_sb->wsize); /* convert actimeo and display it in seconds */ - seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ); + seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ); return 0; } @@ -695,7 +699,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin) * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate * the cached file length */ - if (origin != SEEK_SET || origin != SEEK_CUR) { + if (origin != SEEK_SET && origin != SEEK_CUR) { int rc; struct inode *inode = file->f_path.dentry->d_inode; diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index d1389bb33ceb..65365358c976 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -125,5 +125,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "1.77" +#define CIFS_VERSION "1.78" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index f52c5ab78f9d..da2f5446fa7a 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -4844,8 +4844,12 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr, max_len = data_end - temp; node->node_name = cifs_strndup_from_utf16(temp, max_len, is_unicode, nls_codepage); - if (!node->node_name) + if (!node->node_name) { rc = -ENOMEM; + goto parse_DFS_referrals_exit; + } + + ref++; } parse_DFS_referrals_exit: diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index f31dc9ac37b7..5dcc55197fb3 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -215,6 +215,8 @@ static const match_table_t cifs_mount_option_tokens = { { Opt_ignore, "cred" }, { Opt_ignore, "credentials" }, + { Opt_ignore, "cred=%s" }, + { Opt_ignore, "credentials=%s" }, { Opt_ignore, "guest" }, { Opt_ignore, "rw" }, { Opt_ignore, "ro" }, @@ -2183,6 +2185,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) tcp_ses->session_estab = false; tcp_ses->sequence_number = 0; tcp_ses->lstrp = jiffies; + spin_lock_init(&tcp_ses->req_lock); INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); INIT_LIST_HEAD(&tcp_ses->smb_ses_list); INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); @@ -3228,10 +3231,6 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, cifs_sb->mnt_uid = pvolume_info->linux_uid; cifs_sb->mnt_gid = pvolume_info->linux_gid; - if (pvolume_info->backupuid_specified) - cifs_sb->mnt_backupuid = pvolume_info->backupuid; - if (pvolume_info->backupgid_specified) - cifs_sb->mnt_backupgid = pvolume_info->backupgid; cifs_sb->mnt_file_mode = pvolume_info->file_mode; cifs_sb->mnt_dir_mode = pvolume_info->dir_mode; cFYI(1, "file mode: 
0x%hx dir mode: 0x%hx", @@ -3262,10 +3261,14 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD; if (pvolume_info->cifs_acl) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; - if (pvolume_info->backupuid_specified) + if (pvolume_info->backupuid_specified) { cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID; - if (pvolume_info->backupgid_specified) + cifs_sb->mnt_backupuid = pvolume_info->backupuid; + } + if (pvolume_info->backupgid_specified) { cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID; + cifs_sb->mnt_backupgid = pvolume_info->backupgid; + } if (pvolume_info->override_uid) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID; if (pvolume_info->override_gid) @@ -3614,22 +3617,6 @@ cifs_get_volume_info(char *mount_data, const char *devname) return volume_info; } -/* make sure ra_pages is a multiple of rsize */ -static inline unsigned int -cifs_ra_pages(struct cifs_sb_info *cifs_sb) -{ - unsigned int reads; - unsigned int rsize_pages = cifs_sb->rsize / PAGE_CACHE_SIZE; - - if (rsize_pages >= default_backing_dev_info.ra_pages) - return default_backing_dev_info.ra_pages; - else if (rsize_pages == 0) - return rsize_pages; - - reads = default_backing_dev_info.ra_pages / rsize_pages; - return reads * rsize_pages; -} - int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) { @@ -3717,7 +3704,7 @@ try_mount_again: cifs_sb->rsize = cifs_negotiate_rsize(tcon, volume_info); /* tune readahead according to rsize */ - cifs_sb->bdi.ra_pages = cifs_ra_pages(cifs_sb); + cifs_sb->bdi.ra_pages = cifs_sb->rsize / PAGE_CACHE_SIZE; remote_path_check: #ifdef CONFIG_CIFS_DFS_UPCALL diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index d172c8ed9017..ec4e9a2a12f8 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -668,12 +668,19 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) return 0; else { /* - * Forcibly invalidate automounting directory inodes - * (remote DFS directories) so to have them - * instantiated again for automount + * If the inode wasn't known to be a dfs entry when + * the dentry was instantiated, such as when created + * via ->readdir(), it needs to be set now since the + * attributes will have been updated by + * cifs_revalidate_dentry(). */ - if (IS_AUTOMOUNT(direntry->d_inode)) - return 0; + if (IS_AUTOMOUNT(direntry->d_inode) && + !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) { + spin_lock(&direntry->d_lock); + direntry->d_flags |= DCACHE_NEED_AUTOMOUNT; + spin_unlock(&direntry->d_lock); + } + return 1; } } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index fae765dac934..81725e9286e9 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2178,7 +2178,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov, unsigned long nr_pages, i; size_t copied, len, cur_len; ssize_t total_written = 0; - loff_t offset = *poffset; + loff_t offset; struct iov_iter it; struct cifsFileInfo *open_file; struct cifs_tcon *tcon; @@ -2200,6 +2200,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov, cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); open_file = file->private_data; tcon = tlink_tcon(open_file->tlink); + offset = *poffset; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) pid = open_file->pid; diff --git a/fs/dcache.c b/fs/dcache.c index b60ddc41d783..b80531c91779 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -141,18 +141,29 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer, * Compare 2 name strings, return 0 if they match, otherwise non-zero. 
* The strings are both count bytes long, and count is non-zero. */ +#ifdef CONFIG_DCACHE_WORD_ACCESS + +#include +/* + * NOTE! 'cs' and 'scount' come from a dentry, so it has a + * aligned allocation for this particular component. We don't + * strictly need the load_unaligned_zeropad() safety, but it + * doesn't hurt either. + * + * In contrast, 'ct' and 'tcount' can be from a pathname, and do + * need the careful unaligned handling. + */ static inline int dentry_cmp(const unsigned char *cs, size_t scount, const unsigned char *ct, size_t tcount) { -#ifdef CONFIG_DCACHE_WORD_ACCESS unsigned long a,b,mask; if (unlikely(scount != tcount)) return 1; for (;;) { - a = *(unsigned long *)cs; - b = *(unsigned long *)ct; + a = load_unaligned_zeropad(cs); + b = load_unaligned_zeropad(ct); if (tcount < sizeof(unsigned long)) break; if (unlikely(a != b)) @@ -165,7 +176,13 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount, } mask = ~(~0ul << tcount*8); return unlikely(!!((a ^ b) & mask)); +} + #else + +static inline int dentry_cmp(const unsigned char *cs, size_t scount, + const unsigned char *ct, size_t tcount) +{ if (scount != tcount) return 1; @@ -177,9 +194,10 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount, tcount--; } while (tcount); return 0; -#endif } +#endif + static void __d_free(struct rcu_head *head) { struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu); diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index fa5c07d51dcc..4c58d4a3adc4 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -1736,6 +1736,18 @@ static int _can_be_granted(struct dlm_rsb *r, struct dlm_lkb *lkb, int now) if (now && conv && !(lkb->lkb_exflags & DLM_LKF_QUECVT)) return 1; + /* + * Even if the convert is compat with all granted locks, + * QUECVT forces it behind other locks on the convert queue. + */ + + if (now && conv && (lkb->lkb_exflags & DLM_LKF_QUECVT)) { + if (list_empty(&r->res_convertqueue)) + return 1; + else + goto out; + } + /* * The NOORDER flag is set to avoid the standard vms rules on grant * order. 
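The fs/dcache.c change above switches dentry_cmp() to a word-at-a-time comparison: whole unsigned longs are compared until less than one word of the name remains, and the final word is masked so that bytes beyond the name length are ignored. Below is a small user-space sketch of that masking idea, not part of the patch: memcpy() stands in for load_unaligned_zeropad(), the buffers are padded to a full word so the trailing over-read stays in bounds, and (as with CONFIG_DCACHE_WORD_ACCESS at this point) a little-endian byte order is assumed.

#include <stdio.h>
#include <string.h>

/*
 * Compare two names of equal, non-zero length 'count' one unsigned long
 * at a time; in the last word only the low 'count' remaining bytes are
 * significant.  Returns 0 on match, non-zero otherwise.
 */
static int wordwise_cmp(const unsigned char *cs, const unsigned char *ct,
			size_t count)
{
	unsigned long a, b, mask;

	for (;;) {
		memcpy(&a, cs, sizeof(a));	/* stand-in for load_unaligned_zeropad() */
		memcpy(&b, ct, sizeof(b));
		if (count < sizeof(unsigned long))
			break;
		if (a != b)
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		count -= sizeof(unsigned long);
		if (!count)
			return 0;
	}
	mask = ~(~0ul << (count * 8));	/* keep only the remaining bytes (little endian) */
	return !!((a ^ b) & mask);
}

int main(void)
{
	unsigned char n1[16] = "foobar";	/* padded so the word loads stay in bounds */
	unsigned char n2[16] = "foobaz";

	printf("%d %d\n", wordwise_cmp(n1, n2, 6), wordwise_cmp(n1, n1, 6));
	return 0;
}

The mask expression is the same one the patch uses, ~(~0ul << tcount*8): masking the last word is what lets the loop load a full word even when the name length is not a multiple of sizeof(unsigned long).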
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 739b0985b398..c0b3c70ee87a 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1663,8 +1663,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, if (op == EPOLL_CTL_ADD) { if (is_file_epoll(tfile)) { error = -ELOOP; - if (ep_loop_check(ep, tfile) != 0) + if (ep_loop_check(ep, tfile) != 0) { + clear_tfile_check_list(); goto error_tgt_fput; + } } else list_add(&tfile->f_tfile_llink, &tfile_check_list); } diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 6da193564e43..e1fb1d5de58e 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1597,7 +1597,9 @@ static int parse_options(char *options, struct super_block *sb, unsigned int *journal_ioprio, int is_remount) { +#ifdef CONFIG_QUOTA struct ext4_sb_info *sbi = EXT4_SB(sb); +#endif char *p; substring_t args[MAX_OPT_ARGS]; int token; diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c index f8411bd1b805..5f5e70e047dc 100644 --- a/fs/gfs2/lock_dlm.c +++ b/fs/gfs2/lock_dlm.c @@ -200,10 +200,11 @@ static int make_mode(const unsigned int lmstate) return -1; } -static u32 make_flags(const u32 lkid, const unsigned int gfs_flags, +static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags, const int req) { u32 lkf = DLM_LKF_VALBLK; + u32 lkid = gl->gl_lksb.sb_lkid; if (gfs_flags & LM_FLAG_TRY) lkf |= DLM_LKF_NOQUEUE; @@ -227,8 +228,11 @@ static u32 make_flags(const u32 lkid, const unsigned int gfs_flags, BUG(); } - if (lkid != 0) + if (lkid != 0) { lkf |= DLM_LKF_CONVERT; + if (test_bit(GLF_BLOCKING, &gl->gl_flags)) + lkf |= DLM_LKF_QUECVT; + } return lkf; } @@ -250,7 +254,7 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state, char strname[GDLM_STRNAME_BYTES] = ""; req = make_mode(req_state); - lkf = make_flags(gl->gl_lksb.sb_lkid, flags, req); + lkf = make_flags(gl, flags, req); gfs2_glstats_inc(gl, GFS2_LKS_DCOUNT); gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT); if (gl->gl_lksb.sb_lkid) { diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c index 4dfbfec357e8..ec2a9c23f0c9 100644 --- a/fs/hfsplus/catalog.c +++ b/fs/hfsplus/catalog.c @@ -366,6 +366,10 @@ int hfsplus_rename_cat(u32 cnid, err = hfs_brec_find(&src_fd); if (err) goto out; + if (src_fd.entrylength > sizeof(entry) || src_fd.entrylength < 0) { + err = -EIO; + goto out; + } hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, src_fd.entrylength); diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index 88e155f895c6..26b53fb09f68 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -150,6 +150,11 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) filp->f_pos++; /* fall through */ case 1: + if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { + err = -EIO; + goto out; + } + hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); if (be16_to_cpu(entry.type) != HFSPLUS_FOLDER_THREAD) { @@ -181,6 +186,12 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir) err = -EIO; goto out; } + + if (fd.entrylength > sizeof(entry) || fd.entrylength < 0) { + err = -EIO; + goto out; + } + hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, fd.entrylength); type = be16_to_cpu(entry.type); diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 28cf06e4ec84..001ef01d2fe2 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -485,6 +485,7 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb, inode->i_fop = &simple_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." 
entry) */ inc_nlink(inode); + lockdep_annotate_inode_mutex_key(inode); } return inode; } diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 806525a7269c..840f70f50792 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -723,7 +723,7 @@ start_journal_io: if (commit_transaction->t_need_data_flush && (journal->j_fs_dev != journal->j_dev) && (journal->j_flags & JBD2_BARRIER)) - blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL); + blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL); /* Done it all: now write the commit record asynchronously. */ if (JBD2_HAS_INCOMPAT_FEATURE(journal, @@ -859,7 +859,7 @@ wait_for_iobuf: if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) && journal->j_flags & JBD2_BARRIER) { - blkdev_issue_flush(journal->j_dev, GFP_KERNEL, NULL); + blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL); } if (err) diff --git a/fs/namei.c b/fs/namei.c index 0062dd17eb55..c42791914f82 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1429,7 +1429,7 @@ unsigned int full_name_hash(const unsigned char *name, unsigned int len) unsigned long hash = 0; for (;;) { - a = *(unsigned long *)name; + a = load_unaligned_zeropad(name); if (len < sizeof(unsigned long)) break; hash += a; @@ -1459,7 +1459,7 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp) do { hash = (hash + a) * 9; len += sizeof(unsigned long); - a = *(unsigned long *)(name+len); + a = load_unaligned_zeropad(name+len); /* Do we have any NUL or '/' bytes in this word? */ mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/')); } while (!mask); diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index 9c94297bb70e..7f6a23f0244e 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -38,6 +38,8 @@ #include /* various write calls */ #include +#include "../pnfs.h" +#include "../internal.h" #include "blocklayout.h" #define NFSDBG_FACILITY NFSDBG_PNFS_LD @@ -868,7 +870,7 @@ nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh, * GETDEVICEINFO's maxcount */ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; - max_pages = max_resp_sz >> PAGE_SHIFT; + max_pages = nfs_page_array_len(0, max_resp_sz); dprintk("%s max_resp_sz %u max_pages %d\n", __func__, max_resp_sz, max_pages); diff --git a/fs/nfs/client.c b/fs/nfs/client.c index da7b5e4ff9ec..60f7e4ec842c 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -1729,7 +1729,8 @@ error: */ struct nfs_server *nfs_clone_server(struct nfs_server *source, struct nfs_fh *fh, - struct nfs_fattr *fattr) + struct nfs_fattr *fattr, + rpc_authflavor_t flavor) { struct nfs_server *server; struct nfs_fattr *fattr_fsinfo; @@ -1758,7 +1759,7 @@ struct nfs_server *nfs_clone_server(struct nfs_server *source, error = nfs_init_server_rpcclient(server, source->client->cl_timeout, - source->client->cl_auth->au_flavor); + flavor); if (error < 0) goto out_free_server; if (!IS_ERR(source->client_acl)) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 4aaf0316d76a..8789210c6905 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1429,7 +1429,7 @@ static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry } open_flags = nd->intent.open.flags; - attr.ia_valid = 0; + attr.ia_valid = ATTR_OPEN; ctx = create_nfs_open_context(dentry, open_flags); res = ERR_CAST(ctx); @@ -1536,7 +1536,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) if (IS_ERR(ctx)) goto out; - attr.ia_valid = 0; + attr.ia_valid = ATTR_OPEN; if (openflags 
& O_TRUNC) { attr.ia_valid |= ATTR_SIZE; attr.ia_size = 0; diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c index b7f348bb618b..ba3019f5934c 100644 --- a/fs/nfs/idmap.c +++ b/fs/nfs/idmap.c @@ -554,12 +554,16 @@ static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event, struct nfs_client *clp; int error = 0; + if (!try_module_get(THIS_MODULE)) + return 0; + while ((clp = nfs_get_client_for_event(sb->s_fs_info, event))) { error = __rpc_pipefs_event(clp, event, sb); nfs_put_client(clp); if (error) break; } + module_put(THIS_MODULE); return error; } diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 2476dc69365f..b777bdaba4c5 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -165,7 +165,8 @@ extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *, extern void nfs_free_server(struct nfs_server *server); extern struct nfs_server *nfs_clone_server(struct nfs_server *, struct nfs_fh *, - struct nfs_fattr *); + struct nfs_fattr *, + rpc_authflavor_t); extern void nfs_mark_client_ready(struct nfs_client *clp, int state); extern int nfs4_check_client_ready(struct nfs_client *clp); extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp, @@ -186,10 +187,10 @@ static inline void nfs_fs_proc_exit(void) /* nfs4namespace.c */ #ifdef CONFIG_NFS_V4 -extern struct vfsmount *nfs_do_refmount(struct dentry *dentry); +extern struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry); #else static inline -struct vfsmount *nfs_do_refmount(struct dentry *dentry) +struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry) { return ERR_PTR(-ENOENT); } @@ -234,7 +235,6 @@ extern const u32 nfs41_maxwrite_overhead; /* nfs4proc.c */ #ifdef CONFIG_NFS_V4 extern struct rpc_procinfo nfs4_procedures[]; -void nfs_fixup_secinfo_attributes(struct nfs_fattr *, struct nfs_fh *); #endif extern int nfs4_init_ds_session(struct nfs_client *clp); diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 1807866bb3ab..d51868e5683c 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -148,66 +148,31 @@ rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors) return pseudoflavor; } -static int nfs_negotiate_security(const struct dentry *parent, - const struct dentry *dentry, - rpc_authflavor_t *flavor) +static struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir, + struct qstr *name, + struct nfs_fh *fh, + struct nfs_fattr *fattr) { - struct page *page; - struct nfs4_secinfo_flavors *flavors; - int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *); - int ret = -EPERM; - - secinfo = NFS_PROTO(parent->d_inode)->secinfo; - if (secinfo != NULL) { - page = alloc_page(GFP_KERNEL); - if (!page) { - ret = -ENOMEM; - goto out; - } - flavors = page_address(page); - ret = secinfo(parent->d_inode, &dentry->d_name, flavors); - *flavor = nfs_find_best_sec(flavors); - put_page(page); - } - -out: - return ret; -} - -static int nfs_lookup_with_sec(struct nfs_server *server, struct dentry *parent, - struct dentry *dentry, struct path *path, - struct nfs_fh *fh, struct nfs_fattr *fattr, - rpc_authflavor_t *flavor) -{ - struct rpc_clnt *clone; - struct rpc_auth *auth; int err; - err = nfs_negotiate_security(parent, path->dentry, flavor); - if (err < 0) - goto out; - clone = rpc_clone_client(server->client); - auth = rpcauth_create(*flavor, clone); - if (!auth) { - err = -EIO; - goto out_shutdown; - } - err = server->nfs_client->rpc_ops->lookup(clone, parent->d_inode, - &path->dentry->d_name, 
- fh, fattr); -out_shutdown: - rpc_shutdown_client(clone); -out: - return err; + if (NFS_PROTO(dir)->version == 4) + return nfs4_proc_lookup_mountpoint(dir, name, fh, fattr); + + err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr); + if (err) + return ERR_PTR(err); + return rpc_clone_client(NFS_SERVER(dir)->client); } #else /* CONFIG_NFS_V4 */ -static inline int nfs_lookup_with_sec(struct nfs_server *server, - struct dentry *parent, struct dentry *dentry, - struct path *path, struct nfs_fh *fh, - struct nfs_fattr *fattr, - rpc_authflavor_t *flavor) +static inline struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir, + struct qstr *name, + struct nfs_fh *fh, + struct nfs_fattr *fattr) { - return -EPERM; + int err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr); + if (err) + return ERR_PTR(err); + return rpc_clone_client(NFS_SERVER(dir)->client); } #endif /* CONFIG_NFS_V4 */ @@ -226,12 +191,10 @@ static inline int nfs_lookup_with_sec(struct nfs_server *server, struct vfsmount *nfs_d_automount(struct path *path) { struct vfsmount *mnt; - struct nfs_server *server = NFS_SERVER(path->dentry->d_inode); struct dentry *parent; struct nfs_fh *fh = NULL; struct nfs_fattr *fattr = NULL; - int err; - rpc_authflavor_t flavor = RPC_AUTH_UNIX; + struct rpc_clnt *client; dprintk("--> nfs_d_automount()\n"); @@ -249,21 +212,19 @@ struct vfsmount *nfs_d_automount(struct path *path) /* Look it up again to get its attributes */ parent = dget_parent(path->dentry); - err = server->nfs_client->rpc_ops->lookup(server->client, parent->d_inode, - &path->dentry->d_name, - fh, fattr); - if (err == -EPERM && NFS_PROTO(parent->d_inode)->secinfo != NULL) - err = nfs_lookup_with_sec(server, parent, path->dentry, path, fh, fattr, &flavor); + client = nfs_lookup_mountpoint(parent->d_inode, &path->dentry->d_name, fh, fattr); dput(parent); - if (err != 0) { - mnt = ERR_PTR(err); + if (IS_ERR(client)) { + mnt = ERR_CAST(client); goto out; } if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) - mnt = nfs_do_refmount(path->dentry); + mnt = nfs_do_refmount(client, path->dentry); else - mnt = nfs_do_submount(path->dentry, fh, fattr, flavor); + mnt = nfs_do_submount(path->dentry, fh, fattr, client->cl_auth->au_flavor); + rpc_shutdown_client(client); + if (IS_ERR(mnt)) goto out; diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 97ecc863dd76..8d75021020b3 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -59,6 +59,7 @@ struct nfs_unique_id { #define NFS_SEQID_CONFIRMED 1 struct nfs_seqid_counter { + ktime_t create_time; int owner_id; int flags; u32 counter; @@ -204,6 +205,9 @@ struct nfs4_state_maintenance_ops { extern const struct dentry_operations nfs4_dentry_operations; extern const struct inode_operations nfs4_dir_inode_operations; +/* nfs4namespace.c */ +struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *); + /* nfs4proc.c */ extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *); extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *); @@ -212,8 +216,11 @@ extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *); extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *); extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle); -extern int 
nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, - struct nfs4_fs_locations *fs_locations, struct page *page); +extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struct qstr *, + struct nfs4_fs_locations *, struct page *); +extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *, struct qstr *, + struct nfs_fh *, struct nfs_fattr *); +extern int nfs4_proc_secinfo(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *); extern int nfs4_release_lockowner(struct nfs4_lock_state *); extern const struct xattr_handler *nfs4_xattr_handlers[]; diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index a866bbd2890a..c9cff9adb2d3 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c @@ -699,7 +699,7 @@ get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_fla * GETDEVICEINFO's maxcount */ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; - max_pages = max_resp_sz >> PAGE_SHIFT; + max_pages = nfs_page_array_len(0, max_resp_sz); dprintk("%s inode %p max_resp_sz %u max_pages %d\n", __func__, inode, max_resp_sz, max_pages); diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c index 9c8eca315f43..a7f3dedc4ec7 100644 --- a/fs/nfs/nfs4namespace.c +++ b/fs/nfs/nfs4namespace.c @@ -51,6 +51,30 @@ Elong: return ERR_PTR(-ENAMETOOLONG); } +/* + * return the path component of ":" + * nfspath - the ":" string + * end - one past the last char that could contain ":" + * returns NULL on failure + */ +static char *nfs_path_component(const char *nfspath, const char *end) +{ + char *p; + + if (*nfspath == '[') { + /* parse [] escaped IPv6 addrs */ + p = strchr(nfspath, ']'); + if (p != NULL && ++p < end && *p == ':') + return p + 1; + } else { + /* otherwise split on first colon */ + p = strchr(nfspath, ':'); + if (p != NULL && p < end) + return p + 1; + } + return NULL; +} + /* * Determine the mount path as a string */ @@ -59,9 +83,9 @@ static char *nfs4_path(struct dentry *dentry, char *buffer, ssize_t buflen) char *limit; char *path = nfs_path(&limit, dentry, buffer, buflen); if (!IS_ERR(path)) { - char *colon = strchr(path, ':'); - if (colon && colon < limit) - path = colon + 1; + char *path_component = nfs_path_component(path, limit); + if (path_component) + return path_component; } return path; } @@ -108,6 +132,58 @@ static size_t nfs_parse_server_name(char *string, size_t len, return ret; } +static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name) +{ + struct page *page; + struct nfs4_secinfo_flavors *flavors; + rpc_authflavor_t flavor; + int err; + + page = alloc_page(GFP_KERNEL); + if (!page) + return -ENOMEM; + flavors = page_address(page); + + err = nfs4_proc_secinfo(inode, name, flavors); + if (err < 0) { + flavor = err; + goto out; + } + + flavor = nfs_find_best_sec(flavors); + +out: + put_page(page); + return flavor; +} + +/* + * Please call rpc_shutdown_client() when you are done with this client. 
+ */ +struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt, struct inode *inode, + struct qstr *name) +{ + struct rpc_clnt *clone; + struct rpc_auth *auth; + rpc_authflavor_t flavor; + + flavor = nfs4_negotiate_security(inode, name); + if (flavor < 0) + return ERR_PTR(flavor); + + clone = rpc_clone_client(clnt); + if (IS_ERR(clone)) + return clone; + + auth = rpcauth_create(flavor, clone); + if (!auth) { + rpc_shutdown_client(clone); + clone = ERR_PTR(-EIO); + } + + return clone; +} + static struct vfsmount *try_location(struct nfs_clone_mount *mountdata, char *page, char *page2, const struct nfs4_fs_location *location) @@ -224,7 +300,7 @@ out: * @dentry - dentry of referral * */ -struct vfsmount *nfs_do_refmount(struct dentry *dentry) +struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry) { struct vfsmount *mnt = ERR_PTR(-ENOMEM); struct dentry *parent; @@ -250,7 +326,7 @@ struct vfsmount *nfs_do_refmount(struct dentry *dentry) dprintk("%s: getting locations for %s/%s\n", __func__, parent->d_name.name, dentry->d_name.name); - err = nfs4_proc_fs_locations(parent->d_inode, &dentry->d_name, fs_locations, page); + err = nfs4_proc_fs_locations(client, parent->d_inode, &dentry->d_name, fs_locations, page); dput(parent); if (err != 0 || fs_locations->nlocations <= 0 || diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index f82bde005a82..99650aaf8937 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -838,7 +838,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, p->o_arg.open_flags = flags; p->o_arg.fmode = fmode & (FMODE_READ|FMODE_WRITE); p->o_arg.clientid = server->nfs_client->cl_clientid; - p->o_arg.id = sp->so_seqid.owner_id; + p->o_arg.id.create_time = ktime_to_ns(sp->so_seqid.create_time); + p->o_arg.id.uniquifier = sp->so_seqid.owner_id; p->o_arg.name = &dentry->d_name; p->o_arg.server = server; p->o_arg.bitmask = server->attr_bitmask; @@ -1466,8 +1467,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) goto unlock_no_action; rcu_read_unlock(); } - /* Update sequence id. */ - data->o_arg.id = sp->so_seqid.owner_id; + /* Update client id. 
*/ data->o_arg.clientid = sp->so_server->nfs_client->cl_clientid; if (data->o_arg.claim == NFS4_OPEN_CLAIM_PREVIOUS) { task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; @@ -1954,10 +1954,19 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, }; int err; do { - err = nfs4_handle_exception(server, - _nfs4_do_setattr(inode, cred, fattr, sattr, state), - &exception); + err = _nfs4_do_setattr(inode, cred, fattr, sattr, state); + switch (err) { + case -NFS4ERR_OPENMODE: + if (state && !(state->state & FMODE_WRITE)) { + err = -EBADF; + if (sattr->ia_valid & ATTR_OPEN) + err = -EACCES; + goto out; + } + } + err = nfs4_handle_exception(server, err, &exception); } while (exception.retry); +out: return err; } @@ -2368,8 +2377,9 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, * Note that we'll actually follow the referral later when * we detect fsid mismatch in inode revalidation */ -static int nfs4_get_referral(struct inode *dir, const struct qstr *name, - struct nfs_fattr *fattr, struct nfs_fh *fhandle) +static int nfs4_get_referral(struct rpc_clnt *client, struct inode *dir, + const struct qstr *name, struct nfs_fattr *fattr, + struct nfs_fh *fhandle) { int status = -ENOMEM; struct page *page = NULL; @@ -2382,7 +2392,7 @@ static int nfs4_get_referral(struct inode *dir, const struct qstr *name, if (locations == NULL) goto out; - status = nfs4_proc_fs_locations(dir, name, locations, page); + status = nfs4_proc_fs_locations(client, dir, name, locations, page); if (status != 0) goto out; /* Make sure server returned a different fsid for the referral */ @@ -2519,37 +2529,82 @@ static int _nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, return status; } -void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr, struct nfs_fh *fh) +static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr) { - memset(fh, 0, sizeof(struct nfs_fh)); - fattr->fsid.major = 1; fattr->valid |= NFS_ATTR_FATTR_TYPE | NFS_ATTR_FATTR_MODE | - NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_FSID | NFS_ATTR_FATTR_MOUNTPOINT; + NFS_ATTR_FATTR_NLINK | NFS_ATTR_FATTR_MOUNTPOINT; fattr->mode = S_IFDIR | S_IRUGO | S_IXUGO; fattr->nlink = 2; } +static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir, + struct qstr *name, struct nfs_fh *fhandle, + struct nfs_fattr *fattr) +{ + struct nfs4_exception exception = { }; + struct rpc_clnt *client = *clnt; + int err; + do { + err = _nfs4_proc_lookup(client, dir, name, fhandle, fattr); + switch (err) { + case -NFS4ERR_BADNAME: + err = -ENOENT; + goto out; + case -NFS4ERR_MOVED: + err = nfs4_get_referral(client, dir, name, fattr, fhandle); + goto out; + case -NFS4ERR_WRONGSEC: + err = -EPERM; + if (client != *clnt) + goto out; + + client = nfs4_create_sec_client(client, dir, name); + if (IS_ERR(client)) + return PTR_ERR(client); + + exception.retry = 1; + break; + default: + err = nfs4_handle_exception(NFS_SERVER(dir), err, &exception); + } + } while (exception.retry); + +out: + if (err == 0) + *clnt = client; + else if (client != *clnt) + rpc_shutdown_client(client); + + return err; +} + static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) { - struct nfs4_exception exception = { }; - int err; - do { - int status; + int status; + struct rpc_clnt *client = NFS_CLIENT(dir); - status = _nfs4_proc_lookup(clnt, dir, name, fhandle, fattr); - switch (status) { - case -NFS4ERR_BADNAME: - return -ENOENT; - case 
-NFS4ERR_MOVED: - return nfs4_get_referral(dir, name, fattr, fhandle); - case -NFS4ERR_WRONGSEC: - nfs_fixup_secinfo_attributes(fattr, fhandle); - } - err = nfs4_handle_exception(NFS_SERVER(dir), - status, &exception); - } while (exception.retry); - return err; + status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr); + if (client != NFS_CLIENT(dir)) { + rpc_shutdown_client(client); + nfs_fixup_secinfo_attributes(fattr); + } + return status; +} + +struct rpc_clnt * +nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, + struct nfs_fh *fhandle, struct nfs_fattr *fattr) +{ + int status; + struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir)); + + status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr); + if (status < 0) { + rpc_shutdown_client(client); + return ERR_PTR(status); + } + return client; } static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) @@ -3619,16 +3674,16 @@ out: return ret; } -static void nfs4_write_cached_acl(struct inode *inode, const char *buf, size_t acl_len) +static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) { struct nfs4_cached_acl *acl; - if (buf && acl_len <= PAGE_SIZE) { + if (pages && acl_len <= PAGE_SIZE) { acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL); if (acl == NULL) goto out; acl->cached = 1; - memcpy(acl->data, buf, acl_len); + _copy_from_pages(acl->data, pages, pgbase, acl_len); } else { acl = kmalloc(sizeof(*acl), GFP_KERNEL); if (acl == NULL) @@ -3661,7 +3716,6 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu struct nfs_getaclres res = { .acl_len = buflen, }; - void *resp_buf; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL], .rpc_argp = &args, @@ -3675,24 +3729,27 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu if (npages == 0) npages = 1; + /* Add an extra page to handle the bitmap returned */ + npages++; + for (i = 0; i < npages; i++) { pages[i] = alloc_page(GFP_KERNEL); if (!pages[i]) goto out_free; } - if (npages > 1) { - /* for decoding across pages */ - res.acl_scratch = alloc_page(GFP_KERNEL); - if (!res.acl_scratch) - goto out_free; - } + + /* for decoding across pages */ + res.acl_scratch = alloc_page(GFP_KERNEL); + if (!res.acl_scratch) + goto out_free; + args.acl_len = npages * PAGE_SIZE; args.acl_pgbase = 0; + /* Let decode_getfacl know not to fail if the ACL data is larger than * the page we send as a guess */ if (buf == NULL) res.acl_flags |= NFS4_ACL_LEN_REQUEST; - resp_buf = page_address(pages[0]); dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", __func__, buf, buflen, npages, args.acl_len); @@ -3703,9 +3760,9 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu acl_len = res.acl_len - res.acl_data_offset; if (acl_len > args.acl_len) - nfs4_write_cached_acl(inode, NULL, acl_len); + nfs4_write_cached_acl(inode, NULL, 0, acl_len); else - nfs4_write_cached_acl(inode, resp_buf + res.acl_data_offset, + nfs4_write_cached_acl(inode, pages, res.acl_data_offset, acl_len); if (buf) { ret = -ERANGE; @@ -4558,7 +4615,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) { struct nfs_server *server = NFS_SERVER(state->inode); - struct nfs4_exception exception = { }; + struct nfs4_exception exception = { + .inode = state->inode, + }; int err; do { @@ -4576,7 +4635,9 
@@ static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) { struct nfs_server *server = NFS_SERVER(state->inode); - struct nfs4_exception exception = { }; + struct nfs4_exception exception = { + .inode = state->inode, + }; int err; err = nfs4_set_lock_state(state, request); @@ -4676,6 +4737,7 @@ static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock * { struct nfs4_exception exception = { .state = state, + .inode = state->inode, }; int err; @@ -4721,6 +4783,20 @@ nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request) if (state == NULL) return -ENOLCK; + /* + * Don't rely on the VFS having checked the file open mode, + * since it won't do this for flock() locks. + */ + switch (request->fl_type & (F_RDLCK|F_WRLCK|F_UNLCK)) { + case F_RDLCK: + if (!(filp->f_mode & FMODE_READ)) + return -EBADF; + break; + case F_WRLCK: + if (!(filp->f_mode & FMODE_WRITE)) + return -EBADF; + } + do { status = nfs4_proc_setlk(state, cmd, request); if ((status != -EAGAIN) || IS_SETLK(cmd)) @@ -4891,8 +4967,10 @@ static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr) fattr->nlink = 2; } -int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, - struct nfs4_fs_locations *fs_locations, struct page *page) +static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, + const struct qstr *name, + struct nfs4_fs_locations *fs_locations, + struct page *page) { struct nfs_server *server = NFS_SERVER(dir); u32 bitmask[2] = { @@ -4926,11 +5004,26 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name, nfs_fattr_init(&fs_locations->fattr); fs_locations->server = server; fs_locations->nlocations = 0; - status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 0); + status = nfs4_call_sync(client, server, &msg, &args.seq_args, &res.seq_res, 0); dprintk("%s: returned status = %d\n", __func__, status); return status; } +int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir, + const struct qstr *name, + struct nfs4_fs_locations *fs_locations, + struct page *page) +{ + struct nfs4_exception exception = { }; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(dir), + _nfs4_proc_fs_locations(client, dir, name, fs_locations, page), + &exception); + } while (exception.retry); + return err; +} + static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct nfs4_secinfo_flavors *flavors) { int status; @@ -4953,8 +5046,8 @@ static int _nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, struct return status; } -static int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, - struct nfs4_secinfo_flavors *flavors) +int nfs4_proc_secinfo(struct inode *dir, const struct qstr *name, + struct nfs4_secinfo_flavors *flavors) { struct nfs4_exception exception = { }; int err; @@ -5029,10 +5122,9 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) nfs4_construct_boot_verifier(clp, &verifier); args.id_len = scnprintf(args.id, sizeof(args.id), - "%s/%s.%s/%u", + "%s/%s/%u", clp->cl_ipaddr, - init_utsname()->nodename, - init_utsname()->domainname, + clp->cl_rpcclient->cl_nodename, clp->cl_rpcclient->cl_auth->au_flavor); res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL); diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 0f43414eb25a..7f0fcfc1fe9d 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ 
-393,6 +393,7 @@ nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp) static void nfs4_init_seqid_counter(struct nfs_seqid_counter *sc) { + sc->create_time = ktime_get(); sc->flags = 0; sc->counter = 0; spin_lock_init(&sc->lock); @@ -434,13 +435,17 @@ nfs4_alloc_state_owner(struct nfs_server *server, static void nfs4_drop_state_owner(struct nfs4_state_owner *sp) { - if (!RB_EMPTY_NODE(&sp->so_server_node)) { + struct rb_node *rb_node = &sp->so_server_node; + + if (!RB_EMPTY_NODE(rb_node)) { struct nfs_server *server = sp->so_server; struct nfs_client *clp = server->nfs_client; spin_lock(&clp->cl_lock); - rb_erase(&sp->so_server_node, &server->state_owners); - RB_CLEAR_NODE(&sp->so_server_node); + if (!RB_EMPTY_NODE(rb_node)) { + rb_erase(rb_node, &server->state_owners); + RB_CLEAR_NODE(rb_node); + } spin_unlock(&clp->cl_lock); } } @@ -516,6 +521,14 @@ out: /** * nfs4_put_state_owner - Release a nfs4_state_owner * @sp: state owner data to release + * + * Note that we keep released state owners on an LRU + * list. + * This caches valid state owners so that they can be + * reused, to avoid the OPEN_CONFIRM on minor version 0. + * It also pins the uniquifier of dropped state owners for + * a while, to ensure that those state owner names are + * never reused. */ void nfs4_put_state_owner(struct nfs4_state_owner *sp) { @@ -525,15 +538,9 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp) if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock)) return; - if (!RB_EMPTY_NODE(&sp->so_server_node)) { - sp->so_expires = jiffies; - list_add_tail(&sp->so_lru, &server->state_owners_lru); - spin_unlock(&clp->cl_lock); - } else { - nfs4_remove_state_owner_locked(sp); - spin_unlock(&clp->cl_lock); - nfs4_free_state_owner(sp); - } + sp->so_expires = jiffies; + list_add_tail(&sp->so_lru, &server->state_owners_lru); + spin_unlock(&clp->cl_lock); } /** diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index c74fdb114b48..c54aae364bee 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -74,7 +74,7 @@ static int nfs4_stat_to_errno(int); /* lock,open owner id: * we currently use size 2 (u64) out of (NFS4_OPAQUE_LIMIT >> 2) */ -#define open_owner_id_maxsz (1 + 1 + 4) +#define open_owner_id_maxsz (1 + 2 + 1 + 1 + 2) #define lock_owner_id_maxsz (1 + 1 + 4) #define decode_lockowner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) #define compound_encode_hdr_maxsz (3 + (NFS4_MAXTAGLEN >> 2)) @@ -1340,12 +1340,13 @@ static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_opena */ encode_nfs4_seqid(xdr, arg->seqid); encode_share_access(xdr, arg->fmode); - p = reserve_space(xdr, 32); + p = reserve_space(xdr, 36); p = xdr_encode_hyper(p, arg->clientid); - *p++ = cpu_to_be32(20); + *p++ = cpu_to_be32(24); p = xdr_encode_opaque_fixed(p, "open id:", 8); *p++ = cpu_to_be32(arg->server->s_dev); - xdr_encode_hyper(p, arg->id); + *p++ = cpu_to_be32(arg->id.uniquifier); + xdr_encode_hyper(p, arg->id.create_time); } static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg) @@ -4257,8 +4258,6 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap, status = decode_attr_error(xdr, bitmap, &err); if (status < 0) goto xdr_error; - if (err == -NFS4ERR_WRONGSEC) - nfs_fixup_secinfo_attributes(fattr, fh); status = decode_attr_filehandle(xdr, bitmap, fh); if (status < 0) @@ -4901,11 +4900,19 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, bitmap[3] = {0}; struct kvec *iov = req->rq_rcv_buf.head; int status; + size_t page_len = 
xdr->buf->page_len; res->acl_len = 0; if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto out; + bm_p = xdr->p; + res->acl_data_offset = be32_to_cpup(bm_p) + 2; + res->acl_data_offset <<= 2; + /* Check if the acl data starts beyond the allocated buffer */ + if (res->acl_data_offset > page_len) + return -ERANGE; + if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) goto out; if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0) @@ -4915,28 +4922,24 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, return -EIO; if (likely(bitmap[0] & FATTR4_WORD0_ACL)) { size_t hdrlen; - u32 recvd; /* The bitmap (xdr len + bitmaps) and the attr xdr len words * are stored with the acl data to handle the problem of * variable length bitmaps.*/ xdr->p = bm_p; - res->acl_data_offset = be32_to_cpup(bm_p) + 2; - res->acl_data_offset <<= 2; /* We ignore &savep and don't do consistency checks on * the attr length. Let userspace figure it out.... */ hdrlen = (u8 *)xdr->p - (u8 *)iov->iov_base; attrlen += res->acl_data_offset; - recvd = req->rq_rcv_buf.len - hdrlen; - if (attrlen > recvd) { + if (attrlen > page_len) { if (res->acl_flags & NFS4_ACL_LEN_REQUEST) { /* getxattr interface called with a NULL buf */ res->acl_len = attrlen; goto out; } - dprintk("NFS: acl reply: attrlen %u > recvd %u\n", - attrlen, recvd); + dprintk("NFS: acl reply: attrlen %u > page_len %zu\n", + attrlen, page_len); return -EINVAL; } xdr_read_pages(xdr, attrlen); @@ -5089,16 +5092,13 @@ out_err: return -EINVAL; } -static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res) +static int decode_secinfo_common(struct xdr_stream *xdr, struct nfs4_secinfo_res *res) { struct nfs4_secinfo_flavor *sec_flavor; int status; __be32 *p; int i, num_flavors; - status = decode_op_hdr(xdr, OP_SECINFO); - if (status) - goto out; p = xdr_inline_decode(xdr, 4); if (unlikely(!p)) goto out_overflow; @@ -5124,6 +5124,7 @@ static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res) res->flavors->num_flavors++; } + status = 0; out: return status; out_overflow: @@ -5131,7 +5132,23 @@ out_overflow: return -EIO; } +static int decode_secinfo(struct xdr_stream *xdr, struct nfs4_secinfo_res *res) +{ + int status = decode_op_hdr(xdr, OP_SECINFO); + if (status) + return status; + return decode_secinfo_common(xdr, res); +} + #if defined(CONFIG_NFS_V4_1) +static int decode_secinfo_no_name(struct xdr_stream *xdr, struct nfs4_secinfo_res *res) +{ + int status = decode_op_hdr(xdr, OP_SECINFO_NO_NAME); + if (status) + return status; + return decode_secinfo_common(xdr, res); +} + static int decode_exchange_id(struct xdr_stream *xdr, struct nfs41_exchange_id_res *res) { @@ -6816,7 +6833,7 @@ static int nfs4_xdr_dec_secinfo_no_name(struct rpc_rqst *rqstp, status = decode_putrootfh(xdr); if (status) goto out; - status = decode_secinfo(xdr, res); + status = decode_secinfo_no_name(xdr, res); out: return status; } diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c index 8d45f1c318ce..595c5fc21a19 100644 --- a/fs/nfs/objlayout/objlayout.c +++ b/fs/nfs/objlayout/objlayout.c @@ -604,7 +604,6 @@ int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay, { struct objlayout_deviceinfo *odi; struct pnfs_device pd; - struct super_block *sb; struct page *page, **pages; u32 *p; int err; @@ -623,7 +622,6 @@ int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay, pd.pglen = PAGE_SIZE; pd.mincount = 0; - sb = pnfslay->plh_inode->i_sb; err = 
nfs4_proc_getdeviceinfo(NFS_SERVER(pnfslay->plh_inode), &pd); dprintk("%s nfs_getdeviceinfo returned %d\n", __func__, err); if (err) diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index b5d451586943..38512bcd2e98 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -587,7 +587,7 @@ send_layoutget(struct pnfs_layout_hdr *lo, /* allocate pages for xdr post processing */ max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; - max_pages = max_resp_sz >> PAGE_SHIFT; + max_pages = nfs_page_array_len(0, max_resp_sz); pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); if (!pages) diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 9a0e8ef4a409..0a4be28c2ea3 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c @@ -322,7 +322,7 @@ out_bad: while (!list_empty(res)) { data = list_entry(res->next, struct nfs_read_data, list); list_del(&data->list); - nfs_readdata_free(data); + nfs_readdata_release(data); } nfs_readpage_release(req); return -ENOMEM; diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 37412f706b32..4ac7fca7e4bf 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2428,7 +2428,7 @@ nfs_xdev_mount(struct file_system_type *fs_type, int flags, dprintk("--> nfs_xdev_mount()\n"); /* create a new volume representation */ - server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr); + server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor); if (IS_ERR(server)) { error = PTR_ERR(server); goto out_err_noserver; @@ -2767,11 +2767,15 @@ static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type, char *root_devname; size_t len; - len = strlen(hostname) + 3; + len = strlen(hostname) + 5; root_devname = kmalloc(len, GFP_KERNEL); if (root_devname == NULL) return ERR_PTR(-ENOMEM); - snprintf(root_devname, len, "%s:/", hostname); + /* Does hostname needs to be enclosed in brackets? 
*/ + if (strchr(hostname, ':')) + snprintf(root_devname, len, "[%s]:/", hostname); + else + snprintf(root_devname, len, "%s:/", hostname); root_mnt = vfs_kern_mount(fs_type, flags, root_devname, data); kfree(root_devname); return root_mnt; @@ -2951,7 +2955,7 @@ nfs4_xdev_mount(struct file_system_type *fs_type, int flags, dprintk("--> nfs4_xdev_mount()\n"); /* create a new volume representation */ - server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr); + server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor); if (IS_ERR(server)) { error = PTR_ERR(server); goto out_err_noserver; diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 2c68818f68ac..c07462320f6b 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -682,7 +682,8 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode, req->wb_bytes = rqend - req->wb_offset; out_unlock: spin_unlock(&inode->i_lock); - nfs_clear_request_commit(req); + if (req) + nfs_clear_request_commit(req); return req; out_flushme: spin_unlock(&inode->i_lock); @@ -1018,7 +1019,7 @@ out_bad: while (!list_empty(res)) { data = list_entry(res->next, struct nfs_write_data, list); list_del(&data->list); - nfs_writedata_free(data); + nfs_writedata_release(data); } nfs_redirty_request(req); return -ENOMEM; diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 4767429264a2..ed3f9206a0ee 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -577,7 +577,7 @@ cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) struct cld_net *cn = nn->cld_net; if (mlen != sizeof(*cmsg)) { - dprintk("%s: got %lu bytes, expected %lu\n", __func__, mlen, + dprintk("%s: got %zu bytes, expected %zu\n", __func__, mlen, sizeof(*cmsg)); return -EINVAL; } diff --git a/fs/pipe.c b/fs/pipe.c index 25feaa3faac0..fec5e4ad071a 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -346,6 +346,16 @@ static const struct pipe_buf_operations anon_pipe_buf_ops = { .get = generic_pipe_buf_get, }; +static const struct pipe_buf_operations packet_pipe_buf_ops = { + .can_merge = 0, + .map = generic_pipe_buf_map, + .unmap = generic_pipe_buf_unmap, + .confirm = generic_pipe_buf_confirm, + .release = anon_pipe_buf_release, + .steal = generic_pipe_buf_steal, + .get = generic_pipe_buf_get, +}; + static ssize_t pipe_read(struct kiocb *iocb, const struct iovec *_iov, unsigned long nr_segs, loff_t pos) @@ -407,6 +417,13 @@ redo: ret += chars; buf->offset += chars; buf->len -= chars; + + /* Was it a packet buffer? 
Clean up and exit */ + if (buf->flags & PIPE_BUF_FLAG_PACKET) { + total_len = chars; + buf->len = 0; + } + if (!buf->len) { buf->ops = NULL; ops->release(pipe, buf); @@ -459,6 +476,11 @@ redo: return ret; } +static inline int is_packetized(struct file *file) +{ + return (file->f_flags & O_DIRECT) != 0; +} + static ssize_t pipe_write(struct kiocb *iocb, const struct iovec *_iov, unsigned long nr_segs, loff_t ppos) @@ -593,6 +615,11 @@ redo2: buf->ops = &anon_pipe_buf_ops; buf->offset = 0; buf->len = chars; + buf->flags = 0; + if (is_packetized(filp)) { + buf->ops = &packet_pipe_buf_ops; + buf->flags = PIPE_BUF_FLAG_PACKET; + } pipe->nrbufs = ++bufs; pipe->tmp_page = NULL; @@ -1013,7 +1040,7 @@ struct file *create_write_pipe(int flags) goto err_dentry; f->f_mapping = inode->i_mapping; - f->f_flags = O_WRONLY | (flags & O_NONBLOCK); + f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT)); f->f_version = 0; return f; @@ -1057,7 +1084,7 @@ int do_pipe_flags(int *fd, int flags) int error; int fdw, fdr; - if (flags & ~(O_CLOEXEC | O_NONBLOCK)) + if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT)) return -EINVAL; fw = create_write_pipe(flags); diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 2b9a7607cbd5..1030a716d155 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -597,9 +597,6 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, if (!page) continue; - if (PageReserved(page)) - continue; - /* Clear accessed and referenced bits. */ ptep_test_and_clear_young(vma, addr, pte); ClearPageReferenced(page); @@ -750,6 +747,8 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte) else if (pte_present(pte)) *pme = make_pme(PM_PFRAME(pte_pfn(pte)) | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); + else + *pme = make_pme(PM_NOT_PRESENT); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -764,6 +763,8 @@ static void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, if (pmd_present(pmd)) *pme = make_pme(PM_PFRAME(pmd_pfn(pmd) + offset) | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); + else + *pme = make_pme(PM_NOT_PRESENT); } #else static inline void thp_pmd_to_pagemap_entry(pagemap_entry_t *pme, @@ -804,8 +805,10 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, /* check to see if we've left 'vma' behind * and need a new, higher one */ - if (vma && (addr >= vma->vm_end)) + if (vma && (addr >= vma->vm_end)) { vma = find_vma(walk->mm, addr); + pme = make_pme(PM_NOT_PRESENT); + } /* check that 'vma' actually covers this address, * and that it isn't a huge page vma */ @@ -833,6 +836,8 @@ static void huge_pte_to_pagemap_entry(pagemap_entry_t *pme, if (pte_present(pte)) *pme = make_pme(PM_PFRAME(pte_pfn(pte) + offset) | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); + else + *pme = make_pme(PM_NOT_PRESENT); } /* This function walks within one hugetlb entry in the single call */ @@ -842,7 +847,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask, { struct pagemapread *pm = walk->private; int err = 0; - pagemap_entry_t pme = make_pme(PM_NOT_PRESENT); + pagemap_entry_t pme; for (; addr != end; addr += PAGE_SIZE) { int offset = (addr & ~hmask) >> PAGE_SHIFT; diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index eba66043cf1b..e8bcc4742e0e 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h @@ -499,9 +499,10 @@ typedef u64 acpi_integer; #define ACPI_STATE_D0 (u8) 0 #define ACPI_STATE_D1 (u8) 1 #define ACPI_STATE_D2 (u8) 2 -#define ACPI_STATE_D3 (u8) 3 -#define ACPI_STATE_D3_COLD (u8) 4 -#define ACPI_D_STATES_MAX ACPI_STATE_D3_COLD 
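The pipe changes above introduce a packet mode selected with O_DIRECT: every write() is queued as its own buffer flagged PIPE_BUF_FLAG_PACKET, and a read() consumes at most one such packet even if the caller's buffer is larger. A minimal user-space sketch of the intended behaviour, assuming a kernel that carries this change (buffer contents and sizes are illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fds[2];
        char buf[64];
        ssize_t n;

        if (pipe2(fds, O_DIRECT))               /* packetized pipe */
                return 1;

        /* Each write becomes a separate packet. */
        write(fds[1], "first", 5);
        write(fds[1], "second", 6);

        /* Each read returns at most one packet, despite the larger buffer. */
        n = read(fds[0], buf, sizeof(buf));     /* n == 5  ("first")  */
        n = read(fds[0], buf, sizeof(buf));     /* n == 6  ("second") */

        return n == 6 ? 0 : 1;
}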
+#define ACPI_STATE_D3_HOT (u8) 3 +#define ACPI_STATE_D3 (u8) 4 +#define ACPI_STATE_D3_COLD ACPI_STATE_D3 +#define ACPI_D_STATES_MAX ACPI_STATE_D3 #define ACPI_D_STATE_COUNT 5 #define ACPI_STATE_C0 (u8) 0 diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h index 0dd4e87f6fba..5e5e3865f1ed 100644 --- a/include/asm-generic/siginfo.h +++ b/include/asm-generic/siginfo.h @@ -35,6 +35,14 @@ typedef union sigval { #define __ARCH_SI_BAND_T long #endif +#ifndef __ARCH_SI_CLOCK_T +#define __ARCH_SI_CLOCK_T __kernel_clock_t +#endif + +#ifndef __ARCH_SI_ATTRIBUTES +#define __ARCH_SI_ATTRIBUTES +#endif + #ifndef HAVE_ARCH_SIGINFO_T typedef struct siginfo { @@ -72,8 +80,8 @@ typedef struct siginfo { __kernel_pid_t _pid; /* which child */ __ARCH_SI_UID_T _uid; /* sender's uid */ int _status; /* exit code */ - __kernel_clock_t _utime; - __kernel_clock_t _stime; + __ARCH_SI_CLOCK_T _utime; + __ARCH_SI_CLOCK_T _stime; } _sigchld; /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ @@ -91,7 +99,7 @@ typedef struct siginfo { int _fd; } _sigpoll; } _sifields; -} siginfo_t; +} __ARCH_SI_ATTRIBUTES siginfo_t; #endif diff --git a/include/asm-generic/statfs.h b/include/asm-generic/statfs.h index 0fd28e028de1..c749af9c0983 100644 --- a/include/asm-generic/statfs.h +++ b/include/asm-generic/statfs.h @@ -15,7 +15,7 @@ typedef __kernel_fsid_t fsid_t; * with a 10' pole. */ #ifndef __statfs_word -#if BITS_PER_LONG == 64 +#if __BITS_PER_LONG == 64 #define __statfs_word long #else #define __statfs_word __u32 diff --git a/include/linux/efi.h b/include/linux/efi.h index 88ec80670d5f..ec45ccd8708a 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -554,7 +554,18 @@ extern int __init efi_setup_pcdp_console(char *); #define EFI_VARIABLE_NON_VOLATILE 0x0000000000000001 #define EFI_VARIABLE_BOOTSERVICE_ACCESS 0x0000000000000002 #define EFI_VARIABLE_RUNTIME_ACCESS 0x0000000000000004 +#define EFI_VARIABLE_HARDWARE_ERROR_RECORD 0x0000000000000008 +#define EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS 0x0000000000000010 +#define EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS 0x0000000000000020 +#define EFI_VARIABLE_APPEND_WRITE 0x0000000000000040 +#define EFI_VARIABLE_MASK (EFI_VARIABLE_NON_VOLATILE | \ + EFI_VARIABLE_BOOTSERVICE_ACCESS | \ + EFI_VARIABLE_RUNTIME_ACCESS | \ + EFI_VARIABLE_HARDWARE_ERROR_RECORD | \ + EFI_VARIABLE_AUTHENTICATED_WRITE_ACCESS | \ + EFI_VARIABLE_TIME_BASED_AUTHENTICATED_WRITE_ACCESS | \ + EFI_VARIABLE_APPEND_WRITE) /* * The type of search to perform when calling boottime->locate_handle */ diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 8a1835855faa..fe5136d81454 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -159,7 +159,8 @@ static inline void eth_hw_addr_random(struct net_device *dev) * @addr1: Pointer to a six-byte array containing the Ethernet address * @addr2: Pointer other six-byte array containing the Ethernet address * - * Compare two ethernet addresses, returns 0 if equal + * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise. + * Unlike memcmp(), it doesn't return a value suitable for sorting. */ static inline unsigned compare_ether_addr(const u8 *addr1, const u8 *addr2) { @@ -184,10 +185,10 @@ static inline unsigned long zap_last_2bytes(unsigned long value) * @addr1: Pointer to an array of 8 bytes * @addr2: Pointer to an other array of 8 bytes * - * Compare two ethernet addresses, returns 0 if equal. 
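The reworded kernel-doc above stresses that compare_ether_addr() and compare_ether_addr_64bits() only distinguish equal from not-equal; unlike memcmp(), the non-zero value carries no ordering. A small sketch of a caller honouring that contract (the helper name is made up):

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper: is this frame addressed to the given interface? */
static bool frame_is_for_us(const struct sk_buff *skb,
                            const struct net_device *dev)
{
        const struct ethhdr *eth = eth_hdr(skb);

        /*
         * Test only for zero (equal) vs. non-zero (different); do not
         * use the result to sort addresses the way memcmp() allows.
         */
        return compare_ether_addr(eth->h_dest, dev->dev_addr) == 0;
}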
- * Same result than "memcmp(addr1, addr2, ETH_ALEN)" but without conditional - * branches, and possibly long word memory accesses on CPU allowing cheap - * unaligned memory reads. + * Compare two ethernet addresses, returns 0 if equal, non-zero otherwise. + * Unlike memcmp(), it doesn't return a value suitable for sorting. + * The function doesn't need any conditional branches and possibly uses + * word memory accesses on CPU allowing cheap unaligned memory reads. * arrays = { byte1, byte2, byte3, byte4, byte6, byte7, pad1, pad2} * * Please note that alignment of addr1 & addr2 is only guaranted to be 16 bits. diff --git a/include/linux/gpio-pxa.h b/include/linux/gpio-pxa.h index 05071ee34c3f..d755b28ba635 100644 --- a/include/linux/gpio-pxa.h +++ b/include/linux/gpio-pxa.h @@ -13,4 +13,8 @@ extern int pxa_last_gpio; extern int pxa_irq_to_gpio(int irq); +struct pxa_gpio_platform_data { + int (*gpio_set_wake)(unsigned int gpio, unsigned int on); +}; + #endif /* __GPIO_PXA_H */ diff --git a/include/linux/hsi/hsi.h b/include/linux/hsi/hsi.h index 4b178067f405..56fae865e272 100644 --- a/include/linux/hsi/hsi.h +++ b/include/linux/hsi/hsi.h @@ -26,9 +26,9 @@ #include #include #include -#include #include #include +#include /* HSI message ttype */ #define HSI_MSG_READ 0 @@ -121,18 +121,18 @@ static inline int hsi_register_board_info(struct hsi_board_info const *info, * @device: Driver model representation of the device * @tx_cfg: HSI TX configuration * @rx_cfg: HSI RX configuration - * @hsi_start_rx: Called after incoming wake line goes high - * @hsi_stop_rx: Called after incoming wake line goes low + * @e_handler: Callback for handling port events (RX Wake High/Low) + * @pclaimed: Keeps tracks if the clients claimed its associated HSI port + * @nb: Notifier block for port events */ struct hsi_client { struct device device; struct hsi_config tx_cfg; struct hsi_config rx_cfg; - void (*hsi_start_rx)(struct hsi_client *cl); - void (*hsi_stop_rx)(struct hsi_client *cl); /* private: */ + void (*ehandler)(struct hsi_client *, unsigned long); unsigned int pclaimed:1; - struct list_head link; + struct notifier_block nb; }; #define to_hsi_client(dev) container_of(dev, struct hsi_client, device) @@ -147,6 +147,10 @@ static inline void *hsi_client_drvdata(struct hsi_client *cl) return dev_get_drvdata(&cl->device); } +int hsi_register_port_event(struct hsi_client *cl, + void (*handler)(struct hsi_client *, unsigned long)); +int hsi_unregister_port_event(struct hsi_client *cl); + /** * struct hsi_client_driver - Driver associated to an HSI client * @driver: Driver model representation of the driver @@ -214,8 +218,7 @@ void hsi_free_msg(struct hsi_msg *msg); * @start_tx: Callback to inform that a client wants to TX data * @stop_tx: Callback to inform that a client no longer wishes to TX data * @release: Callback to inform that a client no longer uses the port - * @clients: List of hsi_clients using the port. - * @clock: Lock to serialize access to the clients list. + * @n_head: Notifier chain for signaling port events to the clients. 
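The hsi_client changes above replace the hsi_start_rx/hsi_stop_rx hooks with a single notifier-backed handler registered through hsi_register_port_event(). A rough sketch of a client driver adapting to the new API (driver and handler names are invented; HSI_EVENT_START_RX/HSI_EVENT_STOP_RX are the event codes the framework already provides):

#include <linux/hsi/hsi.h>

static void example_port_event(struct hsi_client *cl, unsigned long event)
{
        switch (event) {
        case HSI_EVENT_START_RX:        /* remote wake line went high */
                /* get ready to receive */
                break;
        case HSI_EVENT_STOP_RX:         /* remote wake line went low */
                /* stop receiving */
                break;
        }
}

static int example_client_probe(struct device *dev)
{
        struct hsi_client *cl = to_hsi_client(dev);
        int err;

        err = hsi_claim_port(cl, 0);
        if (err < 0)
                return err;

        return hsi_register_port_event(cl, example_port_event);
}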
*/ struct hsi_port { struct device device; @@ -231,14 +234,14 @@ struct hsi_port { int (*start_tx)(struct hsi_client *cl); int (*stop_tx)(struct hsi_client *cl); int (*release)(struct hsi_client *cl); - struct list_head clients; - spinlock_t clock; + /* private */ + struct atomic_notifier_head n_head; }; #define to_hsi_port(dev) container_of(dev, struct hsi_port, device) #define hsi_get_port(cl) to_hsi_port((cl)->device.parent) -void hsi_event(struct hsi_port *port, unsigned int event); +int hsi_event(struct hsi_port *port, unsigned long event); int hsi_claim_port(struct hsi_client *cl, unsigned int share); void hsi_release_port(struct hsi_client *cl); @@ -270,13 +273,13 @@ struct hsi_controller { struct module *owner; unsigned int id; unsigned int num_ports; - struct hsi_port *port; + struct hsi_port **port; }; #define to_hsi_controller(dev) container_of(dev, struct hsi_controller, device) struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags); -void hsi_free_controller(struct hsi_controller *hsi); +void hsi_put_controller(struct hsi_controller *hsi); int hsi_register_controller(struct hsi_controller *hsi); void hsi_unregister_controller(struct hsi_controller *hsi); @@ -294,7 +297,7 @@ static inline void *hsi_controller_drvdata(struct hsi_controller *hsi) static inline struct hsi_port *hsi_find_port_num(struct hsi_controller *hsi, unsigned int num) { - return (num < hsi->num_ports) ? &hsi->port[num] : NULL; + return (num < hsi->num_ports) ? hsi->port[num] : NULL; } /* diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h index 2463b6100333..1f90de0cfdbe 100644 --- a/include/linux/i2c/twl.h +++ b/include/linux/i2c/twl.h @@ -666,23 +666,11 @@ struct twl4030_codec_data { unsigned int check_defaults:1; unsigned int reset_registers:1; unsigned int hs_extmute:1; - u16 hs_left_step; - u16 hs_right_step; - u16 hf_left_step; - u16 hf_right_step; void (*set_hs_extmute)(int mute); }; struct twl4030_vibra_data { unsigned int coexist; - - /* twl6040 */ - unsigned int vibldrv_res; /* left driver resistance */ - unsigned int vibrdrv_res; /* right driver resistance */ - unsigned int viblmotor_res; /* left motor resistance */ - unsigned int vibrmotor_res; /* right motor resistance */ - int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */ - int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */ }; struct twl4030_audio_data { diff --git a/include/linux/irq.h b/include/linux/irq.h index 7810406f3d80..b27cfcfd3a59 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -49,6 +49,12 @@ typedef void (*irq_preflow_handler_t)(struct irq_data *data); * IRQ_TYPE_LEVEL_LOW - low level triggered * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits * IRQ_TYPE_SENSE_MASK - Mask for all the above bits + * IRQ_TYPE_DEFAULT - For use by some PICs to ask irq_set_type + * to setup the HW to a sane default (used + * by irqdomain map() callbacks to synchronize + * the HW state and SW flags for a newly + * allocated descriptor). 
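Per the description above, IRQ_TYPE_DEFAULT is meant to be fed back through the irq_set_type() machinery from an irqdomain map() callback so that the hardware and the descriptor's software flags agree. A hedged sketch of such a callback (the chip and its name are hypothetical):

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip example_pic_chip = {
        .name = "example-pic",
        /* .irq_set_type would treat IRQ_TYPE_DEFAULT as "sane default" */
};

static int example_pic_map(struct irq_domain *d, unsigned int virq,
                           irq_hw_number_t hw)
{
        irq_set_chip_and_handler(virq, &example_pic_chip, handle_level_irq);

        /* Synchronize HW state and SW flags for the new descriptor. */
        irq_set_irq_type(virq, IRQ_TYPE_DEFAULT);

        return 0;
}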
+ * * IRQ_TYPE_PROBE - Special flag for probing in progress * * Bits which can be modified via irq_set/clear/modify_status_flags() @@ -77,6 +83,7 @@ enum { IRQ_TYPE_LEVEL_LOW = 0x00000008, IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), IRQ_TYPE_SENSE_MASK = 0x0000000f, + IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK, IRQ_TYPE_PROBE = 0x00000010, diff --git a/include/linux/libata.h b/include/linux/libata.h index 42378d637ffb..e926df7b54c9 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -996,7 +996,8 @@ extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, extern void ata_sas_port_destroy(struct ata_port *); extern struct ata_port *ata_sas_port_alloc(struct ata_host *, struct ata_port_info *, struct Scsi_Host *); -extern int ata_sas_async_port_init(struct ata_port *); +extern void ata_sas_async_probe(struct ata_port *ap); +extern int ata_sas_sync_probe(struct ata_port *ap); extern int ata_sas_port_init(struct ata_port *); extern int ata_sas_port_start(struct ata_port *ap); extern void ata_sas_port_stop(struct ata_port *ap); diff --git a/include/linux/mfd/db5500-prcmu.h b/include/linux/mfd/db5500-prcmu.h index 9890687f582d..5a049dfaf153 100644 --- a/include/linux/mfd/db5500-prcmu.h +++ b/include/linux/mfd/db5500-prcmu.h @@ -8,70 +8,6 @@ #ifndef __MFD_DB5500_PRCMU_H #define __MFD_DB5500_PRCMU_H -#ifdef CONFIG_MFD_DB5500_PRCMU - -void db5500_prcmu_early_init(void); -int db5500_prcmu_set_epod(u16 epod_id, u8 epod_state); -int db5500_prcmu_set_display_clocks(void); -int db5500_prcmu_disable_dsipll(void); -int db5500_prcmu_enable_dsipll(void); -int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size); -int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size); -void db5500_prcmu_enable_wakeups(u32 wakeups); -int db5500_prcmu_request_clock(u8 clock, bool enable); -void db5500_prcmu_config_abb_event_readout(u32 abb_events); -void db5500_prcmu_get_abb_event_buffer(void __iomem **buf); -int prcmu_resetout(u8 resoutn, u8 state); -int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, - bool keep_ap_pll); -int db5500_prcmu_config_esram0_deep_sleep(u8 state); -void db5500_prcmu_system_reset(u16 reset_code); -u16 db5500_prcmu_get_reset_code(void); -bool db5500_prcmu_is_ac_wake_requested(void); -int db5500_prcmu_set_arm_opp(u8 opp); -int db5500_prcmu_get_arm_opp(void); - -#else /* !CONFIG_UX500_SOC_DB5500 */ - -static inline void db5500_prcmu_early_init(void) {} - -static inline int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size) -{ - return -ENOSYS; -} - -static inline int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) -{ - return -ENOSYS; -} - -static inline int db5500_prcmu_request_clock(u8 clock, bool enable) -{ - return 0; -} - -static inline int db5500_prcmu_set_display_clocks(void) -{ - return 0; -} - -static inline int db5500_prcmu_disable_dsipll(void) -{ - return 0; -} - -static inline int db5500_prcmu_enable_dsipll(void) -{ - return 0; -} - -static inline int db5500_prcmu_config_esram0_deep_sleep(u8 state) -{ - return 0; -} - -static inline void db5500_prcmu_enable_wakeups(u32 wakeups) {} - static inline int prcmu_resetout(u8 resoutn, u8 state) { return 0; @@ -82,8 +18,10 @@ static inline int db5500_prcmu_set_epod(u16 epod_id, u8 epod_state) return 0; } -static inline void db5500_prcmu_get_abb_event_buffer(void __iomem **buf) {} -static inline void db5500_prcmu_config_abb_event_readout(u32 abb_events) {} +static inline int db5500_prcmu_request_clock(u8 clock, bool enable) +{ + return 0; +} static 
inline int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll) @@ -91,7 +29,10 @@ static inline int db5500_prcmu_set_power_state(u8 state, bool keep_ulp_clk, return 0; } -static inline void db5500_prcmu_system_reset(u16 reset_code) {} +static inline int db5500_prcmu_config_esram0_deep_sleep(u8 state) +{ + return 0; +} static inline u16 db5500_prcmu_get_reset_code(void) { @@ -113,6 +54,51 @@ static inline int db5500_prcmu_get_arm_opp(void) return 0; } +static inline void db5500_prcmu_config_abb_event_readout(u32 abb_events) {} + +static inline void db5500_prcmu_get_abb_event_buffer(void __iomem **buf) {} + +static inline void db5500_prcmu_system_reset(u16 reset_code) {} + +static inline void db5500_prcmu_enable_wakeups(u32 wakeups) {} + +#ifdef CONFIG_MFD_DB5500_PRCMU + +void db5500_prcmu_early_init(void); +int db5500_prcmu_set_display_clocks(void); +int db5500_prcmu_disable_dsipll(void); +int db5500_prcmu_enable_dsipll(void); +int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size); +int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size); + +#else /* !CONFIG_UX500_SOC_DB5500 */ + +static inline void db5500_prcmu_early_init(void) {} + +static inline int db5500_prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size) +{ + return -ENOSYS; +} + +static inline int db5500_prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size) +{ + return -ENOSYS; +} + +static inline int db5500_prcmu_set_display_clocks(void) +{ + return 0; +} + +static inline int db5500_prcmu_disable_dsipll(void) +{ + return 0; +} + +static inline int db5500_prcmu_enable_dsipll(void) +{ + return 0; +} #endif /* CONFIG_MFD_DB5500_PRCMU */ diff --git a/include/linux/mfd/rc5t583.h b/include/linux/mfd/rc5t583.h index a2c61609d21d..0b64b19d81ab 100644 --- a/include/linux/mfd/rc5t583.h +++ b/include/linux/mfd/rc5t583.h @@ -26,6 +26,7 @@ #include #include +#include #define RC5T583_MAX_REGS 0xF8 @@ -279,14 +280,44 @@ struct rc5t583_platform_data { bool enable_shutdown; }; -int rc5t583_write(struct device *dev, u8 reg, uint8_t val); -int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val); -int rc5t583_set_bits(struct device *dev, unsigned int reg, - unsigned int bit_mask); -int rc5t583_clear_bits(struct device *dev, unsigned int reg, - unsigned int bit_mask); -int rc5t583_update(struct device *dev, unsigned int reg, - unsigned int val, unsigned int mask); +static inline int rc5t583_write(struct device *dev, uint8_t reg, uint8_t val) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + return regmap_write(rc5t583->regmap, reg, val); +} + +static inline int rc5t583_read(struct device *dev, uint8_t reg, uint8_t *val) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + unsigned int ival; + int ret; + ret = regmap_read(rc5t583->regmap, reg, &ival); + if (!ret) + *val = (uint8_t)ival; + return ret; +} + +static inline int rc5t583_set_bits(struct device *dev, unsigned int reg, + unsigned int bit_mask) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + return regmap_update_bits(rc5t583->regmap, reg, bit_mask, bit_mask); +} + +static inline int rc5t583_clear_bits(struct device *dev, unsigned int reg, + unsigned int bit_mask) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + return regmap_update_bits(rc5t583->regmap, reg, bit_mask, 0); +} + +static inline int rc5t583_update(struct device *dev, unsigned int reg, + unsigned int val, unsigned int mask) +{ + struct rc5t583 *rc5t583 = dev_get_drvdata(dev); + return regmap_update_bits(rc5t583->regmap, reg, mask, val); +} + int 
rc5t583_ext_power_req_config(struct device *dev, int deepsleep_id, int ext_pwr_req, int deepsleep_slot_nr); int rc5t583_irq_init(struct rc5t583 *rc5t583, int irq, int irq_base); diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h index 9bc9ac651dad..b15b5f03f5c4 100644 --- a/include/linux/mfd/twl6040.h +++ b/include/linux/mfd/twl6040.h @@ -174,8 +174,35 @@ #define TWL6040_SYSCLK_SEL_LPPLL 0 #define TWL6040_SYSCLK_SEL_HPPLL 1 +struct twl6040_codec_data { + u16 hs_left_step; + u16 hs_right_step; + u16 hf_left_step; + u16 hf_right_step; +}; + +struct twl6040_vibra_data { + unsigned int vibldrv_res; /* left driver resistance */ + unsigned int vibrdrv_res; /* right driver resistance */ + unsigned int viblmotor_res; /* left motor resistance */ + unsigned int vibrmotor_res; /* right motor resistance */ + int vddvibl_uV; /* VDDVIBL volt, set 0 for fixed reg */ + int vddvibr_uV; /* VDDVIBR volt, set 0 for fixed reg */ +}; + +struct twl6040_platform_data { + int audpwron_gpio; /* audio power-on gpio */ + unsigned int irq_base; + + struct twl6040_codec_data *codec; + struct twl6040_vibra_data *vibra; +}; + +struct regmap; + struct twl6040 { struct device *dev; + struct regmap *regmap; struct mutex mutex; struct mutex io_mutex; struct mutex irq_mutex; diff --git a/include/linux/mm.h b/include/linux/mm.h index d8738a464b94..74aa71bea1e4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1393,29 +1393,20 @@ extern int install_special_mapping(struct mm_struct *mm, extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); -extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, - unsigned long len, unsigned long prot, - unsigned long flag, unsigned long pgoff); extern unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff); - -static inline unsigned long do_mmap(struct file *file, unsigned long addr, - unsigned long len, unsigned long prot, - unsigned long flag, unsigned long offset) -{ - unsigned long ret = -EINVAL; - if ((offset + PAGE_ALIGN(len)) < offset) - goto out; - if (!(offset & ~PAGE_MASK)) - ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); -out: - return ret; -} - +extern unsigned long do_mmap(struct file *, unsigned long, + unsigned long, unsigned long, + unsigned long, unsigned long); extern int do_munmap(struct mm_struct *, unsigned long, size_t); -extern unsigned long do_brk(unsigned long, unsigned long); +/* These take the mm semaphore themselves */ +extern unsigned long vm_brk(unsigned long, unsigned long); +extern int vm_munmap(unsigned long, size_t); +extern unsigned long vm_mmap(struct file *, unsigned long, + unsigned long, unsigned long, + unsigned long, unsigned long); /* truncate.c */ extern void truncate_inode_pages(struct address_space *, loff_t); diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 01beae78f079..629b823f8836 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -481,7 +481,7 @@ struct mmc_driver { struct device_driver drv; int (*probe)(struct mmc_card *); void (*remove)(struct mmc_card *); - int (*suspend)(struct mmc_card *, pm_message_t); + int (*suspend)(struct mmc_card *); int (*resume)(struct mmc_card *); }; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5cbaa20f1659..33900a53c990 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1403,15 +1403,6 @@ static 
inline bool netdev_uses_dsa_tags(struct net_device *dev) return 0; } -#ifndef CONFIG_NET_NS -static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev) -{ - skb->dev = dev; -} -#else /* CONFIG_NET_NS */ -void skb_set_dev(struct sk_buff *skb, struct net_device *dev); -#endif - static inline bool netdev_uses_trailer_tags(struct net_device *dev) { #ifdef CONFIG_NET_DSA_TAG_TRAILER diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h index 0ddd161f3b06..31d2844e6572 100644 --- a/include/linux/netfilter_bridge.h +++ b/include/linux/netfilter_bridge.h @@ -104,9 +104,18 @@ struct bridge_skb_cb { } daddr; }; +static inline void br_drop_fake_rtable(struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + + if (dst && (dst->flags & DST_FAKE_RTABLE)) + skb_dst_drop(skb); +} + #else #define nf_bridge_maybe_copy_header(skb) (0) #define nf_bridge_pad(skb) (0) +#define br_drop_fake_rtable(skb) do { } while (0) #endif /* CONFIG_BRIDGE_NETFILTER */ #endif /* __KERNEL__ */ diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index bfd0d1bf6707..7ba3551a0414 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -312,6 +312,11 @@ struct nfs4_layoutreturn { int rpc_status; }; +struct stateowner_id { + __u64 create_time; + __u32 uniquifier; +}; + /* * Arguments to the open call. */ @@ -321,7 +326,7 @@ struct nfs_openargs { int open_flags; fmode_t fmode; __u64 clientid; - __u64 id; + struct stateowner_id id; union { struct { struct iattr * attrs; /* UNCHECKED, GUARDED */ diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h index fee4349364f7..e4d1de742502 100644 --- a/include/linux/pinctrl/machine.h +++ b/include/linux/pinctrl/machine.h @@ -12,6 +12,8 @@ #ifndef __LINUX_PINCTRL_MACHINE_H #define __LINUX_PINCTRL_MACHINE_H +#include + #include "pinctrl-state.h" enum pinctrl_map_type { @@ -148,7 +150,7 @@ struct pinctrl_map { #define PIN_MAP_CONFIGS_GROUP_HOG_DEFAULT(dev, grp, cfgs) \ PIN_MAP_CONFIGS_GROUP(dev, PINCTRL_STATE_DEFAULT, dev, grp, cfgs) -#ifdef CONFIG_PINMUX +#ifdef CONFIG_PINCTRL extern int pinctrl_register_mappings(struct pinctrl_map const *map, unsigned num_maps); diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 6d626ff0cfd0..e1ac1ce16fb0 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -6,6 +6,7 @@ #define PIPE_BUF_FLAG_LRU 0x01 /* page is on the LRU */ #define PIPE_BUF_FLAG_ATOMIC 0x02 /* was atomically mapped */ #define PIPE_BUF_FLAG_GIFT 0x04 /* page is a gift */ +#define PIPE_BUF_FLAG_PACKET 0x08 /* read() as a packet */ /** * struct pipe_buffer - a linux kernel pipe buffer diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index c6db9fb33c44..600060e25ec6 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -141,7 +141,7 @@ static inline unsigned __read_seqcount_begin(const seqcount_t *s) unsigned ret; repeat: - ret = s->sequence; + ret = ACCESS_ONCE(s->sequence); if (unlikely(ret & 1)) { cpu_relax(); goto repeat; @@ -165,6 +165,27 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s) return ret; } +/** + * raw_seqcount_begin - begin a seq-read critical section + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry + * + * raw_seqcount_begin opens a read critical section of the given seqcount. + * Validity of the critical section is tested by checking read_seqcount_retry + * function. 
+ * + * Unlike read_seqcount_begin(), this function will not wait for the count + * to stabilize. If a writer is active when we begin, we will fail the + * read_seqcount_retry() instead of stabilizing at the beginning of the + * critical section. + */ +static inline unsigned raw_seqcount_begin(const seqcount_t *s) +{ + unsigned ret = ACCESS_ONCE(s->sequence); + smp_rmb(); + return ret & ~1; +} + /** * __read_seqcount_retry - end a seq-read critical section (without barrier) * @s: pointer to seqcount_t diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 70a3f8d49118..111f26b6e28b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -238,11 +238,12 @@ enum { /* * The callback notifies userspace to release buffers when skb DMA is done in * lower device, the skb last reference should be 0 when calling this. - * The desc is used to track userspace buffer index. + * The ctx field is used to track device context. + * The desc field is used to track userspace buffer index. */ struct ubuf_info { - void (*callback)(void *); - void *arg; + void (*callback)(struct ubuf_info *); + void *ctx; unsigned long desc; }; @@ -1019,7 +1020,7 @@ static inline void skb_queue_splice(const struct sk_buff_head *list, } /** - * skb_queue_splice - join two skb lists and reinitialise the emptied list + * skb_queue_splice_init - join two skb lists and reinitialise the emptied list * @list: the new list to add * @head: the place to add it in the first list * @@ -1050,7 +1051,7 @@ static inline void skb_queue_splice_tail(const struct sk_buff_head *list, } /** - * skb_queue_splice_tail - join two skb lists and reinitialise the emptied list + * skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list * @list: the new list to add * @head: the place to add it in the first list * diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 98679b061b63..fa702aeb5038 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -254,7 +254,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) * driver is finished with this message, it must call * spi_finalize_current_message() so the subsystem can issue the next * transfer - * @prepare_transfer_hardware: there are currently no more messages on the + * @unprepare_transfer_hardware: there are currently no more messages on the * queue so the subsystem notifies the driver that it may relax the * hardware by issuing this call * diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 5de415707c23..d28cc78a38e4 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h @@ -126,6 +126,8 @@ struct usb_hcd { unsigned wireless:1; /* Wireless USB HCD */ unsigned authorized_default:1; unsigned has_tt:1; /* Integrated TT in root hub */ + unsigned broken_pci_sleep:1; /* Don't put the + controller in PCI-D3 for system sleep */ unsigned int irq; /* irq allocated */ void __iomem *regs; /* device memory/io */ diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index f67810f8f21b..38ab3f46346f 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h @@ -94,6 +94,7 @@ struct usb_phy { struct usb_otg *otg; + struct device *io_dev; struct usb_phy_io_ops *io_ops; void __iomem *io_priv; diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 03b90cdc1921..06f8e3858251 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -26,13 +26,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGFREE, PGACTIVATE, 
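To make the contrast with read_seqcount_begin() concrete: raw_seqcount_begin() never spins, so if a writer is active the subsequent read_seqcount_retry() simply fails and the caller falls back to a slower path. A minimal sketch, with a made-up data structure:

#include <linux/seqlock.h>

struct example_data {
        seqcount_t seq;
        u64 a, b;
};

/* Lockless snapshot; returns false if a writer was (or became) active. */
static bool example_read_fast(struct example_data *d, u64 *a, u64 *b)
{
        unsigned int start = raw_seqcount_begin(&d->seq);

        *a = d->a;
        *b = d->b;

        return !read_seqcount_retry(&d->seq, start);
}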
PGDEACTIVATE, PGFAULT, PGMAJFAULT, FOR_ALL_ZONES(PGREFILL), - FOR_ALL_ZONES(PGSTEAL), + FOR_ALL_ZONES(PGSTEAL_KSWAPD), + FOR_ALL_ZONES(PGSTEAL_DIRECT), FOR_ALL_ZONES(PGSCAN_KSWAPD), FOR_ALL_ZONES(PGSCAN_DIRECT), #ifdef CONFIG_NUMA PGSCAN_ZONE_RECLAIM_FAILED, #endif - PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, + PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL, KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, KSWAPD_SKIP_CONGESTION_WAIT, PAGEOUTRUN, ALLOCSTALL, PGROTATED, diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 6822d2595aff..db1c5df45224 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -314,6 +314,7 @@ struct hci_conn { __u8 remote_cap; __u8 remote_auth; + bool flush_key; unsigned int sent; @@ -980,7 +981,7 @@ int mgmt_discoverable(struct hci_dev *hdev, u8 discoverable); int mgmt_connectable(struct hci_dev *hdev, u8 connectable); int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status); int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, - u8 persistent); + bool persistent); int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u32 flags, u8 *name, u8 name_len, u8 *dev_class); diff --git a/include/net/dst.h b/include/net/dst.h index 59c5d18cc385..bed833d9796a 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -36,7 +36,11 @@ struct dst_entry { struct net_device *dev; struct dst_ops *ops; unsigned long _metrics; - unsigned long expires; + union { + unsigned long expires; + /* point to where the dst_entry copied from */ + struct dst_entry *from; + }; struct dst_entry *path; struct neighbour __rcu *_neighbour; #ifdef CONFIG_XFRM @@ -55,6 +59,7 @@ struct dst_entry { #define DST_NOCACHE 0x0010 #define DST_NOCOUNT 0x0020 #define DST_NOPEER 0x0040 +#define DST_FAKE_RTABLE 0x0080 short error; short obsolete; diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index b26bb8101981..0ae759a6c76e 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -123,6 +123,54 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) return ((struct rt6_info *)dst)->rt6i_idev; } +static inline void rt6_clean_expires(struct rt6_info *rt) +{ + if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) + dst_release(rt->dst.from); + + rt->rt6i_flags &= ~RTF_EXPIRES; + rt->dst.from = NULL; +} + +static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires) +{ + if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) + dst_release(rt->dst.from); + + rt->rt6i_flags |= RTF_EXPIRES; + rt->dst.expires = expires; +} + +static inline void rt6_update_expires(struct rt6_info *rt, int timeout) +{ + if (!(rt->rt6i_flags & RTF_EXPIRES)) { + if (rt->dst.from) + dst_release(rt->dst.from); + /* dst_set_expires relies on expires == 0 + * if it has not been set previously. 
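Because dst.expires now shares storage with dst.from (see the dst.h hunk above), callers should go through these helpers instead of touching rt6i_flags and dst.expires by hand. A sketch of a converted caller (the function name is invented):

#include <net/ip6_fib.h>
#include <linux/jiffies.h>

/* Illustrative only: refresh or clear the lifetime of a cached route. */
static void example_set_route_lifetime(struct rt6_info *rt,
                                       unsigned long lifetime)
{
        if (lifetime)
                /* was: rt->dst.expires = jiffies + lifetime;
                 *      rt->rt6i_flags |= RTF_EXPIRES;          */
                rt6_set_expires(rt, jiffies + lifetime);
        else
                /* was: rt->rt6i_flags &= ~RTF_EXPIRES; */
                rt6_clean_expires(rt);
}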
+ */ + rt->dst.expires = 0; + } + + dst_set_expires(&rt->dst, timeout); + rt->rt6i_flags |= RTF_EXPIRES; +} + +static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from) +{ + struct dst_entry *new = (struct dst_entry *) from; + + if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) { + if (new == rt->dst.from) + return; + dst_release(rt->dst.from); + } + + rt->rt6i_flags &= ~RTF_EXPIRES; + rt->dst.from = new; + dst_hold(new); +} + struct fib6_walker_t { struct list_head lh; struct fib6_node *root, *node; diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 2bdee51ba30d..72522f087375 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -393,7 +393,7 @@ struct ip_vs_protocol { void (*exit)(struct ip_vs_protocol *pp); - void (*init_netns)(struct net *net, struct ip_vs_proto_data *pd); + int (*init_netns)(struct net *net, struct ip_vs_proto_data *pd); void (*exit_netns)(struct net *net, struct ip_vs_proto_data *pd); @@ -1203,6 +1203,8 @@ ip_vs_lookup_real_service(struct net *net, int af, __u16 protocol, extern int ip_vs_use_count_inc(void); extern void ip_vs_use_count_dec(void); +extern int ip_vs_register_nl_ioctl(void); +extern void ip_vs_unregister_nl_ioctl(void); extern int ip_vs_control_init(void); extern void ip_vs_control_cleanup(void); extern struct ip_vs_dest * diff --git a/include/net/red.h b/include/net/red.h index 77d4c3745cb5..ef46058d35bf 100644 --- a/include/net/red.h +++ b/include/net/red.h @@ -245,7 +245,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms * * dummy packets as a burst after idle time, i.e. * - * p->qavg *= (1-W)^m + * v->qavg *= (1-W)^m * * This is an apparently overcomplicated solution (f.e. we have to * precompute a table to make this calculation in reasonable time) @@ -279,7 +279,7 @@ static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p unsigned int backlog) { /* - * NOTE: p->qavg is fixed point number with point at Wlog. + * NOTE: v->qavg is fixed point number with point at Wlog. * The formula below is equvalent to floating point * version: * @@ -390,7 +390,7 @@ static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v) if (red_is_idling(v)) qavg = red_calc_qavg_from_idle_time(p, v); - /* p->qavg is fixed point number with point at Wlog */ + /* v->qavg is fixed point number with point at Wlog */ qavg >>= p->Wlog; if (qavg > p->target_max && p->max_P <= MAX_P_MAX) diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 6ee44b24864a..a2ef81466b00 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h @@ -704,4 +704,17 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr) addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff); } +/* The cookie is always 0 since this is how it's used in the + * pmtu code. 
+ */ +static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) +{ + if (t->dst && !dst_check(t->dst, 0)) { + dst_release(t->dst); + t->dst = NULL; + } + + return t->dst; +} + #endif /* __net_sctp_h__ */ diff --git a/include/net/sock.h b/include/net/sock.h index a6ba1f8871fd..5a0a58ac4126 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -246,6 +246,7 @@ struct cg_proto; * @sk_user_data: RPC layer private data * @sk_sndmsg_page: cached page for sendmsg * @sk_sndmsg_off: cached offset for sendmsg + * @sk_peek_off: current peek_offset value * @sk_send_head: front of stuff to transmit * @sk_security: used by security modules * @sk_mark: generic packet mark @@ -1128,9 +1129,9 @@ sk_sockets_allocated_read_positive(struct sock *sk) struct proto *prot = sk->sk_prot; if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - return percpu_counter_sum_positive(sk->sk_cgrp->sockets_allocated); + return percpu_counter_read_positive(sk->sk_cgrp->sockets_allocated); - return percpu_counter_sum_positive(prot->sockets_allocated); + return percpu_counter_read_positive(prot->sockets_allocated); } static inline int diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 5f5ed1b8b41b..f4f1c96dca72 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h @@ -217,11 +217,29 @@ struct domain_device { struct kref kref; }; -struct sas_discovery_event { +struct sas_work { + struct list_head drain_node; struct work_struct work; +}; + +static inline void INIT_SAS_WORK(struct sas_work *sw, void (*fn)(struct work_struct *)) +{ + INIT_WORK(&sw->work, fn); + INIT_LIST_HEAD(&sw->drain_node); +} + +struct sas_discovery_event { + struct sas_work work; struct asd_sas_port *port; }; +static inline struct sas_discovery_event *to_sas_discovery_event(struct work_struct *work) +{ + struct sas_discovery_event *ev = container_of(work, typeof(*ev), work.work); + + return ev; +} + struct sas_discovery { struct sas_discovery_event disc_work[DISC_NUM_EVENTS]; unsigned long pending; @@ -244,7 +262,7 @@ struct asd_sas_port { struct list_head destroy_list; enum sas_linkrate linkrate; - struct work_struct work; + struct sas_work work; /* public: */ int id; @@ -270,10 +288,17 @@ struct asd_sas_port { }; struct asd_sas_event { - struct work_struct work; + struct sas_work work; struct asd_sas_phy *phy; }; +static inline struct asd_sas_event *to_asd_sas_event(struct work_struct *work) +{ + struct asd_sas_event *ev = container_of(work, typeof(*ev), work.work); + + return ev; +} + /* The phy pretty much is controlled by the LLDD. * The class only reads those fields. 
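With work_struct now wrapped in sas_work as above, event handlers recover their payload through the container_of-style helpers rather than casting the work pointer. A sketch for a phy event (handler names invented):

#include <scsi/libsas.h>

static void example_phy_event_handler(struct work_struct *work)
{
        struct asd_sas_event *ev = to_asd_sas_event(work);
        struct asd_sas_phy *phy = ev->phy;

        /* process the event for 'phy' ... */
        (void)phy;
}

static void example_init_phy_event(struct asd_sas_event *ev,
                                   struct asd_sas_phy *phy)
{
        ev->phy = phy;
        INIT_SAS_WORK(&ev->work, example_phy_event_handler);
}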
*/ @@ -333,10 +358,17 @@ struct scsi_core { }; struct sas_ha_event { - struct work_struct work; + struct sas_work work; struct sas_ha_struct *ha; }; +static inline struct sas_ha_event *to_sas_ha_event(struct work_struct *work) +{ + struct sas_ha_event *ev = container_of(work, typeof(*ev), work.work); + + return ev; +} + enum sas_ha_state { SAS_HA_REGISTERED, SAS_HA_DRAINING, diff --git a/include/scsi/sas.h b/include/scsi/sas.h index a577a833603d..be3eb0bf1ac0 100644 --- a/include/scsi/sas.h +++ b/include/scsi/sas.h @@ -103,6 +103,7 @@ enum sas_dev_type { }; enum sas_protocol { + SAS_PROTOCOL_NONE = 0, SAS_PROTOCOL_SATA = 0x01, SAS_PROTOCOL_SMP = 0x02, SAS_PROTOCOL_STP = 0x04, diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h index cdccd2eb7b6c..77670e823ed8 100644 --- a/include/scsi/sas_ata.h +++ b/include/scsi/sas_ata.h @@ -37,7 +37,7 @@ static inline int dev_is_sata(struct domain_device *dev) } int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy); -int sas_ata_init_host_and_port(struct domain_device *found_dev); +int sas_ata_init(struct domain_device *dev); void sas_ata_task_abort(struct sas_task *task); void sas_ata_strategy_handler(struct Scsi_Host *shost); void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q, @@ -52,7 +52,7 @@ static inline int dev_is_sata(struct domain_device *dev) { return 0; } -static inline int sas_ata_init_host_and_port(struct domain_device *found_dev) +static inline int sas_ata_init(struct domain_device *dev) { return 0; } diff --git a/init/do_mounts.c b/init/do_mounts.c index 0e93f92a0345..42b0707c3481 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -472,7 +472,7 @@ void __init change_floppy(char *fmt, ...) void __init mount_root(void) { #ifdef CONFIG_ROOT_NFS - if (MAJOR(ROOT_DEV) == UNNAMED_MAJOR) { + if (ROOT_DEV == Root_NFS) { if (mount_nfs_root()) return; diff --git a/init/main.c b/init/main.c index 9d454f09f3b1..44b2433334c7 100644 --- a/init/main.c +++ b/init/main.c @@ -225,13 +225,9 @@ static int __init loglevel(char *str) early_param("loglevel", loglevel); -/* - * Unknown boot options get handed to init, unless they look like - * unused parameters (modprobe will find them in /proc/cmdline). - */ -static int __init unknown_bootoption(char *param, char *val) +/* Change NUL term back to "=", to make "param" the whole string. */ +static int __init repair_env_string(char *param, char *val) { - /* Change NUL term back to "=", to make "param" the whole string. */ if (val) { /* param=val or param="val"? */ if (val == param+strlen(param)+1) @@ -243,6 +239,16 @@ static int __init unknown_bootoption(char *param, char *val) } else BUG(); } + return 0; +} + +/* + * Unknown boot options get handed to init, unless they look like + * unused parameters (modprobe will find them in /proc/cmdline). 
+ */ +static int __init unknown_bootoption(char *param, char *val) +{ + repair_env_string(param, val); /* Handle obsolete-style parameters */ if (obsolete_checksetup(param)) @@ -732,11 +738,6 @@ static char *initcall_level_names[] __initdata = { "late parameters", }; -static int __init ignore_unknown_bootoption(char *param, char *val) -{ - return 0; -} - static void __init do_initcall_level(int level) { extern const struct kernel_param __start___param[], __stop___param[]; @@ -747,7 +748,7 @@ static void __init do_initcall_level(int level) static_command_line, __start___param, __stop___param - __start___param, level, level, - ignore_unknown_bootoption); + repair_env_string); for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) do_one_initcall(*fn); diff --git a/kernel/compat.c b/kernel/compat.c index 74ff8498809a..d2c67aa49ae6 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -372,25 +372,54 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set) #ifdef __ARCH_WANT_SYS_SIGPROCMASK -asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set, - compat_old_sigset_t __user *oset) +/* + * sys_sigprocmask SIG_SETMASK sets the first (compat) word of the + * blocked set of signals to the supplied signal set + */ +static inline void compat_sig_setmask(sigset_t *blocked, compat_sigset_word set) { - old_sigset_t s; - long ret; - mm_segment_t old_fs; + memcpy(blocked->sig, &set, sizeof(set)); +} - if (set && get_user(s, set)) - return -EFAULT; - old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = sys_sigprocmask(how, - set ? (old_sigset_t __user *) &s : NULL, - oset ? (old_sigset_t __user *) &s : NULL); - set_fs(old_fs); - if (ret == 0) - if (oset) - ret = put_user(s, oset); - return ret; +asmlinkage long compat_sys_sigprocmask(int how, + compat_old_sigset_t __user *nset, + compat_old_sigset_t __user *oset) +{ + old_sigset_t old_set, new_set; + sigset_t new_blocked; + + old_set = current->blocked.sig[0]; + + if (nset) { + if (get_user(new_set, nset)) + return -EFAULT; + new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); + + new_blocked = current->blocked; + + switch (how) { + case SIG_BLOCK: + sigaddsetmask(&new_blocked, new_set); + break; + case SIG_UNBLOCK: + sigdelsetmask(&new_blocked, new_set); + break; + case SIG_SETMASK: + compat_sig_setmask(&new_blocked, new_set); + break; + default: + return -EINVAL; + } + + set_current_blocked(&new_blocked); + } + + if (oset) { + if (put_user(old_set, oset)) + return -EFAULT; + } + + return 0; } #endif diff --git a/kernel/events/core.c b/kernel/events/core.c index a6a9ec4cd8f5..fd126f82b57c 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3183,7 +3183,7 @@ static void perf_event_for_each(struct perf_event *event, perf_event_for_each_child(event, func); func(event); list_for_each_entry(sibling, &event->sibling_list, group_entry) - perf_event_for_each_child(event, func); + perf_event_for_each_child(sibling, func); mutex_unlock(&ctx->mutex); } diff --git a/kernel/fork.c b/kernel/fork.c index b9372a0bff18..687a15d56243 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include @@ -1464,6 +1465,8 @@ bad_fork_cleanup_io: if (p->io_context) exit_io_context(p); bad_fork_cleanup_namespaces: + if (unlikely(clone_flags & CLONE_NEWPID)) + pid_ns_release_proc(p->nsproxy->pid_ns); exit_task_namespaces(p); bad_fork_cleanup_mm: if (p->mm) diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h index 97a8bfadc88a..e75e29e4434a 100644 --- 
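The rewritten compat_sys_sigprocmask above follows the standard sigprocmask() contract on the first compat word: SIG_BLOCK adds signals to the blocked set, SIG_UNBLOCK removes them, SIG_SETMASK replaces the set, and SIGKILL/SIGSTOP are always stripped. For reference, a small user-space sketch of that contract:

#include <signal.h>

int main(void)
{
        sigset_t set, old;

        sigemptyset(&set);
        sigaddset(&set, SIGINT);

        /* SIG_BLOCK: add SIGINT; previous mask is returned in 'old'. */
        sigprocmask(SIG_BLOCK, &set, &old);

        /* SIG_UNBLOCK: remove SIGINT again. */
        sigprocmask(SIG_UNBLOCK, &set, NULL);

        /* SIG_SETMASK: replace the whole mask (restore the saved one). */
        sigprocmask(SIG_SETMASK, &old, NULL);

        /* Requests to block SIGKILL/SIGSTOP are silently ignored. */
        return 0;
}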
a/kernel/irq/debug.h +++ b/kernel/irq/debug.h @@ -4,10 +4,10 @@ #include -#define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f) -#define PS(f) if (desc->istate & f) printk("%14s set\n", #f) +#define ___P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f) +#define ___PS(f) if (desc->istate & f) printk("%14s set\n", #f) /* FIXME */ -#define PD(f) do { } while (0) +#define ___PD(f) do { } while (0) static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) { @@ -23,23 +23,23 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) print_symbol("%s\n", (unsigned long)desc->action->handler); } - P(IRQ_LEVEL); - P(IRQ_PER_CPU); - P(IRQ_NOPROBE); - P(IRQ_NOREQUEST); - P(IRQ_NOTHREAD); - P(IRQ_NOAUTOEN); + ___P(IRQ_LEVEL); + ___P(IRQ_PER_CPU); + ___P(IRQ_NOPROBE); + ___P(IRQ_NOREQUEST); + ___P(IRQ_NOTHREAD); + ___P(IRQ_NOAUTOEN); - PS(IRQS_AUTODETECT); - PS(IRQS_REPLAY); - PS(IRQS_WAITING); - PS(IRQS_PENDING); + ___PS(IRQS_AUTODETECT); + ___PS(IRQS_REPLAY); + ___PS(IRQS_WAITING); + ___PS(IRQS_PENDING); - PD(IRQS_INPROGRESS); - PD(IRQS_DISABLED); - PD(IRQS_MASKED); + ___PD(IRQS_INPROGRESS); + ___PD(IRQS_DISABLED); + ___PD(IRQS_MASKED); } -#undef P -#undef PS -#undef PD +#undef ___P +#undef ___PS +#undef ___PD diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 8742fd013a94..eef311a58a64 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c @@ -51,6 +51,23 @@ #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) +/* + * Number of free pages that are not high. + */ +static inline unsigned long low_free_pages(void) +{ + return nr_free_pages() - nr_free_highpages(); +} + +/* + * Number of pages required to be kept free while writing the image. Always + * half of all available low pages before the writing starts. + */ +static inline unsigned long reqd_free_pages(void) +{ + return low_free_pages() / 2; +} + struct swap_map_page { sector_t entries[MAP_PAGE_ENTRIES]; sector_t next_swap; @@ -72,7 +89,7 @@ struct swap_map_handle { sector_t cur_swap; sector_t first_sector; unsigned int k; - unsigned long nr_free_pages, written; + unsigned long reqd_free_pages; u32 crc32; }; @@ -316,8 +333,7 @@ static int get_swap_writer(struct swap_map_handle *handle) goto err_rel; } handle->k = 0; - handle->nr_free_pages = nr_free_pages() >> 1; - handle->written = 0; + handle->reqd_free_pages = reqd_free_pages(); handle->first_sector = handle->cur_swap; return 0; err_rel: @@ -352,11 +368,11 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf, handle->cur_swap = offset; handle->k = 0; } - if (bio_chain && ++handle->written > handle->nr_free_pages) { + if (bio_chain && low_free_pages() <= handle->reqd_free_pages) { error = hib_wait_on_bio_chain(bio_chain); if (error) goto out; - handle->written = 0; + handle->reqd_free_pages = reqd_free_pages(); } out: return error; @@ -618,7 +634,7 @@ static int save_image_lzo(struct swap_map_handle *handle, * Adjust number of free pages after all allocations have been done. * We don't want to run out of pages when writing. */ - handle->nr_free_pages = nr_free_pages() >> 1; + handle->reqd_free_pages = reqd_free_pages(); /* * Start the CRC32 thread. diff --git a/kernel/rcutree.c b/kernel/rcutree.c index 1050d6d3922c..d0c5baf1ab18 100644 --- a/kernel/rcutree.c +++ b/kernel/rcutree.c @@ -1820,7 +1820,6 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), * a quiescent state betweentimes. 
*/ local_irq_save(flags); - WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); rdp = this_cpu_ptr(rsp->rda); /* Add the callback to our list. */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4603b9d8f30a..0533a688ce22 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6405,16 +6405,26 @@ static void __sdt_free(const struct cpumask *cpu_map) struct sd_data *sdd = &tl->data; for_each_cpu(j, cpu_map) { - struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j); - if (sd && (sd->flags & SD_OVERLAP)) - free_sched_groups(sd->groups, 0); - kfree(*per_cpu_ptr(sdd->sd, j)); - kfree(*per_cpu_ptr(sdd->sg, j)); - kfree(*per_cpu_ptr(sdd->sgp, j)); + struct sched_domain *sd; + + if (sdd->sd) { + sd = *per_cpu_ptr(sdd->sd, j); + if (sd && (sd->flags & SD_OVERLAP)) + free_sched_groups(sd->groups, 0); + kfree(*per_cpu_ptr(sdd->sd, j)); + } + + if (sdd->sg) + kfree(*per_cpu_ptr(sdd->sg, j)); + if (sdd->sgp) + kfree(*per_cpu_ptr(sdd->sgp, j)); } free_percpu(sdd->sd); + sdd->sd = NULL; free_percpu(sdd->sg); + sdd->sg = NULL; free_percpu(sdd->sgp); + sdd->sgp = NULL; } } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0d97ebdc58f0..e9553640c1c3 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -784,7 +784,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) update_load_add(&rq_of(cfs_rq)->load, se->load.weight); #ifdef CONFIG_SMP if (entity_is_task(se)) - list_add_tail(&se->group_node, &rq_of(cfs_rq)->cfs_tasks); + list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks); #endif cfs_rq->nr_running++; } @@ -3215,6 +3215,8 @@ static int move_one_task(struct lb_env *env) static unsigned long task_h_load(struct task_struct *p); +static const unsigned int sched_nr_migrate_break = 32; + /* * move_tasks tries to move up to load_move weighted load from busiest to * this_rq, as part of a balancing operation within domain "sd". @@ -3242,7 +3244,7 @@ static int move_tasks(struct lb_env *env) /* take a breather every nr_migrate tasks */ if (env->loop > env->loop_break) { - env->loop_break += sysctl_sched_nr_migrate; + env->loop_break += sched_nr_migrate_break; env->flags |= LBF_NEED_BREAK; break; } @@ -3252,7 +3254,7 @@ static int move_tasks(struct lb_env *env) load = task_h_load(p); - if (load < 16 && !env->sd->nr_balance_failed) + if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed) goto next; if ((load / 2) > env->load_move) @@ -4407,7 +4409,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, .dst_cpu = this_cpu, .dst_rq = this_rq, .idle = idle, - .loop_break = sysctl_sched_nr_migrate, + .loop_break = sched_nr_migrate_break, }; cpumask_copy(cpus, cpu_active_mask); @@ -4445,10 +4447,10 @@ redo: * correctly treated as an imbalance. 
*/ env.flags |= LBF_ALL_PINNED; - env.load_move = imbalance; - env.src_cpu = busiest->cpu; - env.src_rq = busiest; - env.loop_max = busiest->nr_running; + env.load_move = imbalance; + env.src_cpu = busiest->cpu; + env.src_rq = busiest; + env.loop_max = min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running); more_balance: local_irq_save(flags); diff --git a/kernel/sched/features.h b/kernel/sched/features.h index e61fd73913d0..de00a486c5c6 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -68,3 +68,4 @@ SCHED_FEAT(TTWU_QUEUE, true) SCHED_FEAT(FORCE_SD_OVERLAP, false) SCHED_FEAT(RT_RUNTIME_SHARE, true) +SCHED_FEAT(LB_MIN, false) diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index bf57abdc7bd0..f113755695e2 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -346,7 +346,8 @@ int tick_resume_broadcast(void) tick_get_broadcast_mask()); break; case TICKDEV_MODE_ONESHOT: - broadcast = tick_resume_broadcast_oneshot(bc); + if (!cpumask_empty(tick_get_broadcast_mask())) + broadcast = tick_resume_broadcast_oneshot(bc); break; } } @@ -373,6 +374,9 @@ static int tick_broadcast_set_event(ktime_t expires, int force) { struct clock_event_device *bc = tick_broadcast_device.evtdev; + if (bc->mode != CLOCK_EVT_MODE_ONESHOT) + clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); + return clockevents_program_event(bc, expires, force); } @@ -531,7 +535,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; bc->event_handler = tick_handle_oneshot_broadcast; - clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); /* Take the do_timer update */ tick_do_timer_cpu = cpu; @@ -549,6 +552,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) to_cpumask(tmpmask)); if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) { + clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT); tick_broadcast_init_next_event(to_cpumask(tmpmask), tick_next_period); tick_broadcast_set_event(tick_next_period, 1); @@ -577,15 +581,10 @@ void tick_broadcast_switch_to_oneshot(void) raw_spin_lock_irqsave(&tick_broadcast_lock, flags); tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT; - - if (cpumask_empty(tick_get_broadcast_mask())) - goto end; - bc = tick_broadcast_device.evtdev; if (bc) tick_broadcast_setup_oneshot(bc); -end: raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); } diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ed7b5d1e12f4..2a22255c1010 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4629,7 +4629,8 @@ static ssize_t rb_simple_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) { - struct ring_buffer *buffer = filp->private_data; + struct trace_array *tr = filp->private_data; + struct ring_buffer *buffer = tr->buffer; char buf[64]; int r; @@ -4647,7 +4648,8 @@ static ssize_t rb_simple_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { - struct ring_buffer *buffer = filp->private_data; + struct trace_array *tr = filp->private_data; + struct ring_buffer *buffer = tr->buffer; unsigned long val; int ret; @@ -4734,7 +4736,7 @@ static __init int tracer_init_debugfs(void) &trace_clock_fops); trace_create_file("tracing_on", 0644, d_tracer, - global_trace.buffer, &rb_simple_fops); + &global_trace, &rb_simple_fops); #ifdef CONFIG_DYNAMIC_FTRACE trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 95059f091a24..f95d65da6db8 100644 
--- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -836,11 +836,11 @@ extern const char *__stop___trace_bprintk_fmt[]; filter) #include "trace_entries.h" -#ifdef CONFIG_FUNCTION_TRACER +#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER) int perf_ftrace_event_register(struct ftrace_event_call *call, enum trace_reg type, void *data); #else #define perf_ftrace_event_register NULL -#endif /* CONFIG_FUNCTION_TRACER */ +#endif #endif /* _LINUX_KERNEL_TRACE_H */ diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 859fae6b1825..df611a0e76c5 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -652,6 +652,8 @@ int trace_print_lat_context(struct trace_iterator *iter) { u64 next_ts; int ret; + /* trace_find_next_entry will reset ent_size */ + int ent_size = iter->ent_size; struct trace_seq *s = &iter->seq; struct trace_entry *entry = iter->ent, *next_entry = trace_find_next_entry(iter, NULL, @@ -660,6 +662,9 @@ int trace_print_lat_context(struct trace_iterator *iter) unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start); unsigned long rel_usecs; + /* Restore the original ent_size */ + iter->ent_size = ent_size; + if (!next_entry) next_ts = iter->ts; rel_usecs = ns2usecs(next_ts - iter->ts); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index cd65cb19c941..ae8f708e3d75 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -532,7 +532,7 @@ static struct page *dequeue_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, unsigned long address, int avoid_reserve) { - struct page *page; + struct page *page = NULL; struct mempolicy *mpol; nodemask_t *nodemask; struct zonelist *zonelist; @@ -2498,7 +2498,6 @@ retry_avoidcopy: if (outside_reserve) { BUG_ON(huge_pte_none(pte)); if (unmap_ref_private(mm, vma, old_page, address)) { - BUG_ON(page_count(old_page) != 1); BUG_ON(huge_pte_none(pte)); spin_lock(&mm->page_table_lock); ptep = huge_pte_offset(mm, address & huge_page_mask(h)); diff --git a/mm/memblock.c b/mm/memblock.c index 99f285599501..a44eab3157f8 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -330,6 +330,9 @@ static int __init_memblock memblock_add_region(struct memblock_type *type, phys_addr_t end = base + memblock_cap_size(base, &size); int i, nr_new; + if (!size) + return 0; + /* special case for empty array */ if (type->regions[0].size == 0) { WARN_ON(type->cnt != 1 || type->total_size); @@ -430,6 +433,9 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type, *start_rgn = *end_rgn = 0; + if (!size) + return 0; + /* we'll create at most two more regions */ while (type->cnt + 2 > type->max) if (memblock_double_array(type) < 0) @@ -514,7 +520,6 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) (unsigned long long)base, (unsigned long long)base + size, (void *)_RET_IP_); - BUG_ON(0 == size); return memblock_add_region(_rgn, base, size, MAX_NUMNODES); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b868def9bcc1..b659260c56ad 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2476,10 +2476,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg, struct page *page, unsigned int nr_pages, - struct page_cgroup *pc, enum charge_type ctype, bool lrucare) { + struct page_cgroup *pc = lookup_page_cgroup(page); struct zone *uninitialized_var(zone); bool was_on_lru = false; bool anon; @@ -2716,7 +2716,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, { 
struct mem_cgroup *memcg = NULL; unsigned int nr_pages = 1; - struct page_cgroup *pc; bool oom = true; int ret; @@ -2730,11 +2729,10 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm, oom = false; } - pc = lookup_page_cgroup(page); ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom); if (ret == -ENOMEM) return ret; - __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false); + __mem_cgroup_commit_charge(memcg, page, nr_pages, ctype, false); return 0; } @@ -2831,16 +2829,13 @@ static void __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg, enum charge_type ctype) { - struct page_cgroup *pc; - if (mem_cgroup_disabled()) return; if (!memcg) return; cgroup_exclude_rmdir(&memcg->css); - pc = lookup_page_cgroup(page); - __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true); + __mem_cgroup_commit_charge(memcg, page, 1, ctype, true); /* * Now swap is on-memory. This means this page may be * counted both as mem and swap....double count. @@ -3298,14 +3293,13 @@ int mem_cgroup_prepare_migration(struct page *page, * page. In the case new page is migrated but not remapped, new page's * mapcount will be finally 0 and we call uncharge in end_migration(). */ - pc = lookup_page_cgroup(newpage); if (PageAnon(page)) ctype = MEM_CGROUP_CHARGE_TYPE_MAPPED; else if (page_is_file_cache(page)) ctype = MEM_CGROUP_CHARGE_TYPE_CACHE; else ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM; - __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false); + __mem_cgroup_commit_charge(memcg, newpage, 1, ctype, false); return ret; } @@ -3392,8 +3386,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage, * the newpage may be on LRU(or pagevec for LRU) already. We lock * LRU while we overwrite pc->mem_cgroup. */ - pc = lookup_page_cgroup(newpage); - __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true); + __mem_cgroup_commit_charge(memcg, newpage, 1, type, true); } #ifdef CONFIG_DEBUG_VM @@ -4514,6 +4507,12 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp, swap_buffers: /* Swap primary and spare array */ thresholds->spare = thresholds->primary; + /* If all events are unregistered, free the spare array */ + if (!new) { + kfree(thresholds->spare); + thresholds->spare = NULL; + } + rcu_assign_pointer(thresholds->primary, new); /* To be sure that nobody uses thresholds */ diff --git a/mm/mempolicy.c b/mm/mempolicy.c index cfb6c8678754..b19569137529 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1361,11 +1361,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode, mm = get_task_mm(task); put_task_struct(task); - if (mm) - err = do_migrate_pages(mm, old, new, - capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); - else + + if (!mm) { err = -EINVAL; + goto out; + } + + err = do_migrate_pages(mm, old, new, + capable(CAP_SYS_NICE) ? 
MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); mmput(mm); out: diff --git a/mm/migrate.c b/mm/migrate.c index 51c08a0c6f68..11072383ae12 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1388,14 +1388,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, mm = get_task_mm(task); put_task_struct(task); - if (mm) { - if (nodes) - err = do_pages_move(mm, task_nodes, nr_pages, pages, - nodes, status, flags); - else - err = do_pages_stat(mm, nr_pages, pages, status); - } else - err = -EINVAL; + if (!mm) + return -EINVAL; + + if (nodes) + err = do_pages_move(mm, task_nodes, nr_pages, pages, + nodes, status, flags); + else + err = do_pages_stat(mm, nr_pages, pages, status); mmput(mm); return err; diff --git a/mm/mmap.c b/mm/mmap.c index a7bf6a31c9f6..848ef52d9603 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -240,6 +240,8 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) return next; } +static unsigned long do_brk(unsigned long addr, unsigned long len); + SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long rlim, retval; @@ -951,7 +953,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint) * The caller must hold down_write(¤t->mm->mmap_sem). */ -unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, +static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff) { @@ -1087,7 +1089,32 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, return mmap_region(file, addr, len, flags, vm_flags, pgoff); } -EXPORT_SYMBOL(do_mmap_pgoff); + +unsigned long do_mmap(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, + unsigned long flag, unsigned long offset) +{ + if (unlikely(offset + PAGE_ALIGN(len) < offset)) + return -EINVAL; + if (unlikely(offset & ~PAGE_MASK)) + return -EINVAL; + return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); +} +EXPORT_SYMBOL(do_mmap); + +unsigned long vm_mmap(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, + unsigned long flag, unsigned long offset) +{ + unsigned long ret; + struct mm_struct *mm = current->mm; + + down_write(&mm->mmap_sem); + ret = do_mmap(file, addr, len, prot, flag, offset); + up_write(&mm->mmap_sem); + return ret; +} +EXPORT_SYMBOL(vm_mmap); SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, @@ -2105,21 +2132,25 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) return 0; } - EXPORT_SYMBOL(do_munmap); -SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) +int vm_munmap(unsigned long start, size_t len) { int ret; struct mm_struct *mm = current->mm; - profile_munmap(addr); - down_write(&mm->mmap_sem); - ret = do_munmap(mm, addr, len); + ret = do_munmap(mm, start, len); up_write(&mm->mmap_sem); return ret; } +EXPORT_SYMBOL(vm_munmap); + +SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) +{ + profile_munmap(addr); + return vm_munmap(addr, len); +} static inline void verify_mm_writelocked(struct mm_struct *mm) { @@ -2136,7 +2167,7 @@ static inline void verify_mm_writelocked(struct mm_struct *mm) * anonymous maps. eventually we may be able to do some * brk-specific accounting here. 
*/ -unsigned long do_brk(unsigned long addr, unsigned long len) +static unsigned long do_brk(unsigned long addr, unsigned long len) { struct mm_struct * mm = current->mm; struct vm_area_struct * vma, * prev; @@ -2232,7 +2263,17 @@ out: return addr; } -EXPORT_SYMBOL(do_brk); +unsigned long vm_brk(unsigned long addr, unsigned long len) +{ + struct mm_struct *mm = current->mm; + unsigned long ret; + + down_write(&mm->mmap_sem); + ret = do_brk(addr, len); + up_write(&mm->mmap_sem); + return ret; +} +EXPORT_SYMBOL(vm_brk); /* Release all mmaps. */ void exit_mmap(struct mm_struct *mm) diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 24f0fc1a56d6..1983fb1c7026 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c @@ -82,8 +82,7 @@ void __init free_bootmem_late(unsigned long addr, unsigned long size) static void __init __free_pages_memory(unsigned long start, unsigned long end) { - int i; - unsigned long start_aligned, end_aligned; + unsigned long i, start_aligned, end_aligned; int order = ilog2(BITS_PER_LONG); start_aligned = (start + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1); @@ -298,13 +297,19 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size, if (WARN_ON_ONCE(slab_is_available())) return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); +again: ptr = __alloc_memory_core_early(pgdat->node_id, size, align, goal, -1ULL); if (ptr) return ptr; - return __alloc_memory_core_early(MAX_NUMNODES, size, align, - goal, -1ULL); + ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, + goal, -1ULL); + if (!ptr && goal) { + goal = 0; + goto again; + } + return ptr; } void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, diff --git a/mm/nommu.c b/mm/nommu.c index f59e170fceb4..bb8f4f004a82 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1233,7 +1233,7 @@ enomem: /* * handle mapping creation for uClinux */ -unsigned long do_mmap_pgoff(struct file *file, +static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, @@ -1470,7 +1470,32 @@ error_getting_region: show_free_areas(0); return -ENOMEM; } -EXPORT_SYMBOL(do_mmap_pgoff); + +unsigned long do_mmap(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, + unsigned long flag, unsigned long offset) +{ + if (unlikely(offset + PAGE_ALIGN(len) < offset)) + return -EINVAL; + if (unlikely(offset & ~PAGE_MASK)) + return -EINVAL; + return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT); +} +EXPORT_SYMBOL(do_mmap); + +unsigned long vm_mmap(struct file *file, unsigned long addr, + unsigned long len, unsigned long prot, + unsigned long flag, unsigned long offset) +{ + unsigned long ret; + struct mm_struct *mm = current->mm; + + down_write(&mm->mmap_sem); + ret = do_mmap(file, addr, len, prot, flag, offset); + up_write(&mm->mmap_sem); + return ret; +} +EXPORT_SYMBOL(vm_mmap); SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, unsigned long, prot, unsigned long, flags, @@ -1709,16 +1734,22 @@ erase_whole_vma: } EXPORT_SYMBOL(do_munmap); -SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) +int vm_munmap(unsigned long addr, size_t len) { - int ret; struct mm_struct *mm = current->mm; + int ret; down_write(&mm->mmap_sem); ret = do_munmap(mm, addr, len); up_write(&mm->mmap_sem); return ret; } +EXPORT_SYMBOL(vm_munmap); + +SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len) +{ + return vm_munmap(addr, len); +} /* * release all the mappings made in a process's VM space @@ -1744,7 +1775,7 @@ void 
exit_mmap(struct mm_struct *mm) kleave(""); } -unsigned long do_brk(unsigned long addr, unsigned long len) +unsigned long vm_brk(unsigned long addr, unsigned long len) { return -ENOMEM; } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a712fb9e04ce..918330f71dba 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5203,7 +5203,7 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write, int ret; ret = proc_dointvec_minmax(table, write, buffer, length, ppos); - if (!write || (ret == -EINVAL)) + if (!write || (ret < 0)) return ret; for_each_populated_zone(zone) { for_each_possible_cpu(cpu) { diff --git a/mm/percpu.c b/mm/percpu.c index f47af9123af7..bb4be7435ce3 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1132,20 +1132,20 @@ static void pcpu_dump_alloc_info(const char *lvl, for (alloc_end += gi->nr_units / upa; alloc < alloc_end; alloc++) { if (!(alloc % apl)) { - printk("\n"); + printk(KERN_CONT "\n"); printk("%spcpu-alloc: ", lvl); } - printk("[%0*d] ", group_width, group); + printk(KERN_CONT "[%0*d] ", group_width, group); for (unit_end += upa; unit < unit_end; unit++) if (gi->cpu_map[unit] != NR_CPUS) - printk("%0*d ", cpu_width, + printk(KERN_CONT "%0*d ", cpu_width, gi->cpu_map[unit]); else - printk("%s ", empty_str); + printk(KERN_CONT "%s ", empty_str); } } - printk("\n"); + printk(KERN_CONT "\n"); } /** @@ -1650,6 +1650,16 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, areas[group] = ptr; base = min(ptr, base); + } + + /* + * Copy data and free unused parts. This should happen after all + * allocations are complete; otherwise, we may end up with + * overlapping groups. + */ + for (group = 0; group < ai->nr_groups; group++) { + struct pcpu_group_info *gi = &ai->groups[group]; + void *ptr = areas[group]; for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) { if (gi->cpu_map[i] == NR_CPUS) { @@ -1885,6 +1895,8 @@ void __init setup_per_cpu_areas(void) fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); if (!ai || !fc) panic("Failed to allocate memory for percpu areas."); + /* kmemleak tracks the percpu allocations separately */ + kmemleak_free(fc); ai->dyn_size = unit_size; ai->unit_size = unit_size; diff --git a/mm/swap_state.c b/mm/swap_state.c index 9d3dd3763cf7..4c5ff7f284d9 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -26,7 +26,7 @@ */ static const struct address_space_operations swap_aops = { .writepage = swap_writepage, - .set_page_dirty = __set_page_dirty_nobuffers, + .set_page_dirty = __set_page_dirty_no_writeback, .migratepage = migrate_page, }; diff --git a/mm/vmscan.c b/mm/vmscan.c index 1a518684a32f..33dc256033b5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1568,9 +1568,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz, reclaim_stat->recent_scanned[0] += nr_anon; reclaim_stat->recent_scanned[1] += nr_file; - if (current_is_kswapd()) - __count_vm_events(KSWAPD_STEAL, nr_reclaimed); - __count_zone_vm_events(PGSTEAL, zone, nr_reclaimed); + if (global_reclaim(sc)) { + if (current_is_kswapd()) + __count_zone_vm_events(PGSTEAL_KSWAPD, zone, + nr_reclaimed); + else + __count_zone_vm_events(PGSTEAL_DIRECT, zone, + nr_reclaimed); + } putback_inactive_pages(mz, &page_list); diff --git a/mm/vmstat.c b/mm/vmstat.c index f600557a7659..7db1b9bab492 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -738,7 +738,8 @@ const char * const vmstat_text[] = { "pgmajfault", TEXTS_FOR_ZONES("pgrefill") - TEXTS_FOR_ZONES("pgsteal") + TEXTS_FOR_ZONES("pgsteal_kswapd") + 
TEXTS_FOR_ZONES("pgsteal_direct") TEXTS_FOR_ZONES("pgscan_kswapd") TEXTS_FOR_ZONES("pgscan_direct") @@ -747,7 +748,6 @@ const char * const vmstat_text[] = { #endif "pginodesteal", "slabs_scanned", - "kswapd_steal", "kswapd_inodesteal", "kswapd_low_wmark_hit_quickly", "kswapd_high_wmark_hit_quickly", diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 9988d4abb372..9757c193c86b 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -157,7 +157,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, skb = __vlan_hwaccel_put_tag(skb, vlan_tci); } - skb_set_dev(skb, vlan_dev_priv(dev)->real_dev); + skb->dev = vlan_dev_priv(dev)->real_dev; len = skb->len; if (netpoll_tx_running(dev)) return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 0906c194a413..9d9a6a3edbd5 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -2011,16 +2011,17 @@ static void __exit ax25_exit(void) proc_net_remove(&init_net, "ax25_route"); proc_net_remove(&init_net, "ax25"); proc_net_remove(&init_net, "ax25_calls"); - ax25_rt_free(); - ax25_uid_free(); - ax25_dev_free(); - ax25_unregister_sysctl(); unregister_netdevice_notifier(&ax25_dev_notifier); + ax25_unregister_sysctl(); dev_remove_pack(&ax25_packet_type); sock_unregister(PF_AX25); proto_unregister(&ax25_proto); + + ax25_rt_free(); + ax25_uid_free(); + ax25_dev_free(); } module_exit(ax25_exit); diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 92a857e3786d..edfd61addcec 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -1215,40 +1215,40 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) return NULL; } -static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, +static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, u8 key_type, u8 old_key_type) { /* Legacy key */ if (key_type < 0x03) - return 1; + return true; /* Debug keys are insecure so don't store them persistently */ if (key_type == HCI_LK_DEBUG_COMBINATION) - return 0; + return false; /* Changed combination key and there's no previous one */ if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) - return 0; + return false; /* Security mode 3 case */ if (!conn) - return 1; + return true; /* Neither local nor remote side had no-bonding as requirement */ if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) - return 1; + return true; /* Local side had dedicated bonding as requirement */ if (conn->auth_type == 0x02 || conn->auth_type == 0x03) - return 1; + return true; /* Remote side had dedicated bonding as requirement */ if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) - return 1; + return true; /* If none of the above criteria match, then don't store the key * persistently */ - return 0; + return false; } struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) @@ -1285,7 +1285,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) { struct link_key *key, *old_key; - u8 old_key_type, persistent; + u8 old_key_type; + bool persistent; old_key = hci_find_link_key(hdev, bdaddr); if (old_key) { @@ -1328,10 +1329,8 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, mgmt_new_link_key(hdev, key, persistent); - if (!persistent) { - list_del(&key->list); - kfree(key); - } + if (conn) + conn->flush_key = !persistent; return 0; } diff --git 
a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index b37531094c49..6c065254afc0 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1901,6 +1901,8 @@ static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff } if (ev->status == 0) { + if (conn->type == ACL_LINK && conn->flush_key) + hci_remove_link_key(hdev, &conn->dst); hci_proto_disconn_cfm(conn, ev->reason); hci_conn_del(conn); } @@ -2311,6 +2313,7 @@ static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *sk case HCI_OP_USER_PASSKEY_NEG_REPLY: hci_cc_user_passkey_neg_reply(hdev, skb); + break; case HCI_OP_LE_SET_SCAN_PARAM: hci_cc_le_set_scan_param(hdev, skb); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 4ef275c69675..4bb03b111122 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -2884,7 +2884,7 @@ int mgmt_write_scan_failed(struct hci_dev *hdev, u8 scan, u8 status) return 0; } -int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, u8 persistent) +int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bool persistent) { struct mgmt_ev_new_link_key ev; diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 61f65344e711..a2098e3de500 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -47,6 +47,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb) kfree_skb(skb); } else { skb_push(skb, ETH_HLEN); + br_drop_fake_rtable(skb); dev_queue_xmit(skb); } diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index dec4f3817133..d7f49b63ab0f 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -156,7 +156,7 @@ void br_netfilter_rtable_init(struct net_bridge *br) rt->dst.dev = br->dev; rt->dst.path = &rt->dst; dst_init_metrics(&rt->dst, br_dst_default_metrics, true); - rt->dst.flags = DST_NOXFRM | DST_NOPEER; + rt->dst.flags = DST_NOXFRM | DST_NOPEER | DST_FAKE_RTABLE; rt->dst.ops = &fake_dst_ops; } @@ -694,11 +694,7 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb, const struct net_device *out, int (*okfn)(struct sk_buff *)) { - struct rtable *rt = skb_rtable(skb); - - if (rt && rt == bridge_parent_rtable(in)) - skb_dst_drop(skb); - + br_drop_fake_rtable(skb); return NF_ACCEPT; } diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c index 20618dd3088b..d09340e1523f 100644 --- a/net/caif/chnl_net.c +++ b/net/caif/chnl_net.c @@ -103,6 +103,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) skb->protocol = htons(ETH_P_IPV6); break; default: + kfree_skb(skb); priv->netdev->stats.rx_errors++; return -EINVAL; } @@ -220,14 +221,16 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev) if (skb->len > priv->netdev->mtu) { pr_warn("Size of skb exceeded MTU\n"); + kfree_skb(skb); dev->stats.tx_errors++; - return -ENOSPC; + return NETDEV_TX_OK; } if (!priv->flowenabled) { pr_debug("dropping packets flow off\n"); + kfree_skb(skb); dev->stats.tx_dropped++; - return NETDEV_TX_BUSY; + return NETDEV_TX_OK; } if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP) @@ -242,7 +245,7 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev) result = priv->chnl.dn->transmit(priv->chnl.dn, pkt); if (result) { dev->stats.tx_dropped++; - return result; + return NETDEV_TX_OK; } /* Update statistics. 
*/ diff --git a/net/core/dev.c b/net/core/dev.c index c25d453b2803..99e1d759f41e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1409,14 +1409,34 @@ EXPORT_SYMBOL(register_netdevice_notifier); * register_netdevice_notifier(). The notifier is unlinked into the * kernel structures and may then be reused. A negative errno code * is returned on a failure. + * + * After unregistering unregister and down device events are synthesized + * for all devices on the device list to the removed notifier to remove + * the need for special case cleanup code. */ int unregister_netdevice_notifier(struct notifier_block *nb) { + struct net_device *dev; + struct net *net; int err; rtnl_lock(); err = raw_notifier_chain_unregister(&netdev_chain, nb); + if (err) + goto unlock; + + for_each_net(net) { + for_each_netdev(net, dev) { + if (dev->flags & IFF_UP) { + nb->notifier_call(nb, NETDEV_GOING_DOWN, dev); + nb->notifier_call(nb, NETDEV_DOWN, dev); + } + nb->notifier_call(nb, NETDEV_UNREGISTER, dev); + nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev); + } + } +unlock: rtnl_unlock(); return err; } @@ -1597,10 +1617,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) return NET_RX_DROP; } skb->skb_iif = 0; - skb_set_dev(skb, dev); + skb->dev = dev; + skb_dst_drop(skb); skb->tstamp.tv64 = 0; skb->pkt_type = PACKET_HOST; skb->protocol = eth_type_trans(skb, dev); + skb->mark = 0; + secpath_reset(skb); + nf_reset(skb); return netif_rx(skb); } EXPORT_SYMBOL_GPL(dev_forward_skb); @@ -1849,36 +1873,6 @@ void netif_device_attach(struct net_device *dev) } EXPORT_SYMBOL(netif_device_attach); -/** - * skb_dev_set -- assign a new device to a buffer - * @skb: buffer for the new device - * @dev: network device - * - * If an skb is owned by a device already, we have to reset - * all data private to the namespace a device belongs to - * before assigning it a new device. 
- */ -#ifdef CONFIG_NET_NS -void skb_set_dev(struct sk_buff *skb, struct net_device *dev) -{ - skb_dst_drop(skb); - if (skb->dev && !net_eq(dev_net(skb->dev), dev_net(dev))) { - secpath_reset(skb); - nf_reset(skb); - skb_init_secmark(skb); - skb->mark = 0; - skb->priority = 0; - skb->nf_trace = 0; - skb->ipvs_property = 0; -#ifdef CONFIG_NET_SCHED - skb->tc_index = 0; -#endif - } - skb->dev = dev; -} -EXPORT_SYMBOL(skb_set_dev); -#endif /* CONFIG_NET_NS */ - static void skb_warn_bad_offload(const struct sk_buff *skb) { static const netdev_features_t null_features = 0; diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 7f36b38e060f..a7cad741df01 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -42,13 +42,14 @@ static void send_dm_alert(struct work_struct *unused); * netlink alerts */ static int trace_state = TRACE_OFF; -static DEFINE_SPINLOCK(trace_state_lock); +static DEFINE_MUTEX(trace_state_mutex); struct per_cpu_dm_data { struct work_struct dm_alert_work; - struct sk_buff *skb; + struct sk_buff __rcu *skb; atomic_t dm_hit_count; struct timer_list send_timer; + int cpu; }; struct dm_hw_stat_delta { @@ -79,29 +80,53 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data) size_t al; struct net_dm_alert_msg *msg; struct nlattr *nla; + struct sk_buff *skb; + struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1); al = sizeof(struct net_dm_alert_msg); al += dm_hit_limit * sizeof(struct net_dm_drop_point); al += sizeof(struct nlattr); - data->skb = genlmsg_new(al, GFP_KERNEL); - genlmsg_put(data->skb, 0, 0, &net_drop_monitor_family, - 0, NET_DM_CMD_ALERT); - nla = nla_reserve(data->skb, NLA_UNSPEC, sizeof(struct net_dm_alert_msg)); - msg = nla_data(nla); - memset(msg, 0, al); - atomic_set(&data->dm_hit_count, dm_hit_limit); + skb = genlmsg_new(al, GFP_KERNEL); + + if (skb) { + genlmsg_put(skb, 0, 0, &net_drop_monitor_family, + 0, NET_DM_CMD_ALERT); + nla = nla_reserve(skb, NLA_UNSPEC, + sizeof(struct net_dm_alert_msg)); + msg = nla_data(nla); + memset(msg, 0, al); + } else + schedule_work_on(data->cpu, &data->dm_alert_work); + + /* + * Don't need to lock this, since we are guaranteed to only + * run this on a single cpu at a time. + * Note also that we only update data->skb if the old and new skb + * pointers don't match. This ensures that we don't continually call + * synchronize_rcu if we repeatedly fail to alloc a new netlink message. + */ + if (skb != oskb) { + rcu_assign_pointer(data->skb, skb); + + synchronize_rcu(); + + atomic_set(&data->dm_hit_count, dm_hit_limit); + } + } static void send_dm_alert(struct work_struct *unused) { struct sk_buff *skb; - struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); + struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); + + WARN_ON_ONCE(data->cpu != smp_processor_id()); /* * Grab the skb we're about to send */ - skb = data->skb; + skb = rcu_dereference_protected(data->skb, 1); /* * Replace it with a new one @@ -111,8 +136,10 @@ static void send_dm_alert(struct work_struct *unused) /* * Ship it!
*/ - genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); + if (skb) + genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); + put_cpu_var(dm_cpu_data); } /* @@ -123,9 +150,11 @@ static void send_dm_alert(struct work_struct *unused) */ static void sched_send_work(unsigned long unused) { - struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); + struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); - schedule_work(&data->dm_alert_work); + schedule_work_on(smp_processor_id(), &data->dm_alert_work); + + put_cpu_var(dm_cpu_data); } static void trace_drop_common(struct sk_buff *skb, void *location) @@ -134,9 +163,16 @@ static void trace_drop_common(struct sk_buff *skb, void *location) struct nlmsghdr *nlh; struct nlattr *nla; int i; - struct per_cpu_dm_data *data = &__get_cpu_var(dm_cpu_data); + struct sk_buff *dskb; + struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); + rcu_read_lock(); + dskb = rcu_dereference(data->skb); + + if (!dskb) + goto out; + if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) { /* * we're already at zero, discard this hit @@ -144,12 +180,13 @@ static void trace_drop_common(struct sk_buff *skb, void *location) goto out; } - nlh = (struct nlmsghdr *)data->skb->data; + nlh = (struct nlmsghdr *)dskb->data; nla = genlmsg_data(nlmsg_data(nlh)); msg = nla_data(nla); for (i = 0; i < msg->entries; i++) { if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { msg->points[i].count++; + atomic_inc(&data->dm_hit_count); goto out; } } @@ -157,7 +194,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location) /* * We need to create a new entry */ - __nla_reserve_nohdr(data->skb, sizeof(struct net_dm_drop_point)); + __nla_reserve_nohdr(dskb, sizeof(struct net_dm_drop_point)); nla->nla_len += NLA_ALIGN(sizeof(struct net_dm_drop_point)); memcpy(msg->points[msg->entries].pc, &location, sizeof(void *)); msg->points[msg->entries].count = 1; @@ -169,6 +206,8 @@ static void trace_drop_common(struct sk_buff *skb, void *location) } out: + rcu_read_unlock(); + put_cpu_var(dm_cpu_data); return; } @@ -213,7 +252,7 @@ static int set_all_monitor_traces(int state) struct dm_hw_stat_delta *new_stat = NULL; struct dm_hw_stat_delta *temp; - spin_lock(&trace_state_lock); + mutex_lock(&trace_state_mutex); if (state == trace_state) { rc = -EAGAIN; @@ -252,7 +291,7 @@ static int set_all_monitor_traces(int state) rc = -EINPROGRESS; out_unlock: - spin_unlock(&trace_state_lock); + mutex_unlock(&trace_state_mutex); return rc; } @@ -295,12 +334,12 @@ static int dropmon_net_event(struct notifier_block *ev_block, new_stat->dev = dev; new_stat->last_rx = jiffies; - spin_lock(&trace_state_lock); + mutex_lock(&trace_state_mutex); list_add_rcu(&new_stat->list, &hw_stats_list); - spin_unlock(&trace_state_lock); + mutex_unlock(&trace_state_mutex); break; case NETDEV_UNREGISTER: - spin_lock(&trace_state_lock); + mutex_lock(&trace_state_mutex); list_for_each_entry_safe(new_stat, tmp, &hw_stats_list, list) { if (new_stat->dev == dev) { new_stat->dev = NULL; @@ -311,7 +350,7 @@ static int dropmon_net_event(struct notifier_block *ev_block, } } } - spin_unlock(&trace_state_lock); + mutex_unlock(&trace_state_mutex); break; } out: @@ -367,13 +406,15 @@ static int __init init_net_drop_monitor(void) for_each_present_cpu(cpu) { data = &per_cpu(dm_cpu_data, cpu); - reset_per_cpu_data(data); + data->cpu = cpu; INIT_WORK(&data->dm_alert_work, send_dm_alert); init_timer(&data->send_timer); data->send_timer.data = cpu; data->send_timer.function = sched_send_work; + reset_per_cpu_data(data); } 
+ goto out; out_unreg: diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 0e950fda9a0a..31a5ae51a45c 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -83,21 +83,29 @@ assign: static int ops_init(const struct pernet_operations *ops, struct net *net) { - int err; + int err = -ENOMEM; + void *data = NULL; + if (ops->id && ops->size) { - void *data = kzalloc(ops->size, GFP_KERNEL); + data = kzalloc(ops->size, GFP_KERNEL); if (!data) - return -ENOMEM; + goto out; err = net_assign_generic(net, *ops->id, data); - if (err) { - kfree(data); - return err; - } + if (err) + goto cleanup; } + err = 0; if (ops->init) - return ops->init(net); - return 0; + err = ops->init(net); + if (!err) + return 0; + +cleanup: + kfree(data); + +out: + return err; } static void ops_free(const struct pernet_operations *ops, struct net *net) @@ -448,12 +456,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops) static int __register_pernet_operations(struct list_head *list, struct pernet_operations *ops) { - int err = 0; - err = ops_init(ops, &init_net); - if (err) - ops_free(ops, &init_net); - return err; - + return ops_init(ops, &init_net); } static void __unregister_pernet_operations(struct pernet_operations *ops) diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 4d8ce93cd503..77a59980b579 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1931,7 +1931,7 @@ static int pktgen_device_event(struct notifier_block *unused, { struct net_device *dev = ptr; - if (!net_eq(dev_net(dev), &init_net)) + if (!net_eq(dev_net(dev), &init_net) || pktgen_exiting) return NOTIFY_DONE; /* It is OK that we do not hold the group lock right now, @@ -3755,12 +3755,18 @@ static void __exit pg_cleanup(void) { struct pktgen_thread *t; struct list_head *q, *n; + struct list_head list; /* Stop all interfaces & threads */ pktgen_exiting = true; - list_for_each_safe(q, n, &pktgen_threads) { + mutex_lock(&pktgen_thread_lock); + list_splice(&list, &pktgen_threads); + mutex_unlock(&pktgen_thread_lock); + + list_for_each_safe(q, n, &list) { t = list_entry(q, struct pktgen_thread, th_list); + list_del(&t->th_list); kthread_stop(t->tsk); kfree(t); } diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c index 368515885368..840821b90bcd 100644 --- a/net/ieee802154/6lowpan.c +++ b/net/ieee802154/6lowpan.c @@ -1044,6 +1044,24 @@ static void lowpan_dev_free(struct net_device *dev) free_netdev(dev); } +static struct wpan_phy *lowpan_get_phy(const struct net_device *dev) +{ + struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; + return ieee802154_mlme_ops(real_dev)->get_phy(real_dev); +} + +static u16 lowpan_get_pan_id(const struct net_device *dev) +{ + struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; + return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev); +} + +static u16 lowpan_get_short_addr(const struct net_device *dev) +{ + struct net_device *real_dev = lowpan_dev_info(dev)->real_dev; + return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev); +} + static struct header_ops lowpan_header_ops = { .create = lowpan_header_create, }; @@ -1053,6 +1071,12 @@ static const struct net_device_ops lowpan_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, }; +static struct ieee802154_mlme_ops lowpan_mlme = { + .get_pan_id = lowpan_get_pan_id, + .get_phy = lowpan_get_phy, + .get_short_addr = lowpan_get_short_addr, +}; + static void lowpan_setup(struct net_device *dev) { pr_debug("(%s)\n", __func__); @@ -1070,6 +1094,7 @@ static void 
lowpan_setup(struct net_device *dev) dev->netdev_ops = &lowpan_netdev_ops; dev->header_ops = &lowpan_header_ops; + dev->ml_priv = &lowpan_mlme; dev->destructor = lowpan_dev_free; } @@ -1143,6 +1168,8 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev, list_add_tail(&entry->list, &lowpan_devices); mutex_unlock(&lowpan_dev_info(dev)->dev_list_mtx); + spin_lock_init(&flist_lock); + register_netdevice(dev); return 0; @@ -1152,11 +1179,20 @@ static void lowpan_dellink(struct net_device *dev, struct list_head *head) { struct lowpan_dev_info *lowpan_dev = lowpan_dev_info(dev); struct net_device *real_dev = lowpan_dev->real_dev; - struct lowpan_dev_record *entry; - struct lowpan_dev_record *tmp; + struct lowpan_dev_record *entry, *tmp; + struct lowpan_fragment *frame, *tframe; ASSERT_RTNL(); + spin_lock(&flist_lock); + list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) { + del_timer(&frame->timer); + list_del(&frame->list); + dev_kfree_skb(frame->skb); + kfree(frame); + } + spin_unlock(&flist_lock); + mutex_lock(&lowpan_dev_info(dev)->dev_list_mtx); list_for_each_entry_safe(entry, tmp, &lowpan_devices, list) { if (entry->ldev == dev) { diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index bce36f1a37b4..30b88d7b4bd6 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -1370,6 +1370,8 @@ static int check_leaf(struct fib_table *tb, struct trie *t, struct leaf *l, if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos) continue; + if (fi->fib_dead) + continue; if (fa->fa_info->fib_scope < flp->flowi4_scope) continue; fib_alias_accessed(fa); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 8d25a1c557eb..8f8db724bfaf 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -141,7 +141,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, goto rtattr_failure; if (icsk == NULL) { - r->idiag_rqueue = r->idiag_wqueue = 0; + handler->idiag_get_info(sk, r, NULL); goto out; } diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 8bb6adeb62c0..1272a88c2a63 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3243,7 +3243,7 @@ void __init tcp_init(void) { struct sk_buff *skb = NULL; unsigned long limit; - int max_share, cnt; + int max_rshare, max_wshare, cnt; unsigned int i; unsigned long jiffy = jiffies; @@ -3303,15 +3303,16 @@ void __init tcp_init(void) tcp_init_mem(&init_net); /* Set per-socket limits to no more than 1/128 the pressure threshold */ limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); - max_share = min(4UL*1024*1024, limit); + max_wshare = min(4UL*1024*1024, limit); + max_rshare = min(6UL*1024*1024, limit); sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; sysctl_tcp_wmem[1] = 16*1024; - sysctl_tcp_wmem[2] = max(64*1024, max_share); + sysctl_tcp_wmem[2] = max(64*1024, max_wshare); sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; sysctl_tcp_rmem[1] = 87380; - sysctl_tcp_rmem[2] = max(87380, max_share); + sysctl_tcp_rmem[2] = max(87380, max_rshare); pr_info("Hash tables configured (established %u bind %u)\n", tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9944c1d9a218..257b61789eeb 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -85,7 +85,7 @@ int sysctl_tcp_ecn __read_mostly = 2; EXPORT_SYMBOL(sysctl_tcp_ecn); int sysctl_tcp_dsack __read_mostly = 1; int sysctl_tcp_app_win __read_mostly = 31; -int sysctl_tcp_adv_win_scale __read_mostly = 2; +int sysctl_tcp_adv_win_scale __read_mostly = 1; EXPORT_SYMBOL(sysctl_tcp_adv_win_scale); int 
sysctl_tcp_stdurg __read_mostly; @@ -335,6 +335,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) incr = __tcp_grow_window(sk, skb); if (incr) { + incr = max_t(int, incr, 2 * skb->len); tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); inet_csk(sk)->icsk_ack.quick |= 1; @@ -494,7 +495,7 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) goto new_measure; if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) return; - tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1); + tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1); new_measure: tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; @@ -2867,11 +2868,14 @@ static inline void tcp_complete_cwr(struct sock *sk) /* Do not moderate cwnd if it's already undone in cwr or recovery. */ if (tp->undo_marker) { - if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) + if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) { tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); - else /* PRR */ + tp->snd_cwnd_stamp = tcp_time_stamp; + } else if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH) { + /* PRR algorithm. */ tp->snd_cwnd = tp->snd_ssthresh; - tp->snd_cwnd_stamp = tcp_time_stamp; + tp->snd_cwnd_stamp = tcp_time_stamp; + } } tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 376b2cfbb685..7ac6423117ad 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1096,6 +1096,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len) eat = min_t(int, len, skb_headlen(skb)); if (eat) { __skb_pull(skb, eat); + skb->avail_size -= eat; len -= eat; if (!len) return; diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c index 8a949f19deb6..a7f86a3cd502 100644 --- a/net/ipv4/udp_diag.c +++ b/net/ipv4/udp_diag.c @@ -146,9 +146,17 @@ static int udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, return udp_dump_one(&udp_table, in_skb, nlh, req); } +static void udp_diag_get_info(struct sock *sk, struct inet_diag_msg *r, + void *info) +{ + r->idiag_rqueue = sk_rmem_alloc_get(sk); + r->idiag_wqueue = sk_wmem_alloc_get(sk); +} + static const struct inet_diag_handler udp_diag_handler = { .dump = udp_diag_dump, .dump_one = udp_diag_dump_one, + .idiag_get_info = udp_diag_get_info, .idiag_type = IPPROTO_UDP, }; @@ -167,6 +175,7 @@ static int udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr * static const struct inet_diag_handler udplite_diag_handler = { .dump = udplite_diag_dump, .dump_one = udplite_diag_dump_one, + .idiag_get_info = udp_diag_get_info, .idiag_type = IPPROTO_UDPLITE, }; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 6a3bb6077e19..7d5cb975cc6f 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -803,8 +803,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp) ip6_del_rt(rt); rt = NULL; } else if (!(rt->rt6i_flags & RTF_EXPIRES)) { - rt->dst.expires = expires; - rt->rt6i_flags |= RTF_EXPIRES; + rt6_set_expires(rt, expires); } } dst_release(&rt->dst); @@ -1887,11 +1886,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao) rt = NULL; } else if (addrconf_finite_timeout(rt_expires)) { /* not infinity */ - rt->dst.expires = jiffies + rt_expires; - rt->rt6i_flags |= RTF_EXPIRES; + rt6_set_expires(rt, jiffies + rt_expires); } else { - rt->rt6i_flags &= ~RTF_EXPIRES; - rt->dst.expires = 0; + rt6_clean_expires(rt); } } else if (valid_lft) { clock_t expires = 0; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 5b27fbcae346..93717435013e 100644 
--- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -673,11 +673,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, &rt->rt6i_gateway)) { if (!(iter->rt6i_flags & RTF_EXPIRES)) return -EEXIST; - iter->dst.expires = rt->dst.expires; - if (!(rt->rt6i_flags & RTF_EXPIRES)) { - iter->rt6i_flags &= ~RTF_EXPIRES; - iter->dst.expires = 0; - } + if (!(rt->rt6i_flags & RTF_EXPIRES)) + rt6_clean_expires(iter); + else + rt6_set_expires(iter, rt->dst.expires); return -EEXIST; } } diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 3dcdb81ec3e8..176b469322ac 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1264,8 +1264,7 @@ static void ndisc_router_discovery(struct sk_buff *skb) } if (rt) - rt->dst.expires = jiffies + (HZ * lifetime); - + rt6_set_expires(rt, jiffies + (HZ * lifetime)); if (ra_msg->icmph.icmp6_hop_limit) { in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit; if (rt) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 3992e26a6039..bc4888d902b2 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -62,7 +62,7 @@ #include #endif -static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, +static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, const struct in6_addr *dest); static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie); static unsigned int ip6_default_advmss(const struct dst_entry *dst); @@ -285,6 +285,10 @@ static void ip6_dst_destroy(struct dst_entry *dst) rt->rt6i_idev = NULL; in6_dev_put(idev); } + + if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from) + dst_release(dst->from); + if (peer) { rt->rt6i_peer = NULL; inet_putpeer(peer); @@ -329,8 +333,17 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, static __inline__ int rt6_check_expired(const struct rt6_info *rt) { - return (rt->rt6i_flags & RTF_EXPIRES) && - time_after(jiffies, rt->dst.expires); + struct rt6_info *ort = NULL; + + if (rt->rt6i_flags & RTF_EXPIRES) { + if (time_after(jiffies, rt->dst.expires)) + return 1; + } else if (rt->dst.from) { + ort = (struct rt6_info *) rt->dst.from; + return (ort->rt6i_flags & RTF_EXPIRES) && + time_after(jiffies, ort->dst.expires); + } + return 0; } static inline int rt6_need_strict(const struct in6_addr *daddr) @@ -620,12 +633,11 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); if (rt) { - if (!addrconf_finite_timeout(lifetime)) { - rt->rt6i_flags &= ~RTF_EXPIRES; - } else { - rt->dst.expires = jiffies + HZ * lifetime; - rt->rt6i_flags |= RTF_EXPIRES; - } + if (!addrconf_finite_timeout(lifetime)) + rt6_clean_expires(rt); + else + rt6_set_expires(rt, jiffies + HZ * lifetime); + dst_release(&rt->dst); } return 0; @@ -730,7 +742,7 @@ int ip6_ins_rt(struct rt6_info *rt) return __ip6_ins_rt(rt, &info); } -static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort, +static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort, const struct in6_addr *daddr, const struct in6_addr *saddr) { @@ -954,10 +966,10 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori rt->rt6i_idev = ort->rt6i_idev; if (rt->rt6i_idev) in6_dev_hold(rt->rt6i_idev); - rt->dst.expires = 0; rt->rt6i_gateway = ort->rt6i_gateway; - rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; + rt->rt6i_flags = ort->rt6i_flags; + rt6_clean_expires(rt); rt->rt6i_metric = 0; memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key)); @@ -1019,10 +1031,9 @@ static void ip6_link_failure(struct sk_buff *skb) rt = (struct rt6_info *) 
skb_dst(skb); if (rt) { - if (rt->rt6i_flags & RTF_CACHE) { - dst_set_expires(&rt->dst, 0); - rt->rt6i_flags |= RTF_EXPIRES; - } else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) + if (rt->rt6i_flags & RTF_CACHE) + rt6_update_expires(rt, 0); + else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) rt->rt6i_node->fn_sernum = -1; } } @@ -1289,9 +1300,12 @@ int ip6_route_add(struct fib6_config *cfg) } rt->dst.obsolete = -1; - rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ? - jiffies + clock_t_to_jiffies(cfg->fc_expires) : - 0; + + if (cfg->fc_flags & RTF_EXPIRES) + rt6_set_expires(rt, jiffies + + clock_t_to_jiffies(cfg->fc_expires)); + else + rt6_clean_expires(rt); if (cfg->fc_protocol == RTPROT_UNSPEC) cfg->fc_protocol = RTPROT_BOOT; @@ -1736,8 +1750,8 @@ again: features |= RTAX_FEATURE_ALLFRAG; dst_metric_set(&rt->dst, RTAX_FEATURES, features); } - dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); - rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES; + rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires); + rt->rt6i_flags |= RTF_MODIFIED; goto out; } @@ -1765,9 +1779,8 @@ again: * which is 10 mins. After 10 mins the decreased pmtu is expired * and detecting PMTU increase will be automatically happened. */ - dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires); - nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES; - + rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires); + nrt->rt6i_flags |= RTF_DYNAMIC; ip6_ins_rt(nrt); } out: @@ -1799,7 +1812,7 @@ void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *sad * Misc support functions */ -static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, +static struct rt6_info *ip6_rt_copy(struct rt6_info *ort, const struct in6_addr *dest) { struct net *net = dev_net(ort->dst.dev); @@ -1819,10 +1832,14 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort, if (rt->rt6i_idev) in6_dev_hold(rt->rt6i_idev); rt->dst.lastuse = jiffies; - rt->dst.expires = 0; rt->rt6i_gateway = ort->rt6i_gateway; - rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES; + rt->rt6i_flags = ort->rt6i_flags; + if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) == + (RTF_DEFAULT | RTF_ADDRCONF)) + rt6_set_from(rt, ort); + else + rt6_clean_expires(rt); rt->rt6i_metric = 0; #ifdef CONFIG_IPV6_SUBTREES diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 86cfe6005f40..98256cf72f9d 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1383,6 +1383,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, tcp_mtup_init(newsk); tcp_sync_mss(newsk, dst_mtu(dst)); newtp->advmss = dst_metric_advmss(dst); + if (tcp_sk(sk)->rx_opt.user_mss && + tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) + newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; + tcp_initialize_rcv_mss(newsk); if (tcp_rsk(req)->snt_synack) tcp_valid_rtt_meas(newsk, diff --git a/net/key/af_key.c b/net/key/af_key.c index 11dbb2255ccb..7e5d927b576f 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3480,7 +3480,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, /* Addresses to be used by KM for negotiation, if ext is available */ if (k != NULL && (set_sadb_kmaddress(skb, k) < 0)) - return -EINVAL; + goto err; /* selector src */ set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 55670ec3cd0f..6274f0be82b0 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -232,7 +232,7 @@ static void l2tp_ip_close(struct 
sock *sk, long timeout) { write_lock_bh(&l2tp_ip_lock); hlist_del_init(&sk->sk_bind_node); - hlist_del_init(&sk->sk_node); + sk_del_node_init(sk); write_unlock_bh(&l2tp_ip_lock); sk_common_release(sk); } @@ -271,7 +271,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) goto out; - inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; + if (addr->l2tp_addr.s_addr) + inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr; if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST) inet->inet_saddr = 0; /* Use device */ sk_dst_reset(sk); @@ -441,8 +442,9 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m daddr = lip->l2tp_addr.s_addr; } else { + rc = -EDESTADDRREQ; if (sk->sk_state != TCP_ESTABLISHED) - return -EDESTADDRREQ; + goto out; daddr = inet->inet_daddr; connected = 1; diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 33fd8d9f714e..cef7c29214a8 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -457,8 +457,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata, * fall back to HT20 if we don't use or use * the other extension channel */ - if ((channel_type == NL80211_CHAN_HT40MINUS || - channel_type == NL80211_CHAN_HT40PLUS) && + if (!(channel_type == NL80211_CHAN_HT40MINUS || + channel_type == NL80211_CHAN_HT40PLUS) || channel_type != sdata->u.ibss.channel_type) sta_ht_cap_new.cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index d9798a307f20..db8fae51714c 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -1210,7 +1210,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, struct sk_buff *skb); void ieee80211_sta_reset_beacon_monitor(struct ieee80211_sub_if_data *sdata); void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata); -void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata); +void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata); /* IBSS code */ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 401c01f0731e..c20051b7ffcd 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -486,6 +486,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, /* free all potentially still buffered bcast frames */ local->total_ps_buffered -= skb_queue_len(&sdata->u.ap.ps_bc_buf); skb_queue_purge(&sdata->u.ap.ps_bc_buf); + } else if (sdata->vif.type == NL80211_IFTYPE_STATION) { + ieee80211_mgd_stop(sdata); } if (going_down) @@ -644,8 +646,6 @@ static void ieee80211_teardown_sdata(struct net_device *dev) if (ieee80211_vif_is_mesh(&sdata->vif)) mesh_rmc_free(sdata); - else if (sdata->vif.type == NL80211_IFTYPE_STATION) - ieee80211_mgd_teardown(sdata); flushed = sta_info_flush(local, sdata); WARN_ON(flushed); diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index f76da5b3f5c5..20c680bfc3ae 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -3497,7 +3497,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata, return 0; } -void ieee80211_mgd_teardown(struct ieee80211_sub_if_data *sdata) +void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) { struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bcfe8c77c839..d64e285400aa 100644 --- a/net/mac80211/rx.c +++ 
b/net/mac80211/rx.c @@ -103,7 +103,7 @@ static void ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, struct sk_buff *skb, struct ieee80211_rate *rate, - int rtap_len) + int rtap_len, bool has_fcs) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_radiotap_header *rthdr; @@ -134,7 +134,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, } /* IEEE80211_RADIOTAP_FLAGS */ - if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) + if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)) *pos |= IEEE80211_RADIOTAP_F_FCS; if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC)) *pos |= IEEE80211_RADIOTAP_F_BADFCS; @@ -294,7 +294,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb, } /* prepend radiotap information */ - ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); + ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, + true); skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -2571,7 +2572,8 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx, goto out_free_skb; /* prepend radiotap information */ - ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom); + ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom, + false); skb_set_mac_header(skb, 0); skb->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 782a60198df4..e76facc69e95 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1158,7 +1158,8 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, tx->sta = rcu_dereference(sdata->u.vlan.sta); if (!tx->sta && sdata->dev->ieee80211_ptr->use_4addr) return TX_DROP; - } else if (info->flags & IEEE80211_TX_CTL_INJECTED) { + } else if (info->flags & IEEE80211_TX_CTL_INJECTED || + tx->sdata->control_port_protocol == tx->skb->protocol) { tx->sta = sta_info_get_bss(sdata, hdr->addr1); } if (!tx->sta) diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 2555816e7788..00bdb1d9d690 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1924,6 +1924,7 @@ protocol_fail: control_fail: ip_vs_estimator_net_cleanup(net); estimator_fail: + net->ipvs = NULL; return -ENOMEM; } @@ -1936,6 +1937,7 @@ static void __net_exit __ip_vs_cleanup(struct net *net) ip_vs_control_net_cleanup(net); ip_vs_estimator_net_cleanup(net); IP_VS_DBG(2, "ipvs netns %d released\n", net_ipvs(net)->gen); + net->ipvs = NULL; } static void __net_exit __ip_vs_dev_cleanup(struct net *net) @@ -1993,10 +1995,18 @@ static int __init ip_vs_init(void) goto cleanup_dev; } + ret = ip_vs_register_nl_ioctl(); + if (ret < 0) { + pr_err("can't register netlink/ioctl.\n"); + goto cleanup_hooks; + } + pr_info("ipvs loaded.\n"); return ret; +cleanup_hooks: + nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); cleanup_dev: unregister_pernet_device(&ipvs_core_dev_ops); cleanup_sub: @@ -2012,6 +2022,7 @@ exit: static void __exit ip_vs_cleanup(void) { + ip_vs_unregister_nl_ioctl(); nf_unregister_hooks(ip_vs_ops, ARRAY_SIZE(ip_vs_ops)); unregister_pernet_device(&ipvs_core_dev_ops); unregister_pernet_subsys(&ipvs_core_ops); /* free ip_vs struct */ diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index b3afe189af61..f5589987fc80 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -3680,7 +3680,7 @@ int __net_init ip_vs_control_net_init_sysctl(struct net *net) return 0; } -void 
__net_init ip_vs_control_net_cleanup_sysctl(struct net *net) +void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -3692,7 +3692,7 @@ void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) #else int __net_init ip_vs_control_net_init_sysctl(struct net *net) { return 0; } -void __net_init ip_vs_control_net_cleanup_sysctl(struct net *net) { } +void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net) { } #endif @@ -3750,21 +3750,10 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net) free_percpu(ipvs->tot_stats.cpustats); } -int __init ip_vs_control_init(void) +int __init ip_vs_register_nl_ioctl(void) { - int idx; int ret; - EnterFunction(2); - - /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */ - for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { - INIT_LIST_HEAD(&ip_vs_svc_table[idx]); - INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); - } - - smp_wmb(); /* Do we really need it now ? */ - ret = nf_register_sockopt(&ip_vs_sockopts); if (ret) { pr_err("cannot register sockopt.\n"); @@ -3776,28 +3765,47 @@ int __init ip_vs_control_init(void) pr_err("cannot register Generic Netlink interface.\n"); goto err_genl; } - - ret = register_netdevice_notifier(&ip_vs_dst_notifier); - if (ret < 0) - goto err_notf; - - LeaveFunction(2); return 0; -err_notf: - ip_vs_genl_unregister(); err_genl: nf_unregister_sockopt(&ip_vs_sockopts); err_sock: return ret; } +void ip_vs_unregister_nl_ioctl(void) +{ + ip_vs_genl_unregister(); + nf_unregister_sockopt(&ip_vs_sockopts); +} + +int __init ip_vs_control_init(void) +{ + int idx; + int ret; + + EnterFunction(2); + + /* Initialize svc_table, ip_vs_svc_fwm_table, rs_table */ + for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { + INIT_LIST_HEAD(&ip_vs_svc_table[idx]); + INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]); + } + + smp_wmb(); /* Do we really need it now ? 
*/ + + ret = register_netdevice_notifier(&ip_vs_dst_notifier); + if (ret < 0) + return ret; + + LeaveFunction(2); + return 0; +} + void ip_vs_control_cleanup(void) { EnterFunction(2); unregister_netdevice_notifier(&ip_vs_dst_notifier); - ip_vs_genl_unregister(); - nf_unregister_sockopt(&ip_vs_sockopts); LeaveFunction(2); } diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 538d74ee4f68..e39f693dd3e4 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -439,6 +439,8 @@ static int __net_init __ip_vs_ftp_init(struct net *net) struct ip_vs_app *app; struct netns_ipvs *ipvs = net_ipvs(net); + if (!ipvs) + return -ENOENT; app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL); if (!app) return -ENOMEM; diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c index 0f16283fd058..caa43704e55e 100644 --- a/net/netfilter/ipvs/ip_vs_lblc.c +++ b/net/netfilter/ipvs/ip_vs_lblc.c @@ -551,6 +551,9 @@ static int __net_init __ip_vs_lblc_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); + if (!ipvs) + return -ENOENT; + if (!net_eq(net, &init_net)) { ipvs->lblc_ctl_table = kmemdup(vs_vars_table, sizeof(vs_vars_table), diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c index eec797f8cce7..548bf37aa29e 100644 --- a/net/netfilter/ipvs/ip_vs_lblcr.c +++ b/net/netfilter/ipvs/ip_vs_lblcr.c @@ -745,6 +745,9 @@ static int __net_init __ip_vs_lblcr_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); + if (!ipvs) + return -ENOENT; + if (!net_eq(net, &init_net)) { ipvs->lblcr_ctl_table = kmemdup(vs_vars_table, sizeof(vs_vars_table), diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c index f843a8833250..ed835e67a07e 100644 --- a/net/netfilter/ipvs/ip_vs_proto.c +++ b/net/netfilter/ipvs/ip_vs_proto.c @@ -59,9 +59,6 @@ static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp) return 0; } -#if defined(CONFIG_IP_VS_PROTO_TCP) || defined(CONFIG_IP_VS_PROTO_UDP) || \ - defined(CONFIG_IP_VS_PROTO_SCTP) || defined(CONFIG_IP_VS_PROTO_AH) || \ - defined(CONFIG_IP_VS_PROTO_ESP) /* * register an ipvs protocols netns related data */ @@ -81,12 +78,18 @@ register_ip_vs_proto_netns(struct net *net, struct ip_vs_protocol *pp) ipvs->proto_data_table[hash] = pd; atomic_set(&pd->appcnt, 0); /* Init app counter */ - if (pp->init_netns != NULL) - pp->init_netns(net, pd); + if (pp->init_netns != NULL) { + int ret = pp->init_netns(net, pd); + if (ret) { + /* unlink and free proto data */ + ipvs->proto_data_table[hash] = pd->next; + kfree(pd); + return ret; + } + } return 0; } -#endif /* * unregister an ipvs protocol @@ -316,22 +319,35 @@ ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp, */ int __net_init ip_vs_protocol_net_init(struct net *net) { + int i, ret; + static struct ip_vs_protocol *protos[] = { #ifdef CONFIG_IP_VS_PROTO_TCP - register_ip_vs_proto_netns(net, &ip_vs_protocol_tcp); + &ip_vs_protocol_tcp, #endif #ifdef CONFIG_IP_VS_PROTO_UDP - register_ip_vs_proto_netns(net, &ip_vs_protocol_udp); + &ip_vs_protocol_udp, #endif #ifdef CONFIG_IP_VS_PROTO_SCTP - register_ip_vs_proto_netns(net, &ip_vs_protocol_sctp); + &ip_vs_protocol_sctp, #endif #ifdef CONFIG_IP_VS_PROTO_AH - register_ip_vs_proto_netns(net, &ip_vs_protocol_ah); + &ip_vs_protocol_ah, #endif #ifdef CONFIG_IP_VS_PROTO_ESP - register_ip_vs_proto_netns(net, &ip_vs_protocol_esp); + &ip_vs_protocol_esp, #endif + }; + + for (i = 0; i < ARRAY_SIZE(protos); i++) { + ret =
register_ip_vs_proto_netns(net, protos[i]); + if (ret < 0) + goto cleanup; + } return 0; + +cleanup: + ip_vs_protocol_net_cleanup(net); + return ret; } void __net_exit ip_vs_protocol_net_cleanup(struct net *net) diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index 1fbf7a2816f5..9f3fb751c491 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -1090,7 +1090,7 @@ out: * timeouts is netns related now. * --------------------------------------------- */ -static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd) +static int __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -1098,6 +1098,9 @@ static void __ip_vs_sctp_init(struct net *net, struct ip_vs_proto_data *pd) spin_lock_init(&ipvs->sctp_app_lock); pd->timeout_table = ip_vs_create_timeout_table((int *)sctp_timeouts, sizeof(sctp_timeouts)); + if (!pd->timeout_table) + return -ENOMEM; + return 0; } static void __ip_vs_sctp_exit(struct net *net, struct ip_vs_proto_data *pd) diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c index ef8641f7af83..cd609cc62721 100644 --- a/net/netfilter/ipvs/ip_vs_proto_tcp.c +++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c @@ -677,7 +677,7 @@ void ip_vs_tcp_conn_listen(struct net *net, struct ip_vs_conn *cp) * timeouts is netns related now. * --------------------------------------------- */ -static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd) +static int __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -685,7 +685,10 @@ static void __ip_vs_tcp_init(struct net *net, struct ip_vs_proto_data *pd) spin_lock_init(&ipvs->tcp_app_lock); pd->timeout_table = ip_vs_create_timeout_table((int *)tcp_timeouts, sizeof(tcp_timeouts)); + if (!pd->timeout_table) + return -ENOMEM; pd->tcp_state_table = tcp_states; + return 0; } static void __ip_vs_tcp_exit(struct net *net, struct ip_vs_proto_data *pd) diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c index f4b7262896bb..2fedb2dcb3d1 100644 --- a/net/netfilter/ipvs/ip_vs_proto_udp.c +++ b/net/netfilter/ipvs/ip_vs_proto_udp.c @@ -467,7 +467,7 @@ udp_state_transition(struct ip_vs_conn *cp, int direction, cp->timeout = pd->timeout_table[IP_VS_UDP_S_NORMAL]; } -static void __udp_init(struct net *net, struct ip_vs_proto_data *pd) +static int __udp_init(struct net *net, struct ip_vs_proto_data *pd) { struct netns_ipvs *ipvs = net_ipvs(net); @@ -475,6 +475,9 @@ static void __udp_init(struct net *net, struct ip_vs_proto_data *pd) spin_lock_init(&ipvs->udp_app_lock); pd->timeout_table = ip_vs_create_timeout_table((int *)udp_timeouts, sizeof(udp_timeouts)); + if (!pd->timeout_table) + return -ENOMEM; + return 0; } static void __udp_exit(struct net *net, struct ip_vs_proto_data *pd) diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c index 59530e93fa58..3746d8b9a478 100644 --- a/net/netfilter/xt_CT.c +++ b/net/netfilter/xt_CT.c @@ -227,7 +227,7 @@ static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par) } #ifdef CONFIG_NF_CONNTRACK_TIMEOUT - if (info->timeout) { + if (info->timeout[0]) { typeof(nf_ct_timeout_find_get_hook) timeout_find_get; struct nf_conn_timeout *timeout_ext; diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e44e631ea952..777716bc80f7 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -421,6 +421,19 
@@ static int validate_sample(const struct nlattr *attr, return validate_actions(actions, key, depth + 1); } +static int validate_tp_port(const struct sw_flow_key *flow_key) +{ + if (flow_key->eth.type == htons(ETH_P_IP)) { + if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst) + return 0; + } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { + if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst) + return 0; + } + + return -EINVAL; +} + static int validate_set(const struct nlattr *a, const struct sw_flow_key *flow_key) { @@ -462,18 +475,13 @@ static int validate_set(const struct nlattr *a, if (flow_key->ip.proto != IPPROTO_TCP) return -EINVAL; - if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst) - return -EINVAL; - - break; + return validate_tp_port(flow_key); case OVS_KEY_ATTR_UDP: if (flow_key->ip.proto != IPPROTO_UDP) return -EINVAL; - if (!flow_key->ipv4.tp.src || !flow_key->ipv4.tp.dst) - return -EINVAL; - break; + return validate_tp_port(flow_key); default: return -EINVAL; @@ -1641,10 +1649,9 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) reply = ovs_vport_cmd_build_info(vport, info->snd_pid, info->snd_seq, OVS_VPORT_CMD_NEW); if (IS_ERR(reply)) { - err = PTR_ERR(reply); netlink_set_err(init_net.genl_sock, 0, - ovs_dp_vport_multicast_group.id, err); - return 0; + ovs_dp_vport_multicast_group.id, PTR_ERR(reply)); + goto exit_unlock; } genl_notify(reply, genl_info_net(info), info->snd_pid, diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 1252c3081ef1..2a11ec2383ee 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -183,7 +183,8 @@ void ovs_flow_used(struct sw_flow *flow, struct sk_buff *skb) u8 tcp_flags = 0; if (flow->key.eth.type == htons(ETH_P_IP) && - flow->key.ip.proto == IPPROTO_TCP) { + flow->key.ip.proto == IPPROTO_TCP && + likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) { u8 *tcp = (u8 *)tcp_hdr(skb); tcp_flags = *(tcp + TCP_FLAGS_OFFSET) & TCP_FLAG_MASK; } diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c index 9b9a85ecc4c7..bf5cf69c820a 100644 --- a/net/phonet/pn_dev.c +++ b/net/phonet/pn_dev.c @@ -331,23 +331,6 @@ static int __net_init phonet_init_net(struct net *net) static void __net_exit phonet_exit_net(struct net *net) { - struct phonet_net *pnn = phonet_pernet(net); - struct net_device *dev; - unsigned i; - - rtnl_lock(); - for_each_netdev(net, dev) - phonet_device_destroy(dev); - - for (i = 0; i < 64; i++) { - dev = pnn->routes.table[i]; - if (dev) { - rtm_phonet_notify(RTM_DELROUTE, dev, i); - dev_put(dev); - } - } - rtnl_unlock(); - proc_net_remove(net, "phonet"); } @@ -361,7 +344,7 @@ static struct pernet_operations phonet_net_ops = { /* Initialize Phonet devices list */ int __init phonet_device_init(void) { - int err = register_pernet_device(&phonet_net_ops); + int err = register_pernet_subsys(&phonet_net_ops); if (err) return err; @@ -377,7 +360,7 @@ void phonet_device_exit(void) { rtnl_unregister_all(PF_PHONET); unregister_netdevice_notifier(&phonet_device_notifier); - unregister_pernet_device(&phonet_net_ops); + unregister_pernet_subsys(&phonet_net_ops); proc_net_remove(&init_net, "pnresource"); } diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c index 0b15236be7b6..8179494c269a 100644 --- a/net/sched/sch_gred.c +++ b/net/sched/sch_gred.c @@ -565,11 +565,8 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) opt.packets = q->packetsin; opt.bytesin = q->bytesin; - if (gred_wred_mode(table)) { - q->vars.qidlestart = - 
table->tab[table->def]->vars.qidlestart; - q->vars.qavg = table->tab[table->def]->vars.qavg; - } + if (gred_wred_mode(table)) + gred_load_wred_set(table, q); opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg); diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 5da548fa7ae9..ebd22966f748 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -408,10 +408,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { if (!(skb = skb_unshare(skb, GFP_ATOMIC)) || (skb->ip_summed == CHECKSUM_PARTIAL && - skb_checksum_help(skb))) { - sch->qstats.drops++; - return NET_XMIT_DROP; - } + skb_checksum_help(skb))) + return qdisc_drop(skb, sch); skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); } diff --git a/net/sctp/output.c b/net/sctp/output.c index 817174eb5f41..8fc4dcd294ab 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c @@ -377,9 +377,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) */ skb_set_owner_w(nskb, sk); - /* The 'obsolete' field of dst is set to 2 when a dst is freed. */ - if (!dst || (dst->obsolete > 1)) { - dst_release(dst); + if (!sctp_transport_dst_check(tp)) { sctp_transport_route(tp, NULL, sctp_sk(sk)); if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) { sctp_assoc_sync_pmtu(asoc); diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 3889330b7b04..b026ba0c6992 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c @@ -226,23 +226,6 @@ void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk) transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT; } -/* this is a complete rip-off from __sk_dst_check - * the cookie is always 0 since this is how it's used in the - * pmtu code - */ -static struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t) -{ - struct dst_entry *dst = t->dst; - - if (dst && dst->obsolete && dst->ops->check(dst, 0) == NULL) { - dst_release(t->dst); - t->dst = NULL; - return NULL; - } - - return dst; -} - void sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu) { struct dst_entry *dst; diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index ca8cad8251c7..782bfe1b6465 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c @@ -242,12 +242,13 @@ EXPORT_SYMBOL_GPL(gss_mech_get_by_pseudoflavor); int gss_mech_list_pseudoflavors(rpc_authflavor_t *array_ptr) { struct gss_api_mech *pos = NULL; - int i = 0; + int j, i = 0; spin_lock(&registered_mechs_lock); list_for_each_entry(pos, &registered_mechs, gm_list) { - array_ptr[i] = pos->gm_pfs->pseudoflavor; - i++; + for (j=0; j < pos->gm_pf_num; j++) { + array_ptr[i++] = pos->gm_pfs[j].pseudoflavor; + } } spin_unlock(&registered_mechs_lock); return i; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 67972462a543..adf2990acebf 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -176,16 +176,22 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name) return 0; } -static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, - struct super_block *sb) +static inline int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event) +{ + if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) || + ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry)) + return 1; + return 0; +} + +static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event, + struct super_block *sb) { struct dentry *dentry; int err = 0; switch (event) { case 
RPC_PIPEFS_MOUNT: - if (clnt->cl_program->pipe_dir_name == NULL) - break; dentry = rpc_setup_pipedir_sb(sb, clnt, clnt->cl_program->pipe_dir_name); BUG_ON(dentry == NULL); @@ -208,6 +214,20 @@ static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, return err; } +static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, + struct super_block *sb) +{ + int error = 0; + + for (;; clnt = clnt->cl_parent) { + if (!rpc_clnt_skip_event(clnt, event)) + error = __rpc_clnt_handle_event(clnt, event, sb); + if (error || clnt == clnt->cl_parent) + break; + } + return error; +} + static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event) { struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); @@ -215,10 +235,12 @@ static struct rpc_clnt *rpc_get_client_for_event(struct net *net, int event) spin_lock(&sn->rpc_client_lock); list_for_each_entry(clnt, &sn->all_clients, cl_clients) { - if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) || - ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry)) + if (clnt->cl_program->pipe_dir_name == NULL) + break; + if (rpc_clnt_skip_event(clnt, event)) + continue; + if (atomic_inc_not_zero(&clnt->cl_count) == 0) continue; - atomic_inc(&clnt->cl_count); spin_unlock(&sn->rpc_client_lock); return clnt; } @@ -257,6 +279,14 @@ void rpc_clients_notifier_unregister(void) return rpc_pipefs_notifier_unregister(&rpc_clients_block); } +static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename) +{ + clnt->cl_nodelen = strlen(nodename); + if (clnt->cl_nodelen > UNX_MAXNODENAME) + clnt->cl_nodelen = UNX_MAXNODENAME; + memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen); +} + static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt) { const struct rpc_program *program = args->program; @@ -337,10 +367,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru } /* save the nodename */ - clnt->cl_nodelen = strlen(init_utsname()->nodename); - if (clnt->cl_nodelen > UNX_MAXNODENAME) - clnt->cl_nodelen = UNX_MAXNODENAME; - memcpy(clnt->cl_nodename, init_utsname()->nodename, clnt->cl_nodelen); + rpc_clnt_set_nodename(clnt, utsname()->nodename); rpc_register_client(clnt); return clnt; @@ -499,6 +526,7 @@ rpc_clone_client(struct rpc_clnt *clnt) err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name); if (err != 0) goto out_no_path; + rpc_clnt_set_nodename(new, utsname()->nodename); if (new->cl_auth) atomic_inc(&new->cl_auth->au_count); atomic_inc(&clnt->cl_count); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 0af37fc46818..3b62cf288031 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1126,19 +1126,20 @@ rpc_fill_super(struct super_block *sb, void *data, int silent) return -ENOMEM; dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n", net, NET_NAME(net)); + sn->pipefs_sb = sb; err = blocking_notifier_call_chain(&rpc_pipefs_notifier_list, RPC_PIPEFS_MOUNT, sb); if (err) goto err_depopulate; sb->s_fs_info = get_net(net); - sn->pipefs_sb = sb; return 0; err_depopulate: blocking_notifier_call_chain(&rpc_pipefs_notifier_list, RPC_PIPEFS_UMOUNT, sb); + sn->pipefs_sb = NULL; __rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF); return err; } diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 8adfc88e793a..3d6498af9adc 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c @@ -75,19 +75,20 @@ static struct pernet_operations sunrpc_net_ops = { static int __init 
init_sunrpc(void) { - int err = register_rpc_pipefs(); + int err = rpc_init_mempool(); if (err) goto out; - err = rpc_init_mempool(); - if (err) - goto out2; err = rpcauth_init_module(); if (err) - goto out3; + goto out2; cache_initialize(); err = register_pernet_subsys(&sunrpc_net_ops); + if (err) + goto out3; + + err = register_rpc_pipefs(); if (err) goto out4; #ifdef RPC_DEBUG @@ -98,11 +99,11 @@ init_sunrpc(void) return 0; out4: - rpcauth_remove_module(); + unregister_pernet_subsys(&sunrpc_net_ops); out3: - rpc_destroy_mempool(); + rpcauth_remove_module(); out2: - unregister_rpc_pipefs(); + rpc_destroy_mempool(); out: return err; } diff --git a/net/wireless/util.c b/net/wireless/util.c index 1b7a08df933c..957f25621617 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -989,7 +989,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev, if (rdev->wiphy.software_iftypes & BIT(iftype)) continue; for (j = 0; j < c->n_limits; j++) { - if (!(limits[j].types & iftype)) + if (!(limits[j].types & BIT(iftype))) continue; if (limits[j].max < num[iftype]) goto cont; diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index 8e730ccc3f2b..44ddaa542db6 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c @@ -1100,6 +1100,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info, if (!sym->st_shndx || get_secindex(info, sym) >= info->num_sections) return; + /* We're looking for an object */ + if (ELF_ST_TYPE(sym->st_info) != STT_OBJECT) + return; + /* All our symbols are of form __mod_XXX_device_table. */ name = strstr(symname, "__mod_"); if (!name) diff --git a/sound/core/vmaster.c b/sound/core/vmaster.c index 14a286a7bf2b..857586135d18 100644 --- a/sound/core/vmaster.c +++ b/sound/core/vmaster.c @@ -419,6 +419,7 @@ EXPORT_SYMBOL(snd_ctl_make_virtual_master); * snd_ctl_add_vmaster_hook - Add a hook to a vmaster control * @kcontrol: vmaster kctl element * @hook: the hook function + * @private_data: the private_data pointer to be saved * * Adds the given hook to the vmaster control element so that it's called * at each time when the value is changed. diff --git a/sound/last.c b/sound/last.c index bdd0857b8871..7ffc182e0844 100644 --- a/sound/last.c +++ b/sound/last.c @@ -38,4 +38,4 @@ static int __init alsa_sound_last_init(void) return 0; } -__initcall(alsa_sound_last_init); +late_initcall_sync(alsa_sound_last_init); diff --git a/sound/pci/echoaudio/echoaudio_dsp.c b/sound/pci/echoaudio/echoaudio_dsp.c index 64417a733220..d8c670c9d62c 100644 --- a/sound/pci/echoaudio/echoaudio_dsp.c +++ b/sound/pci/echoaudio/echoaudio_dsp.c @@ -475,7 +475,7 @@ static int load_firmware(struct echoaudio *chip) const struct firmware *fw; int box_type, err; - if (snd_BUG_ON(!chip->dsp_code_to_load || !chip->comm_page)) + if (snd_BUG_ON(!chip->comm_page)) return -EPERM; /* See if the ASIC is present and working - only if the DSP is already loaded */ diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 7a8fcc4c15f8..841475cc13b6 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -5444,10 +5444,6 @@ int snd_hda_suspend(struct hda_bus *bus) list_for_each_entry(codec, &bus->codec_list, list) { if (hda_codec_is_power_on(codec)) hda_call_codec_suspend(codec); - else /* forcibly change the power to D3 even if not used */ - hda_set_power_state(codec, - codec->afg ? 
codec->afg : codec->mfg, - AC_PWRST_D3); if (codec->patch_ops.post_suspend) codec->patch_ops.post_suspend(codec); } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index c19e71a94e1b..1f350522bed4 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -783,11 +783,13 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, { struct azx *chip = bus->private_data; unsigned long timeout; + unsigned long loopcounter; int do_poll = 0; again: timeout = jiffies + msecs_to_jiffies(1000); - for (;;) { + + for (loopcounter = 0;; loopcounter++) { if (chip->polling_mode || do_poll) { spin_lock_irq(&chip->reg_lock); azx_update_rirb(chip); @@ -803,7 +805,7 @@ static unsigned int azx_rirb_get_response(struct hda_bus *bus, } if (time_after(jiffies, timeout)) break; - if (bus->needs_damn_long_delay) + if (bus->needs_damn_long_delay || loopcounter > 3000) msleep(2); /* temporary workaround */ else { udelay(10); @@ -2351,6 +2353,17 @@ static void azx_power_notify(struct hda_bus *bus) * power management */ +static int snd_hda_codecs_inuse(struct hda_bus *bus) +{ + struct hda_codec *codec; + + list_for_each_entry(codec, &bus->codec_list, list) { + if (snd_hda_codec_needs_resume(codec)) + return 1; + } + return 0; +} + static int azx_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); @@ -2397,7 +2410,8 @@ static int azx_resume(struct pci_dev *pci) return -EIO; azx_init_pci(chip); - azx_init_chip(chip, 1); + if (snd_hda_codecs_inuse(chip->bus)) + azx_init_chip(chip, 1); snd_hda_resume(chip->bus); snd_power_change_state(card, SNDRV_CTL_POWER_D0); diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index a36488d94aaa..d906c5b74cf0 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c @@ -3971,9 +3971,14 @@ static void cx_auto_init_output(struct hda_codec *codec) int i; mute_outputs(codec, spec->multiout.num_dacs, spec->multiout.dac_nids); - for (i = 0; i < cfg->hp_outs; i++) + for (i = 0; i < cfg->hp_outs; i++) { + unsigned int val = PIN_OUT; + if (snd_hda_query_pin_caps(codec, cfg->hp_pins[i]) & + AC_PINCAP_HP_DRV) + val |= AC_PINCTL_HP_EN; snd_hda_codec_write(codec, cfg->hp_pins[i], 0, - AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP); + AC_VERB_SET_PIN_WIDGET_CONTROL, val); + } mute_outputs(codec, cfg->hp_outs, cfg->hp_pins); mute_outputs(codec, cfg->line_outs, cfg->line_out_pins); mute_outputs(codec, cfg->speaker_outs, cfg->speaker_pins); @@ -4391,8 +4396,10 @@ static void apply_pin_fixup(struct hda_codec *codec, enum { CXT_PINCFG_LENOVO_X200, + CXT_PINCFG_LENOVO_TP410, }; +/* ThinkPad X200 & co with cxt5051 */ static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = { { 0x16, 0x042140ff }, /* HP (seq# overridden) */ { 0x17, 0x21a11000 }, /* dock-mic */ @@ -4401,15 +4408,33 @@ static const struct cxt_pincfg cxt_pincfg_lenovo_x200[] = { {} }; -static const struct cxt_pincfg *cxt_pincfg_tbl[] = { - [CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200, +/* ThinkPad 410/420/510/520, X201 & co with cxt5066 */ +static const struct cxt_pincfg cxt_pincfg_lenovo_tp410[] = { + { 0x19, 0x042110ff }, /* HP (seq# overridden) */ + { 0x1a, 0x21a190f0 }, /* dock-mic */ + { 0x1c, 0x212140ff }, /* dock-HP */ + {} }; -static const struct snd_pci_quirk cxt_fixups[] = { +static const struct cxt_pincfg *cxt_pincfg_tbl[] = { + [CXT_PINCFG_LENOVO_X200] = cxt_pincfg_lenovo_x200, + [CXT_PINCFG_LENOVO_TP410] = cxt_pincfg_lenovo_tp410, +}; + +static const struct snd_pci_quirk cxt5051_fixups[] = { 
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo X200", CXT_PINCFG_LENOVO_X200), {} }; +static const struct snd_pci_quirk cxt5066_fixups[] = { + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), + SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410), + SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410), + SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410), + SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410), + {} +}; + /* add "fake" mute amp-caps to DACs on cx5051 so that mixer mute switches * can be created (bko#42825) */ @@ -4446,13 +4471,13 @@ static int patch_conexant_auto(struct hda_codec *codec) case 0x14f15051: add_cx5051_fake_mutes(codec); codec->pin_amp_workaround = 1; + apply_pin_fixup(codec, cxt5051_fixups, cxt_pincfg_tbl); break; default: codec->pin_amp_workaround = 1; + apply_pin_fixup(codec, cxt5066_fixups, cxt_pincfg_tbl); } - apply_pin_fixup(codec, cxt_fixups, cxt_pincfg_tbl); - /* Show mute-led control only on HP laptops * This is a sort of white-list: on HP laptops, EAPD corresponds * only to the mute-LED without actualy amp function. Meanwhile, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2508f8109f11..7810913d07a0 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -1445,6 +1445,13 @@ enum { ALC_FIXUP_ACT_BUILD, }; +static void alc_apply_pincfgs(struct hda_codec *codec, + const struct alc_pincfg *cfg) +{ + for (; cfg->nid; cfg++) + snd_hda_codec_set_pincfg(codec, cfg->nid, cfg->val); +} + static void alc_apply_fixup(struct hda_codec *codec, int action) { struct alc_spec *spec = codec->spec; @@ -1478,9 +1485,7 @@ static void alc_apply_fixup(struct hda_codec *codec, int action) snd_printdd(KERN_INFO "hda_codec: %s: " "Apply pincfg for %s\n", codec->chip_name, modelname); - for (; cfg->nid; cfg++) - snd_hda_codec_set_pincfg(codec, cfg->nid, - cfg->val); + alc_apply_pincfgs(codec, cfg); break; case ALC_FIXUP_VERBS: if (action != ALC_FIXUP_ACT_PROBE || !fix->v.verbs) @@ -4861,6 +4866,7 @@ enum { ALC260_FIXUP_GPIO1_TOGGLE, ALC260_FIXUP_REPLACER, ALC260_FIXUP_HP_B1900, + ALC260_FIXUP_KN1, }; static void alc260_gpio1_automute(struct hda_codec *codec) @@ -4888,6 +4894,36 @@ static void alc260_fixup_gpio1_toggle(struct hda_codec *codec, } } +static void alc260_fixup_kn1(struct hda_codec *codec, + const struct alc_fixup *fix, int action) +{ + struct alc_spec *spec = codec->spec; + static const struct alc_pincfg pincfgs[] = { + { 0x0f, 0x02214000 }, /* HP/speaker */ + { 0x12, 0x90a60160 }, /* int mic */ + { 0x13, 0x02a19000 }, /* ext mic */ + { 0x18, 0x01446000 }, /* SPDIF out */ + /* disable bogus I/O pins */ + { 0x10, 0x411111f0 }, + { 0x11, 0x411111f0 }, + { 0x14, 0x411111f0 }, + { 0x15, 0x411111f0 }, + { 0x16, 0x411111f0 }, + { 0x17, 0x411111f0 }, + { 0x19, 0x411111f0 }, + { } + }; + + switch (action) { + case ALC_FIXUP_ACT_PRE_PROBE: + alc_apply_pincfgs(codec, pincfgs); + break; + case ALC_FIXUP_ACT_PROBE: + spec->init_amp = ALC_INIT_NONE; + break; + } +} + static const struct alc_fixup alc260_fixups[] = { [ALC260_FIXUP_HP_DC5750] = { .type = ALC_FIXUP_PINS, @@ -4938,7 +4974,11 @@ static const struct alc_fixup alc260_fixups[] = { .v.func = alc260_fixup_gpio1_toggle, .chained = true, .chain_id = ALC260_FIXUP_COEF, - } + }, + [ALC260_FIXUP_KN1] = { + .type = ALC_FIXUP_FUNC, + .v.func = alc260_fixup_kn1, + }, }; static const struct snd_pci_quirk alc260_fixup_tbl[] = { @@ -4948,6 +4988,7 @@ static const struct snd_pci_quirk 
alc260_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x280a, "HP dc5750", ALC260_FIXUP_HP_DC5750), SND_PCI_QUIRK(0x103c, 0x30ba, "HP Presario B1900", ALC260_FIXUP_HP_B1900), SND_PCI_QUIRK(0x1509, 0x4540, "Favorit 100XS", ALC260_FIXUP_GPIO1), + SND_PCI_QUIRK(0x152d, 0x0729, "Quanta KN1", ALC260_FIXUP_KN1), SND_PCI_QUIRK(0x161f, 0x2057, "Replacer 672V", ALC260_FIXUP_REPLACER), SND_PCI_QUIRK(0x1631, 0xc017, "PB V7900", ALC260_FIXUP_COEF), {} @@ -5364,6 +5405,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1025, 0x0142, "Acer Aspire 7730G", ALC882_FIXUP_ACER_ASPIRE_4930G), SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), + SND_PCI_QUIRK(0x1025, 0x021e, "Acer Aspire 5739G", + ALC882_FIXUP_ACER_ASPIRE_4930G), SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE), SND_PCI_QUIRK(0x1025, 0x026b, "Acer Aspire 8940G", ALC882_FIXUP_ACER_ASPIRE_8930G), SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736), @@ -5397,6 +5440,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), + SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3", ALC889_FIXUP_CD), SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), @@ -5597,13 +5641,13 @@ static int patch_alc262(struct hda_codec *codec) snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_PROC_COEF, tmp | 0x80); } #endif - alc_auto_parse_customize_define(codec); - alc_fix_pll_init(codec, 0x20, 0x0a, 10); alc_pick_fixup(codec, NULL, alc262_fixup_tbl, alc262_fixups); alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE); + alc_auto_parse_customize_define(codec); + /* automatic parse from the BIOS config */ err = alc262_parse_auto_config(codec); if (err < 0) @@ -6068,6 +6112,7 @@ static const struct alc_fixup alc269_fixups[] = { static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_MIC2_MUTE_LED), + SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_DMIC), SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), @@ -6207,8 +6252,6 @@ static int patch_alc269(struct hda_codec *codec) spec->mixer_nid = 0x0b; - alc_auto_parse_customize_define(codec); - err = alc_codec_rename_from_preset(codec); if (err < 0) goto error; @@ -6241,6 +6284,8 @@ static int patch_alc269(struct hda_codec *codec) alc269_fixup_tbl, alc269_fixups); alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE); + alc_auto_parse_customize_define(codec); + /* automatic parse from the BIOS config */ err = alc269_parse_auto_config(codec); if (err < 0) @@ -6817,8 +6862,6 @@ static int patch_alc662(struct hda_codec *codec) /* handle multiple HPs as is */ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP; - alc_auto_parse_customize_define(codec); - alc_fix_pll_init(codec, 0x20, 0x04, 15); err = alc_codec_rename_from_preset(codec); @@ -6835,6 +6878,9 @@ static int patch_alc662(struct hda_codec *codec) alc_pick_fixup(codec, alc662_fixup_models, alc662_fixup_tbl, alc662_fixups); alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE); + + alc_auto_parse_customize_define(codec); + /* automatic parse from the BIOS config */ err = 
alc662_parse_auto_config(codec); if (err < 0) diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 33a9946b492c..4742cac26aa9 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -5063,12 +5063,11 @@ static void stac92xx_update_led_status(struct hda_codec *codec, int enabled) if (spec->gpio_led_polarity) muted = !muted; - /*polarity defines *not* muted state level*/ if (!spec->vref_mute_led_nid) { if (muted) - spec->gpio_data &= ~spec->gpio_led; /* orange */ + spec->gpio_data |= spec->gpio_led; else - spec->gpio_data |= spec->gpio_led; /* white */ + spec->gpio_data &= ~spec->gpio_led; stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data); } else { diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c index b68cdec03b9e..0b2aea2ce172 100644 --- a/sound/pci/rme9652/hdsp.c +++ b/sound/pci/rme9652/hdsp.c @@ -5170,6 +5170,7 @@ static int snd_hdsp_create_hwdep(struct snd_card *card, struct hdsp *hdsp) strcpy(hw->name, "HDSP hwdep interface"); hw->ops.ioctl = snd_hdsp_hwdep_ioctl; + hw->ops.ioctl_compat = snd_hdsp_hwdep_ioctl; return 0; } diff --git a/sound/soc/blackfin/bf5xx-ssm2602.c b/sound/soc/blackfin/bf5xx-ssm2602.c index df3ac73f8778..b39ad356b92b 100644 --- a/sound/soc/blackfin/bf5xx-ssm2602.c +++ b/sound/soc/blackfin/bf5xx-ssm2602.c @@ -99,6 +99,7 @@ static struct snd_soc_dai_link bf5xx_ssm2602_dai[] = { .platform_name = "bfin-i2s-pcm-audio", .codec_name = "ssm2602.0-001b", .ops = &bf5xx_ssm2602_ops, + .dai_fmt = BF5XX_SSM2602_DAIFMT, }, { .name = "ssm2602", @@ -108,6 +109,7 @@ static struct snd_soc_dai_link bf5xx_ssm2602_dai[] = { .platform_name = "bfin-i2s-pcm-audio", .codec_name = "ssm2602.0-001b", .ops = &bf5xx_ssm2602_ops, + .dai_fmt = BF5XX_SSM2602_DAIFMT, }, }; diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig index 6508e8b790bb..59d8efaa17e9 100644 --- a/sound/soc/codecs/Kconfig +++ b/sound/soc/codecs/Kconfig @@ -57,7 +57,7 @@ config SND_SOC_ALL_CODECS select SND_SOC_TPA6130A2 if I2C select SND_SOC_TLV320DAC33 if I2C select SND_SOC_TWL4030 if TWL4030_CORE - select SND_SOC_TWL6040 if TWL4030_CORE + select SND_SOC_TWL6040 if TWL6040_CORE select SND_SOC_UDA134X select SND_SOC_UDA1380 if I2C select SND_SOC_WL1273 if MFD_WL1273_CORE @@ -276,7 +276,6 @@ config SND_SOC_TWL4030 tristate config SND_SOC_TWL6040 - select TWL6040_CORE tristate config SND_SOC_UDA134X diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c index 78979b3e0e95..07c44b71f096 100644 --- a/sound/soc/codecs/cs42l73.c +++ b/sound/soc/codecs/cs42l73.c @@ -929,6 +929,8 @@ static int cs42l73_set_mclk(struct snd_soc_dai *dai, unsigned int freq) /* MCLKX -> MCLK */ mclkx_coeff = cs42l73_get_mclkx_coeff(freq); + if (mclkx_coeff < 0) + return mclkx_coeff; mclk = cs42l73_mclkx_coeffs[mclkx_coeff].mclkx / cs42l73_mclkx_coeffs[mclkx_coeff].ratio; diff --git a/sound/soc/codecs/tlv320aic23.c b/sound/soc/codecs/tlv320aic23.c index 16d55f91a653..df1e07ffac32 100644 --- a/sound/soc/codecs/tlv320aic23.c +++ b/sound/soc/codecs/tlv320aic23.c @@ -472,7 +472,7 @@ static int tlv320aic23_set_dai_sysclk(struct snd_soc_dai *codec_dai, static int tlv320aic23_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { - u16 reg = snd_soc_read(codec, TLV320AIC23_PWR) & 0xff7f; + u16 reg = snd_soc_read(codec, TLV320AIC23_PWR) & 0x17f; switch (level) { case SND_SOC_BIAS_ON: @@ -491,7 +491,7 @@ static int tlv320aic23_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_OFF: /* everything off, dac mute, 
inactive */ snd_soc_write(codec, TLV320AIC23_ACTIVE, 0x0); - snd_soc_write(codec, TLV320AIC23_PWR, 0xffff); + snd_soc_write(codec, TLV320AIC23_PWR, 0x1ff); break; } codec->dapm.bias_level = level; diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c index 2d8c6b825e57..dc7509b9d53a 100644 --- a/sound/soc/codecs/twl6040.c +++ b/sound/soc/codecs/twl6040.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include @@ -1528,7 +1527,7 @@ static int twl6040_resume(struct snd_soc_codec *codec) static int twl6040_probe(struct snd_soc_codec *codec) { struct twl6040_data *priv; - struct twl4030_codec_data *pdata = dev_get_platdata(codec->dev); + struct twl6040_codec_data *pdata = dev_get_platdata(codec->dev); struct platform_device *pdev = container_of(codec->dev, struct platform_device, dev); int ret = 0; diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c index 8c4c9591ec05..aa12c6b6beeb 100644 --- a/sound/soc/codecs/wm8350.c +++ b/sound/soc/codecs/wm8350.c @@ -60,7 +60,7 @@ struct wm8350_jack_data { }; struct wm8350_data { - struct snd_soc_codec codec; + struct wm8350 *wm8350; struct wm8350_output out1; struct wm8350_output out2; struct wm8350_jack_data hpl; @@ -1309,7 +1309,7 @@ static void wm8350_hp_work(struct wm8350_data *priv, struct wm8350_jack_data *jack, u16 mask) { - struct wm8350 *wm8350 = priv->codec.control_data; + struct wm8350 *wm8350 = priv->wm8350; u16 reg; int report; @@ -1342,7 +1342,7 @@ static void wm8350_hpr_work(struct work_struct *work) static irqreturn_t wm8350_hp_jack_handler(int irq, void *data) { struct wm8350_data *priv = data; - struct wm8350 *wm8350 = priv->codec.control_data; + struct wm8350 *wm8350 = priv->wm8350; struct wm8350_jack_data *jack = NULL; switch (irq - wm8350->irq_base) { @@ -1427,7 +1427,7 @@ EXPORT_SYMBOL_GPL(wm8350_hp_jack_detect); static irqreturn_t wm8350_mic_handler(int irq, void *data) { struct wm8350_data *priv = data; - struct wm8350 *wm8350 = priv->codec.control_data; + struct wm8350 *wm8350 = priv->wm8350; u16 reg; int report = 0; @@ -1536,6 +1536,8 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec) return -ENOMEM; snd_soc_codec_set_drvdata(codec, priv); + priv->wm8350 = wm8350; + for (i = 0; i < ARRAY_SIZE(supply_names); i++) priv->supplies[i].supply = supply_names[i]; @@ -1544,7 +1546,6 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec) if (ret != 0) return ret; - wm8350->codec.codec = codec; codec->control_data = wm8350; /* Put the codec into reset if it wasn't already */ diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 7c49642af052..6c1fe3afd4b5 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -1000,6 +1000,204 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec) } } +static int aif1clk_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct wm8994 *control = codec->control_data; + int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; + int dac; + int adc; + int val; + + switch (control->type) { + case WM8994: + case WM8958: + mask |= WM8994_AIF1DAC2L_ENA | WM8994_AIF1DAC2R_ENA; + break; + default: + break; + } + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + val = snd_soc_read(codec, WM8994_AIF1_CONTROL_1); + if ((val & WM8994_AIF1ADCL_SRC) && + (val & WM8994_AIF1ADCR_SRC)) + adc = WM8994_AIF1ADC1R_ENA | WM8994_AIF1ADC2R_ENA; + else if (!(val & WM8994_AIF1ADCL_SRC) && + !(val & WM8994_AIF1ADCR_SRC)) + adc = WM8994_AIF1ADC1L_ENA | 
WM8994_AIF1ADC2L_ENA; + else + adc = WM8994_AIF1ADC1R_ENA | WM8994_AIF1ADC2R_ENA | + WM8994_AIF1ADC1L_ENA | WM8994_AIF1ADC2L_ENA; + + val = snd_soc_read(codec, WM8994_AIF1_CONTROL_2); + if ((val & WM8994_AIF1DACL_SRC) && + (val & WM8994_AIF1DACR_SRC)) + dac = WM8994_AIF1DAC1R_ENA | WM8994_AIF1DAC2R_ENA; + else if (!(val & WM8994_AIF1DACL_SRC) && + !(val & WM8994_AIF1DACR_SRC)) + dac = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC2L_ENA; + else + dac = WM8994_AIF1DAC1R_ENA | WM8994_AIF1DAC2R_ENA | + WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC2L_ENA; + + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4, + mask, adc); + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, + mask, dac); + snd_soc_update_bits(codec, WM8994_CLOCKING_1, + WM8994_AIF1DSPCLK_ENA | + WM8994_SYSDSPCLK_ENA, + WM8994_AIF1DSPCLK_ENA | + WM8994_SYSDSPCLK_ENA); + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4, mask, + WM8994_AIF1ADC1R_ENA | + WM8994_AIF1ADC1L_ENA | + WM8994_AIF1ADC2R_ENA | + WM8994_AIF1ADC2L_ENA); + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, mask, + WM8994_AIF1DAC1R_ENA | + WM8994_AIF1DAC1L_ENA | + WM8994_AIF1DAC2R_ENA | + WM8994_AIF1DAC2L_ENA); + break; + + case SND_SOC_DAPM_PRE_PMD: + case SND_SOC_DAPM_POST_PMD: + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, + mask, 0); + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4, + mask, 0); + + val = snd_soc_read(codec, WM8994_CLOCKING_1); + if (val & WM8994_AIF2DSPCLK_ENA) + val = WM8994_SYSDSPCLK_ENA; + else + val = 0; + snd_soc_update_bits(codec, WM8994_CLOCKING_1, + WM8994_SYSDSPCLK_ENA | + WM8994_AIF1DSPCLK_ENA, val); + break; + } + + return 0; +} + +static int aif2clk_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + int dac; + int adc; + int val; + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + val = snd_soc_read(codec, WM8994_AIF2_CONTROL_1); + if ((val & WM8994_AIF2ADCL_SRC) && + (val & WM8994_AIF2ADCR_SRC)) + adc = WM8994_AIF2ADCR_ENA; + else if (!(val & WM8994_AIF2ADCL_SRC) && + !(val & WM8994_AIF2ADCR_SRC)) + adc = WM8994_AIF2ADCL_ENA; + else + adc = WM8994_AIF2ADCL_ENA | WM8994_AIF2ADCR_ENA; + + + val = snd_soc_read(codec, WM8994_AIF2_CONTROL_2); + if ((val & WM8994_AIF2DACL_SRC) && + (val & WM8994_AIF2DACR_SRC)) + dac = WM8994_AIF2DACR_ENA; + else if (!(val & WM8994_AIF2DACL_SRC) && + !(val & WM8994_AIF2DACR_SRC)) + dac = WM8994_AIF2DACL_ENA; + else + dac = WM8994_AIF2DACL_ENA | WM8994_AIF2DACR_ENA; + + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4, + WM8994_AIF2ADCL_ENA | + WM8994_AIF2ADCR_ENA, adc); + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, + WM8994_AIF2DACL_ENA | + WM8994_AIF2DACR_ENA, dac); + snd_soc_update_bits(codec, WM8994_CLOCKING_1, + WM8994_AIF2DSPCLK_ENA | + WM8994_SYSDSPCLK_ENA, + WM8994_AIF2DSPCLK_ENA | + WM8994_SYSDSPCLK_ENA); + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_4, + WM8994_AIF2ADCL_ENA | + WM8994_AIF2ADCR_ENA, + WM8994_AIF2ADCL_ENA | + WM8994_AIF2ADCR_ENA); + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, + WM8994_AIF2DACL_ENA | + WM8994_AIF2DACR_ENA, + WM8994_AIF2DACL_ENA | + WM8994_AIF2DACR_ENA); + break; + + case SND_SOC_DAPM_PRE_PMD: + case SND_SOC_DAPM_POST_PMD: + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, + WM8994_AIF2DACL_ENA | + WM8994_AIF2DACR_ENA, 0); + snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, + WM8994_AIF2ADCL_ENA | + WM8994_AIF2ADCR_ENA, 0); + + val = snd_soc_read(codec, WM8994_CLOCKING_1); + if (val & WM8994_AIF1DSPCLK_ENA) + val = WM8994_SYSDSPCLK_ENA; + 
else + val = 0; + snd_soc_update_bits(codec, WM8994_CLOCKING_1, + WM8994_SYSDSPCLK_ENA | + WM8994_AIF2DSPCLK_ENA, val); + break; + } + + return 0; +} + +static int aif1clk_late_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + wm8994->aif1clk_enable = 1; + break; + case SND_SOC_DAPM_POST_PMD: + wm8994->aif1clk_disable = 1; + break; + } + + return 0; +} + +static int aif2clk_late_ev(struct snd_soc_dapm_widget *w, + struct snd_kcontrol *kcontrol, int event) +{ + struct snd_soc_codec *codec = w->codec; + struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); + + switch (event) { + case SND_SOC_DAPM_PRE_PMU: + wm8994->aif2clk_enable = 1; + break; + case SND_SOC_DAPM_POST_PMD: + wm8994->aif2clk_disable = 1; + break; + } + + return 0; +} + static int late_enable_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { @@ -1009,12 +1207,14 @@ static int late_enable_ev(struct snd_soc_dapm_widget *w, switch (event) { case SND_SOC_DAPM_PRE_PMU: if (wm8994->aif1clk_enable) { + aif1clk_ev(w, kcontrol, event); snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, WM8994_AIF1CLK_ENA_MASK, WM8994_AIF1CLK_ENA); wm8994->aif1clk_enable = 0; } if (wm8994->aif2clk_enable) { + aif2clk_ev(w, kcontrol, event); snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, WM8994_AIF2CLK_ENA_MASK, WM8994_AIF2CLK_ENA); @@ -1040,11 +1240,13 @@ static int late_disable_ev(struct snd_soc_dapm_widget *w, if (wm8994->aif1clk_disable) { snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, WM8994_AIF1CLK_ENA_MASK, 0); + aif1clk_ev(w, kcontrol, event); wm8994->aif1clk_disable = 0; } if (wm8994->aif2clk_disable) { snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, WM8994_AIF2CLK_ENA_MASK, 0); + aif2clk_ev(w, kcontrol, event); wm8994->aif2clk_disable = 0; } break; @@ -1053,42 +1255,6 @@ static int late_disable_ev(struct snd_soc_dapm_widget *w, return 0; } -static int aif1clk_ev(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = w->codec; - struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - wm8994->aif1clk_enable = 1; - break; - case SND_SOC_DAPM_POST_PMD: - wm8994->aif1clk_disable = 1; - break; - } - - return 0; -} - -static int aif2clk_ev(struct snd_soc_dapm_widget *w, - struct snd_kcontrol *kcontrol, int event) -{ - struct snd_soc_codec *codec = w->codec; - struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); - - switch (event) { - case SND_SOC_DAPM_PRE_PMU: - wm8994->aif2clk_enable = 1; - break; - case SND_SOC_DAPM_POST_PMD: - wm8994->aif2clk_disable = 1; - break; - } - - return 0; -} - static int adc_mux_ev(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { @@ -1385,9 +1551,9 @@ static const struct snd_kcontrol_new aif2dacr_src_mux = SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum); static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = { -SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev, +SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_late_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), -SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev, +SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_late_ev, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 
0, NULL, 0, @@ -1416,8 +1582,10 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev) }; static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { -SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0), -SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0), +SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), +SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev, + SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0, left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)), @@ -1470,30 +1638,30 @@ SND_SOC_DAPM_SUPPLY("VMID", SND_SOC_NOPM, 0, 0, vmid_event, SND_SOC_DAPM_SUPPLY("CLK_SYS", SND_SOC_NOPM, 0, 0, clk_sys_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), -SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0), -SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0), -SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0), +SND_SOC_DAPM_SUPPLY("DSP1CLK", SND_SOC_NOPM, 3, 0, NULL, 0), +SND_SOC_DAPM_SUPPLY("DSP2CLK", SND_SOC_NOPM, 2, 0, NULL, 0), +SND_SOC_DAPM_SUPPLY("DSPINTCLK", SND_SOC_NOPM, 1, 0, NULL, 0), SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL, - 0, WM8994_POWER_MANAGEMENT_4, 9, 0), + 0, SND_SOC_NOPM, 9, 0), SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL, - 0, WM8994_POWER_MANAGEMENT_4, 8, 0), + 0, SND_SOC_NOPM, 8, 0), SND_SOC_DAPM_AIF_IN_E("AIF1DAC1L", NULL, 0, - WM8994_POWER_MANAGEMENT_5, 9, 0, wm8958_aif_ev, + SND_SOC_NOPM, 9, 0, wm8958_aif_ev, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_AIF_IN_E("AIF1DAC1R", NULL, 0, - WM8994_POWER_MANAGEMENT_5, 8, 0, wm8958_aif_ev, + SND_SOC_NOPM, 8, 0, wm8958_aif_ev, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_AIF_OUT("AIF1ADC2L", NULL, - 0, WM8994_POWER_MANAGEMENT_4, 11, 0), + 0, SND_SOC_NOPM, 11, 0), SND_SOC_DAPM_AIF_OUT("AIF1ADC2R", NULL, - 0, WM8994_POWER_MANAGEMENT_4, 10, 0), + 0, SND_SOC_NOPM, 10, 0), SND_SOC_DAPM_AIF_IN_E("AIF1DAC2L", NULL, 0, - WM8994_POWER_MANAGEMENT_5, 11, 0, wm8958_aif_ev, + SND_SOC_NOPM, 11, 0, wm8958_aif_ev, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_AIF_IN_E("AIF1DAC2R", NULL, 0, - WM8994_POWER_MANAGEMENT_5, 10, 0, wm8958_aif_ev, + SND_SOC_NOPM, 10, 0, wm8958_aif_ev, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_MIXER("AIF1ADC1L Mixer", SND_SOC_NOPM, 0, 0, @@ -1520,14 +1688,14 @@ SND_SOC_DAPM_MIXER("DAC1R Mixer", SND_SOC_NOPM, 0, 0, dac1r_mix, ARRAY_SIZE(dac1r_mix)), SND_SOC_DAPM_AIF_OUT("AIF2ADCL", NULL, 0, - WM8994_POWER_MANAGEMENT_4, 13, 0), + SND_SOC_NOPM, 13, 0), SND_SOC_DAPM_AIF_OUT("AIF2ADCR", NULL, 0, - WM8994_POWER_MANAGEMENT_4, 12, 0), + SND_SOC_NOPM, 12, 0), SND_SOC_DAPM_AIF_IN_E("AIF2DACL", NULL, 0, - WM8994_POWER_MANAGEMENT_5, 13, 0, wm8958_aif_ev, + SND_SOC_NOPM, 13, 0, wm8958_aif_ev, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_AIF_IN_E("AIF2DACR", NULL, 0, - WM8994_POWER_MANAGEMENT_5, 12, 0, wm8958_aif_ev, + SND_SOC_NOPM, 12, 0, wm8958_aif_ev, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_AIF_IN("AIF1DACDAT", NULL, 0, SND_SOC_NOPM, 0, 0), diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index f13f2886339c..6c028c470601 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c @@ -1035,7 +1035,7 @@ void wm_hubs_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct 
wm_hubs_data *hubs = snd_soc_codec_get_drvdata(codec); - int val; + int mask, val; switch (level) { case SND_SOC_BIAS_STANDBY: @@ -1047,6 +1047,13 @@ void wm_hubs_set_bias_level(struct snd_soc_codec *codec, case SND_SOC_BIAS_ON: /* Turn off any unneded single ended outputs */ val = 0; + mask = 0; + + if (hubs->lineout1_se) + mask |= WM8993_LINEOUT1N_ENA | WM8993_LINEOUT1P_ENA; + + if (hubs->lineout2_se) + mask |= WM8993_LINEOUT2N_ENA | WM8993_LINEOUT2P_ENA; if (hubs->lineout1_se && hubs->lineout1n_ena) val |= WM8993_LINEOUT1N_ENA; @@ -1061,11 +1068,7 @@ void wm_hubs_set_bias_level(struct snd_soc_codec *codec, val |= WM8993_LINEOUT2P_ENA; snd_soc_update_bits(codec, WM8993_POWER_MANAGEMENT_3, - WM8993_LINEOUT1N_ENA | - WM8993_LINEOUT1P_ENA | - WM8993_LINEOUT2N_ENA | - WM8993_LINEOUT2P_ENA, - val); + mask, val); /* Remove the input clamps */ snd_soc_update_bits(codec, WM8993_INPUTS_CLAMP_REG, diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig index e00dd0b1139c..deafbfaacdbf 100644 --- a/sound/soc/omap/Kconfig +++ b/sound/soc/omap/Kconfig @@ -97,7 +97,7 @@ config SND_OMAP_SOC_SDP3430 config SND_OMAP_SOC_OMAP_ABE_TWL6040 tristate "SoC Audio support for OMAP boards using ABE and twl6040 codec" - depends on TWL4030_CORE && SND_OMAP_SOC && ARCH_OMAP4 + depends on TWL6040_CORE && SND_OMAP_SOC && ARCH_OMAP4 select SND_OMAP_SOC_DMIC select SND_OMAP_SOC_MCPDM select SND_SOC_TWL6040 diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c index a59bd352d342..5a649da9122a 100644 --- a/sound/soc/omap/omap-pcm.c +++ b/sound/soc/omap/omap-pcm.c @@ -401,6 +401,10 @@ static int omap_pcm_new(struct snd_soc_pcm_runtime *rtd) } out: + /* free preallocated buffers in case of error */ + if (ret) + omap_pcm_free_dma_buffers(pcm); + return ret; } diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c index 72185078ddf8..79fbeea99d46 100644 --- a/sound/soc/samsung/s3c2412-i2s.c +++ b/sound/soc/samsung/s3c2412-i2s.c @@ -166,7 +166,7 @@ static struct snd_soc_dai_driver s3c2412_i2s_dai = { static __devinit int s3c2412_iis_dev_probe(struct platform_device *pdev) { - return snd_soc_register_dai(&pdev->dev, &s3c2412_i2s_dai); + return s3c_i2sv2_register_dai(&pdev->dev, -1, &s3c2412_i2s_dai); } static __devexit int s3c2412_iis_dev_remove(struct platform_device *pdev) diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c index 378cc5b056d7..74ed2dffbffd 100644 --- a/sound/soc/sh/fsi.c +++ b/sound/soc/sh/fsi.c @@ -1001,11 +1001,10 @@ static void fsi_dma_do_tasklet(unsigned long data) sg_dma_address(&sg) = buf; sg_dma_len(&sg) = len; - desc = chan->device->device_prep_slave_sg(chan, &sg, 1, dir, - DMA_PREP_INTERRUPT | - DMA_CTRL_ACK); + desc = dmaengine_prep_slave_sg(chan, &sg, 1, dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { - dev_err(dai->dev, "device_prep_slave_sg() fail\n"); + dev_err(dai->dev, "dmaengine_prep_slave_sg() fail\n"); return; } diff --git a/sound/soc/sh/migor.c b/sound/soc/sh/migor.c index 9d9ad8d61c0a..8526e1edaf45 100644 --- a/sound/soc/sh/migor.c +++ b/sound/soc/sh/migor.c @@ -35,7 +35,7 @@ static unsigned long siumckb_recalc(struct clk *clk) return codec_freq; } -static struct clk_ops siumckb_clk_ops = { +static struct sh_clk_ops siumckb_clk_ops = { .recalc = siumckb_recalc, }; diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index accdcb7d4d9d..c88d9741b9e7 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c @@ -3113,6 +3113,7 @@ int snd_soc_register_card(struct snd_soc_card *card) GFP_KERNEL); if (card->rtd == NULL) return -ENOMEM; + 
card->num_rtd = 0; card->rtd_aux = &card->rtd[card->num_links]; for (i = 0; i < card->num_links; i++) @@ -3624,10 +3625,10 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card, int i, ret; num_routes = of_property_count_strings(np, propname); - if (num_routes & 1) { + if (num_routes < 0 || num_routes & 1) { dev_err(card->dev, - "Property '%s's length is not even\n", - propname); + "Property '%s' does not exist or its length is not even\n", + propname); return -EINVAL; } num_routes /= 2; diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 5cbd2d7623b8..1bb6d4a63cd8 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c @@ -67,6 +67,7 @@ static int dapm_up_seq[] = { [snd_soc_dapm_out_drv] = 10, [snd_soc_dapm_hp] = 10, [snd_soc_dapm_spk] = 10, + [snd_soc_dapm_line] = 10, [snd_soc_dapm_post] = 11, }; @@ -75,6 +76,7 @@ static int dapm_down_seq[] = { [snd_soc_dapm_adc] = 1, [snd_soc_dapm_hp] = 2, [snd_soc_dapm_spk] = 2, + [snd_soc_dapm_line] = 2, [snd_soc_dapm_out_drv] = 2, [snd_soc_dapm_pga] = 4, [snd_soc_dapm_mixer_named_ctl] = 5, diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 03059e75665a..9bf3fc759344 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile @@ -234,8 +234,8 @@ endif export PERL_PATH -FLEX = $(CROSS_COMPILE)flex -BISON= $(CROSS_COMPILE)bison +FLEX = flex +BISON= bison $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 2e317438980b..cdae9b2db1cc 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -374,16 +374,23 @@ static int __cmd_report(struct perf_report *rep) (kernel_map->dso->hit && (kernel_kmap->ref_reloc_sym == NULL || kernel_kmap->ref_reloc_sym->addr == 0))) { - const struct dso *kdso = kernel_map->dso; + const char *desc = + "As no suitable kallsyms nor vmlinux was found, kernel samples\n" + "can't be resolved."; + + if (kernel_map) { + const struct dso *kdso = kernel_map->dso; + if (!RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION])) { + desc = "If some relocation was applied (e.g. " + "kexec) symbols may be misresolved."; + } + } ui__warning( "Kernel address maps (/proc/{kallsyms,modules}) were restricted.\n\n" "Check /proc/sys/kernel/kptr_restrict before running 'perf record'.\n\n%s\n\n" "Samples in kernel modules can't be resolved as well.\n\n", - RB_EMPTY_ROOT(&kdso->symbols[MAP__FUNCTION]) ? -"As no suitable kallsyms nor vmlinux was found, kernel samples\n" -"can't be resolved." : -"If some relocation was applied (e.g. 
kexec) symbols may be misresolved."); + desc); } if (dump_trace) { diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 1c5b9801ac61..223ffdcc0fd8 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c @@ -851,6 +851,28 @@ static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist) return test__checkevent_symbolic_name(evlist); } +static int test__checkevent_exclude_host_modifier(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = list_entry(evlist->entries.next, + struct perf_evsel, node); + + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); + + return test__checkevent_symbolic_name(evlist); +} + +static int test__checkevent_exclude_guest_modifier(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = list_entry(evlist->entries.next, + struct perf_evsel, node); + + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + + return test__checkevent_symbolic_name(evlist); +} + static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist) { struct perf_evsel *evsel = list_entry(evlist->entries.next, @@ -1091,6 +1113,14 @@ static struct test__event_st { .name = "r1,syscalls:sys_enter_open:k,1:1:hp", .check = test__checkevent_list, }, + { + .name = "instructions:G", + .check = test__checkevent_exclude_host_modifier, + }, + { + .name = "instructions:H", + .check = test__checkevent_exclude_guest_modifier, + }, }; #define TEST__EVENTS_CNT (sizeof(test__events) / sizeof(struct test__event_st)) diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index 05d766e3ecb5..1fcf1bbc5458 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l @@ -54,7 +54,7 @@ num_dec [0-9]+ num_hex 0x[a-fA-F0-9]+ num_raw_hex [a-fA-F0-9]+ name [a-zA-Z_*?][a-zA-Z0-9_*?]* -modifier_event [ukhp]{1,5} +modifier_event [ukhpGH]{1,8} modifier_bp [rwx] %% diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index c0a028c3ebaf..ab9867b2b433 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -977,8 +977,9 @@ static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, * And always look at the original dso, not at debuginfo packages, that * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). 
*/ -static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, - symbol_filter_t filter) +static int +dso__synthesize_plt_symbols(struct dso *dso, char *name, struct map *map, + symbol_filter_t filter) { uint32_t nr_rel_entries, idx; GElf_Sym sym; @@ -993,10 +994,7 @@ static int dso__synthesize_plt_symbols(struct dso *dso, struct map *map, char sympltname[1024]; Elf *elf; int nr = 0, symidx, fd, err = 0; - char name[PATH_MAX]; - snprintf(name, sizeof(name), "%s%s", - symbol_conf.symfs, dso->long_name); fd = open(name, O_RDONLY); if (fd < 0) goto out; @@ -1703,8 +1701,9 @@ restart: continue; if (ret > 0) { - int nr_plt = dso__synthesize_plt_symbols(dso, map, - filter); + int nr_plt; + + nr_plt = dso__synthesize_plt_symbols(dso, name, map, filter); if (nr_plt > 0) ret += nr_plt; break; diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl index 95d6a6f7c33a..4915408f6a98 100755 --- a/tools/testing/ktest/ktest.pl +++ b/tools/testing/ktest/ktest.pl @@ -183,6 +183,9 @@ my %force_config; # do not force reboots on config problems my $no_reboot = 1; +# reboot on success +my $reboot_success = 0; + my %option_map = ( "MACHINE" => \$machine, "SSH_USER" => \$ssh_user, @@ -2192,7 +2195,7 @@ sub run_bisect { } # Are we looking for where it worked, not failed? - if ($reverse_bisect) { + if ($reverse_bisect && $ret >= 0) { $ret = !$ret; } @@ -3469,6 +3472,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { # Do not reboot on failing test options $no_reboot = 1; + $reboot_success = 0; $iteration = $i; @@ -3554,9 +3558,11 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { die "failed to checkout $checkout"; } + $no_reboot = 0; + # A test may opt to not reboot the box if ($reboot_on_success) { - $no_reboot = 0; + $reboot_success = 1; } if ($test_type eq "bisect") { @@ -3600,7 +3606,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) { if ($opt{"POWEROFF_ON_SUCCESS"}) { halt; -} elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot) { +} elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot && $reboot_success) { reboot_to_good; } elsif (defined($switch_to_good)) { # still need to get to the good kernel