Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux

Resolved conflicts:
  fs/xfs/xfs_trans_priv.h:
    - deleted struct xfs_ail field xa_flags
    - kept field xa_log_flush in struct xfs_ail
  fs/xfs/xfs_trans_ail.c:
    - in xfsaild_push(), in XFS_ITEM_PUSHBUF case, replaced
      "flush_log = 1" with "ailp->xa_log_flush++"

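A minimal sketch of how the resolved XFS_ITEM_PUSHBUF case reads after this
merge, combining the conflict notes above with the xfsaild_push() hunk from
fs/xfs/xfs_trans_ail.c shown further down; illustrative only, not the full
function:

    case XFS_ITEM_PUSHBUF:
            XFS_STATS_INC(xs_push_ail_pushbuf);

            if (!IOP_PUSHBUF(lip)) {
                    /* pushbuf could not make progress (e.g. the buffer
                     * is pinned): count the item as stuck and ask for a
                     * log flush, per the conflict resolution above */
                    stuck++;
                    ailp->xa_log_flush++;
            } else {
                    ailp->xa_last_pushed_lsn = lsn;
            }
            push_xfsbufd = 1;
            break;
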
Signed-off-by: Alex Elder <aelder@sgi.com>
Alex Elder 2011-10-17 15:42:02 -05:00
Parents: 5a93a064d2 a84a79e4d3
Commit: 9508534c5f
101 changed files: 469 additions and 336 deletions


@ -2706,10 +2706,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
functions are at fixed addresses, they make nice
targets for exploits that can control RIP.
emulate [default] Vsyscalls turn into traps and are
emulated reasonably safely.
emulate Vsyscalls turn into traps and are emulated
reasonably safely.
native Vsyscalls are native syscall instructions.
native [default] Vsyscalls are native syscall
instructions.
This is a little bit faster than trapping
and makes a few dynamic recompilers work
better than they would in emulation mode.
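
The hunk above is from the kernel-parameters entry for the vsyscall= boot
option (the parameter name is implied by the surrounding entry rather than
shown in this hunk); for example, the old default behaviour can still be
requested explicitly on the kernel command line:

    vsyscall=emulate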


@ -27,7 +27,7 @@ applying a filter to each packet that assigns it to one of a small number
of logical flows. Packets for each flow are steered to a separate receive
queue, which in turn can be processed by separate CPUs. This mechanism is
generally known as “Receive-side Scaling” (RSS). The goal of RSS and
the other scaling techniques to increase performance uniformly.
the other scaling techniques is to increase performance uniformly.
Multi-queue distribution can also be used for traffic prioritization, but
that is not the focus of these techniques.
@ -186,10 +186,10 @@ are steered using plain RPS. Multiple table entries may point to the
same CPU. Indeed, with many flows and few CPUs, it is very likely that
a single application thread handles flows with many different flow hashes.
rps_sock_table is a global flow table that contains the *desired* CPU for
flows: the CPU that is currently processing the flow in userspace. Each
table value is a CPU index that is updated during calls to recvmsg and
sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
rps_sock_flow_table is a global flow table that contains the *desired* CPU
for flows: the CPU that is currently processing the flow in userspace.
Each table value is a CPU index that is updated during calls to recvmsg
and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
and tcp_splice_read()).
When the scheduler moves a thread to a new CPU while it has outstanding
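
As a rough illustration of the table described above, a simplified sketch in
C (the field and function names here are illustrative, not the kernel's
actual definitions):

    /* One entry per flow-hash bucket; the value is the CPU that last
     * processed the flow in userspace (updated on the recvmsg/sendmsg
     * paths named above). */
    struct sock_flow_table {
            unsigned int mask;      /* table size - 1; size is a power of two */
            unsigned short cpu[];   /* desired CPU, indexed by flow hash & mask */
    };

    static unsigned short desired_cpu(const struct sock_flow_table *table,
                                      unsigned int flow_hash)
    {
            return table->cpu[flow_hash & table->mask];
    }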


@ -6366,10 +6366,10 @@ F: net/ipv4/tcp_lp.c
TEGRA SUPPORT
M: Colin Cross <ccross@android.com>
M: Erik Gilling <konkers@android.com>
M: Olof Johansson <olof@lixom.net>
M: Stephen Warren <swarren@nvidia.com>
L: linux-tegra@vger.kernel.org
T: git git://android.git.kernel.org/kernel/tegra.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/olof/tegra.git
S: Supported
F: arch/arm/mach-tegra


@ -259,7 +259,6 @@ static void __init vic_disable(void __iomem *base)
writel(0, base + VIC_INT_SELECT);
writel(0, base + VIC_INT_ENABLE);
writel(~0, base + VIC_INT_ENABLE_CLEAR);
writel(0, base + VIC_IRQ_STATUS);
writel(0, base + VIC_ITCR);
writel(~0, base + VIC_INT_SOFT_CLEAR);
}


@ -10,6 +10,8 @@
#ifndef __ASM_ARM_LOCALTIMER_H
#define __ASM_ARM_LOCALTIMER_H
#include <linux/errno.h>
struct clock_event_device;
/*


@ -321,8 +321,8 @@ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] =
ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,


@ -193,7 +193,8 @@ static int __init omap2430_i2c_init(void)
{
omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo,
ARRAY_SIZE(sdp2430_i2c1_boardinfo));
omap2_pmic_init("twl4030", &sdp2430_twldata);
omap_pmic_init(2, 100, "twl4030", INT_24XX_SYS_NIRQ,
&sdp2430_twldata);
return 0;
}


@ -137,8 +137,7 @@ static void omap4_hsmmc1_before_set_reg(struct device *dev, int slot,
*/
reg = omap4_ctrl_pad_readl(control_pbias_offset);
reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
OMAP4_MMC1_PWRDNZ_MASK |
OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
OMAP4_MMC1_PWRDNZ_MASK);
omap4_ctrl_pad_writel(reg, control_pbias_offset);
}
@ -156,8 +155,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
else
reg |= OMAP4_MMC1_PBIASLITE_VMODE_MASK;
reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
OMAP4_MMC1_PWRDNZ_MASK |
OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
OMAP4_MMC1_PWRDNZ_MASK);
omap4_ctrl_pad_writel(reg, control_pbias_offset);
timeout = jiffies + msecs_to_jiffies(5);
@ -171,16 +169,14 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) {
pr_err("Pbias Voltage is not same as LDO\n");
/* Caution : On VMODE_ERROR Power Down MMC IO */
reg &= ~(OMAP4_MMC1_PWRDNZ_MASK |
OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
reg &= ~(OMAP4_MMC1_PWRDNZ_MASK);
omap4_ctrl_pad_writel(reg, control_pbias_offset);
}
} else {
reg = omap4_ctrl_pad_readl(control_pbias_offset);
reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
OMAP4_MMC1_PWRDNZ_MASK |
OMAP4_MMC1_PBIASLITE_VMODE_MASK |
OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
OMAP4_MMC1_PBIASLITE_VMODE_MASK);
omap4_ctrl_pad_writel(reg, control_pbias_offset);
}
}


@ -137,9 +137,6 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
musb_plat.mode = board_data->mode;
musb_plat.extvbus = board_data->extvbus;
if (cpu_is_omap44xx())
omap4430_phy_init(dev);
if (cpu_is_omap3517() || cpu_is_omap3505()) {
oh_name = "am35x_otg_hs";
name = "musb-am35x";


@ -32,7 +32,6 @@
#include <asm/system.h>
#include <mach/hardware.h>
#include <mach/clk.h>
/* Frequency table index must be sequential starting at 0 */


@ -6,6 +6,7 @@ config UX500_SOC_COMMON
select ARM_GIC
select HAS_MTU
select ARM_ERRATA_753970
select ARM_ERRATA_754322
menu "Ux500 SoC"


@ -496,6 +496,13 @@ static void __init free_unused_memmap(struct meminfo *mi)
*/
bank_start = min(bank_start,
ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
/*
* Align down here since the VM subsystem insists that the
* memmap entries are valid from the bank start aligned to
* MAX_ORDER_NR_PAGES.
*/
bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
/*
* If we had a previous bank, and there is a space


@ -24,6 +24,7 @@ config MIPS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select HAVE_ARCH_JUMP_LABEL
select IRQ_FORCED_THREADING
menu "Machine selection"
@ -722,6 +723,7 @@ config CAVIUM_OCTEON_SIMULATOR
select SYS_SUPPORTS_HIGHMEM
select SYS_SUPPORTS_HOTPLUG_CPU
select SYS_HAS_CPU_CAVIUM_OCTEON
select HOLES_IN_ZONE
help
The Octeon simulator is a software performance model of the Cavium
Octeon Processor. It supports simulating Octeon processors on x86
@ -744,6 +746,7 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
select ZONE_DMA32
select USB_ARCH_HAS_OHCI
select USB_ARCH_HAS_EHCI
select HOLES_IN_ZONE
help
This option supports all of the Octeon reference boards from Cavium
Networks. It builds a kernel that dynamically determines the Octeon
@ -973,6 +976,9 @@ config ISA_DMA_API
config GENERIC_GPIO
bool
config HOLES_IN_ZONE
bool
#
# Endianess selection. Sufficiently obscure so many users don't know what to
# answer, so we try hard to limit the available choices. Also the use of a


@ -492,7 +492,7 @@ static void __init alchemy_setup_macs(int ctype)
memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6);
ret = platform_device_register(&au1xxx_eth0_device);
if (!ret)
if (ret)
printk(KERN_INFO "Alchemy: failed to register MAC0\n");


@ -158,15 +158,21 @@ static void restore_core_regs(void)
void au_sleep(void)
{
int cpuid = alchemy_get_cputype();
if (cpuid != ALCHEMY_CPU_UNKNOWN) {
save_core_regs();
if (cpuid <= ALCHEMY_CPU_AU1500)
alchemy_sleep_au1000();
else if (cpuid <= ALCHEMY_CPU_AU1200)
alchemy_sleep_au1550();
restore_core_regs();
save_core_regs();
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1000:
case ALCHEMY_CPU_AU1500:
case ALCHEMY_CPU_AU1100:
alchemy_sleep_au1000();
break;
case ALCHEMY_CPU_AU1550:
case ALCHEMY_CPU_AU1200:
alchemy_sleep_au1550();
break;
}
restore_core_regs();
}
#endif /* CONFIG_PM */


@ -89,8 +89,12 @@ static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
{
unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);
disable_irq_nosync(irq);
for ( ; bisr; bisr &= bisr - 1)
generic_handle_irq(bcsr_csc_base + __ffs(bisr));
enable_irq(irq);
}
/* NOTE: both the enable and mask bits must be cleared, otherwise the


@ -23,13 +23,6 @@ void __init board_setup(void)
unsigned long freq0, clksrc, div, pfc;
unsigned short whoami;
/* Set Config[OD] (disable overlapping bus transaction):
* This gets rid of a _lot_ of spurious interrupts (especially
* wrt. IDE); but incurs ~10% performance hit in some
* cpu-bound applications.
*/
set_c0_config(1 << 19);
bcsr_init(DB1200_BCSR_PHYS_ADDR,
DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS);


@ -98,7 +98,8 @@ static struct irq_chip ar7_sec_irq_type = {
static struct irqaction ar7_cascade_action = {
.handler = no_action,
.name = "AR7 cascade interrupt"
.name = "AR7 cascade interrupt",
.flags = IRQF_NO_THREAD,
};
static void __init ar7_irq_init(int base)


@ -222,6 +222,7 @@ static struct irq_chip bcm63xx_external_irq_chip = {
static struct irqaction cpu_ip2_cascade_action = {
.handler = no_action,
.name = "cascade_ip2",
.flags = IRQF_NO_THREAD,
};
void __init arch_init_irq(void)


@ -48,6 +48,7 @@ asmlinkage void plat_irq_dispatch(void)
static struct irqaction cascade = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
void __init arch_init_irq(void)


@ -101,20 +101,24 @@ int cpu_fpu_mask = DEC_CPU_IRQ_MASK(DEC_CPU_INR_FPU);
static struct irqaction ioirq = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
static struct irqaction fpuirq = {
.handler = no_action,
.name = "fpu",
.flags = IRQF_NO_THREAD,
};
static struct irqaction busirq = {
.flags = IRQF_DISABLED,
.name = "bus error",
.flags = IRQF_NO_THREAD,
};
static struct irqaction haltirq = {
.handler = dec_intr_halt,
.name = "halt",
.flags = IRQF_NO_THREAD,
};


@ -169,7 +169,7 @@ void emma2rh_gpio_irq_init(void)
static struct irqaction irq_cascade = {
.handler = no_action,
.flags = 0,
.flags = IRQF_NO_THREAD,
.name = "cascade",
.dev_id = NULL,
.next = NULL,


@ -54,7 +54,6 @@
#define cpu_has_mips_r2_exec_hazard 0
#define cpu_has_dsp 0
#define cpu_has_mipsmt 0
#define cpu_has_userlocal 0
#define cpu_has_vint 0
#define cpu_has_veic 0
#define cpu_hwrena_impl_bits 0xc0000000


@ -13,7 +13,6 @@
#define __ASM_MACH_POWERTV_DMA_COHERENCE_H
#include <linux/sched.h>
#include <linux/version.h>
#include <linux/device.h>
#include <asm/mach-powertv/asic.h>


@ -195,9 +195,9 @@
* to cover the pipeline delay.
*/
.set mips32
mfc0 v1, CP0_TCSTATUS
mfc0 k0, CP0_TCSTATUS
.set mips0
LONG_S v1, PT_TCSTATUS(sp)
LONG_S k0, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_S $4, PT_R4(sp)
LONG_S $5, PT_R5(sp)


@ -18,7 +18,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/delay.h>
@ -86,7 +86,6 @@ struct jz_gpio_chip {
spinlock_t lock;
struct gpio_chip gpio_chip;
struct sys_device sysdev;
};
static struct jz_gpio_chip jz4740_gpio_chips[];
@ -459,49 +458,47 @@ static struct jz_gpio_chip jz4740_gpio_chips[] = {
JZ4740_GPIO_CHIP(D),
};
static inline struct jz_gpio_chip *sysdev_to_chip(struct sys_device *dev)
static void jz4740_gpio_suspend_chip(struct jz_gpio_chip *chip)
{
return container_of(dev, struct jz_gpio_chip, sysdev);
}
static int jz4740_gpio_suspend(struct sys_device *dev, pm_message_t state)
{
struct jz_gpio_chip *chip = sysdev_to_chip(dev);
chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK);
writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET);
writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR);
}
static int jz4740_gpio_suspend(void)
{
int i;
for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); i++)
jz4740_gpio_suspend_chip(&jz4740_gpio_chips[i]);
return 0;
}
static int jz4740_gpio_resume(struct sys_device *dev)
static void jz4740_gpio_resume_chip(struct jz_gpio_chip *chip)
{
struct jz_gpio_chip *chip = sysdev_to_chip(dev);
uint32_t mask = chip->suspend_mask;
writel(~mask, chip->base + JZ_REG_GPIO_MASK_CLEAR);
writel(mask, chip->base + JZ_REG_GPIO_MASK_SET);
return 0;
}
static struct sysdev_class jz4740_gpio_sysdev_class = {
.name = "gpio",
static void jz4740_gpio_resume(void)
{
int i;
for (i = ARRAY_SIZE(jz4740_gpio_chips) - 1; i >= 0 ; i--)
jz4740_gpio_resume_chip(&jz4740_gpio_chips[i]);
}
static struct syscore_ops jz4740_gpio_syscore_ops = {
.suspend = jz4740_gpio_suspend,
.resume = jz4740_gpio_resume,
};
static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
{
int ret, irq;
chip->sysdev.id = id;
chip->sysdev.cls = &jz4740_gpio_sysdev_class;
ret = sysdev_register(&chip->sysdev);
if (ret)
return ret;
int irq;
spin_lock_init(&chip->lock);
@ -519,22 +516,17 @@ static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
irq_set_chip_and_handler(irq, &jz_gpio_irq_chip,
handle_level_irq);
}
return 0;
}
static int __init jz4740_gpio_init(void)
{
unsigned int i;
int ret;
ret = sysdev_class_register(&jz4740_gpio_sysdev_class);
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);
register_syscore_ops(&jz4740_gpio_syscore_ops);
printk(KERN_INFO "JZ4740 GPIO initialized\n");
return 0;


@ -19,6 +19,26 @@
#include <asm-generic/sections.h>
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif
/*
* Check if the address is in kernel space
*
* Clone core_kernel_text() from kernel/extable.c, but doesn't call
* init_kernel_text() for Ftrace doesn't trace functions in init sections.
*/
static inline int in_kernel_space(unsigned long ip)
{
if (ip >= (unsigned long)_stext &&
ip <= (unsigned long)_etext)
return 1;
return 0;
}
#ifdef CONFIG_DYNAMIC_FTRACE
#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
@ -54,20 +74,6 @@ static inline void ftrace_dyn_arch_init_insns(void)
#endif
}
/*
* Check if the address is in kernel space
*
* Clone core_kernel_text() from kernel/extable.c, but doesn't call
* init_kernel_text() for Ftrace doesn't trace functions in init sections.
*/
static inline int in_kernel_space(unsigned long ip)
{
if (ip >= (unsigned long)_stext &&
ip <= (unsigned long)_etext)
return 1;
return 0;
}
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
int faulted;
@ -112,11 +118,6 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
* 1: offset = 4 instructions
*/
#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif
#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
int ftrace_make_nop(struct module *mod,


@ -229,7 +229,7 @@ static void i8259A_shutdown(void)
*/
if (i8259A_auto_eoi >= 0) {
outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */
outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
}
}
@ -295,6 +295,7 @@ static void init_8259A(int auto_eoi)
static struct irqaction irq2 = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
static struct resource pic1_io_resource = {


@ -349,3 +349,10 @@ SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
dfd, pathname);
}
SYSCALL_DEFINE6(32_futex, u32 __user *, uaddr, int, op, u32, val,
struct compat_timespec __user *, utime, u32 __user *, uaddr2,
u32, val3)
{
return compat_sys_futex(uaddr, op, val, utime, uaddr2, val3);
}


@ -315,7 +315,7 @@ EXPORT(sysn32_call_table)
PTR sys_fremovexattr
PTR sys_tkill
PTR sys_ni_syscall
PTR compat_sys_futex
PTR sys_32_futex
PTR compat_sys_sched_setaffinity /* 6195 */
PTR compat_sys_sched_getaffinity
PTR sys_cacheflush


@ -441,7 +441,7 @@ sys_call_table:
PTR sys_fremovexattr /* 4235 */
PTR sys_tkill
PTR sys_sendfile64
PTR compat_sys_futex
PTR sys_32_futex
PTR compat_sys_sched_setaffinity
PTR compat_sys_sched_getaffinity /* 4240 */
PTR compat_sys_io_setup


@ -8,6 +8,7 @@
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
*/
#include <linux/cache.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/personality.h>
@ -658,6 +659,8 @@ static void do_signal(struct pt_regs *regs)
asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
__u32 thread_info_flags)
{
local_irq_enable();
/* deal with pending signal delivery */
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
do_signal(regs);


@ -14,6 +14,7 @@
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
@ -364,21 +365,26 @@ static int regs_to_trapnr(struct pt_regs *regs)
return (regs->cp0_cause >> 2) & 0x1f;
}
static DEFINE_SPINLOCK(die_lock);
static DEFINE_RAW_SPINLOCK(die_lock);
void __noreturn die(const char *str, struct pt_regs *regs)
{
static int die_counter;
int sig = SIGSEGV;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long dvpret = dvpe();
unsigned long dvpret;
#endif /* CONFIG_MIPS_MT_SMTC */
oops_enter();
if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
sig = 0;
console_verbose();
spin_lock_irq(&die_lock);
raw_spin_lock_irq(&die_lock);
#ifdef CONFIG_MIPS_MT_SMTC
dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */
bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
mips_mt_regdump(dvpret);
@ -387,7 +393,9 @@ void __noreturn die(const char *str, struct pt_regs *regs)
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
raw_spin_unlock_irq(&die_lock);
oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");


@ -192,7 +192,7 @@ static struct tc *get_tc(int index)
}
spin_unlock(&vpecontrol.tc_list_lock);
return NULL;
return res;
}
/* allocate a vpe and associate it with this minor (or index) */


@ -123,11 +123,10 @@ void ltq_enable_irq(struct irq_data *d)
static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
{
int i;
int irq_nr = d->irq - INT_NUM_IRQ0;
ltq_enable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
if (irq_nr == ltq_eiu_irq[i]) {
if (d->irq == ltq_eiu_irq[i]) {
/* low level - we should really handle set_type */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
(0x6 << (i * 4)), LTQ_EIU_EXIN_C);
@ -147,11 +146,10 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
static void ltq_shutdown_eiu_irq(struct irq_data *d)
{
int i;
int irq_nr = d->irq - INT_NUM_IRQ0;
ltq_disable_irq(d);
for (i = 0; i < MAX_EIU; i++) {
if (irq_nr == ltq_eiu_irq[i]) {
if (d->irq == ltq_eiu_irq[i]) {
/* disable */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i),
LTQ_EIU_EXIN_INEN);


@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/ioport.h>
#include <lantiq_soc.h>


@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/ioport.h>
#include <lantiq_soc.h>


@ -105,6 +105,7 @@ asmlinkage void plat_irq_dispatch(void)
static struct irqaction cascade = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
void __init arch_init_irq(void)


@ -42,6 +42,7 @@ asmlinkage void mach_irq_dispatch(unsigned int pending)
static struct irqaction cascade_irqaction = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
void __init mach_init_irq(void)


@ -96,12 +96,13 @@ static irqreturn_t ip6_action(int cpl, void *dev_id)
struct irqaction ip6_irqaction = {
.handler = ip6_action,
.name = "cascade",
.flags = IRQF_SHARED,
.flags = IRQF_SHARED | IRQF_NO_THREAD,
};
struct irqaction cascade_irqaction = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
void __init mach_init_irq(void)


@ -6,6 +6,7 @@
* Copyright (C) 2011 Wind River Systems,
* written by Ralf Baechle <ralf@linux-mips.org>
*/
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
@ -15,12 +16,11 @@
#include <linux/sched.h>
unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */
EXPORT_SYMBOL(shm_align_mask);
/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)
#define MAX_GAP ((TASK_SIZE)/6*5)
static int mmap_is_legacy(void)
{
@ -57,13 +57,13 @@ static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
return base - off;
}
#define COLOUR_ALIGN(addr,pgoff) \
#define COLOUR_ALIGN(addr, pgoff) \
((((addr) + shm_align_mask) & ~shm_align_mask) + \
(((pgoff) << PAGE_SHIFT) & shm_align_mask))
enum mmap_allocation_direction {UP, DOWN};
static unsigned long arch_get_unmapped_area_foo(struct file *filp,
static unsigned long arch_get_unmapped_area_common(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags, enum mmap_allocation_direction dir)
{
@ -103,16 +103,16 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
(!vma || addr + len <= vma->vm_start))
return addr;
}
if (dir == UP) {
addr = mm->mmap_base;
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
if (do_color_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
@ -131,28 +131,30 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
mm->free_area_cache = mm->mmap_base;
}
/* either no address requested or can't fit in requested address hole */
/*
* either no address requested, or the mapping can't fit into
* the requested address hole
*/
addr = mm->free_area_cache;
if (do_color_align) {
unsigned long base =
COLOUR_ALIGN_DOWN(addr - len, pgoff);
if (do_color_align) {
unsigned long base =
COLOUR_ALIGN_DOWN(addr - len, pgoff);
addr = base + len;
}
}
/* make sure it can fit in the remaining address space */
if (likely(addr > len)) {
vma = find_vma(mm, addr - len);
if (!vma || addr <= vma->vm_start) {
/* remember the address as a hint for next time */
return mm->free_area_cache = addr-len;
/* cache the address as a hint for next time */
return mm->free_area_cache = addr - len;
}
}
if (unlikely(mm->mmap_base < len))
goto bottomup;
addr = mm->mmap_base-len;
addr = mm->mmap_base - len;
if (do_color_align)
addr = COLOUR_ALIGN_DOWN(addr, pgoff);
@ -163,8 +165,8 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
* return with success:
*/
vma = find_vma(mm, addr);
if (likely(!vma || addr+len <= vma->vm_start)) {
/* remember the address as a hint for next time */
if (likely(!vma || addr + len <= vma->vm_start)) {
/* cache the address as a hint for next time */
return mm->free_area_cache = addr;
}
@ -173,7 +175,7 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp,
mm->cached_hole_size = vma->vm_start - addr;
/* try just below the current vma->vm_start */
addr = vma->vm_start-len;
addr = vma->vm_start - len;
if (do_color_align)
addr = COLOUR_ALIGN_DOWN(addr, pgoff);
} while (likely(len < vma->vm_start));
@ -201,7 +203,7 @@ bottomup:
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
return arch_get_unmapped_area_foo(filp,
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, UP);
}
@ -213,7 +215,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
unsigned long addr0, unsigned long len, unsigned long pgoff,
unsigned long flags)
{
return arch_get_unmapped_area_foo(filp,
return arch_get_unmapped_area_common(filp,
addr0, len, pgoff, flags, DOWN);
}


@ -1759,14 +1759,13 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
u32 *p = handle_tlbm;
struct uasm_label *l = labels;
struct uasm_reloc *r = relocs;
struct work_registers wr;
memset(handle_tlbm, 0, sizeof(handle_tlbm));
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
build_r3000_tlbchange_handler_head(&p, K0, K1);
build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
uasm_i_nop(&p); /* load delay */
build_make_write(&p, &r, K0, K1);
build_r3000_pte_reload_tlbwi(&p, K0, K1);
@ -1963,7 +1962,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
uasm_i_andi(&p, wr.r3, wr.r3, 2);
uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
}
if (PM_DEFAULT_MASK == 0)
uasm_i_nop(&p);
/*
* We clobbered C0_PAGEMASK, restore it. On the other branch
* it is restored in build_huge_tlb_write_entry.


@ -350,12 +350,14 @@ unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
static struct irqaction i8259irq = {
.handler = no_action,
.name = "XT-PIC cascade"
.name = "XT-PIC cascade",
.flags = IRQF_NO_THREAD,
};
static struct irqaction corehi_irqaction = {
.handler = no_action,
.name = "CoreHi"
.name = "CoreHi",
.flags = IRQF_NO_THREAD,
};
static msc_irqmap_t __initdata msc_irqmap[] = {


@ -2,4 +2,4 @@ obj-y += setup.o platform.o irq.o setup.o time.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o
EXTRA_CFLAGS += -Werror
ccflags-y += -Werror


@ -171,8 +171,13 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
u32 temp_buffer;
/* set clock to 33Mhz */
ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
if (ltq_is_ar9()) {
ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR);
ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR);
} else {
ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
}
/* external or internal clock ? */
if (conf->clock) {


@ -215,7 +215,7 @@ static int __init rc32434_pci_init(void)
rc32434_pcibridge_init();
io_map_base = ioremap(rc32434_res_pci_io1.start,
resource_size(&rcrc32434_res_pci_io1));
resource_size(&rc32434_res_pci_io1));
if (!io_map_base)
return -ENOMEM;


@ -108,12 +108,14 @@ asmlinkage void plat_irq_dispatch(struct pt_regs *regs)
static struct irqaction cic_cascade_msp = {
.handler = no_action,
.name = "MSP CIC cascade"
.name = "MSP CIC cascade",
.flags = IRQF_NO_THREAD,
};
static struct irqaction per_cascade_msp = {
.handler = no_action,
.name = "MSP PER cascade"
.name = "MSP PER cascade",
.flags = IRQF_NO_THREAD,
};
void __init arch_init_irq(void)


@ -167,7 +167,7 @@ static struct irq_chip level_irq_type = {
static struct irqaction gic_action = {
.handler = no_action,
.flags = IRQF_DISABLED,
.flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "GIC",
};


@ -155,32 +155,32 @@ static void __irq_entry indy_buserror_irq(void)
static struct irqaction local0_cascade = {
.handler = no_action,
.flags = IRQF_DISABLED,
.flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "local0 cascade",
};
static struct irqaction local1_cascade = {
.handler = no_action,
.flags = IRQF_DISABLED,
.flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "local1 cascade",
};
static struct irqaction buserr = {
.handler = no_action,
.flags = IRQF_DISABLED,
.flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "Bus Error",
};
static struct irqaction map0_cascade = {
.handler = no_action,
.flags = IRQF_DISABLED,
.flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "mapable0 cascade",
};
#ifdef USE_LIO3_IRQ
static struct irqaction map1_cascade = {
.handler = no_action,
.flags = IRQF_DISABLED,
.flags = IRQF_DISABLED | IRQF_NO_THREAD,
.name = "mapable1 cascade",
};
#define SGI_INTERRUPTS SGINT_END


@ -359,6 +359,7 @@ void sni_rm200_init_8259A(void)
static struct irqaction sni_rm200_irq2 = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
static struct resource sni_rm200_pic1_resource = {


@ -34,6 +34,7 @@ static irq_cascade_t irq_cascade[NR_IRQS] __cacheline_aligned;
static struct irqaction cascade_irqaction = {
.handler = no_action,
.name = "cascade",
.flags = IRQF_NO_THREAD,
};
int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int))


@ -21,7 +21,7 @@
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <linux/atomic.h>
#include <asm/atomic_32.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/abi.h>


@ -70,7 +70,7 @@
*/
#include <linux/linkage.h>
#include <linux/atomic.h>
#include <asm/atomic_32.h>
#include <asm/page.h>
#include <asm/processor.h>


@ -56,7 +56,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
};
static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE;
static int __init vsyscall_setup(char *str)
{


@ -43,6 +43,17 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
},
},
/* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
/* 2006 AMD HT/VIA system with two host bridges */
{
.callback = set_use_crs,
.ident = "ASUS M2V-MX SE",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
},
},
{}
};


@ -678,38 +678,40 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
pentry = (struct sfi_device_table_entry *)sb->pentry;
for (i = 0; i < num; i++, pentry++) {
if (pentry->irq != (u8)0xff) { /* native RTE case */
int irq = pentry->irq;
if (irq != (u8)0xff) { /* native RTE case */
/* these SPI2 devices are not exposed to system as PCI
* devices, but they have separate RTE entry in IOAPIC
* so we have to enable them one by one here
*/
ioapic = mp_find_ioapic(pentry->irq);
ioapic = mp_find_ioapic(irq);
irq_attr.ioapic = ioapic;
irq_attr.ioapic_pin = pentry->irq;
irq_attr.ioapic_pin = irq;
irq_attr.trigger = 1;
irq_attr.polarity = 1;
io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr);
io_apic_set_pci_routing(NULL, irq, &irq_attr);
} else
pentry->irq = 0; /* No irq */
irq = 0; /* No irq */
switch (pentry->type) {
case SFI_DEV_TYPE_IPC:
/* ID as IRQ is a hack that will go away */
pdev = platform_device_alloc(pentry->name, pentry->irq);
pdev = platform_device_alloc(pentry->name, irq);
if (pdev == NULL) {
pr_err("out of memory for SFI platform device '%s'.\n",
pentry->name);
continue;
}
install_irq_resource(pdev, pentry->irq);
install_irq_resource(pdev, irq);
pr_debug("info[%2d]: IPC bus, name = %16.16s, "
"irq = 0x%2x\n", i, pentry->name, pentry->irq);
"irq = 0x%2x\n", i, pentry->name, irq);
sfi_handle_ipc_dev(pdev);
break;
case SFI_DEV_TYPE_SPI:
memset(&spi_info, 0, sizeof(spi_info));
strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN);
spi_info.irq = pentry->irq;
spi_info.irq = irq;
spi_info.bus_num = pentry->host_num;
spi_info.chip_select = pentry->addr;
spi_info.max_speed_hz = pentry->max_freq;
@ -726,7 +728,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
memset(&i2c_info, 0, sizeof(i2c_info));
bus = pentry->host_num;
strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN);
i2c_info.irq = pentry->irq;
i2c_info.irq = irq;
i2c_info.addr = pentry->addr;
pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, "
"irq = 0x%2x, addr = 0x%x\n", i, bus,


@ -34,8 +34,8 @@ struct gpio_bank {
u16 irq;
u16 virtual_irq_start;
int method;
#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
u32 suspend_wakeup;
#if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS)
u32 saved_wakeup;
#endif
u32 non_wakeup_gpios;


@ -577,6 +577,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
void
pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert)
{
*gpio_base = -1;
}
#endif


@ -129,7 +129,9 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, NULL, 0, delay, &ack);
if (ret < 0)
if (ret == -EBUSY)
continue;
else if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return send_bytes;
@ -160,7 +162,9 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, recv, recv_bytes, delay, &ack);
if (ret < 0)
if (ret == -EBUSY)
continue;
else if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return ret;
@ -236,7 +240,9 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(auxch,
msg, msg_bytes, reply, reply_bytes, 0, &ack);
if (ret < 0) {
if (ret == -EBUSY)
continue;
else if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}


@ -1303,23 +1303,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
/* get the DPCD from the bridge */
radeon_dp_getdpcd(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
ret = connector_status_connected;
else {
/* need to setup ddc on the bridge */
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
if (encoder) {
/* setup ddc on the bridge */
radeon_atom_ext_encoder_setup_ddc(encoder);
if (radeon_ddc_probe(radeon_connector,
radeon_connector->requires_extended_probe))
radeon_connector->requires_extended_probe)) /* try DDC */
ret = connector_status_connected;
}
if ((ret == connector_status_disconnected) &&
radeon_connector->dac_load_detect) {
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
struct drm_encoder_helper_funcs *encoder_funcs;
if (encoder) {
encoder_funcs = encoder->helper_private;
else if (radeon_connector->dac_load_detect) { /* try load detection */
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
}


@ -1755,9 +1755,12 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
/* DCE4/5 */
if (ASIC_IS_DCE4(rdev)) {
dig = radeon_encoder->enc_priv;
if (ASIC_IS_DCE41(rdev))
return radeon_crtc->crtc_id;
else {
if (ASIC_IS_DCE41(rdev)) {
if (dig->linkb)
return 1;
else
return 0;
} else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)


@ -1715,7 +1715,8 @@ static void w83627ehf_device_remove_files(struct device *dev)
}
/* Get the monitoring functions started */
static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data,
enum kinds kind)
{
int i;
u8 tmp, diode;
@ -1746,10 +1747,16 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data)
w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01);
/* Get thermal sensor types */
diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
switch (kind) {
case w83627ehf:
diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE);
break;
default:
diode = 0x70;
}
for (i = 0; i < 3; i++) {
if ((tmp & (0x02 << i)))
data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2;
data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3;
else
data->temp_type[i] = 4; /* thermistor */
}
@ -2016,7 +2023,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
}
/* Initialize the chip */
w83627ehf_init_device(data);
w83627ehf_init_device(data, sio_data->kind);
data->vrm = vid_which_vrm();
superio_enter(sio_data->sioreg);


@ -327,7 +327,7 @@ config BLK_DEV_OPTI621
select BLK_DEV_IDEPCI
help
This is a driver for the OPTi 82C621 EIDE controller.
Please read the comments at the top of <file:drivers/ide/pci/opti621.c>.
Please read the comments at the top of <file:drivers/ide/opti621.c>.
config BLK_DEV_RZ1000
tristate "RZ1000 chipset bugfix/support"
@ -365,7 +365,7 @@ config BLK_DEV_ALI15X3
normal dual channel support.
Please read the comments at the top of
<file:drivers/ide/pci/alim15x3.c>.
<file:drivers/ide/alim15x3.c>.
If unsure, say N.
@ -528,7 +528,7 @@ config BLK_DEV_NS87415
This driver adds detection and support for the NS87415 chip
(used mainly on SPARC64 and PA-RISC machines).
Please read the comments at the top of <file:drivers/ide/pci/ns87415.c>.
Please read the comments at the top of <file:drivers/ide/ns87415.c>.
config BLK_DEV_PDC202XX_OLD
tristate "PROMISE PDC202{46|62|65|67} support"
@ -547,7 +547,7 @@ config BLK_DEV_PDC202XX_OLD
for more than one card.
Please read the comments at the top of
<file:drivers/ide/pci/pdc202xx_old.c>.
<file:drivers/ide/pdc202xx_old.c>.
If unsure, say N.
@ -593,7 +593,7 @@ config BLK_DEV_SIS5513
ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740,
SiS745, SiS750
Please read the comments at the top of <file:drivers/ide/pci/sis5513.c>.
Please read the comments at the top of <file:drivers/ide/sis5513.c>.
config BLK_DEV_SL82C105
tristate "Winbond SL82c105 support"
@ -616,7 +616,7 @@ config BLK_DEV_SLC90E66
look-a-like to the PIIX4 it should be a nice addition.
Please read the comments at the top of
<file:drivers/ide/pci/slc90e66.c>.
<file:drivers/ide/slc90e66.c>.
config BLK_DEV_TRM290
tristate "Tekram TRM290 chipset support"
@ -625,7 +625,7 @@ config BLK_DEV_TRM290
This driver adds support for bus master DMA transfers
using the Tekram TRM290 PCI IDE chip. Volunteers are
needed for further tweaking and development.
Please read the comments at the top of <file:drivers/ide/pci/trm290.c>.
Please read the comments at the top of <file:drivers/ide/trm290.c>.
config BLK_DEV_VIA82CXXX
tristate "VIA82CXXX chipset support"
@ -836,7 +836,7 @@ config BLK_DEV_ALI14XX
of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster
I/O speeds to be set as well.
See the files <file:Documentation/ide/ide.txt> and
<file:drivers/ide/legacy/ali14xx.c> for more info.
<file:drivers/ide/ali14xx.c> for more info.
config BLK_DEV_DTC2278
tristate "DTC-2278 support"
@ -847,7 +847,7 @@ config BLK_DEV_DTC2278
boot parameter. It enables support for the secondary IDE interface
of the DTC-2278 card, and permits faster I/O speeds to be set as
well. See the <file:Documentation/ide/ide.txt> and
<file:drivers/ide/legacy/dtc2278.c> files for more info.
<file:drivers/ide/dtc2278.c> files for more info.
config BLK_DEV_HT6560B
tristate "Holtek HT6560B support"
@ -858,7 +858,7 @@ config BLK_DEV_HT6560B
boot parameter. It enables support for the secondary IDE interface
of the Holtek card, and permits faster I/O speeds to be set as well.
See the <file:Documentation/ide/ide.txt> and
<file:drivers/ide/legacy/ht6560b.c> files for more info.
<file:drivers/ide/ht6560b.c> files for more info.
config BLK_DEV_QD65XX
tristate "QDI QD65xx support"
@ -867,7 +867,7 @@ config BLK_DEV_QD65XX
help
This driver is enabled at runtime using the "qd65xx.probe" kernel
boot parameter. It permits faster I/O speeds to be set. See the
<file:Documentation/ide/ide.txt> and <file:drivers/ide/legacy/qd65xx.c>
<file:Documentation/ide/ide.txt> and <file:drivers/ide/qd65xx.c>
for more info.
config BLK_DEV_UMC8672
@ -879,7 +879,7 @@ config BLK_DEV_UMC8672
boot parameter. It enables support for the secondary IDE interface
of the UMC-8672, and permits faster I/O speeds to be set as well.
See the files <file:Documentation/ide/ide.txt> and
<file:drivers/ide/legacy/umc8672.c> for more info.
<file:drivers/ide/umc8672.c> for more info.
endif


@ -1124,11 +1124,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
for (i = 0; i < 8; i++)
__set_bit(BTN_0 + i, input_dev->keybit);
if (wacom_wac->features.type != WACOM_21UX2) {
input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
}
input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0);
input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0);
input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);


@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
ti->num_flush_requests = 1;
ti->discard_zeroes_data_unsupported = 1;
return 0;
bad:


@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc,
* corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags>
*/
if (!strcasecmp(arg_name, "corrupt_bio_byte")) {
if (!argc)
if (!argc) {
ti->error = "Feature corrupt_bio_byte requires parameters";
return -EINVAL;
}
r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error);
if (r)


@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv,
rs->ti->error = "write_mostly option is only valid for RAID1";
return -EINVAL;
}
if (value > rs->md.raid_disks) {
if (value >= rs->md.raid_disks) {
rs->ti->error = "Invalid write_mostly drive index given";
return -EINVAL;
}


@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t)
return;
template_disk = dm_table_get_integrity_disk(t, true);
if (!template_disk &&
blk_integrity_is_initialized(dm_disk(t->md))) {
if (template_disk)
blk_integrity_register(dm_disk(t->md),
blk_get_integrity(template_disk));
else if (blk_integrity_is_initialized(dm_disk(t->md)))
DMWARN("%s: device no longer has a valid integrity profile",
dm_device_name(t->md));
return;
}
blk_integrity_register(dm_disk(t->md),
blk_get_integrity(template_disk));
else
DMWARN("%s: unable to establish an integrity profile",
dm_device_name(t->md));
}
static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
return 0;
}
static bool dm_table_discard_zeroes_data(struct dm_table *t)
{
struct dm_target *ti;
unsigned i = 0;
/* Ensure that all targets support discard_zeroes_data. */
while (i < dm_table_get_num_targets(t)) {
ti = dm_table_get_target(t, i++);
if (ti->discard_zeroes_data_unsupported)
return 0;
}
return 1;
}
void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_flush(q, flush);
if (!dm_table_discard_zeroes_data(t))
q->limits.discard_zeroes_data = 0;
dm_table_set_integrity(t);
/*


@ -61,6 +61,11 @@
static void autostart_arrays(int part);
#endif
/* pers_list is a list of registered personalities protected
* by pers_lock.
* pers_lock does extra service to protect accesses to
* mddev->thread when the mutex cannot be held.
*/
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);
@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev)
} else
mutex_unlock(&mddev->reconfig_mutex);
/* as we've dropped the mutex we need a spinlock to
* make sure the thread doesn't disappear
*/
spin_lock(&pers_lock);
md_wakeup_thread(mddev->thread);
spin_unlock(&pers_lock);
}
static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
@ -6429,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev,
return thread;
}
void md_unregister_thread(mdk_thread_t *thread)
void md_unregister_thread(mdk_thread_t **threadp)
{
mdk_thread_t *thread = *threadp;
if (!thread)
return;
dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk));
/* Locking ensures that mddev_unlock does not wake_up a
* non-existent thread
*/
spin_lock(&pers_lock);
*threadp = NULL;
spin_unlock(&pers_lock);
kthread_stop(thread->tsk);
kfree(thread);
@ -7340,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev)
mdk_rdev_t *rdev;
/* resync has finished, collect result */
md_unregister_thread(mddev->sync_thread);
mddev->sync_thread = NULL;
md_unregister_thread(&mddev->sync_thread);
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
/* success...*/


@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);
extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
mddev_t *mddev, const char *name);
extern void md_unregister_thread(mdk_thread_t *thread);
extern void md_unregister_thread(mdk_thread_t **threadp);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev);
extern void md_write_start(mddev_t *mddev, struct bio *bi);


@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev)
{
multipath_conf_t *conf = mddev->private;
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
md_unregister_thread(&mddev->thread);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
mempool_destroy(conf->pool);
kfree(conf->multipaths);


@ -2562,8 +2562,7 @@ static int stop(mddev_t *mddev)
raise_barrier(conf);
lower_barrier(conf);
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
md_unregister_thread(&mddev->thread);
if (conf->r1bio_pool)
mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);


@ -2955,7 +2955,7 @@ static int run(mddev_t *mddev)
return 0;
out_free_conf:
md_unregister_thread(mddev->thread);
md_unregister_thread(&mddev->thread);
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);
safe_put_page(conf->tmppage);
@ -2973,8 +2973,7 @@ static int stop(mddev_t *mddev)
raise_barrier(conf, 0);
lower_barrier(conf);
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
md_unregister_thread(&mddev->thread);
blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/
if (conf->r10bio_pool)
mempool_destroy(conf->r10bio_pool);


@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev)
return 0;
abort:
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
md_unregister_thread(&mddev->thread);
if (conf) {
print_raid5_conf(conf);
free_conf(conf);
@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev)
{
raid5_conf_t *conf = mddev->private;
md_unregister_thread(mddev->thread);
mddev->thread = NULL;
md_unregister_thread(&mddev->thread);
if (mddev->queue)
mddev->queue->backing_dev_info.congested_fn = NULL;
free_conf(conf);


@ -239,13 +239,19 @@ void bnx2x_int_disable(struct bnx2x *bp);
* FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
*
*/
/* iSCSI L2 */
#define BNX2X_ISCSI_ETH_CL_ID_IDX 1
#define BNX2X_ISCSI_ETH_CID 49
enum {
BNX2X_ISCSI_ETH_CL_ID_IDX,
BNX2X_FCOE_ETH_CL_ID_IDX,
BNX2X_MAX_CNIC_ETH_CL_ID_IDX,
};
/* FCoE L2 */
#define BNX2X_FCOE_ETH_CL_ID_IDX 2
#define BNX2X_FCOE_ETH_CID 50
#define BNX2X_CNIC_START_ETH_CID 48
enum {
/* iSCSI L2 */
BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID,
/* FCoE L2 */
BNX2X_FCOE_ETH_CID,
};
/** Additional rings budgeting */
#ifdef BCM_CNIC


@ -1297,7 +1297,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
{
return bp->cnic_base_cl_id + cl_idx +
(bp->pf_num >> 1) * NON_ETH_CONTEXT_USE;
(bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX;
}
static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)


@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
void __iomem *data = &regs->tx.dsr1_0;
u16 *payload = (u16 *)frame->data;
/* It is safe to write into dsr[dlc+1] */
for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
for (i = 0; i < frame->can_dlc / 2; i++) {
out_be16(data, *payload++);
data += 2 + _MSCAN_RESERVED_DSR_SIZE;
}
/* write remaining byte if necessary */
if (frame->can_dlc & 1)
out_8(data, frame->data[frame->can_dlc - 1]);
}
out_8(&regs->tx.dlr, frame->can_dlc);
@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
void __iomem *data = &regs->rx.dsr1_0;
u16 *payload = (u16 *)frame->data;
for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
for (i = 0; i < frame->can_dlc / 2; i++) {
*payload++ = in_be16(data);
data += 2 + _MSCAN_RESERVED_DSR_SIZE;
}
/* read remaining byte if necessary */
if (frame->can_dlc & 1)
frame->data[frame->can_dlc - 1] = in_8(data);
}
out_8(&regs->canrflg, MSCAN_RXF);


@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
dest = macvlan_hash_lookup(port, eth->h_dest);
if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
/* send to lowerdev first for its network taps */
vlan->forward(vlan->lowerdev, skb);
dev_forward_skb(vlan->lowerdev, skb);
return NET_XMIT_SUCCESS;
}


@ -172,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
memset(ring->buf, 0, ring->buf_size);
ring->qp_state = MLX4_QP_STATE_RST;
ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
ring->doorbell_qpn = ring->qp.qpn << 8;
mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
ring->cqn, &ring->context);
@ -791,7 +791,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan(skb);
if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) {
*(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn;
*(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
op_own |= htonl((bf_index & 0xffff) << 8);
/* Ensure new descriptor hits memory
* before setting ownership of this descriptor to HW */
@ -812,7 +812,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
tx_desc->ctrl.owner_opcode = op_own;
wmb();
writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL);
}
/* Poll CQ here */


@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent,
sas_disable_routing(parent, phy->attached_sas_addr);
}
memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
sas_port_delete_phy(phy->port, phy->phy);
if (phy->port->num_phys == 0)
sas_port_delete(phy->port);
phy->port = NULL;
if (phy->port) {
sas_port_delete_phy(phy->port, phy->phy);
if (phy->port->num_phys == 0)
sas_port_delete(phy->port);
phy->port = NULL;
}
}
static int sas_discover_bfs_by_root_level(struct domain_device *root,


@ -1328,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
qla2x00_sp_compl(ha, sp);
} else {
ctx = sp->ctx;
if (ctx->type == SRB_LOGIN_CMD ||
ctx->type == SRB_LOGOUT_CMD) {
ctx->u.iocb_cmd->free(sp);
} else {
if (ctx->type == SRB_ELS_CMD_RPT ||
ctx->type == SRB_ELS_CMD_HST ||
ctx->type == SRB_CT_CMD) {
struct fc_bsg_job *bsg_job =
ctx->u.bsg_job;
if (bsg_job->request->msgcode
@ -1343,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
kfree(sp->ctx);
mempool_free(sp,
ha->srb_mempool);
} else {
ctx->u.iocb_cmd->free(sp);
}
}
}


@ -411,7 +411,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
skb->protocol = eth_type_trans(skb, dev);
skb->dev = dev;
if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error))
if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc ||
work->word2.s.L4_error || !work->word2.s.tcp_or_udp))
skb->ip_summed = CHECKSUM_NONE;
else
skb->ip_summed = CHECKSUM_UNNECESSARY;


@ -478,8 +478,10 @@ lqasc_set_termios(struct uart_port *port,
spin_unlock_irqrestore(&ltq_asc_lock, flags);
/* Don't rewrite B0 */
if (tty_termios_baud_rate(new))
if (tty_termios_baud_rate(new))
tty_termios_encode_baud_rate(new, baud, baud);
uart_update_timeout(port, cflag, baud);
}
static const char*


@ -1047,7 +1047,16 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (!max_to_defrag)
max_to_defrag = last_index - 1;
while (i <= last_index && defrag_count < max_to_defrag) {
/*
* make writeback start from i, so the defrag range can be
* written sequentially.
*/
if (i < inode->i_mapping->writeback_index)
inode->i_mapping->writeback_index = i;
while (i <= last_index && defrag_count < max_to_defrag &&
(i < (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT)) {
/*
* make sure we stop running if someone unmounts
* the FS


@ -2018,7 +2018,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
warned_on_ntlm = true;
cERROR(1, "default security mechanism requested. The default "
"security mechanism will be upgraded from ntlm to "
"ntlmv2 in kernel release 3.1");
"ntlmv2 in kernel release 3.2");
}
ses->overrideSecFlg = volume_info->secFlg;


@ -629,7 +629,7 @@ xfs_buf_item_push(
* the xfsbufd to get this buffer written. We have to unlock the buffer
* to allow the xfsbufd to write it, too.
*/
STATIC void
STATIC bool
xfs_buf_item_pushbuf(
struct xfs_log_item *lip)
{
@ -643,6 +643,7 @@ xfs_buf_item_pushbuf(
xfs_buf_delwri_promote(bp);
xfs_buf_relse(bp);
return true;
}
STATIC void


@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait(
* search the buffer cache can be a time consuming thing, and AIL lock is a
* spinlock.
*/
STATIC void
STATIC bool
xfs_qm_dquot_logitem_pushbuf(
struct xfs_log_item *lip)
{
struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
struct xfs_dquot *dqp = qlip->qli_dquot;
struct xfs_buf *bp;
bool ret = true;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf(
if (completion_done(&dqp->q_flush) ||
!(lip->li_flags & XFS_LI_IN_AIL)) {
xfs_dqunlock(dqp);
return;
return true;
}
bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
xfs_dqunlock(dqp);
if (!bp)
return;
return true;
if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp);
if (xfs_buf_ispinned(bp))
ret = false;
xfs_buf_relse(bp);
return ret;
}
/*


@ -706,13 +706,14 @@ xfs_inode_item_committed(
* marked delayed write. If that's the case, we'll promote it and that will
* allow the caller to write the buffer by triggering the xfsbufd to run.
*/
STATIC void
STATIC bool
xfs_inode_item_pushbuf(
struct xfs_log_item *lip)
{
struct xfs_inode_log_item *iip = INODE_ITEM(lip);
struct xfs_inode *ip = iip->ili_inode;
struct xfs_buf *bp;
bool ret = true;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
@ -723,7 +724,7 @@ xfs_inode_item_pushbuf(
if (completion_done(&ip->i_flush) ||
!(lip->li_flags & XFS_LI_IN_AIL)) {
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return;
return true;
}
bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
@ -731,10 +732,13 @@ xfs_inode_item_pushbuf(
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (!bp)
return;
return true;
if (XFS_BUF_ISDELAYWRITE(bp))
xfs_buf_delwri_promote(bp);
if (xfs_buf_ispinned(bp))
ret = false;
xfs_buf_relse(bp);
return ret;
}
/*


@ -68,6 +68,8 @@
#include <linux/ctype.h>
#include <linux/writeback.h>
#include <linux/capability.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/list_sort.h>
#include <asm/page.h>


@ -1648,24 +1648,13 @@ xfs_init_workqueues(void)
*/
xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8);
if (!xfs_syncd_wq)
goto out;
xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8);
if (!xfs_ail_wq)
goto out_destroy_syncd;
return -ENOMEM;
return 0;
out_destroy_syncd:
destroy_workqueue(xfs_syncd_wq);
out:
return -ENOMEM;
}
STATIC void
xfs_destroy_workqueues(void)
{
destroy_workqueue(xfs_ail_wq);
destroy_workqueue(xfs_syncd_wq);
}


@ -350,7 +350,7 @@ typedef struct xfs_item_ops {
void (*iop_unlock)(xfs_log_item_t *);
xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t);
void (*iop_push)(xfs_log_item_t *);
void (*iop_pushbuf)(xfs_log_item_t *);
bool (*iop_pushbuf)(xfs_log_item_t *);
void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
} xfs_item_ops_t;


@ -28,8 +28,6 @@
#include "xfs_trans_priv.h"
#include "xfs_error.h"
struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
#ifdef DEBUG
/*
* Check that the list is sorted as it should be.
@ -356,16 +354,10 @@ xfs_ail_delete(
xfs_trans_ail_cursor_clear(ailp, lip);
}
/*
* xfs_ail_worker does the work of pushing on the AIL. It will requeue itself
* to run at a later time if there is more work to do to complete the push.
*/
STATIC void
xfs_ail_worker(
struct work_struct *work)
static long
xfsaild_push(
struct xfs_ail *ailp)
{
struct xfs_ail *ailp = container_of(to_delayed_work(work),
struct xfs_ail, xa_work);
xfs_mount_t *mp = ailp->xa_mount;
struct xfs_ail_cursor cur;
xfs_log_item_t *lip;
@ -439,8 +431,13 @@ xfs_ail_worker(
case XFS_ITEM_PUSHBUF:
XFS_STATS_INC(xs_push_ail_pushbuf);
IOP_PUSHBUF(lip);
ailp->xa_last_pushed_lsn = lsn;
if (!IOP_PUSHBUF(lip)) {
stuck++;
flush_log = 1;
} else {
ailp->xa_last_pushed_lsn = lsn;
}
push_xfsbufd = 1;
break;
@ -452,7 +449,6 @@ xfs_ail_worker(
case XFS_ITEM_LOCKED:
XFS_STATS_INC(xs_push_ail_locked);
ailp->xa_last_pushed_lsn = lsn;
stuck++;
break;
@ -504,20 +500,6 @@ out_done:
ailp->xa_last_pushed_lsn = 0;
ailp->xa_log_flush = 0;
/*
* We clear the XFS_AIL_PUSHING_BIT first before checking
* whether the target has changed. If the target has changed,
* this pushes the requeue race directly onto the result of the
* atomic test/set bit, so we are guaranteed that either the
* the pusher that changed the target or ourselves will requeue
* the work (but not both).
*/
clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
smp_rmb();
if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
return;
tout = 50;
} else if (XFS_LSN_CMP(lsn, target) >= 0) {
/*
@ -544,9 +526,30 @@ out_done:
ailp->xa_last_pushed_lsn = 0;
}
/* There is more to do, requeue us. */
queue_delayed_work(xfs_syncd_wq, &ailp->xa_work,
msecs_to_jiffies(tout));
return tout;
}
static int
xfsaild(
void *data)
{
struct xfs_ail *ailp = data;
long tout = 0; /* milliseconds */
while (!kthread_should_stop()) {
if (tout && tout <= 20)
__set_current_state(TASK_KILLABLE);
else
__set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(tout ?
msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
try_to_freeze();
tout = xfsaild_push(ailp);
}
return 0;
}
/*
@ -581,8 +584,9 @@ xfs_ail_push(
*/
smp_wmb();
xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
smp_wmb();
wake_up_process(ailp->xa_task);
}
/*
@ -820,9 +824,18 @@ xfs_trans_ail_init(
INIT_LIST_HEAD(&ailp->xa_ail);
INIT_LIST_HEAD(&ailp->xa_cursors);
spin_lock_init(&ailp->xa_lock);
INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker);
ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
ailp->xa_mount->m_fsname);
if (IS_ERR(ailp->xa_task))
goto out_free_ailp;
mp->m_ail = ailp;
return 0;
out_free_ailp:
kmem_free(ailp);
return ENOMEM;
}
void
@ -831,6 +844,6 @@ xfs_trans_ail_destroy(
{
struct xfs_ail *ailp = mp->m_ail;
cancel_delayed_work_sync(&ailp->xa_work);
kthread_stop(ailp->xa_task);
kmem_free(ailp);
}
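
The xfs_trans_ail.c hunks above replace the xfs_ail_wq delayed work with a dedicated xfsaild kthread that sleeps until it is woken or its timeout expires. Below is a self-contained sketch of that kthread lifecycle (start, periodic push, wake, stop); ail_demo, demo_push and demo_thread are illustrative names rather than code from this commit, and the sketch re-checks the stop flag after setting the task state, a common way to avoid missing a wakeup from kthread_stop().

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/jiffies.h>

struct ail_demo {
        struct task_struct *task;
};

/* One pass of work; returns how long to sleep in ms, 0 meaning "idle". */
static long demo_push(struct ail_demo *ail)
{
        return 50;
}

static int demo_thread(void *data)
{
        struct ail_demo *ail = data;
        long tout = 0;  /* milliseconds */

        for (;;) {
                /*
                 * Set the sleep state before testing the stop flag so a
                 * concurrent kthread_stop()/wake_up_process() cannot be
                 * missed.
                 */
                set_current_state(TASK_INTERRUPTIBLE);
                if (kthread_should_stop()) {
                        __set_current_state(TASK_RUNNING);
                        break;
                }
                schedule_timeout(tout ? msecs_to_jiffies(tout)
                                      : MAX_SCHEDULE_TIMEOUT);
                tout = demo_push(ail);
        }
        return 0;
}

/*
 * Start: ail->task = kthread_run(demo_thread, ail, "demo"); check IS_ERR().
 * Wake:  wake_up_process(ail->task), after publishing the new target.
 * Stop:  kthread_stop(ail->task), which also wakes the thread.
 */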


@ -64,24 +64,18 @@ struct xfs_ail_cursor {
*/
struct xfs_ail {
struct xfs_mount *xa_mount;
struct task_struct *xa_task;
struct list_head xa_ail;
xfs_lsn_t xa_target;
struct list_head xa_cursors;
spinlock_t xa_lock;
struct delayed_work xa_work;
xfs_lsn_t xa_last_pushed_lsn;
unsigned long xa_flags;
int xa_log_flush;
};
#define XFS_AIL_PUSHING_BIT 0
/*
* From xfs_trans_ail.c
*/
extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */
void xfs_trans_ail_update_bulk(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur,
struct xfs_log_item **log_items, int nr_items,


@ -197,6 +197,11 @@ struct dm_target {
* whether or not its underlying devices have support.
*/
unsigned discards_supported:1;
/*
* Set if this target does not return zeroes on discarded blocks.
*/
unsigned discard_zeroes_data_unsupported:1;
};
/* Each target can link one of these into the table */
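
The new bit lets a target declare that blocks it discards may not read back as zeroes, so the stack above it should not advertise discard_zeroes_data. A hedged sketch of where a target constructor might set it; demo_ctr is an illustrative name, not a target touched by this commit:

#include <linux/device-mapper.h>

static int demo_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        /* A real constructor would parse argv and set up ti->private. */

        ti->discards_supported = 1;
        /* Discards are passed to devices that may not zero the blocks. */
        ti->discard_zeroes_data_unsupported = 1;

        return 0;
}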


@ -1172,7 +1172,7 @@ DECLARE_RWSEM(uts_sem);
static int override_release(char __user *release, int len)
{
int ret = 0;
char buf[len];
char buf[65];
if (current->personality & UNAME26) {
char *rest = UTS_RELEASE;
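
Replacing the variable-length array with a fixed 65-byte buffer keeps the stack frame a constant, known size; a release string fits comfortably in 64 characters plus a terminator. A small user-space sketch of the same bounded-buffer-plus-clamped-copy pattern, with illustrative names only:

#include <stdio.h>
#include <string.h>

/* Copy at most dst_len bytes of a fabricated release string into dst. */
static size_t copy_release(char *dst, size_t dst_len, int major, int minor)
{
        char buf[65];   /* fixed size, independent of what the caller asks for */
        size_t n;

        snprintf(buf, sizeof(buf), "%d.%d.0", major, minor);
        n = strlen(buf) + 1;            /* include the terminating NUL */
        if (n > dst_len)
                n = dst_len;            /* clamp to the caller's buffer */
        memcpy(dst, buf, n);
        return n;
}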


@ -91,7 +91,6 @@ static int br_dev_open(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
netif_carrier_off(dev);
netdev_update_features(dev);
netif_start_queue(dev);
br_stp_enable_bridge(br);
@ -108,8 +107,6 @@ static int br_dev_stop(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
netif_carrier_off(dev);
br_stp_disable_bridge(br);
br_multicast_stop(br);


@ -1389,9 +1389,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
BUG_ON(!pcount);
/* Tweak before seqno plays */
if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint &&
!before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq))
if (skb == tp->lost_skb_hint)
tp->lost_cnt_hint += pcount;
TCP_SKB_CB(prev)->end_seq += shifted;


@ -927,18 +927,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
}
sk_nocaps_add(sk, NETIF_F_GSO_MASK);
}
if (tcp_alloc_md5sig_pool(sk) == NULL) {
md5sig = tp->md5sig_info;
if (md5sig->entries4 == 0 &&
tcp_alloc_md5sig_pool(sk) == NULL) {
kfree(newkey);
return -ENOMEM;
}
md5sig = tp->md5sig_info;
if (md5sig->alloced4 == md5sig->entries4) {
keys = kmalloc((sizeof(*keys) *
(md5sig->entries4 + 1)), GFP_ATOMIC);
if (!keys) {
kfree(newkey);
tcp_free_md5sig_pool();
if (md5sig->entries4 == 0)
tcp_free_md5sig_pool();
return -ENOMEM;
}
@ -982,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
kfree(tp->md5sig_info->keys4);
tp->md5sig_info->keys4 = NULL;
tp->md5sig_info->alloced4 = 0;
tcp_free_md5sig_pool();
} else if (tp->md5sig_info->entries4 != i) {
/* Need to do some manipulation */
memmove(&tp->md5sig_info->keys4[i],
@ -989,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
(tp->md5sig_info->entries4 - i) *
sizeof(struct tcp4_md5sig_key));
}
tcp_free_md5sig_pool();
return 0;
}
}
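
This hunk, together with the matching tcp_ipv6.c change below, makes the shared MD5 pool usage symmetric: tcp_alloc_md5sig_pool() is called only when the first key is installed, and tcp_free_md5sig_pool() only when the last key is removed (or the first insert fails). A standalone user-space sketch of that first-alloc/last-free invariant; it is not kernel code and the names are illustrative:

#include <stdlib.h>

struct key_store {
        int entries;            /* keys currently installed */
        void *shared_pool;      /* allocated lazily, shared by every key */
};

static int store_add_key(struct key_store *ks)
{
        if (ks->entries == 0) {
                ks->shared_pool = malloc(1024);  /* first key takes the pool */
                if (!ks->shared_pool)
                        return -1;               /* nothing to undo yet */
        }
        ks->entries++;
        return 0;
}

static void store_del_key(struct key_store *ks)
{
        if (--ks->entries == 0) {                /* last key returns it */
                free(ks->shared_pool);
                ks->shared_pool = NULL;
        }
}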


@ -875,6 +875,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
skb_reset_transport_header(skb);
__skb_push(skb, skb_gro_offset(skb));
ops = rcu_dereference(inet6_protos[proto]);
if (!ops || !ops->gro_receive)
goto out_unlock;


@ -591,7 +591,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
}
sk_nocaps_add(sk, NETIF_F_GSO_MASK);
}
if (tcp_alloc_md5sig_pool(sk) == NULL) {
if (tp->md5sig_info->entries6 == 0 &&
tcp_alloc_md5sig_pool(sk) == NULL) {
kfree(newkey);
return -ENOMEM;
}
@ -600,8 +601,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer,
(tp->md5sig_info->entries6 + 1)), GFP_ATOMIC);
if (!keys) {
tcp_free_md5sig_pool();
kfree(newkey);
if (tp->md5sig_info->entries6 == 0)
tcp_free_md5sig_pool();
return -ENOMEM;
}
@ -647,6 +649,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
kfree(tp->md5sig_info->keys6);
tp->md5sig_info->keys6 = NULL;
tp->md5sig_info->alloced6 = 0;
tcp_free_md5sig_pool();
} else {
/* shrink the database */
if (tp->md5sig_info->entries6 != i)
@ -655,7 +658,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer)
(tp->md5sig_info->entries6 - i)
* sizeof (tp->md5sig_info->keys6[0]));
}
tcp_free_md5sig_pool();
return 0;
}
}

Some files were not shown because too many files changed in this commit.