Merge branch 'linus' into locking/core, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2016-03-10 10:28:27 +01:00
Parents 29b75eb2d5 8e0f93cda4
Commit 6cbe9e4a22
315 changed files: 2906 additions and 2255 deletions


@ -56,10 +56,6 @@
<entry><constant>MEDIA_ENT_F_CONN_COMPOSITE</constant></entry> <entry><constant>MEDIA_ENT_F_CONN_COMPOSITE</constant></entry>
<entry>Connector for a RGB composite signal.</entry> <entry>Connector for a RGB composite signal.</entry>
</row> </row>
<row>
<entry><constant>MEDIA_ENT_F_CONN_TEST</constant></entry>
<entry>Connector for a test generator.</entry>
</row>
<row> <row>
<entry><constant>MEDIA_ENT_F_CAM_SENSOR</constant></entry> <entry><constant>MEDIA_ENT_F_CAM_SENSOR</constant></entry>
<entry>Camera video sensor entity.</entry> <entry>Camera video sensor entity.</entry>


@ -400,3 +400,7 @@ wm8350_wdt:
nowayout: Watchdog cannot be stopped once started nowayout: Watchdog cannot be stopped once started
(default=kernel config parameter) (default=kernel config parameter)
------------------------------------------------- -------------------------------------------------
sun4v_wdt:
timeout_ms: Watchdog timeout in milliseconds (1..180000, default=60000)
nowayout: Watchdog cannot be stopped once started
-------------------------------------------------


@ -4518,6 +4518,12 @@ L: linuxppc-dev@lists.ozlabs.org
S: Maintained S: Maintained
F: drivers/dma/fsldma.* F: drivers/dma/fsldma.*
FREESCALE GPMI NAND DRIVER
M: Han Xu <han.xu@nxp.com>
L: linux-mtd@lists.infradead.org
S: Maintained
F: drivers/mtd/nand/gpmi-nand/*
FREESCALE I2C CPM DRIVER FREESCALE I2C CPM DRIVER
M: Jochen Friedrich <jochen@scram.de> M: Jochen Friedrich <jochen@scram.de>
L: linuxppc-dev@lists.ozlabs.org L: linuxppc-dev@lists.ozlabs.org
@ -4534,7 +4540,7 @@ F: include/linux/platform_data/video-imxfb.h
F: drivers/video/fbdev/imxfb.c F: drivers/video/fbdev/imxfb.c
FREESCALE QUAD SPI DRIVER FREESCALE QUAD SPI DRIVER
M: Han Xu <han.xu@freescale.com> M: Han Xu <han.xu@nxp.com>
L: linux-mtd@lists.infradead.org L: linux-mtd@lists.infradead.org
S: Maintained S: Maintained
F: drivers/mtd/spi-nor/fsl-quadspi.c F: drivers/mtd/spi-nor/fsl-quadspi.c
@ -4548,6 +4554,15 @@ S: Maintained
F: drivers/net/ethernet/freescale/fs_enet/ F: drivers/net/ethernet/freescale/fs_enet/
F: include/linux/fs_enet_pd.h F: include/linux/fs_enet_pd.h
FREESCALE IMX / MXC FEC DRIVER
M: Fugang Duan <fugang.duan@nxp.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/freescale/fec_main.c
F: drivers/net/ethernet/freescale/fec_ptp.c
F: drivers/net/ethernet/freescale/fec.h
F: Documentation/devicetree/bindings/net/fsl-fec.txt
FREESCALE QUICC ENGINE LIBRARY FREESCALE QUICC ENGINE LIBRARY
L: linuxppc-dev@lists.ozlabs.org L: linuxppc-dev@lists.ozlabs.org
S: Orphan S: Orphan
@ -6764,6 +6779,7 @@ S: Maintained
F: Documentation/networking/mac80211-injection.txt F: Documentation/networking/mac80211-injection.txt
F: include/net/mac80211.h F: include/net/mac80211.h
F: net/mac80211/ F: net/mac80211/
F: drivers/net/wireless/mac80211_hwsim.[ch]
MACVLAN DRIVER MACVLAN DRIVER
M: Patrick McHardy <kaber@trash.net> M: Patrick McHardy <kaber@trash.net>


@ -1,7 +1,7 @@
VERSION = 4 VERSION = 4
PATCHLEVEL = 5 PATCHLEVEL = 5
SUBLEVEL = 0 SUBLEVEL = 0
EXTRAVERSION = -rc6 EXTRAVERSION = -rc7
NAME = Blurry Fish Butt NAME = Blurry Fish Butt
# *DOCUMENTATION* # *DOCUMENTATION*


@ -195,5 +195,7 @@ CFLAGS_font.o := -Dstatic=
$(obj)/font.c: $(FONTC) $(obj)/font.c: $(FONTC)
$(call cmd,shipped) $(call cmd,shipped)
AFLAGS_hyp-stub.o := -Wa,-march=armv7-a
$(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S $(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
$(call cmd,shipped) $(call cmd,shipped)


@ -283,7 +283,6 @@
pinctrl-names = "default"; pinctrl-names = "default";
status = "okay"; status = "okay";
renesas,enable-gpio = <&gpio5 31 GPIO_ACTIVE_HIGH>;
}; };
&usbphy { &usbphy {


@ -88,6 +88,7 @@ obj-$(CONFIG_DEBUG_LL) += debug.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o obj-$(CONFIG_ARM_VIRT_EXT) += hyp-stub.o
AFLAGS_hyp-stub.o :=-Wa,-march=armv7-a
ifeq ($(CONFIG_ARM_PSCI),y) ifeq ($(CONFIG_ARM_PSCI),y)
obj-$(CONFIG_SMP) += psci_smp.o obj-$(CONFIG_SMP) += psci_smp.o
endif endif


@ -161,7 +161,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
u64 val; u64 val;
val = kvm_arm_timer_get_reg(vcpu, reg->id); val = kvm_arm_timer_get_reg(vcpu, reg->id);
return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)); return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
} }
static unsigned long num_core_regs(void) static unsigned long num_core_regs(void)
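A minimal sketch (not taken from this commit) of the return-value convention the KVM hunks in this merge fix: copy_to_user() and copy_from_user() return the number of bytes left uncopied rather than an errno, so a handler that forwards that value would report a positive byte count to userspace instead of an error. Mapping any nonzero remainder to -EFAULT is the usual idiom; the helper name below is invented for illustration.

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustrative helper: translate "bytes not copied" into -EFAULT. */
static int put_reg_to_user(void __user *uaddr, const u64 *val, size_t size)
{
	return copy_to_user(uaddr, val, size) ? -EFAULT : 0;
}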


@ -49,6 +49,9 @@ static int change_memory_common(unsigned long addr, int numpages,
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
} }
if (!numpages)
return 0;
if (start < MODULES_VADDR || start >= MODULES_END) if (start < MODULES_VADDR || start >= MODULES_END)
return -EINVAL; return -EINVAL;


@ -34,13 +34,13 @@
/* /*
* VMALLOC and SPARSEMEM_VMEMMAP ranges. * VMALLOC and SPARSEMEM_VMEMMAP ranges.
* *
* VMEMAP_SIZE: allows the whole VA space to be covered by a struct page array * VMEMAP_SIZE: allows the whole linear region to be covered by a struct page array
* (rounded up to PUD_SIZE). * (rounded up to PUD_SIZE).
* VMALLOC_START: beginning of the kernel VA space * VMALLOC_START: beginning of the kernel VA space
* VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space, * VMALLOC_END: extends to the available space below vmmemmap, PCI I/O space,
* fixed mappings and modules * fixed mappings and modules
*/ */
#define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE) #define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT - 1)) * sizeof(struct page), PUD_SIZE)
#ifndef CONFIG_KASAN #ifndef CONFIG_KASAN
#define VMALLOC_START (VA_START) #define VMALLOC_START (VA_START)
@ -51,7 +51,8 @@
#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K) #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K)) #define VMEMMAP_START (VMALLOC_END + SZ_64K)
#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
#define FIRST_USER_ADDRESS 0UL #define FIRST_USER_ADDRESS 0UL
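Worked numbers for the two changes above, assuming a typical configuration that the diff itself does not spell out (VA_BITS = 48, 4 KiB pages so PAGE_SHIFT = 12, and a 64-byte struct page):

/*
 * old: VMEMMAP_SIZE = ALIGN((1UL << (48 - 12)) * 64, PUD_SIZE)
 *                   = 2^36 * 64 bytes = 4 TiB of virtual space
 * new: VMEMMAP_SIZE = ALIGN((1UL << (48 - 12 - 1)) * 64, PUD_SIZE)
 *                   = 2^35 * 64 bytes = 2 TiB of virtual space
 *
 * The linear map spans only half of the kernel VA range, so half the
 * struct-page array suffices once vmemmap itself is biased downwards by
 * (memstart_addr >> PAGE_SHIFT), i.e. by the first valid pfn, keeping the
 * pfn_to_page()/page_to_pfn() arithmetic unchanged.
 */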


@ -145,6 +145,10 @@ ENTRY(cpu_resume_mmu)
ENDPROC(cpu_resume_mmu) ENDPROC(cpu_resume_mmu)
.popsection .popsection
cpu_resume_after_mmu: cpu_resume_after_mmu:
#ifdef CONFIG_KASAN
mov x0, sp
bl kasan_unpoison_remaining_stack
#endif
mov x0, #0 // return zero on success mov x0, #0 // return zero on success
ldp x19, x20, [sp, #16] ldp x19, x20, [sp, #16]
ldp x21, x22, [sp, #32] ldp x21, x22, [sp, #32]


@ -194,7 +194,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
u64 val; u64 val;
val = kvm_arm_timer_get_reg(vcpu, reg->id); val = kvm_arm_timer_get_reg(vcpu, reg->id);
return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)); return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
} }
/** /**


@ -319,8 +319,8 @@ void __init mem_init(void)
#endif #endif
MLG(VMALLOC_START, VMALLOC_END), MLG(VMALLOC_START, VMALLOC_END),
#ifdef CONFIG_SPARSEMEM_VMEMMAP #ifdef CONFIG_SPARSEMEM_VMEMMAP
MLG((unsigned long)vmemmap, MLG(VMEMMAP_START,
(unsigned long)vmemmap + VMEMMAP_SIZE), VMEMMAP_START + VMEMMAP_SIZE),
MLM((unsigned long)virt_to_page(PAGE_OFFSET), MLM((unsigned long)virt_to_page(PAGE_OFFSET),
(unsigned long)virt_to_page(high_memory)), (unsigned long)virt_to_page(high_memory)),
#endif #endif


@ -270,7 +270,7 @@ uint32_t jz_gpio_port_get_value(int port, uint32_t mask)
} }
EXPORT_SYMBOL(jz_gpio_port_get_value); EXPORT_SYMBOL(jz_gpio_port_get_value);
#define IRQ_TO_BIT(irq) BIT(irq_to_gpio(irq) & 0x1f) #define IRQ_TO_BIT(irq) BIT((irq - JZ4740_IRQ_GPIO(0)) & 0x1f)
static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq) static void jz_gpio_check_trigger_both(struct jz_gpio_chip *chip, unsigned int irq)
{ {


@ -125,7 +125,7 @@ LEAF(_restore_fp_context)
END(_restore_fp_context) END(_restore_fp_context)
.set reorder .set reorder
.type fault@function .type fault, @function
.ent fault .ent fault
fault: li v0, -EFAULT fault: li v0, -EFAULT
jr ra jr ra


@ -358,7 +358,7 @@ LEAF(_restore_msa_all_upper)
.set reorder .set reorder
.type fault@function .type fault, @function
.ent fault .ent fault
fault: li v0, -EFAULT # failure fault: li v0, -EFAULT # failure
jr ra jr ra


@ -690,15 +690,15 @@ static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
asmlinkage void do_ov(struct pt_regs *regs) asmlinkage void do_ov(struct pt_regs *regs)
{ {
enum ctx_state prev_state; enum ctx_state prev_state;
siginfo_t info; siginfo_t info = {
.si_signo = SIGFPE,
.si_code = FPE_INTOVF,
.si_addr = (void __user *)regs->cp0_epc,
};
prev_state = exception_enter(); prev_state = exception_enter();
die_if_kernel("Integer overflow", regs); die_if_kernel("Integer overflow", regs);
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current); force_sig_info(SIGFPE, &info, current);
exception_exit(prev_state); exception_exit(prev_state);
} }
@ -874,7 +874,7 @@ out:
void do_trap_or_bp(struct pt_regs *regs, unsigned int code, void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
const char *str) const char *str)
{ {
siginfo_t info; siginfo_t info = { 0 };
char b[40]; char b[40];
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
@ -903,7 +903,6 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
else else
info.si_code = FPE_INTOVF; info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE; info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc; info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current); force_sig_info(SIGFPE, &info, current);
break; break;
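A standalone illustration (user-space, with a made-up struct) of why the designated-initializer form above can drop the explicit si_errno = 0 assignments: members not named in a designated initializer are implicitly zeroed, whereas an uninitialized local siginfo_t would otherwise carry stack garbage to userspace.

#include <stdio.h>

struct fake_siginfo {
	int si_signo;
	int si_errno;
	int si_code;
	void *si_addr;
};

int main(void)
{
	/* si_errno and si_addr are not named, so they are zero-initialized. */
	struct fake_siginfo info = {
		.si_signo = 8,	/* SIGFPE */
		.si_code  = 2,	/* FPE_INTOVF */
	};

	printf("si_errno=%d si_addr=%p\n", info.si_errno, info.si_addr);
	return 0;
}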


@ -702,7 +702,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
void __user *uaddr = (void __user *)(long)reg->addr; void __user *uaddr = (void __user *)(long)reg->addr;
return copy_to_user(uaddr, vs, 16); return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
} else { } else {
return -EINVAL; return -EINVAL;
} }
@ -732,7 +732,7 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
void __user *uaddr = (void __user *)(long)reg->addr; void __user *uaddr = (void __user *)(long)reg->addr;
return copy_from_user(vs, uaddr, 16); return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
} else { } else {
return -EINVAL; return -EINVAL;
} }


@ -164,11 +164,13 @@ static int __init mips_sc_probe_cm3(void)
sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK; sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF; sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
c->scache.sets = 64 << sets; if (sets)
c->scache.sets = 64 << sets;
line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK; line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF; line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
c->scache.linesz = 2 << line_sz; if (line_sz)
c->scache.linesz = 2 << line_sz;
assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK; assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF; assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
@ -176,9 +178,12 @@ static int __init mips_sc_probe_cm3(void)
c->scache.waysize = c->scache.sets * c->scache.linesz; c->scache.waysize = c->scache.sets * c->scache.linesz;
c->scache.waybit = __ffs(c->scache.waysize); c->scache.waybit = __ffs(c->scache.waysize);
c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; if (c->scache.linesz) {
c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
return 1;
}
return 1; return 0;
} }
static inline int __init mips_sc_probe(void) static inline int __init mips_sc_probe(void)


@ -33,7 +33,7 @@
* floppy accesses go through the track buffer. * floppy accesses go through the track buffer.
*/ */
#define _CROSS_64KB(a,s,vdma) \ #define _CROSS_64KB(a,s,vdma) \
(!vdma && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64)) (!(vdma) && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1) #define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
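A small stand-alone example (macro and variable names are made up) of the precedence trap the added parentheses close: CROSS_64KB() passes use_virtual_dma & 1 as the vdma argument, and without parentheses !vdma expands to (!use_virtual_dma) & 1 rather than !(use_virtual_dma & 1), which differ whenever use_virtual_dma has bits set above bit 0.

#include <stdio.h>

#define TEST_UNSAFE(vdma)	(!vdma)		/* argument expanded unparenthesized */
#define TEST_SAFE(vdma)		(!(vdma))	/* the fixed form */

int main(void)
{
	int use_virtual_dma = 2;	/* any value with bits above bit 0 */

	/* unsafe: (!2) & 1 == 0,  safe: !(2 & 1) == 1 */
	printf("unsafe=%d safe=%d\n",
	       TEST_UNSAFE(use_virtual_dma & 1),
	       TEST_SAFE(use_virtual_dma & 1));
	return 0;
}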


@ -361,8 +361,9 @@
#define __NR_membarrier (__NR_Linux + 343) #define __NR_membarrier (__NR_Linux + 343)
#define __NR_userfaultfd (__NR_Linux + 344) #define __NR_userfaultfd (__NR_Linux + 344)
#define __NR_mlock2 (__NR_Linux + 345) #define __NR_mlock2 (__NR_Linux + 345)
#define __NR_copy_file_range (__NR_Linux + 346)
#define __NR_Linux_syscalls (__NR_mlock2 + 1) #define __NR_Linux_syscalls (__NR_copy_file_range + 1)
#define __IGNORE_select /* newselect */ #define __IGNORE_select /* newselect */


@ -269,14 +269,19 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
long do_syscall_trace_enter(struct pt_regs *regs) long do_syscall_trace_enter(struct pt_regs *regs)
{ {
long ret = 0;
/* Do the secure computing check first. */ /* Do the secure computing check first. */
secure_computing_strict(regs->gr[20]); secure_computing_strict(regs->gr[20]);
if (test_thread_flag(TIF_SYSCALL_TRACE) && if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs)) tracehook_report_syscall_entry(regs)) {
ret = -1L; /*
* Tracing decided this syscall should not happen or the
* debugger stored an invalid system call number. Skip
* the system call and the system call restart handling.
*/
regs->gr[20] = -1UL;
goto out;
}
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
if (!is_compat_task()) if (!is_compat_task())
@ -290,7 +295,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
regs->gr[24] & 0xffffffff, regs->gr[24] & 0xffffffff,
regs->gr[23] & 0xffffffff); regs->gr[23] & 0xffffffff);
return ret ? : regs->gr[20]; out:
return regs->gr[20];
} }
void do_syscall_trace_exit(struct pt_regs *regs) void do_syscall_trace_exit(struct pt_regs *regs)
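A rough, generic sketch (names invented; this is not the parisc dispatcher itself) of why storing -1 in gr[20] is enough to suppress the traced syscall: the entry path bounds-checks the syscall number, so an out-of-range value such as (unsigned long)-1 never reaches the table and simply yields -ENOSYS, which also keeps syscall-restart handling from triggering.

#include <errno.h>

typedef long (*syscall_fn)(unsigned long *args);

static long dispatch_syscall(unsigned long nr, unsigned long *args,
			     const syscall_fn *table, unsigned long nr_syscalls)
{
	if (nr >= nr_syscalls)		/* (unsigned long)-1 always fails this test */
		return -ENOSYS;
	return table[nr](args);
}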


@ -343,7 +343,7 @@ tracesys_next:
#endif #endif
comiclr,>>= __NR_Linux_syscalls, %r20, %r0 comiclr,>>= __NR_Linux_syscalls, %r20, %r0
b,n .Lsyscall_nosys b,n .Ltracesys_nosys
LDREGX %r20(%r19), %r19 LDREGX %r20(%r19), %r19
@ -359,6 +359,9 @@ tracesys_next:
be 0(%sr7,%r19) be 0(%sr7,%r19)
ldo R%tracesys_exit(%r2),%r2 ldo R%tracesys_exit(%r2),%r2
.Ltracesys_nosys:
ldo -ENOSYS(%r0),%r28 /* set errno */
/* Do *not* call this function on the gateway page, because it /* Do *not* call this function on the gateway page, because it
makes a direct call to syscall_trace. */ makes a direct call to syscall_trace. */


@ -441,6 +441,7 @@
ENTRY_SAME(membarrier) ENTRY_SAME(membarrier)
ENTRY_SAME(userfaultfd) ENTRY_SAME(userfaultfd)
ENTRY_SAME(mlock2) /* 345 */ ENTRY_SAME(mlock2) /* 345 */
ENTRY_SAME(copy_file_range)
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))


@ -109,8 +109,9 @@ void arch_unregister_hw_breakpoint(struct perf_event *bp)
* If the breakpoint is unregistered between a hw_breakpoint_handler() * If the breakpoint is unregistered between a hw_breakpoint_handler()
* and the single_step_dabr_instruction(), then cleanup the breakpoint * and the single_step_dabr_instruction(), then cleanup the breakpoint
* restoration variables to prevent dangling pointers. * restoration variables to prevent dangling pointers.
* FIXME, this should not be using bp->ctx at all! Sayeth peterz.
*/ */
if (bp->ctx && bp->ctx->task) if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L))
bp->ctx->task->thread.last_hit_ubp = NULL; bp->ctx->task->thread.last_hit_ubp = NULL;
} }


@ -8,6 +8,8 @@
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/hugetlb.h> #include <linux/hugetlb.h>
#include <asm/mmu.h>
#ifdef CONFIG_PPC_FSL_BOOK3E #ifdef CONFIG_PPC_FSL_BOOK3E
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
static inline int tlb1_next(void) static inline int tlb1_next(void)
@ -60,6 +62,14 @@ static inline void book3e_tlb_lock(void)
unsigned long tmp; unsigned long tmp;
int token = smp_processor_id() + 1; int token = smp_processor_id() + 1;
/*
* Besides being unnecessary in the absence of SMT, this
* check prevents trying to do lbarx/stbcx. on e5500 which
* doesn't implement either feature.
*/
if (!cpu_has_feature(CPU_FTR_SMT))
return;
asm volatile("1: lbarx %0, 0, %1;" asm volatile("1: lbarx %0, 0, %1;"
"cmpwi %0, 0;" "cmpwi %0, 0;"
"bne 2f;" "bne 2f;"
@ -80,6 +90,9 @@ static inline void book3e_tlb_unlock(void)
{ {
struct paca_struct *paca = get_paca(); struct paca_struct *paca = get_paca();
if (!cpu_has_feature(CPU_FTR_SMT))
return;
isync(); isync();
paca->tcd_ptr->lock = 0; paca->tcd_ptr->lock = 0;
} }


@ -24,7 +24,13 @@ LDFLAGS := -m elf32_sparc
export BITS := 32 export BITS := 32
UTS_MACHINE := sparc UTS_MACHINE := sparc
# We are adding -Wa,-Av8 to KBUILD_CFLAGS to deal with a specs bug in some
# versions of gcc. Some gcc versions won't pass -Av8 to binutils when you
# give -mcpu=v8. This silently worked with older binutils versions but
# no longer does.
KBUILD_CFLAGS += -m32 -mcpu=v8 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7 KBUILD_CFLAGS += -m32 -mcpu=v8 -pipe -mno-fpu -fcall-used-g5 -fcall-used-g7
KBUILD_CFLAGS += -Wa,-Av8
KBUILD_AFLAGS += -m32 -Wa,-Av8 KBUILD_AFLAGS += -m32 -Wa,-Av8
else else


@ -422,8 +422,9 @@
#define __NR_listen 354 #define __NR_listen 354
#define __NR_setsockopt 355 #define __NR_setsockopt 355
#define __NR_mlock2 356 #define __NR_mlock2 356
#define __NR_copy_file_range 357
#define NR_syscalls 357 #define NR_syscalls 358
/* Bitmask values returned from kern_features system call. */ /* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001


@ -948,7 +948,24 @@ linux_syscall_trace:
cmp %o0, 0 cmp %o0, 0
bne 3f bne 3f
mov -ENOSYS, %o0 mov -ENOSYS, %o0
/* Syscall tracing can modify the registers. */
ld [%sp + STACKFRAME_SZ + PT_G1], %g1
sethi %hi(sys_call_table), %l7
ld [%sp + STACKFRAME_SZ + PT_I0], %i0
or %l7, %lo(sys_call_table), %l7
ld [%sp + STACKFRAME_SZ + PT_I1], %i1
ld [%sp + STACKFRAME_SZ + PT_I2], %i2
ld [%sp + STACKFRAME_SZ + PT_I3], %i3
ld [%sp + STACKFRAME_SZ + PT_I4], %i4
ld [%sp + STACKFRAME_SZ + PT_I5], %i5
cmp %g1, NR_syscalls
bgeu 3f
mov -ENOSYS, %o0
sll %g1, 2, %l4
mov %i0, %o0 mov %i0, %o0
ld [%l7 + %l4], %l7
mov %i1, %o1 mov %i1, %o1
mov %i2, %o2 mov %i2, %o2
mov %i3, %o3 mov %i3, %o3


@ -338,8 +338,9 @@ ENTRY(sun4v_mach_set_watchdog)
mov %o1, %o4 mov %o1, %o4
mov HV_FAST_MACH_SET_WATCHDOG, %o5 mov HV_FAST_MACH_SET_WATCHDOG, %o5
ta HV_FAST_TRAP ta HV_FAST_TRAP
brnz,a,pn %o4, 0f
stx %o1, [%o4] stx %o1, [%o4]
retl 0: retl
nop nop
ENDPROC(sun4v_mach_set_watchdog) ENDPROC(sun4v_mach_set_watchdog)


@ -52,7 +52,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
unsigned char fenab; unsigned char fenab;
int err; int err;
flush_user_windows(); synchronize_user_stack();
if (get_thread_wsaved() || if (get_thread_wsaved() ||
(((unsigned long)ucp) & (sizeof(unsigned long)-1)) || (((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
(!__access_ok(ucp, sizeof(*ucp)))) (!__access_ok(ucp, sizeof(*ucp))))


@ -37,6 +37,7 @@ EXPORT_SYMBOL(sun4v_niagara_getperf);
EXPORT_SYMBOL(sun4v_niagara_setperf); EXPORT_SYMBOL(sun4v_niagara_setperf);
EXPORT_SYMBOL(sun4v_niagara2_getperf); EXPORT_SYMBOL(sun4v_niagara2_getperf);
EXPORT_SYMBOL(sun4v_niagara2_setperf); EXPORT_SYMBOL(sun4v_niagara2_setperf);
EXPORT_SYMBOL(sun4v_mach_set_watchdog);
/* from hweight.S */ /* from hweight.S */
EXPORT_SYMBOL(__arch_hweight8); EXPORT_SYMBOL(__arch_hweight8);


@ -158,7 +158,25 @@ linux_syscall_trace32:
add %sp, PTREGS_OFF, %o0 add %sp, PTREGS_OFF, %o0
brnz,pn %o0, 3f brnz,pn %o0, 3f
mov -ENOSYS, %o0 mov -ENOSYS, %o0
/* Syscall tracing can modify the registers. */
ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
sethi %hi(sys_call_table32), %l7
ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
or %l7, %lo(sys_call_table32), %l7
ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
cmp %g1, NR_syscalls
bgeu,pn %xcc, 3f
mov -ENOSYS, %o0
sll %g1, 2, %l4
srl %i0, 0, %o0 srl %i0, 0, %o0
lduw [%l7 + %l4], %l7
srl %i4, 0, %o4 srl %i4, 0, %o4
srl %i1, 0, %o1 srl %i1, 0, %o1
srl %i2, 0, %o2 srl %i2, 0, %o2
@ -170,7 +188,25 @@ linux_syscall_trace:
add %sp, PTREGS_OFF, %o0 add %sp, PTREGS_OFF, %o0
brnz,pn %o0, 3f brnz,pn %o0, 3f
mov -ENOSYS, %o0 mov -ENOSYS, %o0
/* Syscall tracing can modify the registers. */
ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
sethi %hi(sys_call_table64), %l7
ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
or %l7, %lo(sys_call_table64), %l7
ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
cmp %g1, NR_syscalls
bgeu,pn %xcc, 3f
mov -ENOSYS, %o0
sll %g1, 2, %l4
mov %i0, %o0 mov %i0, %o0
lduw [%l7 + %l4], %l7
mov %i1, %o1 mov %i1, %o1
mov %i2, %o2 mov %i2, %o2
mov %i3, %o3 mov %i3, %o3


@ -88,4 +88,4 @@ sys_call_table:
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf /*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen /*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
/*355*/ .long sys_setsockopt, sys_mlock2 /*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range


@ -89,7 +89,7 @@ sys_call_table32:
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen /*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
.word compat_sys_setsockopt, sys_mlock2 .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range
#endif /* CONFIG_COMPAT */ #endif /* CONFIG_COMPAT */
@ -170,4 +170,4 @@ sys_call_table:
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen /*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
.word sys_setsockopt, sys_mlock2 .word sys_setsockopt, sys_mlock2, sys_copy_file_range


@ -12,6 +12,7 @@
#include <skas.h> #include <skas.h>
void (*pm_power_off)(void); void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
static void kill_off_processes(void) static void kill_off_processes(void)
{ {


@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
struct ksignal ksig; struct ksignal ksig;
int handled_sig = 0; int handled_sig = 0;
if (get_signal(&ksig)) { while (get_signal(&ksig)) {
handled_sig = 1; handled_sig = 1;
/* Whee! Actually deliver the signal. */ /* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs); handle_signal(&ksig, regs);


@ -16,6 +16,7 @@
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/realmode.h> #include <asm/realmode.h>
#include <linux/ftrace.h>
#include "../../realmode/rm/wakeup.h" #include "../../realmode/rm/wakeup.h"
#include "sleep.h" #include "sleep.h"
@ -107,7 +108,13 @@ int x86_acpi_suspend_lowlevel(void)
saved_magic = 0x123456789abcdef0L; saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
/*
* Pause/unpause graph tracing around do_suspend_lowlevel as it has
* inconsistent call/return info after it jumps to the wakeup vector.
*/
pause_graph_tracing();
do_suspend_lowlevel(); do_suspend_lowlevel();
unpause_graph_tracing();
return 0; return 0;
} }


@ -596,6 +596,8 @@ struct vcpu_vmx {
/* Support for PML */ /* Support for PML */
#define PML_ENTITY_NUM 512 #define PML_ENTITY_NUM 512
struct page *pml_pg; struct page *pml_pg;
u64 current_tsc_ratio;
}; };
enum segment_cache_field { enum segment_cache_field {
@ -2127,14 +2129,16 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp); rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */ vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
/* Setup TSC multiplier */
if (cpu_has_vmx_tsc_scaling())
vmcs_write64(TSC_MULTIPLIER,
vcpu->arch.tsc_scaling_ratio);
vmx->loaded_vmcs->cpu = cpu; vmx->loaded_vmcs->cpu = cpu;
} }
/* Setup TSC multiplier */
if (kvm_has_tsc_control &&
vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}
vmx_vcpu_pi_load(vcpu, cpu); vmx_vcpu_pi_load(vcpu, cpu);
} }
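A generic sketch of the caching pattern this vmx hunk introduces (struct and helper names are invented): the TSC multiplier is now checked on every vcpu_load, and the costly VMCS write is issued only when the desired ratio differs from what was last programmed, rather than only when the vCPU moves to another physical CPU.

#include <linux/types.h>

/* Illustrative only; not KVM's actual data structures. */
struct tsc_scaling_cache {
	u64 desired_ratio;	/* what the guest should currently see   */
	u64 programmed_ratio;	/* what the hardware control field holds */
};

static void load_tsc_ratio(struct tsc_scaling_cache *c,
			   void (*write_field)(u64 value))
{
	if (c->programmed_ratio != c->desired_ratio) {
		c->programmed_ratio = c->desired_ratio;
		write_field(c->programmed_ratio);	/* skip redundant writes */
	}
}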


@ -2752,7 +2752,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
} }
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
} }
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@ -6619,12 +6618,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* KVM_DEBUGREG_WONT_EXIT again. * KVM_DEBUGREG_WONT_EXIT again.
*/ */
if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) { if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
int i;
WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP); WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
kvm_x86_ops->sync_dirty_debug_regs(vcpu); kvm_x86_ops->sync_dirty_debug_regs(vcpu);
for (i = 0; i < KVM_NR_DB_REGS; i++) kvm_update_dr0123(vcpu);
vcpu->arch.eff_db[i] = vcpu->arch.db[i]; kvm_update_dr6(vcpu);
kvm_update_dr7(vcpu);
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
} }
/* /*


@ -109,7 +109,7 @@ unsigned long os_get_top_address(void)
exit(1); exit(1);
} }
printf("0x%x\n", bottom << UM_KERN_PAGE_SHIFT); printf("0x%lx\n", bottom << UM_KERN_PAGE_SHIFT);
printf("Locating the top of the address space ... "); printf("Locating the top of the address space ... ");
fflush(stdout); fflush(stdout);
@ -134,7 +134,7 @@ out:
exit(1); exit(1);
} }
top <<= UM_KERN_PAGE_SHIFT; top <<= UM_KERN_PAGE_SHIFT;
printf("0x%x\n", top); printf("0x%lx\n", top);
return top; return top;
} }


@ -57,6 +57,49 @@ static int __blk_rq_unmap_user(struct bio *bio)
return ret; return ret;
} }
static int __blk_rq_map_user_iov(struct request *rq,
struct rq_map_data *map_data, struct iov_iter *iter,
gfp_t gfp_mask, bool copy)
{
struct request_queue *q = rq->q;
struct bio *bio, *orig_bio;
int ret;
if (copy)
bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
else
bio = bio_map_user_iov(q, iter, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
if (map_data && map_data->null_mapped)
bio_set_flag(bio, BIO_NULL_MAPPED);
iov_iter_advance(iter, bio->bi_iter.bi_size);
if (map_data)
map_data->offset += bio->bi_iter.bi_size;
orig_bio = bio;
blk_queue_bounce(q, &bio);
/*
* We link the bounce buffer in and could have to traverse it
* later so we have to get a ref to prevent it from being freed
*/
bio_get(bio);
ret = blk_rq_append_bio(q, rq, bio);
if (ret) {
bio_endio(bio);
__blk_rq_unmap_user(orig_bio);
bio_put(bio);
return ret;
}
return 0;
}
/** /**
* blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
* @q: request queue where request should be inserted * @q: request queue where request should be inserted
@ -82,10 +125,11 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
struct rq_map_data *map_data, struct rq_map_data *map_data,
const struct iov_iter *iter, gfp_t gfp_mask) const struct iov_iter *iter, gfp_t gfp_mask)
{ {
struct bio *bio;
int unaligned = 0;
struct iov_iter i;
struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0}; struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
bool copy = (q->dma_pad_mask & iter->count) || map_data;
struct bio *bio = NULL;
struct iov_iter i;
int ret;
if (!iter || !iter->count) if (!iter || !iter->count)
return -EINVAL; return -EINVAL;
@ -101,42 +145,29 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
*/ */
if ((uaddr & queue_dma_alignment(q)) || if ((uaddr & queue_dma_alignment(q)) ||
iovec_gap_to_prv(q, &prv, &iov)) iovec_gap_to_prv(q, &prv, &iov))
unaligned = 1; copy = true;
prv.iov_base = iov.iov_base; prv.iov_base = iov.iov_base;
prv.iov_len = iov.iov_len; prv.iov_len = iov.iov_len;
} }
if (unaligned || (q->dma_pad_mask & iter->count) || map_data) i = *iter;
bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); do {
else ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
bio = bio_map_user_iov(q, iter, gfp_mask); if (ret)
goto unmap_rq;
if (IS_ERR(bio)) if (!bio)
return PTR_ERR(bio); bio = rq->bio;
} while (iov_iter_count(&i));
if (map_data && map_data->null_mapped)
bio_set_flag(bio, BIO_NULL_MAPPED);
if (bio->bi_iter.bi_size != iter->count) {
/*
* Grab an extra reference to this bio, as bio_unmap_user()
* expects to be able to drop it twice as it happens on the
* normal IO completion path
*/
bio_get(bio);
bio_endio(bio);
__blk_rq_unmap_user(bio);
return -EINVAL;
}
if (!bio_flagged(bio, BIO_USER_MAPPED)) if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER; rq->cmd_flags |= REQ_COPY_USER;
blk_queue_bounce(q, &bio);
bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
return 0; return 0;
unmap_rq:
__blk_rq_unmap_user(bio);
rq->bio = NULL;
return -EINVAL;
} }
EXPORT_SYMBOL(blk_rq_map_user_iov); EXPORT_SYMBOL(blk_rq_map_user_iov);


@ -304,7 +304,6 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
struct bio *nxt) struct bio *nxt)
{ {
struct bio_vec end_bv = { NULL }, nxt_bv; struct bio_vec end_bv = { NULL }, nxt_bv;
struct bvec_iter iter;
if (!blk_queue_cluster(q)) if (!blk_queue_cluster(q))
return 0; return 0;
@ -316,11 +315,8 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
if (!bio_has_data(bio)) if (!bio_has_data(bio))
return 1; return 1;
bio_for_each_segment(end_bv, bio, iter) bio_get_last_bvec(bio, &end_bv);
if (end_bv.bv_len == iter.bi_size) bio_get_first_bvec(nxt, &nxt_bv);
break;
nxt_bv = bio_iovec(nxt);
if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv)) if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
return 0; return 0;


@ -1590,14 +1590,21 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
start = ndr_desc->res->start; start = ndr_desc->res->start;
len = ndr_desc->res->end - ndr_desc->res->start + 1; len = ndr_desc->res->end - ndr_desc->res->start + 1;
/*
* If ARS is unimplemented, unsupported, or if the 'Persistent Memory
* Scrub' flag in extended status is not set, skip this but continue
* initialization
*/
rc = ars_get_cap(nd_desc, ars_cap, start, len); rc = ars_get_cap(nd_desc, ars_cap, start, len);
if (rc == -ENOTTY) {
dev_dbg(acpi_desc->dev,
"Address Range Scrub is not implemented, won't create an error list\n");
rc = 0;
goto out;
}
if (rc) if (rc)
goto out; goto out;
/*
* If ARS is unsupported, or if the 'Persistent Memory Scrub' flag in
* extended status is not set, skip this but continue initialization
*/
if ((ars_cap->status & 0xffff) || if ((ars_cap->status & 0xffff) ||
!(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) { !(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
dev_warn(acpi_desc->dev, dev_warn(acpi_desc->dev,


@ -367,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/ { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/ { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/ { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
/* JMicron 360/1/3/5/6, match class to avoid IDE function */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@ -1325,6 +1331,44 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
{} {}
#endif #endif
#ifdef CONFIG_ARM64
/*
* Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
* The workaround is to make sure all pending IRQs are served before
* leaving the handler.
*/
static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv;
unsigned int rc = 0;
void __iomem *mmio;
u32 irq_stat, irq_masked;
unsigned int handled = 1;
VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = hpriv->mmio;
irq_stat = readl(mmio + HOST_IRQ_STAT);
if (!irq_stat)
return IRQ_NONE;
do {
irq_masked = irq_stat & hpriv->port_map;
spin_lock(&host->lock);
rc = ahci_handle_port_intr(host, irq_masked);
if (!rc)
handled = 0;
writel(irq_stat, mmio + HOST_IRQ_STAT);
irq_stat = readl(mmio + HOST_IRQ_STAT);
spin_unlock(&host->lock);
} while (irq_stat);
VPRINTK("EXIT\n");
return IRQ_RETVAL(handled);
}
#endif
/* /*
* ahci_init_msix() - optionally enable per-port MSI-X otherwise defer * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
* to single msi. * to single msi.
@ -1560,6 +1604,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ahci_broken_devslp(pdev)) if (ahci_broken_devslp(pdev))
hpriv->flags |= AHCI_HFLAG_NO_DEVSLP; hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
#ifdef CONFIG_ARM64
if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
hpriv->irq_handler = ahci_thunderx_irq_handler;
#endif
/* save initial config */ /* save initial config */
ahci_pci_save_initial_config(pdev, hpriv); ahci_pci_save_initial_config(pdev, hpriv);


@ -240,8 +240,7 @@ enum {
error-handling stage) */ error-handling stage) */
AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */ AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
AHCI_HFLAG_EDGE_IRQ = (1 << 19), /* HOST_IRQ_STAT behaves as
Edge Triggered */
#ifdef CONFIG_PCI_MSI #ifdef CONFIG_PCI_MSI
AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */ AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */
AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */ AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */
@ -361,6 +360,7 @@ struct ahci_host_priv {
* be overridden anytime before the host is activated. * be overridden anytime before the host is activated.
*/ */
void (*start_engine)(struct ata_port *ap); void (*start_engine)(struct ata_port *ap);
irqreturn_t (*irq_handler)(int irq, void *dev_instance);
}; };
#ifdef CONFIG_PCI_MSI #ifdef CONFIG_PCI_MSI
@ -424,6 +424,7 @@ int ahci_reset_em(struct ata_host *host);
void ahci_print_info(struct ata_host *host, const char *scc_s); void ahci_print_info(struct ata_host *host, const char *scc_s);
int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht); int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
void ahci_error_handler(struct ata_port *ap); void ahci_error_handler(struct ata_port *ap);
u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
static inline void __iomem *__ahci_port_base(struct ata_host *host, static inline void __iomem *__ahci_port_base(struct ata_host *host,
unsigned int port_no) unsigned int port_no)


@ -548,6 +548,88 @@ softreset_retry:
return rc; return rc;
} }
/**
* xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
* @ata_host: Host that received the irq
* @irq_masked: HOST_IRQ_STAT value
*
* For hardware with broken edge trigger latch
* the HOST_IRQ_STAT register misses the edge interrupt
* when the HOST_IRQ_STAT register is cleared and the hardware
* reports the PORT_IRQ_STAT register in the
* same clock cycle.
* As such, the algorithm below outlines the workaround.
*
* 1. Read HOST_IRQ_STAT register and save the state.
* 2. Clear the HOST_IRQ_STAT register.
* 3. Read back the HOST_IRQ_STAT register.
* 4. If the HOST_IRQ_STAT register reads as zero, then
* traverse the remaining ports' PORT_IRQ_STAT registers
* to check whether an interrupt triggered at that point, else
* go to step 6.
* 5. If any remaining port's PORT_IRQ_STAT register is non-zero,
* then update the HOST_IRQ_STAT state saved in step 1.
* 6. Handle port interrupts.
* 7. Exit
*/
static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
u32 irq_masked)
{
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *port_mmio;
int i;
if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
for (i = 0; i < host->n_ports; i++) {
if (irq_masked & (1 << i))
continue;
port_mmio = ahci_port_base(host->ports[i]);
if (readl(port_mmio + PORT_IRQ_STAT))
irq_masked |= (1 << i);
}
}
return ahci_handle_port_intr(host, irq_masked);
}
static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv;
unsigned int rc = 0;
void __iomem *mmio;
u32 irq_stat, irq_masked;
VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = hpriv->mmio;
/* sigh. 0xffffffff is a valid return from h/w */
irq_stat = readl(mmio + HOST_IRQ_STAT);
if (!irq_stat)
return IRQ_NONE;
irq_masked = irq_stat & hpriv->port_map;
spin_lock(&host->lock);
/*
* HOST_IRQ_STAT behaves as edge triggered latch meaning that
* it should be cleared before all the port events are cleared.
*/
writel(irq_stat, mmio + HOST_IRQ_STAT);
rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
return IRQ_RETVAL(rc);
}
static struct ata_port_operations xgene_ahci_v1_ops = { static struct ata_port_operations xgene_ahci_v1_ops = {
.inherits = &ahci_ops, .inherits = &ahci_ops,
.host_stop = xgene_ahci_host_stop, .host_stop = xgene_ahci_host_stop,
@ -779,7 +861,8 @@ skip_clk_phy:
hpriv->flags = AHCI_HFLAG_NO_NCQ; hpriv->flags = AHCI_HFLAG_NO_NCQ;
break; break;
case XGENE_AHCI_V2: case XGENE_AHCI_V2:
hpriv->flags |= AHCI_HFLAG_YES_FBS | AHCI_HFLAG_EDGE_IRQ; hpriv->flags |= AHCI_HFLAG_YES_FBS;
hpriv->irq_handler = xgene_ahci_irq_intr;
break; break;
default: default:
break; break;


@ -113,6 +113,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
const char *buf, size_t size); const char *buf, size_t size);
static ssize_t ahci_show_em_supported(struct device *dev, static ssize_t ahci_show_em_supported(struct device *dev,
struct device_attribute *attr, char *buf); struct device_attribute *attr, char *buf);
static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL); static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL); static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@ -512,6 +513,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
if (!hpriv->start_engine) if (!hpriv->start_engine)
hpriv->start_engine = ahci_start_engine; hpriv->start_engine = ahci_start_engine;
if (!hpriv->irq_handler)
hpriv->irq_handler = ahci_single_level_irq_intr;
} }
EXPORT_SYMBOL_GPL(ahci_save_initial_config); EXPORT_SYMBOL_GPL(ahci_save_initial_config);
@ -1164,8 +1168,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
/* mark esata ports */ /* mark esata ports */
tmp = readl(port_mmio + PORT_CMD); tmp = readl(port_mmio + PORT_CMD);
if ((tmp & PORT_CMD_HPCP) || if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)))
ap->pflags |= ATA_PFLAG_EXTERNAL; ap->pflags |= ATA_PFLAG_EXTERNAL;
} }
@ -1846,7 +1849,7 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked) u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
{ {
unsigned int i, handled = 0; unsigned int i, handled = 0;
@ -1872,43 +1875,7 @@ static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
return handled; return handled;
} }
EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
static irqreturn_t ahci_single_edge_irq_intr(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv;
unsigned int rc = 0;
void __iomem *mmio;
u32 irq_stat, irq_masked;
VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = hpriv->mmio;
/* sigh. 0xffffffff is a valid return from h/w */
irq_stat = readl(mmio + HOST_IRQ_STAT);
if (!irq_stat)
return IRQ_NONE;
irq_masked = irq_stat & hpriv->port_map;
spin_lock(&host->lock);
/*
* HOST_IRQ_STAT behaves as edge triggered latch meaning that
* it should be cleared before all the port events are cleared.
*/
writel(irq_stat, mmio + HOST_IRQ_STAT);
rc = ahci_handle_port_intr(host, irq_masked);
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
return IRQ_RETVAL(rc);
}
static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance) static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
{ {
@ -2535,14 +2502,18 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
int irq = hpriv->irq; int irq = hpriv->irq;
int rc; int rc;
if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
if (hpriv->irq_handler)
dev_warn(host->dev, "both AHCI_HFLAG_MULTI_MSI flag set \
and custom irq handler implemented\n");
rc = ahci_host_activate_multi_irqs(host, sht); rc = ahci_host_activate_multi_irqs(host, sht);
else if (hpriv->flags & AHCI_HFLAG_EDGE_IRQ) } else {
rc = ata_host_activate(host, irq, ahci_single_edge_irq_intr, rc = ata_host_activate(host, irq, hpriv->irq_handler,
IRQF_SHARED, sht);
else
rc = ata_host_activate(host, irq, ahci_single_level_irq_intr,
IRQF_SHARED, sht); IRQF_SHARED, sht);
}
return rc; return rc;
} }
EXPORT_SYMBOL_GPL(ahci_host_activate); EXPORT_SYMBOL_GPL(ahci_host_activate);


@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
int cmd, void __user *arg) int cmd, void __user *arg)
{ {
int val = -EINVAL, rc = -EINVAL; unsigned long val;
int rc = -EINVAL;
unsigned long flags; unsigned long flags;
switch (cmd) { switch (cmd) {
case ATA_IOC_GET_IO32: case HDIO_GET_32BIT:
spin_lock_irqsave(ap->lock, flags); spin_lock_irqsave(ap->lock, flags);
val = ata_ioc32(ap); val = ata_ioc32(ap);
spin_unlock_irqrestore(ap->lock, flags); spin_unlock_irqrestore(ap->lock, flags);
if (copy_to_user(arg, &val, 1)) return put_user(val, (unsigned long __user *)arg);
return -EFAULT;
return 0;
case ATA_IOC_SET_IO32: case HDIO_SET_32BIT:
val = (unsigned long) arg; val = (unsigned long) arg;
rc = 0; rc = 0;
spin_lock_irqsave(ap->lock, flags); spin_lock_irqsave(ap->lock, flags);


@ -32,6 +32,8 @@
#include <linux/libata.h> #include <linux/libata.h>
#include <scsi/scsi_host.h> #include <scsi/scsi_host.h>
#include <asm/mach-rc32434/rb.h>
#define DRV_NAME "pata-rb532-cf" #define DRV_NAME "pata-rb532-cf"
#define DRV_VERSION "0.1.0" #define DRV_VERSION "0.1.0"
#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash" #define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
int gpio; int gpio;
struct resource *res; struct resource *res;
struct ata_host *ah; struct ata_host *ah;
struct cf_device *pdata;
struct rb532_cf_info *info; struct rb532_cf_info *info;
int ret; int ret;
@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
return -ENOENT; return -ENOENT;
} }
gpio = irq_to_gpio(irq); pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data specified\n");
return -EINVAL;
}
gpio = pdata->gpio_pin;
if (gpio < 0) { if (gpio < 0) {
dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq); dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
return -ENOENT; return -ENOENT;


@ -296,6 +296,7 @@ endif
config QORIQ_CPUFREQ config QORIQ_CPUFREQ
tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
depends on OF && COMMON_CLK && (PPC_E500MC || ARM) depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
depends on !CPU_THERMAL || THERMAL
select CLK_QORIQ select CLK_QORIQ
help help
This adds the CPUFreq driver support for Freescale QorIQ SoCs This adds the CPUFreq driver support for Freescale QorIQ SoCs


@ -84,10 +84,10 @@ config ARM_KIRKWOOD_CPUFREQ
SoCs. SoCs.
config ARM_MT8173_CPUFREQ config ARM_MT8173_CPUFREQ
bool "Mediatek MT8173 CPUFreq support" tristate "Mediatek MT8173 CPUFreq support"
depends on ARCH_MEDIATEK && REGULATOR depends on ARCH_MEDIATEK && REGULATOR
depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST) depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
depends on !CPU_THERMAL || THERMAL=y depends on !CPU_THERMAL || THERMAL
select PM_OPP select PM_OPP
help help
This adds the CPUFreq driver support for Mediatek MT8173 SoC. This adds the CPUFreq driver support for Mediatek MT8173 SoC.


@ -17,6 +17,7 @@
#include <linux/cpu_cooling.h> #include <linux/cpu_cooling.h>
#include <linux/cpufreq.h> #include <linux/cpufreq.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/pm_opp.h> #include <linux/pm_opp.h>


@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc,
(PXA_DCMD_LENGTH & sizeof(u32)); (PXA_DCMD_LENGTH & sizeof(u32));
if (flags & DMA_PREP_INTERRUPT) if (flags & DMA_PREP_INTERRUPT)
updater->dcmd |= PXA_DCMD_ENDIRQEN; updater->dcmd |= PXA_DCMD_ENDIRQEN;
if (sw_desc->cyclic)
sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
} }
static bool is_desc_completed(struct virt_dma_desc *vd) static bool is_desc_completed(struct virt_dma_desc *vd)
@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
dev_dbg(&chan->vc.chan.dev->device, dev_dbg(&chan->vc.chan.dev->device,
"%s(): checking txd %p[%x]: completed=%d\n", "%s(): checking txd %p[%x]: completed=%d\n",
__func__, vd, vd->tx.cookie, is_desc_completed(vd)); __func__, vd, vd->tx.cookie, is_desc_completed(vd));
if (to_pxad_sw_desc(vd)->cyclic) {
vchan_cyclic_callback(vd);
break;
}
if (is_desc_completed(vd)) { if (is_desc_completed(vd)) {
list_del(&vd->node); list_del(&vd->node);
vchan_cookie_complete(vd); vchan_cookie_complete(vd);
@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan,
return NULL; return NULL;
pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr); pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len); dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
dev_dbg(&chan->vc.chan.dev->device, dev_dbg(&chan->vc.chan.dev->device,
"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n", "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
__func__, (unsigned long)buf_addr, len, period_len, dir, flags); __func__, (unsigned long)buf_addr, len, period_len, dir, flags);
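A worked example of the | vs & change in the dcmd line above; the 13-bit width assumed for the PXA_DCMD_LENGTH field is for illustration only. OR-ing the mask in sets every length bit, so the cyclic descriptor always programmed the maximum transfer length instead of period_len.

#include <stdio.h>

#define DCMD_LENGTH_MASK 0x1fffu	/* assumed width of the length field */

int main(void)
{
	unsigned int period_len = 0x100;	/* 256-byte cyclic period */

	unsigned int before = DCMD_LENGTH_MASK | period_len;	/* 0x1fff: length forced to max */
	unsigned int after  = DCMD_LENGTH_MASK & period_len;	/* 0x0100: the requested length  */

	printf("before=%#x after=%#x\n", before, after);
	return 0;
}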


@ -1574,7 +1574,7 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
for (cha = 0; cha < KNL_MAX_CHAS; cha++) { for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
if (knl_get_mc_route(target, if (knl_get_mc_route(target,
mc_route_reg[cha]) == channel mc_route_reg[cha]) == channel
&& participants[channel]) { && !participants[channel]) {
participant_count++; participant_count++;
participants[channel] = 1; participants[channel] = 1;
break; break;


@ -196,6 +196,44 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
return 0; return 0;
} }
static void gpio_rcar_irq_bus_lock(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct gpio_rcar_priv *p = gpiochip_get_data(gc);
pm_runtime_get_sync(&p->pdev->dev);
}
static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct gpio_rcar_priv *p = gpiochip_get_data(gc);
pm_runtime_put(&p->pdev->dev);
}
static int gpio_rcar_irq_request_resources(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct gpio_rcar_priv *p = gpiochip_get_data(gc);
int error;
error = pm_runtime_get_sync(&p->pdev->dev);
if (error < 0)
return error;
return 0;
}
static void gpio_rcar_irq_release_resources(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct gpio_rcar_priv *p = gpiochip_get_data(gc);
pm_runtime_put(&p->pdev->dev);
}
static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
{ {
struct gpio_rcar_priv *p = dev_id; struct gpio_rcar_priv *p = dev_id;
@ -450,6 +488,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip->irq_unmask = gpio_rcar_irq_enable; irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type; irq_chip->irq_set_type = gpio_rcar_irq_set_type;
irq_chip->irq_set_wake = gpio_rcar_irq_set_wake; irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND; irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
ret = gpiochip_add_data(gpio_chip, p); ret = gpiochip_add_data(gpio_chip, p);


@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
/* Don't try to start link training before we /* Don't try to start link training before we
* have the dpcd */ * have the dpcd */
if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
return; return;
/* set it to OFF so that drm_helper_connector_dpms() /* set it to OFF so that drm_helper_connector_dpms()


@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
* In practice this won't execute very often unless on very fast * In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small. * machines because the time window for this to happen is very small.
*/ */
while (amdgpuCrtc->enabled && repcnt--) { while (amdgpuCrtc->enabled && --repcnt) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in * start in hpos, and to the "fudged earlier" vblank start in
* vpos. * vpos.
@ -112,13 +112,13 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
break; break;
/* Sleep at least until estimated real start of hw vblank */ /* Sleep at least until estimated real start of hw vblank */
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
if (min_udelay > vblank->framedur_ns / 2000) { if (min_udelay > vblank->framedur_ns / 2000) {
/* Don't wait ridiculously long - something is wrong */ /* Don't wait ridiculously long - something is wrong */
repcnt = 0; repcnt = 0;
break; break;
} }
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
usleep_range(min_udelay, 2 * min_udelay); usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags); spin_lock_irqsave(&crtc->dev->event_lock, flags);
}; };


@ -649,9 +649,6 @@ force:
/* update display watermarks based on new power state */ /* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev); amdgpu_display_bandwidth_update(adev);
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
/* wait for the rings to drain */ /* wait for the rings to drain */
for (i = 0; i < AMDGPU_MAX_RINGS; i++) { for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i]; struct amdgpu_ring *ring = adev->rings[i];
@ -670,6 +667,9 @@ force:
/* update displays */ /* update displays */
amdgpu_dpm_display_configuration_changed(adev); amdgpu_dpm_display_configuration_changed(adev);
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
if (adev->pm.funcs->force_performance_level) { if (adev->pm.funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) { if (adev->pm.dpm.thermal_active) {
enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level; enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;


@@ -143,8 +143,10 @@ static int amdgpu_pp_late_init(void *handle)
 					adev->powerplay.pp_handle);
 #ifdef CONFIG_DRM_AMD_POWERPLAY
-	if (adev->pp_enabled)
+	if (adev->pp_enabled) {
 		amdgpu_pm_sysfs_init(adev);
+		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
+	}
 #endif
 	return ret;
 }


@@ -265,15 +265,27 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
 	unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
 	unsigned lane_num, i, max_pix_clock;
-	for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-			max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+	if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG) {
+		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+			max_pix_clock = (lane_num * 270000 * 8) / bpp;
 			if (max_pix_clock >= pix_clock) {
 				*dp_lanes = lane_num;
-				*dp_rate = link_rates[i];
+				*dp_rate = 270000;
 				return 0;
 			}
 		}
+	} else {
+		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+				if (max_pix_clock >= pix_clock) {
+					*dp_lanes = lane_num;
+					*dp_rate = link_rates[i];
+					return 0;
+				}
+			}
+		}
 	}
 	return -EINVAL;


@@ -2202,8 +2202,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
 					    AMD_PG_STATE_GATE);
 			cz_enable_vce_dpm(adev, false);
-			/* TODO: to figure out why vce can't be poweroff. */
-			/* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */
+			cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF);
 			pi->vce_power_gated = true;
 		} else {
 			cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON);
@@ -2226,10 +2225,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
 		}
 	} else { /*pi->caps_vce_pg*/
 		cz_update_vce_dpm(adev);
-		cz_enable_vce_dpm(adev, true);
+		cz_enable_vce_dpm(adev, !gate);
 	}
-	return;
 }
 const struct amd_ip_funcs cz_dpm_ip_funcs = {


@@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
+				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	amdgpu_ring_write(ring, seq);
+	amdgpu_ring_write(ring, 0xffffffff);
+	amdgpu_ring_write(ring, 4); /* poll interval */
 	if (usepfp) {
 		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));


@@ -4809,7 +4809,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
 	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
-				 WAIT_REG_MEM_FUNCTION(3))); /* equal */
+				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
+				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
 	amdgpu_ring_write(ring, addr & 0xfffffffc);
 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
 	amdgpu_ring_write(ring, seq);


@@ -402,8 +402,11 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
 		data.requested_ui_label = power_state_convert(ps);
 		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+		break;
 	}
-	break;
+	case AMD_PP_EVENT_COMPLETE_INIT:
+		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+		break;
 	default:
 		break;
 	}


@@ -165,6 +165,7 @@ const struct action_chain resume_action_chain = {
 };
 static const pem_event_action *complete_init_event[] = {
+	unblock_adjust_power_state_tasks,
 	adjust_power_state_tasks,
 	enable_gfx_clock_gating_tasks,
 	enable_gfx_voltage_island_power_gating_tasks,


@@ -226,7 +226,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 		}
 	} else {
 		cz_dpm_update_vce_dpm(hwmgr);
-		cz_enable_disable_vce_dpm(hwmgr, true);
+		cz_enable_disable_vce_dpm(hwmgr, !bgate);
 		return 0;
 	}


@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
 	} while (ast_read32(ast, 0x10000) != 0x01);
 	data = ast_read32(ast, 0x10004);
-	if (data & 0x400)
+	if (data & 0x40)
 		ast->dram_bus_width = 16;
 	else
 		ast->dram_bus_width = 32;


@@ -1382,8 +1382,16 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
 	drm_connector_cleanup(connector);
 }
+static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
+{
+	if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
+		return drm_atomic_helper_connector_dpms(connector, mode);
+	else
+		return drm_helper_connector_dpms(connector, mode);
+}
 static const struct drm_connector_funcs tda998x_connector_funcs = {
-	.dpms = drm_atomic_helper_connector_dpms,
+	.dpms = tda998x_connector_dpms,
 	.reset = drm_atomic_helper_connector_reset,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = tda998x_connector_detect,


@@ -2303,15 +2303,15 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
  */
 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
 {
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-		skl_display_core_uninit(dev_priv);
 	/*
 	 * Even if power well support was disabled we still want to disable
 	 * power wells while we are system suspended.
 	 */
 	if (!i915.disable_power_well)
 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+		skl_display_core_uninit(dev_priv);
 }
 /**
@@ -2349,22 +2349,20 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct device *device = &dev->pdev->dev;
-	int ret;
-	if (!IS_ENABLED(CONFIG_PM))
-		return true;
-	ret = pm_runtime_get_if_in_use(device);
-	/*
-	 * In cases runtime PM is disabled by the RPM core and we get an
-	 * -EINVAL return value we are not supposed to call this function,
-	 * since the power state is undefined. This applies atm to the
-	 * late/early system suspend/resume handlers.
-	 */
-	WARN_ON_ONCE(ret < 0);
-	if (ret <= 0)
-		return false;
+	if (IS_ENABLED(CONFIG_PM)) {
+		int ret = pm_runtime_get_if_in_use(device);
+		/*
+		 * In cases runtime PM is disabled by the RPM core and we get
+		 * an -EINVAL return value we are not supposed to call this
+		 * function, since the power state is undefined. This applies
+		 * atm to the late/early system suspend/resume handlers.
+		 */
+		WARN_ON_ONCE(ret < 0);
+		if (ret <= 0)
+			return false;
+	}
 	atomic_inc(&dev_priv->pm.wakeref_count);
 	assert_rpm_wakelock_held(dev_priv);


@@ -64,6 +64,7 @@ static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
 	/* Start DC channel and DI after IDMAC */
 	ipu_dc_enable_channel(ipu_crtc->dc);
 	ipu_di_enable(ipu_crtc->di);
+	drm_crtc_vblank_on(&ipu_crtc->base);
 	ipu_crtc->enabled = 1;
 }
@@ -80,6 +81,7 @@ static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
 	ipu_di_disable(ipu_crtc->di);
 	ipu_plane_disable(ipu_crtc->plane[0]);
 	ipu_dc_disable(ipu);
+	drm_crtc_vblank_off(&ipu_crtc->base);
 	ipu_crtc->enabled = 0;
 }


@@ -42,6 +42,7 @@ static const uint32_t ipu_plane_formats[] = {
 	DRM_FORMAT_YVYU,
 	DRM_FORMAT_YUV420,
 	DRM_FORMAT_YVU420,
+	DRM_FORMAT_RGB565,
 };
 int ipu_plane_irq(struct ipu_plane *ipu_plane)


@@ -315,15 +315,27 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
 	unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
 	unsigned lane_num, i, max_pix_clock;
-	for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-		for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-			max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+	    ENCODER_OBJECT_ID_NUTMEG) {
+		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+			max_pix_clock = (lane_num * 270000 * 8) / bpp;
 			if (max_pix_clock >= pix_clock) {
 				*dp_lanes = lane_num;
-				*dp_rate = link_rates[i];
+				*dp_rate = 270000;
 				return 0;
 			}
 		}
+	} else {
+		for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+			for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+				max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+				if (max_pix_clock >= pix_clock) {
+					*dp_lanes = lane_num;
+					*dp_rate = link_rates[i];
+					return 0;
+				}
+			}
+		}
 	}
 	return -EINVAL;


@@ -1744,7 +1744,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	}
 	drm_kms_helper_poll_enable(dev);
-	drm_helper_hpd_irq_event(dev);
 	/* set the power state here in case we are a PX system or headless */
 	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)


@@ -455,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
 	 * In practice this won't execute very often unless on very fast
 	 * machines because the time window for this to happen is very small.
 	 */
-	while (radeon_crtc->enabled && repcnt--) {
+	while (radeon_crtc->enabled && --repcnt) {
 		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
 		 * start in hpos, and to the "fudged earlier" vblank start in
 		 * vpos.
@@ -471,13 +471,13 @@ static void radeon_flip_work_func(struct work_struct *__work)
 			break;
 		/* Sleep at least until estimated real start of hw vblank */
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
 		if (min_udelay > vblank->framedur_ns / 2000) {
 			/* Don't wait ridiculously long - something is wrong */
 			repcnt = 0;
 			break;
 		}
+		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		usleep_range(min_udelay, 2 * min_udelay);
 		spin_lock_irqsave(&crtc->dev->event_lock, flags);
 	};


@@ -1079,10 +1079,8 @@ force:
 	/* update display watermarks based on new power state */
 	radeon_bandwidth_update(rdev);
-	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
-	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
-	rdev->pm.dpm.single_display = single_display;
+	/* update displays */
+	radeon_dpm_display_configuration_changed(rdev);
 	/* wait for the rings to drain */
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1099,8 +1097,9 @@ force:
 	radeon_dpm_post_set_power_state(rdev);
-	/* update displays */
-	radeon_dpm_display_configuration_changed(rdev);
+	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+	rdev->pm.dpm.single_display = single_display;
 	if (rdev->asic->dpm.force_performance_level) {
 		if (rdev->pm.dpm.thermal_active) {


@@ -563,6 +563,8 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
 static const struct drm_connector_funcs vmw_sou_connector_funcs = {
 	.dpms = vmw_du_connector_dpms,
+	.detect = vmw_du_connector_detect,
+	.fill_modes = vmw_du_connector_fill_modes,
 	.set_property = vmw_du_connector_set_property,
 	.destroy = vmw_sou_connector_destroy,
 };


@@ -18,6 +18,7 @@
 #include <linux/host1x.h>
 #include <linux/of.h>
 #include <linux/slab.h>
+#include <linux/of_device.h>
 #include "bus.h"
 #include "dev.h"
@@ -394,6 +395,7 @@ static int host1x_device_add(struct host1x *host1x,
 	device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
 	device->dev.dma_mask = &device->dev.coherent_dma_mask;
 	dev_set_name(&device->dev, "%s", driver->driver.name);
+	of_dma_configure(&device->dev, host1x->dev->of_node);
 	device->dev.release = host1x_device_release;
 	device->dev.bus = &host1x_bus_type;
 	device->dev.parent = host1x->dev;


@@ -23,6 +23,7 @@
 #include <linux/of_device.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/dma-mapping.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/host1x.h>
@@ -68,6 +69,7 @@ static const struct host1x_info host1x01_info = {
 	.nb_bases = 8,
 	.init = host1x01_init,
 	.sync_offset = 0x3000,
+	.dma_mask = DMA_BIT_MASK(32),
 };
 static const struct host1x_info host1x02_info = {
@@ -77,6 +79,7 @@ static const struct host1x_info host1x02_info = {
 	.nb_bases = 12,
 	.init = host1x02_init,
 	.sync_offset = 0x3000,
+	.dma_mask = DMA_BIT_MASK(32),
 };
 static const struct host1x_info host1x04_info = {
@@ -86,6 +89,7 @@ static const struct host1x_info host1x04_info = {
 	.nb_bases = 64,
 	.init = host1x04_init,
 	.sync_offset = 0x2100,
+	.dma_mask = DMA_BIT_MASK(34),
 };
 static const struct host1x_info host1x05_info = {
@@ -95,6 +99,7 @@ static const struct host1x_info host1x05_info = {
 	.nb_bases = 64,
 	.init = host1x05_init,
 	.sync_offset = 0x2100,
+	.dma_mask = DMA_BIT_MASK(34),
 };
 static struct of_device_id host1x_of_match[] = {
@@ -148,6 +153,8 @@ static int host1x_probe(struct platform_device *pdev)
 	if (IS_ERR(host->regs))
 		return PTR_ERR(host->regs);
+	dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
 	if (host->info->init) {
 		err = host->info->init(host);
 		if (err)


@@ -96,6 +96,7 @@ struct host1x_info {
 	int	nb_mlocks;	/* host1x: number of mlocks */
 	int	(*init)(struct host1x *); /* initialize per SoC ops */
 	int	sync_offset;
+	u64	dma_mask;	/* mask of addressable memory */
 };
 struct host1x {


@@ -1050,6 +1050,17 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
 	for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
 		const struct ipu_platform_reg *reg = &client_reg[i];
 		struct platform_device *pdev;
+		struct device_node *of_node;
+		/* Associate subdevice with the corresponding port node */
+		of_node = of_graph_get_port_by_id(dev->of_node, i);
+		if (!of_node) {
+			dev_info(dev,
+				 "no port@%d node in %s, not using %s%d\n",
+				 i, dev->of_node->full_name,
+				 (i / 2) ? "DI" : "CSI", i % 2);
+			continue;
+		}
 		pdev = platform_device_alloc(reg->name, id++);
 		if (!pdev) {
@@ -1057,17 +1068,9 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
 			goto err_register;
 		}
+		pdev->dev.of_node = of_node;
 		pdev->dev.parent = dev;
-		/* Associate subdevice with the corresponding port node */
-		pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
-		if (!pdev->dev.of_node) {
-			dev_err(dev, "missing port@%d node in %s\n", i,
-				dev->of_node->full_name);
-			ret = -ENODEV;
-			goto err_register;
-		}
 		ret = platform_device_add_data(pdev, &reg->pdata,
 					       sizeof(reg->pdata));
 		if (!ret)
@@ -1289,10 +1292,6 @@ static int ipu_probe(struct platform_device *pdev)
 	ipu->irq_sync = irq_sync;
 	ipu->irq_err = irq_err;
-	ret = ipu_irq_init(ipu);
-	if (ret)
-		goto out_failed_irq;
 	ret = device_reset(&pdev->dev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to reset: %d\n", ret);
@@ -1302,6 +1301,10 @@ static int ipu_probe(struct platform_device *pdev)
 	if (ret)
 		goto out_failed_reset;
+	ret = ipu_irq_init(ipu);
+	if (ret)
+		goto out_failed_irq;
 	/* Set MCU_T to divide MCU access window into 2 */
 	ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
 			IPU_DISP_GEN);
@@ -1324,9 +1327,9 @@ static int ipu_probe(struct platform_device *pdev)
 failed_add_clients:
 	ipu_submodules_exit(ipu);
 failed_submodules_init:
-out_failed_reset:
 	ipu_irq_exit(ipu);
 out_failed_irq:
+out_failed_reset:
 	clk_disable_unprepare(ipu->clk);
 	return ret;
 }


@@ -586,8 +586,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
 	if (!dev)
 		return -ENOMEM;
-	dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(struct bsc_regs *),
-			       GFP_KERNEL);
+	dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(*dev->bsc_regmap), GFP_KERNEL);
 	if (!dev->bsc_regmap)
 		return -ENOMEM;


@@ -358,6 +358,7 @@ int ib_register_device(struct ib_device *device,
 	ret = device->query_device(device, &device->attrs, &uhw);
 	if (ret) {
 		printk(KERN_WARNING "Couldn't query the device attributes\n");
+		ib_cache_cleanup_one(device);
 		goto out;
 	}


@@ -1071,7 +1071,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
 		}
 	}
-	if (rec->hop_limit > 1 || use_roce) {
+	if (rec->hop_limit > 0 || use_roce) {
 		ah_attr->ah_flags = IB_AH_GRH;
 		ah_attr->grh.dgid = rec->dgid;


@@ -1970,7 +1970,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
 		   resp_size);
 	INIT_UDATA(&uhw, buf + sizeof(cmd),
 		   (unsigned long)cmd.response + resp_size,
-		   in_len - sizeof(cmd), out_len - resp_size);
+		   in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
+		   out_len - resp_size);
 	memset(&cmd_ex, 0, sizeof(cmd_ex));
 	cmd_ex.user_handle = cmd.user_handle;
@@ -3413,7 +3414,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
 	INIT_UDATA(&udata, buf + sizeof cmd,
 		   (unsigned long) cmd.response + sizeof resp,
-		   in_len - sizeof cmd, out_len - sizeof resp);
+		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+		   out_len - sizeof resp);
 	ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
 	if (ret)
@@ -3439,7 +3441,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
 	INIT_UDATA(&udata, buf + sizeof cmd,
 		   (unsigned long) cmd.response + sizeof resp,
-		   in_len - sizeof cmd, out_len - sizeof resp);
+		   in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
+		   out_len - sizeof resp);
 	ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
 	if (ret)


@@ -75,7 +75,8 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
 static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 			   struct mlx5_create_srq_mbox_in **in,
-			   struct ib_udata *udata, int buf_size, int *inlen)
+			   struct ib_udata *udata, int buf_size, int *inlen,
+			   int is_xrc)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
 	struct mlx5_ib_create_srq ucmd = {};
@@ -87,13 +88,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 	int ncont;
 	u32 offset;
 	u32 uidx = MLX5_IB_DEFAULT_UIDX;
-	int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
-	if (drv_data < 0)
-		return -EINVAL;
-	ucmdlen = (drv_data < sizeof(ucmd)) ?
-		  drv_data : sizeof(ucmd);
+	ucmdlen = min(udata->inlen, sizeof(ucmd));
 	if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
 		mlx5_ib_dbg(dev, "failed copy udata\n");
@@ -103,15 +99,17 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 	if (ucmd.reserved0 || ucmd.reserved1)
 		return -EINVAL;
-	if (drv_data > sizeof(ucmd) &&
+	if (udata->inlen > sizeof(ucmd) &&
 	    !ib_is_udata_cleared(udata, sizeof(ucmd),
-				 drv_data - sizeof(ucmd)))
+				 udata->inlen - sizeof(ucmd)))
 		return -EINVAL;
-	err = get_srq_user_index(to_mucontext(pd->uobject->context),
-				 &ucmd, udata->inlen, &uidx);
-	if (err)
-		return err;
+	if (is_xrc) {
+		err = get_srq_user_index(to_mucontext(pd->uobject->context),
+					 &ucmd, udata->inlen, &uidx);
+		if (err)
+			return err;
+	}
 	srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
@@ -151,7 +149,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
 	(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
 	(*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
-	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+	if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
+	    is_xrc){
 		xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
 				     xrc_srq_context_entry);
 		MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
@@ -170,7 +169,7 @@ err_umem:
 static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
 			     struct mlx5_create_srq_mbox_in **in, int buf_size,
-			     int *inlen)
+			     int *inlen, int is_xrc)
 {
 	int err;
 	int i;
@@ -224,7 +223,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
 	(*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-	if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) {
+	if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
+	    is_xrc){
 		xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
 				     xrc_srq_context_entry);
 		/* 0xffffff means we ask to work with cqe version 0 */
@@ -302,10 +302,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 		    desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
 		    srq->msrq.max_avail_gather);
+	is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
 	if (pd->uobject)
-		err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen);
+		err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
+				      is_xrc);
 	else
-		err = create_srq_kernel(dev, srq, &in, buf_size, &inlen);
+		err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
+					is_xrc);
 	if (err) {
 		mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@@ -313,7 +317,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 		goto err_srq;
 	}
-	is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
 	in->ctx.state_log_sz = ilog2(srq->msrq.max);
 	flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
 	xrcdn = 0;


@@ -114,6 +114,7 @@ struct kmem_cache *amd_iommu_irq_cache;
 static void update_domain(struct protection_domain *domain);
 static int protection_domain_init(struct protection_domain *domain);
+static void detach_device(struct device *dev);
 /*
  * For dynamic growth the aperture size is split into ranges of 128MB of
@@ -384,6 +385,9 @@ static void iommu_uninit_device(struct device *dev)
 	if (!dev_data)
 		return;
+	if (dev_data->domain)
+		detach_device(dev);
 	iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
 			    dev);


@@ -228,6 +228,10 @@ static int amd_iommu_enable_interrupts(void);
 static int __init iommu_go_to_state(enum iommu_init_state state);
 static void init_device_table_dma(void);
+static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
+				    u8 bank, u8 cntr, u8 fxn,
+				    u64 *value, bool is_write);
 static inline void update_last_devid(u16 devid)
 {
 	if (devid > amd_iommu_last_bdf)
@@ -1015,6 +1019,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
 	pci_write_config_dword(iommu->dev, 0xf0, 0x90);
 }
+/*
+ * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
+ * Workaround:
+ *     BIOS should enable ATS write permission check by setting
+ *     L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
+ */
+static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
+{
+	u32 value;
+	if ((boot_cpu_data.x86 != 0x15) ||
+	    (boot_cpu_data.x86_model < 0x30) ||
+	    (boot_cpu_data.x86_model > 0x3f))
+		return;
+	/* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
+	value = iommu_read_l2(iommu, 0x47);
+	if (value & BIT(0))
+		return;
+	/* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
+	iommu_write_l2(iommu, 0x47, value | BIT(0));
+	pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
+		dev_name(&iommu->dev->dev));
+}
 /*
  * This function clues the initialization function for one IOMMU
  * together and also allocates the command buffer and programs the
@@ -1142,8 +1174,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
 	amd_iommu_pc_present = true;
 	/* Check if the performance counters can be written to */
-	if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) ||
-	    (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) ||
+	if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
+	    (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
 	    (val != val2)) {
 		pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
 		amd_iommu_pc_present = false;
@@ -1284,6 +1316,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
 	}
 	amd_iommu_erratum_746_workaround(iommu);
+	amd_iommu_ats_write_check_workaround(iommu);
 	iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
 					       amd_iommu_groups, "ivhd%d",
@@ -2283,22 +2316,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
 }
 EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
-int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
+				    u8 bank, u8 cntr, u8 fxn,
 				    u64 *value, bool is_write)
 {
-	struct amd_iommu *iommu;
 	u32 offset;
 	u32 max_offset_lim;
-	/* Make sure the IOMMU PC resource is available */
-	if (!amd_iommu_pc_present)
-		return -ENODEV;
-	/* Locate the iommu associated with the device ID */
-	iommu = amd_iommu_rlookup_table[devid];
 	/* Check for valid iommu and pc register indexing */
-	if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7)))
+	if (WARN_ON((fxn > 0x28) || (fxn & 7)))
 		return -ENODEV;
 	offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
@@ -2322,3 +2348,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
 	return 0;
 }
 EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
+int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
+				 u64 *value, bool is_write)
+{
+	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+	/* Make sure the IOMMU PC resource is available */
+	if (!amd_iommu_pc_present || iommu == NULL)
+		return -ENODEV;
+	return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
+					value, is_write);
+}


@@ -329,7 +329,8 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
 	/* Only care about add/remove events for physical functions */
 	if (pdev->is_virtfn)
 		return NOTIFY_DONE;
-	if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE)
+	if (action != BUS_NOTIFY_ADD_DEVICE &&
+	    action != BUS_NOTIFY_REMOVED_DEVICE)
 		return NOTIFY_DONE;
 	info = dmar_alloc_pci_notify_info(pdev, action);
@@ -339,7 +340,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
 	down_write(&dmar_global_lock);
 	if (action == BUS_NOTIFY_ADD_DEVICE)
 		dmar_pci_bus_add_dev(info);
-	else if (action == BUS_NOTIFY_DEL_DEVICE)
+	else if (action == BUS_NOTIFY_REMOVED_DEVICE)
 		dmar_pci_bus_del_dev(info);
 	up_write(&dmar_global_lock);


@@ -4367,7 +4367,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
 				rmrru->devices_cnt);
 			if(ret < 0)
 				return ret;
-		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+		} else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
 			dmar_remove_dev_scope(info, rmrr->segment,
 				rmrru->devices, rmrru->devices_cnt);
 		}
@@ -4387,7 +4387,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
 				break;
 			else if(ret < 0)
 				return ret;
-		} else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
+		} else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
 			if (dmar_remove_dev_scope(info, atsr->segment,
 					atsru->devices, atsru->devices_cnt))
 				break;


@@ -497,7 +497,7 @@ static int adp1653_probe(struct i2c_client *client,
 		if (!client->dev.platform_data) {
 			dev_err(&client->dev,
 				"Neither DT not platform data provided\n");
-			return EINVAL;
+			return -EINVAL;
 		}
 		flash->platform_data = client->dev.platform_data;
 	}


@@ -1960,10 +1960,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
 	}
 	/* tx 5v detect */
-	tx_5v = io_read(sd, 0x70) & info->cable_det_mask;
+	tx_5v = irq_reg_0x70 & info->cable_det_mask;
 	if (tx_5v) {
 		v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
-		io_write(sd, 0x71, tx_5v);
 		adv76xx_s_detect_tx_5v_ctrl(sd);
 		if (handled)
 			*handled = true;


@@ -1843,8 +1843,7 @@ static void au0828_analog_create_entities(struct au0828_dev *dev)
 			ent->function = MEDIA_ENT_F_CONN_RF;
 			break;
 		default: /* AU0828_VMUX_DEBUG */
-			ent->function = MEDIA_ENT_F_CONN_TEST;
-			break;
+			continue;
 		}
 		ret = media_entity_pads_init(ent, 1, &dev->input_pad[i]);


@@ -415,7 +415,7 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
 		delta = mftb() - psl_tb;
 		if (delta < 0)
 			delta = -delta;
-	} while (cputime_to_usecs(delta) > 16);
+	} while (tb_to_ns(delta) > 16000);
 	return 0;
 }


@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
 	vol->changing_leb = 1;
 	vol->ch_lnum = req->lnum;
-	vol->upd_buf = vmalloc(req->bytes);
+	vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
 	if (!vol->upd_buf)
 		return -ENOMEM;


@@ -843,7 +843,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
 		if (clear_intf)
 			mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00);
-		if (eflag)
+		if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR))
 			mcp251x_write_bits(spi, EFLG, eflag, 0x00);
 		/* Update can state */


@@ -826,9 +826,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
 static void gs_destroy_candev(struct gs_can *dev)
 {
 	unregister_candev(dev->netdev);
-	free_candev(dev->netdev);
 	usb_kill_anchored_urbs(&dev->tx_submitted);
-	kfree(dev);
+	free_candev(dev->netdev);
 }
 static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
 	for (i = 0; i < icount; i++) {
 		dev->canch[i] = gs_make_candev(i, intf);
 		if (IS_ERR_OR_NULL(dev->canch[i])) {
+			/* save error code to return later */
+			rc = PTR_ERR(dev->canch[i]);
 			/* on failure destroy previously created candevs */
 			icount = i;
-			for (i = 0; i < icount; i++) {
+			for (i = 0; i < icount; i++)
 				gs_destroy_candev(dev->canch[i]);
-				dev->canch[i] = NULL;
-			}
+			usb_kill_anchored_urbs(&dev->rx_submitted);
 			kfree(dev);
 			return rc;
 		}
@@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf)
 		return;
 	}
-	for (i = 0; i < GS_MAX_INTF; i++) {
-		struct gs_can *can = dev->canch[i];
-		if (!can)
-			continue;
-		gs_destroy_candev(can);
-	}
+	for (i = 0; i < GS_MAX_INTF; i++)
+		if (dev->canch[i])
+			gs_destroy_candev(dev->canch[i]);
 	usb_kill_anchored_urbs(&dev->rx_submitted);
+	kfree(dev);
 }
 static const struct usb_device_id gs_usb_table[] = {


@@ -2461,7 +2461,7 @@ boomerang_interrupt(int irq, void *dev_id)
 					int i;
 					pci_unmap_single(VORTEX_PCI(vp),
 						le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
-						le32_to_cpu(vp->tx_ring[entry].frag[0].length),
+						le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
 						PCI_DMA_TODEVICE);
 					for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)


@@ -193,7 +193,6 @@ static void altera_tse_mdio_destroy(struct net_device *dev)
 			    priv->mdio->id);
 	mdiobus_unregister(priv->mdio);
-	kfree(priv->mdio->irq);
 	mdiobus_free(priv->mdio);
 	priv->mdio = NULL;
 }


@@ -1460,7 +1460,19 @@ static int nb8800_probe(struct platform_device *pdev)
 		goto err_disable_clk;
 	}
-	priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+	if (of_phy_is_fixed_link(pdev->dev.of_node)) {
+		ret = of_phy_register_fixed_link(pdev->dev.of_node);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "bad fixed-link spec\n");
+			goto err_free_bus;
+		}
+		priv->phy_node = of_node_get(pdev->dev.of_node);
+	}
+	if (!priv->phy_node)
+		priv->phy_node = of_parse_phandle(pdev->dev.of_node,
+						  "phy-handle", 0);
 	if (!priv->phy_node) {
 		dev_err(&pdev->dev, "no PHY specified\n");
 		ret = -ENODEV;

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше