Merge branches 'devel-stable', 'fixes' and 'mmci' into for-linus

Commit 16af43fef8
@@ -2438,7 +2438,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			real-time workloads. It can also improve energy
 			efficiency for asymmetric multiprocessors.
 
-	rcu_nocbs_poll	[KNL,BOOT]
+	rcu_nocb_poll	[KNL,BOOT]
 			Rather than requiring that offloaded CPUs
 			(specified by rcu_nocbs= above) explicitly
 			awaken the corresponding "rcuoN" kthreads,

@@ -57,7 +57,7 @@ Protocol 2.10:	(Kernel 2.6.31) Added a protocol for relaxed alignment
 Protocol 2.11:	(Kernel 3.6) Added a field for offset of EFI handover
 		protocol entry point.
 
-Protocol 2.12:	(Kernel 3.9) Added the xloadflags field and extension fields
+Protocol 2.12:	(Kernel 3.8) Added the xloadflags field and extension fields
 		to struct boot_params for for loading bzImage and ramdisk
 		above 4G in 64bit.
 

@@ -1489,7 +1489,7 @@ AVR32 ARCHITECTURE
 M:	Haavard Skinnemoen <hskinnemoen@gmail.com>
 M:	Hans-Christian Egtvedt <egtvedt@samfundet.no>
 W:	http://www.atmel.com/products/AVR32/
-W:	http://avr32linux.org/
+W:	http://mirror.egtvedt.no/avr32linux.org/
 W:	http://avrfreaks.net/
 S:	Maintained
 F:	arch/avr32/

 Makefile | 4 ++--

@@ -1,8 +1,8 @@
 VERSION = 3
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
-NAME = Terrified Chipmunk
+EXTRAVERSION = -rc7
+NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"

@@ -68,8 +68,8 @@ else
 endif
 
 check_for_multiple_loadaddr = \
-	if [ $(words $(UIMAGE_LOADADDR)) -gt 1 ]; then \
-		echo 'multiple load addresses: $(UIMAGE_LOADADDR)'; \
+	if [ $(words $(UIMAGE_LOADADDR)) -ne 1 ]; then \
+		echo 'multiple (or no) load addresses: $(UIMAGE_LOADADDR)'; \
		echo 'This is incompatible with uImages'; \
		echo 'Specify LOADADDR on the commandline to build an uImage'; \
		false; \

@@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
 	irq_set_chained_handler(irq, gic_handle_cascade_irq);
 }
 
+static u8 gic_get_cpumask(struct gic_chip_data *gic)
+{
+	void __iomem *base = gic_data_dist_base(gic);
+	u32 mask, i;
+
+	for (i = mask = 0; i < 32; i += 4) {
+		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
+		mask |= mask >> 16;
+		mask |= mask >> 8;
+		if (mask)
+			break;
+	}
+
+	if (!mask)
+		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");
+
+	return mask;
+}
+
 static void __init gic_dist_init(struct gic_chip_data *gic)
 {
 	unsigned int i;

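The gic_get_cpumask() helper added above relies on the layout of the banked GIC_DIST_TARGET registers for interrupts 0-31: each 32-bit word packs four 8-bit CPU-target fields, and on the reading CPU every implemented field holds that CPU's interface bit. The two shift-and-OR steps fold the four bytes into the low byte. A stand-alone illustration in plain C (register values are hypothetical):

#include <stdint.h>
#include <assert.h>

/* Fold the four 8-bit target fields of one GIC_DIST_TARGET word into a
 * single byte, mirroring the loop body of gic_get_cpumask() above. */
static uint8_t fold_target_word(uint32_t word)
{
    word |= word >> 16;
    word |= word >> 8;
    return (uint8_t)word;
}

int main(void)
{
    /* all four fields report CPU interface bit 1 (i.e. CPU 1) */
    assert(fold_target_word(0x02020202) == 0x02);
    /* an unimplemented/zero word folds to 0, so the loop keeps scanning */
    assert(fold_target_word(0x00000000) == 0x00);
    return 0;
}
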
@@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 	/*
 	 * Set all global interrupts to this CPU only.
 	 */
-	cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0);
+	cpumask = gic_get_cpumask(gic);
+	cpumask |= cpumask << 8;
+	cpumask |= cpumask << 16;
 	for (i = 32; i < gic_irqs; i += 4)
 		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

@@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
 	 * Get what the GIC says our CPU mask is.
 	 */
 	BUG_ON(cpu >= NR_GIC_CPU_IF);
-	cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0);
+	cpu_mask = gic_get_cpumask(gic);
 	gic_cpu_map[cpu] = cpu_mask;
 
 	/*

@@ -24,6 +24,7 @@ extern struct arm_delay_ops {
 	void (*delay)(unsigned long);
 	void (*const_udelay)(unsigned long);
 	void (*udelay)(unsigned long);
+	bool const_clock;
 } arm_delay_ops;
 
 #define __delay(n)	arm_delay_ops.delay(n)

@@ -37,7 +37,7 @@
  */
 #define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
 #define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
-#define TASK_UNMAPPED_BASE	(UL(CONFIG_PAGE_OFFSET) / 3)
+#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
 
 /*
  * The maximum size of a 26-bit user space task.

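The new definition derives the base from TASK_SIZE and rounds up to a 16 MiB boundary; with some VMSPLIT choices the old PAGE_OFFSET/3 value was not even 16 MiB aligned. A worked example, assuming the kernel's usual ALIGN() definition:

#include <stdio.h>

#define ALIGN(x, a)   (((x) + (a) - 1) & ~((a) - 1))
#define SZ_16M        0x01000000UL

int main(void)
{
    /* e.g. CONFIG_PAGE_OFFSET = 0x80000000 (2G/2G split) */
    unsigned long page_offset = 0x80000000UL;
    unsigned long task_size = page_offset - 0x01000000UL;

    printf("old: %#lx\n", page_offset / 3);              /* 0x2aaaaaaa */
    printf("new: %#lx\n", ALIGN(task_size / 3, SZ_16M)); /* 0x2b000000 */
    return 0;
}
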
@@ -247,7 +247,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
+	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
+		L_PTE_NONE | L_PTE_VALID;
 	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
 	return pte;
 }

@@ -93,11 +93,11 @@ static void notrace update_sched_clock(void)
 	 * detectable in cyc_to_fixed_sched_clock().
 	 */
 	raw_local_irq_save(flags);
-	cd.epoch_cyc = cyc;
+	cd.epoch_cyc_copy = cyc;
 	smp_wmb();
 	cd.epoch_ns = ns;
 	smp_wmb();
-	cd.epoch_cyc_copy = cyc;
+	cd.epoch_cyc = cyc;
 	raw_local_irq_restore(flags);
 }
 

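The reordering above gives the epoch fields seqlock-like write semantics: epoch_cyc_copy is now written first and epoch_cyc last, so a reader that loads epoch_cyc before its snapshot and epoch_cyc_copy after it will see the two differ whenever an update raced with it. A simplified model of the paired reader (not the exact kernel code; the barrier is stubbed with a GCC builtin):

#include <stdint.h>

struct clock_data {
    uint64_t epoch_ns;
    uint32_t epoch_cyc;
    uint32_t epoch_cyc_copy;
};

static struct clock_data cd;

#define smp_rmb() __sync_synchronize()  /* stand-in barrier for the sketch */

/* epoch_cyc is written LAST and epoch_cyc_copy FIRST by the updater, so
 * cyc != cyc_copy flags a torn snapshot and forces a retry. */
static uint64_t read_epoch_ns(void)
{
    uint64_t ns;
    uint32_t cyc, cyc_copy;

    do {
        cyc = cd.epoch_cyc;
        smp_rmb();
        ns = cd.epoch_ns;
        smp_rmb();
        cyc_copy = cd.epoch_cyc_copy;
    } while (cyc != cyc_copy);

    return ns;
}
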
@@ -686,6 +686,9 @@ static int cpufreq_callback(struct notifier_block *nb,
 	if (freq->flags & CPUFREQ_CONST_LOOPS)
 		return NOTIFY_OK;
 
+	if (arm_delay_ops.const_clock)
+		return NOTIFY_OK;
+
 	if (!per_cpu(l_p_j_ref, cpu)) {
 		per_cpu(l_p_j_ref, cpu) =
 			per_cpu(cpu_data, cpu).loops_per_jiffy;

@@ -77,6 +77,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
 		arm_delay_ops.delay = __timer_delay;
 		arm_delay_ops.const_udelay = __timer_const_udelay;
 		arm_delay_ops.udelay = __timer_udelay;
+		arm_delay_ops.const_clock = true;
 		delay_calibrated = true;
 	} else {
 		pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");

@@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT
	select CPU_EXYNOS4210
	select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD
	select PINCTRL
-	select PINCTRL_EXYNOS4
+	select PINCTRL_EXYNOS
	select USE_OF
	help
	  Machine support for Samsung Exynos4 machine with device tree enabled.

@@ -115,7 +115,7 @@
 /*
  * Only define NR_IRQS if less than NR_IRQS_EB
  */
-#define NR_IRQS_EB		(IRQ_EB_GIC_START + 96)
+#define NR_IRQS_EB		(IRQ_EB_GIC_START + 128)
 
 #if defined(CONFIG_MACH_REALVIEW_EB) \
	&& (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB))

@@ -749,7 +749,6 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	unsigned long instr = 0, instrptr;
 	int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
 	unsigned int type;
-	mm_segment_t fs;
 	unsigned int fault;
 	u16 tinstr = 0;
 	int isize = 4;

@@ -760,16 +759,15 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	instrptr = instruction_pointer(regs);
 
-	fs = get_fs();
-	set_fs(KERNEL_DS);
 	if (thumb_mode(regs)) {
-		fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
+		u16 *ptr = (u16 *)(instrptr & ~1);
+		fault = probe_kernel_address(ptr, tinstr);
 		if (!fault) {
 			if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
 			    IS_T32(tinstr)) {
 				/* Thumb-2 32-bit */
 				u16 tinst2 = 0;
-				fault = __get_user(tinst2, (u16 *)(instrptr+2));
+				fault = probe_kernel_address(ptr + 1, tinst2);
 				instr = (tinstr << 16) | tinst2;
 				thumb2_32b = 1;
 			} else {

@@ -778,8 +776,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 			}
 		}
 	} else
-		fault = __get_user(instr, (u32 *)instrptr);
-	set_fs(fs);
+		fault = probe_kernel_address(instrptr, instr);
 
 	if (fault) {
 		type = TYPE_FAULT;

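Both replacements above swap the __get_user()-under-KERNEL_DS pattern for probe_kernel_address(), which performs the same fault-tolerant read of kernel text without widening the address limit. Assuming the 3.8-era <linux/uaccess.h> helper (probe_kernel_address(addr, retval) wraps probe_kernel_read() and returns 0 on success), the idiom is:

#include <linux/uaccess.h>

/* Fault-safe instruction fetch: fills *insn and returns 0, or returns
 * nonzero if pc cannot be read -- no get_fs()/set_fs() bracketing. */
static int read_insn_word(unsigned long pc, u32 *insn)
{
	return probe_kernel_address((u32 *)pc, *insn);
}
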
@@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	if (is_coherent || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
-	else if (gfp & GFP_ATOMIC)
+	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
 	else if (!IS_ENABLED(CONFIG_CMA))
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);

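GFP_ATOMIC in this era is defined as just __GFP_HIGH ("may dip into emergency pools"), so testing `gfp & GFP_ATOMIC` does not actually ask whether the caller may sleep; the absence of __GFP_WAIT does. A sketch using the 3.8-era flag values:

/* Abbreviated 3.8-era <linux/gfp.h> values: */
#define __GFP_WAIT   0x10u
#define __GFP_HIGH   0x20u
#define GFP_ATOMIC   (__GFP_HIGH)
#define GFP_NOWAIT   (GFP_ATOMIC & ~__GFP_HIGH)   /* == 0 */

/* A GFP_NOWAIT caller must not sleep, yet carries no GFP_ATOMIC bit:
 *   old test: (GFP_NOWAIT & GFP_ATOMIC) == 0  -> treated as sleepable
 *   new test: !(GFP_NOWAIT & __GFP_WAIT)      -> correctly non-blocking
 */
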
@@ -22,12 +22,14 @@
	.macro	DBGSTR, str
#ifdef DEBUG
	stmfd	sp!, {r0-r3, ip, lr}
-	add	r0, pc, #4
+	ldr	r0, =1f
	bl	printk
-	b	1f
-	.asciz	KERN_DEBUG "VFP: \str\n"
-	.balign 4
-1:	ldmfd	sp!, {r0-r3, ip, lr}
+	ldmfd	sp!, {r0-r3, ip, lr}
+
+	.pushsection .rodata, "a"
+1:	.ascii	KERN_DEBUG "VFP: \str\n"
+	.byte	0
+	.previous
#endif
	.endm
 

@@ -35,12 +37,14 @@
#ifdef DEBUG
	stmfd	sp!, {r0-r3, ip, lr}
	mov	r1, \arg
-	add	r0, pc, #4
+	ldr	r0, =1f
	bl	printk
-	b	1f
-	.asciz	KERN_DEBUG "VFP: \str\n"
-	.balign 4
-1:	ldmfd	sp!, {r0-r3, ip, lr}
+	ldmfd	sp!, {r0-r3, ip, lr}
+
+	.pushsection .rodata, "a"
+1:	.ascii	KERN_DEBUG "VFP: \str\n"
+	.byte	0
+	.previous
#endif
	.endm
 

@@ -50,12 +54,14 @@
	mov	r3, \arg3
	mov	r2, \arg2
	mov	r1, \arg1
-	add	r0, pc, #4
+	ldr	r0, =1f
	bl	printk
-	b	1f
-	.asciz	KERN_DEBUG "VFP: \str\n"
-	.balign 4
-1:	ldmfd	sp!, {r0-r3, ip, lr}
+	ldmfd	sp!, {r0-r3, ip, lr}
+
+	.pushsection .rodata, "a"
+1:	.ascii	KERN_DEBUG "VFP: \str\n"
+	.byte	0
+	.previous
#endif
	.endm
 

@@ -413,7 +413,7 @@ void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
	 * If there isn't a second FP instruction, exit now. Note that
	 * the FPEXC.FP2V bit is valid only if FPEXC.EX is 1.
	 */
-	if (fpexc ^ (FPEXC_EX | FPEXC_FP2V))
+	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
		goto exit;
 
	/*

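The old XOR test is zero only when fpexc equals exactly FPEXC_EX | FPEXC_FP2V, so any unrelated bit that is also set (FPEXC_EN, for one, is set whenever the VFP is enabled) makes the expression nonzero and wrongly skips the second instruction; masking first tests just the two bits of interest. A stand-alone demonstration:

#include <assert.h>

#define FPEXC_FP2V  (1u << 28)
#define FPEXC_EN    (1u << 30)
#define FPEXC_EX    (1u << 31)

int main(void)
{
    unsigned int fpexc = FPEXC_EX | FPEXC_FP2V | FPEXC_EN;

    /* old test: nonzero because of the unrelated EN bit => wrongly exits */
    assert((fpexc ^ (FPEXC_EX | FPEXC_FP2V)) != 0);

    /* new test: both interesting bits set => correctly continues */
    assert((fpexc & (FPEXC_EX | FPEXC_FP2V)) == (FPEXC_EX | FPEXC_FP2V));
    return 0;
}
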
@@ -336,4 +336,14 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif /* __ASM_AVR32_DMA_MAPPING_H */

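This and the following dma-mapping.h hunks fan the new dma_mmap_coherent()/dma_get_sgtable() API out across architectures, either wired to the common implementation or stubbed to -EINVAL. A driver typically calls the former from its mmap file operation to hand a coherent buffer to user space; a hypothetical usage sketch (the foo_* names and fields are illustrative only):

#include <linux/dma-mapping.h>
#include <linux/fs.h>

/* Hypothetical driver state; names are illustrative, not from this tree. */
struct foo_priv {
	struct device *dev;
	void *cpu_addr;		/* from dma_alloc_coherent() */
	dma_addr_t dma_handle;
	size_t size;
};

/* mmap fop handing the coherent buffer to user space via the new helper */
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_priv *priv = file->private_data;

	return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
				 priv->dma_handle, priv->size);
}
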
@@ -154,4 +154,14 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	_dma_sync((dma_addr_t)vaddr, size, dir);
 }
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif /* _BLACKFIN_DMA_MAPPING_H */

@@ -89,4 +89,19 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f))
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h))
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+				    struct vm_area_struct *vma, void *cpu_addr,
+				    dma_addr_t dma_addr, size_t size)
+{
+	return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size)
+{
+	return -EINVAL;
+}
+
 #endif /* _ASM_C6X_DMA_MAPPING_H */

@@ -158,5 +158,15 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 }
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 
 #endif

@@ -132,4 +132,19 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	flush_write_buffers();
 }
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+				    struct vm_area_struct *vma, void *cpu_addr,
+				    dma_addr_t dma_addr, size_t size)
+{
+	return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size)
+{
+	return -EINVAL;
+}
+
 #endif /* _ASM_DMA_MAPPING_H */

@@ -115,4 +115,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
 #include <asm-generic/dma-mapping-broken.h>
 #endif
 
+/* drivers/base/dma-mapping.c */
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
+extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size);
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
+#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)
+
 #endif /* _M68K_DMA_MAPPING_H */

@@ -8,8 +8,10 @@ config BCM47XX_SSB
	select SSB_DRIVER_EXTIF
	select SSB_EMBEDDED
	select SSB_B43_PCI_BRIDGE if PCI
+	select SSB_DRIVER_PCICORE if PCI
	select SSB_PCICORE_HOSTMODE if PCI
	select SSB_DRIVER_GPIO
+	select GPIOLIB
	default y
	help
	 Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support.

@@ -25,6 +27,7 @@ config BCM47XX_BCMA
	select BCMA_HOST_PCI if PCI
	select BCMA_DRIVER_PCI_HOSTMODE if PCI
	select BCMA_DRIVER_GPIO
+	select GPIOLIB
	default y
	help
	 Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus.

@@ -30,6 +30,7 @@
  * measurement, and debugging facilities.
  */
 
+#include <linux/compiler.h>
 #include <linux/irqflags.h>
 #include <asm/octeon/cvmx.h>
 #include <asm/octeon/cvmx-l2c.h>

@@ -285,22 +286,22 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter)
  */
 static void fault_in(uint64_t addr, int len)
 {
-	volatile char *ptr;
-	volatile char dummy;
+	char *ptr;
+
	/*
	 * Adjust addr and length so we get all cache lines even for
	 * small ranges spanning two cache lines.
	 */
	len += addr & CVMX_CACHE_LINE_MASK;
	addr &= ~CVMX_CACHE_LINE_MASK;
-	ptr = (volatile char *)cvmx_phys_to_ptr(addr);
+	ptr = cvmx_phys_to_ptr(addr);
	/*
	 * Invalidate L1 cache to make sure all loads result in data
	 * being in L2.
	 */
	CVMX_DCACHE_INVALIDATE;
	while (len > 0) {
-		dummy += *ptr;
+		ACCESS_ONCE(*ptr);
		len -= CVMX_CACHE_LINE_SIZE;
		ptr += CVMX_CACHE_LINE_SIZE;
	}

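The volatile dummy accumulator existed only to keep the compiler from discarding the prefetching loads, and it provoked "variable set but not used" warnings. ACCESS_ONCE() achieves the same effect by forcing the access through a volatile lvalue, no sink variable required; this is why <linux/compiler.h> is now included above. Its definition in this era:

/* From <linux/compiler.h> (3.8-era): */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

/* So the statement
 *	ACCESS_ONCE(*ptr);
 * compiles to a real load the optimizer cannot elide. */
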
@@ -16,7 +16,7 @@
 #include <asm/mipsregs.h>
 
 #define DSP_DEFAULT	0x00000000
-#define DSP_MASK	0x3ff
+#define DSP_MASK	0x3f
 
 #define __enable_dsp_hazard()	\
 do {	\

@@ -353,6 +353,7 @@ union mips_instruction {
	struct u_format u_format;
	struct c_format c_format;
	struct r_format r_format;
+	struct p_format p_format;
	struct f_format f_format;
	struct ma_format ma_format;
	struct b_format b_format;

@@ -21,4 +21,4 @@
 #define R10000_LLSC_WAR			0
 #define MIPS34K_MISSED_ITLB_WAR		0
 
-#endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */
+#endif /* __ASM_MIPS_MACH_PNX833X_WAR_H */

@@ -230,6 +230,7 @@ static inline void pud_clear(pud_t *pudp)
 #else
 #define pte_pfn(x)		((unsigned long)((x).pte >> _PFN_SHIFT))
 #define pfn_pte(pfn, prot)	__pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot)	__pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot))
 #endif
 
 #define __pgd_offset(address)	pgd_index(address)

@@ -3,6 +3,7 @@ include include/uapi/asm-generic/Kbuild.asm
 
 header-y += auxvec.h
 header-y += bitsperlong.h
+header-y += break.h
 header-y += byteorder.h
 header-y += cachectl.h
 header-y += errno.h

@@ -25,6 +25,12 @@
 #define MCOUNT_OFFSET_INSNS 4
 #endif
 
+/* Arch override because MIPS doesn't need to run this from stop_machine() */
+void arch_ftrace_update_code(int command)
+{
+	ftrace_modify_all_code(command);
+}
+
 /*
  * Check if the address is in kernel space
  *

@@ -89,6 +95,24 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
	return 0;
 }
 
+#ifndef CONFIG_64BIT
+static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
+				unsigned int new_code2)
+{
+	int faulted;
+
+	safe_store_code(new_code1, ip, faulted);
+	if (unlikely(faulted))
+		return -EFAULT;
+	ip += 4;
+	safe_store_code(new_code2, ip, faulted);
+	if (unlikely(faulted))
+		return -EFAULT;
+	flush_icache_range(ip, ip + 8); /* original ip + 12 */
+	return 0;
+}
+#endif
+
 /*
  * The details about the calling site of mcount on MIPS
  *

@@ -131,8 +155,18 @@ int ftrace_make_nop(struct module *mod,
	 * needed.
	 */
	new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+#ifdef CONFIG_64BIT
	return ftrace_modify_code(ip, new);
+#else
+	/*
+	 * On 32 bit MIPS platforms, gcc adds a stack adjust
+	 * instruction in the delay slot after the branch to
+	 * mcount and expects mcount to restore the sp on return.
+	 * This is based on a legacy API and does nothing but
+	 * waste instructions so it's being removed at runtime.
+	 */
+	return ftrace_modify_code_2(ip, new, INSN_NOP);
+#endif
 }
 
 int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)

@@ -46,9 +46,8 @@
	PTR_L	a5, PT_R9(sp)
	PTR_L	a6, PT_R10(sp)
	PTR_L	a7, PT_R11(sp)
-	PTR_ADDIU	sp, PT_SIZE
 #else
-	PTR_ADDIU	sp, (PT_SIZE + 8)
+	PTR_ADDIU	sp, PT_SIZE
 #endif
	.endm
 

@@ -69,7 +68,9 @@ NESTED(ftrace_caller, PT_SIZE, ra)
	.globl _mcount
 _mcount:
	b	ftrace_stub
-	nop
+	addiu sp,sp,8
+
+	/* When tracing is activated, it calls ftrace_caller+8 (aka here) */
	lw	t1, function_trace_stop
	bnez	t1, ftrace_stub
	nop

@@ -705,7 +705,7 @@ static int vpe_run(struct vpe * v)
 
			printk(KERN_WARNING
			       "VPE loader: TC %d is already in use.\n",
-			       t->index);
+			       v->tc->index);
			return -ENOEXEC;
		}
	} else {

@@ -408,7 +408,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
 #endif
 
	/* tell oprofile which irq to use */
-	cp0_perfcount_irq = LTQ_PERF_IRQ;
+	cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
 
	/*
	 * if the timer irq is not one of the mips irqs we need to

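With the ICU behind an irq_domain, LTQ_PERF_IRQ is a domain-local hardware number, not a Linux IRQ; it has to be mapped into the domain before anything may use it as a virq. The general pattern (irq_create_mapping() allocates, or looks up, the virq backing a hwirq):

#include <linux/irqdomain.h>

static unsigned int map_perf_irq(struct irq_domain *domain,
				 irq_hw_number_t hwirq)
{
	/* allocate (or find) the Linux virq backing this hwirq */
	return irq_create_mapping(domain, hwirq);
}
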
@@ -21,7 +21,7 @@ void __delay(unsigned long loops)
	"	.set	noreorder	\n"
	"	.align	3	\n"
	"1:	bnez	%0, 1b	\n"
-#if __SIZEOF_LONG__ == 4
+#if BITS_PER_LONG == 32
	"	subu	%0, 1	\n"
 #else
	"	dsubu	%0, 1	\n"

@@ -190,9 +190,3 @@ void __iounmap(const volatile void __iomem *addr)
 
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(__iounmap);
-
-int __virt_addr_valid(const volatile void *kaddr)
-{
-	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
-}
-EXPORT_SYMBOL_GPL(__virt_addr_valid);

@@ -192,3 +192,9 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 
	return ret;
 }
+
+int __virt_addr_valid(const volatile void *kaddr)
+{
+	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
+}
+EXPORT_SYMBOL_GPL(__virt_addr_valid);

@@ -193,8 +193,11 @@ static void nlm_init_node(void)
 
 void __init prom_init(void)
 {
-	int i, *argv, *envp;	/* passed as 32 bit ptrs */
+	int *argv, *envp;	/* passed as 32 bit ptrs */
	struct psb_info *prom_infop;
+#ifdef CONFIG_SMP
+	int i;
+#endif
 
	/* truncate to 32 bit and sign extend all args */
	argv = (int *)(long)(int)fw_arg1;

@@ -24,7 +24,7 @@
 #include <asm/mach-ath79/pci.h>
 
 #define AR71XX_PCI_MEM_BASE	0x10000000
-#define AR71XX_PCI_MEM_SIZE	0x08000000
+#define AR71XX_PCI_MEM_SIZE	0x07000000
 
 #define AR71XX_PCI_WIN0_OFFS	0x10000000
 #define AR71XX_PCI_WIN1_OFFS	0x11000000

@@ -21,7 +21,7 @@
 #define AR724X_PCI_CTRL_SIZE	0x100
 
 #define AR724X_PCI_MEM_BASE	0x10000000
-#define AR724X_PCI_MEM_SIZE	0x08000000
+#define AR724X_PCI_MEM_SIZE	0x04000000
 
 #define AR724X_PCI_REG_RESET		0x18
 #define AR724X_PCI_REG_INT_STATUS	0x4c

@@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size,
	mn10300_dcache_flush_inv();
 }
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+				    struct vm_area_struct *vma, void *cpu_addr,
+				    dma_addr_t dma_addr, size_t size)
+{
+	return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size)
+{
+	return -EINVAL;
+}
+
 #endif

@@ -238,4 +238,19 @@ void * sba_get_iommu(struct parisc_device *dev);
 /* At the moment, we panic on error for IOMMU resource exaustion */
 #define dma_mapping_error(dev, x)	0
 
+/* This API cannot be supported on PA-RISC */
+static inline int dma_mmap_coherent(struct device *dev,
+				    struct vm_area_struct *vma, void *cpu_addr,
+				    dma_addr_t dma_addr, size_t size)
+{
+	return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size)
+{
+	return -EINVAL;
+}
+
 #endif

@@ -115,11 +115,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	sldi	r29,r5,SID_SHIFT - VPN_SHIFT
	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
	or	r29,r28,r29
-	/* Calculate hash value for primary slot and store it in r28 */
-	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
-	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
-	xor	r28,r5,r0
+	/*
+	 * Calculate hash value for primary slot and store it in r28
+	 * r3 = va, r5 = vsid
+	 * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
+	 */
+	rldicl	r0,r3,64-12,48
+	xor	r28,r5,r0		/* hash */
	b	4f
 
 3:	/* Calc vpn and put it in r29 */

@@ -130,11 +132,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	/*
	 * calculate hash value for primary slot and
	 * store it in r28 for 1T segment
+	 * r3 = va, r5 = vsid
	 */
-	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
-	clrldi	r5,r5,40		/* vsid & 0xffffff */
-	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
-	xor	r28,r28,r5
+	sldi	r28,r5,25		/* vsid << 25 */
+	/* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
+	rldicl	r0,r3,64-12,36
+	xor	r28,r28,r5		/* vsid ^ ( vsid << 25) */
	xor	r28,r28,r0		/* hash */
 
	/* Convert linux PTE bits into HW equivalents */

@@ -407,11 +410,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	 */
	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
	or	r29,r28,r29
-	/* Calculate hash value for primary slot and store it in r28 */
-	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
-	rldicl	r0,r3,64-12,48		/* (ea >> 12) & 0xffff */
-	xor	r28,r5,r0
+	/*
+	 * Calculate hash value for primary slot and store it in r28
+	 * r3 = va, r5 = vsid
+	 * r0 = (va >> 12) & ((1ul << (28 - 12)) -1)
+	 */
+	rldicl	r0,r3,64-12,48
+	xor	r28,r5,r0		/* hash */
	b	4f
 
 3:	/* Calc vpn and put it in r29 */

@@ -426,11 +431,12 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	/*
	 * Calculate hash value for primary slot and
	 * store it in r28 for 1T segment
+	 * r3 = va, r5 = vsid
	 */
-	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
-	clrldi	r5,r5,40		/* vsid & 0xffffff */
-	rldicl	r0,r3,64-12,36		/* (ea >> 12) & 0xfffffff */
-	xor	r28,r28,r5
+	sldi	r28,r5,25		/* vsid << 25 */
+	/* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */
+	rldicl	r0,r3,64-12,36
+	xor	r28,r28,r5		/* vsid ^ ( vsid << 25) */
	xor	r28,r28,r0		/* hash */
 
	/* Convert linux PTE bits into HW equivalents */

@@ -752,25 +758,27 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT)
	or	r29,r28,r29
 
-	/* Calculate hash value for primary slot and store it in r28 */
-	rldicl	r5,r5,0,25		/* vsid & 0x0000007fffffffff */
-	rldicl	r0,r3,64-16,52		/* (ea >> 16) & 0xfff */
-	xor	r28,r5,r0
+	/* Calculate hash value for primary slot and store it in r28
+	 * r3 = va, r5 = vsid
+	 * r0 = (va >> 16) & ((1ul << (28 - 16)) -1)
+	 */
+	rldicl	r0,r3,64-16,52
+	xor	r28,r5,r0		/* hash */
	b	4f
 
 3:	/* Calc vpn and put it in r29 */
	sldi	r29,r5,SID_SHIFT_1T - VPN_SHIFT
	rldicl	r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT)
	or	r29,r28,r29
 
	/*
	 * calculate hash value for primary slot and
	 * store it in r28 for 1T segment
+	 * r3 = va, r5 = vsid
	 */
-	rldic	r28,r5,25,25		/* (vsid << 25) & 0x7fffffffff */
-	clrldi	r5,r5,40		/* vsid & 0xffffff */
-	rldicl	r0,r3,64-16,40		/* (ea >> 16) & 0xffffff */
-	xor	r28,r28,r5
+	sldi	r28,r5,25		/* vsid << 25 */
+	/* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */
+	rldicl	r0,r3,64-16,40
+	xor	r28,r28,r5		/* vsid ^ ( vsid << 25) */
	xor	r28,r28,r0		/* hash */
 
	/* Convert linux PTE bits into HW equivalents */

@@ -207,7 +207,7 @@ sysexit_from_sys_call:
	testl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
	jnz	ia32_ret_from_sys_call
	TRACE_IRQS_ON
-	sti
+	ENABLE_INTERRUPTS(CLBR_NONE)
	movl	%eax,%esi		/* second arg, syscall return value */
	cmpl	$-MAX_ERRNO,%eax	/* is it an error ? */
	jbe	1f

@@ -217,7 +217,7 @@ sysexit_from_sys_call:
	call	__audit_syscall_exit
	movq	RAX-ARGOFFSET(%rsp),%rax	/* reload syscall return value */
	movl	$(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
-	cli
+	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
	testl	%edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
	jz	\exit

@@ -298,8 +298,7 @@ struct _cache_attr {
			 unsigned int);
 };
 
-#ifdef CONFIG_AMD_NB
-
+#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)
 /*
  * L3 cache descriptors
  */

@@ -524,9 +523,9 @@ store_subcaches(struct _cpuid4_info *this_leaf, const char *buf, size_t count,
 static struct _cache_attr subcaches =
	__ATTR(subcaches, 0644, show_subcaches, store_subcaches);
 
-#else	/* CONFIG_AMD_NB */
+#else
 #define amd_init_l3_cache(x, y)
-#endif /* CONFIG_AMD_NB */
+#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
 
 static int
 __cpuinit cpuid4_cache_lookup_regs(int index,

@@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void)
		break;
 
	case 28: /* Atom */
-	case 54: /* Cedariew */
+	case 38: /* Lincroft */
+	case 39: /* Penwell */
+	case 53: /* Cloverview */
+	case 54: /* Cedarview */
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
 

@@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void)
		pr_cont("SandyBridge events, ");
		break;
	case 58: /* IvyBridge */
+	case 62: /* IvyBridge EP */
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,

@@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =
 
 };
 
-static __initconst u64 p6_hw_cache_event_ids
+static u64 p6_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =

@@ -55,7 +55,7 @@ static FILE *input_file;	/* Input file name */
 static void usage(const char *err)
 {
	if (err)
-		fprintf(stderr, "Error: %s\n\n", err);
+		fprintf(stderr, "%s: Error: %s\n\n", prog, err);
	fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
	fprintf(stderr, "\t-y	64bit mode\n");
	fprintf(stderr, "\t-n	32bit mode\n");

@@ -269,7 +269,13 @@ int main(int argc, char **argv)
		insns++;
	}
 
-	fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed);
+	fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
+		prog,
+		(errors) ? "Failure" : "Success",
+		insns,
+		(input_file) ? "given" : "random",
+		errors,
+		seed);
 
	return errors ? 1 : 0;
 }

@@ -170,4 +170,19 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	consistent_sync(vaddr, size, direction);
 }
 
+/* Not supported for now */
+static inline int dma_mmap_coherent(struct device *dev,
+				    struct vm_area_struct *vma, void *cpu_addr,
+				    dma_addr_t dma_addr, size_t size)
+{
+	return -EINVAL;
+}
+
+static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+				  void *cpu_addr, dma_addr_t dma_addr,
+				  size_t size)
+{
+	return -EINVAL;
+}
+
 #endif	/* _XTENSA_DMA_MAPPING_H */

@@ -35,6 +35,8 @@ static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
 
+static void disk_check_events(struct disk_events *ev,
+			      unsigned int *clearing_ptr);
 static void disk_alloc_events(struct gendisk *disk);
 static void disk_add_events(struct gendisk *disk);
 static void disk_del_events(struct gendisk *disk);

@@ -1549,6 +1551,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
	const struct block_device_operations *bdops = disk->fops;
	struct disk_events *ev = disk->ev;
	unsigned int pending;
+	unsigned int clearing = mask;
 
	if (!ev) {
		/* for drivers still using the old ->media_changed method */

@@ -1558,34 +1561,53 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
		return 0;
	}
 
-	/* tell the workfn about the events being cleared */
+	disk_block_events(disk);
+
+	/*
+	 * store the union of mask and ev->clearing on the stack so that the
+	 * race with disk_flush_events does not cause ambiguity (ev->clearing
+	 * can still be modified even if events are blocked).
+	 */
	spin_lock_irq(&ev->lock);
-	ev->clearing |= mask;
+	clearing |= ev->clearing;
+	ev->clearing = 0;
	spin_unlock_irq(&ev->lock);
 
-	/* uncondtionally schedule event check and wait for it to finish */
-	disk_block_events(disk);
-	queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
-	flush_delayed_work(&ev->dwork);
-	__disk_unblock_events(disk, false);
+	disk_check_events(ev, &clearing);
+	/*
+	 * if ev->clearing is not 0, the disk_flush_events got called in the
+	 * middle of this function, so we want to run the workfn without delay.
+	 */
+	__disk_unblock_events(disk, ev->clearing ? true : false);
 
	/* then, fetch and clear pending events */
	spin_lock_irq(&ev->lock);
-	WARN_ON_ONCE(ev->clearing & mask);	/* cleared by workfn */
	pending = ev->pending & mask;
	ev->pending &= ~mask;
	spin_unlock_irq(&ev->lock);
+	WARN_ON_ONCE(clearing & mask);
 
	return pending;
 }
 
+/*
+ * Separate this part out so that a different pointer for clearing_ptr can be
+ * passed in for disk_clear_events.
+ */
 static void disk_events_workfn(struct work_struct *work)
 {
	struct delayed_work *dwork = to_delayed_work(work);
	struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
+
+	disk_check_events(ev, &ev->clearing);
+}
+
+static void disk_check_events(struct disk_events *ev,
+			      unsigned int *clearing_ptr)
+{
	struct gendisk *disk = ev->disk;
	char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
-	unsigned int clearing = ev->clearing;
+	unsigned int clearing = *clearing_ptr;
	unsigned int events;
	unsigned long intv;
	int nr_events = 0, i;

|
||||||
|
|
||||||
events &= ~ev->pending;
|
events &= ~ev->pending;
|
||||||
ev->pending |= events;
|
ev->pending |= events;
|
||||||
ev->clearing &= ~clearing;
|
*clearing_ptr &= ~clearing;
|
||||||
|
|
||||||
intv = disk_events_poll_jiffies(disk);
|
intv = disk_events_poll_jiffies(disk);
|
||||||
if (!ev->block && intv)
|
if (!ev->block && intv)
|
||||||
|
|
|
@@ -129,7 +129,7 @@ static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset)
	writel(value, ahb->regs + offset);
 }
 
-#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+#ifdef CONFIG_TEGRA_IOMMU_SMMU
 static int tegra_ahb_match_by_smmu(struct device *dev, void *data)
 {
	struct tegra_ahb *ahb = dev_get_drvdata(dev);

@@ -636,82 +636,82 @@ struct rx_buf_desc {
 #define SEG_BASE IPHASE5575_FRAG_CONTROL_REG_BASE
 #define REASS_BASE IPHASE5575_REASS_CONTROL_REG_BASE
 
-typedef volatile u_int	freg_t;
+typedef volatile u_int	ffreg_t;
 typedef u_int	rreg_t;
 
 typedef struct _ffredn_t {
-	freg_t	idlehead_high;	/* Idle cell header (high)		*/
-	freg_t	idlehead_low;	/* Idle cell header (low)		*/
-	freg_t	maxrate;	/* Maximum rate				*/
-	freg_t	stparms;	/* Traffic Management Parameters	*/
-	freg_t	abrubr_abr;	/* ABRUBR Priority Byte 1, TCR Byte 0	*/
-	freg_t	rm_type;	/*					*/
+	ffreg_t	idlehead_high;	/* Idle cell header (high)		*/
+	ffreg_t	idlehead_low;	/* Idle cell header (low)		*/
+	ffreg_t	maxrate;	/* Maximum rate				*/
+	ffreg_t	stparms;	/* Traffic Management Parameters	*/
+	ffreg_t	abrubr_abr;	/* ABRUBR Priority Byte 1, TCR Byte 0	*/
+	ffreg_t	rm_type;	/*					*/
	u_int	filler5[0x17 - 0x06];
-	freg_t	cmd_reg;	/* Command register			*/
+	ffreg_t	cmd_reg;	/* Command register			*/
	u_int	filler18[0x20 - 0x18];
-	freg_t	cbr_base;	/* CBR Pointer Base			*/
-	freg_t	vbr_base;	/* VBR Pointer Base			*/
-	freg_t	abr_base;	/* ABR Pointer Base			*/
-	freg_t	ubr_base;	/* UBR Pointer Base			*/
+	ffreg_t	cbr_base;	/* CBR Pointer Base			*/
+	ffreg_t	vbr_base;	/* VBR Pointer Base			*/
+	ffreg_t	abr_base;	/* ABR Pointer Base			*/
+	ffreg_t	ubr_base;	/* UBR Pointer Base			*/
	u_int	filler24;
-	freg_t	vbrwq_base;	/* VBR Wait Queue Base			*/
-	freg_t	abrwq_base;	/* ABR Wait Queue Base			*/
-	freg_t	ubrwq_base;	/* UBR Wait Queue Base			*/
-	freg_t	vct_base;	/* Main VC Table Base			*/
-	freg_t	vcte_base;	/* Extended Main VC Table Base		*/
+	ffreg_t	vbrwq_base;	/* VBR Wait Queue Base			*/
+	ffreg_t	abrwq_base;	/* ABR Wait Queue Base			*/
+	ffreg_t	ubrwq_base;	/* UBR Wait Queue Base			*/
+	ffreg_t	vct_base;	/* Main VC Table Base			*/
+	ffreg_t	vcte_base;	/* Extended Main VC Table Base		*/
	u_int	filler2a[0x2C - 0x2A];
-	freg_t	cbr_tab_beg;	/* CBR Table Begin			*/
-	freg_t	cbr_tab_end;	/* CBR Table End			*/
-	freg_t	cbr_pointer;	/* CBR Pointer				*/
+	ffreg_t	cbr_tab_beg;	/* CBR Table Begin			*/
+	ffreg_t	cbr_tab_end;	/* CBR Table End			*/
+	ffreg_t	cbr_pointer;	/* CBR Pointer				*/
	u_int	filler2f[0x30 - 0x2F];
-	freg_t	prq_st_adr;	/* Packet Ready Queue Start Address	*/
-	freg_t	prq_ed_adr;	/* Packet Ready Queue End Address	*/
-	freg_t	prq_rd_ptr;	/* Packet Ready Queue read pointer	*/
-	freg_t	prq_wr_ptr;	/* Packet Ready Queue write pointer	*/
-	freg_t	tcq_st_adr;	/* Transmit Complete Queue Start Address*/
-	freg_t	tcq_ed_adr;	/* Transmit Complete Queue End Address	*/
-	freg_t	tcq_rd_ptr;	/* Transmit Complete Queue read pointer	*/
-	freg_t	tcq_wr_ptr;	/* Transmit Complete Queue write pointer*/
+	ffreg_t	prq_st_adr;	/* Packet Ready Queue Start Address	*/
+	ffreg_t	prq_ed_adr;	/* Packet Ready Queue End Address	*/
+	ffreg_t	prq_rd_ptr;	/* Packet Ready Queue read pointer	*/
+	ffreg_t	prq_wr_ptr;	/* Packet Ready Queue write pointer	*/
+	ffreg_t	tcq_st_adr;	/* Transmit Complete Queue Start Address*/
+	ffreg_t	tcq_ed_adr;	/* Transmit Complete Queue End Address	*/
+	ffreg_t	tcq_rd_ptr;	/* Transmit Complete Queue read pointer	*/
+	ffreg_t	tcq_wr_ptr;	/* Transmit Complete Queue write pointer*/
	u_int	filler38[0x40 - 0x38];
-	freg_t	queue_base;	/* Base address for PRQ and TCQ		*/
-	freg_t	desc_base;	/* Base address of descriptor table	*/
+	ffreg_t	queue_base;	/* Base address for PRQ and TCQ		*/
+	ffreg_t	desc_base;	/* Base address of descriptor table	*/
	u_int	filler42[0x45 - 0x42];
-	freg_t	mode_reg_0;	/* Mode register 0			*/
-	freg_t	mode_reg_1;	/* Mode register 1			*/
-	freg_t	intr_status_reg;/* Interrupt Status register		*/
-	freg_t	mask_reg;	/* Mask Register			*/
-	freg_t	cell_ctr_high1;	/* Total cell transfer count (high)	*/
-	freg_t	cell_ctr_lo1;	/* Total cell transfer count (low)	*/
-	freg_t	state_reg;	/* Status register			*/
+	ffreg_t	mode_reg_0;	/* Mode register 0			*/
+	ffreg_t	mode_reg_1;	/* Mode register 1			*/
+	ffreg_t	intr_status_reg;/* Interrupt Status register		*/
+	ffreg_t	mask_reg;	/* Mask Register			*/
+	ffreg_t	cell_ctr_high1;	/* Total cell transfer count (high)	*/
+	ffreg_t	cell_ctr_lo1;	/* Total cell transfer count (low)	*/
+	ffreg_t	state_reg;	/* Status register			*/
	u_int	filler4c[0x58 - 0x4c];
-	freg_t	curr_desc_num;	/* Contains the current descriptor num	*/
-	freg_t	next_desc;	/* Next descriptor			*/
-	freg_t	next_vc;	/* Next VC				*/
+	ffreg_t	curr_desc_num;	/* Contains the current descriptor num	*/
+	ffreg_t	next_desc;	/* Next descriptor			*/
+	ffreg_t	next_vc;	/* Next VC				*/
	u_int	filler5b[0x5d - 0x5b];
-	freg_t	present_slot_cnt;/* Present slot count			*/
+	ffreg_t	present_slot_cnt;/* Present slot count			*/
	u_int	filler5e[0x6a - 0x5e];
-	freg_t	new_desc_num;	/* New descriptor number		*/
-	freg_t	new_vc;		/* New VC				*/
-	freg_t	sched_tbl_ptr;	/* Schedule table pointer		*/
-	freg_t	vbrwq_wptr;	/* VBR wait queue write pointer		*/
-	freg_t	vbrwq_rptr;	/* VBR wait queue read pointer		*/
-	freg_t	abrwq_wptr;	/* ABR wait queue write pointer		*/
-	freg_t	abrwq_rptr;	/* ABR wait queue read pointer		*/
-	freg_t	ubrwq_wptr;	/* UBR wait queue write pointer		*/
-	freg_t	ubrwq_rptr;	/* UBR wait queue read pointer		*/
-	freg_t	cbr_vc;		/* CBR VC				*/
-	freg_t	vbr_sb_vc;	/* VBR SB VC				*/
-	freg_t	abr_sb_vc;	/* ABR SB VC				*/
-	freg_t	ubr_sb_vc;	/* UBR SB VC				*/
-	freg_t	vbr_next_link;	/* VBR next link			*/
-	freg_t	abr_next_link;	/* ABR next link			*/
-	freg_t	ubr_next_link;	/* UBR next link			*/
+	ffreg_t	new_desc_num;	/* New descriptor number		*/
+	ffreg_t	new_vc;		/* New VC				*/
+	ffreg_t	sched_tbl_ptr;	/* Schedule table pointer		*/
+	ffreg_t	vbrwq_wptr;	/* VBR wait queue write pointer		*/
+	ffreg_t	vbrwq_rptr;	/* VBR wait queue read pointer		*/
+	ffreg_t	abrwq_wptr;	/* ABR wait queue write pointer		*/
+	ffreg_t	abrwq_rptr;	/* ABR wait queue read pointer		*/
+	ffreg_t	ubrwq_wptr;	/* UBR wait queue write pointer		*/
+	ffreg_t	ubrwq_rptr;	/* UBR wait queue read pointer		*/
+	ffreg_t	cbr_vc;		/* CBR VC				*/
+	ffreg_t	vbr_sb_vc;	/* VBR SB VC				*/
+	ffreg_t	abr_sb_vc;	/* ABR SB VC				*/
+	ffreg_t	ubr_sb_vc;	/* UBR SB VC				*/
+	ffreg_t	vbr_next_link;	/* VBR next link			*/
+	ffreg_t	abr_next_link;	/* ABR next link			*/
+	ffreg_t	ubr_next_link;	/* UBR next link			*/
	u_int	filler7a[0x7c-0x7a];
-	freg_t	out_rate_head;	/* Out of rate head			*/
+	ffreg_t	out_rate_head;	/* Out of rate head			*/
	u_int	filler7d[0xca-0x7d];	/* pad out to full address space */
-	freg_t	cell_ctr_high1_nc;/* Total cell transfer count (high)	*/
-	freg_t	cell_ctr_lo1_nc;/* Total cell transfer count (low)	*/
+	ffreg_t	cell_ctr_high1_nc;/* Total cell transfer count (high)	*/
+	ffreg_t	cell_ctr_lo1_nc;/* Total cell transfer count (low)	*/
	u_int	fillercc[0x100-0xcc];	/* pad out to full address space */
 } ffredn_t;
 
 typedef struct _rfredn_t {

@@ -94,11 +94,16 @@ void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
 #ifdef CONFIG_BCMA_DRIVER_GPIO
 /* driver_gpio.c */
 int bcma_gpio_init(struct bcma_drv_cc *cc);
+int bcma_gpio_unregister(struct bcma_drv_cc *cc);
 #else
 static inline int bcma_gpio_init(struct bcma_drv_cc *cc)
 {
     return -ENOTSUPP;
 }
+static inline int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+    return 0;
+}
 #endif /* CONFIG_BCMA_DRIVER_GPIO */
 
 #endif

@@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
     struct bcma_bus *bus = cc->core->bus;
 
     if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
-        cc->core->id.rev != 0x38) {
+        cc->core->id.rev != 38) {
         bcma_err(bus, "NAND flash on unsupported board!\n");
         return -ENOTSUPP;
     }

@@ -96,3 +96,8 @@ int bcma_gpio_init(struct bcma_drv_cc *cc)
 
     return gpiochip_add(chip);
 }
+
+int bcma_gpio_unregister(struct bcma_drv_cc *cc)
+{
+    return gpiochip_remove(&cc->gpio);
+}

@@ -268,6 +268,13 @@ int bcma_bus_register(struct bcma_bus *bus)
 void bcma_bus_unregister(struct bcma_bus *bus)
 {
     struct bcma_device *cores[3];
+    int err;
+
+    err = bcma_gpio_unregister(&bus->drv_cc);
+    if (err == -EBUSY)
+        bcma_err(bus, "Some GPIOs are still in use.\n");
+    else if (err)
+        bcma_err(bus, "Can not unregister GPIO driver: %i\n", err);
 
     cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
     cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);

@@ -168,7 +168,7 @@ static void wake_all_senders(struct drbd_tconn *tconn) {
 }
 
 /* must hold resource->req_lock */
-static void start_new_tl_epoch(struct drbd_tconn *tconn)
+void start_new_tl_epoch(struct drbd_tconn *tconn)
 {
     /* no point closing an epoch, if it is empty, anyways. */
     if (tconn->current_tle_writes == 0)

@@ -267,6 +267,7 @@ struct bio_and_error {
     int error;
 };
 
+extern void start_new_tl_epoch(struct drbd_tconn *tconn);
 extern void drbd_req_destroy(struct kref *kref);
 extern void _req_may_be_done(struct drbd_request *req,
         struct bio_and_error *m);

@@ -931,6 +931,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
     enum drbd_state_rv rv = SS_SUCCESS;
     enum sanitize_state_warnings ssw;
     struct after_state_chg_work *ascw;
+    bool did_remote, should_do_remote;
 
     os = drbd_read_state(mdev);
 
@@ -981,11 +982,17 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
         (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
         atomic_inc(&mdev->local_cnt);
 
+    did_remote = drbd_should_do_remote(mdev->state);
     mdev->state.i = ns.i;
+    should_do_remote = drbd_should_do_remote(mdev->state);
     mdev->tconn->susp = ns.susp;
     mdev->tconn->susp_nod = ns.susp_nod;
     mdev->tconn->susp_fen = ns.susp_fen;
 
+    /* put replicated vs not-replicated requests in seperate epochs */
+    if (did_remote != should_do_remote)
+        start_new_tl_epoch(mdev->tconn);
+
     if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
         drbd_print_uuids(mdev, "attached to UUIDs");
 

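The drbd hunk above samples drbd_should_do_remote() immediately before and immediately after the state word is committed, and opens a new transfer-log epoch only when the answer flips. A standalone C sketch of that before/after edge-detection idiom (all names here are illustrative, not drbd's):

#include <stdbool.h>
#include <stdio.h>

struct state { int role; int disk; };

/* Hypothetical stand-in for the replication predicate. */
static bool should_replicate(const struct state *s)
{
    return s->role == 1 && s->disk >= 2;
}

static void apply_state(struct state *s, struct state ns)
{
    bool did = should_replicate(s);   /* predicate before the change */
    *s = ns;                          /* commit the new state word */
    bool doing = should_replicate(s); /* predicate after the change */

    if (did != doing)                 /* act only on the transition */
        printf("replication edge: starting a new epoch\n");
}

int main(void)
{
    struct state s = { .role = 0, .disk = 2 };
    apply_state(&s, (struct state){ .role = 1, .disk = 2 });
    return 0;
}
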
@@ -626,12 +626,13 @@ static void mtip_timeout_function(unsigned long int data)
         }
     }
 
-    if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+    if (cmdto_cnt) {
         print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
+        if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
             mtip_restart_port(port);
+            wake_up_interruptible(&port->svc_wait);
+        }
         clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
-        wake_up_interruptible(&port->svc_wait);
     }
 
     if (port->ic_pause_timer) {
@@ -3887,7 +3888,12 @@ static int mtip_block_remove(struct driver_data *dd)
      * Delete our gendisk structure. This also removes the device
      * from /dev
      */
-    del_gendisk(dd->disk);
+    if (dd->disk) {
+        if (dd->disk->queue)
+            del_gendisk(dd->disk);
+        else
+            put_disk(dd->disk);
+    }
 
     spin_lock(&rssd_index_lock);
     ida_remove(&rssd_index_ida, dd->index);
@@ -3921,7 +3927,13 @@ static int mtip_block_shutdown(struct driver_data *dd)
         "Shutting down %s ...\n", dd->disk->disk_name);
 
     /* Delete our gendisk structure, and cleanup the blk queue. */
-    del_gendisk(dd->disk);
+    if (dd->disk) {
+        if (dd->disk->queue)
+            del_gendisk(dd->disk);
+        else
+            put_disk(dd->disk);
+    }
+
 
     spin_lock(&rssd_index_lock);
     ida_remove(&rssd_index_ida, dd->index);

@@ -161,10 +161,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 static void make_response(struct xen_blkif *blkif, u64 id,
               unsigned short op, int st);
 
-#define foreach_grant(pos, rbtree, node) \
-    for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node); \
+#define foreach_grant_safe(pos, n, rbtree, node) \
+    for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
+         (n) = rb_next(&(pos)->node); \
          &(pos)->node != NULL; \
-         (pos) = container_of(rb_next(&(pos)->node), typeof(*(pos)), node))
+         (pos) = container_of(n, typeof(*(pos)), node), \
+         (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
 
 
 static void add_persistent_gnt(struct rb_root *root,
@@ -217,10 +219,11 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
     struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
     struct persistent_gnt *persistent_gnt;
+    struct rb_node *n;
     int ret = 0;
     int segs_to_unmap = 0;
 
-    foreach_grant(persistent_gnt, root, node) {
+    foreach_grant_safe(persistent_gnt, n, root, node) {
         BUG_ON(persistent_gnt->handle ==
             BLKBACK_INVALID_HANDLE);
         gnttab_set_unmap_op(&unmap[segs_to_unmap],
@@ -230,9 +233,6 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
             persistent_gnt->handle);
 
         pages[segs_to_unmap] = persistent_gnt->page;
-        rb_erase(&persistent_gnt->node, root);
-        kfree(persistent_gnt);
-        num--;
 
         if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
             !rb_next(&persistent_gnt->node)) {
@@ -241,6 +241,10 @@ static void free_persistent_gnts(struct rb_root *root, unsigned int num)
             BUG_ON(ret);
             segs_to_unmap = 0;
         }
+
+        rb_erase(&persistent_gnt->node, root);
+        kfree(persistent_gnt);
+        num--;
     }
     BUG_ON(num != 0);
 }

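foreach_grant_safe() above fetches the successor node before the loop body runs, so the body is free to rb_erase() and kfree() the current entry. A minimal standalone C sketch of the same cache-the-next idiom on a plain singly linked list (illustrative names, not the kernel rbtree API):

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

/* Cache the successor first, so the body may free the current node. */
#define for_each_node_safe(pos, n, head) \
    for ((pos) = (head), (n) = (pos) ? (pos)->next : NULL; \
         (pos) != NULL; \
         (pos) = (n), (n) = (pos) ? (pos)->next : NULL)

int main(void)
{
    struct node *head = NULL, *pos, *n;

    for (int i = 0; i < 3; i++) {
        struct node *e = malloc(sizeof(*e));
        e->val = i;
        e->next = head;
        head = e;
    }

    for_each_node_safe(pos, n, head) {
        printf("freeing %d\n", pos->val);
        free(pos); /* safe: the successor was saved in n */
    }
    return 0;
}
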
@@ -792,6 +792,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 {
     struct llist_node *all_gnts;
     struct grant *persistent_gnt;
+    struct llist_node *n;
 
     /* Prevent new requests being issued until we fix things up. */
     spin_lock_irq(&info->io_lock);
@@ -804,7 +805,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
     /* Remove all persistent grants */
     if (info->persistent_gnts_c) {
         all_gnts = llist_del_all(&info->persistent_gnts);
-        llist_for_each_entry(persistent_gnt, all_gnts, node) {
+        llist_for_each_entry_safe(persistent_gnt, n, all_gnts, node) {
             gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
             __free_page(pfn_to_page(persistent_gnt->pfn));
             kfree(persistent_gnt);
@@ -835,7 +836,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                  struct blkif_response *bret)
 {
-    int i;
+    int i = 0;
     struct bio_vec *bvec;
     struct req_iterator iter;
     unsigned long flags;
@@ -852,7 +853,8 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
          */
         rq_for_each_segment(bvec, s->request, iter) {
             BUG_ON((bvec->bv_offset + bvec->bv_len) > PAGE_SIZE);
-            i = offset >> PAGE_SHIFT;
+            if (bvec->bv_offset < offset)
+                i++;
             BUG_ON(i >= s->req.u.rw.nr_segments);
             shared_data = kmap_atomic(
                 pfn_to_page(s->grants_used[i]->pfn));
@@ -861,7 +863,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
                 bvec->bv_len);
             bvec_kunmap_irq(bvec_data, &flags);
             kunmap_atomic(shared_data);
-            offset += bvec->bv_len;
+            offset = bvec->bv_offset + bvec->bv_len;
         }
     }
     /* Add the persistent grant into the list of free grants */

@@ -2062,7 +2062,8 @@ static void virtcons_remove(struct virtio_device *vdev)
     /* Disable interrupts for vqs */
     vdev->config->reset(vdev);
     /* Finish up work that's lined up */
-    cancel_work_sync(&portdev->control_work);
+    if (use_multiport(portdev))
+        cancel_work_sync(&portdev->control_work);
 
     list_for_each_entry_safe(port, port2, &portdev->ports, list)
         unplug_port(port);

@@ -1313,14 +1313,18 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
             if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
                 radeon_wait_for_vblank(rdev, i);
                 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
             }
         } else {
             tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
             if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
                 radeon_wait_for_vblank(rdev, i);
                 tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+                WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
             }
         }
         /* wait for the next frame */
@@ -1345,6 +1349,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
         blackout &= ~BLACKOUT_MODE_MASK;
         WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
     }
+    /* wait for the MC to settle */
+    udelay(100);
 }
 
 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1378,11 +1384,15 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
         if (ASIC_IS_DCE6(rdev)) {
             tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
             tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
+            WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
             WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+            WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
         } else {
             tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
             tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
+            WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
             WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+            WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
         }
         /* wait for the next frame */
         frame_count = radeon_get_vblank_counter(rdev, i);
@@ -2036,9 +2046,20 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
     WREG32(HDP_ADDR_CONFIG, gb_addr_config);
     WREG32(DMA_TILING_CONFIG, gb_addr_config);
 
-    tmp = gb_addr_config & NUM_PIPES_MASK;
-    tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
-                    EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+    if ((rdev->config.evergreen.max_backends == 1) &&
+        (rdev->flags & RADEON_IS_IGP)) {
+        if ((disabled_rb_mask & 3) == 1) {
+            /* RB0 disabled, RB1 enabled */
+            tmp = 0x11111111;
+        } else {
+            /* RB1 disabled, RB0 enabled */
+            tmp = 0x00000000;
+        }
+    } else {
+        tmp = gb_addr_config & NUM_PIPES_MASK;
+        tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
+                        EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
+    }
     WREG32(GB_BACKEND_MAP, tmp);
 
     WREG32(CGTS_SYS_TCC_DISABLE, 0);

@@ -1462,12 +1462,15 @@ u32 r6xx_remap_render_backend(struct radeon_device *rdev,
                   u32 disabled_rb_mask)
 {
     u32 rendering_pipe_num, rb_num_width, req_rb_num;
-    u32 pipe_rb_ratio, pipe_rb_remain;
+    u32 pipe_rb_ratio, pipe_rb_remain, tmp;
     u32 data = 0, mask = 1 << (max_rb_num - 1);
     unsigned i, j;
 
     /* mask out the RBs that don't exist on that asic */
-    disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
+    tmp = disabled_rb_mask | ((0xff << max_rb_num) & 0xff);
+    /* make sure at least one RB is available */
+    if ((tmp & 0xff) != 0xff)
+        disabled_rb_mask = tmp;
 
     rendering_pipe_num = 1 << tiling_pipe_num;
     req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);

@@ -1445,7 +1445,7 @@ static struct radeon_asic cayman_asic = {
     .vm = {
         .init = &cayman_vm_init,
         .fini = &cayman_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+        .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
         .set_page = &cayman_vm_set_page,
     },
     .ring = {
@@ -1572,7 +1572,7 @@ static struct radeon_asic trinity_asic = {
     .vm = {
         .init = &cayman_vm_init,
         .fini = &cayman_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+        .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
         .set_page = &cayman_vm_set_page,
     },
     .ring = {
@@ -1699,7 +1699,7 @@ static struct radeon_asic si_asic = {
     .vm = {
         .init = &si_vm_init,
         .fini = &si_vm_fini,
-        .pt_ring_index = R600_RING_TYPE_DMA_INDEX,
+        .pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
         .set_page = &si_vm_set_page,
     },
     .ring = {

@@ -2470,6 +2470,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
                               1),
                               ATOM_DEVICE_CRT1_SUPPORT);
         }
+        /* RV100 board with external TDMS bit mis-set.
+         * Actually uses internal TMDS, clear the bit.
+         */
+        if (dev->pdev->device == 0x5159 &&
+            dev->pdev->subsystem_vendor == 0x1014 &&
+            dev->pdev->subsystem_device == 0x029A) {
+            tmp &= ~(1 << 4);
+        }
         if ((tmp >> 4) & 0x1) {
             devices |= ATOM_DEVICE_DFP2_SUPPORT;
             radeon_add_legacy_encoder(dev,

@@ -1115,8 +1115,10 @@ radeon_user_framebuffer_create(struct drm_device *dev,
     }
 
     radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
-    if (radeon_fb == NULL)
+    if (radeon_fb == NULL) {
+        drm_gem_object_unreference_unlocked(obj);
         return ERR_PTR(-ENOMEM);
+    }
 
     ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
     if (ret) {

@@ -377,6 +377,9 @@ int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsi
 {
     int r;
 
+    /* make sure we aren't trying to allocate more space than there is on the ring */
+    if (ndw > (ring->ring_size / 4))
+        return -ENOMEM;
     /* Align requested size with padding so unlock_commit can
      * pad safely */
     ndw = (ndw + ring->align_mask) & ~ring->align_mask;

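radeon_ring_alloc() now rejects requests that could never fit before rounding the request up to the ring's alignment. Both steps in a standalone sketch (sizes and names are illustrative; the mask trick assumes a power-of-two alignment, with mask = alignment - 1):

#include <stdio.h>

#define RING_WORDS 1024u   /* illustrative ring size, in dwords */
#define ALIGN_MASK 15u     /* pad to 16-dword multiples */

static int ring_alloc(unsigned int ndw)
{
    if (ndw > RING_WORDS)                 /* cannot ever fit */
        return -1;

    /* Round up so a later commit can pad safely. */
    ndw = (ndw + ALIGN_MASK) & ~ALIGN_MASK;
    printf("reserving %u dwords\n", ndw);
    return 0;
}

int main(void)
{
    ring_alloc(100);   /* reserves 112 */
    return 0;
}
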
@@ -1,5 +1,6 @@
 cayman 0x9400
 0x0000802C GRBM_GFX_INDEX
+0x00008040 WAIT_UNTIL
 0x000084FC CP_STRMOUT_CNTL
 0x000085F0 CP_COHER_CNTL
 0x000085F4 CP_COHER_SIZE

@@ -336,6 +336,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
             WREG32(R600_CITF_CNTL, blackout);
         }
     }
+    /* wait for the MC to settle */
+    udelay(100);
 }
 
 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)

@@ -429,7 +429,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
     struct ttm_bo_device *bdev = bo->bdev;
     struct ttm_bo_driver *driver = bdev->driver;
 
-    fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
+    fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
     if (!fbo)
         return -ENOMEM;
 
@@ -448,7 +448,12 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
     fbo->vm_node = NULL;
     atomic_set(&fbo->cpu_writers, 0);
 
-    fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+    spin_lock(&bdev->fence_lock);
+    if (bo->sync_obj)
+        fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+    else
+        fbo->sync_obj = NULL;
+    spin_unlock(&bdev->fence_lock);
     kref_init(&fbo->list_kref);
     kref_init(&fbo->kref);
     fbo->destroy = &ttm_transfered_destroy;
@@ -661,13 +666,11 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
          */
 
         set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-
-        /* ttm_buffer_object_transfer accesses bo->sync_obj */
-        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
         spin_unlock(&bdev->fence_lock);
         if (tmp_obj)
             driver->sync_obj_unref(&tmp_obj);
 
+        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
         if (ret)
             return ret;
 

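ttm_buffer_object_transfer() now snapshots bo->sync_obj while holding bdev->fence_lock instead of dereferencing it unlocked. A minimal pthreads sketch of that sample-under-lock idiom (illustrative names, not the TTM API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER;
static int *sync_obj;   /* may be replaced or cleared by another thread */

/* Take a snapshot of the shared pointer while holding its lock. */
static int *sample_sync_obj(void)
{
    int *snapshot;

    pthread_mutex_lock(&fence_lock);
    snapshot = sync_obj;            /* read once, under the lock */
    pthread_mutex_unlock(&fence_lock);

    return snapshot;                /* safe local copy, possibly NULL */
}

int main(void)
{
    int fence = 7;
    sync_obj = &fence;

    int *s = sample_sync_obj();
    printf("%d\n", s ? *s : -1);
    return 0;
}
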
@@ -306,6 +306,9 @@
 #define USB_VENDOR_ID_EZKEY 0x0518
 #define USB_DEVICE_ID_BTC_8193 0x0002
 
+#define USB_VENDOR_ID_FORMOSA 0x147a
+#define USB_DEVICE_ID_FORMOSA_IR_RECEIVER 0xe03e
+
 #define USB_VENDOR_ID_FREESCALE 0x15A2
 #define USB_DEVICE_ID_FREESCALE_MX28 0x004F
 

@@ -540,13 +540,24 @@ static int i2c_hid_output_raw_report(struct hid_device *hid, __u8 *buf,
 {
     struct i2c_client *client = hid->driver_data;
     int report_id = buf[0];
+    int ret;
 
     if (report_type == HID_INPUT_REPORT)
         return -EINVAL;
 
-    return i2c_hid_set_report(client,
+    if (report_id) {
+        buf++;
+        count--;
+    }
+
+    ret = i2c_hid_set_report(client,
                 report_type == HID_FEATURE_REPORT ? 0x03 : 0x02,
                 report_id, buf, count);
+
+    if (report_id && ret >= 0)
+        ret++; /* add report_id to the number of transfered bytes */
+
+    return ret;
 }
 
 static int i2c_hid_parse(struct hid_device *hid)

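In the i2c-hid change above, a non-zero report ID travels out of band in the SET_REPORT transfer, so the payload pointer is advanced past it and the byte is added back to the returned count. A standalone sketch of that bookkeeping (send_report here is a hypothetical stand-in for the bus transfer):

#include <stdio.h>

/* Hypothetical bus transfer: returns bytes written or a negative error. */
static int send_report(int report_id, const unsigned char *buf, int count)
{
    (void)report_id;
    (void)buf;
    return count;
}

static int output_report(unsigned char *buf, int count)
{
    int report_id = buf[0];
    int ret;

    if (report_id) {   /* the ID travels out of band, strip it here */
        buf++;
        count--;
    }

    ret = send_report(report_id, buf, count);

    if (report_id && ret >= 0)
        ret++;         /* report the ID byte as transferred, too */

    return ret;
}

int main(void)
{
    unsigned char report[] = { 0x02, 0xaa, 0xbb };
    printf("%d bytes\n", output_report(report, 3)); /* prints 3 */
    return 0;
}
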
@@ -70,6 +70,7 @@ static const struct hid_blacklist {
     { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
     { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
     { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+    { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
     { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
     { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
     { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },

@@ -263,20 +263,15 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
         struct qib_qp __rcu **qpp;
 
         qpp = &dev->qp_table[n];
-        q = rcu_dereference_protected(*qpp,
-            lockdep_is_held(&dev->qpt_lock));
-        for (; q; qpp = &q->next) {
+        for (; (q = rcu_dereference_protected(*qpp,
+                lockdep_is_held(&dev->qpt_lock))) != NULL;
+                qpp = &q->next)
             if (q == qp) {
                 atomic_dec(&qp->refcount);
                 *qpp = qp->next;
                 rcu_assign_pointer(qp->next, NULL);
-                q = rcu_dereference_protected(*qpp,
-                    lockdep_is_held(&dev->qpt_lock));
                 break;
             }
-            q = rcu_dereference_protected(*qpp,
-                lockdep_is_held(&dev->qpt_lock));
-        }
     }
 
     spin_unlock_irqrestore(&dev->qpt_lock, flags);

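The rewritten remove_qp() loop advances a pointer-to-pointer down the hash chain, so unlinking needs no head-of-list special case. The classic idiom in standalone C (plain pointers, without the RCU annotations):

#include <stdio.h>

struct qp { int id; struct qp *next; };

/* Unlink the first node matching id; works for head and interior nodes. */
static void remove_node(struct qp **head, int id)
{
    struct qp **pp, *q;

    for (pp = head; (q = *pp) != NULL; pp = &q->next) {
        if (q->id == id) {
            *pp = q->next;   /* splice it out through the slot */
            q->next = NULL;
            break;
        }
    }
}

int main(void)
{
    struct qp c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    struct qp *head = &a;

    remove_node(&head, 2);
    for (struct qp *q = head; q; q = q->next)
        printf("%d ", q->id);   /* prints: 1 3 */
    return 0;
}
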
@@ -741,6 +741,9 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 
     tx_req->mapping = addr;
 
+    skb_orphan(skb);
+    skb_dst_drop(skb);
+
     rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
                addr, skb->len);
     if (unlikely(rc)) {
@@ -752,9 +755,6 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
         dev->trans_start = jiffies;
         ++tx->tx_head;
 
-        skb_orphan(skb);
-        skb_dst_drop(skb);
-
         if (++priv->tx_outstanding == ipoib_sendq_size) {
             ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                   tx->qp->qp_num);

@@ -600,6 +600,9 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
         netif_stop_queue(dev);
     }
 
+    skb_orphan(skb);
+    skb_dst_drop(skb);
+
     rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
                address->ah, qpn, tx_req, phead, hlen);
     if (unlikely(rc)) {
@@ -615,9 +618,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 
         address->last_send = priv->tx_head;
         ++priv->tx_head;
-
-        skb_orphan(skb);
-        skb_dst_drop(skb);
     }
 
     if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))

@@ -2746,19 +2746,9 @@ static int thin_iterate_devices(struct dm_target *ti,
     return 0;
 }
 
-/*
- * A thin device always inherits its queue limits from its pool.
- */
-static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
-{
-    struct thin_c *tc = ti->private;
-
-    *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
-}
-
 static struct target_type thin_target = {
     .name = "thin",
-    .version = {1, 6, 0},
+    .version = {1, 7, 0},
     .module = THIS_MODULE,
     .ctr = thin_ctr,
     .dtr = thin_dtr,
@@ -2767,7 +2757,6 @@ static struct target_type thin_target = {
     .postsuspend = thin_postsuspend,
     .status = thin_status,
     .iterate_devices = thin_iterate_devices,
-    .io_hints = thin_io_hints,
 };
 
 /*----------------------------------------------------------------*/

@@ -1188,6 +1188,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
 {
     struct dm_target *ti;
     sector_t len;
+    unsigned num_requests;
 
     do {
         ti = dm_table_find_target(ci->map, ci->sector);
@@ -1200,7 +1201,8 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
          * reconfiguration might also have changed that since the
          * check was performed.
          */
-        if (!get_num_requests || !get_num_requests(ti))
+        num_requests = get_num_requests ? get_num_requests(ti) : 0;
+        if (!num_requests)
             return -EOPNOTSUPP;
 
         if (is_split_required && !is_split_required(ti))
@@ -1208,7 +1210,7 @@ static int __clone_and_map_changing_extent_only(struct clone_info *ci,
         else
             len = min(ci->sector_count, max_io_len(ci->sector, ti));
 
-        __issue_target_requests(ci, ti, ti->num_discard_requests, len);
+        __issue_target_requests(ci, ti, num_requests, len);
 
         ci->sector += len;
     } while (ci->sector_count -= len);

@@ -374,6 +374,7 @@ static int usb_keene_probe(struct usb_interface *intf,
     radio->vdev.ioctl_ops = &usb_keene_ioctl_ops;
     radio->vdev.lock = &radio->lock;
     radio->vdev.release = video_device_release_empty;
+    radio->vdev.vfl_dir = VFL_DIR_TX;
 
     radio->usbdev = interface_to_usbdev(intf);
     radio->intf = intf;

@@ -250,6 +250,7 @@ static struct video_device radio_si4713_vdev_template = {
     .name = "radio-si4713",
     .release = video_device_release,
     .ioctl_ops = &radio_si4713_ioctl_ops,
+    .vfl_dir = VFL_DIR_TX,
 };
 
 /* Platform driver interface */

@@ -1971,6 +1971,7 @@ static struct video_device wl1273_viddev_template = {
     .ioctl_ops = &wl1273_ioctl_ops,
     .name = WL1273_FM_DRIVER_NAME,
     .release = wl1273_vdev_release,
+    .vfl_dir = VFL_DIR_TX,
 };
 
 static int wl1273_fm_radio_remove(struct platform_device *pdev)

@@ -518,6 +518,16 @@ static struct video_device fm_viddev_template = {
     .ioctl_ops = &fm_drv_ioctl_ops,
     .name = FM_DRV_NAME,
     .release = video_device_release,
+    /*
+     * To ensure both the tuner and modulator ioctls are accessible we
+     * set the vfl_dir to M2M to indicate this.
+     *
+     * It is not really a mem2mem device of course, but it can both receive
+     * and transmit using the same radio device. It's the only radio driver
+     * that does this and it should really be split in two radio devices,
+     * but that would affect applications using this driver.
+     */
+    .vfl_dir = VFL_DIR_M2M,
 };
 
 int fm_v4l2_init_video_device(struct fmdev *fmdev, int radio_nr)

@@ -20,6 +20,7 @@
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/log2.h>
+#include <linux/mmc/pm.h>
 #include <linux/mmc/host.h>
 #include <linux/mmc/card.h>
 #include <linux/amba/bus.h>
@@ -59,6 +60,7 @@ static unsigned int fmax = 515633;
  * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
  * @pwrreg_powerup: power up value for MMCIPOWER register
  * @signal_direction: input/out direction of bus signals can be indicated
+ * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
  */
 struct variant_data {
     unsigned int clkreg;
@@ -71,6 +73,7 @@ struct variant_data {
     bool blksz_datactrl16;
     u32 pwrreg_powerup;
     bool signal_direction;
+    bool pwrreg_clkgate;
 };
 
 static struct variant_data variant_arm = {
@@ -87,6 +90,14 @@ static struct variant_data variant_arm_extended_fifo = {
     .pwrreg_powerup = MCI_PWR_UP,
 };
 
+static struct variant_data variant_arm_extended_fifo_hwfc = {
+    .fifosize = 128 * 4,
+    .fifohalfsize = 64 * 4,
+    .clkreg_enable = MCI_ARM_HWFCEN,
+    .datalength_bits = 16,
+    .pwrreg_powerup = MCI_PWR_UP,
+};
+
 static struct variant_data variant_u300 = {
     .fifosize = 16 * 4,
     .fifohalfsize = 8 * 4,
@@ -95,6 +106,7 @@ static struct variant_data variant_u300 = {
     .sdio = true,
     .pwrreg_powerup = MCI_PWR_ON,
     .signal_direction = true,
+    .pwrreg_clkgate = true,
 };
 
 static struct variant_data variant_nomadik = {
@@ -106,6 +118,7 @@ static struct variant_data variant_nomadik = {
     .st_clkdiv = true,
     .pwrreg_powerup = MCI_PWR_ON,
     .signal_direction = true,
+    .pwrreg_clkgate = true,
 };
 
 static struct variant_data variant_ux500 = {
@@ -118,6 +131,7 @@ static struct variant_data variant_ux500 = {
     .st_clkdiv = true,
     .pwrreg_powerup = MCI_PWR_ON,
     .signal_direction = true,
+    .pwrreg_clkgate = true,
 };
 
 static struct variant_data variant_ux500v2 = {
@@ -131,8 +145,27 @@ static struct variant_data variant_ux500v2 = {
     .blksz_datactrl16 = true,
     .pwrreg_powerup = MCI_PWR_ON,
     .signal_direction = true,
+    .pwrreg_clkgate = true,
 };
 
+/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+                  struct mmc_data *data)
+{
+    if (!data)
+        return 0;
+
+    if (!is_power_of_2(data->blksz)) {
+        dev_err(mmc_dev(host->mmc),
+            "unsupported block size (%d bytes)\n", data->blksz);
+        return -EINVAL;
+    }
+
+    return 0;
+}
+
 /*
  * This must be called with host->lock held
  */

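mmci_validate_data() centralizes the power-of-two block-size requirement that mmci_request() used to open-code. The bit trick behind is_power_of_2(), in a standalone sketch:

#include <stdbool.h>
#include <stdio.h>

/* A power of two has exactly one bit set, so x & (x - 1) clears it. */
static bool is_pow2(unsigned int x)
{
    return x != 0 && (x & (x - 1)) == 0;
}

static int validate_blksz(unsigned int blksz)
{
    if (!is_pow2(blksz)) {
        fprintf(stderr, "unsupported block size (%u bytes)\n", blksz);
        return -1;
    }
    return 0;
}

int main(void)
{
    validate_blksz(512);   /* ok */
    validate_blksz(513);   /* rejected */
    return 0;
}
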
@@ -202,6 +235,9 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
     if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
         clk |= MCI_ST_8BIT_BUS;
 
+    if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
+        clk |= MCI_ST_UX500_NEG_EDGE;
+
     mmci_write_clkreg(host, clk);
 }
 
@@ -352,10 +388,33 @@ static inline void mmci_dma_release(struct mmci_host *host)
     host->dma_rx_channel = host->dma_tx_channel = NULL;
 }
 
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+    dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+    dmaengine_terminate_all(host->dma_current);
+    host->dma_current = NULL;
+    host->dma_desc_current = NULL;
+    host->data->host_cookie = 0;
+}
+
 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
-    struct dma_chan *chan = host->dma_current;
+    struct dma_chan *chan;
     enum dma_data_direction dir;
+
+    if (data->flags & MMC_DATA_READ) {
+        dir = DMA_FROM_DEVICE;
+        chan = host->dma_rx_channel;
+    } else {
+        dir = DMA_TO_DEVICE;
+        chan = host->dma_tx_channel;
+    }
+
+    dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
     u32 status;
     int i;
 
@@ -374,19 +433,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
      * contiguous buffers. On TX, we'll get a FIFO underrun error.
      */
     if (status & MCI_RXDATAAVLBLMASK) {
-        dmaengine_terminate_all(chan);
+        mmci_dma_data_error(host);
         if (!data->error)
             data->error = -EIO;
     }
 
-    if (data->flags & MMC_DATA_WRITE) {
-        dir = DMA_TO_DEVICE;
-    } else {
-        dir = DMA_FROM_DEVICE;
-    }
-
     if (!data->host_cookie)
-        dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+        mmci_dma_unmap(host, data);
 
     /*
      * Use of DMA with scatter-gather is impossible.
@@ -396,16 +449,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
         dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
         mmci_dma_release(host);
     }
+
+    host->dma_current = NULL;
+    host->dma_desc_current = NULL;
 }
 
-static void mmci_dma_data_error(struct mmci_host *host)
-{
-    dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
-    dmaengine_terminate_all(host->dma_current);
-}
-
-static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
-                  struct mmci_host_next *next)
+/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
+static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+                struct dma_chan **dma_chan,
+                struct dma_async_tx_descriptor **dma_desc)
 {
     struct variant_data *variant = host->variant;
     struct dma_slave_config conf = {
@@ -423,16 +475,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
     enum dma_data_direction buffer_dirn;
     int nr_sg;
 
-    /* Check if next job is already prepared */
-    if (data->host_cookie && !next &&
-        host->dma_current && host->dma_desc_current)
-        return 0;
-
-    if (!next) {
-        host->dma_current = NULL;
-        host->dma_desc_current = NULL;
-    }
-
     if (data->flags & MMC_DATA_READ) {
         conf.direction = DMA_DEV_TO_MEM;
         buffer_dirn = DMA_FROM_DEVICE;
@@ -462,29 +504,41 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
     if (!desc)
         goto unmap_exit;
 
-    if (next) {
-        next->dma_chan = chan;
-        next->dma_desc = desc;
-    } else {
-        host->dma_current = chan;
-        host->dma_desc_current = desc;
-    }
+    *dma_chan = chan;
+    *dma_desc = desc;
 
     return 0;
 
 unmap_exit:
-    if (!next)
-        dmaengine_terminate_all(chan);
     dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
     return -ENOMEM;
 }
 
+static inline int mmci_dma_prep_data(struct mmci_host *host,
+                     struct mmc_data *data)
+{
+    /* Check if next job is already prepared. */
+    if (host->dma_current && host->dma_desc_current)
+        return 0;
+
+    /* No job were prepared thus do it now. */
+    return __mmci_dma_prep_data(host, data, &host->dma_current,
+                    &host->dma_desc_current);
+}
+
+static inline int mmci_dma_prep_next(struct mmci_host *host,
+                     struct mmc_data *data)
+{
+    struct mmci_host_next *nd = &host->next_data;
+    return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+}
+
 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 {
     int ret;
     struct mmc_data *data = host->data;
 
-    ret = mmci_dma_prep_data(host, host->data, NULL);
+    ret = mmci_dma_prep_data(host, host->data);
     if (ret)
         return ret;
 

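__mmci_dma_prep_data() above returns the prepared channel/descriptor pair through caller-supplied slots, so one routine serves both the current transfer and the pre-queued next one. A stripped-down sketch of that two-slot handoff (illustrative types, no real DMA engine):

#include <stddef.h>
#include <stdio.h>

struct desc { int id; };

struct host {
    struct desc *cur_desc;    /* descriptor for the running transfer */
    struct desc *next_desc;   /* descriptor prepared ahead of time */
};

/* One prepare routine; the caller chooses which slot it fills. */
static int prep(struct desc **slot, int id, struct desc *storage)
{
    storage->id = id;
    *slot = storage;
    return 0;
}

int main(void)
{
    struct host h = { NULL, NULL };
    struct desc a, b;

    prep(&h.cur_desc, 1, &a);    /* prepare the current request */
    prep(&h.next_desc, 2, &b);   /* pre-queue the following one */

    /* When the current transfer completes, promote the next slot. */
    h.cur_desc = h.next_desc;
    h.next_desc = NULL;
    printf("now running descriptor %d\n", h.cur_desc->id);
    return 0;
}
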
@ -514,19 +568,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
|
||||||
{
|
{
|
||||||
struct mmci_host_next *next = &host->next_data;
|
struct mmci_host_next *next = &host->next_data;
|
||||||
|
|
||||||
if (data->host_cookie && data->host_cookie != next->cookie) {
|
WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
|
||||||
pr_warning("[%s] invalid cookie: data->host_cookie %d"
|
WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
|
||||||
" host->next_data.cookie %d\n",
|
|
||||||
__func__, data->host_cookie, host->next_data.cookie);
|
|
||||||
data->host_cookie = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!data->host_cookie)
|
|
||||||
return;
|
|
||||||
|
|
||||||
host->dma_desc_current = next->dma_desc;
|
host->dma_desc_current = next->dma_desc;
|
||||||
host->dma_current = next->dma_chan;
|
host->dma_current = next->dma_chan;
|
||||||
|
|
||||||
next->dma_desc = NULL;
|
next->dma_desc = NULL;
|
||||||
next->dma_chan = NULL;
|
next->dma_chan = NULL;
|
||||||
}
|
}
|
||||||
|
@ -541,19 +587,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
|
||||||
if (!data)
|
if (!data)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (data->host_cookie) {
|
BUG_ON(data->host_cookie);
|
||||||
data->host_cookie = 0;
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* if config for dma */
|
if (mmci_validate_data(host, data))
|
||||||
if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
|
return;
|
||||||
((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
|
|
||||||
if (mmci_dma_prep_data(host, data, nd))
|
if (!mmci_dma_prep_next(host, data))
|
||||||
data->host_cookie = 0;
|
data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
|
||||||
else
|
|
||||||
data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
|
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
|
||||||
|
@ -561,29 +601,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
|
||||||
{
|
{
|
||||||
struct mmci_host *host = mmc_priv(mmc);
|
struct mmci_host *host = mmc_priv(mmc);
|
||||||
struct mmc_data *data = mrq->data;
|
struct mmc_data *data = mrq->data;
|
||||||
struct dma_chan *chan;
|
|
||||||
enum dma_data_direction dir;
|
|
||||||
|
|
||||||
if (!data)
|
if (!data || !data->host_cookie)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
if (data->flags & MMC_DATA_READ) {
|
mmci_dma_unmap(host, data);
|
||||||
dir = DMA_FROM_DEVICE;
|
|
||||||
chan = host->dma_rx_channel;
|
|
||||||
} else {
|
|
||||||
dir = DMA_TO_DEVICE;
|
|
||||||
chan = host->dma_tx_channel;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
if (err) {
|
||||||
|
struct mmci_host_next *next = &host->next_data;
|
||||||
|
struct dma_chan *chan;
|
||||||
|
if (data->flags & MMC_DATA_READ)
|
||||||
|
chan = host->dma_rx_channel;
|
||||||
|
else
|
||||||
|
chan = host->dma_tx_channel;
|
||||||
|
dmaengine_terminate_all(chan);
|
||||||
|
|
||||||
/* if config for dma */
|
next->dma_desc = NULL;
|
||||||
if (chan) {
|
next->dma_chan = NULL;
|
||||||
if (err)
|
|
||||||
dmaengine_terminate_all(chan);
|
|
||||||
if (data->host_cookie)
|
|
||||||
dma_unmap_sg(mmc_dev(host->mmc), data->sg,
|
|
||||||
data->sg_len, dir);
|
|
||||||
mrq->data->host_cookie = 0;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -604,6 +638,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static inline void mmci_dma_finalize(struct mmci_host *host,
|
||||||
|
struct mmc_data *data)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
static inline void mmci_dma_data_error(struct mmci_host *host)
|
static inline void mmci_dma_data_error(struct mmci_host *host)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
@ -680,6 +719,9 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
|
||||||
mmci_write_clkreg(host, clk);
|
mmci_write_clkreg(host, clk);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50)
|
||||||
|
datactrl |= MCI_ST_DPSM_DDRMODE;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Attempt to use DMA operation mode, if this
|
* Attempt to use DMA operation mode, if this
|
||||||
* should fail, fall back to PIO mode
|
* should fail, fall back to PIO mode
|
||||||
|
@ -751,8 +793,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
|
||||||
u32 remain, success;
|
u32 remain, success;
|
||||||
|
|
||||||
/* Terminate the DMA transfer */
|
/* Terminate the DMA transfer */
|
||||||
if (dma_inprogress(host))
|
if (dma_inprogress(host)) {
|
||||||
mmci_dma_data_error(host);
|
mmci_dma_data_error(host);
|
||||||
|
mmci_dma_unmap(host, data);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Calculate how far we are into the transfer. Note that
|
* Calculate how far we are into the transfer. Note that
|
||||||
|
@ -791,7 +835,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
|
||||||
|
|
||||||
if (status & MCI_DATAEND || data->error) {
|
if (status & MCI_DATAEND || data->error) {
|
||||||
if (dma_inprogress(host))
|
if (dma_inprogress(host))
|
||||||
mmci_dma_unmap(host, data);
|
mmci_dma_finalize(host, data);
|
||||||
mmci_stop_data(host);
|
mmci_stop_data(host);
|
||||||
|
|
||||||
if (!data->error)
|
if (!data->error)
|
||||||
|
@ -828,8 +872,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
|
||||||
if (!cmd->data || cmd->error) {
|
if (!cmd->data || cmd->error) {
|
||||||
if (host->data) {
|
if (host->data) {
|
||||||
/* Terminate the DMA transfer */
|
/* Terminate the DMA transfer */
|
||||||
if (dma_inprogress(host))
|
if (dma_inprogress(host)) {
|
||||||
mmci_dma_data_error(host);
|
mmci_dma_data_error(host);
|
||||||
|
mmci_dma_unmap(host, host->data);
|
||||||
|
}
|
||||||
mmci_stop_data(host);
|
mmci_stop_data(host);
|
||||||
}
|
}
|
||||||
mmci_request_end(host, cmd->mrq);
|
mmci_request_end(host, cmd->mrq);
|
||||||
|
@ -1055,10 +1101,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
|
||||||
|
|
||||||
WARN_ON(host->mrq != NULL);
|
WARN_ON(host->mrq != NULL);
|
||||||
|
|
||||||
if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
|
mrq->cmd->error = mmci_validate_data(host, mrq->data);
|
||||||
dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
|
if (mrq->cmd->error) {
|
||||||
mrq->data->blksz);
|
|
||||||
mrq->cmd->error = -EINVAL;
|
|
||||||
mmc_request_done(mmc, mrq);
|
mmc_request_done(mmc, mrq);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@@ -1086,7 +1130,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	struct variant_data *variant = host->variant;
 	u32 pwr = 0;
 	unsigned long flags;
-	int ret;
 
 	pm_runtime_get_sync(mmc_dev(mmc));
 
@@ -1096,23 +1139,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
 	switch (ios->power_mode) {
 	case MMC_POWER_OFF:
-		if (host->vcc)
-			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
+		if (!IS_ERR(mmc->supply.vmmc))
+			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
 		break;
 	case MMC_POWER_UP:
-		if (host->vcc) {
-			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
-			if (ret) {
-				dev_err(mmc_dev(mmc), "unable to set OCR\n");
-				/*
-				 * The .set_ios() function in the mmc_host_ops
-				 * struct return void, and failing to set the
-				 * power should be rare so we print an error
-				 * and return here.
-				 */
-				goto out;
-			}
-		}
+		if (!IS_ERR(mmc->supply.vmmc))
+			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
 		/*
 		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP
 		 * and instead uses MCI_PWR_ON so apply whatever value is
@@ -1154,6 +1187,13 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 		}
 	}
 
+	/*
+	 * If clock = 0 and the variant requires the MMCIPOWER to be used for
+	 * gating the clock, the MCI_PWR_ON bit is cleared.
+	 */
+	if (!ios->clock && variant->pwrreg_clkgate)
+		pwr &= ~MCI_PWR_ON;
+
 	spin_lock_irqsave(&host->lock, flags);
 
 	mmci_set_clkreg(host, ios->clock);
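
The pwrreg_clkgate flag tested above lives in the per-variant description near the top of mmci.c, outside this excerpt. Its assumed shape is a single flag in struct variant_data (the field name comes from the hunk; the surrounding struct is sketched, not quoted):

struct variant_data {
	/* ... */
	bool	pwrreg_clkgate;	/* MMCIPOWER gates the bus clock when set */
	/* ... */
};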
@@ -1161,7 +1201,6 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
 	spin_unlock_irqrestore(&host->lock, flags);
 
- out:
 	pm_runtime_mark_last_busy(mmc_dev(mmc));
 	pm_runtime_put_autosuspend(mmc_dev(mmc));
 }
@@ -1384,32 +1423,19 @@ static int mmci_probe(struct amba_device *dev,
 	} else
 		dev_warn(&dev->dev, "could not get default pinstate\n");
 
-#ifdef CONFIG_REGULATOR
-	/* If we're using the regulator framework, try to fetch a regulator */
-	host->vcc = regulator_get(&dev->dev, "vmmc");
-	if (IS_ERR(host->vcc))
-		host->vcc = NULL;
-	else {
-		int mask = mmc_regulator_get_ocrmask(host->vcc);
-
-		if (mask < 0)
-			dev_err(&dev->dev, "error getting OCR mask (%d)\n",
-				mask);
-		else {
-			host->mmc->ocr_avail = (u32) mask;
-			if (plat->ocr_mask)
-				dev_warn(&dev->dev,
-				 "Provided ocr_mask/setpower will not be used "
-				 "(using regulator instead)\n");
-		}
-	}
-#endif
-	/* Fall back to platform data if no regulator is found */
-	if (host->vcc == NULL)
+	/* Get regulators and the supported OCR mask */
+	mmc_regulator_get_supply(mmc);
+	if (!mmc->ocr_avail)
 		mmc->ocr_avail = plat->ocr_mask;
+	else if (plat->ocr_mask)
+		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
 
 	mmc->caps = plat->capabilities;
 	mmc->caps2 = plat->capabilities2;
 
+	/* We support these PM capabilities. */
+	mmc->pm_caps = MMC_PM_KEEP_POWER;
+
 	/*
 	 * We can do SGIO
 	 */
@@ -1585,10 +1611,6 @@ static int mmci_remove(struct amba_device *dev)
 		clk_disable_unprepare(host->clk);
 		clk_put(host->clk);
 
-		if (host->vcc)
-			mmc_regulator_set_ocr(mmc, host->vcc, 0);
-		regulator_put(host->vcc);
-
 		mmc_free_host(mmc);
 
 		amba_release_regions(dev);
@@ -1636,8 +1658,37 @@ static int mmci_resume(struct device *dev)
 }
 #endif
 
+#ifdef CONFIG_PM_RUNTIME
+static int mmci_runtime_suspend(struct device *dev)
+{
+	struct amba_device *adev = to_amba_device(dev);
+	struct mmc_host *mmc = amba_get_drvdata(adev);
+
+	if (mmc) {
+		struct mmci_host *host = mmc_priv(mmc);
+		clk_disable_unprepare(host->clk);
+	}
+
+	return 0;
+}
+
+static int mmci_runtime_resume(struct device *dev)
+{
+	struct amba_device *adev = to_amba_device(dev);
+	struct mmc_host *mmc = amba_get_drvdata(adev);
+
+	if (mmc) {
+		struct mmci_host *host = mmc_priv(mmc);
+		clk_prepare_enable(host->clk);
+	}
+
+	return 0;
+}
+#endif
+
 static const struct dev_pm_ops mmci_dev_pm_ops = {
 	SET_SYSTEM_SLEEP_PM_OPS(mmci_suspend, mmci_resume)
+	SET_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
 };
 
 static struct amba_id mmci_ids[] = {
@@ -1651,6 +1702,11 @@ static struct amba_id mmci_ids[] = {
 		.mask	= 0xff0fffff,
 		.data	= &variant_arm_extended_fifo,
 	},
+	{
+		.id	= 0x02041180,
+		.mask	= 0xff0fffff,
+		.data	= &variant_arm_extended_fifo_hwfc,
+	},
 	{
 		.id	= 0x00041181,
 		.mask	= 0x000fffff,
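
With mmci_runtime_suspend()/mmci_runtime_resume() wired into mmci_dev_pm_ops above, the driver's register accesses can be bracketed with runtime-PM calls so the AMBA bus clock is gated whenever the host sits idle. A usage sketch of that pairing (the same pattern already appears in mmci_set_ios() earlier in this diff):

	pm_runtime_get_sync(mmc_dev(mmc));	/* resumes: clk_prepare_enable() */

	/* ... access MMCI registers ... */

	pm_runtime_mark_last_busy(mmc_dev(mmc));
	pm_runtime_put_autosuspend(mmc_dev(mmc));	/* idle: clock gated after the autosuspend delay */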
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -28,6 +28,8 @@
 #define MCI_ST_UX500_NEG_EDGE	(1 << 13)
 #define MCI_ST_UX500_HWFCEN	(1 << 14)
 #define MCI_ST_UX500_CLK_INV	(1 << 15)
+/* Modified PL180 on Versatile Express platform */
+#define MCI_ARM_HWFCEN		(1 << 12)
 
 #define MMCIARGUMENT		0x008
 #define MMCICOMMAND		0x00c
@@ -193,7 +195,6 @@ struct mmci_host {
 	/* pio stuff */
 	struct sg_mapping_iter	sg_miter;
 	unsigned int		size;
-	struct regulator	*vcc;
 
 	/* pinctrl handles */
 	struct pinctrl		*pinctrl;
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -272,6 +272,7 @@ config MTD_DOCG3
 	tristate "M-Systems Disk-On-Chip G3"
 	select BCH
 	select BCH_CONST_PARAMS
+	select BITREVERSE
 	---help---
 	  This provides an MTD device driver for the M-Systems DiskOnChip
 	  G3 devices.
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -170,7 +170,7 @@ static int of_flash_probe(struct platform_device *dev)
 	resource_size_t res_size;
 	struct mtd_part_parser_data ppdata;
 	bool map_indirect;
-	const char *mtd_name;
+	const char *mtd_name = NULL;
 
 	match = of_match_device(of_flash_match, &dev->dev);
 	if (!match)
--- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
+++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
@@ -17,8 +17,8 @@
 #include "bcm47xxnflash.h"
 
 /* Broadcom uses 1'000'000 but it seems to be too many. Tests on WNDR4500 has
- * shown 164 retries as maxiumum. */
-#define NFLASH_READY_RETRIES		1000
+ * shown ~1000 retries as maxiumum. */
+#define NFLASH_READY_RETRIES		10000
 
 #define NFLASH_SECTOR_SIZE		512
 
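
NFLASH_READY_RETRIES only bounds a busy-wait loop, so raising it from 1000 to 10000 leaves headroom above the ~1000 polls observed on WNDR4500 hardware. An illustration of the bounded-poll pattern the constant feeds (the accessor and ready-flag names below are placeholders, not the driver's actual identifiers):

static int nflash_wait_ready(void)
{
	int i;

	for (i = 0; i < NFLASH_READY_RETRIES; i++) {
		if (nflash_read_ctl() & NFLASH_CTL_READY)	/* hypothetical accessor and bit */
			return 0;	/* controller signalled ready */
	}

	return -EBUSY;	/* still busy after 10000 polls */
}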
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -523,7 +523,7 @@ static struct nand_ecclayout hwecc4_2048 __initconst = {
 static const struct of_device_id davinci_nand_of_match[] = {
 	{.compatible = "ti,davinci-nand", },
 	{},
-}
+};
 MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
 
 static struct davinci_nand_pdata
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2857,8 +2857,11 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
 	int i;
 	int val;
 
-	/* ONFI need to be probed in 8 bits mode */
-	WARN_ON(chip->options & NAND_BUSWIDTH_16);
+	/* ONFI need to be probed in 8 bits mode, and 16 bits should be selected with NAND_BUSWIDTH_AUTO */
+	if (chip->options & NAND_BUSWIDTH_16) {
+		pr_err("Trying ONFI probe in 16 bits mode, aborting !\n");
+		return 0;
+	}
 	/* Try ONFI for unknown chip or LP */
 	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
 	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
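
The signature check is cut off at the hunk boundary above; in context it reads all four ID bytes before committing to ONFI parameter-page reads, which is why probing in 16-bit mode (where byte reads come back garbled) must bail out first. A sketch of the complete check, assuming the remaining two comparisons mirror the visible ones:

	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
	    chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
		return 0;	/* not an ONFI chip */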
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
 		pr_info("%s: Setting primary slave to None.\n",
 			bond->dev->name);
 		bond->primary_slave = NULL;
+		memset(bond->params.primary, 0, sizeof(bond->params.primary));
 		bond_select_active_slave(bond);
 		goto out;
 	}
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -488,8 +488,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
 
 	priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
			IFX_WRITE_LOW_16BIT(mask));
+
+	/* According to C_CAN documentation, the reserved bit
+	 * in IFx_MASK2 register is fixed 1
+	 */
 	priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
-			IFX_WRITE_HIGH_16BIT(mask));
+			IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
 
 	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
			IFX_WRITE_LOW_16BIT(id));
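
Bit 13 of IFx_MASK2 is documented as a reserved bit fixed at 1, so the driver now ORs it in unconditionally instead of relying on the caller's mask value. A stand-alone illustration of the arithmetic (the IFX_WRITE_HIGH_16BIT definition is assumed from c_can.c, not quoted from this diff):

#define BIT(n)			(1U << (n))
#define IFX_WRITE_HIGH_16BIT(x)	(((x) & 0xFFFF0000) >> 16)	/* assumed helper shape */

/* Even an all-zero mask now writes 0x2000, keeping the fixed bit set: */
unsigned int mask2 = IFX_WRITE_HIGH_16BIT(0x0) | BIT(13);	/* == 0x2000 */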
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -36,13 +36,13 @@
 
 #define DRV_VER			"4.4.161.0u"
 #define DRV_NAME		"be2net"
-#define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME			"Emulex OneConnect 10Gbps NIC"
+#define BE_NAME			"Emulex BladeEngine2"
+#define BE3_NAME		"Emulex BladeEngine3"
+#define OC_NAME			"Emulex OneConnect"
 #define OC_NAME_BE		OC_NAME	"(be3)"
 #define OC_NAME_LANCER		OC_NAME "(Lancer)"
 #define OC_NAME_SH		OC_NAME "(Skyhawk)"
-#define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver"
+#define DRV_DESC		"Emulex OneConnect 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID		0x19a2
 #define EMULEX_VENDOR_ID	0x10df
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -25,7 +25,7 @@
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 
 static unsigned int num_vfs;
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -232,6 +232,7 @@
 #define E1000_CTRL_FRCDPX	0x00001000	/* Force Duplex */
 #define E1000_CTRL_LANPHYPC_OVERRIDE	0x00010000	/* SW control of LANPHYPC */
 #define E1000_CTRL_LANPHYPC_VALUE	0x00020000	/* SW value of LANPHYPC */
+#define E1000_CTRL_MEHE		0x00080000	/* Memory Error Handling Enable */
 #define E1000_CTRL_SWDPIN0	0x00040000	/* SWDPIN 0 value */
 #define E1000_CTRL_SWDPIN1	0x00080000	/* SWDPIN 1 value */
 #define E1000_CTRL_SWDPIO0	0x00400000	/* SWDPIN 0 Input or output */
@@ -389,6 +390,12 @@
 
 #define E1000_PBS_16K E1000_PBA_16K
 
+/* Uncorrectable/correctable ECC Error counts and enable bits */
+#define E1000_PBECCSTS_CORR_ERR_CNT_MASK	0x000000FF
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK	0x0000FF00
+#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT	8
+#define E1000_PBECCSTS_ECC_ENABLE		0x00010000
+
 #define IFS_MAX		80
 #define IFS_MIN		40
 #define IFS_RATIO	4
@@ -408,6 +415,7 @@
 #define E1000_ICR_RXSEQ		0x00000008	/* Rx sequence error */
 #define E1000_ICR_RXDMT0	0x00000010	/* Rx desc min. threshold (0) */
 #define E1000_ICR_RXT0		0x00000080	/* Rx timer intr (ring 0) */
+#define E1000_ICR_ECCER		0x00400000	/* Uncorrectable ECC Error */
 #define E1000_ICR_INT_ASSERTED	0x80000000	/* If this bit asserted, the driver should claim the interrupt */
 #define E1000_ICR_RXQ0		0x00100000	/* Rx Queue 0 Interrupt */
 #define E1000_ICR_RXQ1		0x00200000	/* Rx Queue 1 Interrupt */
@@ -443,6 +451,7 @@
 #define E1000_IMS_RXSEQ		E1000_ICR_RXSEQ		/* Rx sequence error */
 #define E1000_IMS_RXDMT0	E1000_ICR_RXDMT0	/* Rx desc min. threshold */
 #define E1000_IMS_RXT0		E1000_ICR_RXT0		/* Rx timer intr */
+#define E1000_IMS_ECCER		E1000_ICR_ECCER		/* Uncorrectable ECC Error */
 #define E1000_IMS_RXQ0		E1000_ICR_RXQ0		/* Rx Queue 0 Interrupt */
 #define E1000_IMS_RXQ1		E1000_ICR_RXQ1		/* Rx Queue 1 Interrupt */
 #define E1000_IMS_TXQ0		E1000_ICR_TXQ0		/* Tx Queue 0 Interrupt */
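
The new PBECCSTS masks split one status register into a correctable count (bits 7:0) and an uncorrectable count (bits 15:8). The statistics path that consumes them is not part of this excerpt; a sketch of the decode into the counters added to struct e1000_adapter in the next hunk, where er32() stands for e1000e's usual register-read helper:

	u32 pbeccsts = er32(PBECCSTS);

	/* Low byte: correctable ECC errors since the last read. */
	adapter->corr_errors +=
	    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;

	/* Next byte: uncorrectable ECC errors, shifted down into range. */
	adapter->uncorr_errors +=
	    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
	    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;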
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -309,6 +309,8 @@ struct e1000_adapter {
 
 	struct napi_struct napi;
 
+	unsigned int uncorr_errors;	/* uncorrectable ECC errors */
+	unsigned int corr_errors;	/* correctable ECC errors */
 	unsigned int restart_queue;
 	u32 txd_cmd;
 
Some files were not shown because too many files have changed in this diff.