Merge branch 'for-tip' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu into x86/urgent
commit b7f797cb60
@@ -1882,6 +1882,12 @@ and is between 256 and 4096 characters. It is defined in the file
 			Format: { 0 | 1 }
 			See arch/parisc/kernel/pdc_chassis.c
 
+	percpu_alloc=	[X86] Select which percpu first chunk allocator to use.
+			Allowed values are one of "lpage", "embed" and "4k".
+			See comments in arch/x86/kernel/setup_percpu.c for
+			details on each allocator.  This parameter is primarily
+			for debugging and performance comparison.
+
 	pf.		[PARIDE]
 			See Documentation/blockdev/paride.txt.
 
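Note: the new option is passed like any other boot parameter. The line below is a hypothetical bootloader entry, not part of the commit; kernel image name and root device are made up.

	# force the embed allocator for comparison runs
	kernel /boot/vmlinuz-2.6.31 root=/dev/sda1 ro percpu_alloc=embed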
@@ -10,6 +10,8 @@ Required properties:
 - interrupts : should contain eSDHC interrupt.
 - interrupt-parent : interrupt source phandle.
 - clock-frequency : specifies eSDHC base clock frequency.
+- sdhci,1-bit-only : (optional) specifies that a controller can
+  only handle 1-bit data transfers.
 
 Example:
 
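As an illustration of the new optional property, a hypothetical eSDHC node might look like the sketch below; the register, interrupt and clock values are invented for the example and are not from the binding document.

	sdhci@2e000 {
		compatible = "fsl,esdhc";
		reg = <0x2e000 0x1000>;
		interrupts = <42 0x8>;
		interrupt-parent = <&ipic>;
		clock-frequency = <100000000>;
		sdhci,1-bit-only;
	};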
@@ -139,6 +139,7 @@ ALC883/888
 	  acer		Acer laptops (Travelmate 3012WTMi, Aspire 5600, etc)
 	  acer-aspire	Acer Aspire 9810
 	  acer-aspire-4930g Acer Aspire 4930G
+	  acer-aspire-6530g Acer Aspire 6530G
 	  acer-aspire-8930g Acer Aspire 8930G
 	  medion	Medion Laptops
 	  medion-md2	Medion MD2
MAINTAINERS (22 changed lines)
@@ -1010,6 +1010,13 @@ W:	http://www.at91.com/
 S:	Maintained
 F:	drivers/mmc/host/at91_mci.c
 
+ATMEL AT91 / AT32 MCI DRIVER
+P:	Nicolas Ferre
+M:	nicolas.ferre@atmel.com
+S:	Maintained
+F:	drivers/mmc/host/atmel-mci.c
+F:	drivers/mmc/host/atmel-mci-regs.h
+
 ATMEL AT91 / AT32 SERIAL DRIVER
 P:	Haavard Skinnemoen
 M:	hskinnemoen@atmel.com
@@ -5094,6 +5101,13 @@ L:	sdhci-devel@lists.ossman.eu
 S:	Maintained
 F:	drivers/mmc/host/sdhci.*
 
+SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) SAMSUNG DRIVER
+P:	Ben Dooks
+M:	ben-linux@fluff.org
+L:	sdhci-devel@lists.ossman.eu
+S:	Maintained
+F:	drivers/mmc/host/sdhci-s3c.c
+
 SECURITY SUBSYSTEM
 P:	James Morris
 M:	jmorris@namei.org
@@ -6216,6 +6230,14 @@ S:	Maintained
 F:	Documentation/i2c/busses/i2c-viapro
 F:	drivers/i2c/busses/i2c-viapro.c
 
+VIA SD/MMC CARD CONTROLLER DRIVER
+P:	Joseph Chan
+M:	JosephChan@via.com.tw
+P:	Harald Welte
+M:	HaraldWelte@viatech.com
+S:	Maintained
+F:	drivers/mmc/host/via-sdmmc.c
+
 VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER
 P:	Joseph Chan
 M:	JosephChan@via.com.tw
@@ -146,7 +146,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	/* If for any reason at all we couldn't handle the fault,
 	   make sure we exit gracefully rather than endlessly redo
 	   the fault.  */
-	fault = handle_mm_fault(mm, vma, address, cause > 0);
+	fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0);
 	up_read(&mm->mmap_sem);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
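The same mechanical conversion repeats for every architecture below. The reason for the ternary rather than passing the old value straight through: FAULT_FLAG_WRITE is one specific bit in the new flags word, while the old boolean argument accepted any non-zero value (e.g. (fsr & (1 << 11)) is 0x800). A minimal sketch of the idiom — write_cause is an illustrative name, not from the patch:

	unsigned int flags = write_cause ? FAULT_FLAG_WRITE : 0;

	fault = handle_mm_fault(mm, vma, address, flags);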
@@ -208,7 +208,7 @@ good_area:
 	 * than endlessly redo the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -133,7 +133,7 @@ good_area:
 	 * fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -163,7 +163,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
 	 * the fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, writeaccess & 1);
+	fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -163,7 +163,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, ear0, write);
+	fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -154,7 +154,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 * sure we exit gracefully rather than endlessly redo the
 	 * fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) != 0);
+	fault = handle_mm_fault(mm, vma, address, (mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened
@@ -196,7 +196,7 @@ survive:
 	 */
 	addr = (address & PAGE_MASK);
 	set_thread_fault_code(error_code);
-	fault = handle_mm_fault(mm, vma, addr, write);
+	fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -155,7 +155,7 @@ good_area:
 	 */
 
 survive:
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 #ifdef DEBUG
 	printk("handle_mm_fault returns %d\n",fault);
 #endif
@@ -232,7 +232,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, is_write);
+	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -102,7 +102,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -258,7 +258,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -202,7 +202,7 @@ good_area:
 	 * fault.
 	 */
 
-	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) != 0);
+	fault = handle_mm_fault(mm, vma, address, (acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We hit a shared mapping outside of the file, or some
@@ -302,7 +302,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	ret = handle_mm_fault(mm, vma, address, is_write);
+	ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(ret & VM_FAULT_ERROR)) {
 		if (ret & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -70,7 +70,7 @@ int spu_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
 	}
 
 	ret = 0;
-	*flt = handle_mm_fault(mm, vma, ea, is_write);
+	*flt = handle_mm_fault(mm, vma, ea, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(*flt & VM_FAULT_ERROR)) {
 		if (*flt & VM_FAULT_OOM) {
 			ret = -ENOMEM;
@@ -66,7 +66,7 @@ static int __handle_fault(struct mm_struct *mm, unsigned long address,
 	}
 
 survive:
-	fault = handle_mm_fault(mm, vma, address, write_access);
+	fault = handle_mm_fault(mm, vma, address, write_access ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -352,7 +352,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM) {
 			up_read(&mm->mmap_sem);
@@ -133,7 +133,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -187,7 +187,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, writeaccess);
+	fault = handle_mm_fault(mm, vma, address, writeaccess ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -241,7 +241,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -484,7 +484,7 @@ good_area:
 		if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
 			goto bad_area;
 	}
-	switch (handle_mm_fault(mm, vma, address, write)) {
+	switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
 	case VM_FAULT_SIGBUS:
 	case VM_FAULT_OOM:
 		goto do_sigbus;
@@ -398,7 +398,7 @@ good_area:
 			goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE));
+	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -65,7 +65,7 @@ good_area:
 	do {
 		int fault;
 
-		fault = handle_mm_fault(mm, vma, address, is_write);
+		fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 		if (unlikely(fault & VM_FAULT_ERROR)) {
 			if (fault & VM_FAULT_OOM) {
 				goto out_of_memory;
@@ -845,7 +845,7 @@ ENTRY(aesni_cbc_enc)
  */
 ENTRY(aesni_cbc_dec)
 	cmp $16, LEN
-	jb .Lcbc_dec_ret
+	jb .Lcbc_dec_just_ret
 	mov 480(KEYP), KLEN
 	add $240, KEYP
 	movups (IVP), IV
@@ -891,6 +891,7 @@ ENTRY(aesni_cbc_dec)
 	add $16, OUTP
 	cmp $16, LEN
 	jge .Lcbc_dec_loop1
-	movups IV, (IVP)
 .Lcbc_dec_ret:
+	movups IV, (IVP)
+.Lcbc_dec_just_ret:
 	ret
@@ -198,6 +198,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
@@ -221,6 +222,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
@@ -266,6 +268,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
@@ -289,6 +292,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc,
 
 	blkcipher_walk_init(&walk, dst, src, nbytes);
 	err = blkcipher_walk_virt(desc, &walk);
+	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
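Clearing CRYPTO_TFM_REQ_MAY_SLEEP matters here because the block walk runs between kernel_fpu_begin() and kernel_fpu_end(), where sleeping is not allowed. A condensed sketch of the pattern the four hunks share (the per-mode AES-NI call is elided; this is illustrative, not the driver verbatim):

	kernel_fpu_begin();
	while ((nbytes = walk.nbytes)) {
		/* process whole blocks here -- must not sleep */
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	kernel_fpu_end();

With the flag cleared, blkcipher_walk_done() falls back to atomic allocations instead of sleeping ones.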
@@ -48,7 +48,7 @@ static int crypto_fpu_encrypt(struct blkcipher_desc *desc_in,
 	struct blkcipher_desc desc = {
 		.tfm = child,
 		.info = desc_in->info,
-		.flags = desc_in->flags,
+		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
 	};
 
 	kernel_fpu_begin();
@@ -67,7 +67,7 @@ static int crypto_fpu_decrypt(struct blkcipher_desc *desc_in,
 	struct blkcipher_desc desc = {
 		.tfm = child,
 		.info = desc_in->info,
-		.flags = desc_in->flags,
+		.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
 	};
 
 	kernel_fpu_begin();
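The shape of the wrapper being patched, condensed into one hypothetical helper for readers; the crt()->encrypt indirection follows the blkcipher convention of this kernel era, and the function name is invented. Treat it as a sketch, not the file's actual code:

	static int fpu_encrypt_sketch(struct blkcipher_desc *desc_in,
				      struct scatterlist *dst,
				      struct scatterlist *src,
				      unsigned int nbytes,
				      struct crypto_blkcipher *child)
	{
		int err;
		struct blkcipher_desc desc = {
			.tfm = child,
			.info = desc_in->info,
			/* no sleeping while the FPU context is held */
			.flags = desc_in->flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
		};

		kernel_fpu_begin();
		err = crypto_blkcipher_crt(desc.tfm)->encrypt(&desc, dst, src, nbytes);
		kernel_fpu_end();

		return err;
	}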
@@ -42,6 +42,7 @@
 
 #else /* ...!ASSEMBLY */
 
+#include <linux/kernel.h>
 #include <linux/stringify.h>
 
 #ifdef CONFIG_SMP
@@ -155,6 +156,15 @@ do {							\
 /* We can use this directly for local CPU (faster). */
 DECLARE_PER_CPU(unsigned long, this_cpu_off);
 
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+void *pcpu_lpage_remapped(void *kaddr);
+#else
+static inline void *pcpu_lpage_remapped(void *kaddr)
+{
+	return NULL;
+}
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_SMP
@@ -124,7 +124,7 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
 }
 
 /*
- * Remap allocator
+ * Large page remap allocator
  *
  * This allocator uses PMD page as unit.  A PMD page is allocated for
 * each cpu and each is remapped into vmalloc area using PMD mapping.
@@ -137,105 +137,185 @@ static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
 * better than only using 4k mappings while still being NUMA friendly.
 */
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-static size_t pcpur_size __initdata;
-static void **pcpur_ptrs __initdata;
+struct pcpul_ent {
+	unsigned int	cpu;
+	void		*ptr;
+};
+
+static size_t pcpul_size;
+static struct pcpul_ent *pcpul_map;
+static struct vm_struct pcpul_vm;
 
-static struct page * __init pcpur_get_page(unsigned int cpu, int pageno)
+static struct page * __init pcpul_get_page(unsigned int cpu, int pageno)
 {
 	size_t off = (size_t)pageno << PAGE_SHIFT;
 
-	if (off >= pcpur_size)
+	if (off >= pcpul_size)
 		return NULL;
 
-	return virt_to_page(pcpur_ptrs[cpu] + off);
+	return virt_to_page(pcpul_map[cpu].ptr + off);
 }
 
-static ssize_t __init setup_pcpu_remap(size_t static_size)
+static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 {
-	static struct vm_struct vm;
-	size_t ptrs_size, dyn_size;
+	size_t map_size, dyn_size;
 	unsigned int cpu;
+	int i, j;
 	ssize_t ret;
 
-	/*
-	 * If large page isn't supported, there's no benefit in doing
-	 * this.  Also, on non-NUMA, embedding is better.
-	 *
-	 * NOTE: disabled for now.
-	 */
-	if (true || !cpu_has_pse || !pcpu_need_numa())
+	if (!chosen) {
+		size_t vm_size = VMALLOC_END - VMALLOC_START;
+		size_t tot_size = num_possible_cpus() * PMD_SIZE;
+
+		/* on non-NUMA, embedding is better */
+		if (!pcpu_need_numa())
+			return -EINVAL;
+
+		/* don't consume more than 20% of vmalloc area */
+		if (tot_size > vm_size / 5) {
+			pr_info("PERCPU: too large chunk size %zuMB for "
+				"large page remap\n", tot_size >> 20);
+			return -EINVAL;
+		}
+	}
+
+	/* need PSE */
+	if (!cpu_has_pse) {
+		pr_warning("PERCPU: lpage allocator requires PSE\n");
 		return -EINVAL;
+	}
 
 	/*
 	 * Currently supports only single page.  Supporting multiple
 	 * pages won't be too difficult if it ever becomes necessary.
 	 */
-	pcpur_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
+	pcpul_size = PFN_ALIGN(static_size + PERCPU_MODULE_RESERVE +
 			       PERCPU_DYNAMIC_RESERVE);
-	if (pcpur_size > PMD_SIZE) {
+	if (pcpul_size > PMD_SIZE) {
 		pr_warning("PERCPU: static data is larger than large page, "
 			   "can't use large page\n");
 		return -EINVAL;
 	}
-	dyn_size = pcpur_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
+	dyn_size = pcpul_size - static_size - PERCPU_FIRST_CHUNK_RESERVE;
 
 	/* allocate pointer array and alloc large pages */
-	ptrs_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpur_ptrs[0]));
-	pcpur_ptrs = alloc_bootmem(ptrs_size);
+	map_size = PFN_ALIGN(num_possible_cpus() * sizeof(pcpul_map[0]));
+	pcpul_map = alloc_bootmem(map_size);
 
 	for_each_possible_cpu(cpu) {
-		pcpur_ptrs[cpu] = pcpu_alloc_bootmem(cpu, PMD_SIZE, PMD_SIZE);
-		if (!pcpur_ptrs[cpu])
+		pcpul_map[cpu].cpu = cpu;
+		pcpul_map[cpu].ptr = pcpu_alloc_bootmem(cpu, PMD_SIZE,
+							PMD_SIZE);
+		if (!pcpul_map[cpu].ptr) {
+			pr_warning("PERCPU: failed to allocate large page "
+				   "for cpu%u\n", cpu);
 			goto enomem;
+		}
 
 		/*
-		 * Only use pcpur_size bytes and give back the rest.
+		 * Only use pcpul_size bytes and give back the rest.
 		 *
 		 * Ingo: The 2MB up-rounding bootmem is needed to make
 		 * sure the partial 2MB page is still fully RAM - it's
 		 * not well-specified to have a PAT-incompatible area
 		 * (unmapped RAM, device memory, etc.) in that hole.
 		 */
-		free_bootmem(__pa(pcpur_ptrs[cpu] + pcpur_size),
-			     PMD_SIZE - pcpur_size);
+		free_bootmem(__pa(pcpul_map[cpu].ptr + pcpul_size),
+			     PMD_SIZE - pcpul_size);
 
-		memcpy(pcpur_ptrs[cpu], __per_cpu_load, static_size);
+		memcpy(pcpul_map[cpu].ptr, __per_cpu_load, static_size);
 	}
 
 	/* allocate address and map */
-	vm.flags = VM_ALLOC;
-	vm.size = num_possible_cpus() * PMD_SIZE;
-	vm_area_register_early(&vm, PMD_SIZE);
+	pcpul_vm.flags = VM_ALLOC;
+	pcpul_vm.size = num_possible_cpus() * PMD_SIZE;
+	vm_area_register_early(&pcpul_vm, PMD_SIZE);
 
 	for_each_possible_cpu(cpu) {
-		pmd_t *pmd;
+		pmd_t *pmd, pmd_v;
 
-		pmd = populate_extra_pmd((unsigned long)vm.addr
-					 + cpu * PMD_SIZE);
-		set_pmd(pmd, pfn_pmd(page_to_pfn(virt_to_page(pcpur_ptrs[cpu])),
-				     PAGE_KERNEL_LARGE));
+		pmd = populate_extra_pmd((unsigned long)pcpul_vm.addr +
+					 cpu * PMD_SIZE);
+		pmd_v = pfn_pmd(page_to_pfn(virt_to_page(pcpul_map[cpu].ptr)),
+				PAGE_KERNEL_LARGE);
+		set_pmd(pmd, pmd_v);
 	}
 
 	/* we're ready, commit */
 	pr_info("PERCPU: Remapped at %p with large pages, static data "
-		"%zu bytes\n", vm.addr, static_size);
+		"%zu bytes\n", pcpul_vm.addr, static_size);
 
-	ret = pcpu_setup_first_chunk(pcpur_get_page, static_size,
+	ret = pcpu_setup_first_chunk(pcpul_get_page, static_size,
 				     PERCPU_FIRST_CHUNK_RESERVE, dyn_size,
-				     PMD_SIZE, vm.addr, NULL);
-	goto out_free_ar;
+				     PMD_SIZE, pcpul_vm.addr, NULL);
+
+	/* sort pcpul_map array for pcpu_lpage_remapped() */
+	for (i = 0; i < num_possible_cpus() - 1; i++)
+		for (j = i + 1; j < num_possible_cpus(); j++)
+			if (pcpul_map[i].ptr > pcpul_map[j].ptr) {
+				struct pcpul_ent tmp = pcpul_map[i];
+				pcpul_map[i] = pcpul_map[j];
+				pcpul_map[j] = tmp;
+			}
+
+	return ret;
 
 enomem:
 	for_each_possible_cpu(cpu)
-		if (pcpur_ptrs[cpu])
-			free_bootmem(__pa(pcpur_ptrs[cpu]), PMD_SIZE);
-	ret = -ENOMEM;
-out_free_ar:
-	free_bootmem(__pa(pcpur_ptrs), ptrs_size);
-	return ret;
+		if (pcpul_map[cpu].ptr)
+			free_bootmem(__pa(pcpul_map[cpu].ptr), pcpul_size);
+	free_bootmem(__pa(pcpul_map), map_size);
+	return -ENOMEM;
 }
+
+/**
+ * pcpu_lpage_remapped - determine whether a kaddr is in pcpul recycled area
+ * @kaddr: the kernel address in question
+ *
+ * Determine whether @kaddr falls in the pcpul recycled area.  This is
+ * used by pageattr to detect VM aliases and break up the pcpu PMD
+ * mapping such that the same physical page is not mapped under
+ * different attributes.
+ *
+ * The recycled area is always at the tail of a partially used PMD
+ * page.
+ *
+ * RETURNS:
+ * Address of corresponding remapped pcpu address if match is found;
+ * otherwise, NULL.
+ */
+void *pcpu_lpage_remapped(void *kaddr)
+{
+	void *pmd_addr = (void *)((unsigned long)kaddr & PMD_MASK);
+	unsigned long offset = (unsigned long)kaddr & ~PMD_MASK;
+	int left = 0, right = num_possible_cpus() - 1;
+	int pos;
+
+	/* pcpul in use at all? */
+	if (!pcpul_map)
+		return NULL;
+
+	/* okay, perform binary search */
+	while (left <= right) {
+		pos = (left + right) / 2;
+
+		if (pcpul_map[pos].ptr < pmd_addr)
+			left = pos + 1;
+		else if (pcpul_map[pos].ptr > pmd_addr)
+			right = pos - 1;
+		else {
+			/* it shouldn't be in the area for the first chunk */
+			WARN_ON(offset < pcpul_size);
+
+			return pcpul_vm.addr +
+				pcpul_map[pos].cpu * PMD_SIZE + offset;
+		}
+	}
+
+	return NULL;
+}
 #else
-static ssize_t __init setup_pcpu_remap(size_t static_size)
+static ssize_t __init setup_pcpu_lpage(size_t static_size, bool chosen)
 {
 	return -EINVAL;
 }
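The pairing above is a classic "sort once at init, binary-search on lookup" structure: the O(n^2) exchange sort is fine because it runs once over num_possible_cpus() entries at boot. A standalone user-space sketch of the same pattern (names mirror the kernel code but this is illustrative; note that ordering unrelated pointers with < is technically undefined in standard C, though the kernel relies on it):

	#include <stddef.h>

	struct ent { unsigned int cpu; void *ptr; };

	static void sort_map(struct ent *map, int n)
	{
		int i, j;

		for (i = 0; i < n - 1; i++)
			for (j = i + 1; j < n; j++)
				if (map[i].ptr > map[j].ptr) {
					struct ent tmp = map[i];
					map[i] = map[j];
					map[j] = tmp;
				}
	}

	static struct ent *find(struct ent *map, int n, void *pmd_addr)
	{
		int left = 0, right = n - 1;

		while (left <= right) {
			int pos = (left + right) / 2;

			if (map[pos].ptr < pmd_addr)
				left = pos + 1;
			else if (map[pos].ptr > pmd_addr)
				right = pos - 1;
			else
				return &map[pos];
		}
		return NULL;	/* address is not one of the PMD pages */
	}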
@@ -249,7 +329,7 @@ static ssize_t __init setup_pcpu_remap(size_t static_size)
 * mapping so that it can use PMD mapping without additional TLB
 * pressure.
 */
-static ssize_t __init setup_pcpu_embed(size_t static_size)
+static ssize_t __init setup_pcpu_embed(size_t static_size, bool chosen)
 {
 	size_t reserve = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE;
 
@@ -258,7 +338,7 @@ static ssize_t __init setup_pcpu_embed(size_t static_size)
 	 * this.  Also, embedding allocation doesn't play well with
 	 * NUMA.
 	 */
-	if (!cpu_has_pse || pcpu_need_numa())
+	if (!chosen && (!cpu_has_pse || pcpu_need_numa()))
 		return -EINVAL;
 
 	return pcpu_embed_first_chunk(static_size, PERCPU_FIRST_CHUNK_RESERVE,
@@ -308,8 +388,11 @@ static ssize_t __init setup_pcpu_4k(size_t static_size)
 		void *ptr;
 
 		ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
-		if (!ptr)
+		if (!ptr) {
+			pr_warning("PERCPU: failed to allocate "
+				   "4k page for cpu%u\n", cpu);
 			goto enomem;
+		}
 
 		memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
 		pcpu4k_pages[j++] = virt_to_page(ptr);
@@ -333,6 +416,16 @@ out_free_ar:
 	return ret;
 }
 
+/* for explicit first chunk allocator selection */
+static char pcpu_chosen_alloc[16] __initdata;
+
+static int __init percpu_alloc_setup(char *str)
+{
+	strncpy(pcpu_chosen_alloc, str, sizeof(pcpu_chosen_alloc) - 1);
+	return 0;
+}
+early_param("percpu_alloc", percpu_alloc_setup);
+
 static inline void setup_percpu_segment(int cpu)
 {
 #ifdef CONFIG_X86_32
@@ -346,11 +439,6 @@ static inline void setup_percpu_segment(int cpu)
 #endif
 }
 
-/*
- * Great future plan:
- *  Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
- *    Always point %gs to its beginning
- */
 void __init setup_per_cpu_areas(void)
 {
 	size_t static_size = __per_cpu_end - __per_cpu_start;
@@ -367,9 +455,26 @@ void __init setup_per_cpu_areas(void)
 	 * of large page mappings.  Please read comments on top of
 	 * each allocator for details.
 	 */
-	ret = setup_pcpu_remap(static_size);
-	if (ret < 0)
-		ret = setup_pcpu_embed(static_size);
+	ret = -EINVAL;
+	if (strlen(pcpu_chosen_alloc)) {
+		if (strcmp(pcpu_chosen_alloc, "4k")) {
+			if (!strcmp(pcpu_chosen_alloc, "lpage"))
+				ret = setup_pcpu_lpage(static_size, true);
+			else if (!strcmp(pcpu_chosen_alloc, "embed"))
+				ret = setup_pcpu_embed(static_size, true);
+			else
+				pr_warning("PERCPU: unknown allocator %s "
+					   "specified\n", pcpu_chosen_alloc);
+			if (ret < 0)
+				pr_warning("PERCPU: %s allocator failed (%zd), "
+					   "falling back to 4k\n",
+					   pcpu_chosen_alloc, ret);
+		}
+	} else {
+		ret = setup_pcpu_lpage(static_size, false);
+		if (ret < 0)
+			ret = setup_pcpu_embed(static_size, false);
+	}
 	if (ret < 0)
 		ret = setup_pcpu_4k(static_size);
 	if (ret < 0)
@@ -1113,7 +1113,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault:
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, error_code, address, fault);
@@ -11,6 +11,7 @@
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
+#include <linux/pfn.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -681,8 +682,9 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
 static int cpa_process_alias(struct cpa_data *cpa)
 {
 	struct cpa_data alias_cpa;
-	int ret = 0;
-	unsigned long temp_cpa_vaddr, vaddr;
+	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
+	unsigned long vaddr, remapped;
+	int ret;
 
 	if (cpa->pfn >= max_pfn_mapped)
 		return 0;
@@ -706,42 +708,55 @@ static int cpa_process_alias(struct cpa_data *cpa)
 		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
 
 		alias_cpa = *cpa;
-		temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
-		alias_cpa.vaddr = &temp_cpa_vaddr;
+		alias_cpa.vaddr = &laddr;
 		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
-
 
 		ret = __change_page_attr_set_clr(&alias_cpa, 0);
+		if (ret)
+			return ret;
 	}
 
 #ifdef CONFIG_X86_64
-	if (ret)
-		return ret;
 	/*
-	 * No need to redo, when the primary call touched the high
-	 * mapping already:
-	 */
-	if (within(vaddr, (unsigned long) _text, _brk_end))
-		return 0;
-
-	/*
-	 * If the physical address is inside the kernel map, we need
+	 * If the primary call didn't touch the high mapping already
+	 * and the physical address is inside the kernel map, we need
 	 * to touch the high mapped kernel as well:
 	 */
-	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
-		return 0;
+	if (!within(vaddr, (unsigned long)_text, _brk_end) &&
+	    within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
+		unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
+					       __START_KERNEL_map - phys_base;
+		alias_cpa = *cpa;
+		alias_cpa.vaddr = &temp_cpa_vaddr;
+		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
 
-	alias_cpa = *cpa;
-	temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
-	alias_cpa.vaddr = &temp_cpa_vaddr;
-	alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+		/*
+		 * The high mapping range is imprecise, so ignore the
+		 * return value.
+		 */
+		__change_page_attr_set_clr(&alias_cpa, 0);
+	}
+#endif
 
 	/*
-	 * The high mapping range is imprecise, so ignore the return value.
+	 * If the PMD page was partially used for per-cpu remapping,
+	 * the recycled area needs to be split and modified.  Because
+	 * the area is always proper subset of a PMD page
+	 * cpa->numpages is guaranteed to be 1 for these areas, so
+	 * there's no need to loop over and check for further remaps.
 	 */
-	__change_page_attr_set_clr(&alias_cpa, 0);
-#endif
-	return ret;
+	remapped = (unsigned long)pcpu_lpage_remapped((void *)laddr);
+	if (remapped) {
+		WARN_ON(cpa->numpages > 1);
+		alias_cpa = *cpa;
+		alias_cpa.vaddr = &remapped;
+		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);
+		ret = __change_page_attr_set_clr(&alias_cpa, 0);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
 }
 
 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
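For readers following the alias checks: within(), used throughout this function, is pageattr.c's half-open range test. It should be equivalent to this sketch:

	static inline int within(unsigned long addr, unsigned long start,
				 unsigned long end)
	{
		return addr >= start && addr < end;
	}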
@@ -106,7 +106,7 @@ good_area:
 	 * the fault.
 	 */
 survive:
-	fault = handle_mm_fault(mm, vma, address, is_write);
+	fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -18,9 +18,22 @@
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <asm/byteorder.h>
+#include <asm/processor.h>
 #include <asm/i387.h>
 #include "padlock.h"
 
+/*
+ * Number of data blocks actually fetched for each xcrypt insn.
+ * Processors with prefetch errata will fetch extra blocks.
+ */
+static unsigned int ecb_fetch_blocks = 2;
+#define MAX_ECB_FETCH_BLOCKS (8)
+#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)
+
+static unsigned int cbc_fetch_blocks = 1;
+#define MAX_CBC_FETCH_BLOCKS (4)
+#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
+
 /* Control word. */
 struct cword {
 	unsigned int __attribute__ ((__packed__))
@@ -172,73 +185,111 @@ static inline void padlock_store_cword(struct cword *cword)
 * should be used only inside the irq_ts_save/restore() context
 */
 
-static inline void padlock_xcrypt(const u8 *input, u8 *output, void *key,
-				  struct cword *control_word)
+static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
+				  struct cword *control_word, int count)
 {
 	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(1));
+		      : "d"(control_word), "b"(key), "c"(count));
 }
 
-static void aes_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword)
+static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
+				 u8 *iv, struct cword *control_word, int count)
 {
-	u8 buf[AES_BLOCK_SIZE * 2 + PADLOCK_ALIGNMENT - 1];
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
+		      : "+S" (input), "+D" (output), "+a" (iv)
+		      : "d" (control_word), "b" (key), "c" (count));
+	return iv;
+}
+
+static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
+			   struct cword *cword, int count)
+{
+	/*
+	 * Padlock prefetches extra data so we must provide mapped input buffers.
+	 * Assume there are at least 16 bytes of stack already in use.
+	 */
+	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
 	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
 
-	memcpy(tmp, in, AES_BLOCK_SIZE);
-	padlock_xcrypt(tmp, out, key, cword);
+	memcpy(tmp, in, count * AES_BLOCK_SIZE);
+	rep_xcrypt_ecb(tmp, out, key, cword, count);
 }
 
-static inline void aes_crypt(const u8 *in, u8 *out, u32 *key,
-			     struct cword *cword)
+static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
+			  u8 *iv, struct cword *cword, int count)
 {
-	/* padlock_xcrypt requires at least two blocks of data. */
-	if (unlikely(!(((unsigned long)in ^ (PAGE_SIZE - AES_BLOCK_SIZE)) &
-		       (PAGE_SIZE - 1)))) {
-		aes_crypt_copy(in, out, key, cword);
+	/*
+	 * Padlock prefetches extra data so we must provide mapped input buffers.
+	 * Assume there are at least 16 bytes of stack already in use.
+	 */
+	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
+	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	memcpy(tmp, in, count * AES_BLOCK_SIZE);
+	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
+}
+
+static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
+			     struct cword *cword, int count)
+{
+	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
+	 * We could avoid some copying here but it's probably not worth it.
+	 */
+	if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) {
+		ecb_crypt_copy(in, out, key, cword, count);
 		return;
 	}
 
-	padlock_xcrypt(in, out, key, cword);
+	rep_xcrypt_ecb(in, out, key, cword, count);
+}
+
+static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
+			    u8 *iv, struct cword *cword, int count)
+{
+	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
+	if (unlikely(((unsigned long)in & PAGE_SIZE) + cbc_fetch_bytes > PAGE_SIZE))
+		return cbc_crypt_copy(in, out, key, iv, cword, count);
+
+	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
 }
 
 static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
 				      void *control_word, u32 count)
 {
-	if (count == 1) {
-		aes_crypt(input, output, key, control_word);
+	u32 initial = count & (ecb_fetch_blocks - 1);
+
+	if (count < ecb_fetch_blocks) {
+		ecb_crypt(input, output, key, control_word, count);
 		return;
 	}
 
-	asm volatile ("test $1, %%cl;"
-		      "je 1f;"
-#ifndef CONFIG_X86_64
-		      "lea -1(%%ecx), %%eax;"
-		      "mov $1, %%ecx;"
-#else
-		      "lea -1(%%rcx), %%rax;"
-		      "mov $1, %%rcx;"
-#endif
-		      ".byte 0xf3,0x0f,0xa7,0xc8;"	/* rep xcryptecb */
-#ifndef CONFIG_X86_64
-		      "mov %%eax, %%ecx;"
-#else
-		      "mov %%rax, %%rcx;"
-#endif
-		      "1:"
-		      ".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+	if (initial)
+		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
+			      : "+S"(input), "+D"(output)
+			      : "d"(control_word), "b"(key), "c"(initial));
+
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
 		      : "+S"(input), "+D"(output)
-		      : "d"(control_word), "b"(key), "c"(count)
-		      : "ax");
+		      : "d"(control_word), "b"(key), "c"(count - initial));
 }
 
 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
 				     u8 *iv, void *control_word, u32 count)
 {
-	/* rep xcryptcbc */
-	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"
+	u32 initial = count & (cbc_fetch_blocks - 1);
+
+	if (count < cbc_fetch_blocks)
+		return cbc_crypt(input, output, key, iv, control_word, count);
+
+	if (initial)
+		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
+			      : "d" (control_word), "b" (key), "c" (initial));
+
+	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
 		      : "+S" (input), "+D" (output), "+a" (iv)
-		      : "d" (control_word), "b" (key), "c" (count));
+		      : "d" (control_word), "b" (key), "c" (count-initial));
 	return iv;
 }
@@ -249,7 +300,7 @@ static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->E, &ctx->cword.encrypt);
+	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -261,7 +312,7 @@ static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
 
 	padlock_reset_key(&ctx->cword.encrypt);
 	ts_state = irq_ts_save();
-	aes_crypt(in, out, ctx->D, &ctx->cword.decrypt);
+	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
 	irq_ts_restore(ts_state);
 	padlock_store_cword(&ctx->cword.encrypt);
 }
@@ -454,6 +505,7 @@ static struct crypto_alg cbc_aes_alg = {
 static int __init padlock_init(void)
 {
 	int ret;
+	struct cpuinfo_x86 *c = &cpu_data(0);
 
 	if (!cpu_has_xcrypt) {
 		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
@@ -476,6 +528,12 @@ static int __init padlock_init(void)
 
 	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
 
+	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
+		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
+		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
+	}
+
 out:
 	return ret;
 
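The fetch-block split in the two padlock_xcrypt_* helpers relies on the fetch sizes being powers of two, so the remainder can be computed with a mask. A worked sketch of the arithmetic, with values chosen purely for illustration:

	unsigned int count = 29;		/* blocks requested */
	unsigned int fetch_blocks = 8;		/* e.g. MAX_ECB_FETCH_BLOCKS */
	unsigned int initial = count & (fetch_blocks - 1);	/* 29 % 8 = 5 */

	/* 5 blocks are crypted first, then the remaining 24, so the final
	 * "rep xcrypt" always runs on a multiple of the fetch size and the
	 * CPU's extra prefetch stays inside the mapped buffer. */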
@@ -94,6 +94,31 @@ config MMC_SDHCI_PLTFM
 
 	  If unsure, say N.
 
+config MMC_SDHCI_S3C
+	tristate "SDHCI support on Samsung S3C SoC"
+	depends on MMC_SDHCI && (PLAT_S3C24XX || PLAT_S3C64XX)
+	help
+	  This selects the Secure Digital Host Controller Interface (SDHCI)
+	  often referrered to as the HSMMC block in some of the Samsung S3C
+	  range of SoC.
+
+	  Note, due to the problems with DMA, the DMA support is only
+	  available with CONFIG_EXPERIMENTAL is selected.
+
+	  If you have a controller with this interface, say Y or M here.
+
+	  If unsure, say N.
+
+config MMC_SDHCI_S3C_DMA
+	bool "DMA support on S3C SDHCI"
+	depends on MMC_SDHCI_S3C && EXPERIMENTAL
+	help
+	  Enable DMA support on the Samsung S3C SDHCI glue. The DMA
+	  has proved to be problematic if the controller encounters
+	  certain errors, and thus should be treated with care.
+
+	  YMMV.
+
 config MMC_OMAP
 	tristate "TI OMAP Multimedia Card Interface support"
 	depends on ARCH_OMAP
@@ -265,3 +290,14 @@ config MMC_CB710
 	  This driver can also be built as a module. If so, the module
 	  will be called cb710-mmc.
 
+config MMC_VIA_SDMMC
+	tristate "VIA SD/MMC Card Reader Driver"
+	depends on PCI
+	help
+	  This selects the VIA SD/MMC Card Reader driver, say Y or M here.
+	  VIA provides one multi-functional card reader which integrated into
+	  some motherboards manufactured by VIA. This card reader supports
+	  SD/MMC/SDHC.
+	  If you have a controller with this interface, say Y or M here.
+
+	  If unsure, say N.
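A hypothetical .config fragment enabling the two new drivers added above (the module/built-in choice is arbitrary and not prescribed by the commit):

	CONFIG_MMC_SDHCI_S3C=y
	# CONFIG_MMC_SDHCI_S3C_DMA additionally needs CONFIG_EXPERIMENTAL
	CONFIG_MMC_SDHCI_S3C_DMA=y
	CONFIG_MMC_VIA_SDMMC=m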
@@ -15,6 +15,7 @@ obj-$(CONFIG_MMC_SDHCI_PCI)	+= sdhci-pci.o
 obj-$(CONFIG_MMC_RICOH_MMC)	+= ricoh_mmc.o
 obj-$(CONFIG_MMC_SDHCI_OF)	+= sdhci-of.o
 obj-$(CONFIG_MMC_SDHCI_PLTFM)	+= sdhci-pltfm.o
+obj-$(CONFIG_MMC_SDHCI_S3C)	+= sdhci-s3c.o
 obj-$(CONFIG_MMC_WBSD)		+= wbsd.o
 obj-$(CONFIG_MMC_AU1X)		+= au1xmmc.o
 obj-$(CONFIG_MMC_OMAP)		+= omap.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_MMC_S3C)		+= s3cmci.o
 obj-$(CONFIG_MMC_SDRICOH_CS)	+= sdricoh_cs.o
 obj-$(CONFIG_MMC_TMIO)		+= tmio_mmc.o
 obj-$(CONFIG_MMC_CB710)		+= cb710-mmc.o
+obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o
 
 ifeq ($(CONFIG_CB710_DEBUG),y)
 	CFLAGS-cb710-mmc	+= -DDEBUG
@@ -794,7 +794,7 @@ static void s3cmci_dma_setup(struct s3cmci_host *host,
 				host->mem->start + host->sdidata);
 
 	if (!setup_ok) {
-		s3c2410_dma_config(host->dma, 4, 0);
+		s3c2410_dma_config(host->dma, 4);
 		s3c2410_dma_set_buffdone_fn(host->dma,
 					    s3cmci_dma_done_callback);
 		s3c2410_dma_setflags(host->dma, S3C2410_DMAF_AUTOSTART);
@@ -250,6 +250,9 @@ static int __devinit sdhci_of_probe(struct of_device *ofdev,
 		host->ops = &sdhci_of_data->ops;
 	}
 
+	if (of_get_property(np, "sdhci,1-bit-only", NULL))
+		host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
+
 	clk = of_get_property(np, "clock-frequency", &size);
 	if (clk && size == sizeof(*clk) && *clk)
 		of_host->clock = *clk;
@@ -284,6 +284,18 @@ static const struct sdhci_pci_fixes sdhci_jmicron = {
 	.resume		= jmicron_resume,
 };
 
+static int via_probe(struct sdhci_pci_chip *chip)
+{
+	if (chip->pdev->revision == 0x10)
+		chip->quirks |= SDHCI_QUIRK_DELAY_AFTER_POWER;
+
+	return 0;
+}
+
+static const struct sdhci_pci_fixes sdhci_via = {
+	.probe		= via_probe,
+};
+
 static const struct pci_device_id pci_ids[] __devinitdata = {
 	{
 		.vendor		= PCI_VENDOR_ID_RICOH,
@@ -349,6 +361,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
 		.driver_data	= (kernel_ulong_t)&sdhci_jmicron,
 	},
 
+	{
+		.vendor		= PCI_VENDOR_ID_VIA,
+		.device		= 0x95d0,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (kernel_ulong_t)&sdhci_via,
+	},
+
 	{	/* Generic SD host controller */
 		PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00)
 	},
@@ -0,0 +1,428 @@
+/* linux/drivers/mmc/host/sdhci-s3c.c
+ *
+ * Copyright 2008 Openmoko Inc.
+ * Copyright 2008 Simtec Electronics
+ *      Ben Dooks <ben@simtec.co.uk>
+ *      http://armlinux.simtec.co.uk/
+ *
+ * SDHCI (HSMMC) support for Samsung SoC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include <linux/mmc/host.h>
+
+#include <plat/sdhci.h>
+#include <plat/regs-sdhci.h>
+
+#include "sdhci.h"
+
+#define MAX_BUS_CLK	(4)
+
+/**
+ * struct sdhci_s3c - S3C SDHCI instance
+ * @host: The SDHCI host created
+ * @pdev: The platform device we where created from.
+ * @ioarea: The resource created when we claimed the IO area.
+ * @pdata: The platform data for this controller.
+ * @cur_clk: The index of the current bus clock.
+ * @clk_io: The clock for the internal bus interface.
+ * @clk_bus: The clocks that are available for the SD/MMC bus clock.
+ */
+struct sdhci_s3c {
+	struct sdhci_host	*host;
+	struct platform_device	*pdev;
+	struct resource		*ioarea;
+	struct s3c_sdhci_platdata *pdata;
+	unsigned int		cur_clk;
+
+	struct clk		*clk_io;
+	struct clk		*clk_bus[MAX_BUS_CLK];
+};
+
+static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)
+{
+	return sdhci_priv(host);
+}
+
+/**
+ * get_curclk - convert ctrl2 register to clock source number
+ * @ctrl2: Control2 register value.
+ */
+static u32 get_curclk(u32 ctrl2)
+{
+	ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+	ctrl2 >>= S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
+
+	return ctrl2;
+}
+
+static void sdhci_s3c_check_sclk(struct sdhci_host *host)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+	u32 tmp = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
+
+	if (get_curclk(tmp) != ourhost->cur_clk) {
+		dev_dbg(&ourhost->pdev->dev, "restored ctrl2 clock setting\n");
+
+		tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+		tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
+		writel(tmp, host->ioaddr + 0x80);
+	}
+}
+
+/**
+ * sdhci_s3c_get_max_clk - callback to get maximum clock frequency.
+ * @host: The SDHCI host instance.
+ *
+ * Callback to return the maximum clock rate acheivable by the controller.
+ */
+static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+	struct clk *busclk;
+	unsigned int rate, max;
+	int clk;
+
+	/* note, a reset will reset the clock source */
+
+	sdhci_s3c_check_sclk(host);
+
+	for (max = 0, clk = 0; clk < MAX_BUS_CLK; clk++) {
+		busclk = ourhost->clk_bus[clk];
+		if (!busclk)
+			continue;
+
+		rate = clk_get_rate(busclk);
+		if (rate > max)
+			max = rate;
+	}
+
+	return max;
+}
+
+static unsigned int sdhci_s3c_get_timeout_clk(struct sdhci_host *host)
+{
+	return sdhci_s3c_get_max_clk(host) / 1000000;
+}
+
+/**
+ * sdhci_s3c_consider_clock - consider one the bus clocks for current setting
+ * @ourhost: Our SDHCI instance.
+ * @src: The source clock index.
+ * @wanted: The clock frequency wanted.
+ */
+static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,
+					     unsigned int src,
+					     unsigned int wanted)
+{
+	unsigned long rate;
+	struct clk *clksrc = ourhost->clk_bus[src];
+	int div;
+
+	if (!clksrc)
+		return UINT_MAX;
+
+	rate = clk_get_rate(clksrc);
+
+	for (div = 1; div < 256; div *= 2) {
+		if ((rate / div) <= wanted)
+			break;
+	}
+
+	dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
+		src, rate, wanted, rate / div);
+
+	return (wanted - (rate / div));
+}
+
+/**
+ * sdhci_s3c_set_clock - callback on clock change
+ * @host: The SDHCI host being changed
+ * @clock: The clock rate being requested.
+ *
+ * When the card's clock is going to be changed, look at the new frequency
+ * and find the best clock source to go with it.
+ */
+static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	struct sdhci_s3c *ourhost = to_s3c(host);
+	unsigned int best = UINT_MAX;
+	unsigned int delta;
+	int best_src = 0;
+	int src;
+	u32 ctrl;
+
+	/* don't bother if the clock is going off. */
+	if (clock == 0)
+		return;
+
+	for (src = 0; src < MAX_BUS_CLK; src++) {
+		delta = sdhci_s3c_consider_clock(ourhost, src, clock);
+		if (delta < best) {
+			best = delta;
+			best_src = src;
+		}
+	}
+
+	dev_dbg(&ourhost->pdev->dev,
+		"selected source %d, clock %d, delta %d\n",
+		 best_src, clock, best);
+
+	/* select the new clock source */
+
+	if (ourhost->cur_clk != best_src) {
+		struct clk *clk = ourhost->clk_bus[best_src];
+
+		/* turn clock off to card before changing clock source */
+		writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL);
+
+		ourhost->cur_clk = best_src;
+		host->max_clk = clk_get_rate(clk);
+		host->timeout_clk = sdhci_s3c_get_timeout_clk(host);
+
+		ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2);
+		ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
+		ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
+		writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2);
+	}
+
+	/* reconfigure the hardware for new clock rate */
+
+	{
+		struct mmc_ios ios;
+
+		ios.clock = clock;
+
+		if (ourhost->pdata->cfg_card)
+			(ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr,
+						   &ios, NULL);
+	}
+}
+
+static struct sdhci_ops sdhci_s3c_ops = {
+	.get_max_clock		= sdhci_s3c_get_max_clk,
+	.get_timeout_clock	= sdhci_s3c_get_timeout_clk,
+	.set_clock		= sdhci_s3c_set_clock,
+};
+
+static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
+{
+	struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
+	struct device *dev = &pdev->dev;
+	struct sdhci_host *host;
+	struct sdhci_s3c *sc;
+	struct resource *res;
+	int ret, irq, ptr, clks;
+
+	if (!pdata) {
+		dev_err(dev, "no device data specified\n");
+		return -ENOENT;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "no irq specified\n");
+		return irq;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "no memory specified\n");
+		return -ENOENT;
+	}
+
+	host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
+	if (IS_ERR(host)) {
+		dev_err(dev, "sdhci_alloc_host() failed\n");
+		return PTR_ERR(host);
+	}
+
+	sc = sdhci_priv(host);
+
+	sc->host = host;
+	sc->pdev = pdev;
+	sc->pdata = pdata;
+
+	platform_set_drvdata(pdev, host);
+
+	sc->clk_io = clk_get(dev, "hsmmc");
+	if (IS_ERR(sc->clk_io)) {
+		dev_err(dev, "failed to get io clock\n");
+		ret = PTR_ERR(sc->clk_io);
+		goto err_io_clk;
+	}
+
+	/* enable the local io clock and keep it running for the moment. */
+	clk_enable(sc->clk_io);
+
+	for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
+		struct clk *clk;
+		char *name = pdata->clocks[ptr];
+
+		if (name == NULL)
+			continue;
+
+		clk = clk_get(dev, name);
+		if (IS_ERR(clk)) {
+			dev_err(dev, "failed to get clock %s\n", name);
+			continue;
+		}
+
+		clks++;
+		sc->clk_bus[ptr] = clk;
+		clk_enable(clk);
+
+		dev_info(dev, "clock source %d: %s (%ld Hz)\n",
+			 ptr, name, clk_get_rate(clk));
+	}
+
+	if (clks == 0) {
+		dev_err(dev, "failed to find any bus clocks\n");
+		ret = -ENOENT;
+		goto err_no_busclks;
+	}
+
+	sc->ioarea = request_mem_region(res->start, resource_size(res),
+					mmc_hostname(host->mmc));
+	if (!sc->ioarea) {
+		dev_err(dev, "failed to reserve register area\n");
+		ret = -ENXIO;
+		goto err_req_regs;
+	}
+
+	host->ioaddr = ioremap_nocache(res->start, resource_size(res));
+	if (!host->ioaddr) {
+		dev_err(dev, "failed to map registers\n");
+		ret = -ENXIO;
+		goto err_req_regs;
+	}
+
+	/* Ensure we have minimal gpio selected CMD/CLK/Detect */
+	if (pdata->cfg_gpio)
+		pdata->cfg_gpio(pdev, pdata->max_width);
+
+	host->hw_name = "samsung-hsmmc";
+	host->ops = &sdhci_s3c_ops;
+	host->quirks = 0;
+	host->irq = irq;
+
+	/* Setup quirks for the controller */
+
+	/* Currently with ADMA enabled we are getting some length
+	 * interrupts that are not being dealt with, do disable
+	 * ADMA until this is sorted out. */
+	host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
+	host->quirks |= SDHCI_QUIRK_32BIT_ADMA_SIZE;
+
+#ifndef CONFIG_MMC_SDHCI_S3C_DMA
+
+	/* we currently see overruns on errors, so disable the SDMA
+	 * support as well. */
+	host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
+
+	/* PIO currently has problems with multi-block IO */
+	host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
+
+#endif /* CONFIG_MMC_SDHCI_S3C_DMA */
+
+	/* It seems we do not get an DATA transfer complete on non-busy
+	 * transfers, not sure if this is a problem with this specific
+	 * SDHCI block, or a missing configuration that needs to be set. */
+	host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
+
+	host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
+			 SDHCI_QUIRK_32BIT_DMA_SIZE);
+
+	ret = sdhci_add_host(host);
+	if (ret) {
+		dev_err(dev, "sdhci_add_host() failed\n");
+		goto err_add_host;
+	}
+
+	return 0;
+
+ err_add_host:
+	release_resource(sc->ioarea);
+	kfree(sc->ioarea);
+
+ err_req_regs:
+	for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
+		clk_disable(sc->clk_bus[ptr]);
+		clk_put(sc->clk_bus[ptr]);
+	}
+
+ err_no_busclks:
+	clk_disable(sc->clk_io);
+	clk_put(sc->clk_io);
+
+ err_io_clk:
+	sdhci_free_host(host);
+
+	return ret;
+}
+
+static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+{
+	struct sdhci_host *host = platform_get_drvdata(dev);
+
+	sdhci_suspend_host(host, pm);
+	return 0;
+}
+
+static int sdhci_s3c_resume(struct platform_device *dev)
+{
+	struct sdhci_host *host = platform_get_drvdata(dev);
+
+	sdhci_resume_host(host);
+	return 0;
+}
+
+#else
+#define sdhci_s3c_suspend NULL
+#define sdhci_s3c_resume NULL
+#endif
+
+static struct platform_driver sdhci_s3c_driver = {
+	.probe		= sdhci_s3c_probe,
+	.remove		= __devexit_p(sdhci_s3c_remove),
+	.suspend	= sdhci_s3c_suspend,
+	.resume		= sdhci_s3c_resume,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "s3c-sdhci",
+	},
+};
+
+static int __init sdhci_s3c_init(void)
+{
+	return platform_driver_register(&sdhci_s3c_driver);
+}
+
+static void __exit sdhci_s3c_exit(void)
+{
+	platform_driver_unregister(&sdhci_s3c_driver);
+}
+
+module_init(sdhci_s3c_init);
+module_exit(sdhci_s3c_exit);
+
+MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
+MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:s3c-sdhci");
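The divider walk in sdhci_s3c_consider_clock() above only tries power-of-two dividers (1..128, then falling through to 256) and scores each clock source by how far below the requested frequency it lands; the source with the smallest score wins in sdhci_s3c_set_clock(). A standalone sketch with a worked example (function name is invented):

	static unsigned int consider_clock_sketch(unsigned long rate,
						  unsigned int wanted)
	{
		int div;

		for (div = 1; div < 256; div *= 2)
			if ((rate / div) <= wanted)
				break;

		/* e.g. rate = 133000000, wanted = 25000000: 133/1, 133/2 and
		 * 133/4 MHz are all too fast; div = 8 gives 16.625 MHz, so the
		 * score is 25000000 - 16625000 = 8375000. Lower is better. */
		return wanted - (rate / div);
	}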
@ -584,7 +584,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)
|
|||
* longer to time out, but that's much better than having a too-short
|
||||
* timeout value.
|
||||
*/
|
||||
if ((host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL))
|
||||
if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
|
||||
return 0xE;
|
||||
|
||||
/* timeout in us */
|
||||
|
@ -1051,12 +1051,19 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
|
|||
* At least the Marvell CaFe chip gets confused if we set the voltage
|
||||
* and set turn on power at the same time, so set the voltage first.
|
||||
*/
|
||||
if ((host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER))
|
||||
if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
|
||||
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
|
||||
|
||||
pwr |= SDHCI_POWER_ON;
|
||||
|
||||
sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
|
||||
|
||||
/*
|
||||
* Some controllers need an extra 10ms delay of 10ms before they
|
||||
* can apply clock after applying power
|
||||
*/
|
||||
if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
|
||||
mdelay(10);
|
||||
}
|
||||
|
||||
/*****************************************************************************\
|
||||
|
@@ -1382,6 +1389,35 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
        sdhci_finish_command(host);
}

+#ifdef DEBUG
+static void sdhci_show_adma_error(struct sdhci_host *host)
+{
+    const char *name = mmc_hostname(host->mmc);
+    u8 *desc = host->adma_desc;
+    __le32 *dma;
+    __le16 *len;
+    u8 attr;
+
+    sdhci_dumpregs(host);
+
+    while (true) {
+        dma = (__le32 *)(desc + 4);
+        len = (__le16 *)(desc + 2);
+        attr = *desc;
+
+        DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
+            name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr);
+
+        desc += 8;
+
+        if (attr & 2)
+            break;
+    }
+}
+#else
+static void sdhci_show_adma_error(struct sdhci_host *host) { }
+#endif
+
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
    BUG_ON(intmask == 0);
@@ -1411,8 +1447,11 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
        host->data->error = -ETIMEDOUT;
    else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
        host->data->error = -EILSEQ;
-    else if (intmask & SDHCI_INT_ADMA_ERROR)
+    else if (intmask & SDHCI_INT_ADMA_ERROR) {
+        printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
+        sdhci_show_adma_error(host);
        host->data->error = -EIO;
+    }

    if (host->data->error)
        sdhci_finish_data(host);
@@ -1729,7 +1768,10 @@ int sdhci_add_host(struct sdhci_host *host)
    mmc->ops = &sdhci_ops;
    mmc->f_min = host->max_clk / 256;
    mmc->f_max = host->max_clk;
-    mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+    mmc->caps = MMC_CAP_SDIO_IRQ;
+
+    if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
+        mmc->caps |= MMC_CAP_4_BIT_DATA;

    if (caps & SDHCI_CAN_DO_HISPD)
        mmc->caps |= MMC_CAP_SD_HIGHSPEED;
@@ -1802,7 +1844,7 @@ int sdhci_add_host(struct sdhci_host *host)
    /*
     * Maximum block count.
     */
-    mmc->max_blk_count = 65535;
+    mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;

    /*
     * Init tasklets.
@@ -226,6 +226,12 @@ struct sdhci_host {
#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET    (1<<19)
/* Controller has to be forced to use block size of 2048 bytes */
#define SDHCI_QUIRK_FORCE_BLK_SZ_2048           (1<<20)
+/* Controller cannot do multi-block transfers */
+#define SDHCI_QUIRK_NO_MULTIBLOCK               (1<<21)
+/* Controller can only handle 1-bit data transfers */
+#define SDHCI_QUIRK_FORCE_1_BIT_DATA            (1<<22)
+/* Controller needs 10ms delay between applying power and clock */
+#define SDHCI_QUIRK_DELAY_AFTER_POWER           (1<<23)

    int             irq;        /* Device IRQ */
    void __iomem *  ioaddr;     /* Mapped address */
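The three new quirk bits are plain flags in struct sdhci_host, so controller glue code sets them before registration. A minimal sketch under stated assumptions — foo_sdhci_probe and its platform device are hypothetical, while sdhci_alloc_host() and sdhci_add_host() are the real entry points (iomapping, clock and IRQ setup elided):

    static int __devinit foo_sdhci_probe(struct platform_device *pdev)
    {
        struct sdhci_host *host;

        /* hypothetical glue driver: allocate a host with no private data */
        host = sdhci_alloc_host(&pdev->dev, 0);
        if (IS_ERR(host))
            return PTR_ERR(host);

        /* one data line, no multi-block transfers, slow power rail */
        host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA |
                        SDHCI_QUIRK_NO_MULTIBLOCK |
                        SDHCI_QUIRK_DELAY_AFTER_POWER;

        /* sdhci_add_host() then withholds MMC_CAP_4_BIT_DATA, clamps
         * max_blk_count to 1 and inserts the 10ms power-up delay */
        return sdhci_add_host(host);
    }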
@@ -810,11 +810,11 @@ extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);

#ifdef CONFIG_MMU
extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-            unsigned long address, int write_access);
+            unsigned long address, unsigned int flags);
#else
static inline int handle_mm_fault(struct mm_struct *mm,
            struct vm_area_struct *vma, unsigned long address,
-            int write_access)
+            unsigned int flags)
{
    /* should never happen if there's no MMU */
    BUG();
@@ -10,6 +10,7 @@
#ifndef _IPC_UTIL_H
#define _IPC_UTIL_H

#include <linux/unistd.h>
+#include <linux/err.h>

#define SEQ_MULTIPLIER  (IPCMNI)
@@ -472,7 +472,7 @@ config LOCKDEP
    bool
    depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
    select STACKTRACE
-   select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND && !S390
+   select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390
    select KALLSYMS
    select KALLSYMS_ALL
lib/dma-debug.c

@@ -262,11 +262,12 @@ static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket,
         */
        matches += 1;
        match_lvl = 0;
-       entry->size      == ref->size      ? ++match_lvl : match_lvl;
-       entry->type      == ref->type      ? ++match_lvl : match_lvl;
-       entry->direction == ref->direction ? ++match_lvl : match_lvl;
+       entry->size         == ref->size         ? ++match_lvl : 0;
+       entry->type         == ref->type         ? ++match_lvl : 0;
+       entry->direction    == ref->direction    ? ++match_lvl : 0;
+       entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;

-       if (match_lvl == 3) {
+       if (match_lvl == 4) {
            /* perfect-fit - return the result */
            return entry;
        } else if (match_lvl > last_lvl) {
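The scoring statements rely on the side effect in the ternary's true arm; the false arm's value is discarded either way, so ": 0" merely makes that explicit — the substantive changes are the added sg_call_ents comparison and the perfect-fit level rising to 4. For illustration only, the same scoring written as plain conditionals:

    match_lvl = 0;
    if (entry->size == ref->size)
        ++match_lvl;
    if (entry->type == ref->type)
        ++match_lvl;
    if (entry->direction == ref->direction)
        ++match_lvl;
    if (entry->sg_call_ents == ref->sg_call_ents)
        ++match_lvl;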
@@ -873,72 +874,68 @@ static void check_for_illegal_area(struct device *dev, void *addr, u64 size)
            "[addr=%p] [size=%llu]\n", addr, size);
}

-static void check_sync(struct device *dev, dma_addr_t addr,
-              u64 size, u64 offset, int direction, bool to_cpu)
+static void check_sync(struct device *dev,
+              struct dma_debug_entry *ref,
+              bool to_cpu)
{
-   struct dma_debug_entry ref = {
-       .dev            = dev,
-       .dev_addr       = addr,
-       .size           = size,
-       .direction      = direction,
-   };
    struct dma_debug_entry *entry;
    struct hash_bucket *bucket;
    unsigned long flags;

-   bucket = get_hash_bucket(&ref, &flags);
+   bucket = get_hash_bucket(ref, &flags);

-   entry = hash_bucket_find(bucket, &ref);
+   entry = hash_bucket_find(bucket, ref);

    if (!entry) {
        err_printk(dev, NULL, "DMA-API: device driver tries "
                "to sync DMA memory it has not allocated "
                "[device address=0x%016llx] [size=%llu bytes]\n",
-               (unsigned long long)addr, size);
+               (unsigned long long)ref->dev_addr, ref->size);
        goto out;
    }

-   if ((offset + size) > entry->size) {
+   if (ref->size > entry->size) {
        err_printk(dev, entry, "DMA-API: device driver syncs"
                " DMA memory outside allocated range "
                "[device address=0x%016llx] "
-               "[allocation size=%llu bytes] [sync offset=%llu] "
-               "[sync size=%llu]\n", entry->dev_addr, entry->size,
-               offset, size);
+               "[allocation size=%llu bytes] "
+               "[sync offset+size=%llu]\n",
+               entry->dev_addr, entry->size,
+               ref->size);
    }

-   if (direction != entry->direction) {
+   if (ref->direction != entry->direction) {
        err_printk(dev, entry, "DMA-API: device driver syncs "
                "DMA memory with different direction "
                "[device address=0x%016llx] [size=%llu bytes] "
                "[mapped with %s] [synced with %s]\n",
-               (unsigned long long)addr, entry->size,
+               (unsigned long long)ref->dev_addr, entry->size,
                dir2name[entry->direction],
-               dir2name[direction]);
+               dir2name[ref->direction]);
    }

    if (entry->direction == DMA_BIDIRECTIONAL)
        goto out;

    if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
-             !(direction == DMA_TO_DEVICE))
+             !(ref->direction == DMA_TO_DEVICE))
        err_printk(dev, entry, "DMA-API: device driver syncs "
                "device read-only DMA memory for cpu "
                "[device address=0x%016llx] [size=%llu bytes] "
                "[mapped with %s] [synced with %s]\n",
-               (unsigned long long)addr, entry->size,
+               (unsigned long long)ref->dev_addr, entry->size,
                dir2name[entry->direction],
-               dir2name[direction]);
+               dir2name[ref->direction]);

    if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
-              !(direction == DMA_FROM_DEVICE))
+              !(ref->direction == DMA_FROM_DEVICE))
        err_printk(dev, entry, "DMA-API: device driver syncs "
                "device write-only DMA memory to device "
                "[device address=0x%016llx] [size=%llu bytes] "
                "[mapped with %s] [synced with %s]\n",
-               (unsigned long long)addr, entry->size,
+               (unsigned long long)ref->dev_addr, entry->size,
                dir2name[entry->direction],
-               dir2name[direction]);
+               dir2name[ref->direction]);

out:
    put_hash_bucket(bucket, &flags);
@@ -1036,19 +1033,16 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
}
EXPORT_SYMBOL(debug_dma_map_sg);

-static int get_nr_mapped_entries(struct device *dev, struct scatterlist *s)
+static int get_nr_mapped_entries(struct device *dev,
+                struct dma_debug_entry *ref)
{
-   struct dma_debug_entry *entry, ref;
+   struct dma_debug_entry *entry;
    struct hash_bucket *bucket;
    unsigned long flags;
    int mapped_ents;

-   ref.dev      = dev;
-   ref.dev_addr = sg_dma_address(s);
-   ref.size     = sg_dma_len(s),
-
-   bucket       = get_hash_bucket(&ref, &flags);
-   entry        = hash_bucket_find(bucket, &ref);
+   bucket       = get_hash_bucket(ref, &flags);
+   entry        = hash_bucket_find(bucket, ref);
    mapped_ents  = 0;

    if (entry)
@@ -1076,16 +1070,14 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
            .dev_addr       = sg_dma_address(s),
            .size           = sg_dma_len(s),
            .direction      = dir,
-           .sg_call_ents   = 0,
+           .sg_call_ents   = nelems,
        };

        if (mapped_ents && i >= mapped_ents)
            break;

-       if (!i) {
-           ref.sg_call_ents = nelems;
-           mapped_ents = get_nr_mapped_entries(dev, s);
-       }
+       if (!i)
+           mapped_ents = get_nr_mapped_entries(dev, &ref);

        check_unmap(&ref);
    }
@@ -1140,10 +1132,19 @@ EXPORT_SYMBOL(debug_dma_free_coherent);
void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                   size_t size, int direction)
{
+   struct dma_debug_entry ref;
+
    if (unlikely(global_disable))
        return;

-   check_sync(dev, dma_handle, size, 0, direction, true);
+   ref.type         = dma_debug_single;
+   ref.dev          = dev;
+   ref.dev_addr     = dma_handle;
+   ref.size         = size;
+   ref.direction    = direction;
+   ref.sg_call_ents = 0;
+
+   check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
@@ -1151,10 +1152,19 @@ void debug_dma_sync_single_for_device(struct device *dev,
                      dma_addr_t dma_handle, size_t size,
                      int direction)
{
+   struct dma_debug_entry ref;
+
    if (unlikely(global_disable))
        return;

-   check_sync(dev, dma_handle, size, 0, direction, false);
+   ref.type         = dma_debug_single;
+   ref.dev          = dev;
+   ref.dev_addr     = dma_handle;
+   ref.size         = size;
+   ref.direction    = direction;
+   ref.sg_call_ents = 0;
+
+   check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_for_device);
@@ -1163,10 +1173,19 @@ void debug_dma_sync_single_range_for_cpu(struct device *dev,
                     dma_addr_t dma_handle,
                     unsigned long offset, size_t size,
                     int direction)
{
+   struct dma_debug_entry ref;
+
    if (unlikely(global_disable))
        return;

-   check_sync(dev, dma_handle, size, offset, direction, true);
+   ref.type         = dma_debug_single;
+   ref.dev          = dev;
+   ref.dev_addr     = dma_handle;
+   ref.size         = offset + size;
+   ref.direction    = direction;
+   ref.sg_call_ents = 0;
+
+   check_sync(dev, &ref, true);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu);
@@ -1175,10 +1194,19 @@ void debug_dma_sync_single_range_for_device(struct device *dev,
                        dma_addr_t dma_handle,
                        unsigned long offset,
                        size_t size, int direction)
{
+   struct dma_debug_entry ref;
+
    if (unlikely(global_disable))
        return;

-   check_sync(dev, dma_handle, size, offset, direction, false);
+   ref.type         = dma_debug_single;
+   ref.dev          = dev;
+   ref.dev_addr     = dma_handle;
+   ref.size         = offset + size;
+   ref.direction    = direction;
+   ref.sg_call_ents = 0;
+
+   check_sync(dev, &ref, false);
}
EXPORT_SYMBOL(debug_dma_sync_single_range_for_device);
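Folding the offset into ref.size is what lets the single ref->size > entry->size test in check_sync() cover the range variants as well. A worked illustration with hypothetical driver code (not part of the patch): for a 4096-byte mapping, syncing 2048 bytes at offset 3072 gives ref.size = 3072 + 2048 = 5120 > 4096, which now trips the "outside allocated range" report:

    /* hypothetical driver code that dma-debug would now flag */
    dma_addr_t handle = dma_map_single(dev, buf, 4096, DMA_FROM_DEVICE);

    dma_sync_single_range_for_cpu(dev, handle, 3072, 2048, DMA_FROM_DEVICE);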
@@ -1192,14 +1220,24 @@ void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
        return;

    for_each_sg(sg, s, nelems, i) {
+
+       struct dma_debug_entry ref = {
+           .type           = dma_debug_sg,
+           .dev            = dev,
+           .paddr          = sg_phys(s),
+           .dev_addr       = sg_dma_address(s),
+           .size           = sg_dma_len(s),
+           .direction      = direction,
+           .sg_call_ents   = nelems,
+       };
+
        if (!i)
-           mapped_ents = get_nr_mapped_entries(dev, s);
+           mapped_ents = get_nr_mapped_entries(dev, &ref);

        if (i >= mapped_ents)
            break;

-       check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-              direction, true);
+       check_sync(dev, &ref, true);
    }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
@@ -1214,14 +1252,23 @@ void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
        return;

    for_each_sg(sg, s, nelems, i) {
+
+       struct dma_debug_entry ref = {
+           .type           = dma_debug_sg,
+           .dev            = dev,
+           .paddr          = sg_phys(s),
+           .dev_addr       = sg_dma_address(s),
+           .size           = sg_dma_len(s),
+           .direction      = direction,
+           .sg_call_ents   = nelems,
+       };
        if (!i)
-           mapped_ents = get_nr_mapped_entries(dev, s);
+           mapped_ents = get_nr_mapped_entries(dev, &ref);

        if (i >= mapped_ents)
            break;

-       check_sync(dev, sg_dma_address(s), sg_dma_len(s), 0,
-              direction, false);
+       check_sync(dev, &ref, false);
    }
}
EXPORT_SYMBOL(debug_dma_sync_sg_for_device);

mm/memory.c
@@ -1310,8 +1310,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
            cond_resched();
            while (!(page = follow_page(vma, start, foll_flags))) {
                int ret;
-               ret = handle_mm_fault(mm, vma, start,
-                       foll_flags & FOLL_WRITE);
+
+               /* FOLL_WRITE matches FAULT_FLAG_WRITE! */
+               ret = handle_mm_fault(mm, vma, start, foll_flags & FOLL_WRITE);
                if (ret & VM_FAULT_ERROR) {
                    if (ret & VM_FAULT_OOM)
                        return i ? i : -ENOMEM;
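The new comment records an invariant rather than enforcing it: passing foll_flags & FOLL_WRITE straight through only works while FOLL_WRITE and FAULT_FLAG_WRITE occupy the same bit. A hypothetical compile-time guard (not part of this patch) would pin that down:

    /* hypothetical: break the build if the two flags ever diverge */
    BUILD_BUG_ON(FOLL_WRITE != FAULT_FLAG_WRITE);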
@@ -2496,7 +2497,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
 */
static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long address, pte_t *page_table, pmd_t *pmd,
-       int write_access, pte_t orig_pte)
+       unsigned int flags, pte_t orig_pte)
{
    spinlock_t *ptl;
    struct page *page;
@@ -2572,9 +2573,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,

    inc_mm_counter(mm, anon_rss);
    pte = mk_pte(page, vma->vm_page_prot);
-   if (write_access && reuse_swap_page(page)) {
+   if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
        pte = maybe_mkwrite(pte_mkdirty(pte), vma);
-       write_access = 0;
+       flags &= ~FAULT_FLAG_WRITE;
    }
    flush_icache_page(vma, page);
    set_pte_at(mm, address, page_table, pte);
@@ -2587,7 +2588,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
    try_to_free_swap(page);
    unlock_page(page);

-   if (write_access) {
+   if (flags & FAULT_FLAG_WRITE) {
        ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
        if (ret & VM_FAULT_ERROR)
            ret &= VM_FAULT_ERROR;
@@ -2616,7 +2617,7 @@ out_page:
 */
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long address, pte_t *page_table, pmd_t *pmd,
-       int write_access)
+       unsigned int flags)
{
    struct page *page;
    spinlock_t *ptl;
@@ -2776,7 +2777,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
             * due to the bad i386 page protection. But it's valid
             * for other architectures too.
             *
-            * Note that if write_access is true, we either now have
+            * Note that if FAULT_FLAG_WRITE is set, we either now have
             * an exclusive copy of the page, or this is a shared mapping,
             * so we can make it writable and dirty to avoid having to
             * handle that later.
@@ -2847,11 +2848,10 @@ unwritable_page:

static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long address, pte_t *page_table, pmd_t *pmd,
-       int write_access, pte_t orig_pte)
+       unsigned int flags, pte_t orig_pte)
{
    pgoff_t pgoff = (((address & PAGE_MASK)
            - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-   unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);

    pte_unmap(page_table);
    return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
@@ -2868,12 +2868,12 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 */
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long address, pte_t *page_table, pmd_t *pmd,
-       int write_access, pte_t orig_pte)
+       unsigned int flags, pte_t orig_pte)
{
-   unsigned int flags = FAULT_FLAG_NONLINEAR |
-               (write_access ? FAULT_FLAG_WRITE : 0);
    pgoff_t pgoff;

+   flags |= FAULT_FLAG_NONLINEAR;
+
    if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
        return 0;
@@ -2904,7 +2904,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 */
static inline int handle_pte_fault(struct mm_struct *mm,
        struct vm_area_struct *vma, unsigned long address,
-       pte_t *pte, pmd_t *pmd, int write_access)
+       pte_t *pte, pmd_t *pmd, unsigned int flags)
{
    pte_t entry;
    spinlock_t *ptl;
@@ -2915,30 +2915,30 @@ static inline int handle_pte_fault(struct mm_struct *mm,
            if (vma->vm_ops) {
                if (likely(vma->vm_ops->fault))
                    return do_linear_fault(mm, vma, address,
-                       pte, pmd, write_access, entry);
+                       pte, pmd, flags, entry);
            }
            return do_anonymous_page(mm, vma, address,
-                        pte, pmd, write_access);
+                        pte, pmd, flags);
        }
        if (pte_file(entry))
            return do_nonlinear_fault(mm, vma, address,
-                   pte, pmd, write_access, entry);
+                   pte, pmd, flags, entry);
        return do_swap_page(mm, vma, address,
-                   pte, pmd, write_access, entry);
+                   pte, pmd, flags, entry);
    }

    ptl = pte_lockptr(mm, pmd);
    spin_lock(ptl);
    if (unlikely(!pte_same(*pte, entry)))
        goto unlock;
-   if (write_access) {
+   if (flags & FAULT_FLAG_WRITE) {
        if (!pte_write(entry))
            return do_wp_page(mm, vma, address,
                    pte, pmd, ptl, entry);
        entry = pte_mkdirty(entry);
    }
    entry = pte_mkyoung(entry);
-   if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
+   if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
        update_mmu_cache(vma, address, entry);
    } else {
        /*

@@ -2947,7 +2947,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
         * This still avoids useless tlb flushes for .text page faults
         * with threads.
         */
-       if (write_access)
+       if (flags & FAULT_FLAG_WRITE)
            flush_tlb_page(vma, address);
    }
unlock:
@@ -2959,7 +2959,7 @@ unlock:
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-       unsigned long address, int write_access)
+       unsigned long address, unsigned int flags)
{
    pgd_t *pgd;
    pud_t *pud;
@@ -2971,7 +2971,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
    count_vm_event(PGFAULT);

    if (unlikely(is_vm_hugetlb_page(vma)))
-       return hugetlb_fault(mm, vma, address, write_access);
+       return hugetlb_fault(mm, vma, address, flags);

    pgd = pgd_offset(mm, address);
    pud = pud_alloc(mm, pgd, address);
@@ -2984,7 +2984,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
    if (!pte)
        return VM_FAULT_OOM;

-   return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
+   return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}

#ifndef __PAGETABLE_PUD_FOLDED

mm/percpu.c
@@ -549,14 +549,14 @@ static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
 * @chunk: chunk of interest
 * @page_start: page index of the first page to unmap
 * @page_end: page index of the last page to unmap + 1
- * @flush: whether to flush cache and tlb or not
+ * @flush_tlb: whether to flush tlb or not
 *
 * For each cpu, unmap pages [@page_start,@page_end) out of @chunk.
- * If @flush is true, vcache is flushed before unmapping and tlb
- * after.
+ * The vcache is always flushed before unmapping; the tlb is flushed
+ * afterwards only if @flush_tlb is true.
 */
static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
-              bool flush)
+              bool flush_tlb)
{
    unsigned int last = num_possible_cpus() - 1;
    unsigned int cpu;
@@ -569,9 +569,8 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
     * the whole region at once rather than doing it for each cpu.
     * This could be an overkill but is more scalable.
     */
-   if (flush)
-       flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
-                  pcpu_chunk_addr(chunk, last, page_end));
+   flush_cache_vunmap(pcpu_chunk_addr(chunk, 0, page_start),
+              pcpu_chunk_addr(chunk, last, page_end));

    for_each_possible_cpu(cpu)
        unmap_kernel_range_noflush(

@@ -579,7 +578,7 @@ static void pcpu_unmap(struct pcpu_chunk *chunk, int page_start, int page_end,
            (page_end - page_start) << PAGE_SHIFT);

    /* ditto as flush_cache_vunmap() */
-   if (flush)
+   if (flush_tlb)
        flush_tlb_kernel_range(pcpu_chunk_addr(chunk, 0, page_start),
                       pcpu_chunk_addr(chunk, last, page_end));
}
@@ -1234,6 +1233,7 @@ static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
                      ssize_t dyn_size, ssize_t unit_size)
{
+   size_t chunk_size;
    unsigned int cpu;

    /* determine parameters and allocate */
@@ -1248,11 +1248,15 @@ ssize_t __init pcpu_embed_first_chunk(size_t static_size, size_t reserved_size,
    } else
        pcpue_unit_size = max_t(size_t, pcpue_size, PCPU_MIN_UNIT_SIZE);

-   pcpue_ptr = __alloc_bootmem_nopanic(
-                   num_possible_cpus() * pcpue_unit_size,
-                   PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-   if (!pcpue_ptr)
+   chunk_size = pcpue_unit_size * num_possible_cpus();
+
+   pcpue_ptr = __alloc_bootmem_nopanic(chunk_size, PAGE_SIZE,
+                       __pa(MAX_DMA_ADDRESS));
+   if (!pcpue_ptr) {
+       pr_warning("PERCPU: failed to allocate %zu bytes for "
+              "embedding\n", chunk_size);
        return -ENOMEM;
+   }

    /* return the leftover and copy */
    for_each_possible_cpu(cpu) {
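The warning now reports the exact size asked of the bootmem allocator. As a worked example with illustrative numbers: with pcpue_unit_size = 64 KiB and 4 possible CPUs, chunk_size = 65536 * 4 = 262144, so a failed allocation prints "PERCPU: failed to allocate 262144 bytes for embedding".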
@@ -972,8 +972,6 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, unsigned int codec_addr
            snd_hda_codec_read(codec, nid, 0,
                       AC_VERB_GET_SUBSYSTEM_ID, 0);
    }
-   if (bus->modelname)
-       codec->modelname = kstrdup(bus->modelname, GFP_KERNEL);

    /* power-up all before initialization */
    hda_set_power_state(codec,
@@ -224,6 +224,7 @@ enum {
    ALC883_ACER,
    ALC883_ACER_ASPIRE,
    ALC888_ACER_ASPIRE_4930G,
+   ALC888_ACER_ASPIRE_6530G,
    ALC888_ACER_ASPIRE_8930G,
    ALC883_MEDION,
    ALC883_MEDION_MD2,
@@ -970,7 +971,7 @@ static void alc_automute_pin(struct hda_codec *codec)
    }
}

-#if 0 /* it's broken in some acses -- temporarily disabled */
+#if 0 /* it's broken in some cases -- temporarily disabled */
static void alc_mic_automute(struct hda_codec *codec)
{
    struct alc_spec *spec = codec->spec;
@@ -1170,7 +1171,7 @@ static int alc_subsystem_id(struct hda_codec *codec,

    /* invalid SSID, check the special NID pin defcfg instead */
    /*
-    * 31~30 : port conetcivity
+    * 31~30 : port connectivity
     * 29~21 : reserve
     * 20    : PCBEEP input
     * 19~16 : Check sum (15:1)
@@ -1470,6 +1471,25 @@ static struct hda_verb alc888_acer_aspire_4930g_verbs[] = {
    { }
};

+/*
+ * ALC888 Acer Aspire 6530G model
+ */
+
+static struct hda_verb alc888_acer_aspire_6530g_verbs[] = {
+/* Bias voltage on for external mic port */
+   {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN | PIN_VREF80},
+/* Enable unsolicited event for HP jack */
+   {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
+/* Enable speaker output */
+   {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+   {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+/* Enable headphone output */
+   {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | PIN_HP},
+   {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+   {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
+   { }
+};
+
/*
 * ALC889 Acer Aspire 8930G model
 */
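Each entry in an hda_verb table is one (NID, verb, parameter) command sent to the codec, and presets play the whole zero-terminated table at init time. For illustration (assuming a valid codec pointer), the headphone-pin entry above is equivalent to a single manual write:

    /* one entry, issued by hand */
    snd_hda_codec_write(codec, 0x15, 0,
                AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | PIN_HP);

    /* what the driver does with an init_verbs table */
    snd_hda_sequence_write(codec, alc888_acer_aspire_6530g_verbs);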
@@ -1544,6 +1564,25 @@ static struct hda_input_mux alc888_2_capture_sources[2] = {
    }
};

+static struct hda_input_mux alc888_acer_aspire_6530_sources[2] = {
+   /* Internal mic only available on one ADC */
+   {
+       .num_items = 3,
+       .items = {
+           { "Ext Mic", 0x0 },
+           { "CD", 0x4 },
+           { "Int Mic", 0xb },
+       },
+   },
+   {
+       .num_items = 2,
+       .items = {
+           { "Ext Mic", 0x0 },
+           { "CD", 0x4 },
+       },
+   }
+};
+
static struct hda_input_mux alc889_capture_sources[3] = {
    /* Digital mic only available on first "ADC" */
    {
@@ -6347,7 +6386,7 @@ static struct hda_channel_mode alc882_sixstack_modes[2] = {
};

/*
- * macbook pro ALC885 can switch LineIn to LineOut without loosing Mic
+ * macbook pro ALC885 can switch LineIn to LineOut without losing Mic
 */

/*
@@ -7047,7 +7086,7 @@ static struct hda_verb alc882_auto_init_verbs[] = {
#define alc882_loopbacks    alc880_loopbacks
#endif

-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc882_pcm_analog_playback  alc880_pcm_analog_playback
#define alc882_pcm_analog_capture   alc880_pcm_analog_capture
#define alc882_pcm_digital_playback alc880_pcm_digital_playback
@@ -8068,7 +8107,7 @@ static struct snd_kcontrol_new alc883_fivestack_mixer[] = {
    { } /* end */
};

-static struct snd_kcontrol_new alc883_tagra_mixer[] = {
+static struct snd_kcontrol_new alc883_targa_mixer[] = {
    HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
    HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT),
    HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
@@ -8088,7 +8127,7 @@ static struct snd_kcontrol_new alc883_tagra_mixer[] = {
    { } /* end */
};

-static struct snd_kcontrol_new alc883_tagra_2ch_mixer[] = {
+static struct snd_kcontrol_new alc883_targa_2ch_mixer[] = {
    HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
    HDA_CODEC_MUTE("Headphone Playback Switch", 0x14, 0x0, HDA_OUTPUT),
    HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
@@ -8153,6 +8192,19 @@ static struct snd_kcontrol_new alc883_acer_aspire_mixer[] = {
    { } /* end */
};

+static struct snd_kcontrol_new alc888_acer_aspire_6530_mixer[] = {
+   HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
+   HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
+   HDA_CODEC_VOLUME("LFE Playback Volume", 0x0f, 0x0, HDA_OUTPUT),
+   HDA_BIND_MUTE("LFE Playback Switch", 0x0f, 2, HDA_INPUT),
+   HDA_CODEC_VOLUME("CD Playback Volume", 0x0b, 0x04, HDA_INPUT),
+   HDA_CODEC_MUTE("CD Playback Switch", 0x0b, 0x04, HDA_INPUT),
+   HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+   HDA_CODEC_VOLUME("Mic Boost", 0x18, 0, HDA_INPUT),
+   HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+   { } /* end */
+};
+
static struct snd_kcontrol_new alc888_lenovo_sky_mixer[] = {
    HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
    HDA_BIND_MUTE("Front Playback Switch", 0x0c, 2, HDA_INPUT),
@@ -8417,7 +8469,7 @@ static struct hda_verb alc883_2ch_fujitsu_pi2515_verbs[] = {
    { } /* end */
};

-static struct hda_verb alc883_tagra_verbs[] = {
+static struct hda_verb alc883_targa_verbs[] = {
    {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
    {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
@@ -8626,8 +8678,8 @@ static void alc883_medion_md2_init_hook(struct hda_codec *codec)
}

/* toggle speaker-output according to the hp-jack state */
-#define alc883_tagra_init_hook      alc882_targa_init_hook
-#define alc883_tagra_unsol_event    alc882_targa_unsol_event
+#define alc883_targa_init_hook      alc882_targa_init_hook
+#define alc883_targa_unsol_event    alc882_targa_unsol_event

static void alc883_clevo_m720_mic_automute(struct hda_codec *codec)
{
@@ -8957,7 +9009,7 @@ static void alc889A_mb31_unsol_event(struct hda_codec *codec, unsigned int res)
#define alc883_loopbacks    alc880_loopbacks
#endif

-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc883_pcm_analog_playback      alc880_pcm_analog_playback
#define alc883_pcm_analog_capture       alc880_pcm_analog_capture
#define alc883_pcm_analog_alt_capture   alc880_pcm_analog_alt_capture
@@ -8978,6 +9030,7 @@ static const char *alc883_models[ALC883_MODEL_LAST] = {
    [ALC883_ACER]       = "acer",
    [ALC883_ACER_ASPIRE]    = "acer-aspire",
    [ALC888_ACER_ASPIRE_4930G]  = "acer-aspire-4930g",
+   [ALC888_ACER_ASPIRE_6530G]  = "acer-aspire-6530g",
    [ALC888_ACER_ASPIRE_8930G]  = "acer-aspire-8930g",
    [ALC883_MEDION]     = "medion",
    [ALC883_MEDION_MD2] = "medion-md2",
@@ -9021,7 +9074,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
    SND_PCI_QUIRK(0x1025, 0x015e, "Acer Aspire 6930G",
        ALC888_ACER_ASPIRE_4930G),
    SND_PCI_QUIRK(0x1025, 0x0166, "Acer Aspire 6530G",
-       ALC888_ACER_ASPIRE_4930G),
+       ALC888_ACER_ASPIRE_6530G),
    /* default Acer -- disabled as it causes more problems.
     *    model=auto should work fine now
     */
@@ -9069,6 +9122,7 @@ static struct snd_pci_quirk alc883_cfg_tbl[] = {
    SND_PCI_QUIRK(0x1462, 0x7267, "MSI", ALC883_3ST_6ch_DIG),
    SND_PCI_QUIRK(0x1462, 0x7280, "MSI", ALC883_6ST_DIG),
    SND_PCI_QUIRK(0x1462, 0x7327, "MSI", ALC883_6ST_DIG),
+   SND_PCI_QUIRK(0x1462, 0x7350, "MSI", ALC883_6ST_DIG),
    SND_PCI_QUIRK(0x1462, 0xa422, "MSI", ALC883_TARGA_2ch_DIG),
    SND_PCI_QUIRK(0x147b, 0x1083, "Abit IP35-PRO", ALC883_6ST_DIG),
    SND_PCI_QUIRK(0x1558, 0x0721, "Clevo laptop M720R", ALC883_CLEVO_M720),
@@ -9165,8 +9219,8 @@ static struct alc_config_preset alc883_presets[] = {
        .input_mux = &alc883_capture_source,
    },
    [ALC883_TARGA_DIG] = {
-       .mixers = { alc883_tagra_mixer, alc883_chmode_mixer },
-       .init_verbs = { alc883_init_verbs, alc883_tagra_verbs},
+       .mixers = { alc883_targa_mixer, alc883_chmode_mixer },
+       .init_verbs = { alc883_init_verbs, alc883_targa_verbs},
        .num_dacs = ARRAY_SIZE(alc883_dac_nids),
        .dac_nids = alc883_dac_nids,
        .dig_out_nid = ALC883_DIGOUT_NID,
@@ -9174,12 +9228,12 @@ static struct alc_config_preset alc883_presets[] = {
        .channel_mode = alc883_3ST_6ch_modes,
        .need_dac_fix = 1,
        .input_mux = &alc883_capture_source,
-       .unsol_event = alc883_tagra_unsol_event,
-       .init_hook = alc883_tagra_init_hook,
+       .unsol_event = alc883_targa_unsol_event,
+       .init_hook = alc883_targa_init_hook,
    },
    [ALC883_TARGA_2ch_DIG] = {
-       .mixers = { alc883_tagra_2ch_mixer},
-       .init_verbs = { alc883_init_verbs, alc883_tagra_verbs},
+       .mixers = { alc883_targa_2ch_mixer},
+       .init_verbs = { alc883_init_verbs, alc883_targa_verbs},
        .num_dacs = ARRAY_SIZE(alc883_dac_nids),
        .dac_nids = alc883_dac_nids,
        .adc_nids = alc883_adc_nids_alt,
@@ -9188,13 +9242,13 @@ static struct alc_config_preset alc883_presets[] = {
        .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
        .channel_mode = alc883_3ST_2ch_modes,
        .input_mux = &alc883_capture_source,
-       .unsol_event = alc883_tagra_unsol_event,
-       .init_hook = alc883_tagra_init_hook,
+       .unsol_event = alc883_targa_unsol_event,
+       .init_hook = alc883_targa_init_hook,
    },
    [ALC883_TARGA_8ch_DIG] = {
        .mixers = { alc883_base_mixer, alc883_chmode_mixer },
        .init_verbs = { alc883_init_verbs, alc880_gpio3_init_verbs,
-               alc883_tagra_verbs },
+               alc883_targa_verbs },
        .num_dacs = ARRAY_SIZE(alc883_dac_nids),
        .dac_nids = alc883_dac_nids,
        .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev),
@@ -9206,8 +9260,8 @@ static struct alc_config_preset alc883_presets[] = {
        .channel_mode = alc883_4ST_8ch_modes,
        .need_dac_fix = 1,
        .input_mux = &alc883_capture_source,
-       .unsol_event = alc883_tagra_unsol_event,
-       .init_hook = alc883_tagra_init_hook,
+       .unsol_event = alc883_targa_unsol_event,
+       .init_hook = alc883_targa_init_hook,
    },
    [ALC883_ACER] = {
        .mixers = { alc883_base_mixer },
@@ -9255,6 +9309,24 @@ static struct alc_config_preset alc883_presets[] = {
        .unsol_event = alc_automute_amp_unsol_event,
        .init_hook = alc888_acer_aspire_4930g_init_hook,
    },
+   [ALC888_ACER_ASPIRE_6530G] = {
+       .mixers = { alc888_acer_aspire_6530_mixer },
+       .init_verbs = { alc883_init_verbs, alc880_gpio1_init_verbs,
+               alc888_acer_aspire_6530g_verbs },
+       .num_dacs = ARRAY_SIZE(alc883_dac_nids),
+       .dac_nids = alc883_dac_nids,
+       .num_adc_nids = ARRAY_SIZE(alc883_adc_nids_rev),
+       .adc_nids = alc883_adc_nids_rev,
+       .capsrc_nids = alc883_capsrc_nids_rev,
+       .dig_out_nid = ALC883_DIGOUT_NID,
+       .num_channel_mode = ARRAY_SIZE(alc883_3ST_2ch_modes),
+       .channel_mode = alc883_3ST_2ch_modes,
+       .num_mux_defs =
+           ARRAY_SIZE(alc888_2_capture_sources),
+       .input_mux = alc888_acer_aspire_6530_sources,
+       .unsol_event = alc_automute_amp_unsol_event,
+       .init_hook = alc888_acer_aspire_4930g_init_hook,
+   },
    [ALC888_ACER_ASPIRE_8930G] = {
        .mixers = { alc888_base_mixer,
                alc883_chmode_mixer },
@@ -9361,7 +9433,7 @@ static struct alc_config_preset alc883_presets[] = {
        .init_hook = alc888_lenovo_ms7195_front_automute,
    },
    [ALC883_HAIER_W66] = {
-       .mixers = { alc883_tagra_2ch_mixer},
+       .mixers = { alc883_targa_2ch_mixer},
        .init_verbs = { alc883_init_verbs, alc883_haier_w66_verbs},
        .num_dacs = ARRAY_SIZE(alc883_dac_nids),
        .dac_nids = alc883_dac_nids,
@@ -11131,7 +11203,7 @@ static struct hda_verb alc262_toshiba_rx1_unsol_verbs[] = {
#define alc262_loopbacks    alc880_loopbacks
#endif

-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc262_pcm_analog_playback  alc880_pcm_analog_playback
#define alc262_pcm_analog_capture   alc880_pcm_analog_capture
#define alc262_pcm_digital_playback alc880_pcm_digital_playback
@@ -12286,7 +12358,7 @@ static void alc268_auto_init_mono_speaker_out(struct hda_codec *codec)
            AC_VERB_SET_AMP_GAIN_MUTE, dac_vol2);
}

-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc268_pcm_analog_playback      alc880_pcm_analog_playback
#define alc268_pcm_analog_capture       alc880_pcm_analog_capture
#define alc268_pcm_analog_alt_capture   alc880_pcm_analog_alt_capture
@@ -13197,7 +13269,7 @@ static int alc269_auto_create_analog_input_ctls(struct alc_spec *spec,
#define alc269_loopbacks    alc880_loopbacks
#endif

-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc269_pcm_analog_playback  alc880_pcm_analog_playback
#define alc269_pcm_analog_capture   alc880_pcm_analog_capture
#define alc269_pcm_digital_playback alc880_pcm_digital_playback
@@ -14059,7 +14131,7 @@ static void alc861_toshiba_unsol_event(struct hda_codec *codec,
    alc861_toshiba_automute(codec);
}

-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc861_pcm_analog_playback  alc880_pcm_analog_playback
#define alc861_pcm_analog_capture   alc880_pcm_analog_capture
#define alc861_pcm_digital_playback alc880_pcm_digital_playback
@@ -14582,7 +14654,7 @@ static hda_nid_t alc861vd_dac_nids[4] = {

/* dac_nids for ALC660vd are in a different order - according to
 * Realtek's driver.
- * This should probably tesult in a different mixer for 6stack models
+ * This should probably result in a different mixer for 6stack models
 * of ALC660vd codecs, but for now there is only 3stack mixer
 * - and it is the same as in 861vd.
 * adc_nids in ALC660vd are (is) the same as in 861vd
@@ -15027,7 +15099,7 @@ static void alc861vd_dallas_init_hook(struct hda_codec *codec)
#define alc861vd_loopbacks  alc880_loopbacks
#endif

-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc861vd_pcm_analog_playback    alc880_pcm_analog_playback
#define alc861vd_pcm_analog_capture     alc880_pcm_analog_capture
#define alc861vd_pcm_digital_playback   alc880_pcm_digital_playback
@@ -15206,7 +15278,7 @@ static void alc861vd_auto_init_hp_out(struct hda_codec *codec)
    hda_nid_t pin;

    pin = spec->autocfg.hp_pins[0];
-   if (pin) /* connect to front and  use dac 0 */
+   if (pin) /* connect to front and use dac 0 */
        alc861vd_auto_set_output_and_unmute(codec, pin, PIN_HP, 0);
    pin = spec->autocfg.speaker_pins[0];
    if (pin)
@@ -16669,7 +16741,7 @@ static struct snd_kcontrol_new alc272_nc10_mixer[] = {
#endif


-/* pcm configuration: identiacal with ALC880 */
+/* pcm configuration: identical with ALC880 */
#define alc662_pcm_analog_playback  alc880_pcm_analog_playback
#define alc662_pcm_analog_capture   alc880_pcm_analog_capture
#define alc662_pcm_digital_playback alc880_pcm_digital_playback
@@ -297,9 +297,9 @@ static int txx9aclc_pcm_new(struct snd_card *card, struct snd_soc_dai *dai,
static bool filter(struct dma_chan *chan, void *param)
{
    struct txx9aclc_dmadata *dmadata = param;
-   char devname[BUS_ID_SIZE + 2];
+   char devname[20 + 2]; /* FIXME: old BUS_ID_SIZE + 2 */

-   sprintf(devname, "%s.%d", dmadata->dma_res->name,
+   snprintf(devname, sizeof(devname), "%s.%d", dmadata->dma_res->name,
        (int)dmadata->dma_res->start);
    if (strcmp(dev_name(chan->device->dev), devname) == 0) {
        chan->private = &dmadata->dma_slave;
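BUS_ID_SIZE disappeared with the bus_id-to-dev_name() conversion, hence the fixed 20 + 2 buffer; switching to snprintf() bounds the write at the same time. A standalone illustration with hypothetical values (not driver code):

    char devname[20 + 2];

    /* sprintf() could overrun devname given a long enough name;
     * snprintf() writes at most sizeof(devname) - 1 chars plus NUL */
    snprintf(devname, sizeof(devname), "%s.%d", "txx9aclc-ac97", 0);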
@@ -199,8 +199,9 @@ static int snd_usb_caiaq_pcm_prepare(struct snd_pcm_substream *substream)
        dev->period_out_count[index] = BYTES_PER_SAMPLE + 1;
        dev->audio_out_buf_pos[index] = BYTES_PER_SAMPLE + 1;
    } else {
-       dev->period_in_count[index] = BYTES_PER_SAMPLE;
-       dev->audio_in_buf_pos[index] = BYTES_PER_SAMPLE;
+       int in_pos = (dev->spec.data_alignment == 2) ? 0 : 2;
+
+       dev->period_in_count[index] = BYTES_PER_SAMPLE + in_pos;
+       dev->audio_in_buf_pos[index] = BYTES_PER_SAMPLE + in_pos;
    }

    if (dev->streaming)
@@ -35,7 +35,7 @@
#include "input.h"

MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
-MODULE_DESCRIPTION("caiaq USB audio, version 1.3.16");
+MODULE_DESCRIPTION("caiaq USB audio, version 1.3.17");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Native Instruments, RigKontrol2},"
            "{Native Instruments, RigKontrol3},"