Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
    drivers/net/tokenring/tmspci.c
    drivers/net/ucc_geth_mii.c
commit 508827ff0a
@@ -0,0 +1,35 @@
+Options for the ipv6 module are supplied as parameters at load time.
+
+Module options may be given as command line arguments to the insmod
+or modprobe command, but are usually specified in either the
+/etc/modules.conf or /etc/modprobe.conf configuration file, or in a
+distro-specific configuration file.
+
+The available ipv6 module parameters are listed below.  If a parameter
+is not specified the default value is used.
+
+The parameters are as follows:
+
+disable
+
+    Specifies whether to load the IPv6 module, but disable all
+    its functionality.  This might be used when another module
+    has a dependency on the IPv6 module being loaded, but no
+    IPv6 addresses or operations are desired.
+
+    The possible values and their effects are:
+
+    0
+        IPv6 is enabled.
+
+        This is the default value.
+
+    1
+        IPv6 is disabled.
+
+        No IPv6 addresses will be added to interfaces, and
+        it will not be possible to open an IPv6 socket.
+
+        A reboot is required to enable IPv6.
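As an illustration of the mechanism the new document describes (this example is not part of the patch itself), disabling IPv6 at load time is normally done with a one-line option in the module configuration; the exact file location varies by distribution:

    # /etc/modprobe.conf, or a distro-specific file under /etc/modprobe.d/
    options ipv6 disable=1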
@@ -4,7 +4,7 @@ Introduction
 ============
 
 The Chelsio T3 ASIC based Adapters (S310, S320, S302, S304, Mezz cards, etc.
-series of products) supports iSCSI acceleration and iSCSI Direct Data Placement
+series of products) support iSCSI acceleration and iSCSI Direct Data Placement
 (DDP) where the hardware handles the expensive byte touching operations, such
 as CRC computation and verification, and direct DMA to the final host memory
 destination:

@@ -31,9 +31,9 @@ destination:
    the TCP segments onto the wire.  It handles TCP retransmission if
    needed.
 
-   On receving, S3 h/w recovers the iSCSI PDU by reassembling TCP
+   On receiving, S3 h/w recovers the iSCSI PDU by reassembling TCP
    segments, separating the header and data, calculating and verifying
-   the digests, then forwards the header to the host. The payload data,
+   the digests, then forwarding the header to the host. The payload data,
    if possible, will be directly placed into the pre-posted host DDP
    buffer. Otherwise, the payload data will be sent to the host too.
 
@@ -68,9 +68,8 @@ The following steps need to be taken to accelerates the open-iscsi initiator:
    sure the ip address is unique in the network.
 
 3. edit /etc/iscsi/iscsid.conf
-   The default setting for MaxRecvDataSegmentLength (131072) is too big,
-   replace "node.conn[0].iscsi.MaxRecvDataSegmentLength" to be a value no
-   bigger than 15360 (for example 8192):
+   The default setting for MaxRecvDataSegmentLength (131072) is too big;
+   replace with a value no bigger than 15360 (for example 8192):
 
    node.conn[0].iscsi.MaxRecvDataSegmentLength = 8192
@@ -2466,7 +2466,7 @@ S: Maintained
 
 ISDN SUBSYSTEM
 P: Karsten Keil
-M: kkeil@suse.de
+M: isdn@linux-pingi.de
 L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
 W: http://www.isdn4linux.de
 T: git kernel.org:/pub/scm/linux/kernel/kkeil/isdn-2.6.git

Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 29
-EXTRAVERSION = -rc6
+EXTRAVERSION = -rc7
 NAME = Erotic Pickled Herring
 
 # *DOCUMENTATION*
@@ -233,12 +233,13 @@ static void __init cacheid_init(void)
     unsigned int cachetype = read_cpuid_cachetype();
     unsigned int arch = cpu_architecture();
 
-    if (arch >= CPU_ARCH_ARMv7) {
-        cacheid = CACHEID_VIPT_NONALIASING;
-        if ((cachetype & (3 << 14)) == 1 << 14)
-            cacheid |= CACHEID_ASID_TAGGED;
-    } else if (arch >= CPU_ARCH_ARMv6) {
-        if (cachetype & (1 << 23))
+    if (arch >= CPU_ARCH_ARMv6) {
+        if ((cachetype & (7 << 29)) == 4 << 29) {
+            /* ARMv7 register format */
+            cacheid = CACHEID_VIPT_NONALIASING;
+            if ((cachetype & (3 << 14)) == 1 << 14)
+                cacheid |= CACHEID_ASID_TAGGED;
+        } else if (cachetype & (1 << 23))
             cacheid = CACHEID_VIPT_ALIASING;
         else
             cacheid = CACHEID_VIPT_NONALIASING;
@@ -332,7 +332,6 @@ static int at91_pm_enter(suspend_state_t state)
             at91_sys_read(AT91_AIC_IPR) & at91_sys_read(AT91_AIC_IMR));
 
 error:
-    sdram_selfrefresh_disable();
     target_state = PM_SUSPEND_ON;
     at91_irq_resume();
     at91_gpio_resume();
@@ -81,7 +81,7 @@ static inline void __init ldp_init_smc911x(void)
     }
 
     ldp_smc911x_resources[0].start = cs_mem_base + 0x0;
-    ldp_smc911x_resources[0].end   = cs_mem_base + 0xf;
+    ldp_smc911x_resources[0].end   = cs_mem_base + 0xff;
     udelay(100);
 
     eth_gpio = LDP_SMC911X_GPIO;
@@ -23,7 +23,8 @@ ENTRY(v6_early_abort)
 #ifdef CONFIG_CPU_32v6K
     clrex
 #else
-    strex   r0, r1, [sp]            @ Clear the exclusive monitor
+    sub     r1, sp, #4              @ Get unused stack location
+    strex   r0, r1, [r1]            @ Clear the exclusive monitor
 #endif
     mrc     p15, 0, r1, c5, c0, 0   @ get FSR
     mrc     p15, 0, r0, c6, c0, 0   @ get FAR
@@ -55,7 +55,7 @@ static void s3c_irq_eint_unmask(unsigned int irq)
     u32 mask;
 
     mask = __raw_readl(S3C64XX_EINT0MASK);
-    mask |= eint_irq_to_bit(irq);
+    mask &= ~eint_irq_to_bit(irq);
     __raw_writel(mask, S3C64XX_EINT0MASK);
 }
 
@@ -1,6 +1,5 @@
 #ifndef __ASM_SECCOMP_H
 
-#include <linux/thread_info.h>
 #include <linux/unistd.h>
 
 #define __NR_seccomp_read __NR_read
@@ -210,5 +210,10 @@ struct compat_shmid64_ds {
     compat_ulong_t __unused6;
 };
 
+static inline int is_compat_task(void)
+{
+    return test_thread_flag(TIF_32BIT);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_COMPAT_H */
@@ -1,10 +1,6 @@
 #ifndef _ASM_POWERPC_SECCOMP_H
 #define _ASM_POWERPC_SECCOMP_H
 
-#ifdef __KERNEL__
-#include <linux/thread_info.h>
-#endif
-
 #include <linux/unistd.h>
 
 #define __NR_seccomp_read __NR_read
@@ -142,6 +142,10 @@ static void __init gef_sbc610_nec_fixup(struct pci_dev *pdev)
 {
     unsigned int val;
 
+    /* Do not do the fixup on other platforms! */
+    if (!machine_is(gef_sbc610))
+        return;
+
     printk(KERN_INFO "Running NEC uPD720101 Fixup\n");
 
     /* Ensure ports 1, 2, 3, 4 & 5 are enabled */
@@ -556,7 +556,7 @@ static void __exit aes_s390_fini(void)
 module_init(aes_s390_init);
 module_exit(aes_s390_fini);
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS("aes-all");
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
 MODULE_LICENSE("GPL");
@@ -240,4 +240,9 @@ struct compat_shmid64_ds {
     unsigned int __unused2;
 };
 
+static inline int is_compat_task(void)
+{
+    return test_thread_flag(TIF_32BIT);
+}
+
 #endif /* _ASM_SPARC64_COMPAT_H */
@@ -1,11 +1,5 @@
 #ifndef _ASM_SECCOMP_H
 
-#include <linux/thread_info.h> /* already defines TIF_32BIT */
-
-#ifndef TIF_32BIT
-#error "unexpected TIF_32BIT on sparc64"
-#endif
-
 #include <linux/unistd.h>
 
 #define __NR_seccomp_read __NR_read
@@ -1,12 +1,6 @@
 #ifndef _ASM_X86_SECCOMP_32_H
 #define _ASM_X86_SECCOMP_32_H
 
-#include <linux/thread_info.h>
-
-#ifdef TIF_32BIT
-#error "unexpected TIF_32BIT on i386"
-#endif
-
 #include <linux/unistd.h>
 
 #define __NR_seccomp_read __NR_read
@@ -1,14 +1,6 @@
 #ifndef _ASM_X86_SECCOMP_64_H
 #define _ASM_X86_SECCOMP_64_H
 
-#include <linux/thread_info.h>
-
-#ifdef TIF_32BIT
-#error "unexpected TIF_32BIT on x86_64"
-#else
-#define TIF_32BIT TIF_IA32
-#endif
-
 #include <linux/unistd.h>
 #include <asm/ia32_unistd.h>
 
@@ -1388,7 +1388,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
 #ifdef CONFIG_X86_32
 # define IS_IA32    1
 #elif defined CONFIG_IA32_EMULATION
-# define IS_IA32    test_thread_flag(TIF_IA32)
+# define IS_IA32    is_compat_task()
 #else
 # define IS_IA32    0
 #endif
@@ -714,6 +714,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
     pos = start_pfn << PAGE_SHIFT;
     end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
             << (PMD_SHIFT - PAGE_SHIFT);
+    if (end_pfn > (end >> PAGE_SHIFT))
+        end_pfn = end >> PAGE_SHIFT;
     if (start_pfn < end_pfn) {
         nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
         pos = end_pfn << PAGE_SHIFT;
@@ -20,23 +20,16 @@
 #include <asm/pat.h>
 #include <linux/module.h>
 
-#ifdef CONFIG_X86_PAE
-int
-is_io_mapping_possible(resource_size_t base, unsigned long size)
-{
-    return 1;
-}
-#else
-int
-is_io_mapping_possible(resource_size_t base, unsigned long size)
+int is_io_mapping_possible(resource_size_t base, unsigned long size)
 {
+#ifndef CONFIG_X86_PAE
     /* There is no way to map greater than 1 << 32 address without PAE */
     if (base + size > 0x100000000ULL)
         return 0;
+#endif
     return 1;
 }
-#endif
+EXPORT_SYMBOL_GPL(is_io_mapping_possible);
 
 /* Map 'pfn' using fixed map 'type' and protections 'prot'
  */
@@ -32,11 +32,14 @@ struct kmmio_fault_page {
     struct list_head list;
     struct kmmio_fault_page *release_next;
     unsigned long page; /* location of the fault page */
+    bool old_presence; /* page presence prior to arming */
+    bool armed;
 
     /*
      * Number of times this page has been registered as a part
      * of a probe. If zero, page is disarmed and this may be freed.
-     * Used only by writers (RCU).
+     * Used only by writers (RCU) and post_kmmio_handler().
+     * Protected by kmmio_lock, when linked into kmmio_page_table.
      */
     int count;
 };
@@ -105,57 +108,85 @@ static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
     return NULL;
 }
 
-static void set_page_present(unsigned long addr, bool present,
-                unsigned int *pglevel)
+static void set_pmd_presence(pmd_t *pmd, bool present, bool *old)
+{
+    pmdval_t v = pmd_val(*pmd);
+    *old = !!(v & _PAGE_PRESENT);
+    v &= ~_PAGE_PRESENT;
+    if (present)
+        v |= _PAGE_PRESENT;
+    set_pmd(pmd, __pmd(v));
+}
+
+static void set_pte_presence(pte_t *pte, bool present, bool *old)
+{
+    pteval_t v = pte_val(*pte);
+    *old = !!(v & _PAGE_PRESENT);
+    v &= ~_PAGE_PRESENT;
+    if (present)
+        v |= _PAGE_PRESENT;
+    set_pte_atomic(pte, __pte(v));
+}
+
+static int set_page_presence(unsigned long addr, bool present, bool *old)
 {
-    pteval_t pteval;
-    pmdval_t pmdval;
     unsigned int level;
-    pmd_t *pmd;
     pte_t *pte = lookup_address(addr, &level);
 
     if (!pte) {
         pr_err("kmmio: no pte for page 0x%08lx\n", addr);
-        return;
+        return -1;
     }
 
-    if (pglevel)
-        *pglevel = level;
-
     switch (level) {
     case PG_LEVEL_2M:
-        pmd = (pmd_t *)pte;
-        pmdval = pmd_val(*pmd) & ~_PAGE_PRESENT;
-        if (present)
-            pmdval |= _PAGE_PRESENT;
-        set_pmd(pmd, __pmd(pmdval));
+        set_pmd_presence((pmd_t *)pte, present, old);
         break;
 
     case PG_LEVEL_4K:
-        pteval = pte_val(*pte) & ~_PAGE_PRESENT;
-        if (present)
-            pteval |= _PAGE_PRESENT;
-        set_pte_atomic(pte, __pte(pteval));
+        set_pte_presence(pte, present, old);
         break;
 
     default:
         pr_err("kmmio: unexpected page level 0x%x.\n", level);
-        return;
+        return -1;
     }
 
     __flush_tlb_one(addr);
+    return 0;
 }
 
-/** Mark the given page as not present. Access to it will trigger a fault. */
-static void arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/*
+ * Mark the given page as not present. Access to it will trigger a fault.
+ *
+ * Struct kmmio_fault_page is protected by RCU and kmmio_lock, but the
+ * protection is ignored here. RCU read lock is assumed held, so the struct
+ * will not disappear unexpectedly. Furthermore, the caller must guarantee,
+ * that double arming the same virtual address (page) cannot occur.
+ *
+ * Double disarming on the other hand is allowed, and may occur when a fault
+ * and mmiotrace shutdown happen simultaneously.
+ */
+static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-    set_page_present(page & PAGE_MASK, false, pglevel);
+    int ret;
+    WARN_ONCE(f->armed, KERN_ERR "kmmio page already armed.\n");
+    if (f->armed) {
+        pr_warning("kmmio double-arm: page 0x%08lx, ref %d, old %d\n",
+                    f->page, f->count, f->old_presence);
+    }
+    ret = set_page_presence(f->page, false, &f->old_presence);
+    WARN_ONCE(ret < 0, KERN_ERR "kmmio arming 0x%08lx failed.\n", f->page);
+    f->armed = true;
+    return ret;
 }
 
-/** Mark the given page as present. */
-static void disarm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+/** Restore the given page to saved presence state. */
+static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
 {
-    set_page_present(page & PAGE_MASK, true, pglevel);
+    bool tmp;
+    int ret = set_page_presence(f->page, f->old_presence, &tmp);
+    WARN_ONCE(ret < 0,
+            KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+    f->armed = false;
 }
 
 /*
@@ -202,28 +233,32 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 
     ctx = &get_cpu_var(kmmio_ctx);
     if (ctx->active) {
-        disarm_kmmio_fault_page(faultpage->page, NULL);
         if (addr == ctx->addr) {
             /*
-             * On SMP we sometimes get recursive probe hits on the
-             * same address. Context is already saved, fall out.
+             * A second fault on the same page means some other
+             * condition needs handling by do_page_fault(), the
+             * page really not being present is the most common.
              */
-            pr_debug("kmmio: duplicate probe hit on CPU %d, for "
-                        "address 0x%08lx.\n",
-                        smp_processor_id(), addr);
-            ret = 1;
-            goto no_kmmio_ctx;
-        }
-        /*
-         * Prevent overwriting already in-flight context.
-         * This should not happen, let's hope disarming at least
-         * prevents a panic.
-         */
-        pr_emerg("kmmio: recursive probe hit on CPU %d, "
+            pr_debug("kmmio: secondary hit for 0x%08lx CPU %d.\n",
+                        addr, smp_processor_id());
+
+            if (!faultpage->old_presence)
+                pr_info("kmmio: unexpected secondary hit for "
+                        "address 0x%08lx on CPU %d.\n", addr,
+                        smp_processor_id());
+        } else {
+            /*
+             * Prevent overwriting already in-flight context.
+             * This should not happen, let's hope disarming at
+             * least prevents a panic.
+             */
+            pr_emerg("kmmio: recursive probe hit on CPU %d, "
                     "for address 0x%08lx. Ignoring.\n",
                     smp_processor_id(), addr);
             pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
                     ctx->addr);
+            disarm_kmmio_fault_page(faultpage);
+        }
         goto no_kmmio_ctx;
     }
     ctx->active++;
@@ -244,7 +279,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
     regs->flags &= ~X86_EFLAGS_IF;
 
     /* Now we set present bit in PTE and single step. */
-    disarm_kmmio_fault_page(ctx->fpage->page, NULL);
+    disarm_kmmio_fault_page(ctx->fpage);
 
     /*
      * If another cpu accesses the same page while we are stepping,
@@ -275,7 +310,7 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
     struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
 
     if (!ctx->active) {
-        pr_debug("kmmio: spurious debug trap on CPU %d.\n",
+        pr_warning("kmmio: spurious debug trap on CPU %d.\n",
                     smp_processor_id());
         goto out;
     }
@@ -283,7 +318,11 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
     if (ctx->probe && ctx->probe->post_handler)
         ctx->probe->post_handler(ctx->probe, condition, regs);
 
-    arm_kmmio_fault_page(ctx->fpage->page, NULL);
+    /* Prevent racing against release_kmmio_fault_page(). */
+    spin_lock(&kmmio_lock);
+    if (ctx->fpage->count)
+        arm_kmmio_fault_page(ctx->fpage);
+    spin_unlock(&kmmio_lock);
 
     regs->flags &= ~X86_EFLAGS_TF;
     regs->flags |= ctx->saved_flags;
@@ -315,20 +354,24 @@ static int add_kmmio_fault_page(unsigned long page)
     f = get_kmmio_fault_page(page);
     if (f) {
         if (!f->count)
-            arm_kmmio_fault_page(f->page, NULL);
+            arm_kmmio_fault_page(f);
         f->count++;
         return 0;
     }
 
-    f = kmalloc(sizeof(*f), GFP_ATOMIC);
+    f = kzalloc(sizeof(*f), GFP_ATOMIC);
     if (!f)
         return -1;
 
     f->count = 1;
     f->page = page;
-    list_add_rcu(&f->list, kmmio_page_list(f->page));
 
-    arm_kmmio_fault_page(f->page, NULL);
+    if (arm_kmmio_fault_page(f)) {
+        kfree(f);
+        return -1;
+    }
+
+    list_add_rcu(&f->list, kmmio_page_list(f->page));
 
     return 0;
 }
@@ -347,7 +390,7 @@ static void release_kmmio_fault_page(unsigned long page,
     f->count--;
     BUG_ON(f->count < 0);
     if (!f->count) {
-        disarm_kmmio_fault_page(f->page, NULL);
+        disarm_kmmio_fault_page(f);
         f->release_next = *release_list;
         *release_list = f;
     }
@@ -11,6 +11,7 @@
 #include <linux/bootmem.h>
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
@@ -868,6 +869,7 @@ pgprot_t pgprot_writecombine(pgprot_t prot)
     else
         return pgprot_noncached(prot);
 }
+EXPORT_SYMBOL_GPL(pgprot_writecombine);
 
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
 
@@ -1,5 +1,5 @@
 /*
- * Written by Pekka Paalanen, 2008 <pq@iki.fi>
+ * Written by Pekka Paalanen, 2008-2009 <pq@iki.fi>
  */
 #include <linux/module.h>
 #include <linux/io.h>
@@ -9,35 +9,74 @@
 
 static unsigned long mmio_address;
 module_param(mmio_address, ulong, 0);
-MODULE_PARM_DESC(mmio_address, "Start address of the mapping of 16 kB.");
+MODULE_PARM_DESC(mmio_address, " Start address of the mapping of 16 kB "
+                "(or 8 MB if read_far is non-zero).");
+
+static unsigned long read_far = 0x400100;
+module_param(read_far, ulong, 0);
+MODULE_PARM_DESC(read_far, " Offset of a 32-bit read within 8 MB "
+                "(default: 0x400100).");
+
+static unsigned v16(unsigned i)
+{
+    return i * 12 + 7;
+}
+
+static unsigned v32(unsigned i)
+{
+    return i * 212371 + 13;
+}
 
 static void do_write_test(void __iomem *p)
 {
     unsigned int i;
+    pr_info(MODULE_NAME ": write test.\n");
     mmiotrace_printk("Write test.\n");
 
     for (i = 0; i < 256; i++)
         iowrite8(i, p + i);
 
     for (i = 1024; i < (5 * 1024); i += 2)
-        iowrite16(i * 12 + 7, p + i);
+        iowrite16(v16(i), p + i);
 
     for (i = (5 * 1024); i < (16 * 1024); i += 4)
-        iowrite32(i * 212371 + 13, p + i);
+        iowrite32(v32(i), p + i);
 }
 
 static void do_read_test(void __iomem *p)
 {
     unsigned int i;
+    unsigned errs[3] = { 0 };
+    pr_info(MODULE_NAME ": read test.\n");
     mmiotrace_printk("Read test.\n");
 
     for (i = 0; i < 256; i++)
-        ioread8(p + i);
+        if (ioread8(p + i) != i)
+            ++errs[0];
 
     for (i = 1024; i < (5 * 1024); i += 2)
-        ioread16(p + i);
+        if (ioread16(p + i) != v16(i))
+            ++errs[1];
 
     for (i = (5 * 1024); i < (16 * 1024); i += 4)
-        ioread32(p + i);
+        if (ioread32(p + i) != v32(i))
+            ++errs[2];
+
+    mmiotrace_printk("Read errors: 8-bit %d, 16-bit %d, 32-bit %d.\n",
+                errs[0], errs[1], errs[2]);
 }
 
-static void do_test(void)
+static void do_read_far_test(void __iomem *p)
 {
-    void __iomem *p = ioremap_nocache(mmio_address, 0x4000);
+    pr_info(MODULE_NAME ": read far test.\n");
+    mmiotrace_printk("Read far test.\n");
+
+    ioread32(p + read_far);
+}
+
+static void do_test(unsigned long size)
+{
+    void __iomem *p = ioremap_nocache(mmio_address, size);
     if (!p) {
         pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
         return;
@@ -45,11 +84,15 @@ static void do_test(void)
     mmiotrace_printk("ioremap returned %p.\n", p);
     do_write_test(p);
     do_read_test(p);
+    if (read_far && read_far < size - 4)
+        do_read_far_test(p);
     iounmap(p);
 }
 
 static int __init init(void)
 {
+    unsigned long size = (read_far) ? (8 << 20) : (16 << 10);
+
     if (mmio_address == 0) {
         pr_err(MODULE_NAME ": you have to use the module argument "
                 "mmio_address.\n");
@@ -58,10 +101,11 @@ static int __init init(void)
         return -ENXIO;
     }
 
-    pr_warning(MODULE_NAME ": WARNING: mapping 16 kB @ 0x%08lx "
-            "in PCI address space, and writing "
-            "rubbish in there.\n", mmio_address);
-    do_test();
+    pr_warning(MODULE_NAME ": WARNING: mapping %lu kB @ 0x%08lx in PCI "
+            "address space, and writing 16 kB of rubbish in there.\n",
+            size >> 10, mmio_address);
+    do_test(size);
+    pr_info(MODULE_NAME ": All done.\n");
     return 0;
 }
 
@@ -78,8 +78,18 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
     if (cpu_has_arch_perfmon) {
         union cpuid10_eax eax;
         eax.full = cpuid_eax(0xa);
-        if (counter_width < eax.split.bit_width)
-            counter_width = eax.split.bit_width;
+
+        /*
+         * For Core2 (family 6, model 15), don't reset the
+         * counter width:
+         */
+        if (!(eax.split.version_id == 0 &&
+            current_cpu_data.x86 == 6 &&
+            current_cpu_data.x86_model == 15)) {
+
+            if (counter_width < eax.split.bit_width)
+                counter_width = eax.split.bit_width;
+        }
     }
 
     /* clear all counters */
crypto/api.c
@@ -215,8 +215,19 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
     mask &= ~(CRYPTO_ALG_LARVAL | CRYPTO_ALG_DEAD);
     type &= mask;
 
-    alg = try_then_request_module(crypto_alg_lookup(name, type, mask),
-                                  name);
+    alg = crypto_alg_lookup(name, type, mask);
+    if (!alg) {
+        char tmp[CRYPTO_MAX_ALG_NAME];
+
+        request_module(name);
+
+        if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask) &&
+            snprintf(tmp, sizeof(tmp), "%s-all", name) < sizeof(tmp))
+            request_module(tmp);
+
+        alg = crypto_alg_lookup(name, type, mask);
+    }
+
     if (alg)
         return crypto_is_larval(alg) ? crypto_larval_wait(alg) : alg;
 
@@ -173,7 +173,7 @@ skbfree(struct sk_buff *skb)
         return;
     while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
         msleep(Sms);
-    if (i <= 0) {
+    if (i < 0) {
         printk(KERN_ERR
             "aoe: %s holds ref: %s\n",
             skb->dev ? skb->dev->name : "netif",
@@ -457,10 +457,12 @@ static int init_ixp_crypto(void)
     if (!ctx_pool) {
         goto err;
     }
-    ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0);
+    ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
+                "ixp_crypto:out", NULL);
     if (ret)
         goto err;
-    ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0);
+    ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
+                "ixp_crypto:in", NULL);
     if (ret) {
         qmgr_release_queue(SEND_QID);
         goto err;
@@ -489,4 +489,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS("aes-all");
@@ -304,7 +304,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("sha1");
-MODULE_ALIAS("sha256");
+MODULE_ALIAS("sha1-all");
+MODULE_ALIAS("sha256-all");
 MODULE_ALIAS("sha1-padlock");
 MODULE_ALIAS("sha256-padlock");
@@ -1401,7 +1401,7 @@ MODULE_ALIAS("platform:iop-adma");
 
 static struct platform_driver iop_adma_driver = {
     .probe      = iop_adma_probe,
-    .remove     = iop_adma_remove,
+    .remove     = __devexit_p(iop_adma_remove),
     .driver     = {
         .owner  = THIS_MODULE,
         .name   = "iop-adma",
@@ -1287,7 +1287,7 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
 
 static struct platform_driver mv_xor_driver = {
     .probe      = mv_xor_probe,
-    .remove     = mv_xor_remove,
+    .remove     = __devexit_p(mv_xor_remove),
     .driver     = {
         .owner  = THIS_MODULE,
         .name   = MV_XOR_NAME,
@@ -420,7 +420,7 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
             dev->sigdata.lock = NULL;
             master->lock.hw_lock = NULL;   /* SHM removed */
             master->lock.file_priv = NULL;
-            wake_up_interruptible(&master->lock.lock_queue);
+            wake_up_interruptible_all(&master->lock.lock_queue);
         }
         break;
     case _DRM_AGP:
@@ -484,6 +484,7 @@ int drm_release(struct inode *inode, struct file *filp)
     mutex_lock(&dev->struct_mutex);
 
     if (file_priv->is_master) {
+        struct drm_master *master = file_priv->master;
         struct drm_file *temp;
         list_for_each_entry(temp, &dev->filelist, lhead) {
             if ((temp->master == file_priv->master) &&
@@ -491,6 +492,19 @@ int drm_release(struct inode *inode, struct file *filp)
                 temp->authenticated = 0;
         }
 
+        /**
+         * Since the master is disappearing, so is the
+         * possibility to lock.
+         */
+
+        if (master->lock.hw_lock) {
+            if (dev->sigdata.lock == master->lock.hw_lock)
+                dev->sigdata.lock = NULL;
+            master->lock.hw_lock = NULL;
+            master->lock.file_priv = NULL;
+            wake_up_interruptible_all(&master->lock.lock_queue);
+        }
+
         if (file_priv->minor->master == file_priv->master) {
             /* drop the reference held my the minor */
             drm_master_put(&file_priv->minor->master);
@@ -80,6 +80,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
         __set_current_state(TASK_INTERRUPTIBLE);
         if (!master->lock.hw_lock) {
             /* Device has been unregistered */
+            send_sig(SIGTERM, current, 0);
             ret = -EINTR;
             break;
         }
@@ -93,7 +94,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
         /* Contention */
         schedule();
         if (signal_pending(current)) {
-            ret = -ERESTARTSYS;
+            ret = -EINTR;
             break;
         }
     }
@@ -146,14 +146,6 @@ static void drm_master_destroy(struct kref *kref)
 
     drm_ht_remove(&master->magiclist);
 
-    if (master->lock.hw_lock) {
-        if (dev->sigdata.lock == master->lock.hw_lock)
-            dev->sigdata.lock = NULL;
-        master->lock.hw_lock = NULL;
-        master->lock.file_priv = NULL;
-        wake_up_interruptible(&master->lock.lock_queue);
-    }
-
     drm_free(master, sizeof(*master), DRM_MEM_DRIVER);
 }
 
@@ -176,7 +168,7 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
         file_priv->minor->master != file_priv->master) {
         mutex_lock(&dev->struct_mutex);
         file_priv->minor->master = drm_master_get(file_priv->master);
-        mutex_lock(&dev->struct_mutex);
+        mutex_unlock(&dev->struct_mutex);
     }
 
     return 0;
@@ -383,12 +383,13 @@ int i915_irq_emit(struct drm_device *dev, void *data,
     drm_i915_irq_emit_t *emit = data;
     int result;
 
-    RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
-
     if (!dev_priv) {
         DRM_ERROR("called with no initialization\n");
         return -EINVAL;
     }
 
+    RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
     mutex_lock(&dev->struct_mutex);
     result = i915_emit_irq(dev);
     mutex_unlock(&dev->struct_mutex);
@@ -482,7 +482,7 @@ mv64xxx_i2c_map_regs(struct platform_device *pd,
     return 0;
 }
 
-static void __devexit
+static void
 mv64xxx_i2c_unmap_regs(struct mv64xxx_i2c_data *drv_data)
 {
     if (drv_data->reg_base) {
@@ -577,7 +577,7 @@ mv64xxx_i2c_remove(struct platform_device *dev)
 
 static struct platform_driver mv64xxx_i2c_driver = {
     .probe  = mv64xxx_i2c_probe,
-    .remove = mv64xxx_i2c_remove,
+    .remove = __devexit_p(mv64xxx_i2c_remove),
     .driver = {
         .owner  = THIS_MODULE,
         .name   = MV64XXX_I2C_CTLR_NAME,
@@ -839,7 +839,7 @@ static void atkbd_disconnect(struct serio *serio)
  */
 static void atkbd_dell_laptop_keymap_fixup(struct atkbd *atkbd)
 {
-    const unsigned int forced_release_keys[] = {
+    static const unsigned int forced_release_keys[] = {
         0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8f, 0x93,
     };
     int i;
@@ -856,7 +856,7 @@ static void atkbd_dell_laptop_keymap_fixup(struct atkbd *atkbd)
  */
 static void atkbd_hp_keymap_fixup(struct atkbd *atkbd)
 {
-    const unsigned int forced_release_keys[] = {
+    static const unsigned int forced_release_keys[] = {
         0x94,
     };
     int i;
@@ -209,8 +209,8 @@ static int __devinit bfin_kpad_probe(struct platform_device *pdev)
         goto out;
     }
 
-    if (!pdata->debounce_time || !pdata->debounce_time > MAX_MULT ||
-        !pdata->coldrive_time || !pdata->coldrive_time > MAX_MULT) {
+    if (!pdata->debounce_time || pdata->debounce_time > MAX_MULT ||
+        !pdata->coldrive_time || pdata->coldrive_time > MAX_MULT) {
         printk(KERN_ERR DRV_NAME
             ": Invalid Debounce/Columdrive Time from pdata\n");
         bfin_write_KPAD_MSEL(0xFF0);   /* Default MSEL */
@@ -288,7 +288,7 @@ static int corgikbd_resume(struct platform_device *dev)
 #define corgikbd_resume NULL
 #endif
 
-static int __init corgikbd_probe(struct platform_device *pdev)
+static int __devinit corgikbd_probe(struct platform_device *pdev)
 {
     struct corgikbd *corgikbd;
     struct input_dev *input_dev;
@@ -368,7 +368,7 @@ static int __init corgikbd_probe(struct platform_device *pdev)
     return err;
 }
 
-static int corgikbd_remove(struct platform_device *pdev)
+static int __devexit corgikbd_remove(struct platform_device *pdev)
 {
     int i;
     struct corgikbd *corgikbd = platform_get_drvdata(pdev);
@@ -388,7 +388,7 @@ static int corgikbd_remove(struct platform_device *pdev)
 
 static struct platform_driver corgikbd_driver = {
     .probe      = corgikbd_probe,
-    .remove     = corgikbd_remove,
+    .remove     = __devexit_p(corgikbd_remove),
     .suspend    = corgikbd_suspend,
     .resume     = corgikbd_resume,
     .driver     = {
@@ -397,7 +397,7 @@ static struct platform_driver corgikbd_driver = {
     },
 };
 
-static int __devinit corgikbd_init(void)
+static int __init corgikbd_init(void)
 {
     return platform_driver_register(&corgikbd_driver);
 }
@@ -279,7 +279,7 @@ static int omap_kp_resume(struct platform_device *dev)
 #define omap_kp_resume NULL
 #endif
 
-static int __init omap_kp_probe(struct platform_device *pdev)
+static int __devinit omap_kp_probe(struct platform_device *pdev)
 {
     struct omap_kp *omap_kp;
     struct input_dev *input_dev;
@@ -422,7 +422,7 @@ err1:
     return -EINVAL;
 }
 
-static int omap_kp_remove(struct platform_device *pdev)
+static int __devexit omap_kp_remove(struct platform_device *pdev)
 {
     struct omap_kp *omap_kp = platform_get_drvdata(pdev);
 
@@ -454,7 +454,7 @@ static int omap_kp_remove(struct platform_device *pdev)
 
 static struct platform_driver omap_kp_driver = {
     .probe      = omap_kp_probe,
-    .remove     = omap_kp_remove,
+    .remove     = __devexit_p(omap_kp_remove),
     .suspend    = omap_kp_suspend,
     .resume     = omap_kp_resume,
     .driver     = {
@@ -463,7 +463,7 @@ static struct platform_driver omap_kp_driver = {
     },
 };
 
-static int __devinit omap_kp_init(void)
+static int __init omap_kp_init(void)
 {
     printk(KERN_INFO "OMAP Keypad Driver\n");
     return platform_driver_register(&omap_kp_driver);
@@ -343,7 +343,7 @@ static int spitzkbd_resume(struct platform_device *dev)
 #define spitzkbd_resume NULL
 #endif
 
-static int __init spitzkbd_probe(struct platform_device *dev)
+static int __devinit spitzkbd_probe(struct platform_device *dev)
 {
     struct spitzkbd *spitzkbd;
     struct input_dev *input_dev;
@@ -444,7 +444,7 @@ static int __init spitzkbd_probe(struct platform_device *dev)
     return err;
 }
 
-static int spitzkbd_remove(struct platform_device *dev)
+static int __devexit spitzkbd_remove(struct platform_device *dev)
 {
     int i;
     struct spitzkbd *spitzkbd = platform_get_drvdata(dev);
@@ -470,7 +470,7 @@ static int spitzkbd_remove(struct platform_device *dev)
 
 static struct platform_driver spitzkbd_driver = {
     .probe      = spitzkbd_probe,
-    .remove     = spitzkbd_remove,
+    .remove     = __devexit_p(spitzkbd_remove),
     .suspend    = spitzkbd_suspend,
     .resume     = spitzkbd_resume,
     .driver     = {
@@ -479,7 +479,7 @@ static struct platform_driver spitzkbd_driver = {
     },
 };
 
-static int __devinit spitzkbd_init(void)
+static int __init spitzkbd_init(void)
 {
     return platform_driver_register(&spitzkbd_driver);
 }
@@ -70,7 +70,7 @@ config MOUSE_PS2_SYNAPTICS
 config MOUSE_PS2_LIFEBOOK
     bool "Fujitsu Lifebook PS/2 mouse protocol extension" if EMBEDDED
     default y
-    depends on MOUSE_PS2
+    depends on MOUSE_PS2 && X86
     help
       Say Y here if you have a Fujitsu B-series Lifebook PS/2
      TouchScreen connected to your system.
@@ -542,7 +542,7 @@ int elantech_detect(struct psmouse *psmouse, int set_properties)
         ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
         ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
         ps2_command(ps2dev,  param, PSMOUSE_CMD_GETINFO)) {
-        pr_err("elantech.c: sending Elantech magic knock failed.\n");
+        pr_debug("elantech.c: sending Elantech magic knock failed.\n");
         return -1;
     }
 
@@ -551,8 +551,27 @@ int elantech_detect(struct psmouse *psmouse, int set_properties)
      * set of magic numbers
      */
     if (param[0] != 0x3c || param[1] != 0x03 || param[2] != 0xc8) {
-        pr_info("elantech.c: unexpected magic knock result 0x%02x, 0x%02x, 0x%02x.\n",
-            param[0], param[1], param[2]);
+        pr_debug("elantech.c: "
+             "unexpected magic knock result 0x%02x, 0x%02x, 0x%02x.\n",
+             param[0], param[1], param[2]);
+        return -1;
+    }
+
+    /*
+     * Query touchpad's firmware version and see if it reports known
+     * value to avoid mis-detection. Logitech mice are known to respond
+     * to Elantech magic knock and there might be more.
+     */
+    if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) {
+        pr_debug("elantech.c: failed to query firmware version.\n");
+        return -1;
+    }
+
+    pr_debug("elantech.c: Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",
+         param[0], param[1], param[2]);
+
+    if (param[0] == 0 || param[1] != 0) {
+        pr_debug("elantech.c: Probably not a real Elantech touchpad. Aborting.\n");
         return -1;
     }
 
@@ -600,8 +619,7 @@ int elantech_init(struct psmouse *psmouse)
     int i, error;
     unsigned char param[3];
 
-    etd = kzalloc(sizeof(struct elantech_data), GFP_KERNEL);
-    psmouse->private = etd;
+    psmouse->private = etd = kzalloc(sizeof(struct elantech_data), GFP_KERNEL);
     if (!etd)
         return -1;
 
@@ -610,14 +628,12 @@ int elantech_init(struct psmouse *psmouse)
         etd->parity[i] = etd->parity[i & (i - 1)] ^ 1;
 
     /*
-     * Find out what version hardware this is
+     * Do the version query again so we can store the result
      */
     if (synaptics_send_cmd(psmouse, ETP_FW_VERSION_QUERY, param)) {
         pr_err("elantech.c: failed to query firmware version.\n");
         goto init_fail;
     }
-    pr_info("elantech.c: Elantech version query result 0x%02x, 0x%02x, 0x%02x.\n",
-        param[0], param[1], param[2]);
     etd->fw_version_maj = param[0];
     etd->fw_version_min = param[2];
 
@@ -83,7 +83,7 @@ static int write_tbcr(struct pxa930_trkball *trkball, int v)
 
     __raw_writel(v, trkball->mmio_base + TBCR);
 
-    while (i--) {
+    while (--i) {
         if (__raw_readl(trkball->mmio_base + TBCR) == v)
             break;
         msleep(1);
@@ -182,11 +182,6 @@ static int synaptics_identify(struct psmouse *psmouse)
 
 static int synaptics_query_hardware(struct psmouse *psmouse)
 {
-    int retries = 0;
-
-    while ((retries++ < 3) && psmouse_reset(psmouse))
-        /* empty */;
-
     if (synaptics_identify(psmouse))
         return -1;
     if (synaptics_model_id(psmouse))
@@ -582,6 +577,8 @@ static int synaptics_reconnect(struct psmouse *psmouse)
     struct synaptics_data *priv = psmouse->private;
     struct synaptics_data old_priv = *priv;
 
+    psmouse_reset(psmouse);
+
     if (synaptics_detect(psmouse, 0))
         return -1;
 
@@ -640,6 +637,8 @@ int synaptics_init(struct psmouse *psmouse)
     if (!priv)
         return -1;
 
+    psmouse_reset(psmouse);
+
     if (synaptics_query_hardware(psmouse)) {
         printk(KERN_ERR "Unable to query Synaptics hardware.\n");
         goto init_fail;
@@ -57,7 +57,7 @@ static int amba_kmi_write(struct serio *io, unsigned char val)
     struct amba_kmi_port *kmi = io->port_data;
     unsigned int timeleft = 10000; /* timeout in 100ms */
 
-    while ((readb(KMISTAT) & KMISTAT_TXEMPTY) == 0 && timeleft--)
+    while ((readb(KMISTAT) & KMISTAT_TXEMPTY) == 0 && --timeleft)
         udelay(10);
 
     if (timeleft)
@@ -129,8 +129,8 @@ static int amba_kmi_probe(struct amba_device *dev, void *id)
     io->write       = amba_kmi_write;
     io->open        = amba_kmi_open;
     io->close       = amba_kmi_close;
-    strlcpy(io->name, dev->dev.bus_id, sizeof(io->name));
-    strlcpy(io->phys, dev->dev.bus_id, sizeof(io->phys));
+    strlcpy(io->name, dev_name(&dev->dev), sizeof(io->name));
+    strlcpy(io->phys, dev_name(&dev->dev), sizeof(io->phys));
     io->port_data   = kmi;
     io->dev.parent  = &dev->dev;
 
@@ -359,7 +359,7 @@ static int __init gscps2_probe(struct parisc_device *dev)
 
     snprintf(serio->name, sizeof(serio->name), "GSC PS/2 %s",
         (ps2port->id == GSC_ID_KEYBOARD) ? "keyboard" : "mouse");
-    strlcpy(serio->phys, dev->dev.bus_id, sizeof(serio->phys));
+    strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
     serio->id.type      = SERIO_8042;
     serio->write        = gscps2_write;
     serio->open         = gscps2_open;
@@ -246,8 +246,8 @@ static int __devinit ps2_probe(struct sa1111_dev *dev)
     serio->write        = ps2_write;
     serio->open         = ps2_open;
     serio->close        = ps2_close;
-    strlcpy(serio->name, dev->dev.bus_id, sizeof(serio->name));
-    strlcpy(serio->phys, dev->dev.bus_id, sizeof(serio->phys));
+    strlcpy(serio->name, dev_name(&dev->dev), sizeof(serio->name));
+    strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
     serio->port_data    = ps2if;
     serio->dev.parent   = &dev->dev;
     ps2if->io           = serio;
@@ -236,7 +236,7 @@ static int __devinit atmel_tsadcc_probe(struct platform_device *pdev)
     ts_dev->bufferedmeasure = 0;
 
     snprintf(ts_dev->phys, sizeof(ts_dev->phys),
-         "%s/input0", pdev->dev.bus_id);
+         "%s/input0", dev_name(&pdev->dev));
 
     input_dev->name = "atmel touch screen controller";
     input_dev->phys = ts_dev->phys;
@@ -268,7 +268,7 @@ static int corgits_resume(struct platform_device *dev)
 #define corgits_resume NULL
 #endif
 
-static int __init corgits_probe(struct platform_device *pdev)
+static int __devinit corgits_probe(struct platform_device *pdev)
 {
     struct corgi_ts *corgi_ts;
     struct input_dev *input_dev;

@@ -343,7 +343,7 @@ static int __init corgits_probe(struct platform_device *pdev)
     return err;
 }
 
-static int corgits_remove(struct platform_device *pdev)
+static int __devexit corgits_remove(struct platform_device *pdev)
 {
     struct corgi_ts *corgi_ts = platform_get_drvdata(pdev);
 

@@ -352,12 +352,13 @@ static int corgits_remove(struct platform_device *pdev)
     corgi_ts->machinfo->put_hsync();
     input_unregister_device(corgi_ts->input);
     kfree(corgi_ts);
+
     return 0;
 }
 
 static struct platform_driver corgits_driver = {
     .probe = corgits_probe,
-    .remove = corgits_remove,
+    .remove = __devexit_p(corgits_remove),
     .suspend = corgits_suspend,
     .resume = corgits_resume,
     .driver = {

@@ -366,7 +367,7 @@ static struct platform_driver corgits_driver = {
     },
 };
 
-static int __devinit corgits_init(void)
+static int __init corgits_init(void)
 {
     return platform_driver_register(&corgits_driver);
 }
@@ -289,7 +289,8 @@ static int tsc2007_probe(struct i2c_client *client,
 
     pdata->init_platform_hw();
 
-    snprintf(ts->phys, sizeof(ts->phys), "%s/input0", client->dev.bus_id);
+    snprintf(ts->phys, sizeof(ts->phys),
+         "%s/input0", dev_name(&client->dev));
 
     input_dev->name = "TSC2007 Touchscreen";
     input_dev->phys = ts->phys;
@@ -60,6 +60,10 @@ static int swap_xy;
 module_param(swap_xy, bool, 0644);
 MODULE_PARM_DESC(swap_xy, "If set X and Y axes are swapped.");
 
+static int hwcalib_xy;
+module_param(hwcalib_xy, bool, 0644);
+MODULE_PARM_DESC(hwcalib_xy, "If set hw-calibrated X/Y are used if available");
+
 /* device specifc data/functions */
 struct usbtouch_usb;
 struct usbtouch_device_info {

@@ -118,6 +122,7 @@ enum {
 
 #define USB_DEVICE_HID_CLASS(vend, prod) \
     .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS \
+        | USB_DEVICE_ID_MATCH_INT_PROTOCOL \
         | USB_DEVICE_ID_MATCH_DEVICE, \
     .idVendor = (vend), \
     .idProduct = (prod), \

@@ -260,8 +265,13 @@ static int panjit_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
 
 static int mtouch_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
 {
-    dev->x = (pkt[8] << 8) | pkt[7];
-    dev->y = (pkt[10] << 8) | pkt[9];
+    if (hwcalib_xy) {
+        dev->x = (pkt[4] << 8) | pkt[3];
+        dev->y = 0xffff - ((pkt[6] << 8) | pkt[5]);
+    } else {
+        dev->x = (pkt[8] << 8) | pkt[7];
+        dev->y = (pkt[10] << 8) | pkt[9];
+    }
     dev->touch = (pkt[2] & 0x40) ? 1 : 0;
 
     return 1;

@@ -294,6 +304,12 @@ static int mtouch_init(struct usbtouch_usb *usbtouch)
             return ret;
     }
 
+    /* Default min/max xy are the raw values, override if using hw-calib */
+    if (hwcalib_xy) {
+        input_set_abs_params(usbtouch->input, ABS_X, 0, 0xffff, 0, 0);
+        input_set_abs_params(usbtouch->input, ABS_Y, 0, 0xffff, 0, 0);
+    }
+
     return 0;
 }
 #endif
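These usbtouchscreen hunks gate two coordinate decodings behind the new hwcalib_xy module parameter: when it is set, the MicroTouch/3M report is read from the hardware-calibrated fields (bytes 3 to 6, with Y inverted against 0xffff) and the axis ranges are widened to 0-0xffff. A reduced sketch of that packet decode, assuming a report laid out as in the hunk; the struct and main() here are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the hwcalib_xy decode shown in the mtouch_read_data() hunk.
     * Packet offsets are taken from the diff; everything else is a stand-in. */
    struct point { uint16_t x, y; int touch; };

    static int hwcalib_xy = 1;   /* module parameter in the real driver */

    static void mtouch_decode(const unsigned char *pkt, struct point *p)
    {
        if (hwcalib_xy) {
            p->x = (pkt[4] << 8) | pkt[3];
            p->y = 0xffff - ((pkt[6] << 8) | pkt[5]);  /* hw-calibrated Y is inverted */
        } else {
            p->x = (pkt[8] << 8) | pkt[7];
            p->y = (pkt[10] << 8) | pkt[9];
        }
        p->touch = (pkt[2] & 0x40) ? 1 : 0;
    }

    int main(void)
    {
        unsigned char pkt[16] = { 0, 0, 0x40, 0x34, 0x12, 0x00, 0x80 };
        struct point p;

        mtouch_decode(pkt, &p);
        printf("x=%u y=%u touch=%d\n", (unsigned)p.x, (unsigned)p.y, p.touch);
        return 0;
    }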
@@ -46,8 +46,8 @@ static int uvc_input_init(struct uvc_device *dev)
     usb_to_input_id(udev, &input->id);
     input->dev.parent = &dev->intf->dev;
 
-    set_bit(EV_KEY, input->evbit);
-    set_bit(BTN_0, input->keybit);
+    __set_bit(EV_KEY, input->evbit);
+    __set_bit(KEY_CAMERA, input->keybit);
 
     if ((ret = input_register_device(input)) < 0)
         goto error;

@@ -70,8 +70,10 @@ static void uvc_input_cleanup(struct uvc_device *dev)
 static void uvc_input_report_key(struct uvc_device *dev, unsigned int code,
     int value)
 {
-    if (dev->input)
+    if (dev->input) {
         input_report_key(dev->input, code, value);
+        input_sync(dev->input);
+    }
 }
 
 #else

@@ -96,7 +98,7 @@ static void uvc_event_streaming(struct uvc_device *dev, __u8 *data, int len)
             return;
         uvc_trace(UVC_TRACE_STATUS, "Button (intf %u) %s len %d\n",
             data[1], data[3] ? "pressed" : "released", len);
-        uvc_input_report_key(dev, BTN_0, data[3]);
+        uvc_input_report_key(dev, KEY_CAMERA, data[3]);
     } else {
         uvc_trace(UVC_TRACE_STATUS, "Stream %u error event %02x %02x "
             "len %d.\n", data[1], data[2], data[3], len);
@@ -91,9 +91,9 @@ MODULE_PARM_DESC(mpt_msi_enable_fc, " Enable MSI Support for FC \
         controllers (default=0)");
 
 static int mpt_msi_enable_sas;
-module_param(mpt_msi_enable_sas, int, 1);
+module_param(mpt_msi_enable_sas, int, 0);
 MODULE_PARM_DESC(mpt_msi_enable_sas, " Enable MSI Support for SAS \
-        controllers (default=1)");
+        controllers (default=0)");
 
 
 static int mpt_channel_mapping;
@@ -107,6 +107,7 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = {
 
 static const struct sdhci_pci_fixes sdhci_cafe = {
     .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
+          SDHCI_QUIRK_NO_BUSY_IRQ |
           SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
 };
 

@@ -1291,8 +1291,11 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
         if (host->cmd->data)
             DBG("Cannot wait for busy signal when also "
                 "doing a data transfer");
-        else
+        else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ))
             return;
+
+        /* The controller does not support the end-of-busy IRQ,
+         * fall through and take the SDHCI_INT_RESPONSE */
     }
 
     if (intmask & SDHCI_INT_RESPONSE)

@@ -208,6 +208,8 @@ struct sdhci_host {
 #define SDHCI_QUIRK_BROKEN_TIMEOUT_VAL (1<<12)
 /* Controller has an issue with buffer bits for small transfers */
 #define SDHCI_QUIRK_BROKEN_SMALL_PIO (1<<13)
+/* Controller does not provide transfer-complete interrupt when not busy */
+#define SDHCI_QUIRK_NO_BUSY_IRQ (1<<14)
 
     int irq;                /* Device IRQ */
     void __iomem *ioaddr;   /* Mapped address */
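The three sdhci hunks above work together: a new SDHCI_QUIRK_NO_BUSY_IRQ bit is defined, the CaFe controller sets it, and the command-interrupt path only waits for the end-of-busy interrupt when the quirk is absent, otherwise it falls through and handles SDHCI_INT_RESPONSE right away. A compressed sketch of that quirk-gated decision, with simplified stand-in types rather than the real sdhci structures:

    #include <stdio.h>

    /* Sketch of the quirk-gated wait from the sdhci_cmd_irq() hunk. */
    #define QUIRK_NO_BUSY_IRQ (1u << 14)

    struct host { unsigned int quirks; int cmd_has_data; };

    static void cmd_irq(const struct host *h)
    {
        if (!h->cmd_has_data && !(h->quirks & QUIRK_NO_BUSY_IRQ)) {
            printf("wait for the end-of-busy interrupt\n");
            return;
        }
        /* controller never raises the busy IRQ: take the response path now */
        printf("handle command response\n");
    }

    int main(void)
    {
        struct host cafe = { .quirks = QUIRK_NO_BUSY_IRQ, .cmd_has_data = 0 };
        struct host plain = { .quirks = 0, .cmd_has_data = 0 };

        cmd_irq(&cafe);
        cmd_irq(&plain);
        return 0;
    }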
@@ -149,7 +149,7 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
 
 static struct platform_driver orion_nand_driver = {
     .probe = orion_nand_probe,
-    .remove = orion_nand_remove,
+    .remove = __devexit_p(orion_nand_remove),
     .driver = {
         .name = "orion_nand",
         .owner = THIS_MODULE,
@@ -4,7 +4,7 @@
 #
 
 obj-$(CONFIG_ARM_AM79C961A) += am79c961a.o
-obj-$(CONFIG_ARM_ETHERH) += etherh.o ../8390.o
+obj-$(CONFIG_ARM_ETHERH) += etherh.o
 obj-$(CONFIG_ARM_ETHER3) += ether3.o
 obj-$(CONFIG_ARM_ETHER1) += ether1.o
 obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o

@@ -641,15 +641,15 @@ static const struct net_device_ops etherh_netdev_ops = {
     .ndo_open = etherh_open,
     .ndo_stop = etherh_close,
     .ndo_set_config = etherh_set_config,
-    .ndo_start_xmit = ei_start_xmit,
-    .ndo_tx_timeout = ei_tx_timeout,
-    .ndo_get_stats = ei_get_stats,
-    .ndo_set_multicast_list = ei_set_multicast_list,
+    .ndo_start_xmit = __ei_start_xmit,
+    .ndo_tx_timeout = __ei_tx_timeout,
+    .ndo_get_stats = __ei_get_stats,
+    .ndo_set_multicast_list = __ei_set_multicast_list,
     .ndo_validate_addr = eth_validate_addr,
     .ndo_set_mac_address = eth_mac_addr,
     .ndo_change_mtu = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-    .ndo_poll_controller = ei_poll,
+    .ndo_poll_controller = __ei_poll,
 #endif
 };
 
@@ -560,7 +560,7 @@ ks8695_reset(struct ks8695_priv *ksp)
         msleep(1);
     }
 
-    if (reset_timeout == 0) {
+    if (reset_timeout < 0) {
         dev_crit(ksp->dev,
              "Timeout waiting for DMA engines to reset\n");
         /* And blithely carry on */
@@ -4121,7 +4121,7 @@ static int bond_neigh_setup(struct net_device *dev, struct neigh_parms *parms)
         const struct net_device_ops *slave_ops
             = slave->dev->netdev_ops;
         if (slave_ops->ndo_neigh_setup)
-            return slave_ops->ndo_neigh_setup(dev, parms);
+            return slave_ops->ndo_neigh_setup(slave->dev, parms);
     }
     return 0;
 }
@@ -129,7 +129,7 @@ int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 static int fsl_pq_mdio_reset(struct mii_bus *bus)
 {
     struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv;
-    unsigned int timeout = PHY_INIT_TIMEOUT;
+    int timeout = PHY_INIT_TIMEOUT;
 
     mutex_lock(&bus->mdio_lock);
 

@@ -145,7 +145,7 @@ static int fsl_pq_mdio_reset(struct mii_bus *bus)
 
     mutex_unlock(&bus->mdio_lock);
 
-    if(timeout == 0) {
+    if (timeout < 0) {
         printk(KERN_ERR "%s: The MII Bus is stuck!\n",
                 bus->name);
         return -EBUSY;
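Both fsl_pq_mdio hunks, like the ks8695 change above and the sungem change further below, fix the same class of bug: a countdown written as `while (... && timeout--)` leaves the counter at -1 when it expires, so the exhaustion test has to be `timeout < 0` on a signed counter rather than `timeout == 0`. A small sketch of the corrected shape, with a hypothetical bus_is_busy() predicate standing in for the register poll:

    #include <stdbool.h>
    #include <stdio.h>

    /* Sketch of the signed-countdown timeout fixed in the MDIO reset hunk. */
    #define PHY_INIT_TIMEOUT 100000

    static bool bus_is_busy(void) { return true; }   /* simulate a stuck bus */

    int main(void)
    {
        int timeout = PHY_INIT_TIMEOUT;              /* signed on purpose */

        while (bus_is_busy() && timeout--)
            ;                                        /* the driver would cpu_relax() here */

        if (timeout < 0) {                           /* post-decrement leaves -1 on expiry */
            fprintf(stderr, "The MII Bus is stuck!\n");
            return 1;
        }
        return 0;
    }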
@@ -956,13 +956,14 @@ jme_process_receive(struct jme_adapter *jme, int limit)
         goto out_inc;
 
     i = atomic_read(&rxring->next_to_clean);
-    while (limit-- > 0) {
+    while (limit > 0) {
         rxdesc = rxring->desc;
         rxdesc += i;
 
         if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
             !(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
             goto out;
+        --limit;
 
         desccnt = rxdesc->descwb.desccnt & RXWBDCNT_DCNT;
 
@@ -1035,7 +1035,8 @@ static int el3_rx(struct net_device *dev, int worklimit)
     DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
           dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
     while (!((rx_status = inw(ioaddr + RxStatus)) & 0x8000) &&
-            (--worklimit >= 0)) {
+            worklimit > 0) {
+        worklimit--;
         if (rx_status & 0x4000) { /* Error, update stats. */
             short error = rx_status & 0x3800;
             dev->stats.rx_errors++;
@@ -857,7 +857,8 @@ static int el3_rx(struct net_device *dev)
     DEBUG(3, "%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n",
           dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
     while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
-            (--worklimit >= 0)) {
+            worklimit > 0) {
+        worklimit--;
         if (rx_status & 0x4000) { /* Error, update stats. */
             short error = rx_status & 0x3800;
             dev->stats.rx_errors++;
@@ -42,6 +42,16 @@
 #define SMC_USE_16BIT 0
 #define SMC_USE_32BIT 1
 #define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
+#elif defined(CONFIG_ARCH_OMAP34XX)
+#define SMC_USE_16BIT 0
+#define SMC_USE_32BIT 1
+#define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
+#define SMC_MEM_RESERVED 1
+#elif defined(CONFIG_ARCH_OMAP24XX)
+#define SMC_USE_16BIT 0
+#define SMC_USE_32BIT 1
+#define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
+#define SMC_MEM_RESERVED 1
 #else
 /*
  * Default configuration

@@ -675,6 +685,7 @@ smc_pxa_dma_outsl(struct smc911x_local *lp, u_long physaddr,
 #define CHIP_9116 0x0116
 #define CHIP_9117 0x0117
 #define CHIP_9118 0x0118
+#define CHIP_9211 0x9211
 #define CHIP_9215 0x115A
 #define CHIP_9217 0x117A
 #define CHIP_9218 0x118A

@@ -689,6 +700,7 @@ static const struct chip_id chip_ids[] = {
     { CHIP_9116, "LAN9116" },
     { CHIP_9117, "LAN9117" },
     { CHIP_9118, "LAN9118" },
+    { CHIP_9211, "LAN9211" },
     { CHIP_9215, "LAN9215" },
     { CHIP_9217, "LAN9217" },
     { CHIP_9218, "LAN9218" },
@@ -1229,7 +1229,7 @@ static void gem_reset(struct gem *gp)
             break;
     } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
 
-    if (limit <= 0)
+    if (limit < 0)
         printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
 
     if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
@@ -121,11 +121,6 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
         goto err_out_trdev;
     }
 
-    ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
-              dev->name, dev);
-    if (ret)
-        goto err_out_region;
-
     dev->base_addr = pci_ioaddr;
     dev->irq = pci_irq_line;
     dev->dma = 0;

@@ -142,7 +137,7 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
     ret = tmsdev_init(dev, &pdev->dev);
     if (ret) {
         printk("%s: unable to get memory for dev->priv.\n", dev->name);
-        goto err_out_irq;
+        goto err_out_region;
     }
 
     tp = netdev_priv(dev);

@@ -159,20 +154,25 @@ static int __devinit tms_pci_attach(struct pci_dev *pdev, const struct pci_devic
 
     dev->netdev_ops = &tms380tr_netdev_ops;
 
+    ret = request_irq(pdev->irq, tms380tr_interrupt, IRQF_SHARED,
+              dev->name, dev);
+    if (ret)
+        goto err_out_tmsdev;
+
     pci_set_drvdata(pdev, dev);
     SET_NETDEV_DEV(dev, &pdev->dev);
 
     ret = register_netdev(dev);
     if (ret)
-        goto err_out_tmsdev;
+        goto err_out_irq;
 
     return 0;
 
+err_out_irq:
+    free_irq(pdev->irq, dev);
 err_out_tmsdev:
     pci_set_drvdata(pdev, NULL);
     tmsdev_term(dev);
-err_out_irq:
-    free_irq(pdev->irq, dev);
 err_out_region:
     release_region(pci_ioaddr, TMS_PCI_IO_EXTENT);
 err_out_trdev:
@@ -633,6 +633,10 @@ static const struct usb_device_id products[] = {
     },
     {
         USB_DEVICE(0x0a47, 0x9601),    /* Hirose USB-100 */
+        .driver_info = (unsigned long)&dm9601_info,
+    },
+    {
+        USB_DEVICE(0x0fe6, 0x8101),    /* DM9601 USB to Fast Ethernet Adapter */
         .driver_info = (unsigned long)&dm9601_info,
     },
     {},            // END
@@ -20,6 +20,7 @@
 #include <linux/list.h>
 #include <linux/netdevice.h>
 #include <linux/scatterlist.h>
+#include <linux/skbuff.h>
 #include <scsi/libiscsi_tcp.h>
 
 /* from cxgb3 LLD */

@@ -113,6 +114,26 @@ struct cxgb3i_endpoint {
     struct cxgb3i_conn *cconn;
 };
 
+/**
+ * struct cxgb3i_task_data - private iscsi task data
+ *
+ * @nr_frags: # of coalesced page frags (from scsi sgl)
+ * @frags: coalesced page frags (from scsi sgl)
+ * @skb: tx pdu skb
+ * @offset: data offset for the next pdu
+ * @count: max. possible pdu payload
+ * @sgoffset: offset to the first sg entry for a given offset
+ */
+#define MAX_PDU_FRAGS ((ULP2_MAX_PDU_PAYLOAD + 512 - 1) / 512)
+struct cxgb3i_task_data {
+    unsigned short nr_frags;
+    skb_frag_t frags[MAX_PDU_FRAGS];
+    struct sk_buff *skb;
+    unsigned int offset;
+    unsigned int count;
+    unsigned int sgoffset;
+};
+
 int cxgb3i_iscsi_init(void);
 void cxgb3i_iscsi_cleanup(void);
 
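The new struct cxgb3i_task_data is not allocated on its own: a later cxgb3i_iscsi.c hunk enlarges the per-task allocation passed to iscsi_session_setup() so the cxgb3i data sits immediately after struct iscsi_tcp_task in the same dd_data block, and the pdu code recovers it by pointer arithmetic. A minimal sketch of that "private data appended to dd_data" layout, using placeholder types rather than the libiscsi definitions:

    #include <stdio.h>
    #include <stdlib.h>

    /* Placeholder types; in the driver these are struct iscsi_tcp_task and
     * struct cxgb3i_task_data. */
    struct tcp_task { int itt; };
    struct task_data { int nr_frags; };

    int main(void)
    {
        /* one allocation sized for both, as the session-setup hunk requests */
        void *dd_data = calloc(1, sizeof(struct tcp_task) + sizeof(struct task_data));
        if (!dd_data)
            return 1;

        struct tcp_task *tcp = dd_data;
        struct task_data *tdata = (void *)((char *)dd_data + sizeof(*tcp));

        tcp->itt = 42;
        tdata->nr_frags = 3;
        printf("itt=%d nr_frags=%d\n", tcp->itt, tdata->nr_frags);

        free(dd_data);
        return 0;
    }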
@@ -639,10 +639,11 @@ static int ddp_init(struct t3cdev *tdev)
     write_unlock(&cxgb3i_ddp_rwlock);
 
     ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x "
-            "pkt %u,%u.\n",
+            "pkt %u/%u, %u/%u.\n",
             ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits,
             ddp->idx_mask, ddp->rsvd_tag_mask,
-            ddp->max_txsz, ddp->max_rxsz);
+            ddp->max_txsz, uinfo.max_txsz,
+            ddp->max_rxsz, uinfo.max_rxsz);
     return 0;
 
 free_ddp_map:

@@ -654,8 +655,8 @@ free_ddp_map:
  * cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource
  * @tdev: t3cdev adapter
  * @tformat: tag format
- * @txsz: max tx pkt size, filled in by this func.
- * @rxsz: max rx pkt size, filled in by this func.
+ * @txsz: max tx pdu payload size, filled in by this func.
+ * @rxsz: max rx pdu payload size, filled in by this func.
  * initialize the ddp pagepod manager for a given adapter if needed and
  * setup the tag format for a given iscsi entity
  */

@@ -685,10 +686,12 @@ int cxgb3i_adapter_ddp_init(struct t3cdev *tdev,
              tformat->sw_bits, tformat->rsvd_bits,
              tformat->rsvd_shift, tformat->rsvd_mask);
 
-    *txsz = ddp->max_txsz;
-    *rxsz = ddp->max_rxsz;
-    ddp_log_info("ddp max pkt size: %u, %u.\n",
-            ddp->max_txsz, ddp->max_rxsz);
+    *txsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+              ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
+    *rxsz = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
+              ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);
+    ddp_log_info("max payload size: %u/%u, %u/%u.\n",
+            *txsz, ddp->max_txsz, *rxsz, ddp->max_rxsz);
     return 0;
 }
 EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init);
@@ -87,8 +87,9 @@ struct cxgb3i_ddp_info {
     struct sk_buff **gl_skb;
 };
 
+#define ISCSI_PDU_NONPAYLOAD_LEN 312 /* bhs(48) + ahs(256) + digest(8) */
 #define ULP2_MAX_PKT_SIZE 16224
-#define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_MAX)
+#define ULP2_MAX_PDU_PAYLOAD (ULP2_MAX_PKT_SIZE - ISCSI_PDU_NONPAYLOAD_LEN)
 #define PPOD_PAGES_MAX 4
 #define PPOD_PAGES_SHIFT 2 /* 4 pages per pod */
 
@@ -12,8 +12,8 @@
 #include "cxgb3i.h"
 
 #define DRV_MODULE_NAME         "cxgb3i"
-#define DRV_MODULE_VERSION      "1.0.0"
-#define DRV_MODULE_RELDATE      "Jun. 1, 2008"
+#define DRV_MODULE_VERSION      "1.0.1"
+#define DRV_MODULE_RELDATE      "Jan. 2009"
 
 static char version[] =
     "Chelsio S3xx iSCSI Driver " DRV_MODULE_NAME
@@ -364,7 +364,8 @@ cxgb3i_session_create(struct iscsi_endpoint *ep, u16 cmds_max, u16 qdepth,
 
     cls_session = iscsi_session_setup(&cxgb3i_iscsi_transport, shost,
                       cmds_max,
-                      sizeof(struct iscsi_tcp_task),
+                      sizeof(struct iscsi_tcp_task) +
+                      sizeof(struct cxgb3i_task_data),
                       initial_cmdsn, ISCSI_MAX_TARGET);
     if (!cls_session)
         return NULL;

@@ -402,17 +403,15 @@ static inline int cxgb3i_conn_max_xmit_dlength(struct iscsi_conn *conn)
 {
     struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
     struct cxgb3i_conn *cconn = tcp_conn->dd_data;
-    unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
-                 cconn->hba->snic->tx_max_size -
-                 ISCSI_PDU_NONPAYLOAD_MAX);
+    unsigned int max = max(512 * MAX_SKB_FRAGS, SKB_TX_HEADROOM);
 
+    max = min(cconn->hba->snic->tx_max_size, max);
     if (conn->max_xmit_dlength)
-        conn->max_xmit_dlength = min_t(unsigned int,
-                        conn->max_xmit_dlength, max);
+        conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
     else
         conn->max_xmit_dlength = max;
     align_pdu_size(conn->max_xmit_dlength);
-    cxgb3i_log_info("conn 0x%p, max xmit %u.\n",
+    cxgb3i_api_debug("conn 0x%p, max xmit %u.\n",
              conn, conn->max_xmit_dlength);
     return 0;
 }

@@ -427,9 +426,7 @@ static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
 {
     struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
     struct cxgb3i_conn *cconn = tcp_conn->dd_data;
-    unsigned int max = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
-                 cconn->hba->snic->rx_max_size -
-                 ISCSI_PDU_NONPAYLOAD_MAX);
+    unsigned int max = cconn->hba->snic->rx_max_size;
 
     align_pdu_size(max);
     if (conn->max_recv_dlength) {

@@ -439,8 +436,7 @@ static inline int cxgb3i_conn_max_recv_dlength(struct iscsi_conn *conn)
                     conn->max_recv_dlength, max);
             return -EINVAL;
         }
-        conn->max_recv_dlength = min_t(unsigned int,
-                        conn->max_recv_dlength, max);
+        conn->max_recv_dlength = min(conn->max_recv_dlength, max);
         align_pdu_size(conn->max_recv_dlength);
     } else
         conn->max_recv_dlength = max;

@@ -844,7 +840,7 @@ static struct scsi_host_template cxgb3i_host_template = {
     .proc_name = "cxgb3i",
     .queuecommand = iscsi_queuecommand,
     .change_queue_depth = iscsi_change_queue_depth,
-    .can_queue = 128 * (ISCSI_DEF_XMIT_CMDS_MAX - 1),
+    .can_queue = CXGB3I_SCSI_QDEPTH_DFLT - 1,
     .sg_tablesize = SG_ALL,
     .max_sectors = 0xFFFF,
     .cmd_per_lun = ISCSI_DEF_CMD_PER_LUN,
@@ -23,19 +23,19 @@
 #include "cxgb3i_ddp.h"
 
 #ifdef __DEBUG_C3CN_CONN__
-#define c3cn_conn_debug cxgb3i_log_info
+#define c3cn_conn_debug cxgb3i_log_debug
 #else
 #define c3cn_conn_debug(fmt...)
 #endif
 
 #ifdef __DEBUG_C3CN_TX__
 #define c3cn_tx_debug cxgb3i_log_debug
 #else
 #define c3cn_tx_debug(fmt...)
 #endif
 
 #ifdef __DEBUG_C3CN_RX__
 #define c3cn_rx_debug cxgb3i_log_debug
 #else
 #define c3cn_rx_debug(fmt...)
 #endif

@@ -47,9 +47,9 @@ static int cxgb3_rcv_win = 256 * 1024;
 module_param(cxgb3_rcv_win, int, 0644);
 MODULE_PARM_DESC(cxgb3_rcv_win, "TCP receive window in bytes (default=256KB)");
 
-static int cxgb3_snd_win = 64 * 1024;
+static int cxgb3_snd_win = 128 * 1024;
 module_param(cxgb3_snd_win, int, 0644);
-MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=64KB)");
+MODULE_PARM_DESC(cxgb3_snd_win, "TCP send window in bytes (default=128KB)");
 
 static int cxgb3_rx_credit_thres = 10 * 1024;
 module_param(cxgb3_rx_credit_thres, int, 0644);

@@ -301,8 +301,8 @@ static void act_open_req_arp_failure(struct t3cdev *dev, struct sk_buff *skb)
 static void skb_entail(struct s3_conn *c3cn, struct sk_buff *skb,
                int flags)
 {
-    CXGB3_SKB_CB(skb)->seq = c3cn->write_seq;
-    CXGB3_SKB_CB(skb)->flags = flags;
+    skb_tcp_seq(skb) = c3cn->write_seq;
+    skb_flags(skb) = flags;
     __skb_queue_tail(&c3cn->write_queue, skb);
 }
 

@@ -457,12 +457,9 @@ static unsigned int wrlen __read_mostly;
  * The number of WRs needed for an skb depends on the number of fragments
  * in the skb and whether it has any payload in its main body. This maps the
  * length of the gather list represented by an skb into the # of necessary WRs.
- *
- * The max. length of an skb is controlled by the max pdu size which is ~16K.
- * Also, assume the min. fragment length is the sector size (512), then add
- * extra fragment counts for iscsi bhs and payload padding.
+ * The extra two fragments are for iscsi bhs and payload padding.
  */
-#define SKB_WR_LIST_SIZE (16384/512 + 3)
+#define SKB_WR_LIST_SIZE (MAX_SKB_FRAGS + 2)
 static unsigned int skb_wrs[SKB_WR_LIST_SIZE] __read_mostly;
 
 static void s3_init_wr_tab(unsigned int wr_len)

@@ -485,7 +482,7 @@ static void s3_init_wr_tab(unsigned int wr_len)
 
 static inline void reset_wr_list(struct s3_conn *c3cn)
 {
-    c3cn->wr_pending_head = NULL;
+    c3cn->wr_pending_head = c3cn->wr_pending_tail = NULL;
 }
 
 /*

@@ -496,7 +493,7 @@ static inline void reset_wr_list(struct s3_conn *c3cn)
 static inline void enqueue_wr(struct s3_conn *c3cn,
                   struct sk_buff *skb)
 {
-    skb_wr_data(skb) = NULL;
+    skb_tx_wr_next(skb) = NULL;
 
     /*
      * We want to take an extra reference since both us and the driver

@@ -509,10 +506,22 @@ static inline void enqueue_wr(struct s3_conn *c3cn,
     if (!c3cn->wr_pending_head)
         c3cn->wr_pending_head = skb;
     else
-        skb_wr_data(skb) = skb;
+        skb_tx_wr_next(c3cn->wr_pending_tail) = skb;
     c3cn->wr_pending_tail = skb;
 }
 
+static int count_pending_wrs(struct s3_conn *c3cn)
+{
+    int n = 0;
+    const struct sk_buff *skb = c3cn->wr_pending_head;
+
+    while (skb) {
+        n += skb->csum;
+        skb = skb_tx_wr_next(skb);
+    }
+    return n;
+}
+
 static inline struct sk_buff *peek_wr(const struct s3_conn *c3cn)
 {
     return c3cn->wr_pending_head;

@@ -529,8 +538,8 @@ static inline struct sk_buff *dequeue_wr(struct s3_conn *c3cn)
 
     if (likely(skb)) {
         /* Don't bother clearing the tail */
-        c3cn->wr_pending_head = skb_wr_data(skb);
-        skb_wr_data(skb) = NULL;
+        c3cn->wr_pending_head = skb_tx_wr_next(skb);
+        skb_tx_wr_next(skb) = NULL;
     }
     return skb;
 }

@@ -543,13 +552,14 @@ static void purge_wr_queue(struct s3_conn *c3cn)
 }
 
 static inline void make_tx_data_wr(struct s3_conn *c3cn, struct sk_buff *skb,
-                   int len)
+                   int len, int req_completion)
 {
     struct tx_data_wr *req;
 
     skb_reset_transport_header(skb);
     req = (struct tx_data_wr *)__skb_push(skb, sizeof(*req));
-    req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
+    req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA) |
+               (req_completion ? F_WR_COMPL : 0));
     req->wr_lo = htonl(V_WR_TID(c3cn->tid));
     req->sndseq = htonl(c3cn->snd_nxt);
     /* len includes the length of any HW ULP additions */

@@ -592,7 +602,7 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
 
     if (unlikely(c3cn->state == C3CN_STATE_CONNECTING ||
              c3cn->state == C3CN_STATE_CLOSE_WAIT_1 ||
-             c3cn->state == C3CN_STATE_ABORTING)) {
+             c3cn->state >= C3CN_STATE_ABORTING)) {
         c3cn_tx_debug("c3cn 0x%p, in closing state %u.\n",
                   c3cn, c3cn->state);
         return 0;

@@ -615,7 +625,7 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
         if (c3cn->wr_avail < wrs_needed) {
             c3cn_tx_debug("c3cn 0x%p, skb len %u/%u, frag %u, "
                       "wr %d < %u.\n",
-                      c3cn, skb->len, skb->datalen, frags,
+                      c3cn, skb->len, skb->data_len, frags,
                       wrs_needed, c3cn->wr_avail);
             break;
         }
@@ -627,20 +637,24 @@ static int c3cn_push_tx_frames(struct s3_conn *c3cn, int req_completion)
         c3cn->wr_unacked += wrs_needed;
         enqueue_wr(c3cn, skb);
 
-        if (likely(CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_NEED_HDR)) {
-            len += ulp_extra_len(skb);
-            make_tx_data_wr(c3cn, skb, len);
-            c3cn->snd_nxt += len;
-            if ((req_completion
-                && c3cn->wr_unacked == wrs_needed)
-                || (CXGB3_SKB_CB(skb)->flags & C3CB_FLAG_COMPL)
-                || c3cn->wr_unacked >= c3cn->wr_max / 2) {
-                struct work_request_hdr *wr = cplhdr(skb);
+        c3cn_tx_debug("c3cn 0x%p, enqueue, skb len %u/%u, frag %u, "
+                  "wr %d, left %u, unack %u.\n",
+                  c3cn, skb->len, skb->data_len, frags,
+                  wrs_needed, c3cn->wr_avail, c3cn->wr_unacked);
 
-                wr->wr_hi |= htonl(F_WR_COMPL);
+        if (likely(skb_flags(skb) & C3CB_FLAG_NEED_HDR)) {
+            if ((req_completion &&
+                 c3cn->wr_unacked == wrs_needed) ||
+                (skb_flags(skb) & C3CB_FLAG_COMPL) ||
+                c3cn->wr_unacked >= c3cn->wr_max / 2) {
+                req_completion = 1;
                 c3cn->wr_unacked = 0;
             }
-            CXGB3_SKB_CB(skb)->flags &= ~C3CB_FLAG_NEED_HDR;
+            len += ulp_extra_len(skb);
+            make_tx_data_wr(c3cn, skb, len, req_completion);
+            c3cn->snd_nxt += len;
+            skb_flags(skb) &= ~C3CB_FLAG_NEED_HDR;
         }
 
         total_size += skb->truesize;

@@ -735,8 +749,11 @@ static void process_act_establish(struct s3_conn *c3cn, struct sk_buff *skb)
     if (unlikely(c3cn_flag(c3cn, C3CN_ACTIVE_CLOSE_NEEDED)))
         /* upper layer has requested closing */
         send_abort_req(c3cn);
-    else if (c3cn_push_tx_frames(c3cn, 1))
+    else {
+        if (skb_queue_len(&c3cn->write_queue))
+            c3cn_push_tx_frames(c3cn, 1);
         cxgb3i_conn_tx_open(c3cn);
+    }
 }
 
 static int do_act_establish(struct t3cdev *cdev, struct sk_buff *skb,

@@ -1082,8 +1099,8 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
         return;
     }
 
-    CXGB3_SKB_CB(skb)->seq = ntohl(hdr_cpl->seq);
-    CXGB3_SKB_CB(skb)->flags = 0;
+    skb_tcp_seq(skb) = ntohl(hdr_cpl->seq);
+    skb_flags(skb) = 0;
 
     skb_reset_transport_header(skb);
     __skb_pull(skb, sizeof(struct cpl_iscsi_hdr));

@@ -1103,12 +1120,12 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
         goto abort_conn;
 
     skb_ulp_mode(skb) = ULP2_FLAG_DATA_READY;
-    skb_ulp_pdulen(skb) = ntohs(ddp_cpl.len);
-    skb_ulp_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
+    skb_rx_pdulen(skb) = ntohs(ddp_cpl.len);
+    skb_rx_ddigest(skb) = ntohl(ddp_cpl.ulp_crc);
     status = ntohl(ddp_cpl.ddp_status);
 
     c3cn_rx_debug("rx skb 0x%p, len %u, pdulen %u, ddp status 0x%x.\n",
-              skb, skb->len, skb_ulp_pdulen(skb), status);
+              skb, skb->len, skb_rx_pdulen(skb), status);
 
     if (status & (1 << RX_DDP_STATUS_HCRC_SHIFT))
         skb_ulp_mode(skb) |= ULP2_FLAG_HCRC_ERROR;

@@ -1126,7 +1143,7 @@ static void process_rx_iscsi_hdr(struct s3_conn *c3cn, struct sk_buff *skb)
     } else if (status & (1 << RX_DDP_STATUS_DDP_SHIFT))
         skb_ulp_mode(skb) |= ULP2_FLAG_DATA_DDPED;
 
-    c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_ulp_pdulen(skb);
+    c3cn->rcv_nxt = ntohl(ddp_cpl.seq) + skb_rx_pdulen(skb);
     __pskb_trim(skb, len);
     __skb_queue_tail(&c3cn->receive_queue, skb);
     cxgb3i_conn_pdu_ready(c3cn);
@@ -1151,12 +1168,27 @@ static int do_iscsi_hdr(struct t3cdev *t3dev, struct sk_buff *skb, void *ctx)
  * Process an acknowledgment of WR completion. Advance snd_una and send the
  * next batch of work requests from the write queue.
  */
+static void check_wr_invariants(struct s3_conn *c3cn)
+{
+    int pending = count_pending_wrs(c3cn);
+
+    if (unlikely(c3cn->wr_avail + pending != c3cn->wr_max))
+        cxgb3i_log_error("TID %u: credit imbalance: avail %u, "
+                 "pending %u, total should be %u\n",
+                 c3cn->tid, c3cn->wr_avail, pending,
+                 c3cn->wr_max);
+}
+
 static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
 {
     struct cpl_wr_ack *hdr = cplhdr(skb);
     unsigned int credits = ntohs(hdr->credits);
     u32 snd_una = ntohl(hdr->snd_una);
 
+    c3cn_tx_debug("%u WR credits, avail %u, unack %u, TID %u, state %u.\n",
+              credits, c3cn->wr_avail, c3cn->wr_unacked,
+              c3cn->tid, c3cn->state);
+
     c3cn->wr_avail += credits;
     if (c3cn->wr_unacked > c3cn->wr_max - c3cn->wr_avail)
         c3cn->wr_unacked = c3cn->wr_max - c3cn->wr_avail;

@@ -1171,6 +1203,17 @@ static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
             break;
         }
         if (unlikely(credits < p->csum)) {
+            struct tx_data_wr *w = cplhdr(p);
+            cxgb3i_log_error("TID %u got %u WR credits need %u, "
+                     "len %u, main body %u, frags %u, "
+                     "seq # %u, ACK una %u, ACK nxt %u, "
+                     "WR_AVAIL %u, WRs pending %u\n",
+                     c3cn->tid, credits, p->csum, p->len,
+                     p->len - p->data_len,
+                     skb_shinfo(p)->nr_frags,
+                     ntohl(w->sndseq), snd_una,
+                     ntohl(hdr->snd_nxt), c3cn->wr_avail,
+                     count_pending_wrs(c3cn) - credits);
             p->csum -= credits;
             break;
         } else {

@@ -1180,15 +1223,24 @@ static void process_wr_ack(struct s3_conn *c3cn, struct sk_buff *skb)
         }
     }
 
-    if (unlikely(before(snd_una, c3cn->snd_una)))
+    check_wr_invariants(c3cn);
+
+    if (unlikely(before(snd_una, c3cn->snd_una))) {
+        cxgb3i_log_error("TID %u, unexpected sequence # %u in WR_ACK "
+                 "snd_una %u\n",
+                 c3cn->tid, snd_una, c3cn->snd_una);
         goto out_free;
+    }
 
     if (c3cn->snd_una != snd_una) {
         c3cn->snd_una = snd_una;
         dst_confirm(c3cn->dst_cache);
     }
 
-    if (skb_queue_len(&c3cn->write_queue) && c3cn_push_tx_frames(c3cn, 0))
+    if (skb_queue_len(&c3cn->write_queue)) {
+        if (c3cn_push_tx_frames(c3cn, 0))
+            cxgb3i_conn_tx_open(c3cn);
+    } else
         cxgb3i_conn_tx_open(c3cn);
 out_free:
     __kfree_skb(skb);

@@ -1452,7 +1504,7 @@ static void init_offload_conn(struct s3_conn *c3cn,
                   struct dst_entry *dst)
 {
     BUG_ON(c3cn->cdev != cdev);
-    c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs;
+    c3cn->wr_max = c3cn->wr_avail = T3C_DATA(cdev)->max_wrs - 1;
     c3cn->wr_unacked = 0;
     c3cn->mss_idx = select_mss(c3cn, dst_mtu(dst));
 

@@ -1671,9 +1723,17 @@ int cxgb3i_c3cn_send_pdus(struct s3_conn *c3cn, struct sk_buff *skb)
         goto out_err;
     }
 
-    err = -EPIPE;
     if (c3cn->err) {
         c3cn_tx_debug("c3cn 0x%p, err %d.\n", c3cn, c3cn->err);
+        err = -EPIPE;
+        goto out_err;
+    }
+
+    if (c3cn->write_seq - c3cn->snd_una >= cxgb3_snd_win) {
+        c3cn_tx_debug("c3cn 0x%p, snd %u - %u > %u.\n",
+                  c3cn, c3cn->write_seq, c3cn->snd_una,
+                  cxgb3_snd_win);
+        err = -EAGAIN;
         goto out_err;
     }
 
@@ -178,25 +178,33 @@ void cxgb3i_c3cn_release(struct s3_conn *);
  * @flag: see C3CB_FLAG_* below
  * @ulp_mode: ULP mode/submode of sk_buff
  * @seq: tcp sequence number
- * @ddigest: pdu data digest
- * @pdulen: recovered pdu length
- * @wr_data: scratch area for tx wr
  */
+struct cxgb3_skb_rx_cb {
+    __u32 ddigest;              /* data digest */
+    __u32 pdulen;               /* recovered pdu length */
+};
+
+struct cxgb3_skb_tx_cb {
+    struct sk_buff *wr_next;    /* next wr */
+};
+
 struct cxgb3_skb_cb {
     __u8 flags;
     __u8 ulp_mode;
     __u32 seq;
-    __u32 ddigest;
-    __u32 pdulen;
-    struct sk_buff *wr_data;
+    union {
+        struct cxgb3_skb_rx_cb rx;
+        struct cxgb3_skb_tx_cb tx;
+    };
 };
 
 #define CXGB3_SKB_CB(skb) ((struct cxgb3_skb_cb *)&((skb)->cb[0]))
+#define skb_flags(skb) (CXGB3_SKB_CB(skb)->flags)
 #define skb_ulp_mode(skb) (CXGB3_SKB_CB(skb)->ulp_mode)
-#define skb_ulp_ddigest(skb) (CXGB3_SKB_CB(skb)->ddigest)
-#define skb_ulp_pdulen(skb) (CXGB3_SKB_CB(skb)->pdulen)
-#define skb_wr_data(skb) (CXGB3_SKB_CB(skb)->wr_data)
+#define skb_tcp_seq(skb) (CXGB3_SKB_CB(skb)->seq)
+#define skb_rx_ddigest(skb) (CXGB3_SKB_CB(skb)->rx.ddigest)
+#define skb_rx_pdulen(skb) (CXGB3_SKB_CB(skb)->rx.pdulen)
+#define skb_tx_wr_next(skb) (CXGB3_SKB_CB(skb)->tx.wr_next)
 
 enum c3cb_flags {
     C3CB_FLAG_NEED_HDR = 1 << 0, /* packet needs a TX_DATA_WR header */

@@ -217,6 +225,7 @@ struct sge_opaque_hdr {
 /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
 #define TX_HEADER_LEN \
     (sizeof(struct tx_data_wr) + sizeof(struct sge_opaque_hdr))
+#define SKB_TX_HEADROOM SKB_MAX_HEAD(TX_HEADER_LEN)
 
 /*
  * get and set private ip for iscsi traffic
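Because an skb in this driver is either on the receive path or on the transmit path at any given moment, the control-block rework above lets the rx fields (ddigest, pdulen) and the tx field (wr_next) share storage through an anonymous union, so the per-skb bookkeeping costs the larger of the two rather than their sum and stays within the skb->cb[] scratch area. A compact sketch of the same idea with simplified stand-in types (build with -std=c11 for the anonymous union):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the rx/tx control-block union introduced above; the
     * 48-byte bound mirrors the size of skb->cb[]. */
    struct rx_cb { uint32_t ddigest; uint32_t pdulen; };
    struct tx_cb { void *wr_next; };

    struct skb_cb {
        uint8_t  flags;
        uint8_t  ulp_mode;
        uint32_t seq;
        union {
            struct rx_cb rx;
            struct tx_cb tx;
        };
    };

    int main(void)
    {
        printf("sizeof(struct skb_cb) = %zu\n", sizeof(struct skb_cb));
        assert(sizeof(struct skb_cb) <= 48);
        return 0;
    }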
@@ -32,6 +32,10 @@
 #define cxgb3i_tx_debug(fmt...)
 #endif
 
+/* always allocate rooms for AHS */
+#define SKB_TX_PDU_HEADER_LEN \
+    (sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)
+static unsigned int skb_extra_headroom;
 static struct page *pad_page;
 
 /*

@@ -146,12 +150,13 @@ static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
 
 void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
 {
-    struct iscsi_tcp_task *tcp_task = task->dd_data;
+    struct cxgb3i_task_data *tdata = task->dd_data +
+                     sizeof(struct iscsi_tcp_task);
 
     /* never reached the xmit task callout */
-    if (tcp_task->dd_data)
-        kfree_skb(tcp_task->dd_data);
-    tcp_task->dd_data = NULL;
+    if (tdata->skb)
+        __kfree_skb(tdata->skb);
+    memset(tdata, 0, sizeof(struct cxgb3i_task_data));
 
     /* MNC - Do we need a check in case this is called but
      * cxgb3i_conn_alloc_pdu has never been called on the task */

@@ -159,28 +164,102 @@ void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
     iscsi_tcp_cleanup_task(task);
 }
 
-/*
- * We do not support ahs yet
- */
+static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
+               unsigned int offset, unsigned int *off,
+               struct scatterlist **sgp)
+{
+    int i;
+    struct scatterlist *sg;
+
+    for_each_sg(sgl, sg, sgcnt, i) {
+        if (offset < sg->length) {
+            *off = offset;
+            *sgp = sg;
+            return 0;
+        }
+        offset -= sg->length;
+    }
+    return -EFAULT;
+}
+
+static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
+                 unsigned int dlen, skb_frag_t *frags,
+                 int frag_max)
+{
+    unsigned int datalen = dlen;
+    unsigned int sglen = sg->length - sgoffset;
+    struct page *page = sg_page(sg);
+    int i;
+
+    i = 0;
+    do {
+        unsigned int copy;
+
+        if (!sglen) {
+            sg = sg_next(sg);
+            if (!sg) {
+                cxgb3i_log_error("%s, sg NULL, len %u/%u.\n",
+                         __func__, datalen, dlen);
+                return -EINVAL;
+            }
+            sgoffset = 0;
+            sglen = sg->length;
+            page = sg_page(sg);
+        }
+        copy = min(datalen, sglen);
+        if (i && page == frags[i - 1].page &&
+            sgoffset + sg->offset ==
+            frags[i - 1].page_offset + frags[i - 1].size) {
+            frags[i - 1].size += copy;
+        } else {
+            if (i >= frag_max) {
+                cxgb3i_log_error("%s, too many pages %u, "
+                         "dlen %u.\n", __func__,
+                         frag_max, dlen);
+                return -EINVAL;
+            }
+
+            frags[i].page = page;
+            frags[i].page_offset = sg->offset + sgoffset;
+            frags[i].size = copy;
+            i++;
+        }
+        datalen -= copy;
+        sgoffset += copy;
+        sglen -= copy;
+    } while (datalen);
+
+    return i;
+}
+
 int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
 {
+    struct iscsi_conn *conn = task->conn;
     struct iscsi_tcp_task *tcp_task = task->dd_data;
-    struct sk_buff *skb;
+    struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task);
+    struct scsi_cmnd *sc = task->sc;
+    int headroom = SKB_TX_PDU_HEADER_LEN;
 
+    tcp_task->dd_data = tdata;
     task->hdr = NULL;
-    /* always allocate rooms for AHS */
-    skb = alloc_skb(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE +
-            TX_HEADER_LEN, GFP_ATOMIC);
-    if (!skb)
+    /* write command, need to send data pdus */
+    if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
+        (opcode == ISCSI_OP_SCSI_CMD &&
+         (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
+        headroom += min(skb_extra_headroom, conn->max_xmit_dlength);
+
+    tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC);
+    if (!tdata->skb)
         return -ENOMEM;
+    skb_reserve(tdata->skb, TX_HEADER_LEN);
 
     cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
-            task, opcode, skb);
+            task, opcode, tdata->skb);
 
-    tcp_task->dd_data = skb;
-    skb_reserve(skb, TX_HEADER_LEN);
-    task->hdr = (struct iscsi_hdr *)skb->data;
-    task->hdr_max = sizeof(struct iscsi_hdr);
+    task->hdr = (struct iscsi_hdr *)tdata->skb->data;
+    task->hdr_max = SKB_TX_PDU_HEADER_LEN;
 
     /* data_out uses scsi_cmd's itt */
     if (opcode != ISCSI_OP_SCSI_DATA_OUT)
@@ -192,13 +271,13 @@ int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
 int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
              unsigned int count)
 {
-    struct iscsi_tcp_task *tcp_task = task->dd_data;
-    struct sk_buff *skb = tcp_task->dd_data;
     struct iscsi_conn *conn = task->conn;
-    struct page *pg;
+    struct iscsi_tcp_task *tcp_task = task->dd_data;
+    struct cxgb3i_task_data *tdata = tcp_task->dd_data;
+    struct sk_buff *skb = tdata->skb;
     unsigned int datalen = count;
     int i, padlen = iscsi_padding(count);
-    skb_frag_t *frag;
+    struct page *pg;
 
     cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
             task, task->sc, offset, count, skb);
@@ -209,90 +288,94 @@ int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
         return 0;
 
     if (task->sc) {
-        struct scatterlist *sg;
-        struct scsi_data_buffer *sdb;
-        unsigned int sgoffset = offset;
-        struct page *sgpg;
-        unsigned int sglen;
-
-        sdb = scsi_out(task->sc);
-        sg = sdb->table.sgl;
-
-        for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
-            cxgb3i_tx_debug("sg %d, page 0x%p, len %u offset %u\n",
-                    i, sg_page(sg), sg->length, sg->offset);
-
-            if (sgoffset < sg->length)
-                break;
-            sgoffset -= sg->length;
-        }
-        sgpg = sg_page(sg);
-        sglen = sg->length - sgoffset;
-
-        do {
-            int j = skb_shinfo(skb)->nr_frags;
-            unsigned int copy;
-
-            if (!sglen) {
-                sg = sg_next(sg);
-                sgpg = sg_page(sg);
-                sgoffset = 0;
-                sglen = sg->length;
-                ++i;
-            }
-            copy = min(sglen, datalen);
-            if (j && skb_can_coalesce(skb, j, sgpg,
-                          sg->offset + sgoffset)) {
-                skb_shinfo(skb)->frags[j - 1].size += copy;
-            } else {
-                get_page(sgpg);
-                skb_fill_page_desc(skb, j, sgpg,
-                           sg->offset + sgoffset, copy);
-            }
-            sgoffset += copy;
-            sglen -= copy;
-            datalen -= copy;
-        } while (datalen);
+        struct scsi_data_buffer *sdb = scsi_out(task->sc);
+        struct scatterlist *sg = NULL;
+        int err;
+
+        tdata->offset = offset;
+        tdata->count = count;
+        err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
+                      tdata->offset, &tdata->sgoffset, &sg);
+        if (err < 0) {
+            cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
+                    sdb->table.nents, tdata->offset,
+                    sdb->length);
+            return err;
+        }
+        err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
+                    tdata->frags, MAX_PDU_FRAGS);
+        if (err < 0) {
+            cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
+                    sdb->table.nents, tdata->offset,
+                    tdata->count);
+            return err;
+        }
+        tdata->nr_frags = err;
+
+        if (tdata->nr_frags > MAX_SKB_FRAGS ||
+            (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
+            char *dst = skb->data + task->hdr_len;
+            skb_frag_t *frag = tdata->frags;
+
+            /* data fits in the skb's headroom */
+            for (i = 0; i < tdata->nr_frags; i++, frag++) {
+                char *src = kmap_atomic(frag->page,
+                            KM_SOFTIRQ0);
+
+                memcpy(dst, src+frag->page_offset, frag->size);
+                dst += frag->size;
+                kunmap_atomic(src, KM_SOFTIRQ0);
+            }
+            if (padlen) {
+                memset(dst, 0, padlen);
+                padlen = 0;
+            }
+            skb_put(skb, count + padlen);
+        } else {
+            /* data fit into frag_list */
+            for (i = 0; i < tdata->nr_frags; i++)
+                get_page(tdata->frags[i].page);
+
+            memcpy(skb_shinfo(skb)->frags, tdata->frags,
+                   sizeof(skb_frag_t) * tdata->nr_frags);
+            skb_shinfo(skb)->nr_frags = tdata->nr_frags;
+            skb->len += count;
+            skb->data_len += count;
+            skb->truesize += count;
+        }
 
     } else {
         pg = virt_to_page(task->data);
 
-        while (datalen) {
-            i = skb_shinfo(skb)->nr_frags;
-            frag = &skb_shinfo(skb)->frags[i];
-
-            get_page(pg);
-            frag->page = pg;
-            frag->page_offset = 0;
-            frag->size = min((unsigned int)PAGE_SIZE, datalen);
+        get_page(pg);
+        skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
+                   count);
+        skb->len += count;
+        skb->data_len += count;
+        skb->truesize += count;
|
|
||||||
|
|
||||||
skb_shinfo(skb)->nr_frags++;
|
|
||||||
datalen -= frag->size;
|
|
||||||
pg++;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (padlen) {
|
if (padlen) {
|
||||||
i = skb_shinfo(skb)->nr_frags;
|
i = skb_shinfo(skb)->nr_frags;
|
||||||
frag = &skb_shinfo(skb)->frags[i];
|
get_page(pad_page);
|
||||||
frag->page = pad_page;
|
skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
|
||||||
frag->page_offset = 0;
|
padlen);
|
||||||
frag->size = padlen;
|
|
||||||
skb_shinfo(skb)->nr_frags++;
|
skb->data_len += padlen;
|
||||||
|
skb->truesize += padlen;
|
||||||
|
skb->len += padlen;
|
||||||
}
|
}
|
||||||
|
|
||||||
datalen = count + padlen;
|
|
||||||
skb->data_len += datalen;
|
|
||||||
skb->truesize += datalen;
|
|
||||||
skb->len += datalen;
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
{
    struct iscsi_tcp_task *tcp_task = task->dd_data;
    struct sk_buff *skb = tcp_task->dd_data;
    struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
    struct cxgb3i_conn *cconn = tcp_conn->dd_data;
    struct iscsi_tcp_task *tcp_task = task->dd_data;
    struct cxgb3i_task_data *tdata = tcp_task->dd_data;
    struct sk_buff *skb = tdata->skb;
    unsigned int datalen;
    int err;

@ -300,13 +383,14 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
        return 0;

    datalen = skb->data_len;
    tcp_task->dd_data = NULL;
    tdata->skb = NULL;
    err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
    cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
            task, skb, skb->len, skb->data_len, err);
    if (err > 0) {
        int pdulen = err;

        cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
                task, skb, skb->len, skb->data_len, err);

        if (task->conn->hdrdgst_en)
            pdulen += ISCSI_DIGEST_SIZE;
        if (datalen && task->conn->datadgst_en)
@ -325,12 +409,14 @@ int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
        return err;
    }
    /* reset skb to send when we are called again */
    tcp_task->dd_data = skb;
    tdata->skb = skb;
    return -EAGAIN;
}

int cxgb3i_pdu_init(void)
{
    if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS))
        skb_extra_headroom = SKB_TX_HEADROOM;
    pad_page = alloc_page(GFP_KERNEL);
    if (!pad_page)
        return -ENOMEM;
@ -366,7 +452,9 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
    skb = skb_peek(&c3cn->receive_queue);
    while (!err && skb) {
        __skb_unlink(skb, &c3cn->receive_queue);
        read += skb_ulp_pdulen(skb);
        read += skb_rx_pdulen(skb);
        cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
                conn, c3cn, skb, skb_rx_pdulen(skb));
        err = cxgb3i_conn_read_pdu_skb(conn, skb);
        __kfree_skb(skb);
        skb = skb_peek(&c3cn->receive_queue);
@ -377,6 +465,11 @@ void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
        cxgb3i_c3cn_rx_credits(c3cn, read);
    }
    conn->rxdata_octets += read;

    if (err) {
        cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
        iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
    }
}

void cxgb3i_conn_tx_open(struct s3_conn *c3cn)

@ -53,7 +53,7 @@ struct cpl_rx_data_ddp_norss {
#define ULP2_FLAG_DCRC_ERROR 0x20
#define ULP2_FLAG_PAD_ERROR 0x40

void cxgb3i_conn_closing(struct s3_conn *);
void cxgb3i_conn_closing(struct s3_conn *c3cn);
void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn);
void cxgb3i_conn_tx_open(struct s3_conn *c3cn);
#endif
@ -1251,6 +1251,7 @@ static struct pci_device_id hptiop_id_table[] = {
    { PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
    { PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
    { PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
    { PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
    { PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
    { PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
    { PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
@ -1040,12 +1040,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
        action = ACTION_FAIL;
        break;
    case ABORTED_COMMAND:
        action = ACTION_FAIL;
        if (sshdr.asc == 0x10) { /* DIF */
            description = "Target Data Integrity Failure";
            action = ACTION_FAIL;
            error = -EILSEQ;
        } else
            action = ACTION_RETRY;
        }
        break;
    case NOT_READY:
        /* If the device is in the process of becoming
@ -107,6 +107,7 @@ static void scsi_disk_release(struct device *cdev);
static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *);
static void sd_print_result(struct scsi_disk *, int);

static DEFINE_SPINLOCK(sd_index_lock);
static DEFINE_IDA(sd_index_ida);

/* This semaphore is used to mediate the 0->1 reference get in the
@ -1914,7 +1915,9 @@ static int sd_probe(struct device *dev)
    if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
        goto out_put;

        spin_lock(&sd_index_lock);
        error = ida_get_new(&sd_index_ida, &index);
        spin_unlock(&sd_index_lock);
    } while (error == -EAGAIN);

    if (error)
@ -1936,7 +1939,9 @@ static int sd_probe(struct device *dev)
    return 0;

out_free_index:
    spin_lock(&sd_index_lock);
    ida_remove(&sd_index_ida, index);
    spin_unlock(&sd_index_lock);
out_put:
    put_disk(gd);
out_free:
@ -1986,7 +1991,9 @@ static void scsi_disk_release(struct device *dev)
    struct scsi_disk *sdkp = to_scsi_disk(dev);
    struct gendisk *disk = sdkp->disk;

    spin_lock(&sd_index_lock);
    ida_remove(&sd_index_ida, sdkp->index);
    spin_unlock(&sd_index_lock);

    disk->private_data = NULL;
    put_disk(disk);
@ -2230,7 +2230,7 @@ static int __devexit pxafb_remove(struct platform_device *dev)

static struct platform_driver pxafb_driver = {
    .probe = pxafb_probe,
    .remove = pxafb_remove,
    .remove = __devexit_p(pxafb_remove),
    .suspend = pxafb_suspend,
    .resume = pxafb_resume,
    .driver = {
@ -69,10 +69,12 @@ obj-$(CONFIG_DLM) += dlm/
# Do not add any filesystems before this line
obj-$(CONFIG_REISERFS_FS) += reiserfs/
obj-$(CONFIG_EXT3_FS) += ext3/ # Before ext2 so root fs can be ext3
obj-$(CONFIG_EXT4_FS) += ext4/ # Before ext2 so root fs can be ext4
obj-$(CONFIG_EXT2_FS) += ext2/
# We place ext4 after ext2 so plain ext2 root fs's are mounted using ext2
# unless explicitly requested by rootfstype
obj-$(CONFIG_EXT4_FS) += ext4/
obj-$(CONFIG_JBD) += jbd/
obj-$(CONFIG_JBD2) += jbd2/
obj-$(CONFIG_EXT2_FS) += ext2/
obj-$(CONFIG_CRAMFS) += cramfs/
obj-$(CONFIG_SQUASHFS) += squashfs/
obj-y += ramfs/
@ -609,7 +609,9 @@ int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
    if (!ext4_has_free_blocks(EXT4_SB(sb), 1) || (*retries)++ > 3)
    if (!ext4_has_free_blocks(EXT4_SB(sb), 1) ||
        (*retries)++ > 3 ||
        !EXT4_SB(sb)->s_journal)
        return 0;

    jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
@ -2544,7 +2544,7 @@ retry:

        ext4_journal_stop(handle);

        if (mpd.retval == -ENOSPC) {
        if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
            /* commit the transaction which would
             * free blocks released in the transaction
             * and try again
@ -3091,7 +3091,6 @@ static int ext4_freeze(struct super_block *sb)

        /* Journal blocked and flushed, clear needs_recovery flag. */
        EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
        ext4_commit_super(sb, EXT4_SB(sb)->s_es, 1);
        error = ext4_commit_super(sb, EXT4_SB(sb)->s_es, 1);
        if (error)
            goto out;
@ -91,8 +91,11 @@ io_mapping_unmap_atomic(void *vaddr)
static inline void *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
    resource_size_t phys_addr;

    BUG_ON(offset >= mapping->size);
    resource_size_t phys_addr = mapping->base + offset;
    phys_addr = mapping->base + offset;

    return ioremap_wc(phys_addr, PAGE_SIZE);
}

@ -1085,6 +1085,7 @@ extern void synchronize_net(void);
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int init_dummy_netdev(struct net_device *dev);
extern void netdev_resync_ops(struct net_device *dev);

extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void);
#define rcu_enter_nohz() do { } while (0)
#define rcu_exit_nohz() do { } while (0)

/* A context switch is a grace period for rcuclassic. */
static inline int rcu_blocking_is_gp(void)
{
    return num_online_cpus() == 1;
}

#endif /* __LINUX_RCUCLASSIC_H */
@ -52,6 +52,9 @@ struct rcu_head {
    void (*func)(struct rcu_head *head);
};

/* Internal to kernel, but needed by rcupreempt.h. */
extern int rcu_scheduler_active;

#if defined(CONFIG_CLASSIC_RCU)
#include <linux/rcuclassic.h>
#elif defined(CONFIG_TREE_RCU)
@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void);

/* Internal to kernel */
extern void rcu_init(void);
extern void rcu_scheduler_starting(void);
extern int rcu_needs_cpu(int cpu);

#endif /* __LINUX_RCUPDATE_H */
@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void)
#define rcu_exit_nohz() do { } while (0)
#endif /* CONFIG_NO_HZ */

/*
 * A context switch is a grace period for rcupreempt synchronize_rcu()
 * only during early boot, before the scheduler has been initialized.
 * So, how the heck do we get a context switch?  Well, if the caller
 * invokes synchronize_rcu(), they are willing to accept a context
 * switch, so we simply pretend that one happened.
 *
 * After boot, there might be a blocked or preempted task in an RCU
 * read-side critical section, so we cannot then take the fastpath.
 */
static inline int rcu_blocking_is_gp(void)
{
    return num_online_cpus() == 1 && !rcu_scheduler_active;
}

#endif /* __LINUX_RCUPREEMPT_H */
@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void)
}
#endif /* CONFIG_NO_HZ */

/* A context switch is a grace period for rcutree. */
static inline int rcu_blocking_is_gp(void)
{
    return num_online_cpus() == 1;
}

#endif /* __LINUX_RCUTREE_H */
@ -2291,9 +2291,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
extern int sched_group_set_rt_period(struct task_group *tg,
            long rt_period_us);
extern long sched_group_rt_period(struct task_group *tg);
extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
#endif
#endif

extern int task_can_switch_user(struct user_struct *up,
                struct task_struct *tsk);

#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
@ -97,7 +97,7 @@ static inline void mark_rodata_ro(void) { }
extern void tc_init(void);
#endif

enum system_states system_state;
enum system_states system_state __read_mostly;
EXPORT_SYMBOL(system_state);

/*
@ -463,6 +463,7 @@ static noinline void __init_refok rest_init(void)
     * at least once to get things moving:
     */
    init_idle_bootup_task(current);
    rcu_scheduler_starting();
    preempt_enable_no_resched();
    schedule();
    preempt_disable();
@ -679,8 +679,8 @@ int rcu_needs_cpu(int cpu)
void rcu_check_callbacks(int cpu, int user)
{
    if (user ||
        (idle_cpu(cpu) && !in_softirq() &&
         hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
        (idle_cpu(cpu) && rcu_scheduler_active &&
         !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {

        /*
         * Get here if this CPU took its interrupt from user
@ -44,6 +44,7 @@
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>

enum rcu_barrier {
    RCU_BARRIER_STD,
@ -55,6 +56,7 @@ static DEFINE_PER_CPU(struct rcu_head, rcu_barrier_head) = {NULL};
static atomic_t rcu_barrier_cpu_count;
static DEFINE_MUTEX(rcu_barrier_mutex);
static struct completion rcu_barrier_completion;
int rcu_scheduler_active __read_mostly;

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
@ -80,6 +82,10 @@ void wakeme_after_rcu(struct rcu_head *head)
void synchronize_rcu(void)
{
    struct rcu_synchronize rcu;

    if (rcu_blocking_is_gp())
        return;

    init_completion(&rcu.completion);
    /* Will wake me after RCU finished. */
    call_rcu(&rcu.head, wakeme_after_rcu);
@ -175,3 +181,9 @@ void __init rcu_init(void)
    __rcu_init();
}

void rcu_scheduler_starting(void)
{
    WARN_ON(num_online_cpus() != 1);
    WARN_ON(nr_context_switches() > 0);
    rcu_scheduler_active = 1;
}
@ -1181,6 +1181,9 @@ void __synchronize_sched(void)
{
    struct rcu_synchronize rcu;

    if (num_online_cpus() == 1)
        return;  /* blocking is gp if only one CPU! */

    init_completion(&rcu.completion);
    /* Will wake me after RCU finished. */
    call_rcu_sched(&rcu.head, wakeme_after_rcu);
Some files were not shown because too many files changed in this diff.