Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
	net/bridge/br_multicast.c
	net/ipv6/sit.c

The conflicts were minor:

1) The sit.c changes overlap with a change to the ip_tunnel_xmit()
   signature.

2) br_multicast.c had an overlap between computing max_delay using
   msecs_to_jiffies() and turning the MLDV2_MRC() macro into an inline
   function with a lowercase name.

3) stmmac had two overlapping changes: one conditionally allocated
   and hooked up a dma_cfg based upon the presence of the pbl OF
   property, and the other handled store-and-forward DMA mode. The
   latter should not go into the new of_find_property() basic block,
   as shown in the sketch below.
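
For reference, a minimal sketch of the resolved probe shape described in
point 3. The function and property names used here
(stmmac_probe_config_dt(), "snps,pbl", "snps,force_sf_dma_mode",
plat->dma_cfg, plat->force_sf_dma_mode) are assumptions drawn from the
stmmac DT binding, not something this merge message spells out:

	/* Inside stmmac_probe_config_dt() (assumed): dma_cfg is only
	 * allocated and hooked up when the pbl property is present ...
	 */
	if (of_find_property(np, "snps,pbl", NULL)) {
		dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
				       GFP_KERNEL);
		if (!dma_cfg)
			return -ENOMEM;
		plat->dma_cfg = dma_cfg;
		of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
	}

	/* ... while the store-and-forward DMA mode flag is read outside
	 * that basic block.
	 */
	plat->force_sf_dma_mode =
		of_property_read_bool(np, "snps,force_sf_dma_mode");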

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2013-09-05 14:58:52 -04:00
Parents: 1a5bbfc3d6 e2e5c4c07c
Commit: 06c54055be
143 changed files with 1487 additions and 688 deletions

View file

@@ -6067,7 +6067,7 @@ M: Rob Herring <rob.herring@calxeda.com>
 M: Pawel Moll <pawel.moll@arm.com>
 M: Mark Rutland <mark.rutland@arm.com>
 M: Stephen Warren <swarren@wwwdotorg.org>
-M: Ian Campbell <ian.campbell@citrix.com>
+M: Ian Campbell <ijc+devicetree@hellion.org.uk>
 L: devicetree@vger.kernel.org
 S: Maintained
 F: Documentation/devicetree/

View file

@ -1,7 +1,7 @@
VERSION = 3 VERSION = 3
PATCHLEVEL = 11 PATCHLEVEL = 11
SUBLEVEL = 0 SUBLEVEL = 0
EXTRAVERSION = -rc6 EXTRAVERSION = -rc7
NAME = Linux for Workgroups NAME = Linux for Workgroups
# *DOCUMENTATION* # *DOCUMENTATION*

View file

@@ -39,9 +39,18 @@ ARC_ENTRY strchr
 	ld.a	r2,[r0,4]
 	sub	r12,r6,r7
 	bic	r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
 	and	r7,r12,r4
 	breq	r7,0,.Loop ; For speed, we want this branch to be unaligned.
 	b	.Lfound_char ; Likewise this one.
+#else
+	and	r12,r12,r4
+	breq	r12,0,.Loop ; For speed, we want this branch to be unaligned.
+	lsr_s	r12,r12,7
+	bic	r2,r7,r6
+	b.d	.Lfound_char_b
+	and_s	r2,r2,r12
+#endif
 ; /* We require this code address to be unaligned for speed... */
 .Laligned:
 	ld_s	r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
 	lsr	r7,r7,7
 	bic	r2,r7,r6
+.Lfound_char_b:
 	norm	r2,r2
 	sub_s	r0,r0,4
 	asr_s	r2,r2,3

View file

@@ -89,7 +89,8 @@ void set_fiq_handler(void *start, unsigned int length)
 	memcpy(base + offset, start, length);
 	if (!cache_is_vipt_nonaliasing())
-		flush_icache_range(base + offset, offset + length);
+		flush_icache_range((unsigned long)base + offset, offset +
+				   length);
 	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
 }

View file

@@ -82,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
 	crash_save_cpu(&regs, smp_processor_id());
 	flush_cache_all();

+	set_cpu_online(smp_processor_id(), false);
 	atomic_dec(&waiting_for_crash_ipi);
 	while (1)
 		cpu_relax();

View file

@@ -42,7 +42,6 @@ static const char *atlas6_dt_match[] __initdata = {
 DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)")
 	/* Maintainer: Barry Song <baohua.song@csr.com> */
-	.nr_irqs	= 128,
 	.map_io		= sirfsoc_map_io,
 	.init_time	= sirfsoc_init_time,
 	.init_late	= sirfsoc_init_late,
@@ -59,7 +58,6 @@ static const char *prima2_dt_match[] __initdata = {
 DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)")
 	/* Maintainer: Barry Song <baohua.song@csr.com> */
-	.nr_irqs	= 128,
 	.map_io		= sirfsoc_map_io,
 	.init_time	= sirfsoc_init_time,
 	.dma_zone_size	= SZ_256M,

View file

@@ -809,15 +809,18 @@ config KUSER_HELPERS
 	  the CPU type fitted to the system. This permits binaries to be
 	  run on ARMv4 through to ARMv7 without modification.

+	  See Documentation/arm/kernel_user_helpers.txt for details.
+
 	  However, the fixed address nature of these helpers can be used
 	  by ROP (return orientated programming) authors when creating
 	  exploits.

 	  If all of the binaries and libraries which run on your platform
 	  are built specifically for your platform, and make no use of
-	  these helpers, then you can turn this option off. However,
-	  when such an binary or library is run, it will receive a SIGILL
-	  signal, which will terminate the program.
+	  these helpers, then you can turn this option off to hinder
+	  such exploits. However, in that case, if a binary or library
+	  relying on those helpers is run, it will receive a SIGILL signal,
+	  which will terminate the program.

 	  Say N here only if you are absolutely certain that you do not
 	  need these helpers; otherwise, the safe option is to say Y.

View file

@@ -979,6 +979,7 @@ config RELOCATABLE
 	  must live at a different physical address than the primary
 	  kernel.

+# This value must have zeroes in the bottom 60 bits otherwise lots will break
 config PAGE_OFFSET
 	hex
 	default "0xc000000000000000"

View file

@@ -211,9 +211,19 @@ extern long long virt_phys_offset;
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
 #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
 #else
+#ifdef CONFIG_PPC64
+/*
+ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
+ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
+#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+#else /* 32-bit, non book E */
 #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
 #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
 #endif
+#endif

 /*
  * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,

View file

@@ -35,7 +35,13 @@
 #include <asm/vdso_datapage.h>
 #include <asm/vio.h>
 #include <asm/mmu.h>
+#include <asm/machdep.h>

+/*
+ * This isn't a module but we expose that to userspace
+ * via /proc so leave the definitions here
+ */
 #define MODULE_VERS "1.9"
 #define MODULE_NAME "lparcfg"
@@ -418,7 +424,8 @@ static void parse_em_data(struct seq_file *m)
 {
 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

-	if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+	if (firmware_has_feature(FW_FEATURE_LPAR) &&
+	    plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
 		seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
 }
@@ -677,7 +684,6 @@ static int lparcfg_open(struct inode *inode, struct file *file)
 }

 static const struct file_operations lparcfg_fops = {
-	.owner		= THIS_MODULE,
 	.read		= seq_read,
 	.write		= lparcfg_write,
 	.open		= lparcfg_open,
@@ -699,14 +705,4 @@ static int __init lparcfg_init(void)
 	}
 	return 0;
 }
-
-static void __exit lparcfg_cleanup(void)
-{
-	remove_proc_subtree("powerpc/lparcfg", NULL);
-}
-
-module_init(lparcfg_init);
-module_exit(lparcfg_cleanup);
-MODULE_DESCRIPTION("Interface for LPAR configuration data");
-MODULE_AUTHOR("Dave Engebretsen");
-MODULE_LICENSE("GPL");
+machine_device_initcall(pseries, lparcfg_init);

View file

@@ -908,9 +908,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		device->cap._DDC = 1;
 	}

-	if (acpi_video_init_brightness(device))
-		return;
-
 	if (acpi_video_backlight_support()) {
 		struct backlight_properties props;
 		struct pci_dev *pdev;
@@ -920,6 +917,9 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		static int count = 0;
 		char *name;

+		result = acpi_video_init_brightness(device);
+		if (result)
+			return;
 		name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
 		if (!name)
 			return;
@@ -979,11 +979,6 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 		if (result)
 			printk(KERN_ERR PREFIX "Create sysfs link\n");
-	} else {
-		/* Remove the brightness object. */
-		kfree(device->brightness->levels);
-		kfree(device->brightness);
-		device->brightness = NULL;
 	}
 }

View file

@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
 	/* Disable sending Early R_OK.
 	 * With "cached read" HDD testing and multiple ports busy on a SATA
-	 * host controller, 3726 PMP will very rarely drop a deferred
+	 * host controller, 3x26 PMP will very rarely drop a deferred
 	 * R_OK that was intended for the host. Symptom will be all
 	 * 5 drives under test will timeout, get reset, and recover.
 	 */
-	if (vendor == 0x1095 && devid == 0x3726) {
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
 		u32 reg;

 		err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to read Sil3726 Private Register";
+			reason = "failed to read Sil3x26 Private Register";
 			goto fail;
 		}
 		reg &= ~0x1;
 		err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
 		if (err_mask) {
 			rc = -EIO;
-			reason = "failed to write Sil3726 Private Register";
+			reason = "failed to write Sil3x26 Private Register";
 			goto fail;
 		}
 	}
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
 	u16 devid = sata_pmp_gscr_devid(gscr);
 	struct ata_link *link;

-	if (vendor == 0x1095 && devid == 0x3726) {
-		/* sil3726 quirks */
+	if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+		/* sil3x26 quirks */
 		ata_for_each_link(link, ap, EDGE) {
 			/* link reports offline after LPM */
 			link->flags |= ATA_LFLAG_NO_LPM;

View file

@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 {
 	struct sata_fsl_host_priv *host_priv = host->private_data;
 	void __iomem *hcr_base = host_priv->hcr_base;
+	unsigned long flags;

 	if (count > ICC_MAX_INT_COUNT_THRESHOLD)
 		count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
 	    (count > ICC_MIN_INT_COUNT_THRESHOLD))
 		ticks = ICC_SAFE_INT_TICKS;

-	spin_lock(&host->lock);
+	spin_lock_irqsave(&host->lock, flags);
 	iowrite32((count << 24 | ticks), hcr_base + ICC);

 	intr_coalescing_count = count;
 	intr_coalescing_ticks = ticks;
-	spin_unlock(&host->lock);
+	spin_unlock_irqrestore(&host->lock, flags);

 	DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
 		intr_coalescing_count, intr_coalescing_ticks);

View file

@@ -86,11 +86,11 @@ struct ecx_plat_data {
 #define SGPIO_SIGNALS	3
 #define ECX_ACTIVITY_BITS	0x300000
-#define ECX_ACTIVITY_SHIFT	2
+#define ECX_ACTIVITY_SHIFT	0
 #define ECX_LOCATE_BITS	0x80000
 #define ECX_LOCATE_SHIFT	1
 #define ECX_FAULT_BITS	0x400000
-#define ECX_FAULT_SHIFT	0
+#define ECX_FAULT_SHIFT	2
 static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
 				  u32 shift)
 {

View file

@@ -141,6 +141,8 @@ static ssize_t show_mem_removable(struct device *dev,
 		container_of(dev, struct memory_block, dev);

 	for (i = 0; i < sections_per_block; i++) {
+		if (!present_section_nr(mem->start_section_nr + i))
+			continue;
 		pfn = section_nr_to_pfn(mem->start_section_nr + i);
 		ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
 	}

View file

@@ -332,7 +332,7 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
 	}

 	if (!rbnode->blklen) {
-		rbnode->blklen = sizeof(*rbnode);
+		rbnode->blklen = 1;
 		rbnode->base_reg = reg;
 	}

View file

@@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
 				  &status))
 		goto log_fail;

-	while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+	while ((status == SDVO_CMD_STATUS_PENDING ||
+		status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
 		udelay(15);
 		if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
 					      SDVO_I2C_CMD_STATUS,

View file

@@ -752,6 +752,8 @@
 					   will not assert AGPBUSY# and will only
 					   be delivered when out of C3. */
 #define   INSTPM_FORCE_ORDERING	(1<<7) /* GEN6+ */
+#define   INSTPM_TLB_INVALIDATE	(1<<9)
+#define   INSTPM_SYNC_FLUSH	(1<<5)
 #define ACTHD	0x020c8
 #define FW_BLC	0x020d8
 #define FW_BLC2	0x020dc
@@ -4438,7 +4440,7 @@
 #define  EDP_LINK_TRAIN_600MV_0DB_IVB		(0x30 <<22)
 #define  EDP_LINK_TRAIN_600MV_3_5DB_IVB		(0x36 <<22)
 #define  EDP_LINK_TRAIN_800MV_0DB_IVB		(0x38 <<22)
-#define  EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x33 <<22)
+#define  EDP_LINK_TRAIN_800MV_3_5DB_IVB		(0x3e <<22)

 /* legacy values */
 #define  EDP_LINK_TRAIN_500MV_0DB_IVB		(0x00 <<22)

View file

@@ -968,6 +968,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
 	POSTING_READ(mmio);

+	/* Flush the TLB for this page */
+	if (INTEL_INFO(dev)->gen >= 6) {
+		u32 reg = RING_INSTPM(ring->mmio_base);
+		I915_WRITE(reg,
+			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+					      INSTPM_SYNC_FLUSH));
+		if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+			     1000))
+			DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+				  ring->name);
+	}
 }

 static int

View file

@@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 	u32 splitoff;
 	u32 s, e;

+	BUG_ON(!type);
+
 	list_for_each_entry(this, &mm->free, fl_entry) {
 		e = this->offset + this->length;
 		s = this->offset;
@@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
 	struct nouveau_mm_node *prev, *this, *next;
 	u32 mask = align - 1;

+	BUG_ON(!type);
+
 	list_for_each_entry_reverse(this, &mm->free, fl_entry) {
 		u32 e = this->offset + this->length;
 		u32 s = this->offset;

View file

@@ -20,8 +20,8 @@ nouveau_mc(void *obj)
 	return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
 }

-#define nouveau_mc_create(p,e,o,d) \
-	nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,m,d) \
+	nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
 #define nouveau_mc_destroy(p) ({ \
 	struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
 })
@@ -33,7 +33,8 @@ nouveau_mc(void *obj)
 })

 int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
-		       struct nouveau_oclass *, int, void **);
+		       struct nouveau_oclass *, const struct nouveau_mc_intr *,
+		       int, void **);
 void _nouveau_mc_dtor(struct nouveau_object *);
 int _nouveau_mc_init(struct nouveau_object *);
 int _nouveau_mc_fini(struct nouveau_object *, bool);

View file

@@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 		return ret;

 	switch (pfb914 & 0x00000003) {
-	case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break;
-	case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break;
-	case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break;
+	case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
+	case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
+	case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
 	case 0x00000003: break;
 	}

-	pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-	pfb->ram->tags = nv_rd32(pfb, 0x100320);
+	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+	ram->tags = nv_rd32(pfb, 0x100320);
 	return 0;
 }

View file

@@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;

-	pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-	pfb->ram->type = NV_MEM_TYPE_STOLEN;
+	ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+	ram->type = NV_MEM_TYPE_STOLEN;
 	return 0;
 }

View file

@@ -30,8 +30,9 @@ struct nvc0_ltcg_priv {
 	struct nouveau_ltcg base;
 	u32 part_nr;
 	u32 subp_nr;
-	struct nouveau_mm tags;
 	u32 num_tags;
+	u32 tag_base;
+	struct nouveau_mm tags;
 	struct nouveau_mm_node *tag_ram;
 };
@@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 	u32 tag_size, tag_margin, tag_align;
 	int ret;

-	nv_wr32(priv, 0x17e8d8, priv->part_nr);
-	if (nv_device(pfb)->card_type >= NV_E0)
-		nv_wr32(priv, 0x17e000, priv->part_nr);
-
 	/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
 	priv->num_tags = (pfb->ram->size >> 17) / 4;
 	if (priv->num_tags > (1 << 17))
@@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 	tag_size += tag_align;
 	tag_size = (tag_size + 0xfff) >> 12; /* round up */

-	ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
+	ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1,
 			      &priv->tag_ram);
 	if (ret) {
 		priv->num_tags = 0;
@@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
 		tag_base += tag_align - 1;
 		ret = do_div(tag_base, tag_align);

-		nv_wr32(priv, 0x17e8d4, tag_base);
+		priv->tag_base = tag_base;
 	}

 	ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
@@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	}
 	priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;

-	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
-
 	ret = nvc0_ltcg_init_tag_ram(pfb, priv);
 	if (ret)
 		return ret;
@@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object)
 	nouveau_ltcg_destroy(ltcg);
 }

+static int
+nvc0_ltcg_init(struct nouveau_object *object)
+{
+	struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+	struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+	int ret;
+
+	ret = nouveau_ltcg_init(ltcg);
+	if (ret)
+		return ret;
+
+	nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+	nv_wr32(priv, 0x17e8d8, priv->part_nr);
+	if (nv_device(ltcg)->card_type >= NV_E0)
+		nv_wr32(priv, 0x17e000, priv->part_nr);
+	nv_wr32(priv, 0x17e8d4, priv->tag_base);
+	return 0;
+}
+
 struct nouveau_oclass
 nvc0_ltcg_oclass = {
 	.handle = NV_SUBDEV(LTCG, 0xc0),
 	.ofuncs = &(struct nouveau_ofuncs) {
 		.ctor = nvc0_ltcg_ctor,
 		.dtor = nvc0_ltcg_dtor,
-		.init = _nouveau_ltcg_init,
+		.init = nvc0_ltcg_init,
 		.fini = _nouveau_ltcg_fini,
 	},
 };

View file

@@ -80,7 +80,9 @@ _nouveau_mc_dtor(struct nouveau_object *object)
 int
 nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
-		   struct nouveau_oclass *oclass, int length, void **pobject)
+		   struct nouveau_oclass *oclass,
+		   const struct nouveau_mc_intr *intr_map,
+		   int length, void **pobject)
 {
 	struct nouveau_device *device = nv_device(parent);
 	struct nouveau_mc *pmc;
@@ -92,6 +94,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;

+	pmc->intr_map = intr_map;
+
 	ret = request_irq(device->pdev->irq, nouveau_mc_intr,
 			  IRQF_SHARED, "nouveau", pmc);
 	if (ret < 0)

View file

@@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv04_mc_priv *priv;
 	int ret;

-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;

-	priv->base.intr_map = nv04_mc_intr;
-
 	return 0;
 }

View file

@@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv44_mc_priv *priv;
 	int ret;

-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;

-	priv->base.intr_map = nv04_mc_intr;
-
 	return 0;
 }

View file

@@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv50_mc_priv *priv;
 	int ret;

-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;

-	priv->base.intr_map = nv50_mc_intr;
-
 	return 0;
 }

View file

@@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nv98_mc_priv *priv;
 	int ret;

-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;

-	priv->base.intr_map = nv98_mc_intr;
-
 	return 0;
 }

View file

@@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	struct nvc0_mc_priv *priv;
 	int ret;

-	ret = nouveau_mc_create(parent, engine, oclass, &priv);
+	ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;

-	priv->base.intr_map = nvc0_mc_intr;
-
 	return 0;
 }

View file

@@ -606,6 +606,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 		regp->ramdac_a34 = 0x1;
 }

+static int
+nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+	struct nv04_display *disp = nv04_display(crtc->dev);
+	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	int ret;
+
+	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+	if (ret == 0) {
+		if (disp->image[nv_crtc->index])
+			nouveau_bo_unpin(disp->image[nv_crtc->index]);
+		nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+	}
+
+	return ret;
+}
+
 /**
  * Sets up registers for the given mode/adjusted_mode pair.
  *
@@ -622,10 +640,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	struct drm_device *dev = crtc->dev;
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	int ret;

 	NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
 	drm_mode_debug_printmodeline(adjusted_mode);

+	ret = nv_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
+
 	/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
 	nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
@@ -722,6 +745,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
 static void nv_crtc_destroy(struct drm_crtc *crtc)
 {
+	struct nv04_display *disp = nv04_display(crtc->dev);
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);

 	if (!nv_crtc)
@@ -729,6 +753,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
 	drm_crtc_cleanup(crtc);

+	if (disp->image[nv_crtc->index])
+		nouveau_bo_unpin(disp->image[nv_crtc->index]);
+	nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+
 	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
 	nouveau_bo_unpin(nv_crtc->cursor.nvbo);
 	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
@@ -753,6 +781,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
 	nouveau_hw_load_state_palette(dev, nv_crtc->index, &nv04_display(dev)->mode_reg);
 }

+static void
+nv_crtc_disable(struct drm_crtc *crtc)
+{
+	struct nv04_display *disp = nv04_display(crtc->dev);
+	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	if (disp->image[nv_crtc->index])
+		nouveau_bo_unpin(disp->image[nv_crtc->index]);
+	nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+}
+
 static void
 nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
 		  uint32_t size)
@@ -791,7 +829,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	struct drm_framebuffer *drm_fb;
 	struct nouveau_framebuffer *fb;
 	int arb_burst, arb_lwm;
-	int ret;

 	NV_DEBUG(drm, "index %d\n", nv_crtc->index);
@@ -801,10 +838,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 		return 0;
 	}

 	/* If atomic, we want to switch to the fb we were passed, so
-	 * now we update pointers to do that. (We don't pin; just
-	 * assume we're already pinned and update the base address.)
+	 * now we update pointers to do that.
 	 */
 	if (atomic) {
 		drm_fb = passed_fb;
@@ -812,17 +847,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
 	} else {
 		drm_fb = crtc->fb;
 		fb = nouveau_framebuffer(crtc->fb);
-		/* If not atomic, we can go ahead and pin, and unpin the
-		 * old fb we were passed.
-		 */
-		ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
-		if (ret)
-			return ret;
-
-		if (passed_fb) {
-			struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
-			nouveau_bo_unpin(ofb->nvbo);
-		}
 	}

 	nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -877,6 +901,9 @@ static int
 nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
 			struct drm_framebuffer *old_fb)
 {
+	int ret = nv_crtc_swap_fbs(crtc, old_fb);
+	if (ret)
+		return ret;
 	return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
 }
@@ -1027,6 +1054,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
 	.mode_set_base = nv04_crtc_mode_set_base,
 	.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
 	.load_lut = nv_crtc_gamma_load,
+	.disable = nv_crtc_disable,
 };

 int

View file

@@ -81,6 +81,7 @@ struct nv04_display {
 	uint32_t saved_vga_font[4][16384];
 	uint32_t dac_users[4];
 	struct nouveau_object *core;
+	struct nouveau_bo *image[2];
 };

 static inline struct nv04_display *

View file

@@ -577,6 +577,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		ret = nv50_display_flip_next(crtc, fb, chan, 0);
 		if (ret)
 			goto fail_unreserve;
+	} else {
+		struct nv04_display *dispnv04 = nv04_display(dev);
+		nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
 	}

 	ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);

View file

@@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
 	if (clk < pll->vco1.max_freq)
 		pll->vco2.max_freq = 0;

-	pclk->pll_calc(pclk, pll, clk, &coef);
+	ret = pclk->pll_calc(pclk, pll, clk, &coef);
 	if (ret == 0)
 		return -ERANGE;

View file

@@ -29,7 +29,9 @@
 #include <drm/drmP.h>
 #include <drm/ttm/ttm_bo_driver.h>

-#define VMW_PPN_SIZE sizeof(unsigned long)
+#define VMW_PPN_SIZE (sizeof(unsigned long))
+/* A future safe maximum remap size. */
+#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)

 static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 			 struct page *pages[],
@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
 {
 	SVGAFifoCmdDefineGMR2 define_cmd;
 	SVGAFifoCmdRemapGMR2 remap_cmd;
-	uint32_t define_size = sizeof(define_cmd) + 4;
-	uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
 	uint32_t *cmd;
 	uint32_t *cmd_orig;
+	uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
+	uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
+	uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
+	uint32_t remap_pos = 0;
+	uint32_t cmd_size = define_size + remap_size;
 	uint32_t i;

-	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
+	cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
 	if (unlikely(cmd == NULL))
 		return -ENOMEM;

 	define_cmd.gmrId = gmr_id;
 	define_cmd.numPages = num_pages;

+	*cmd++ = SVGA_CMD_DEFINE_GMR2;
+	memcpy(cmd, &define_cmd, sizeof(define_cmd));
+	cmd += sizeof(define_cmd) / sizeof(*cmd);
+
+	/*
+	 * Need to split the command if there are too many
+	 * pages that goes into the gmr.
+	 */
+
 	remap_cmd.gmrId = gmr_id;
 	remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
 		SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
-	remap_cmd.offsetPages = 0;
-	remap_cmd.numPages = num_pages;

-	*cmd++ = SVGA_CMD_DEFINE_GMR2;
-	memcpy(cmd, &define_cmd, sizeof(define_cmd));
-	cmd += sizeof(define_cmd) / sizeof(uint32);
+	while (num_pages > 0) {
+		unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
+
+		remap_cmd.offsetPages = remap_pos;
+		remap_cmd.numPages = nr;

-	*cmd++ = SVGA_CMD_REMAP_GMR2;
-	memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
-	cmd += sizeof(remap_cmd) / sizeof(uint32);
+		*cmd++ = SVGA_CMD_REMAP_GMR2;
+		memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+		cmd += sizeof(remap_cmd) / sizeof(*cmd);

-	for (i = 0; i < num_pages; ++i) {
-		if (VMW_PPN_SIZE <= 4)
-			*cmd = page_to_pfn(*pages++);
-		else
-			*((uint64_t *)cmd) = page_to_pfn(*pages++);
+		for (i = 0; i < nr; ++i) {
+			if (VMW_PPN_SIZE <= 4)
+				*cmd = page_to_pfn(*pages++);
+			else
+				*((uint64_t *)cmd) = page_to_pfn(*pages++);

-		cmd += VMW_PPN_SIZE / sizeof(*cmd);
+			cmd += VMW_PPN_SIZE / sizeof(*cmd);
+		}
+
+		num_pages -= nr;
+		remap_pos += nr;
 	}

-	vmw_fifo_commit(dev_priv, define_size + remap_size);
+	BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
+
+	vmw_fifo_commit(dev_priv, cmd_size);

 	return 0;
 }

View file

@@ -232,7 +232,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev,
 	switch (mask) {
 	case IIO_CHAN_INFO_RAW:
-		ret = adjd_s311_read_data(indio_dev, chan->address, val);
+		ret = adjd_s311_read_data(indio_dev,
+			ADJD_S311_DATA_REG(chan->address), val);
 		if (ret < 0)
 			return ret;
 		return IIO_VAL_INT;

View file

@@ -167,6 +167,7 @@ static const struct xpad_device {
 	{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
 	{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+	{ 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
 	{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },

View file

@@ -672,6 +672,7 @@ static int elantech_packet_check_v2(struct psmouse *psmouse)
  */
 static int elantech_packet_check_v3(struct psmouse *psmouse)
 {
+	struct elantech_data *etd = psmouse->private;
 	const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff };
 	unsigned char *packet = psmouse->packet;
@@ -682,19 +683,48 @@ static int elantech_packet_check_v3(struct psmouse *psmouse)
 	if (!memcmp(packet, debounce_packet, sizeof(debounce_packet)))
 		return PACKET_DEBOUNCE;

-	if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
-		return PACKET_V3_HEAD;
+	/*
+	 * If the hardware flag 'crc_enabled' is set the packets have
+	 * different signatures.
+	 */
+	if (etd->crc_enabled) {
+		if ((packet[3] & 0x09) == 0x08)
+			return PACKET_V3_HEAD;

-	if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
-		return PACKET_V3_TAIL;
+		if ((packet[3] & 0x09) == 0x09)
+			return PACKET_V3_TAIL;
+	} else {
+		if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
+			return PACKET_V3_HEAD;
+
+		if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
+			return PACKET_V3_TAIL;
+	}

 	return PACKET_UNKNOWN;
 }

 static int elantech_packet_check_v4(struct psmouse *psmouse)
 {
+	struct elantech_data *etd = psmouse->private;
 	unsigned char *packet = psmouse->packet;
 	unsigned char packet_type = packet[3] & 0x03;
+	bool sanity_check;
+
+	/*
+	 * Sanity check based on the constant bits of a packet.
+	 * The constant bits change depending on the value of
+	 * the hardware flag 'crc_enabled' but are the same for
+	 * every packet, regardless of the type.
+	 */
+	if (etd->crc_enabled)
+		sanity_check = ((packet[3] & 0x08) == 0x00);
+	else
+		sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+				(packet[3] & 0x1c) == 0x10);
+
+	if (!sanity_check)
+		return PACKET_UNKNOWN;

 	switch (packet_type) {
 	case 0:
@@ -1313,6 +1343,12 @@ static int elantech_set_properties(struct elantech_data *etd)
 		etd->reports_pressure = true;
 	}

+	/*
+	 * The signatures of v3 and v4 packets change depending on the
+	 * value of this hardware flag.
+	 */
+	etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
+
 	return 0;
 }

View file

@@ -129,6 +129,7 @@ struct elantech_data {
 	bool paritycheck;
 	bool jumpy_cursor;
 	bool reports_pressure;
+	bool crc_enabled;
 	unsigned char hw_version;
 	unsigned int fw_version;
 	unsigned int single_finger_reports;

View file

@@ -22,7 +22,8 @@ config SERIO_I8042
 	tristate "i8042 PC Keyboard controller" if EXPERT || !X86
 	default y
 	depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
-		   (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390
+		   (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \
+		   !ARC
 	help
 	  i8042 is the chip over which the standard AT keyboard and PS/2
 	  mouse are connected to the computer. If you use these devices,

View file

@@ -2112,7 +2112,7 @@ static const struct wacom_features wacom_features_0xDA =
 	{ "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
 	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
 	  .touch_max = 2 };
-static struct wacom_features wacom_features_0xDB =
+static const struct wacom_features wacom_features_0xDB =
 	{ "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
 	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
 	  .touch_max = 2 };
@@ -2127,6 +2127,12 @@ static const struct wacom_features wacom_features_0xDF =
 	{ "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023,
 	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
 	  .touch_max = 16 };
+static const struct wacom_features wacom_features_0x300 =
+	{ "Wacom Bamboo One S", WACOM_PKGLEN_BBPEN, 14720, 9225, 1023,
+	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x301 =
+	{ "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023,
+	  31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x6004 =
 	{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
 	  0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2253,6 +2259,8 @@ const struct usb_device_id wacom_ids[] = {
 	{ USB_DEVICE_WACOM(0x100) },
 	{ USB_DEVICE_WACOM(0x101) },
 	{ USB_DEVICE_WACOM(0x10D) },
+	{ USB_DEVICE_WACOM(0x300) },
+	{ USB_DEVICE_WACOM(0x301) },
 	{ USB_DEVICE_WACOM(0x304) },
 	{ USB_DEVICE_WACOM(0x4001) },
 	{ USB_DEVICE_WACOM(0x47) },

View file

@@ -23,7 +23,7 @@
 #define SIRFSOC_INT_RISC_LEVEL1	0x0024
 #define SIRFSOC_INIT_IRQ_ID	0x0038

-#define SIRFSOC_NUM_IRQS	128
+#define SIRFSOC_NUM_IRQS	64

 static struct irq_domain *sirfsoc_irqdomain;
@@ -32,15 +32,18 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
 {
 	struct irq_chip_generic *gc;
 	struct irq_chip_type *ct;
+	int ret;
+	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;

-	gc = irq_alloc_generic_chip("SIRFINTC", 1, irq_start, base, handle_level_irq);
+	ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc",
+		handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
+
+	gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start);
+	gc->reg_base = base;
 	ct = gc->chip_types;
 	ct->chip.irq_mask = irq_gc_mask_clr_bit;
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 	ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
-
-	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0);
 }

 static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
@@ -60,9 +63,8 @@ static int __init sirfsoc_irq_init(struct device_node *np, struct device_node *p
 	if (!base)
 		panic("unable to map intc cpu registers\n");

-	/* using legacy because irqchip_generic does not work with linear */
-	sirfsoc_irqdomain = irq_domain_add_legacy(np, SIRFSOC_NUM_IRQS, 0, 0,
-						  &irq_domain_simple_ops, base);
+	sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS,
+						  &irq_generic_chip_ops, base);

 	sirfsoc_alloc_gc(base, 0, 32);
 	sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32);

View file

@@ -288,8 +288,10 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
 	u8 *data;
 	int len;

-	if (skb->len < sizeof(int))
+	if (skb->len < sizeof(int)) {
 		printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__);
+		return -EINVAL;
+	}
 	cont = *((int *)skb->data);
 	len = skb->len - sizeof(int);
 	data = skb->data + sizeof(int);

View file

@@ -148,7 +148,7 @@ config PCMCIA_PCNET
 config NE_H8300
 	tristate "NE2000 compatible support for H8/300"
-	depends on H8300
+	depends on H8300H_AKI3068NET || H8300H_H8MAX
 	---help---
 	  Say Y here if you want to use the NE2000 compatible
 	  controller on the Renesas H8/300 processor.

View file

@@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
 	int old_max_eth_txqs, new_max_eth_txqs;
 	int old_txdata_index = 0, new_txdata_index = 0;
+	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;

 	/* Copy the NAPI object as it has been already initialized */
 	from_fp->napi = to_fp->napi;
@@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
 	memcpy(to_fp, from_fp, sizeof(*to_fp));
 	to_fp->index = to;

+	/* Retain the tpa_info of the original `to' version as we don't want
+	 * 2 FPs to contain the same tpa_info pointer.
+	 */
+	to_fp->tpa_info = old_tpa_info;
+
 	/* move sp_objs contents as well, as their indices match fp ones */
 	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
@@ -2959,8 +2965,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
 	if (IS_PF(bp)) {
 		if (CNIC_LOADED(bp))
 			bnx2x_free_mem_cnic(bp);
-		bnx2x_free_mem(bp);
 	}
+	bnx2x_free_mem(bp);

 	bp->state = BNX2X_STATE_CLOSED;
 	bp->cnic_loaded = false;

View file

@@ -6501,12 +6501,13 @@ static int bnx2x_link_initialize(struct link_params *params,
 		struct bnx2x_phy *phy = &params->phy[INT_PHY];
 		if (vars->line_speed == SPEED_AUTO_NEG &&
 		    (CHIP_IS_E1x(bp) ||
-		     CHIP_IS_E2(bp)))
+		     CHIP_IS_E2(bp))) {
 			bnx2x_set_parallel_detection(phy, params);
 		if (params->phy[INT_PHY].config_init)
 			params->phy[INT_PHY].config_init(phy,
 							 params,
 							 vars);
+		}
 	}

 	/* Init external phy*/

View file

@@ -7855,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
 {
 	int i;

-	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
-		       sizeof(struct host_sp_status_block));
-
 	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
 		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);

+	if (IS_VF(bp))
+		return;
+
+	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+		       sizeof(struct host_sp_status_block));
+
 	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
 		       sizeof(struct bnx2x_slowpath));

View file

@@ -545,23 +545,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
 	return 0;
 }

-static int
-bnx2x_vfop_config_vlan0(struct bnx2x *bp,
-			struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
-			bool add)
-{
-	int rc;
-
-	vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
-				       BNX2X_VLAN_MAC_DEL;
-	vlan_mac->user_req.u.vlan.vlan = 0;
-
-	rc = bnx2x_config_vlan_mac(bp, vlan_mac);
-	if (rc == -EEXIST)
-		rc = 0;
-	return rc;
-}
-
 static int bnx2x_vfop_config_list(struct bnx2x *bp,
 				  struct bnx2x_vfop_filters *filters,
 				  struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
@@ -666,30 +649,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	case BNX2X_VFOP_VLAN_CONFIG_LIST:
 		/* next state */
-		vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
-
-		/* remove vlan0 - could be no-op */
-		vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
-		if (vfop->rc)
-			goto op_err;
+		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;

-		/* Do vlan list config. if this operation fails we try to
-		 * restore vlan0 to keep the queue is working order
-		 */
+		/* do list config */
 		vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
 		if (!vfop->rc) {
 			set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
 			vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
 		}
-		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
-
-	case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
-		/* next state */
-		vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
-		if (list_empty(&obj->head))
-			/* add vlan0 */
-			vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
+		/* fall-through */

 		bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);

 	default:
@@ -2833,6 +2800,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
 	return 0;
 }

+struct set_vf_state_cookie {
+	struct bnx2x_virtf *vf;
+	u8 state;
+};
+
+void bnx2x_set_vf_state(void *cookie)
+{
+	struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
+
+	p->vf->state = p->state;
+}
+
 /* VFOP close (teardown the queues, delete mcasts and close HW) */
 static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 {
@@ -2883,7 +2862,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
 op_err:
 	BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
 op_done:
-	vf->state = VF_ACQUIRED;
+
+	/* need to make sure there are no outstanding stats ramrods which may
+	 * cause the device to access the VF's stats buffer which it will free
+	 * as soon as we return from the close flow.
+	 */
+	{
+		struct set_vf_state_cookie cookie;
+
+		cookie.vf = vf;
+		cookie.state = VF_ACQUIRED;
+		bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+	}
+
 	DP(BNX2X_MSG_IOV, "set state to acquired\n");
 	bnx2x_vfop_end(bp, vf, vfop);
 }

View file

@@ -522,20 +522,16 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
 /* should be called under stats_sema */
 static void __bnx2x_stats_start(struct bnx2x *bp)
 {
-	/* vfs travel through here as part of the statistics FSM, but no action
-	 * is required
-	 */
-	if (IS_VF(bp))
-		return;
-
-	if (bp->port.pmf)
-		bnx2x_port_stats_init(bp);
+	if (IS_PF(bp)) {
+		if (bp->port.pmf)
+			bnx2x_port_stats_init(bp);

-	else if (bp->func_stx)
-		bnx2x_func_stats_init(bp);
+		else if (bp->func_stx)
+			bnx2x_func_stats_init(bp);

-	bnx2x_hw_stats_post(bp);
-	bnx2x_storm_stats_post(bp);
+		bnx2x_hw_stats_post(bp);
+		bnx2x_storm_stats_post(bp);
+	}

 	bp->stats_started = true;
 }
@@ -1997,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
 				estats->mac_discard);
 	}
 }
+
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+			   void (func_to_exec)(void *cookie),
+			   void *cookie){
+	if (down_timeout(&bp->stats_sema, HZ/10))
+		BNX2X_ERR("Unable to acquire stats lock\n");
+	bnx2x_stats_comp(bp);
+	func_to_exec(cookie);
+	__bnx2x_stats_start(bp);
+	up(&bp->stats_sema);
+}

View file

@@ -539,6 +539,9 @@ struct bnx2x;
 void bnx2x_memset_stats(struct bnx2x *bp);
 void bnx2x_stats_init(struct bnx2x *bp);
 void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+			   void (func_to_exec)(void *cookie),
+			   void *cookie);

 /**
  * bnx2x_save_statistics - save statistics when unloading.

View file

@@ -3030,6 +3030,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp)
 	return false;
 }

+static bool tg3_phy_led_bug(struct tg3 *tp)
+{
+	switch (tg3_asic_rev(tp)) {
+	case ASIC_REV_5719:
+		if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+		    !tp->pci_fn)
+			return true;
+		return false;
+	}
+
+	return false;
+}
+
 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 {
 	u32 val;
@@ -3077,8 +3090,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
 		}
 		return;
 	} else if (do_low_power) {
-		tg3_writephy(tp, MII_TG3_EXT_CTRL,
-			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+		if (!tg3_phy_led_bug(tp))
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

 		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
 		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |

View file

@@ -353,11 +353,9 @@ struct xgmac_extra_stats {
 	/* Receive errors */
 	unsigned long rx_watchdog;
 	unsigned long rx_da_filter_fail;
-	unsigned long rx_sa_filter_fail;
 	unsigned long rx_payload_error;
 	unsigned long rx_ip_header_error;

 	/* Tx/Rx IRQ errors */
-	unsigned long tx_undeflow;
 	unsigned long tx_process_stopped;
 	unsigned long rx_buf_unav;
 	unsigned long rx_process_stopped;
@@ -393,6 +391,7 @@ struct xgmac_priv {
 	char rx_pause;
 	char tx_pause;
 	int wolopts;
+	struct work_struct tx_timeout_work;
 };

 /* XGMAC Configuration Settings */
@@ -409,6 +408,9 @@ struct xgmac_priv {
 #define dma_ring_space(h, t, s)	CIRC_SPACE(h, t, s)
 #define dma_ring_cnt(h, t, s)	CIRC_CNT(h, t, s)

+#define tx_dma_ring_space(p) \
+	dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
+
 /* XGMAC Descriptor Access Helpers */
 static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 {
@@ -421,7 +423,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
 static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
 {
-	u32 len = cpu_to_le32(p->flags);
+	u32 len = le32_to_cpu(p->buf_size);

 	return (len & DESC_BUFFER1_SZ_MASK) +
 		((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
 }
@@ -464,11 +466,23 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
 	p->flags = cpu_to_le32(tmpflags);
 }

+static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
+{
+	u32 tmpflags = le32_to_cpu(p->flags);
+	tmpflags &= TXDESC_END_RING;
+	p->flags = cpu_to_le32(tmpflags);
+}
+
 static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
 {
 	return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
 }

+static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
+{
+	return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
+}
+
 static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
 {
 	return le32_to_cpu(p->buf1_addr);
@@ -609,10 +623,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
 {
 	u32 data;

-	data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
-	writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
-	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
-	writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+	if (addr) {
+		data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+		writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+		data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+		writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+	} else {
+		writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
+		writel(0, ioaddr + XGMAC_ADDR_LOW(num));
+	}
 }

 static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
@@ -683,9 +702,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
 		if (unlikely(skb == NULL))
 			break;

-		priv->rx_skbuff[entry] = skb;
 		paddr = dma_map_single(priv->device, skb->data,
-				       bufsz, DMA_FROM_DEVICE);
+				       priv->dma_buf_sz - NET_IP_ALIGN,
+				       DMA_FROM_DEVICE);
+		if (dma_mapping_error(priv->device, paddr)) {
+			dev_kfree_skb_any(skb);
+			break;
+		}
+		priv->rx_skbuff[entry] = skb;
 		desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
 	}
@@ -782,20 +806,21 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
 		return;

 	for (i = 0; i < DMA_RX_RING_SZ; i++) {
-		if (priv->rx_skbuff[i] == NULL)
+		struct sk_buff *skb = priv->rx_skbuff[i];
+
+		if (skb == NULL)
 			continue;

 		p = priv->dma_rx + i;
 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 priv->dma_buf_sz, DMA_FROM_DEVICE);
-		dev_kfree_skb_any(priv->rx_skbuff[i]);
+				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
 		priv->rx_skbuff[i] = NULL;
 	}
 }

 static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 {
-	int i, f;
+	int i;
 	struct xgmac_dma_desc *p;

 	if (!priv->tx_skbuff)
@@ -806,16 +831,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
 			continue;

 		p = priv->dma_tx + i;
-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
-
-		for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
-			p = priv->dma_tx + i++;
+		if (desc_get_tx_fs(p))
+			dma_unmap_single(priv->device, desc_get_buf_addr(p),
+					 desc_get_buf_len(p), DMA_TO_DEVICE);
+		else
 			dma_unmap_page(priv->device, desc_get_buf_addr(p),
 				       desc_get_buf_len(p), DMA_TO_DEVICE);
-		}

-		dev_kfree_skb_any(priv->tx_skbuff[i]);
+		if (desc_get_tx_ls(p))
+			dev_kfree_skb_any(priv->tx_skbuff[i]);
 		priv->tx_skbuff[i] = NULL;
 	}
 }
@@ -852,8 +876,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
  */
 static void xgmac_tx_complete(struct xgmac_priv *priv)
 {
-	int i;
-
 	while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
 		unsigned int entry = priv->tx_tail;
 		struct sk_buff *skb = priv->tx_skbuff[entry];
@@ -863,55 +885,45 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
 		if (desc_get_owner(p))
 			break;

-		/* Verify tx error by looking at the last segment */
-		if (desc_get_tx_ls(p))
-			desc_get_tx_status(priv, p);
-
 		netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
 			   priv->tx_head, priv->tx_tail);

-		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 desc_get_buf_len(p), DMA_TO_DEVICE);
+		if (desc_get_tx_fs(p))
+			dma_unmap_single(priv->device, desc_get_buf_addr(p),
+					 desc_get_buf_len(p), DMA_TO_DEVICE);
+		else
+			dma_unmap_page(priv->device, desc_get_buf_addr(p),
+				       desc_get_buf_len(p), DMA_TO_DEVICE);
+
+		/* Check tx error on the last segment */
+		if (desc_get_tx_ls(p)) {
+			desc_get_tx_status(priv, p);
+			dev_kfree_skb(skb);
+		}

 		priv->tx_skbuff[entry] = NULL;
 		priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
-
-		if (!skb) {
-			continue;
-		}
-
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
-							      DMA_TX_RING_SZ);
-			p = priv->dma_tx + priv->tx_tail;
-
-			dma_unmap_page(priv->device, desc_get_buf_addr(p),
-				       desc_get_buf_len(p), DMA_TO_DEVICE);
-		}
-
-		dev_kfree_skb(skb);
 	}

-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_tail is visible to xgmac_xmit */
+	smp_mb();
+	if (unlikely(netif_queue_stopped(priv->dev) &&
+	    (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
 		netif_wake_queue(priv->dev);
 }

-/**
- * xgmac_tx_err:
- * @priv: pointer to the private device structure
- * Description: it cleans the descriptors and restarts the transmission
- * in case of errors.
- */
-static void xgmac_tx_err(struct xgmac_priv *priv)
+static void xgmac_tx_timeout_work(struct work_struct *work)
 {
-	u32 reg, value, inten;
+	u32 reg, value;
+	struct xgmac_priv *priv =
+		container_of(work, struct xgmac_priv, tx_timeout_work);

-	netif_stop_queue(priv->dev);
+	napi_disable(&priv->napi);

-	inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
 	writel(0, priv->base + XGMAC_DMA_INTR_ENA);

+	netif_tx_lock(priv->dev);
+
 	reg = readl(priv->base + XGMAC_DMA_CONTROL);
 	writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
 	do {
@@ -927,9 +939,15 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
 	writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
 		priv->base + XGMAC_DMA_STATUS);
-	writel(inten, priv->base + XGMAC_DMA_INTR_ENA);

+	netif_tx_unlock(priv->dev);
 	netif_wake_queue(priv->dev);
+
+	napi_enable(&priv->napi);
+
+	/* Enable interrupts */
+	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
 }

 static int xgmac_hw_init(struct net_device *dev)
@@ -957,9 +975,7 @@ static int xgmac_hw_init(struct net_device *dev)
 		DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
 	writel(value, ioaddr + XGMAC_DMA_BUS_MODE);

-	/* Enable interrupts */
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
-	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+	writel(0, ioaddr + XGMAC_DMA_INTR_ENA);

 	/* Mask power mgt interrupt */
 	writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
@@ -1027,6 +1043,10 @@ static int xgmac_open(struct net_device *dev)
 	napi_enable(&priv->napi);
 	netif_start_queue(dev);

+	/* Enable interrupts */
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+	writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
 	return 0;
 }
@@ -1087,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
 	if (dma_mapping_error(priv->device, paddr)) {
 		dev_kfree_skb(skb);
-		return -EIO;
+		return NETDEV_TX_OK;
 	}
 	priv->tx_skbuff[entry] = skb;
 	desc_set_buf_addr_and_size(desc, paddr, len);
@@ -1099,14 +1119,12 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		paddr = skb_frag_dma_map(priv->device, frag, 0, len,
 					 DMA_TO_DEVICE);

-		if (dma_mapping_error(priv->device, paddr)) {
-			dev_kfree_skb(skb);
-			return -EIO;
-		}
+		if (dma_mapping_error(priv->device, paddr))
+			goto dma_err;

 		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
 		desc = priv->dma_tx + entry;
-		priv->tx_skbuff[entry] = NULL;
+		priv->tx_skbuff[entry] = skb;

 		desc_set_buf_addr_and_size(desc, paddr, len);
 		if (i < (nfrags - 1))
@@ -1124,13 +1142,35 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	wmb();
 	desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);

+	writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
 	priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);

-	writel(1, priv->base + XGMAC_DMA_TX_POLL);
-
-	if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
-	    MAX_SKB_FRAGS)
+	/* Ensure tx_head update is visible to tx completion */
+	smp_mb();
+	if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
 		netif_stop_queue(dev);
+		/* Ensure netif_stop_queue is visible to tx completion */
+		smp_mb();
+		if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
+			netif_start_queue(dev);
+	}
+	return NETDEV_TX_OK;

+dma_err:
+	entry = priv->tx_head;
+	for ( ; i > 0; i--) {
+		entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+		desc = priv->dma_tx + entry;
+		priv->tx_skbuff[entry] = NULL;
+		dma_unmap_page(priv->device, desc_get_buf_addr(desc),
+			       desc_get_buf_len(desc), DMA_TO_DEVICE);
+		desc_clear_tx_owner(desc);
+	}
+	desc = first;
+	dma_unmap_single(priv->device, desc_get_buf_addr(desc),
+			 desc_get_buf_len(desc), DMA_TO_DEVICE);
+	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
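The reworked xmit/completion pair above uses the classic lockless stop/wake queue protocol: publish the ring index, issue a full barrier, then re-check the space, so a stop racing a wake cannot strand the queue. A compact C11 sketch of that pairing (the ring size, fragment bound, and names are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>

#define RING_SZ   256	/* stands in for DMA_TX_RING_SZ */
#define MAX_FRAGS 18	/* stands in for MAX_SKB_FRAGS */

static _Atomic unsigned int head;	/* producer-owned, free-running */
static _Atomic unsigned int tail;	/* consumer-owned, free-running */
static atomic_bool stopped;		/* netif_stop_queue() analogue */

static unsigned int ring_space(void)
{
	return RING_SZ - (atomic_load(&head) - atomic_load(&tail));
}

/* Producer (xmit): advance head, then re-check space under a full fence. */
static void produce_one(void)
{
	atomic_fetch_add(&head, 1);
	atomic_thread_fence(memory_order_seq_cst);		/* smp_mb() */
	if (ring_space() <= MAX_FRAGS) {
		atomic_store(&stopped, true);
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
		if (ring_space() > MAX_FRAGS)	/* completion raced us */
			atomic_store(&stopped, false);
	}
}

/* Consumer (completion): advance tail, then wake a stopped queue. */
static void complete_one(void)
{
	atomic_fetch_add(&tail, 1);
	atomic_thread_fence(memory_order_seq_cst);		/* smp_mb() */
	if (atomic_load(&stopped) && ring_space() > MAX_FRAGS)
		atomic_store(&stopped, false);	/* netif_wake_queue() */
}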
@@ -1174,7 +1214,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
 		skb_put(skb, frame_len);
 		dma_unmap_single(priv->device, desc_get_buf_addr(p),
-				 frame_len, DMA_FROM_DEVICE);
+				 priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);

 		skb->protocol = eth_type_trans(skb, priv->dev);
 		skb->ip_summed = ip_checksum;
@@ -1225,9 +1265,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
 static void xgmac_tx_timeout(struct net_device *dev)
 {
 	struct xgmac_priv *priv = netdev_priv(dev);
-
-	/* Clear Tx resources and restart transmitting again */
-	xgmac_tx_err(priv);
+	schedule_work(&priv->tx_timeout_work);
 }

 /**
@@ -1286,6 +1324,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
 	if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
 		use_hash = true;
 		value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+	} else {
+		use_hash = false;
 	}
 	netdev_for_each_mc_addr(ha, dev) {
 		if (use_hash) {
@@ -1302,6 +1342,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
 	}

 out:
+	for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
+		xgmac_set_mac_addr(ioaddr, NULL, reg);
 	for (i = 0; i < XGMAC_NUM_HASH; i++)
 		writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
@@ -1366,7 +1408,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
 static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
 {
 	u32 intr_status;
-	bool tx_err = false;
 	struct net_device *dev = (struct net_device *)dev_id;
 	struct xgmac_priv *priv = netdev_priv(dev);
 	struct xgmac_extra_stats *x = &priv->xstats;
@@ -1396,16 +1437,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
 		if (intr_status & DMA_STATUS_TPS) {
 			netdev_err(priv->dev, "transmit process stopped\n");
 			x->tx_process_stopped++;
-			tx_err = true;
+			schedule_work(&priv->tx_timeout_work);
 		}
 		if (intr_status & DMA_STATUS_FBI) {
 			netdev_err(priv->dev, "fatal bus error\n");
 			x->fatal_bus_error++;
-			tx_err = true;
 		}
-
-		if (tx_err)
-			xgmac_tx_err(priv);
 	}

 	/* TX/RX NORMAL interrupts */
@@ -1569,7 +1606,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = {
 	XGMAC_STAT(rx_payload_error),
 	XGMAC_STAT(rx_ip_header_error),
 	XGMAC_STAT(rx_da_filter_fail),
-	XGMAC_STAT(rx_sa_filter_fail),
 	XGMAC_STAT(fatal_bus_error),
 	XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
 	XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
@@ -1708,6 +1744,7 @@ static int xgmac_probe(struct platform_device *pdev)
 	ndev->netdev_ops = &xgmac_netdev_ops;
 	SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
 	spin_lock_init(&priv->stats_lock);
+	INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);

 	priv->device = &pdev->dev;
 	priv->dev = ndev;
@@ -1759,7 +1796,7 @@ static int xgmac_probe(struct platform_device *pdev)
 	if (device_can_wakeup(priv->device))
 		priv->wolopts = WAKE_MAGIC;	/* Magic Frame as default */

-	ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+	ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
 	if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
 		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 				     NETIF_F_RXCSUM;


@@ -4476,6 +4476,10 @@ static int be_resume(struct pci_dev *pdev)
 	pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);

+	status = be_fw_wait_ready(adapter);
+	if (status)
+		return status;
+
 	/* tell fw we're ready to fire cmds */
 	status = be_cmd_fw_init(adapter);
 	if (status)


@@ -296,6 +296,9 @@ struct fec_enet_private {
 	/* The ring entries to be free()ed */
 	struct bufdesc	*dirty_tx;

+	unsigned short tx_ring_size;
+	unsigned short rx_ring_size;
+
 	struct	platform_device *pdev;

 	int	opened;


@@ -238,22 +238,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
 static int mii_cnt;

-static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
 {
-	struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-	if (is_ex)
-		return (struct bufdesc *)(ex + 1);
+	struct bufdesc *new_bd = bdp + 1;
+	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+	struct bufdesc_ex *ex_base;
+	struct bufdesc *base;
+	int ring_size;
+
+	if (bdp >= fep->tx_bd_base) {
+		base = fep->tx_bd_base;
+		ring_size = fep->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	} else {
+		base = fep->rx_bd_base;
+		ring_size = fep->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+	}
+
+	if (fep->bufdesc_ex)
+		return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
+			ex_base : ex_new_bd);
 	else
-		return bdp + 1;
+		return (new_bd >= (base + ring_size)) ?
+			base : new_bd;
 }

-static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
 {
-	struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
-	if (is_ex)
-		return (struct bufdesc *)(ex - 1);
+	struct bufdesc *new_bd = bdp - 1;
+	struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+	struct bufdesc_ex *ex_base;
+	struct bufdesc *base;
+	int ring_size;
+
+	if (bdp >= fep->tx_bd_base) {
+		base = fep->tx_bd_base;
+		ring_size = fep->tx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+	} else {
+		base = fep->rx_bd_base;
+		ring_size = fep->rx_ring_size;
+		ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+	}
+
+	if (fep->bufdesc_ex)
+		return (struct bufdesc *)((ex_new_bd < ex_base) ?
+			(ex_new_bd + ring_size) : ex_new_bd);
 	else
-		return bdp - 1;
+		return (new_bd < base) ? (new_bd + ring_size) : new_bd;
 }

 static void *swap_buffer(void *bufaddr, int len)
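The rewritten helpers derive the wrap point from the ring base and size instead of trusting a WRAP bit in each descriptor, which is what lets the ring sizes become runtime fields. Stripped of the extended-descriptor handling, the arithmetic reduces to two short helpers; a plain-C sketch with an illustrative descriptor type:

struct bufdesc {
	unsigned short cbd_sc;		/* control/status, illustrative */
	unsigned long  cbd_bufaddr;
};

/* Step forward within [base, base + ring_size) and wrap at the end. */
static struct bufdesc *ring_next(struct bufdesc *bdp,
				 struct bufdesc *base, int ring_size)
{
	return (bdp + 1 >= base + ring_size) ? base : bdp + 1;
}

/* Step backward, wrapping from the base to the last descriptor. */
static struct bufdesc *ring_prev(struct bufdesc *bdp,
				 struct bufdesc *base, int ring_size)
{
	return (bdp == base) ? base + ring_size - 1 : bdp - 1;
}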
@@ -379,7 +414,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		}
 	}

-	bdp_pre = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp_pre = fec_enet_get_prevdesc(bdp, fep);
 	if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
 	    !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
 		fep->delay_work.trig_tx = true;
@@ -388,10 +423,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	}

 	/* If this was the last BD in the ring, start at the beginning again. */
-	if (status & BD_ENET_TX_WRAP)
-		bdp = fep->tx_bd_base;
-	else
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_nextdesc(bdp, fep);

 	fep->cur_tx = bdp;
@@ -416,18 +448,18 @@ static void fec_enet_bd_init(struct net_device *dev)
 	/* Initialize the receive buffer descriptors. */
 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {

 		/* Initialize the BD for every fragment in the page. */
 		if (bdp->cbd_bufaddr)
 			bdp->cbd_sc = BD_ENET_RX_EMPTY;
 		else
 			bdp->cbd_sc = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}

 	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;

 	fep->cur_rx = fep->rx_bd_base;
@@ -435,7 +467,7 @@ static void fec_enet_bd_init(struct net_device *dev)
 	/* ...and the same for transmit */
 	bdp = fep->tx_bd_base;
 	fep->cur_tx = bdp;
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < fep->tx_ring_size; i++) {

 		/* Initialize the BD for every fragment in the page. */
 		bdp->cbd_sc = 0;
@@ -444,11 +476,11 @@ static void fec_enet_bd_init(struct net_device *dev)
 			fep->tx_skbuff[i] = NULL;
 		}
 		bdp->cbd_bufaddr = 0;
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}

 	/* Set the last buffer to wrap */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;
 	fep->dirty_tx = bdp;
 }
@@ -509,10 +541,10 @@ fec_restart(struct net_device *ndev, int duplex)
 	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
 	if (fep->bufdesc_ex)
 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
-			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
 	else
 		writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
-			* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+			* fep->rx_ring_size, fep->hwp + FEC_X_DES_START);

 	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
@@ -726,10 +758,7 @@ fec_enet_tx(struct net_device *ndev)
 	bdp = fep->dirty_tx;

 	/* get next bdp of dirty_tx */
-	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
-		bdp = fep->tx_bd_base;
-	else
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_nextdesc(bdp, fep);

 	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
@@ -799,10 +828,7 @@ fec_enet_tx(struct net_device *ndev)
 		fep->dirty_tx = bdp;

 		/* Update pointer to next buffer descriptor to be transmitted */
-		if (status & BD_ENET_TX_WRAP)
-			bdp = fep->tx_bd_base;
-		else
-			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);

 		/* Since we have freed up a buffer, the ring is no longer full
 		 */
@@ -970,8 +996,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
 						       htons(ETH_P_8021Q),
 						       vlan_tag);

-			if (!skb_defer_rx_timestamp(skb))
-				napi_gro_receive(&fep->napi, skb);
+			napi_gro_receive(&fep->napi, skb);
 		}

 		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
@@ -993,10 +1018,8 @@ rx_processing_done:
 		}

 		/* Update BD pointer to next entry */
-		if (status & BD_ENET_RX_WRAP)
-			bdp = fep->rx_bd_base;
-		else
-			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
+
 		/* Doing this here will keep the FEC running while we process
 		 * incoming frames.  On a heavily loaded network, we should be
 		 * able to keep up at the expense of system resources.
@@ -1662,7 +1685,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 	struct bufdesc	*bdp;

 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 		skb = fep->rx_skbuff[i];

 		if (bdp->cbd_bufaddr)
@@ -1670,11 +1693,11 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 				FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
 		if (skb)
 			dev_kfree_skb(skb);
-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}

 	bdp = fep->tx_bd_base;
-	for (i = 0; i < TX_RING_SIZE; i++)
+	for (i = 0; i < fep->tx_ring_size; i++)
 		kfree(fep->tx_bounce[i]);
 }
@@ -1686,7 +1709,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 	struct bufdesc	*bdp;

 	bdp = fep->rx_bd_base;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < fep->rx_ring_size; i++) {
 		skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
 		if (!skb) {
 			fec_enet_free_buffers(ndev);
@@ -1703,15 +1726,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_RX_INT;
 		}

-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}

 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;

 	bdp = fep->tx_bd_base;
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < fep->tx_ring_size; i++) {
 		fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);

 		bdp->cbd_sc = 0;
@@ -1722,11 +1745,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 			ebdp->cbd_esc = BD_ENET_TX_INT;
 		}

-		bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+		bdp = fec_enet_get_nextdesc(bdp, fep);
 	}

 	/* Set the last buffer to wrap. */
-	bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+	bdp = fec_enet_get_prevdesc(bdp, fep);
 	bdp->cbd_sc |= BD_SC_WRAP;

 	return 0;
@@ -1966,13 +1989,17 @@ static int fec_enet_init(struct net_device *ndev)
 	/* Get the Ethernet address */
 	fec_get_mac(ndev);

+	/* init the tx & rx ring size */
+	fep->tx_ring_size = TX_RING_SIZE;
+	fep->rx_ring_size = RX_RING_SIZE;
+
 	/* Set receive and transmit descriptor base. */
 	fep->rx_bd_base = cbd_base;
 	if (fep->bufdesc_ex)
 		fep->tx_bd_base = (struct bufdesc *)
-			(((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+			(((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
 	else
-		fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+		fep->tx_bd_base = cbd_base + fep->rx_ring_size;

 	/* The FEC Ethernet specific entries in the device structure */
 	ndev->watchdog_timeo = TX_TIMEOUT;


@@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev,
 		jwrite32(jme, JME_APMC, apmc);
 	}

-	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
+	NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT)

 	spin_lock_init(&jme->phy_lock);
 	spin_lock_init(&jme->macaddr_lock);


@@ -138,7 +138,9 @@
 #define      MVNETA_GMAC_FORCE_LINK_PASS         BIT(1)
 #define      MVNETA_GMAC_CONFIG_MII_SPEED        BIT(5)
 #define      MVNETA_GMAC_CONFIG_GMII_SPEED       BIT(6)
+#define      MVNETA_GMAC_AN_SPEED_EN             BIT(7)
 #define      MVNETA_GMAC_CONFIG_FULL_DUPLEX      BIT(12)
+#define      MVNETA_GMAC_AN_DUPLEX_EN            BIT(13)
 #define      MVNETA_MIB_COUNTERS_BASE            0x3080
 #define      MVNETA_MIB_LATE_COLLISION           0x7c
 #define      MVNETA_DA_FILT_SPEC_MCAST           0x3400
@@ -948,6 +950,13 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
 	/* Assign port SDMA configuration */
 	mvreg_write(pp, MVNETA_SDMA_CONFIG, val);

+	/* Disable PHY polling in hardware, since we're using the
+	 * kernel phylib to do this.
+	 */
+	val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
+	val &= ~MVNETA_PHY_POLLING_ENABLE;
+	mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
+
 	mvneta_set_ucast_table(pp, -1);
 	mvneta_set_special_mcast_table(pp, -1);
 	mvneta_set_other_mcast_table(pp, -1);
@@ -2340,7 +2349,9 @@ static void mvneta_adjust_link(struct net_device *ndev)
 			val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
 			val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
 				 MVNETA_GMAC_CONFIG_GMII_SPEED |
-				 MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+				 MVNETA_GMAC_CONFIG_FULL_DUPLEX |
+				 MVNETA_GMAC_AN_SPEED_EN |
+				 MVNETA_GMAC_AN_DUPLEX_EN);

 			if (phydev->duplex)
 				val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
@@ -2473,6 +2484,21 @@ static int mvneta_stop(struct net_device *dev)
 	return 0;
 }

+static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mvneta_port *pp = netdev_priv(dev);
+	int ret;
+
+	if (!pp->phy_dev)
+		return -ENOTSUPP;
+
+	ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
+	if (!ret)
+		mvneta_adjust_link(dev);
+
+	return ret;
+}
+
 /* Ethtool methods */

 /* Get settings (phy address, speed) for ethtools */
@@ -2591,6 +2617,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
 	.ndo_change_mtu      = mvneta_change_mtu,
 	.ndo_tx_timeout      = mvneta_tx_timeout,
 	.ndo_get_stats64     = mvneta_get_stats64,
+	.ndo_do_ioctl        = mvneta_ioctl,
 };

 const struct ethtool_ops mvneta_eth_tool_ops = {


@@ -1171,7 +1171,6 @@ typedef struct {

 #define NETXEN_DB_MAPSIZE_BYTES		0x1000

-#define NETXEN_NETDEV_WEIGHT	128
 #define NETXEN_ADAPTER_UP_MAGIC 777
 #define NETXEN_NIC_PEG_TUNE 0


@@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
 	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
 		sds_ring = &recv_ctx->sds_rings[ring];
 		netif_napi_add(netdev, &sds_ring->napi,
-				netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
+				netxen_nic_poll, NAPI_POLL_WEIGHT);
 	}

 	return 0;


@@ -1348,7 +1348,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
 				       DMA_FROM_DEVICE);
 			skb_put(skb, pkt_len);
 			skb->protocol = eth_type_trans(skb, ndev);
-			netif_rx(skb);
+			netif_receive_skb(skb);
 			ndev->stats.rx_packets++;
 			ndev->stats.rx_bytes += pkt_len;
 		}
@@ -1906,11 +1906,13 @@ static int sh_eth_open(struct net_device *ndev)
 	pm_runtime_get_sync(&mdp->pdev->dev);

+	napi_enable(&mdp->napi);
+
 	ret = request_irq(ndev->irq, sh_eth_interrupt,
 			  mdp->cd->irq_flags, ndev->name, ndev);
 	if (ret) {
 		dev_err(&ndev->dev, "Can not assign IRQ number\n");
-		return ret;
+		goto out_napi_off;
 	}

 	/* Descriptor set */
@@ -1928,12 +1930,12 @@ static int sh_eth_open(struct net_device *ndev)
 	if (ret)
 		goto out_free_irq;

-	napi_enable(&mdp->napi);
-
 	return ret;

 out_free_irq:
 	free_irq(ndev->irq, ndev);
+out_napi_off:
+	napi_disable(&mdp->napi);
 	pm_runtime_put_sync(&mdp->pdev->dev);

 	return ret;
 }
@@ -2025,8 +2027,6 @@ static int sh_eth_close(struct net_device *ndev)
 {
 	struct sh_eth_private *mdp = netdev_priv(ndev);

-	napi_disable(&mdp->napi);
-
 	netif_stop_queue(ndev);

 	/* Disable interrupts by clearing the interrupt mask. */
@@ -2044,6 +2044,8 @@ static int sh_eth_close(struct net_device *ndev)
 	free_irq(ndev->irq, ndev);

+	napi_disable(&mdp->napi);
+
 	/* Free all the skbuffs in the Rx queue. */
 	sh_eth_ring_free(ndev);
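The sh_eth change is the usual open/close symmetry rule: NAPI must be enabled before the IRQ that schedules it can fire, and disabled only after that IRQ is gone, with the error path unwinding in exact reverse order of setup. A self-contained sketch of that goto-unwind shape (all helpers are stubs, not driver functions):

#include <stdio.h>

static void napi_on(void)     { puts("napi enabled"); }
static void napi_off(void)    { puts("napi disabled"); }
static int  irq_request(void) { return 0; }
static void irq_free(void)    { }
static int  rings_init(void)  { return -1; }	/* force the unwind path */

static int dev_open(void)
{
	int ret;

	napi_on();			/* before the IRQ can schedule us */
	ret = irq_request();
	if (ret)
		goto out_napi_off;
	ret = rings_init();
	if (ret)
		goto out_free_irq;
	return 0;

out_free_irq:				/* unwind in reverse order */
	irq_free();
out_napi_off:
	napi_off();
	return ret;
}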


@@ -71,19 +71,22 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
 		plat->force_sf_dma_mode = 1;
 	}

-	dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
-	if (!dma_cfg)
-		return -ENOMEM;
-
-	plat->dma_cfg = dma_cfg;
-	of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
-	dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
-	dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
+	if (of_find_property(np, "snps,pbl", NULL)) {
+		dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
+				       GFP_KERNEL);
+		if (!dma_cfg)
+			return -ENOMEM;
+		plat->dma_cfg = dma_cfg;
+		of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
+		dma_cfg->fixed_burst =
+			of_property_read_bool(np, "snps,fixed-burst");
+		dma_cfg->mixed_burst =
+			of_property_read_bool(np, "snps,mixed-burst");
+	}

 	plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
 	if (plat->force_thresh_dma_mode) {
 		plat->force_sf_dma_mode = 0;
 		pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
 	}

 	return 0;
 }
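After this change plat->dma_cfg stays NULL unless the device tree actually carries "snps,pbl", so the driver can tell "no DMA tuning given" apart from "tuning given with default values". A hedged sketch of the same optional-config shape, with the DT lookup reduced to a nullable pointer (names are illustrative):

#include <stdbool.h>
#include <stdlib.h>

struct dma_cfg {
	unsigned int pbl;
	bool fixed_burst;
	bool mixed_burst;
};

struct plat_data {
	struct dma_cfg *dma_cfg;	/* NULL: no tuning present */
};

static int parse_dma_cfg(struct plat_data *plat, const unsigned int *pbl_prop)
{
	if (!pbl_prop)			/* property absent: keep dma_cfg NULL */
		return 0;

	plat->dma_cfg = calloc(1, sizeof(*plat->dma_cfg));
	if (!plat->dma_cfg)
		return -1;		/* -ENOMEM analogue */
	plat->dma_cfg->pbl = *pbl_prop;
	return 0;
}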


@@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
 {
 	netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
 	/* NAPI */
-	netif_napi_add(netdev, napi,
-		       gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
+	netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT);
 	netdev->ethtool_ops = &gelic_ether_ethtool_ops;
 	netdev->netdev_ops = &gelic_netdevice_ops;
 }


@@ -37,7 +37,6 @@
 #define GELIC_NET_RXBUF_ALIGN		128
 #define GELIC_CARD_RX_CSUM_DEFAULT	1 /* hw chksum */
 #define GELIC_NET_WATCHDOG_TIMEOUT	5*HZ
-#define GELIC_NET_NAPI_WEIGHT		(GELIC_NET_RX_DESCRIPTORS)
 #define GELIC_NET_BROADCAST_ADDR	0xffffffffffffL

 #define GELIC_NET_MC_COUNT_MAX		32 /* multicast address list */


@@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
 		printk(KERN_WARNING "Setting MDIO clock divisor to "
 		       "default %d\n", DEFAULT_CLOCK_DIVISOR);
 		clk_div = DEFAULT_CLOCK_DIVISOR;
+		of_node_put(np1);
 		goto issue;
 	}


@@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
 	},
+	/* HP hs2434 Mobile Broadband Module needs ZLPs */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+	  .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+	},
 	{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
 	  .driver_info = (unsigned long)&cdc_mbim_info,
 	},


@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
 	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
 		zfcp_erp_action_dismiss(&port->erp_action);
-	else
-		shost_for_each_device(sdev, port->adapter->scsi_host)
+	else {
+		spin_lock(port->adapter->scsi_host->host_lock);
+		__shost_for_each_device(sdev, port->adapter->scsi_host)
 			if (sdev_to_zfcp(sdev)->port == port)
 				zfcp_erp_action_dismiss_lun(sdev);
+		spin_unlock(port->adapter->scsi_host->host_lock);
+	}
 }

 static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
 {
 	struct scsi_device *sdev;

-	shost_for_each_device(sdev, port->adapter->scsi_host)
+	spin_lock(port->adapter->scsi_host->host_lock);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
 			_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+	spin_unlock(port->adapter->scsi_host->host_lock);
 }

 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -1434,8 +1439,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
 			atomic_set_mask(common_mask, &port->status);
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);

-	shost_for_each_device(sdev, adapter->scsi_host)
+	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, adapter->scsi_host)
 		atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
 }

 /**
@@ -1469,11 +1476,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
 	}
 	read_unlock_irqrestore(&adapter->port_list_lock, flags);

-	shost_for_each_device(sdev, adapter->scsi_host) {
+	spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, adapter->scsi_host) {
 		atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
 		if (clear_counter)
 			atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
 	}
+	spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
 }

 /**
@@ -1487,16 +1496,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
 {
 	struct scsi_device *sdev;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+	unsigned long flags;

 	atomic_set_mask(mask, &port->status);

 	if (!common_mask)
 		return;

-	shost_for_each_device(sdev, port->adapter->scsi_host)
+	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port)
 			atomic_set_mask(common_mask,
 					&sdev_to_zfcp(sdev)->status);
+	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
 }

 /**
@@ -1511,6 +1523,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
 	struct scsi_device *sdev;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+	unsigned long flags;

 	atomic_clear_mask(mask, &port->status);
@@ -1520,13 +1533,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
 	if (clear_counter)
 		atomic_set(&port->erp_counter, 0);

-	shost_for_each_device(sdev, port->adapter->scsi_host)
+	spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+	__shost_for_each_device(sdev, port->adapter->scsi_host)
 		if (sdev_to_zfcp(sdev)->port == port) {
 			atomic_clear_mask(common_mask,
 					  &sdev_to_zfcp(sdev)->status);
 			if (clear_counter)
 				atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
 		}
+	spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
 }

 /**


@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
 {
-	spin_lock_irq(&qdio->req_q_lock);
 	if (atomic_read(&qdio->req_q_free) ||
 	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return 1;
-	spin_unlock_irq(&qdio->req_q_lock);
 	return 0;
 }
@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
 {
 	long ret;

-	spin_unlock_irq(&qdio->req_q_lock);
-	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
-			       zfcp_qdio_sbal_check(qdio), 5 * HZ);
+	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
+		       zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);

 	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return -EIO;
@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
 		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
 	}

-	spin_lock_irq(&qdio->req_q_lock);
 	return -EIO;
 }
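wait_event_interruptible_lock_irq_timeout() evaluates the predicate with req_q_lock held and drops the lock only while sleeping, which closes the unlock/relock window the old code had. The closest portable analogue is a condition variable with a timeout; a sketch (the counter and the 5 s budget are illustrative):

#include <pthread.h>
#include <time.h>

static pthread_mutex_t req_q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  req_q_wq   = PTHREAD_COND_INITIALIZER;
static int req_q_free;

/* Wait up to 5 s for a free entry; the predicate is only ever tested
 * with req_q_lock held, and the lock is dropped while sleeping. */
static int sbal_get(void)
{
	struct timespec deadline;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;

	pthread_mutex_lock(&req_q_lock);
	while (req_q_free == 0 && rc == 0)
		rc = pthread_cond_timedwait(&req_q_wq, &req_q_lock, &deadline);
	if (req_q_free > 0) {
		req_q_free--;		/* claim one entry */
		rc = 0;
	} else {
		rc = -1;		/* timed out; caller recovers */
	}
	pthread_mutex_unlock(&req_q_lock);
	return rc;
}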


@@ -27,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
 static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,				       \
		     zfcp_sysfs_##_feat##_##_name##_show, NULL);

+#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value)		       \
+static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev,	       \
+						   struct device_attribute *at,\
+						   char *buf)		       \
+{									       \
+	return sprintf(buf, _format, _value);				       \
+}									       \
+static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO,				       \
+		     zfcp_sysfs_##_feat##_##_name##_show, NULL);
+
 #define ZFCP_DEFINE_A_ATTR(_name, _format, _value)			     \
 static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev,	     \
						 struct device_attribute *at,\
@@ -75,6 +85,8 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
		 (zfcp_unit_sdev_status(unit) &
		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
+ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);

 static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
					   struct device_attribute *attr,
@@ -347,6 +359,8 @@ static struct attribute *zfcp_unit_attrs[] = {
	&dev_attr_unit_in_recovery.attr,
	&dev_attr_unit_status.attr,
	&dev_attr_unit_access_denied.attr,
+	&dev_attr_unit_access_shared.attr,
+	&dev_attr_unit_access_readonly.attr,
	NULL
 };
 static struct attribute_group zfcp_unit_attr_group = {
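ZFCP_DEFINE_ATTR_CONST stamps out one show routine per attribute from a single template, so the constant access_shared/access_readonly attributes cost one line each. The same token-pasting trick in miniature, as a runnable sketch:

#include <stdio.h>

/* Each expansion defines show_<name>() returning a fixed formatted value. */
#define DEFINE_CONST_SHOW(_name, _format, _value)			\
static int show_##_name(char *buf, size_t len)				\
{									\
	return snprintf(buf, len, _format, _value);			\
}

DEFINE_CONST_SHOW(access_shared,   "%d\n", 0)
DEFINE_CONST_SHOW(access_readonly, "%d\n", 0)

int main(void)
{
	char buf[16];

	show_access_shared(buf, sizeof(buf));
	fputs(buf, stdout);	/* prints "0" */
	return 0;
}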


@@ -1353,7 +1353,6 @@ config SCSI_LPFC
	tristate "Emulex LightPulse Fibre Channel Support"
	depends on PCI && SCSI
	select SCSI_FC_ATTRS
-	select GENERIC_CSUM
	select CRC_T10DIF
	help
          This lpfc driver supports the Emulex LightPulse


@@ -482,7 +482,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
	ret = comedi_device_postconfig(dev);
	if (ret < 0) {
		comedi_device_detach(dev);
-		module_put(dev->driver->module);
+		module_put(driv->module);
	}
	/* On success, the driver module count has been incremented. */
	return ret;


@@ -341,8 +341,8 @@ void hvsilib_establish(struct hvsi_priv *pv)

	pr_devel("HVSI@%x:   ... waiting handshake\n", pv->termno);

-	/* Try for up to 200s */
-	for (timeout = 0; timeout < 20; timeout++) {
+	/* Try for up to 400ms */
+	for (timeout = 0; timeout < 40; timeout++) {
		if (pv->established)
			goto established;
		if (!hvsi_get_packet(pv))


@@ -304,6 +304,13 @@ static int __init ohci_pci_init(void)
	pr_info("%s: " DRIVER_DESC "\n", hcd_name);

	ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);

+#ifdef CONFIG_PM
+	/* Entries for the PCI suspend/resume callbacks are special */
+	ohci_pci_hc_driver.pci_suspend = ohci_suspend;
+	ohci_pci_hc_driver.pci_resume = ohci_resume;
+#endif
+
	return pci_register_driver(&ohci_pci_driver);
 }
 module_init(ohci_pci_init);


@@ -15,7 +15,7 @@
  * 675 Mass Ave, Cambridge, MA 02139, USA.
  */

-#include "otg_fsm.h"
+#include "phy-fsm-usb.h"
 #include <linux/usb/otg.h>
 #include <linux/ioctl.h>


@@ -29,7 +29,7 @@
 #include <linux/usb/gadget.h>
 #include <linux/usb/otg.h>

-#include "phy-otg-fsm.h"
+#include "phy-fsm-usb.h"

 /* Change USB protocol when there is a protocol change */
 static int otg_set_protocol(struct otg_fsm *fsm, int protocol)


@@ -40,7 +40,7 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
	int block, off;

	inode = iget_locked(sb, ino);
-	if (IS_ERR(inode))
+	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;


@@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 int bio_uncopy_user(struct bio *bio)
 {
	struct bio_map_data *bmd = bio->bi_private;
-	int ret = 0;
+	struct bio_vec *bvec;
+	int ret = 0, i;

-	if (!bio_flagged(bio, BIO_NULL_MAPPED))
-		ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
-				     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
-				     0, bmd->is_our_pages);
+	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+		/*
+		 * if we're in a workqueue, the request is orphaned, so
+		 * don't copy into a random user address space, just free.
+		 */
+		if (current->mm)
+			ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+					     bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+					     0, bmd->is_our_pages);
+		else if (bmd->is_our_pages)
+			bio_for_each_segment_all(bvec, bio, i)
+				__free_page(bvec->bv_page);
+	}
	bio_free_map_data(bmd);
	bio_put(bio);
	return ret;
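The bio fix encodes one rule: copy bounce data back only when the completing context still has the user's address space; an orphaned request completing from a workqueue must just free the pages instead. A stripped-down sketch of that decision (the buffer handling is illustrative, not the block layer's):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* Completion-side cleanup: copy back only when the user context is
 * still attached (the current->mm test's analogue), else just release. */
static void uncopy(bool have_user_mm, bool our_pages,
		   void *user_buf, void *bounce, size_t len)
{
	if (have_user_mm)
		memcpy(user_buf, bounce, len);	/* __bio_copy_iov() stand-in */
	/* else: orphaned request; never touch a foreign address space */

	if (our_pages)
		free(bounce);			/* __free_page() analogue */
}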


@@ -229,7 +229,7 @@ static void __d_free(struct rcu_head *head)
  */
 static void d_free(struct dentry *dentry)
 {
-	BUG_ON(dentry->d_count);
+	BUG_ON(dentry->d_lockref.count);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
@@ -467,7 +467,7 @@ relock:
	}

	if (ref)
-		dentry->d_count--;
+		dentry->d_lockref.count--;
	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
@@ -513,15 +513,10 @@ void dput(struct dentry *dentry)
		return;

 repeat:
-	if (dentry->d_count == 1)
+	if (dentry->d_lockref.count == 1)
		might_sleep();
-	spin_lock(&dentry->d_lock);
-	BUG_ON(!dentry->d_count);
-	if (dentry->d_count > 1) {
-		dentry->d_count--;
-		spin_unlock(&dentry->d_lock);
+	if (lockref_put_or_lock(&dentry->d_lockref))
		return;
-	}

	if (dentry->d_flags & DCACHE_OP_DELETE) {
		if (dentry->d_op->d_delete(dentry))
@@ -535,7 +530,7 @@ repeat:
	dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

-	dentry->d_count--;
+	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;
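lockref_put_or_lock() is the heart of this conversion: drop a reference, unless the count would reach zero, in which case return with the lock held so the caller can decide what to do. A toy mutex-based model of those semantics (the real lockref adds a lockless cmpxchg fast path on top):

#include <pthread.h>

struct lockref {
	pthread_mutex_t lock;
	int count;
};

/* Returns 1 if the count was safely decremented (lock released again);
 * returns 0 with the lock held when this was the last reference. */
static int lockref_put_or_lock(struct lockref *lr)
{
	pthread_mutex_lock(&lr->lock);
	if (lr->count > 1) {
		lr->count--;
		pthread_mutex_unlock(&lr->lock);
		return 1;
	}
	return 0;
}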
@ -590,7 +585,7 @@ int d_invalidate(struct dentry * dentry)
* We also need to leave mountpoints alone, * We also need to leave mountpoints alone,
* directory or not. * directory or not.
*/ */
if (dentry->d_count > 1 && dentry->d_inode) { if (dentry->d_lockref.count > 1 && dentry->d_inode) {
if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) { if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
spin_unlock(&dentry->d_lock); spin_unlock(&dentry->d_lock);
return -EBUSY; return -EBUSY;
@ -606,14 +601,12 @@ EXPORT_SYMBOL(d_invalidate);
/* This must be called with d_lock held */ /* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry) static inline void __dget_dlock(struct dentry *dentry)
{ {
dentry->d_count++; dentry->d_lockref.count++;
} }
static inline void __dget(struct dentry *dentry) static inline void __dget(struct dentry *dentry)
{ {
spin_lock(&dentry->d_lock); lockref_get(&dentry->d_lockref);
__dget_dlock(dentry);
spin_unlock(&dentry->d_lock);
} }
struct dentry *dget_parent(struct dentry *dentry) struct dentry *dget_parent(struct dentry *dentry)
@ -634,8 +627,8 @@ repeat:
goto repeat; goto repeat;
} }
 	rcu_read_unlock();
-	BUG_ON(!ret->d_count);
-	ret->d_count++;
+	BUG_ON(!ret->d_lockref.count);
+	ret->d_lockref.count++;
 	spin_unlock(&ret->d_lock);
 	return ret;
 }
@@ -718,7 +711,7 @@ restart:
 	spin_lock(&inode->i_lock);
 	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
 		spin_lock(&dentry->d_lock);
-		if (!dentry->d_count) {
+		if (!dentry->d_lockref.count) {
 			__dget_dlock(dentry);
 			__d_drop(dentry);
 			spin_unlock(&dentry->d_lock);
@@ -763,12 +756,8 @@ static void try_prune_one_dentry(struct dentry *dentry)
 	/* Prune ancestors. */
 	dentry = parent;
 	while (dentry) {
-		spin_lock(&dentry->d_lock);
-		if (dentry->d_count > 1) {
-			dentry->d_count--;
-			spin_unlock(&dentry->d_lock);
+		if (lockref_put_or_lock(&dentry->d_lockref))
 			return;
-		}
 		dentry = dentry_kill(dentry, 1);
 	}
 }
@@ -793,7 +782,7 @@ static void shrink_dentry_list(struct list_head *list)
 		 * the LRU because of laziness during lookup. Do not free
 		 * it - just keep it off the LRU list.
 		 */
-		if (dentry->d_count) {
+		if (dentry->d_lockref.count) {
 			dentry_lru_del(dentry);
 			spin_unlock(&dentry->d_lock);
 			continue;
@@ -913,7 +902,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 		dentry_lru_del(dentry);
 		__d_shrink(dentry);
-		if (dentry->d_count != 0) {
+		if (dentry->d_lockref.count != 0) {
 			printk(KERN_ERR
 			       "BUG: Dentry %p{i=%lx,n=%s}"
 			       " still in use (%d)"
@@ -922,7 +911,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 			       dentry->d_inode ?
 			       dentry->d_inode->i_ino : 0UL,
 			       dentry->d_name.name,
-			       dentry->d_count,
+			       dentry->d_lockref.count,
 			       dentry->d_sb->s_type->name,
 			       dentry->d_sb->s_id);
 			BUG();
@@ -933,7 +922,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 			list_del(&dentry->d_u.d_child);
 		} else {
 			parent = dentry->d_parent;
-			parent->d_count--;
+			parent->d_lockref.count--;
 			list_del(&dentry->d_u.d_child);
 		}
@@ -981,7 +970,7 @@ void shrink_dcache_for_umount(struct super_block *sb)
 	dentry = sb->s_root;
 	sb->s_root = NULL;
-	dentry->d_count--;
+	dentry->d_lockref.count--;
 	shrink_dcache_for_umount_subtree(dentry);
 
 	while (!hlist_bl_empty(&sb->s_anon)) {
@@ -1147,7 +1136,7 @@ resume:
 		 * loop in shrink_dcache_parent() might not make any progress
 		 * and loop forever.
 		 */
-		if (dentry->d_count) {
+		if (dentry->d_lockref.count) {
 			dentry_lru_del(dentry);
 		} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
 			dentry_lru_move_list(dentry, dispose);
@@ -1269,7 +1258,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
 	smp_wmb();
 	dentry->d_name.name = dname;
 
-	dentry->d_count = 1;
+	dentry->d_lockref.count = 1;
 	dentry->d_flags = 0;
 	spin_lock_init(&dentry->d_lock);
 	seqcount_init(&dentry->d_seq);
@@ -1970,7 +1959,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
 			goto next;
 		}
-		dentry->d_count++;
+		dentry->d_lockref.count++;
 		found = dentry;
 		spin_unlock(&dentry->d_lock);
 		break;
@@ -2069,7 +2058,7 @@ again:
 	spin_lock(&dentry->d_lock);
 	inode = dentry->d_inode;
 	isdir = S_ISDIR(inode->i_mode);
-	if (dentry->d_count == 1) {
+	if (dentry->d_lockref.count == 1) {
 		if (!spin_trylock(&inode->i_lock)) {
 			spin_unlock(&dentry->d_lock);
 			cpu_relax();
@@ -2724,6 +2713,17 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
 	return memcpy(buffer, temp, sz);
 }
 
+char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+	char *end = buffer + buflen;
+	/* these dentries are never renamed, so d_lock is not needed */
+	if (prepend(&end, &buflen, " (deleted)", 11) ||
+	    prepend_name(&end, &buflen, &dentry->d_name) ||
+	    prepend(&end, &buflen, "/", 1))
+		end = ERR_PTR(-ENAMETOOLONG);
+	return end;
+}
+
 /*
  * Write full pathname from the root of the filesystem into the buffer.
  */
@@ -2937,7 +2937,7 @@ resume:
 		}
 		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
 			dentry->d_flags |= DCACHE_GENOCIDE;
-			dentry->d_count--;
+			dentry->d_lockref.count--;
 		}
 		spin_unlock(&dentry->d_lock);
 	}
@@ -2945,7 +2945,7 @@ resume:
 		struct dentry *child = this_parent;
 		if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
 			this_parent->d_flags |= DCACHE_GENOCIDE;
-			this_parent->d_count--;
+			this_parent->d_lockref.count--;
 		}
 		this_parent = try_to_ascend(this_parent, locked, seq);
 		if (!this_parent)
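The simple_dname() helper added above builds the result right to left in the caller's buffer. One subtlety worth noting: the " (deleted)" prepend passes a length of 11 because the count includes the terminating NUL. A minimal userspace sketch of the same right-to-left prepend technique (this prepend() is a simplified stand-in for the kernel's static helper, and the file name is made up):

#include <stdio.h>
#include <string.h>

/* Prepend len bytes of s in front of *end, growing leftward. */
static int prepend(char **end, int *buflen, const char *s, int len)
{
	*buflen -= len;
	if (*buflen < 0)
		return -1;		/* buffer exhausted */
	*end -= len;
	memcpy(*end, s, len);
	return 0;
}

int main(void)
{
	char buf[64];
	int buflen = sizeof(buf);
	char *end = buf + buflen;

	/* Build "/name (deleted)" right to left, as simple_dname() does. */
	if (prepend(&end, &buflen, " (deleted)", 11) ||	/* 11 = 10 chars + NUL */
	    prepend(&end, &buflen, "somefile", 8) ||
	    prepend(&end, &buflen, "/", 1))
		return 1;
	printf("%s\n", end);		/* prints: /somefile (deleted) */
	return 0;
}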

diff --git a/fs/efs/inode.c b/fs/efs/inode.c

@@ -57,7 +57,7 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
 	struct inode *inode;
 
 	inode = iget_locked(super, ino);
-	if (IS_ERR(inode))
+	if (!inode)
 		return ERR_PTR(-ENOMEM);
 	if (!(inode->i_state & I_NEW))
 		return inode;

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c

@@ -926,14 +926,8 @@ static int get_hstate_idx(int page_size_log)
 	return h - hstates;
 }
 
-static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen)
-{
-	return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
-				dentry->d_name.name);
-}
-
 static struct dentry_operations anon_ops = {
-	.d_dname = hugetlb_dname
+	.d_dname = simple_dname
 };
 
 /*

diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c

@@ -3047,6 +3047,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
 
 		dir_index = (u32) ctx->pos;
 
+		/*
+		 * NFSv4 reserves cookies 1 and 2 for . and .. so the value
+		 * we return to the vfs is one greater than the one we use
+		 * internally.
+		 */
+		if (dir_index)
+			dir_index--;
+
 		if (dir_index > 1) {
 			struct dir_table_slot dirtab_slot;
@@ -3086,7 +3094,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
 			if (p->header.flag & BT_INTERNAL) {
 				jfs_err("jfs_readdir: bad index table");
 				DT_PUTPAGE(mp);
-				ctx->pos = -1;
+				ctx->pos = DIREND;
 				return 0;
 			}
 		} else {
@@ -3094,14 +3102,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
 			/*
 			 * self "."
 			 */
-			ctx->pos = 0;
+			ctx->pos = 1;
 			if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
 				return 0;
 		}
 		/*
 		 * parent ".."
 		 */
-		ctx->pos = 1;
+		ctx->pos = 2;
 		if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
 			return 0;
@@ -3122,22 +3130,23 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
 		/*
 		 * Legacy filesystem - OS/2 & Linux JFS < 0.3.6
 		 *
-		 * pn = index = 0:	First entry "."
-		 * pn = 0; index = 1:	Second entry ".."
+		 * pn = 0; index = 1:	First entry "."
+		 * pn = 0; index = 2:	Second entry ".."
 		 * pn > 0:		Real entries, pn=1 -> leftmost page
 		 * pn = index = -1:	No more entries
 		 */
 		dtpos = ctx->pos;
-		if (dtpos == 0) {
+		if (dtpos < 2) {
 			/* build "." entry */
+			ctx->pos = 1;
 			if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
 				return 0;
-			dtoffset->index = 1;
+			dtoffset->index = 2;
 			ctx->pos = dtpos;
 		}
 
 		if (dtoffset->pn == 0) {
-			if (dtoffset->index == 1) {
+			if (dtoffset->index == 2) {
 				/* build ".." entry */
 				if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
 					return 0;
@@ -3228,6 +3237,12 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
 				}
 				jfs_dirent->position = unique_pos++;
 			}
+			/*
+			 * We add 1 to the index because we may
+			 * use a value of 2 internally, and NFSv4
+			 * doesn't like that.
+			 */
+			jfs_dirent->position++;
 		} else {
 			jfs_dirent->position = dtpos;
 			len = min(d_namleft, DTLHDRDATALEN_LEGACY);
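Both jfs hunks implement one convention: the cookie exposed through ctx->pos is the internal JFS position shifted up by one, so the values NFSv4 reserves (0 for start-of-directory, 1 and 2 for "." and "..") never collide with a real entry. A sketch of the mapping with hypothetical helper names, runnable as a self-check:

#include <assert.h>
#include <stdint.h>

/* Illustrative only: VFS cookie = internal JFS dir-table index + 1. */
static uint32_t jfs_index_to_cookie(uint32_t dir_index)
{
	return dir_index + 1;
}

static uint32_t jfs_cookie_to_index(uint32_t cookie)
{
	return cookie ? cookie - 1 : 0;	/* mirrors: if (dir_index) dir_index--; */
}

int main(void)
{
	/* "." and ".." are emitted at cookies 1 and 2, so an entry at
	 * internal index 2 must surface as cookie 3, and round-trip. */
	assert(jfs_index_to_cookie(2) == 3);
	assert(jfs_cookie_to_index(jfs_index_to_cookie(7)) == 7);
	return 0;
}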

diff --git a/fs/namei.c b/fs/namei.c

@@ -536,8 +536,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
 		 * a reference at this point.
 		 */
 		BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
-		BUG_ON(!parent->d_count);
-		parent->d_count++;
+		BUG_ON(!parent->d_lockref.count);
+		parent->d_lockref.count++;
 		spin_unlock(&dentry->d_lock);
 	}
 	spin_unlock(&parent->d_lock);
@@ -3327,7 +3327,7 @@ void dentry_unhash(struct dentry *dentry)
 {
 	shrink_dcache_parent(dentry);
 	spin_lock(&dentry->d_lock);
-	if (dentry->d_count == 1)
+	if (dentry->d_lockref.count == 1)
 		__d_drop(dentry);
 	spin_unlock(&dentry->d_lock);
 }
@@ -3671,11 +3671,15 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
 	if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
 		return -EINVAL;
 	/*
-	 * Using empty names is equivalent to using AT_SYMLINK_FOLLOW
-	 * on /proc/self/fd/<fd>.
+	 * To use null names we require CAP_DAC_READ_SEARCH.
+	 * This ensures that not everyone will be able to create
+	 * a hardlink using the passed file descriptor.
 	 */
-	if (flags & AT_EMPTY_PATH)
+	if (flags & AT_EMPTY_PATH) {
+		if (!capable(CAP_DAC_READ_SEARCH))
+			return -ENOENT;
 		how = LOOKUP_EMPTY;
+	}
 
 	if (flags & AT_SYMLINK_FOLLOW)
 		how |= LOOKUP_FOLLOW;
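For reference, the operation this gates is linkat(2) with an empty oldpath, which links a bare file descriptor into the filesystem. A userspace sketch (paths are illustrative); after this change the call fails with ENOENT unless the caller has CAP_DAC_READ_SEARCH:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/source-file", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;
	/* Empty oldpath + AT_EMPTY_PATH: link the fd itself. */
	if (linkat(fd, "", AT_FDCWD, "/tmp/new-link", AT_EMPTY_PATH) < 0)
		perror("linkat");	/* ENOENT for unprivileged callers */
	close(fd);
	return 0;
}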

diff --git a/fs/namespace.c b/fs/namespace.c

@@ -1429,7 +1429,7 @@ struct vfsmount *collect_mounts(struct path *path)
 			 CL_COPY_ALL | CL_PRIVATE);
 	namespace_unlock();
 	if (IS_ERR(tree))
-		return NULL;
+		return ERR_CAST(tree);
 	return &tree->mnt;
 }

diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c

@@ -1022,7 +1022,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
 	struct inode *inode = NULL;
 	struct ocfs2_super *osb = NULL;
 	struct buffer_head *bh = NULL;
-	char nodestr[8];
+	char nodestr[12];
 	struct ocfs2_blockcheck_stats stats;
 
 	trace_ocfs2_fill_super(sb, data, silent);

diff --git a/fs/proc/fd.c b/fs/proc/fd.c

@@ -228,8 +228,6 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
 	if (!p)
 		return -ENOENT;
 
-	if (!dir_emit_dots(file, ctx))
-		goto out;
 	if (!dir_emit_dots(file, ctx))
 		goto out;
 	files = get_files_struct(p);

diff --git a/include/linux/dcache.h b/include/linux/dcache.h

@@ -9,6 +9,7 @@
 #include <linux/seqlock.h>
 #include <linux/cache.h>
 #include <linux/rcupdate.h>
+#include <linux/lockref.h>
 
 struct nameidata;
 struct path;
@@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int);
 # endif
 #endif
 
+#define d_lock	d_lockref.lock
+
 struct dentry {
 	/* RCU lookup touched fields */
 	unsigned int d_flags;		/* protected by d_lock */
@@ -112,8 +115,7 @@ struct dentry {
 	unsigned char d_iname[DNAME_INLINE_LEN];	/* small names */
 
 	/* Ref lookup also touches following */
-	unsigned int d_count;		/* protected by d_lock */
-	spinlock_t d_lock;		/* per dentry lock */
+	struct lockref d_lockref;	/* per-dentry lock and refcount */
 	const struct dentry_operations *d_op;
 	struct super_block *d_sb;	/* The root of the dentry tree */
 	unsigned long d_time;		/* used by d_revalidate */
@@ -318,7 +320,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
 	assert_spin_locked(&dentry->d_lock);
 	if (!read_seqcount_retry(&dentry->d_seq, seq)) {
 		ret = 1;
-		dentry->d_count++;
+		dentry->d_lockref.count++;
 	}
 
 	return ret;
@@ -326,7 +328,7 @@ static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
 
 static inline unsigned d_count(const struct dentry *dentry)
 {
-	return dentry->d_count;
+	return dentry->d_lockref.count;
 }
 
 /* validate "insecure" dentry pointer */
@@ -336,6 +338,7 @@ extern int d_validate(struct dentry *, struct dentry *);
  * helper function for dentry_operations.d_dname() members
  */
 extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern char *simple_dname(struct dentry *, char *, int);
 
 extern char *__d_path(const struct path *, const struct path *, char *, int);
 extern char *d_absolute_path(const struct path *, char *, int);
@@ -356,17 +359,14 @@ extern char *dentry_path(struct dentry *, char *, int);
 static inline struct dentry *dget_dlock(struct dentry *dentry)
 {
 	if (dentry)
-		dentry->d_count++;
+		dentry->d_lockref.count++;
 	return dentry;
 }
 
 static inline struct dentry *dget(struct dentry *dentry)
 {
-	if (dentry) {
-		spin_lock(&dentry->d_lock);
-		dget_dlock(dentry);
-		spin_unlock(&dentry->d_lock);
-	}
+	if (dentry)
+		lockref_get(&dentry->d_lockref);
 	return dentry;
 }

diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file (71 lines added)

@@ -0,0 +1,71 @@
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them. In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+
+struct lockref {
+	spinlock_t lock;
+	unsigned int count;
+};
+
+/**
+ * lockref_get - Increments reference count unconditionally
+ * @lockref: pointer to lockref structure
+ *
+ * This operation is only valid if you already hold a reference
+ * to the object, so you know the count cannot be zero.
+ */
+static inline void lockref_get(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	lockref->count++;
+	spin_unlock(&lockref->lock);
+}
+
+/**
+ * lockref_get_not_zero - Increments count unless the count is 0
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count is 0
+ */
+static inline int lockref_get_not_zero(struct lockref *lockref)
+{
+	int retval = 0;
+
+	spin_lock(&lockref->lock);
+	if (lockref->count) {
+		lockref->count++;
+		retval = 1;
+	}
+	spin_unlock(&lockref->lock);
+	return retval;
+}
+
+/**
+ * lockref_put_or_lock - decrements count unless count <= 1 before decrement
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
+ */
+static inline int lockref_put_or_lock(struct lockref *lockref)
+{
+	spin_lock(&lockref->lock);
+	if (lockref->count <= 1)
+		return 0;
+	lockref->count--;
+	spin_unlock(&lockref->lock);
+	return 1;
+}
+
+#endif /* __LINUX_LOCKREF_H */
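The semantics are easiest to see in a compilable userspace analogue -- a sketch only, with a pthread mutex standing in for the kernel spinlock. The key property is that lockref_put_or_lock() returns 0 with the lock still held, so the caller that saw the final reference can tear the object down under that lock:

#include <pthread.h>
#include <stdio.h>

struct lockref {
	pthread_mutex_t lock;
	unsigned int count;
};

static void lockref_get(struct lockref *lr)
{
	pthread_mutex_lock(&lr->lock);
	lr->count++;
	pthread_mutex_unlock(&lr->lock);
}

static int lockref_put_or_lock(struct lockref *lr)
{
	pthread_mutex_lock(&lr->lock);
	if (lr->count <= 1)
		return 0;		/* final ref: lock stays held */
	lr->count--;
	pthread_mutex_unlock(&lr->lock);
	return 1;
}

int main(void)
{
	struct lockref lr = { PTHREAD_MUTEX_INITIALIZER, 1 };

	lockref_get(&lr);					/* count: 2 */
	printf("fast put: %d\n", lockref_put_or_lock(&lr));	/* prints 1 */
	if (!lockref_put_or_lock(&lr)) {
		/* last reference: free the object, then drop the lock */
		pthread_mutex_unlock(&lr.lock);
	}
	return 0;
}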

diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h

@@ -14,6 +14,10 @@ struct fs_struct;
  * A structure to contain pointers to all per-process
  * namespaces - fs (mount), uts, network, sysvipc, etc.
  *
+ * The pid namespace is an exception -- it's accessed using
+ * task_active_pid_ns.  The pid namespace here is the
+ * namespace that children will use.
+ *
  * 'count' is the number of tasks holding a reference.
  * The count for each namespace, then, will be the number
  * of nsproxies pointing to it, not the number of tasks.
@@ -27,7 +31,7 @@ struct nsproxy {
 	struct uts_namespace *uts_ns;
 	struct ipc_namespace *ipc_ns;
 	struct mnt_namespace *mnt_ns;
-	struct pid_namespace *pid_ns;
+	struct pid_namespace *pid_ns_for_children;
 	struct net *net_ns;
 };
 extern struct nsproxy init_nsproxy;

diff --git a/include/linux/regmap.h b/include/linux/regmap.h

@@ -16,6 +16,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <linux/err.h>
+#include <linux/bug.h>
 
 struct module;
 struct device;

diff --git a/include/linux/wait.h b/include/linux/wait.h

@@ -811,6 +811,63 @@ do {									\
 	__ret;								\
 })
 
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition,	\
+						    lock, ret)		\
+do {									\
+	DEFINE_WAIT(__wait);						\
+									\
+	for (;;) {							\
+		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		if (signal_pending(current)) {				\
+			ret = -ERESTARTSYS;				\
+			break;						\
+		}							\
+		spin_unlock_irq(&lock);					\
+		ret = schedule_timeout(ret);				\
+		spin_lock_irq(&lock);					\
+		if (!ret)						\
+			break;						\
+	}								\
+	finish_wait(&wq, &__wait);					\
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
+ *	The condition is checked under the lock. This is expected
+ *	to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ *	and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or a signal is received. The @condition
+ * is checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies otherwise
+ * if the condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock,	\
+						  timeout)		\
+({									\
+	int __ret = timeout;						\
+									\
+	if (!(condition))						\
+		__wait_event_interruptible_lock_irq_timeout(		\
+					wq, condition, lock, __ret);	\
+	__ret;								\
+})
+
 /*
  * These are the old interfaces to sleep waiting for an event.
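The macro itself only runs in the kernel, but the pattern it wraps -- test the condition under the lock, drop the lock while sleeping, reacquire it on wakeup, give up after a timeout -- has a direct userspace analogue. A sketch using pthreads (all names here are illustrative):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool ready;

/* Call with lock held. Returns 0 on timeout, 1 once ready is set. */
static int wait_ready_timeout(int seconds)
{
	struct timespec deadline;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += seconds;
	while (!ready) {
		/* Atomically drops the lock while sleeping and reacquires
		 * it before returning, like the unlock/schedule/lock
		 * sequence in the kernel macro above. */
		if (pthread_cond_timedwait(&cond, &lock, &deadline) == ETIMEDOUT)
			return 0;
	}
	return 1;
}

As with the kernel macro, the waker must change the condition under the lock and then signal, or wakeups can be lost.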

diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h

@@ -123,6 +123,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
 		/* local bh are disabled so it is ok to use _BH */
 		NET_ADD_STATS_BH(sock_net(sk),
 				 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+		cpu_relax();
 
 	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
 		 !need_resched() && !busy_loop_timeout(end_time));

diff --git a/include/net/genetlink.h b/include/net/genetlink.h

@@ -61,6 +61,7 @@ struct genl_family {
 	struct list_head	ops_list;	/* private */
 	struct list_head	family_list;	/* private */
 	struct list_head	mcast_groups;	/* private */
+	struct module		*module;
 };
 
 /**
@@ -121,9 +122,24 @@ struct genl_ops {
 	struct list_head	ops_list;
 };
 
-extern int genl_register_family(struct genl_family *family);
-extern int genl_register_family_with_ops(struct genl_family *family,
+extern int __genl_register_family(struct genl_family *family);
+
+static inline int genl_register_family(struct genl_family *family)
+{
+	family->module = THIS_MODULE;
+	return __genl_register_family(family);
+}
+
+extern int __genl_register_family_with_ops(struct genl_family *family,
 	struct genl_ops *ops, size_t n_ops);
+
+static inline int genl_register_family_with_ops(struct genl_family *family,
+	struct genl_ops *ops, size_t n_ops)
+{
+	family->module = THIS_MODULE;
+	return __genl_register_family_with_ops(family, ops, n_ops);
+}
+
 extern int genl_unregister_family(struct genl_family *family);
 extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
 extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);

diff --git a/include/net/route.h b/include/net/route.h

@@ -317,4 +317,12 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
 	return hoplimit;
 }
 
+static inline int ip_skb_dst_mtu(struct sk_buff *skb)
+{
+	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
+
+	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
+	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+}
+
 #endif /* _ROUTE_H */

diff --git a/include/net/xfrm.h b/include/net/xfrm.h

@@ -341,10 +341,13 @@ struct xfrm_state_afinfo {
 						struct sk_buff *skb);
 	int			(*transport_finish)(struct sk_buff *skb,
 						    int async);
+	void			(*local_error)(struct sk_buff *skb, u32 mtu);
 };
 
 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
+extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
 
 extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
@@ -1477,6 +1480,7 @@ extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
 extern int xfrm_output_resume(struct sk_buff *skb, int err);
 extern int xfrm_output(struct sk_buff *skb);
 extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+extern void xfrm_local_error(struct sk_buff *skb, int mtu);
 extern int xfrm4_extract_header(struct sk_buff *skb);
 extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
@@ -1497,6 +1501,7 @@ extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short fam
 extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
 extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
 extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
+extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
 extern int xfrm6_extract_header(struct sk_buff *skb);
 extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
 extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
@@ -1514,6 +1519,7 @@ extern int xfrm6_output(struct sk_buff *skb);
 extern int xfrm6_output_finish(struct sk_buff *skb);
 extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
 				 u8 **prevhdr);
+extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
 
 #ifdef CONFIG_XFRM
 extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);

diff --git a/include/uapi/linux/cm4000_cs.h b/include/uapi/linux/cm4000_cs.h

@@ -2,6 +2,7 @@
 #define _UAPI_CM4000_H_
 
 #include <linux/types.h>
+#include <linux/ioctl.h>
 
 #define MAX_ATR 33

diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h

@@ -115,6 +115,8 @@ struct icmp6hdr {
 #define ICMPV6_NOT_NEIGHBOUR		2
 #define ICMPV6_ADDR_UNREACH		3
 #define ICMPV6_PORT_UNREACH		4
+#define ICMPV6_POLICY_FAIL		5
+#define ICMPV6_REJECT_ROUTE		6
 
 /*
  *	Codes for Time Exceeded

diff --git a/ipc/msg.c b/ipc/msg.c

@@ -839,7 +839,7 @@ static inline void free_copy(struct msg_msg *copy)
 
 static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
 {
-	struct msg_msg *msg;
+	struct msg_msg *msg, *found = NULL;
 	long count = 0;
 
 	list_for_each_entry(msg, &msq->q_messages, m_list) {
@@ -848,6 +848,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
 					       *msgtyp, mode)) {
 			if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
 				*msgtyp = msg->m_type - 1;
+				found = msg;
 			} else if (mode == SEARCH_NUMBER) {
 				if (*msgtyp == count)
 					return msg;
@@ -857,7 +858,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
 		}
 	}
 
-	return ERR_PTR(-EAGAIN);
+	return found ?: ERR_PTR(-EAGAIN);
 }
 
 long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
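The bug this hunk fixes: for msgrcv() with a negative msgtyp (SEARCH_LESSEQUAL means "lowest type not exceeding the bound"), the scan tightened the search bound but forgot the matching message, so a valid candidate could be reported as -EAGAIN. A standalone sketch of the corrected keep-the-best-candidate scan (types and data are made up):

#include <stdio.h>

struct msg { long type; const char *text; };

static const struct msg *find_le(const struct msg *msgs, int n, long bound)
{
	const struct msg *found = NULL;

	for (int i = 0; i < n; i++) {
		/* remember the best (lowest-typed) match seen so far */
		if (msgs[i].type <= bound &&
		    (!found || msgs[i].type < found->type))
			found = &msgs[i];
	}
	return found;	/* NULL plays the role of ERR_PTR(-EAGAIN) */
}

int main(void)
{
	struct msg q[] = { { 3, "three" }, { 1, "one" }, { 2, "two" } };
	const struct msg *m = find_le(q, 3, 2);

	if (m)
		printf("picked type %ld (%s)\n", m->type, m->text);
	return 0;
}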

diff --git a/kernel/cgroup.c b/kernel/cgroup.c

@@ -4480,6 +4480,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	struct dentry *d = cgrp->dentry;
 	struct cgroup_event *event, *tmp;
 	struct cgroup_subsys *ss;
+	struct cgroup *child;
 	bool empty;
 
 	lockdep_assert_held(&d->d_inode->i_mutex);
@@ -4490,11 +4491,27 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	 * @cgrp from being removed while __put_css_set() is in progress.
 	 */
 	read_lock(&css_set_lock);
-	empty = list_empty(&cgrp->cset_links) && list_empty(&cgrp->children);
+	empty = list_empty(&cgrp->cset_links);
 	read_unlock(&css_set_lock);
 	if (!empty)
 		return -EBUSY;
 
+	/*
+	 * Make sure there are no live children.  We can't test ->children
+	 * emptiness as dead children linger on it while being destroyed;
+	 * otherwise, "rmdir parent/child parent" may fail with -EBUSY.
+	 */
+	empty = true;
+	rcu_read_lock();
+	list_for_each_entry_rcu(child, &cgrp->children, sibling) {
+		empty = cgroup_is_dead(child);
+		if (!empty)
+			break;
+	}
+	rcu_read_unlock();
+	if (!empty)
+		return -EBUSY;
+
 	/*
 	 * Block new css_tryget() by killing css refcnts.  cgroup core
 	 * guarantees that, by the time ->css_offline() is invoked, no new

diff --git a/kernel/cpuset.c b/kernel/cpuset.c

@@ -475,13 +475,17 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 
 	/*
 	 * Cpusets with tasks - existing or newly being attached - can't
-	 * have empty cpus_allowed or mems_allowed.
+	 * be changed to have empty cpus_allowed or mems_allowed.
 	 */
 	ret = -ENOSPC;
-	if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
-	    (cpumask_empty(trial->cpus_allowed) &&
-	     nodes_empty(trial->mems_allowed)))
-		goto out;
+	if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) {
+		if (!cpumask_empty(cur->cpus_allowed) &&
+		    cpumask_empty(trial->cpus_allowed))
+			goto out;
+		if (!nodes_empty(cur->mems_allowed) &&
+		    nodes_empty(trial->mems_allowed))
+			goto out;
+	}
 
 	ret = 0;
 out:

diff --git a/kernel/fork.c b/kernel/fork.c

@@ -1177,7 +1177,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * don't allow the creation of threads.
 	 */
 	if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
-	    (task_active_pid_ns(current) != current->nsproxy->pid_ns))
+	    (task_active_pid_ns(current) !=
+	     current->nsproxy->pid_ns_for_children))
 		return ERR_PTR(-EINVAL);
 
 	retval = security_task_create(clone_flags);
@@ -1351,7 +1352,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	if (pid != &init_struct_pid) {
 		retval = -ENOMEM;
-		pid = alloc_pid(p->nsproxy->pid_ns);
+		pid = alloc_pid(p->nsproxy->pid_ns_for_children);
 		if (!pid)
 			goto bad_fork_cleanup_io;
 	}

diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c

@@ -29,15 +29,15 @@
 static struct kmem_cache *nsproxy_cachep;
 
 struct nsproxy init_nsproxy = {
 	.count			= ATOMIC_INIT(1),
 	.uts_ns			= &init_uts_ns,
 #if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
 	.ipc_ns			= &init_ipc_ns,
 #endif
 	.mnt_ns			= NULL,
-	.pid_ns			= &init_pid_ns,
+	.pid_ns_for_children	= &init_pid_ns,
 #ifdef CONFIG_NET
 	.net_ns			= &init_net,
 #endif
 };
@@ -85,9 +85,10 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
 		goto out_ipc;
 	}
 
-	new_nsp->pid_ns = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns);
-	if (IS_ERR(new_nsp->pid_ns)) {
-		err = PTR_ERR(new_nsp->pid_ns);
+	new_nsp->pid_ns_for_children =
+		copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);
+	if (IS_ERR(new_nsp->pid_ns_for_children)) {
+		err = PTR_ERR(new_nsp->pid_ns_for_children);
 		goto out_pid;
 	}
 
@@ -100,8 +101,8 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
 	return new_nsp;
 
 out_net:
-	if (new_nsp->pid_ns)
-		put_pid_ns(new_nsp->pid_ns);
+	if (new_nsp->pid_ns_for_children)
+		put_pid_ns(new_nsp->pid_ns_for_children);
 out_pid:
 	if (new_nsp->ipc_ns)
 		put_ipc_ns(new_nsp->ipc_ns);
@@ -174,8 +175,8 @@ void free_nsproxy(struct nsproxy *ns)
 		put_uts_ns(ns->uts_ns);
 	if (ns->ipc_ns)
 		put_ipc_ns(ns->ipc_ns);
-	if (ns->pid_ns)
-		put_pid_ns(ns->pid_ns);
+	if (ns->pid_ns_for_children)
+		put_pid_ns(ns->pid_ns_for_children);
 	put_net(ns->net_ns);
 	kmem_cache_free(nsproxy_cachep, ns);
 }
} }

Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше