Merge branch 'upstream' of master.kernel.org:/pub/scm/linux/kernel/git/linville/wireless-2.6 into upstream

Committed by Jeff Garzik on 2006-09-06 11:02:22 -04:00
Parents: f2ad2d9b65 c576af4791
Commit: a2413598b8
18 changed files with 1025 additions and 550 deletions

View file

@ -449,9 +449,9 @@ L: linux-hams@vger.kernel.org
W: http://www.baycom.org/~tom/ham/ham.html
S: Maintained
BCM43XX WIRELESS DRIVER
P: Michael Buesch
M: mb@bu3sch.de
BCM43XX WIRELESS DRIVER (SOFTMAC BASED VERSION)
P: Larry Finger
M: Larry.Finger@lwfinger.net
P: Stefano Brivio
M: st3@riseup.net
W: http://bcm43xx.berlios.de/

View file

@ -271,25 +271,14 @@ config IPW2200_DEBUG
bool "Enable full debugging output in IPW2200 module."
depends on IPW2200
---help---
This option will enable debug tracing output for the IPW2200.
This option will enable low level debug tracing output for IPW2200.
This will result in the kernel module being ~100k larger. You can
control which debug output is sent to the kernel log by setting the
value in
Note, normal debug code is already compiled in. This low level
debug option enables debug on hot paths (e.g. Tx, Rx, ISR) and
will result in the kernel module being ~70k larger. Most users
will typically not need this high verbosity debug information.
/sys/bus/pci/drivers/ipw2200/debug_level
This entry will only exist if this option is enabled.
To set a value, simply echo an 8-digit hex value to the same file:
% echo 0x00000FF0 > /sys/bus/pci/drivers/ipw2200/debug_level
You can find the list of debug mask values in
drivers/net/wireless/ipw2200.h
If you are not trying to debug or develop the IPW2200 driver, you
most likely want to say N here.
If you are not sure, say N here.
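As a minimal sketch of how a driver typically consumes a sysfs debug bitmask like the debug_level file described above — all names here are hypothetical placeholders, not the actual ipw2200 symbols:

#include <linux/kernel.h>

/* Hypothetical example only. A module-level mask (mirrored through
 * sysfs as debug_level) gates each category of debug output. */
static u32 example_debug_level;

#define EXAMPLE_DL_TX	(1 << 4)	/* hypothetical hot-path bits */
#define EXAMPLE_DL_RX	(1 << 5)

#define example_debug(bits, fmt, args...)			\
do {								\
	if (example_debug_level & (bits))			\
		printk(KERN_DEBUG "example: " fmt, ## args);	\
} while (0)

/* Echoing 0x00000FF0 into the sysfs file would set example_debug_level,
 * enabling every category whose bit falls inside that mask. */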
config AIRO
tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"

View file

@ -47,6 +47,7 @@
#include <linux/pci.h>
#include <asm/uaccess.h>
#include <net/ieee80211.h>
#include <linux/kthread.h>
#include "airo.h"
@ -1187,11 +1188,10 @@ struct airo_info {
int whichbap);
unsigned short *flash;
tdsRssiEntry *rssi;
struct task_struct *task;
struct task_struct *list_bss_task;
struct task_struct *airo_thread_task;
struct semaphore sem;
pid_t thr_pid;
wait_queue_head_t thr_wait;
struct completion thr_exited;
unsigned long expires;
struct {
struct sk_buff *skb;
@ -1733,12 +1733,12 @@ static int readBSSListRid(struct airo_info *ai, int first,
cmd.cmd=CMD_LISTBSS;
if (down_interruptible(&ai->sem))
return -ERESTARTSYS;
ai->list_bss_task = current;
issuecommand(ai, &cmd, &rsp);
up(&ai->sem);
/* Let the command take effect */
ai->task = current;
ssleep(3);
ai->task = NULL;
schedule_timeout_uninterruptible(3 * HZ);
ai->list_bss_task = NULL;
}
rc = PC4500_readrid(ai, first ? ai->bssListFirst : ai->bssListNext,
list, ai->bssListRidLen, 1);
@ -2400,8 +2400,7 @@ void stop_airo_card( struct net_device *dev, int freeres )
clear_bit(FLAG_REGISTERED, &ai->flags);
}
set_bit(JOB_DIE, &ai->jobs);
kill_proc(ai->thr_pid, SIGTERM, 1);
wait_for_completion(&ai->thr_exited);
kthread_stop(ai->airo_thread_task);
/*
* Clean out tx queue
@ -2811,9 +2810,8 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
ai->config.len = 0;
ai->pci = pci;
init_waitqueue_head (&ai->thr_wait);
init_completion (&ai->thr_exited);
ai->thr_pid = kernel_thread(airo_thread, dev, CLONE_FS | CLONE_FILES);
if (ai->thr_pid < 0)
ai->airo_thread_task = kthread_run(airo_thread, dev, dev->name);
if (IS_ERR(ai->airo_thread_task))
goto err_out_free;
ai->tfm = NULL;
rc = add_airo_dev( dev );
@ -2930,8 +2928,7 @@ err_out_unlink:
del_airo_dev(dev);
err_out_thr:
set_bit(JOB_DIE, &ai->jobs);
kill_proc(ai->thr_pid, SIGTERM, 1);
wait_for_completion(&ai->thr_exited);
kthread_stop(ai->airo_thread_task);
err_out_free:
free_netdev(dev);
return NULL;
@ -3063,13 +3060,7 @@ static int airo_thread(void *data) {
struct airo_info *ai = dev->priv;
int locked;
daemonize("%s", dev->name);
allow_signal(SIGTERM);
while(1) {
if (signal_pending(current))
flush_signals(current);
/* make swsusp happy with our thread */
try_to_freeze();
@ -3097,7 +3088,7 @@ static int airo_thread(void *data) {
set_bit(JOB_AUTOWEP, &ai->jobs);
break;
}
if (!signal_pending(current)) {
if (!kthread_should_stop()) {
unsigned long wake_at;
if (!ai->expires || !ai->scan_timeout) {
wake_at = max(ai->expires,
@ -3109,7 +3100,7 @@ static int airo_thread(void *data) {
schedule_timeout(wake_at - jiffies);
continue;
}
} else if (!signal_pending(current)) {
} else if (!kthread_should_stop()) {
schedule();
continue;
}
@ -3154,7 +3145,8 @@ static int airo_thread(void *data) {
else /* Shouldn't get here, but we make sure to unlock */
up(&ai->sem);
}
complete_and_exit (&ai->thr_exited, 0);
return 0;
}
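The airo changes above replace the old kernel_thread()/kill_proc()/complete_and_exit() lifecycle with the kthread API, which needs no signals or completions. A minimal sketch of that pattern, with placeholder names rather than airo symbols:

#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/sched.h>

static struct task_struct *my_task;

static int my_thread(void *data)
{
	while (!kthread_should_stop()) {
		/* do periodic work, then sleep; no signal handling needed */
		schedule_timeout_interruptible(HZ);
	}
	return 0;			/* value returned by kthread_stop() */
}

static int my_start(void *dev)
{
	my_task = kthread_run(my_thread, dev, "my_thread");
	if (IS_ERR(my_task))		/* task_struct pointer, not a pid */
		return PTR_ERR(my_task);
	return 0;
}

static void my_stop(void)
{
	kthread_stop(my_task);		/* wakes the thread, waits for exit */
}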
static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs) {
@ -3235,8 +3227,8 @@ static irqreturn_t airo_interrupt ( int irq, void* dev_id, struct pt_regs *regs)
if(newStatus == ASSOCIATED || newStatus == REASSOCIATED) {
if (auto_wep)
apriv->expires = 0;
if (apriv->task)
wake_up_process (apriv->task);
if (apriv->list_bss_task)
wake_up_process(apriv->list_bss_task);
set_bit(FLAG_UPDATE_UNI, &apriv->flags);
set_bit(FLAG_UPDATE_MULTI, &apriv->flags);

View file

@ -33,14 +33,18 @@
#define BCM43xx_PCICFG_ICR 0x94
/* MMIO offsets */
#define BCM43xx_MMIO_DMA1_REASON 0x20
#define BCM43xx_MMIO_DMA1_IRQ_MASK 0x24
#define BCM43xx_MMIO_DMA2_REASON 0x28
#define BCM43xx_MMIO_DMA2_IRQ_MASK 0x2C
#define BCM43xx_MMIO_DMA3_REASON 0x30
#define BCM43xx_MMIO_DMA3_IRQ_MASK 0x34
#define BCM43xx_MMIO_DMA4_REASON 0x38
#define BCM43xx_MMIO_DMA4_IRQ_MASK 0x3C
#define BCM43xx_MMIO_DMA0_REASON 0x20
#define BCM43xx_MMIO_DMA0_IRQ_MASK 0x24
#define BCM43xx_MMIO_DMA1_REASON 0x28
#define BCM43xx_MMIO_DMA1_IRQ_MASK 0x2C
#define BCM43xx_MMIO_DMA2_REASON 0x30
#define BCM43xx_MMIO_DMA2_IRQ_MASK 0x34
#define BCM43xx_MMIO_DMA3_REASON 0x38
#define BCM43xx_MMIO_DMA3_IRQ_MASK 0x3C
#define BCM43xx_MMIO_DMA4_REASON 0x40
#define BCM43xx_MMIO_DMA4_IRQ_MASK 0x44
#define BCM43xx_MMIO_DMA5_REASON 0x48
#define BCM43xx_MMIO_DMA5_IRQ_MASK 0x4C
#define BCM43xx_MMIO_STATUS_BITFIELD 0x120
#define BCM43xx_MMIO_STATUS2_BITFIELD 0x124
#define BCM43xx_MMIO_GEN_IRQ_REASON 0x128
@ -56,14 +60,27 @@
#define BCM43xx_MMIO_XMITSTAT_1 0x174
#define BCM43xx_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */
#define BCM43xx_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */
#define BCM43xx_MMIO_DMA1_BASE 0x200
#define BCM43xx_MMIO_DMA2_BASE 0x220
#define BCM43xx_MMIO_DMA3_BASE 0x240
#define BCM43xx_MMIO_DMA4_BASE 0x260
/* 32-bit DMA */
#define BCM43xx_MMIO_DMA32_BASE0 0x200
#define BCM43xx_MMIO_DMA32_BASE1 0x220
#define BCM43xx_MMIO_DMA32_BASE2 0x240
#define BCM43xx_MMIO_DMA32_BASE3 0x260
#define BCM43xx_MMIO_DMA32_BASE4 0x280
#define BCM43xx_MMIO_DMA32_BASE5 0x2A0
/* 64-bit DMA */
#define BCM43xx_MMIO_DMA64_BASE0 0x200
#define BCM43xx_MMIO_DMA64_BASE1 0x240
#define BCM43xx_MMIO_DMA64_BASE2 0x280
#define BCM43xx_MMIO_DMA64_BASE3 0x2C0
#define BCM43xx_MMIO_DMA64_BASE4 0x300
#define BCM43xx_MMIO_DMA64_BASE5 0x340
/* PIO */
#define BCM43xx_MMIO_PIO1_BASE 0x300
#define BCM43xx_MMIO_PIO2_BASE 0x310
#define BCM43xx_MMIO_PIO3_BASE 0x320
#define BCM43xx_MMIO_PIO4_BASE 0x330
#define BCM43xx_MMIO_PHY_VER 0x3E0
#define BCM43xx_MMIO_PHY_RADIO 0x3E2
#define BCM43xx_MMIO_ANTENNA 0x3E8
@ -233,8 +250,14 @@
#define BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK 0x20000
/* sbtmstatehigh state flags */
#define BCM43xx_SBTMSTATEHIGH_SERROR 0x1
#define BCM43xx_SBTMSTATEHIGH_BUSY 0x4
#define BCM43xx_SBTMSTATEHIGH_SERROR 0x00000001
#define BCM43xx_SBTMSTATEHIGH_BUSY 0x00000004
#define BCM43xx_SBTMSTATEHIGH_TIMEOUT 0x00000020
#define BCM43xx_SBTMSTATEHIGH_COREFLAGS 0x1FFF0000
#define BCM43xx_SBTMSTATEHIGH_DMA64BIT 0x10000000
#define BCM43xx_SBTMSTATEHIGH_GATEDCLK 0x20000000
#define BCM43xx_SBTMSTATEHIGH_BISTFAILED 0x40000000
#define BCM43xx_SBTMSTATEHIGH_BISTCOMPLETE 0x80000000
/* sbimstate flags */
#define BCM43xx_SBIMSTATE_IB_ERROR 0x20000
@ -574,8 +597,11 @@ struct bcm43xx_dma {
struct bcm43xx_dmaring *tx_ring1;
struct bcm43xx_dmaring *tx_ring2;
struct bcm43xx_dmaring *tx_ring3;
struct bcm43xx_dmaring *tx_ring4;
struct bcm43xx_dmaring *tx_ring5;
struct bcm43xx_dmaring *rx_ring0;
struct bcm43xx_dmaring *rx_ring1; /* only available on core.rev < 5 */
struct bcm43xx_dmaring *rx_ring3; /* only available on core.rev < 5 */
};
/* Data structures for PIO transmission, per 80211 core. */
@ -739,7 +765,7 @@ struct bcm43xx_private {
/* Reason code of the last interrupt. */
u32 irq_reason;
u32 dma_reason[4];
u32 dma_reason[6];
/* saved irq enable/disable state bitfield. */
u32 irq_savedstate;
/* Link Quality calculation context. */

View file

@ -4,7 +4,7 @@
DMA ringbuffer and descriptor allocation/management
Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>
Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>
Some code in this file is derived from the b44.c driver
Copyright (C) 2002 David S. Miller
@ -109,6 +109,35 @@ void return_slot(struct bcm43xx_dmaring *ring, int slot)
}
}
u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
{
static const u16 map64[] = {
BCM43xx_MMIO_DMA64_BASE0,
BCM43xx_MMIO_DMA64_BASE1,
BCM43xx_MMIO_DMA64_BASE2,
BCM43xx_MMIO_DMA64_BASE3,
BCM43xx_MMIO_DMA64_BASE4,
BCM43xx_MMIO_DMA64_BASE5,
};
static const u16 map32[] = {
BCM43xx_MMIO_DMA32_BASE0,
BCM43xx_MMIO_DMA32_BASE1,
BCM43xx_MMIO_DMA32_BASE2,
BCM43xx_MMIO_DMA32_BASE3,
BCM43xx_MMIO_DMA32_BASE4,
BCM43xx_MMIO_DMA32_BASE5,
};
if (dma64bit) {
assert(controller_idx >= 0 &&
controller_idx < ARRAY_SIZE(map64));
return map64[controller_idx];
}
assert(controller_idx >= 0 &&
controller_idx < ARRAY_SIZE(map32));
return map32[controller_idx];
}
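A quick illustration of the new helper, with constants taken from the map64[]/map32[] tables above:

u16 base;

base = bcm43xx_dmacontroller_base(1, 1);	/* 64-bit engine, controller 1 */
/* base == BCM43xx_MMIO_DMA64_BASE1 == 0x240 */
base = bcm43xx_dmacontroller_base(0, 1);	/* 32-bit engine, controller 1 */
/* base == BCM43xx_MMIO_DMA32_BASE1 == 0x220 */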
static inline
dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
unsigned char *buf,
@ -172,7 +201,6 @@ void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
/* Unmap and free a descriptor buffer. */
static inline
void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
struct bcm43xx_dmadesc *desc,
struct bcm43xx_dmadesc_meta *meta,
int irq_context)
{
@ -188,23 +216,13 @@ static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
struct device *dev = &(ring->bcm->pci_dev->dev);
ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
&(ring->dmabase), GFP_KERNEL);
if (!ring->vbase) {
if (!ring->descbase) {
printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
return -ENOMEM;
}
if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G "
"(0x%llx, len: %lu)\n",
(unsigned long long)ring->dmabase,
BCM43xx_DMA_RINGMEMSIZE);
dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
ring->vbase, ring->dmabase);
return -ENOMEM;
}
assert(!(ring->dmabase & 0x000003FF));
memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE);
memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);
return 0;
}
@ -214,27 +232,35 @@ static void free_ringmemory(struct bcm43xx_dmaring *ring)
struct device *dev = &(ring->bcm->pci_dev->dev);
dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
ring->vbase, ring->dmabase);
ring->descbase, ring->dmabase);
}
/* Reset the RX DMA channel */
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
u16 mmio_base)
u16 mmio_base, int dma64)
{
int i;
u32 value;
u16 offset;
bcm43xx_write32(bcm,
mmio_base + BCM43xx_DMA_RX_CONTROL,
0x00000000);
offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
bcm43xx_write32(bcm, mmio_base + offset, 0);
for (i = 0; i < 1000; i++) {
value = bcm43xx_read32(bcm,
mmio_base + BCM43xx_DMA_RX_STATUS);
value &= BCM43xx_DMA_RXSTAT_STAT_MASK;
if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) {
offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
value = bcm43xx_read32(bcm, mmio_base + offset);
if (dma64) {
value &= BCM43xx_DMA64_RXSTAT;
if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
i = -1;
break;
}
} else {
value &= BCM43xx_DMA32_RXSTATE;
if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
i = -1;
break;
}
}
udelay(10);
}
if (i != -1) {
@ -247,32 +273,48 @@ int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
/* Reset the TX DMA channel */
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
u16 mmio_base)
u16 mmio_base, int dma64)
{
int i;
u32 value;
u16 offset;
for (i = 0; i < 1000; i++) {
value = bcm43xx_read32(bcm,
mmio_base + BCM43xx_DMA_TX_STATUS);
value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED ||
value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT ||
value == BCM43xx_DMA_TXSTAT_STAT_STOPPED)
offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
value = bcm43xx_read32(bcm, mmio_base + offset);
if (dma64) {
value &= BCM43xx_DMA64_TXSTAT;
if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
value == BCM43xx_DMA64_TXSTAT_STOPPED)
break;
} else {
value &= BCM43xx_DMA32_TXSTATE;
if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
value == BCM43xx_DMA32_TXSTAT_STOPPED)
break;
}
udelay(10);
}
bcm43xx_write32(bcm,
mmio_base + BCM43xx_DMA_TX_CONTROL,
0x00000000);
offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
bcm43xx_write32(bcm, mmio_base + offset, 0);
for (i = 0; i < 1000; i++) {
value = bcm43xx_read32(bcm,
mmio_base + BCM43xx_DMA_TX_STATUS);
value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) {
offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
value = bcm43xx_read32(bcm, mmio_base + offset);
if (dma64) {
value &= BCM43xx_DMA64_TXSTAT;
if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
i = -1;
break;
}
} else {
value &= BCM43xx_DMA32_TXSTATE;
if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
i = -1;
break;
}
}
udelay(10);
}
if (i != -1) {
@ -285,47 +327,98 @@ int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
return 0;
}
static void fill_descriptor(struct bcm43xx_dmaring *ring,
struct bcm43xx_dmadesc_generic *desc,
dma_addr_t dmaaddr,
u16 bufsize,
int start, int end, int irq)
{
int slot;
slot = bcm43xx_dma_desc2idx(ring, desc);
assert(slot >= 0 && slot < ring->nr_slots);
if (ring->dma64) {
u32 ctl0 = 0, ctl1 = 0;
u32 addrlo, addrhi;
u32 addrext;
addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
addrhi |= ring->routing;
if (slot == ring->nr_slots - 1)
ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
if (start)
ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
if (end)
ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
if (irq)
ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
ctl1 |= (bufsize - ring->frameoffset)
& BCM43xx_DMA64_DCTL1_BYTECNT;
ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
& BCM43xx_DMA64_DCTL1_ADDREXT_MASK;
desc->dma64.control0 = cpu_to_le32(ctl0);
desc->dma64.control1 = cpu_to_le32(ctl1);
desc->dma64.address_low = cpu_to_le32(addrlo);
desc->dma64.address_high = cpu_to_le32(addrhi);
} else {
u32 ctl;
u32 addr;
u32 addrext;
addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
>> BCM43xx_DMA32_ROUTING_SHIFT;
addr |= ring->routing;
ctl = (bufsize - ring->frameoffset)
& BCM43xx_DMA32_DCTL_BYTECNT;
if (slot == ring->nr_slots - 1)
ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
if (start)
ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
if (end)
ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
if (irq)
ctl |= BCM43xx_DMA32_DCTL_IRQ;
ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
& BCM43xx_DMA32_DCTL_ADDREXT_MASK;
desc->dma32.control = cpu_to_le32(ctl);
desc->dma32.address = cpu_to_le32(addr);
}
}
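To make the 64-bit address split above concrete, a worked example with illustrative values:

/*
 * Worked example for the 64-bit branch of fill_descriptor(), with
 * dmaaddr = 0x000000012345678C and routing = BCM43xx_DMA64_CLIENTTRANS:
 *
 *   addrlo  = 0x2345678C                            (low 32 bits)
 *   addrhi  = 0x00000001 & ~0xC0000000 = 0x00000001
 *   addrext = 0x00000001 >> 30         = 0
 *   addrhi |= 0x80000000               -> 0x80000001
 *
 * The top two bits of the high word carry the routing selector; any
 * real address bits they would displace travel in ADDREXT instead.
 */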
static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
struct bcm43xx_dmadesc *desc,
struct bcm43xx_dmadesc_generic *desc,
struct bcm43xx_dmadesc_meta *meta,
gfp_t gfp_flags)
{
struct bcm43xx_rxhdr *rxhdr;
struct bcm43xx_hwxmitstatus *xmitstat;
dma_addr_t dmaaddr;
u32 desc_addr;
u32 desc_ctl;
const int slot = (int)(desc - ring->vbase);
struct sk_buff *skb;
assert(slot >= 0 && slot < ring->nr_slots);
assert(!ring->tx);
skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
if (unlikely(!skb))
return -ENOMEM;
dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
dev_kfree_skb_any(skb);
printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G "
"(0x%llx, len: %u)\n",
(unsigned long long)dmaaddr, ring->rx_buffersize);
return -ENOMEM;
}
meta->skb = skb;
meta->dmaaddr = dmaaddr;
skb->dev = ring->bcm->net_dev;
desc_addr = (u32)(dmaaddr + ring->memoffset);
desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK &
(u32)(ring->rx_buffersize - ring->frameoffset));
if (slot == ring->nr_slots - 1)
desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
set_desc_addr(desc, desc_addr);
set_desc_ctl(desc, desc_ctl);
fill_descriptor(ring, desc, dmaaddr,
ring->rx_buffersize, 0, 0, 0);
rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
rxhdr->frame_length = 0;
rxhdr->flags1 = 0;
xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
xmitstat->cookie = 0;
return 0;
}
@ -336,17 +429,17 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
{
int i, err = -ENOMEM;
struct bcm43xx_dmadesc *desc;
struct bcm43xx_dmadesc_generic *desc;
struct bcm43xx_dmadesc_meta *meta;
for (i = 0; i < ring->nr_slots; i++) {
desc = ring->vbase + i;
meta = ring->meta + i;
desc = bcm43xx_dma_idx2desc(ring, i, &meta);
err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
if (err)
goto err_unwind;
}
mb();
ring->used_slots = ring->nr_slots;
err = 0;
out:
@ -354,8 +447,7 @@ out:
err_unwind:
for (i--; i >= 0; i--) {
desc = ring->vbase + i;
meta = ring->meta + i;
desc = bcm43xx_dma_idx2desc(ring, i, &meta);
unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
dev_kfree_skb(meta->skb);
@ -371,27 +463,67 @@ static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
{
int err = 0;
u32 value;
u32 addrext;
if (ring->tx) {
/* Set Transmit Control register to "transmit enable" */
bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
BCM43xx_DMA_TXCTRL_ENABLE);
/* Set Transmit Descriptor ring address. */
bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING,
ring->dmabase + ring->memoffset);
if (ring->dma64) {
u64 ringbase = (u64)(ring->dmabase);
addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
value = BCM43xx_DMA64_TXENABLE;
value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
& BCM43xx_DMA64_TXADDREXT_MASK;
bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
(ringbase & 0xFFFFFFFF));
bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
| ring->routing);
} else {
u32 ringbase = (u32)(ring->dmabase);
addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
value = BCM43xx_DMA32_TXENABLE;
value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
& BCM43xx_DMA32_TXADDREXT_MASK;
bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
(ringbase & ~BCM43xx_DMA32_ROUTING)
| ring->routing);
}
} else {
err = alloc_initial_descbuffers(ring);
if (err)
goto out;
/* Set Receive Control "receive enable" and frame offset */
value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT);
value |= BCM43xx_DMA_RXCTRL_ENABLE;
bcm43xx_dma_write(ring, BCM43xx_DMA_RX_CONTROL, value);
/* Set Receive Descriptor ring address. */
bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING,
ring->dmabase + ring->memoffset);
/* Init the descriptor pointer. */
bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX, 200);
if (ring->dma64) {
u64 ringbase = (u64)(ring->dmabase);
addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
value |= BCM43xx_DMA64_RXENABLE;
value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
& BCM43xx_DMA64_RXADDREXT_MASK;
bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
(ringbase & 0xFFFFFFFF));
bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
| ring->routing);
bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
} else {
u32 ringbase = (u32)(ring->dmabase);
addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
value |= BCM43xx_DMA32_RXENABLE;
value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
& BCM43xx_DMA32_RXADDREXT_MASK;
bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
(ringbase & ~BCM43xx_DMA32_ROUTING)
| ring->routing);
bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
}
}
out:
@ -402,27 +534,32 @@ out:
static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
{
if (ring->tx) {
bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
/* Zero out Transmit Descriptor ring address. */
bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING, 0);
bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
if (ring->dma64) {
bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
} else
bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
} else {
bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
/* Zero out Receive Descriptor ring address. */
bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING, 0);
bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
if (ring->dma64) {
bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
} else
bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
}
}
static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
{
struct bcm43xx_dmadesc *desc;
struct bcm43xx_dmadesc_generic *desc;
struct bcm43xx_dmadesc_meta *meta;
int i;
if (!ring->used_slots)
return;
for (i = 0; i < ring->nr_slots; i++) {
desc = ring->vbase + i;
meta = ring->meta + i;
desc = bcm43xx_dma_idx2desc(ring, i, &meta);
if (!meta->skb) {
assert(ring->tx);
@ -435,58 +572,63 @@ static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
unmap_descbuffer(ring, meta->dmaaddr,
ring->rx_buffersize, 0);
}
free_descriptor_buffer(ring, desc, meta, 0);
free_descriptor_buffer(ring, meta, 0);
}
}
/* Main initialization function. */
static
struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
u16 dma_controller_base,
int nr_descriptor_slots,
int tx)
int controller_index,
int for_tx,
int dma64)
{
struct bcm43xx_dmaring *ring;
int err;
int nr_slots;
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring)
goto out;
ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots,
nr_slots = BCM43xx_RXRING_SLOTS;
if (for_tx)
nr_slots = BCM43xx_TXRING_SLOTS;
ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
GFP_KERNEL);
if (!ring->meta)
goto err_kfree_ring;
ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET;
ring->routing = BCM43xx_DMA32_CLIENTTRANS;
if (dma64)
ring->routing = BCM43xx_DMA64_CLIENTTRANS;
#ifdef CONFIG_BCM947XX
if (bcm->pci_dev->bus->number == 0)
ring->memoffset = 0;
ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
#endif
ring->bcm = bcm;
ring->nr_slots = nr_descriptor_slots;
ring->nr_slots = nr_slots;
ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
assert(ring->suspend_mark < ring->resume_mark);
ring->mmio_base = dma_controller_base;
if (tx) {
ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
ring->index = controller_index;
ring->dma64 = !!dma64;
if (for_tx) {
ring->tx = 1;
ring->current_slot = -1;
} else {
switch (dma_controller_base) {
case BCM43xx_MMIO_DMA1_BASE:
ring->rx_buffersize = BCM43xx_DMA1_RXBUFFERSIZE;
ring->frameoffset = BCM43xx_DMA1_RX_FRAMEOFFSET;
break;
case BCM43xx_MMIO_DMA4_BASE:
ring->rx_buffersize = BCM43xx_DMA4_RXBUFFERSIZE;
ring->frameoffset = BCM43xx_DMA4_RX_FRAMEOFFSET;
break;
default:
if (ring->index == 0) {
ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
} else if (ring->index == 3) {
ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
} else
assert(0);
}
}
err = alloc_ringmemory(ring);
if (err)
@ -514,7 +656,8 @@ static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
if (!ring)
return;
dprintk(KERN_INFO PFX "DMA 0x%04x (%s) max used slots: %d/%d\n",
dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
(ring->dma64) ? "64" : "32",
ring->mmio_base,
(ring->tx) ? "TX" : "RX",
ring->max_used_slots, ring->nr_slots);
@ -537,10 +680,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm)
return;
dma = bcm43xx_current_dma(bcm);
bcm43xx_destroy_dmaring(dma->rx_ring1);
dma->rx_ring1 = NULL;
bcm43xx_destroy_dmaring(dma->rx_ring3);
dma->rx_ring3 = NULL;
bcm43xx_destroy_dmaring(dma->rx_ring0);
dma->rx_ring0 = NULL;
bcm43xx_destroy_dmaring(dma->tx_ring5);
dma->tx_ring5 = NULL;
bcm43xx_destroy_dmaring(dma->tx_ring4);
dma->tx_ring4 = NULL;
bcm43xx_destroy_dmaring(dma->tx_ring3);
dma->tx_ring3 = NULL;
bcm43xx_destroy_dmaring(dma->tx_ring2);
@ -556,48 +704,59 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
struct bcm43xx_dmaring *ring;
int err = -ENOMEM;
int dma64 = 0;
u32 sbtmstatehi;
sbtmstatehi = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH);
if (sbtmstatehi & BCM43xx_SBTMSTATEHIGH_DMA64BIT)
dma64 = 1;
/* setup TX DMA channels. */
ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
BCM43xx_TXRING_SLOTS, 1);
ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
if (!ring)
goto out;
dma->tx_ring0 = ring;
ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE,
BCM43xx_TXRING_SLOTS, 1);
ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
if (!ring)
goto err_destroy_tx0;
dma->tx_ring1 = ring;
ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE,
BCM43xx_TXRING_SLOTS, 1);
ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
if (!ring)
goto err_destroy_tx1;
dma->tx_ring2 = ring;
ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
BCM43xx_TXRING_SLOTS, 1);
ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
if (!ring)
goto err_destroy_tx2;
dma->tx_ring3 = ring;
/* setup RX DMA channels. */
ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
BCM43xx_RXRING_SLOTS, 0);
ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
if (!ring)
goto err_destroy_tx3;
dma->tx_ring4 = ring;
ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
if (!ring)
goto err_destroy_tx4;
dma->tx_ring5 = ring;
/* setup RX DMA channels. */
ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
if (!ring)
goto err_destroy_tx5;
dma->rx_ring0 = ring;
if (bcm->current_core->rev < 5) {
ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
BCM43xx_RXRING_SLOTS, 0);
ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
if (!ring)
goto err_destroy_rx0;
dma->rx_ring1 = ring;
dma->rx_ring3 = ring;
}
dprintk(KERN_INFO PFX "DMA initialized\n");
dprintk(KERN_INFO PFX "%s DMA initialized\n",
dma64 ? "64-bit" : "32-bit");
err = 0;
out:
return err;
@ -605,6 +764,12 @@ out:
err_destroy_rx0:
bcm43xx_destroy_dmaring(dma->rx_ring0);
dma->rx_ring0 = NULL;
err_destroy_tx5:
bcm43xx_destroy_dmaring(dma->tx_ring5);
dma->tx_ring5 = NULL;
err_destroy_tx4:
bcm43xx_destroy_dmaring(dma->tx_ring4);
dma->tx_ring4 = NULL;
err_destroy_tx3:
bcm43xx_destroy_dmaring(dma->tx_ring3);
dma->tx_ring3 = NULL;
@ -624,7 +789,7 @@ err_destroy_tx0:
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
int slot)
{
u16 cookie = 0xF000;
u16 cookie = 0x1000;
/* Use the upper 4 bits of the cookie as
* DMA controller ID and store the slot number
@ -632,21 +797,25 @@ static u16 generate_cookie(struct bcm43xx_dmaring *ring,
* Note that the cookie must never be 0, as this
* is a special value used in RX path.
*/
switch (ring->mmio_base) {
default:
assert(0);
case BCM43xx_MMIO_DMA1_BASE:
switch (ring->index) {
case 0:
cookie = 0xA000;
break;
case BCM43xx_MMIO_DMA2_BASE:
case 1:
cookie = 0xB000;
break;
case BCM43xx_MMIO_DMA3_BASE:
case 2:
cookie = 0xC000;
break;
case BCM43xx_MMIO_DMA4_BASE:
case 3:
cookie = 0xD000;
break;
case 4:
cookie = 0xE000;
break;
case 5:
cookie = 0xF000;
break;
}
assert(((u16)slot & 0xF000) == 0x0000);
cookie |= (u16)slot;
@ -675,6 +844,12 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
case 0xD000:
ring = dma->tx_ring3;
break;
case 0xE000:
ring = dma->tx_ring4;
break;
case 0xF000:
ring = dma->tx_ring5;
break;
default:
assert(0);
}
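A round-trip example of the cookie scheme above (values invented): the controller index lands in the top nibble, the slot in the low 12 bits:

u16 cookie = 0xE000 | 0x123;	/* generate_cookie(): TX ring 4, slot 0x123 */
/* parse_cookie() inverts it: */
u16 ring_id = cookie & 0xF000;	/* 0xE000 -> dma->tx_ring4 */
int slot    = cookie & 0x0FFF;	/* 0x123 */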
@ -687,6 +862,9 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
int slot)
{
u16 offset;
int descsize;
/* Everything is ready to start. Buffers are DMA mapped and
* associated with slots.
* "slot" is the last slot of the new frame we want to transmit.
@ -694,25 +872,26 @@ static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
*/
wmb();
slot = next_slot(ring, slot);
bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_INDEX,
(u32)(slot * sizeof(struct bcm43xx_dmadesc)));
offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
: sizeof(struct bcm43xx_dmadesc32);
bcm43xx_dma_write(ring, offset,
(u32)(slot * descsize));
}
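Illustrative arithmetic for the write above:

/*
 * The TX index register takes a byte offset, not a slot number.
 * Posting slot 5 on a 64-bit ring writes
 * 5 * sizeof(struct bcm43xx_dmadesc64) = 5 * 16 = 0x50;
 * on a 32-bit ring it would be 5 * 8 = 0x28.
 */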
static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
struct sk_buff *skb,
u8 cur_frag)
{
int slot;
struct bcm43xx_dmadesc *desc;
struct bcm43xx_dmadesc_generic *desc;
struct bcm43xx_dmadesc_meta *meta;
u32 desc_ctl;
u32 desc_addr;
dma_addr_t dmaaddr;
assert(skb_shinfo(skb)->nr_frags == 0);
slot = request_slot(ring);
desc = ring->vbase + slot;
meta = ring->meta + slot;
desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
/* Add a device specific TX header. */
assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
@ -729,29 +908,14 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
generate_cookie(ring, slot));
meta->skb = skb;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) {
return_slot(ring, slot);
printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G "
"(0x%llx, len: %u)\n",
(unsigned long long)meta->dmaaddr, skb->len);
return -ENOMEM;
}
dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
meta->dmaaddr = dmaaddr;
desc_addr = (u32)(meta->dmaaddr + ring->memoffset);
desc_ctl = BCM43xx_DMADTOR_FRAMESTART | BCM43xx_DMADTOR_FRAMEEND;
desc_ctl |= BCM43xx_DMADTOR_COMPIRQ;
desc_ctl |= (BCM43xx_DMADTOR_BYTECNT_MASK &
(u32)(meta->skb->len - ring->frameoffset));
if (slot == ring->nr_slots - 1)
desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
fill_descriptor(ring, desc, dmaaddr,
skb->len, 1, 1, 1);
set_desc_ctl(desc, desc_ctl);
set_desc_addr(desc, desc_addr);
/* Now transfer the whole frame. */
dmacontroller_poke_tx(ring, slot);
return 0;
}
int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
@ -781,7 +945,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
/* Take skb from ieee80211_txb_free */
txb->fragments[i] = NULL;
dma_tx_fragment(ring, skb, i);
//TODO: handle failure of dma_tx_fragment
}
ieee80211_txb_free(txb);
@ -792,23 +955,28 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
struct bcm43xx_xmitstatus *status)
{
struct bcm43xx_dmaring *ring;
struct bcm43xx_dmadesc *desc;
struct bcm43xx_dmadesc_generic *desc;
struct bcm43xx_dmadesc_meta *meta;
int is_last_fragment;
int slot;
u32 tmp;
ring = parse_cookie(bcm, status->cookie, &slot);
assert(ring);
assert(ring->tx);
assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
while (1) {
assert(slot >= 0 && slot < ring->nr_slots);
desc = ring->vbase + slot;
meta = ring->meta + slot;
desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
is_last_fragment = !!(get_desc_ctl(desc) & BCM43xx_DMADTOR_FRAMEEND);
if (ring->dma64) {
tmp = le32_to_cpu(desc->dma64.control0);
is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
} else {
tmp = le32_to_cpu(desc->dma32.control);
is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
}
unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
free_descriptor_buffer(ring, desc, meta, 1);
free_descriptor_buffer(ring, meta, 1);
/* Everything belonging to the slot is unmapped
* and freed, so we can return it.
*/
@ -824,7 +992,7 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
static void dma_rx(struct bcm43xx_dmaring *ring,
int *slot)
{
struct bcm43xx_dmadesc *desc;
struct bcm43xx_dmadesc_generic *desc;
struct bcm43xx_dmadesc_meta *meta;
struct bcm43xx_rxhdr *rxhdr;
struct sk_buff *skb;
@ -832,13 +1000,12 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
int err;
dma_addr_t dmaaddr;
desc = ring->vbase + *slot;
meta = ring->meta + *slot;
desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
skb = meta->skb;
if (ring->mmio_base == BCM43xx_MMIO_DMA4_BASE) {
if (ring->index == 3) {
/* We received an xmit status. */
struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
struct bcm43xx_xmitstatus stat;
@ -894,8 +1061,7 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
s32 tmp = len;
while (1) {
desc = ring->vbase + *slot;
meta = ring->meta + *slot;
desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
/* recycle the descriptor buffer. */
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize);
@ -945,9 +1111,15 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
#endif
assert(!ring->tx);
status = bcm43xx_dma_read(ring, BCM43xx_DMA_RX_STATUS);
descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK);
current_slot = descptr / sizeof(struct bcm43xx_dmadesc);
if (ring->dma64) {
status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
} else {
status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
descptr = (status & BCM43xx_DMA32_RXDPTR);
current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
}
assert(current_slot >= 0 && current_slot < ring->nr_slots);
slot = ring->current_slot;
@ -958,8 +1130,13 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
ring->max_used_slots = used_slots;
#endif
}
bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX,
(u32)(slot * sizeof(struct bcm43xx_dmadesc)));
if (ring->dma64) {
bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
(u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
} else {
bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
(u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
}
ring->current_slot = slot;
}
@ -967,16 +1144,28 @@ void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
{
assert(ring->tx);
bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL)
| BCM43xx_DMA_TXCTRL_SUSPEND);
if (ring->dma64) {
bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
| BCM43xx_DMA64_TXSUSPEND);
} else {
bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
| BCM43xx_DMA32_TXSUSPEND);
}
}
void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
{
assert(ring->tx);
bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL)
& ~BCM43xx_DMA_TXCTRL_SUSPEND);
if (ring->dma64) {
bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
& ~BCM43xx_DMA64_TXSUSPEND);
} else {
bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
& ~BCM43xx_DMA32_TXSUSPEND);
}
bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
}

View file

@ -14,63 +14,179 @@
#define BCM43xx_DMAIRQ_NONFATALMASK (1 << 13)
#define BCM43xx_DMAIRQ_RX_DONE (1 << 16)
/* DMA controller register offsets. (relative to BCM43xx_DMA#_BASE) */
#define BCM43xx_DMA_TX_CONTROL 0x00
#define BCM43xx_DMA_TX_DESC_RING 0x04
#define BCM43xx_DMA_TX_DESC_INDEX 0x08
#define BCM43xx_DMA_TX_STATUS 0x0c
#define BCM43xx_DMA_RX_CONTROL 0x10
#define BCM43xx_DMA_RX_DESC_RING 0x14
#define BCM43xx_DMA_RX_DESC_INDEX 0x18
#define BCM43xx_DMA_RX_STATUS 0x1c
/* DMA controller channel control word values. */
#define BCM43xx_DMA_TXCTRL_ENABLE (1 << 0)
#define BCM43xx_DMA_TXCTRL_SUSPEND (1 << 1)
#define BCM43xx_DMA_TXCTRL_LOOPBACK (1 << 2)
#define BCM43xx_DMA_TXCTRL_FLUSH (1 << 4)
#define BCM43xx_DMA_RXCTRL_ENABLE (1 << 0)
#define BCM43xx_DMA_RXCTRL_FRAMEOFF_MASK 0x000000fe
#define BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT 1
#define BCM43xx_DMA_RXCTRL_PIO (1 << 8)
/* DMA controller channel status word values. */
#define BCM43xx_DMA_TXSTAT_DPTR_MASK 0x00000fff
#define BCM43xx_DMA_TXSTAT_STAT_MASK 0x0000f000
#define BCM43xx_DMA_TXSTAT_STAT_DISABLED 0x00000000
#define BCM43xx_DMA_TXSTAT_STAT_ACTIVE 0x00001000
#define BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT 0x00002000
#define BCM43xx_DMA_TXSTAT_STAT_STOPPED 0x00003000
#define BCM43xx_DMA_TXSTAT_STAT_SUSP 0x00004000
#define BCM43xx_DMA_TXSTAT_ERROR_MASK 0x000f0000
#define BCM43xx_DMA_TXSTAT_FLUSHED (1 << 20)
#define BCM43xx_DMA_RXSTAT_DPTR_MASK 0x00000fff
#define BCM43xx_DMA_RXSTAT_STAT_MASK 0x0000f000
#define BCM43xx_DMA_RXSTAT_STAT_DISABLED 0x00000000
#define BCM43xx_DMA_RXSTAT_STAT_ACTIVE 0x00001000
#define BCM43xx_DMA_RXSTAT_STAT_IDLEWAIT 0x00002000
#define BCM43xx_DMA_RXSTAT_STAT_RESERVED 0x00003000
#define BCM43xx_DMA_RXSTAT_STAT_ERRORS 0x00004000
#define BCM43xx_DMA_RXSTAT_ERROR_MASK 0x000f0000
/*** 32-bit DMA Engine. ***/
/* 32-bit DMA controller registers. */
#define BCM43xx_DMA32_TXCTL 0x00
#define BCM43xx_DMA32_TXENABLE 0x00000001
#define BCM43xx_DMA32_TXSUSPEND 0x00000002
#define BCM43xx_DMA32_TXLOOPBACK 0x00000004
#define BCM43xx_DMA32_TXFLUSH 0x00000010
#define BCM43xx_DMA32_TXADDREXT_MASK 0x00030000
#define BCM43xx_DMA32_TXADDREXT_SHIFT 16
#define BCM43xx_DMA32_TXRING 0x04
#define BCM43xx_DMA32_TXINDEX 0x08
#define BCM43xx_DMA32_TXSTATUS 0x0C
#define BCM43xx_DMA32_TXDPTR 0x00000FFF
#define BCM43xx_DMA32_TXSTATE 0x0000F000
#define BCM43xx_DMA32_TXSTAT_DISABLED 0x00000000
#define BCM43xx_DMA32_TXSTAT_ACTIVE 0x00001000
#define BCM43xx_DMA32_TXSTAT_IDLEWAIT 0x00002000
#define BCM43xx_DMA32_TXSTAT_STOPPED 0x00003000
#define BCM43xx_DMA32_TXSTAT_SUSP 0x00004000
#define BCM43xx_DMA32_TXERROR 0x000F0000
#define BCM43xx_DMA32_TXERR_NOERR 0x00000000
#define BCM43xx_DMA32_TXERR_PROT 0x00010000
#define BCM43xx_DMA32_TXERR_UNDERRUN 0x00020000
#define BCM43xx_DMA32_TXERR_BUFREAD 0x00030000
#define BCM43xx_DMA32_TXERR_DESCREAD 0x00040000
#define BCM43xx_DMA32_TXACTIVE 0xFFF00000
#define BCM43xx_DMA32_RXCTL 0x10
#define BCM43xx_DMA32_RXENABLE 0x00000001
#define BCM43xx_DMA32_RXFROFF_MASK 0x000000FE
#define BCM43xx_DMA32_RXFROFF_SHIFT 1
#define BCM43xx_DMA32_RXDIRECTFIFO 0x00000100
#define BCM43xx_DMA32_RXADDREXT_MASK 0x00030000
#define BCM43xx_DMA32_RXADDREXT_SHIFT 16
#define BCM43xx_DMA32_RXRING 0x14
#define BCM43xx_DMA32_RXINDEX 0x18
#define BCM43xx_DMA32_RXSTATUS 0x1C
#define BCM43xx_DMA32_RXDPTR 0x00000FFF
#define BCM43xx_DMA32_RXSTATE 0x0000F000
#define BCM43xx_DMA32_RXSTAT_DISABLED 0x00000000
#define BCM43xx_DMA32_RXSTAT_ACTIVE 0x00001000
#define BCM43xx_DMA32_RXSTAT_IDLEWAIT 0x00002000
#define BCM43xx_DMA32_RXSTAT_STOPPED 0x00003000
#define BCM43xx_DMA32_RXERROR 0x000F0000
#define BCM43xx_DMA32_RXERR_NOERR 0x00000000
#define BCM43xx_DMA32_RXERR_PROT 0x00010000
#define BCM43xx_DMA32_RXERR_OVERFLOW 0x00020000
#define BCM43xx_DMA32_RXERR_BUFWRITE 0x00030000
#define BCM43xx_DMA32_RXERR_DESCREAD 0x00040000
#define BCM43xx_DMA32_RXACTIVE 0xFFF00000
/* 32-bit DMA descriptor. */
struct bcm43xx_dmadesc32 {
__le32 control;
__le32 address;
} __attribute__((__packed__));
#define BCM43xx_DMA32_DCTL_BYTECNT 0x00001FFF
#define BCM43xx_DMA32_DCTL_ADDREXT_MASK 0x00030000
#define BCM43xx_DMA32_DCTL_ADDREXT_SHIFT 16
#define BCM43xx_DMA32_DCTL_DTABLEEND 0x10000000
#define BCM43xx_DMA32_DCTL_IRQ 0x20000000
#define BCM43xx_DMA32_DCTL_FRAMEEND 0x40000000
#define BCM43xx_DMA32_DCTL_FRAMESTART 0x80000000
/* Address field Routing value. */
#define BCM43xx_DMA32_ROUTING 0xC0000000
#define BCM43xx_DMA32_ROUTING_SHIFT 30
#define BCM43xx_DMA32_NOTRANS 0x00000000
#define BCM43xx_DMA32_CLIENTTRANS 0x40000000
/*** 64-bit DMA Engine. ***/
/* 64-bit DMA controller registers. */
#define BCM43xx_DMA64_TXCTL 0x00
#define BCM43xx_DMA64_TXENABLE 0x00000001
#define BCM43xx_DMA64_TXSUSPEND 0x00000002
#define BCM43xx_DMA64_TXLOOPBACK 0x00000004
#define BCM43xx_DMA64_TXFLUSH 0x00000010
#define BCM43xx_DMA64_TXADDREXT_MASK 0x00030000
#define BCM43xx_DMA64_TXADDREXT_SHIFT 16
#define BCM43xx_DMA64_TXINDEX 0x04
#define BCM43xx_DMA64_TXRINGLO 0x08
#define BCM43xx_DMA64_TXRINGHI 0x0C
#define BCM43xx_DMA64_TXSTATUS 0x10
#define BCM43xx_DMA64_TXSTATDPTR 0x00001FFF
#define BCM43xx_DMA64_TXSTAT 0xF0000000
#define BCM43xx_DMA64_TXSTAT_DISABLED 0x00000000
#define BCM43xx_DMA64_TXSTAT_ACTIVE 0x10000000
#define BCM43xx_DMA64_TXSTAT_IDLEWAIT 0x20000000
#define BCM43xx_DMA64_TXSTAT_STOPPED 0x30000000
#define BCM43xx_DMA64_TXSTAT_SUSP 0x40000000
#define BCM43xx_DMA64_TXERROR 0x14
#define BCM43xx_DMA64_TXERRDPTR 0x0001FFFF
#define BCM43xx_DMA64_TXERR 0xF0000000
#define BCM43xx_DMA64_TXERR_NOERR 0x00000000
#define BCM43xx_DMA64_TXERR_PROT 0x10000000
#define BCM43xx_DMA64_TXERR_UNDERRUN 0x20000000
#define BCM43xx_DMA64_TXERR_TRANSFER 0x30000000
#define BCM43xx_DMA64_TXERR_DESCREAD 0x40000000
#define BCM43xx_DMA64_TXERR_CORE 0x50000000
#define BCM43xx_DMA64_RXCTL 0x20
#define BCM43xx_DMA64_RXENABLE 0x00000001
#define BCM43xx_DMA64_RXFROFF_MASK 0x000000FE
#define BCM43xx_DMA64_RXFROFF_SHIFT 1
#define BCM43xx_DMA64_RXDIRECTFIFO 0x00000100
#define BCM43xx_DMA64_RXADDREXT_MASK 0x00030000
#define BCM43xx_DMA64_RXADDREXT_SHIFT 16
#define BCM43xx_DMA64_RXINDEX 0x24
#define BCM43xx_DMA64_RXRINGLO 0x28
#define BCM43xx_DMA64_RXRINGHI 0x2C
#define BCM43xx_DMA64_RXSTATUS 0x30
#define BCM43xx_DMA64_RXSTATDPTR 0x00001FFF
#define BCM43xx_DMA64_RXSTAT 0xF0000000
#define BCM43xx_DMA64_RXSTAT_DISABLED 0x00000000
#define BCM43xx_DMA64_RXSTAT_ACTIVE 0x10000000
#define BCM43xx_DMA64_RXSTAT_IDLEWAIT 0x20000000
#define BCM43xx_DMA64_RXSTAT_STOPPED 0x30000000
#define BCM43xx_DMA64_RXSTAT_SUSP 0x40000000
#define BCM43xx_DMA64_RXERROR 0x34
#define BCM43xx_DMA64_RXERRDPTR 0x0001FFFF
#define BCM43xx_DMA64_RXERR 0xF0000000
#define BCM43xx_DMA64_RXERR_NOERR 0x00000000
#define BCM43xx_DMA64_RXERR_PROT 0x10000000
#define BCM43xx_DMA64_RXERR_UNDERRUN 0x20000000
#define BCM43xx_DMA64_RXERR_TRANSFER 0x30000000
#define BCM43xx_DMA64_RXERR_DESCREAD 0x40000000
#define BCM43xx_DMA64_RXERR_CORE 0x50000000
/* 64-bit DMA descriptor. */
struct bcm43xx_dmadesc64 {
__le32 control0;
__le32 control1;
__le32 address_low;
__le32 address_high;
} __attribute__((__packed__));
#define BCM43xx_DMA64_DCTL0_DTABLEEND 0x10000000
#define BCM43xx_DMA64_DCTL0_IRQ 0x20000000
#define BCM43xx_DMA64_DCTL0_FRAMEEND 0x40000000
#define BCM43xx_DMA64_DCTL0_FRAMESTART 0x80000000
#define BCM43xx_DMA64_DCTL1_BYTECNT 0x00001FFF
#define BCM43xx_DMA64_DCTL1_ADDREXT_MASK 0x00030000
#define BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT 16
/* Address field Routing value. */
#define BCM43xx_DMA64_ROUTING 0xC0000000
#define BCM43xx_DMA64_ROUTING_SHIFT 30
#define BCM43xx_DMA64_NOTRANS 0x00000000
#define BCM43xx_DMA64_CLIENTTRANS 0x80000000
struct bcm43xx_dmadesc_generic {
union {
struct bcm43xx_dmadesc32 dma32;
struct bcm43xx_dmadesc64 dma64;
} __attribute__((__packed__));
} __attribute__((__packed__));
/* DMA descriptor control field values. */
#define BCM43xx_DMADTOR_BYTECNT_MASK 0x00001fff
#define BCM43xx_DMADTOR_DTABLEEND (1 << 28) /* End of descriptor table */
#define BCM43xx_DMADTOR_COMPIRQ (1 << 29) /* IRQ on completion request */
#define BCM43xx_DMADTOR_FRAMEEND (1 << 30)
#define BCM43xx_DMADTOR_FRAMESTART (1 << 31)
/* Misc DMA constants */
#define BCM43xx_DMA_RINGMEMSIZE PAGE_SIZE
#define BCM43xx_DMA_BUSADDRMAX 0x3FFFFFFF
#define BCM43xx_DMA_DMABUSADDROFFSET (1 << 30)
#define BCM43xx_DMA1_RX_FRAMEOFFSET 30
#define BCM43xx_DMA4_RX_FRAMEOFFSET 0
#define BCM43xx_DMA0_RX_FRAMEOFFSET 30
#define BCM43xx_DMA3_RX_FRAMEOFFSET 0
/* DMA engine tuning knobs */
#define BCM43xx_TXRING_SLOTS 512
#define BCM43xx_RXRING_SLOTS 64
#define BCM43xx_DMA1_RXBUFFERSIZE (2304 + 100)
#define BCM43xx_DMA4_RXBUFFERSIZE 16
#define BCM43xx_DMA0_RX_BUFFERSIZE (2304 + 100)
#define BCM43xx_DMA3_RX_BUFFERSIZE 16
/* Suspend the tx queue, if less than this percent slots are free. */
#define BCM43xx_TXSUSPEND_PERCENT 20
/* Resume the tx queue, if more than this percent slots are free. */
@ -86,17 +202,6 @@ struct bcm43xx_private;
struct bcm43xx_xmitstatus;
struct bcm43xx_dmadesc {
__le32 _control;
__le32 _address;
} __attribute__((__packed__));
/* Macros to access the bcm43xx_dmadesc struct */
#define get_desc_ctl(desc) le32_to_cpu((desc)->_control)
#define set_desc_ctl(desc, ctl) do { (desc)->_control = cpu_to_le32(ctl); } while (0)
#define get_desc_addr(desc) le32_to_cpu((desc)->_address)
#define set_desc_addr(desc, addr) do { (desc)->_address = cpu_to_le32(addr); } while (0)
struct bcm43xx_dmadesc_meta {
/* The kernel DMA-able buffer. */
struct sk_buff *skb;
@ -105,15 +210,14 @@ struct bcm43xx_dmadesc_meta {
};
struct bcm43xx_dmaring {
struct bcm43xx_private *bcm;
/* Kernel virtual base address of the ring memory. */
struct bcm43xx_dmadesc *vbase;
/* DMA memory offset */
dma_addr_t memoffset;
/* (Unadjusted) DMA base bus-address of the ring memory. */
dma_addr_t dmabase;
void *descbase;
/* Meta data about all descriptors. */
struct bcm43xx_dmadesc_meta *meta;
/* DMA Routing value. */
u32 routing;
/* (Unadjusted) DMA base bus-address of the ring memory. */
dma_addr_t dmabase;
/* Number of descriptor slots in the ring. */
int nr_slots;
/* Number of used descriptor slots. */
@ -127,12 +231,17 @@ struct bcm43xx_dmaring {
u32 frameoffset;
/* Descriptor buffer size. */
u16 rx_buffersize;
/* The MMIO base register of the DMA controller, this
* ring is posted to.
*/
/* The MMIO base register of the DMA controller. */
u16 mmio_base;
u8 tx:1, /* TRUE, if this is a TX ring. */
suspended:1; /* TRUE, if transfers are suspended on this ring. */
/* DMA controller index number (0-5). */
int index;
/* Boolean. Is this a TX ring? */
u8 tx;
/* Boolean. 64bit DMA if true, 32bit DMA otherwise. */
u8 dma64;
/* Boolean. Are transfers suspended on this ring? */
u8 suspended;
struct bcm43xx_private *bcm;
#ifdef CONFIG_BCM43XX_DEBUG
/* Maximum number of used slots. */
int max_used_slots;
@ -140,6 +249,34 @@ struct bcm43xx_dmaring {
};
static inline
int bcm43xx_dma_desc2idx(struct bcm43xx_dmaring *ring,
struct bcm43xx_dmadesc_generic *desc)
{
if (ring->dma64) {
struct bcm43xx_dmadesc64 *dd64 = ring->descbase;
return (int)(&(desc->dma64) - dd64);
} else {
struct bcm43xx_dmadesc32 *dd32 = ring->descbase;
return (int)(&(desc->dma32) - dd32);
}
}
static inline
struct bcm43xx_dmadesc_generic * bcm43xx_dma_idx2desc(struct bcm43xx_dmaring *ring,
int slot,
struct bcm43xx_dmadesc_meta **meta)
{
*meta = &(ring->meta[slot]);
if (ring->dma64) {
struct bcm43xx_dmadesc64 *dd64 = ring->descbase;
return (struct bcm43xx_dmadesc_generic *)(&(dd64[slot]));
} else {
struct bcm43xx_dmadesc32 *dd32 = ring->descbase;
return (struct bcm43xx_dmadesc_generic *)(&(dd32[slot]));
}
}
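Assuming a ring and a valid slot are in scope, the two helpers above invert each other; a minimal usage sketch:

struct bcm43xx_dmadesc_meta *meta;
struct bcm43xx_dmadesc_generic *desc;

desc = bcm43xx_dma_idx2desc(ring, slot, &meta);		/* slot -> desc + meta */
assert(bcm43xx_dma_desc2idx(ring, desc) == slot);	/* desc -> slot */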
static inline
u32 bcm43xx_dma_read(struct bcm43xx_dmaring *ring,
u16 offset)
@ -159,9 +296,13 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm);
void bcm43xx_dma_free(struct bcm43xx_private *bcm);
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
u16 dmacontroller_mmio_base);
u16 dmacontroller_mmio_base,
int dma64);
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
u16 dmacontroller_mmio_base);
u16 dmacontroller_mmio_base,
int dma64);
u16 bcm43xx_dmacontroller_base(int dma64bit, int dmacontroller_idx);
void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring);
void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring);
@ -173,7 +314,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
struct ieee80211_txb *txb);
void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring);
#else /* CONFIG_BCM43XX_DMA */
@ -188,13 +328,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm)
}
static inline
int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
u16 dmacontroller_mmio_base)
u16 dmacontroller_mmio_base,
int dma64)
{
return 0;
}
static inline
int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
u16 dmacontroller_mmio_base)
u16 dmacontroller_mmio_base,
int dma64)
{
return 0;
}

View file

@ -1371,6 +1371,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
if ((bcm43xx_core_enabled(bcm)) &&
!bcm43xx_using_pio(bcm)) {
//FIXME: Do we _really_ want #ifndef CONFIG_BCM947XX here?
#if 0
#ifndef CONFIG_BCM947XX
/* reset all used DMA controllers. */
bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
@ -1380,6 +1381,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
if (bcm->current_core->rev < 5)
bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE);
#endif
#endif
}
if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) {
@ -1671,8 +1673,9 @@ static void handle_irq_beacon(struct bcm43xx_private *bcm)
static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
{
u32 reason;
u32 dma_reason[4];
int activity = 0;
u32 dma_reason[6];
u32 merged_dma_reason = 0;
int i, activity = 0;
unsigned long flags;
#ifdef CONFIG_BCM43XX_DEBUG
@ -1684,10 +1687,10 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
spin_lock_irqsave(&bcm->irq_lock, flags);
reason = bcm->irq_reason;
dma_reason[0] = bcm->dma_reason[0];
dma_reason[1] = bcm->dma_reason[1];
dma_reason[2] = bcm->dma_reason[2];
dma_reason[3] = bcm->dma_reason[3];
for (i = 5; i >= 0; i--) {
dma_reason[i] = bcm->dma_reason[i];
merged_dma_reason |= dma_reason[i];
}
if (unlikely(reason & BCM43xx_IRQ_XMIT_ERROR)) {
/* TX error. We get this when Template Ram is written in wrong endianness
@ -1698,27 +1701,25 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
printkl(KERN_ERR PFX "FATAL ERROR: BCM43xx_IRQ_XMIT_ERROR\n");
bcmirq_handled(BCM43xx_IRQ_XMIT_ERROR);
}
if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_FATALMASK) |
(dma_reason[1] & BCM43xx_DMAIRQ_FATALMASK) |
(dma_reason[2] & BCM43xx_DMAIRQ_FATALMASK) |
(dma_reason[3] & BCM43xx_DMAIRQ_FATALMASK))) {
if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_FATALMASK)) {
printkl(KERN_ERR PFX "FATAL ERROR: Fatal DMA error: "
"0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
"0x%08X, 0x%08X, 0x%08X, "
"0x%08X, 0x%08X, 0x%08X\n",
dma_reason[0], dma_reason[1],
dma_reason[2], dma_reason[3]);
dma_reason[2], dma_reason[3],
dma_reason[4], dma_reason[5]);
bcm43xx_controller_restart(bcm, "DMA error");
mmiowb();
spin_unlock_irqrestore(&bcm->irq_lock, flags);
return;
}
if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_NONFATALMASK) |
(dma_reason[1] & BCM43xx_DMAIRQ_NONFATALMASK) |
(dma_reason[2] & BCM43xx_DMAIRQ_NONFATALMASK) |
(dma_reason[3] & BCM43xx_DMAIRQ_NONFATALMASK))) {
if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_NONFATALMASK)) {
printkl(KERN_ERR PFX "DMA error: "
"0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
"0x%08X, 0x%08X, 0x%08X, "
"0x%08X, 0x%08X, 0x%08X\n",
dma_reason[0], dma_reason[1],
dma_reason[2], dma_reason[3]);
dma_reason[2], dma_reason[3],
dma_reason[4], dma_reason[5]);
}
if (reason & BCM43xx_IRQ_PS) {
@ -1753,8 +1754,6 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
}
/* Check the DMA reason registers for received data. */
assert(!(dma_reason[1] & BCM43xx_DMAIRQ_RX_DONE));
assert(!(dma_reason[2] & BCM43xx_DMAIRQ_RX_DONE));
if (dma_reason[0] & BCM43xx_DMAIRQ_RX_DONE) {
if (bcm43xx_using_pio(bcm))
bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue0);
@ -1762,13 +1761,17 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring0);
/* We intentionally don't set "activity" to 1, here. */
}
assert(!(dma_reason[1] & BCM43xx_DMAIRQ_RX_DONE));
assert(!(dma_reason[2] & BCM43xx_DMAIRQ_RX_DONE));
if (dma_reason[3] & BCM43xx_DMAIRQ_RX_DONE) {
if (bcm43xx_using_pio(bcm))
bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue3);
else
bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring1);
bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring3);
activity = 1;
}
assert(!(dma_reason[4] & BCM43xx_DMAIRQ_RX_DONE));
assert(!(dma_reason[5] & BCM43xx_DMAIRQ_RX_DONE));
bcmirq_handled(BCM43xx_IRQ_RX);
if (reason & BCM43xx_IRQ_XMIT_STATUS) {
@ -1825,14 +1828,18 @@ static void bcm43xx_interrupt_ack(struct bcm43xx_private *bcm, u32 reason)
bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, reason);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_REASON,
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_REASON,
bcm->dma_reason[0]);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_REASON,
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_REASON,
bcm->dma_reason[1]);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_REASON,
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_REASON,
bcm->dma_reason[2]);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_REASON,
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_REASON,
bcm->dma_reason[3]);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_REASON,
bcm->dma_reason[4]);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_REASON,
bcm->dma_reason[5]);
}
/* Interrupt handler top-half */
@ -1860,14 +1867,18 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
if (!reason)
goto out;
bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON)
& 0x0001dc00;
bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA2_REASON)
& 0x0000dc00;
bcm->dma_reason[2] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA3_REASON)
& 0x0000dc00;
bcm->dma_reason[3] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA4_REASON)
& 0x0001dc00;
bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA0_REASON)
& 0x0001DC00;
bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON)
& 0x0000DC00;
bcm->dma_reason[2] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA2_REASON)
& 0x0000DC00;
bcm->dma_reason[3] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA3_REASON)
& 0x0001DC00;
bcm->dma_reason[4] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA4_REASON)
& 0x0000DC00;
bcm->dma_reason[5] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA5_REASON)
& 0x0000DC00;
bcm43xx_interrupt_ack(bcm, reason);
@ -2448,10 +2459,12 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm)
bcm43xx_write32(bcm, 0x018C, 0x02000000);
}
bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0x00004000);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_IRQ_MASK, 0x0001DC00);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_IRQ_MASK, 0x0000DC00);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_IRQ_MASK, 0x0001DC00);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_IRQ_MASK, 0x0000DC00);
bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_IRQ_MASK, 0x0000DC00);
value32 = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
value32 |= 0x00100000;
@ -3261,6 +3274,7 @@ static int bcm43xx_shutdown_all_wireless_cores(struct bcm43xx_private *bcm)
/* This is the opposite of bcm43xx_init_board() */
static void bcm43xx_free_board(struct bcm43xx_private *bcm)
{
bcm43xx_rng_exit(bcm);
bcm43xx_sysfs_unregister(bcm);
bcm43xx_periodic_tasks_delete(bcm);
@ -3349,6 +3363,8 @@ static void prepare_priv_for_init(struct bcm43xx_private *bcm)
memset(bcm->dma_reason, 0, sizeof(bcm->dma_reason));
bcm->irq_savedstate = BCM43xx_IRQ_INITIAL;
bcm->mac_suspended = 1;
/* Noise calculation context */
memset(&bcm->noisecalc, 0, sizeof(bcm->noisecalc));
@ -3528,6 +3544,9 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
err = bcm43xx_sysfs_register(bcm);
if (err)
goto err_wlshutdown;
err = bcm43xx_rng_init(bcm);
if (err)
goto err_sysfs_unreg;
/*FIXME: This should be handled by softmac instead. */
schedule_work(&bcm->softmac->associnfo.work);
@ -3537,6 +3556,8 @@ out:
return err;
err_sysfs_unreg:
bcm43xx_sysfs_unregister(bcm);
err_wlshutdown:
bcm43xx_shutdown_all_wireless_cores(bcm);
err_crystal_off:
@ -3899,7 +3920,9 @@ static int bcm43xx_ieee80211_hard_start_xmit(struct ieee80211_txb *txb,
err = bcm43xx_tx(bcm, txb);
spin_unlock_irqrestore(&bcm->irq_lock, flags);
return err;
if (unlikely(err))
return NETDEV_TX_BUSY;
return NETDEV_TX_OK;
}
static struct net_device_stats * bcm43xx_net_get_stats(struct net_device *net_dev)

View file

@ -229,8 +229,8 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
range->throughput = 27 * 1000 * 1000;
range->max_qual.qual = 100;
range->max_qual.level = 152; /* set floor at -104 dBm (152 - 256) */
range->max_qual.noise = 152;
range->max_qual.level = 146; /* set floor at -110 dBm (146 - 256) */
range->max_qual.noise = 146;
range->max_qual.updated = IW_QUAL_ALL_UPDATED;
range->avg_qual.qual = 50;

View file

@ -6254,13 +6254,14 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
* member to call a function that then just turns and calls ipw2100_up.
* net_dev->init is called after name allocation but before the
* notifier chain is called */
mutex_lock(&priv->action_mutex);
err = register_netdev(dev);
if (err) {
printk(KERN_WARNING DRV_NAME
"Error calling register_netdev.\n");
goto fail_unlock;
goto fail;
}
mutex_lock(&priv->action_mutex);
registered = 1;
IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
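The reordering above takes action_mutex only after register_netdev() has succeeded. As the comment notes, register_netdev() invokes net_dev->init and then the netdev notifier chain, and either may call back into driver code that wants the same mutex, so holding it across registration risks deadlock. A reduced pthread sketch of the hazard, with hypothetical stand-in names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t action_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Models a callback run from inside register_netdev(). */
static int dev_init_callback(void)
{
        /* Would deadlock if the caller already held action_mutex. */
        pthread_mutex_lock(&action_mutex);
        pthread_mutex_unlock(&action_mutex);
        return 0;
}

static int register_netdev_stub(void)
{
        return dev_init_callback();
}

int main(void)
{
        /* Fixed ordering: register first, lock afterwards. */
        if (register_netdev_stub() != 0)
                return 1;
        pthread_mutex_lock(&action_mutex);
        /* ... post-registration setup ... */
        pthread_mutex_unlock(&action_mutex);
        puts("registered without deadlock");
        return 0;
}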


@ -70,7 +70,7 @@
#define VQ
#endif
#define IPW2200_VERSION "1.1.2" VK VD VM VP VR VQ
#define IPW2200_VERSION "1.1.4" VK VD VM VP VR VQ
#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
#define DRV_VERSION IPW2200_VERSION
@ -83,9 +83,7 @@ MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
static int cmdlog = 0;
#ifdef CONFIG_IPW2200_DEBUG
static int debug = 0;
#endif
static int channel = 0;
static int mode = 0;
@ -567,7 +565,6 @@ static inline void ipw_disable_interrupts(struct ipw_priv *priv)
spin_unlock_irqrestore(&priv->irq_lock, flags);
}
#ifdef CONFIG_IPW2200_DEBUG
static char *ipw_error_desc(u32 val)
{
switch (val) {
@ -634,7 +631,6 @@ static void ipw_dump_error_log(struct ipw_priv *priv,
error->log[i].time,
error->log[i].data, error->log[i].event);
}
#endif
static inline int ipw_is_init(struct ipw_priv *priv)
{
@ -1435,9 +1431,7 @@ static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
const char *buf, size_t count)
{
struct ipw_priv *priv = dev_get_drvdata(d);
#ifdef CONFIG_IPW2200_DEBUG
struct net_device *dev = priv->net_dev;
#endif
char buffer[] = "00000000";
unsigned long len =
(sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
@ -1958,14 +1952,12 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
IPW_WARNING("Firmware error detected. Restarting.\n");
if (priv->error) {
IPW_DEBUG_FW("Sysfs 'error' log already exists.\n");
#ifdef CONFIG_IPW2200_DEBUG
if (ipw_debug_level & IPW_DL_FW_ERRORS) {
struct ipw_fw_error *error =
ipw_alloc_error_log(priv);
ipw_dump_error_log(priv, error);
kfree(error);
}
#endif
} else {
priv->error = ipw_alloc_error_log(priv);
if (priv->error)
@ -1973,10 +1965,8 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
else
IPW_DEBUG_FW("Error allocating sysfs 'error' "
"log.\n");
#ifdef CONFIG_IPW2200_DEBUG
if (ipw_debug_level & IPW_DL_FW_ERRORS)
ipw_dump_error_log(priv, priv->error);
#endif
}
/* XXX: If hardware encryption is for WPA/WPA2,
@ -2287,7 +2277,7 @@ static int ipw_send_scan_abort(struct ipw_priv *priv)
static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
{
struct ipw_sensitivity_calib calib = {
.beacon_rssi_raw = sens,
.beacon_rssi_raw = cpu_to_le16(sens),
};
return ipw_send_cmd_pdu(priv, IPW_CMD_SENSITIVITY_CALIB, sizeof(calib),
@ -2353,6 +2343,7 @@ static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
return -1;
}
phy_off = cpu_to_le32(phy_off);
return ipw_send_cmd_pdu(priv, IPW_CMD_CARD_DISABLE, sizeof(phy_off),
&phy_off);
}
@ -2414,7 +2405,7 @@ static int ipw_set_tx_power(struct ipw_priv *priv)
static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
{
struct ipw_rts_threshold rts_threshold = {
.rts_threshold = rts,
.rts_threshold = cpu_to_le16(rts),
};
if (!priv) {
@ -2429,7 +2420,7 @@ static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
{
struct ipw_frag_threshold frag_threshold = {
.frag_threshold = frag,
.frag_threshold = cpu_to_le16(frag),
};
if (!priv) {
@ -2464,6 +2455,7 @@ static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
break;
}
param = cpu_to_le32(mode);
return ipw_send_cmd_pdu(priv, IPW_CMD_POWER_MODE, sizeof(param),
&param);
}
@ -3915,7 +3907,6 @@ static const struct ipw_status_code ipw_status_codes[] = {
{0x2E, "Cipher suite is rejected per security policy"},
};
#ifdef CONFIG_IPW2200_DEBUG
static const char *ipw_get_status_code(u16 status)
{
int i;
@ -3924,7 +3915,6 @@ static const char *ipw_get_status_code(u16 status)
return ipw_status_codes[i].reason;
return "Unknown status value.";
}
#endif
static void inline average_init(struct average *avg)
{
@ -4394,7 +4384,6 @@ static void ipw_rx_notification(struct ipw_priv *priv,
if (priv->
status & (STATUS_ASSOCIATED |
STATUS_AUTH)) {
#ifdef CONFIG_IPW2200_DEBUG
struct notif_authenticate *auth
= &notif->u.auth;
IPW_DEBUG(IPW_DL_NOTIF |
@ -4412,7 +4401,6 @@ static void ipw_rx_notification(struct ipw_priv *priv,
ipw_get_status_code
(ntohs
(auth->status)));
#endif
priv->status &=
~(STATUS_ASSOCIATING |
@ -5055,7 +5043,6 @@ static void ipw_rx_queue_replenish(void *data)
}
list_del(element);
rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
rxb->dma_addr =
pci_map_single(priv->pci_dev, rxb->skb->data,
IPW_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
@ -5834,8 +5821,8 @@ static void ipw_send_tgi_tx_key(struct ipw_priv *priv, int type, int index)
key.station_index = 0; /* always 0 for BSS */
key.flags = 0;
/* 0 for new key; previous value of counter (after fatal error) */
key.tx_counter[0] = 0;
key.tx_counter[1] = 0;
key.tx_counter[0] = cpu_to_le32(0);
key.tx_counter[1] = cpu_to_le32(0);
ipw_send_cmd_pdu(priv, IPW_CMD_TGI_TX_KEY, sizeof(key), &key);
}
@ -5969,7 +5956,6 @@ static void ipw_bg_adhoc_check(void *data)
mutex_unlock(&priv->mutex);
}
#ifdef CONFIG_IPW2200_DEBUG
static void ipw_debug_config(struct ipw_priv *priv)
{
IPW_DEBUG_INFO("Scan completed, no valid APs matched "
@ -5994,9 +5980,6 @@ static void ipw_debug_config(struct ipw_priv *priv)
IPW_DEBUG_INFO("PRIVACY off\n");
IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
}
#else
#define ipw_debug_config(x) do {} while (0)
#endif
static void ipw_set_fixed_rate(struct ipw_priv *priv, int mode)
{
@ -6184,7 +6167,7 @@ static void ipw_add_scan_channels(struct ipw_priv *priv,
}
}
static int ipw_request_scan(struct ipw_priv *priv)
static int ipw_request_scan_helper(struct ipw_priv *priv, int type)
{
struct ipw_scan_request_ext scan;
int err = 0, scan_type;
@ -6215,7 +6198,18 @@ static int ipw_request_scan(struct ipw_priv *priv)
}
memset(&scan, 0, sizeof(scan));
scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
if (type == IW_SCAN_TYPE_PASSIVE) {
IPW_DEBUG_WX("use passive scanning\n");
scan_type = IPW_SCAN_PASSIVE_FULL_DWELL_SCAN;
scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] =
cpu_to_le16(120);
ipw_add_scan_channels(priv, &scan, scan_type);
goto send_request;
}
/* Use active scan by default. */
if (priv->config & CFG_SPEED_SCAN)
scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] =
cpu_to_le16(30);
@ -6225,9 +6219,8 @@ static int ipw_request_scan(struct ipw_priv *priv)
scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] =
cpu_to_le16(20);
scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
scan.full_scan_index = cpu_to_le32(ieee80211_get_scans(priv->ieee));
scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = cpu_to_le16(120);
#ifdef CONFIG_IPW2200_MONITOR
if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
@ -6290,6 +6283,7 @@ static int ipw_request_scan(struct ipw_priv *priv)
}
#endif
send_request:
err = ipw_send_scan_request_ext(priv, &scan);
if (err) {
IPW_DEBUG_HC("Sending scan command failed: %08X\n", err);
@ -6305,6 +6299,14 @@ static int ipw_request_scan(struct ipw_priv *priv)
return err;
}
static int ipw_request_passive_scan(struct ipw_priv *priv) {
return ipw_request_scan_helper(priv, IW_SCAN_TYPE_PASSIVE);
}
static int ipw_request_scan(struct ipw_priv *priv) {
return ipw_request_scan_helper(priv, IW_SCAN_TYPE_ACTIVE);
}
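The refactor above funnels both scan flavours through one ipw_request_scan_helper() that branches early on the wireless-extensions scan type, leaving ipw_request_scan() and the new ipw_request_passive_scan() as thin wrappers. A minimal sketch of the same dispatch shape; the dwell times echo the values in the hunk above but are otherwise illustrative:

#include <stdio.h>

enum scan_type { SCAN_ACTIVE, SCAN_PASSIVE };

static int request_scan_helper(enum scan_type type)
{
        if (type == SCAN_PASSIVE) {
                /* Passive: long dwell, no probe requests sent. */
                printf("passive scan, dwell 120\n");
                return 0;
        }
        /* Active by default: short dwells with probe requests. */
        printf("active scan, dwell 20-30\n");
        return 0;
}

static int request_scan(void)
{
        return request_scan_helper(SCAN_ACTIVE);
}

static int request_passive_scan(void)
{
        return request_scan_helper(SCAN_PASSIVE);
}

int main(void)
{
        request_scan();
        request_passive_scan();
        return 0;
}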
static void ipw_bg_abort_scan(void *data)
{
struct ipw_priv *priv = data;
@ -6827,6 +6829,15 @@ static int ipw_qos_activate(struct ipw_priv *priv,
}
IPW_DEBUG_QOS("QoS sending IPW_CMD_QOS_PARAMETERS\n");
for (i = 0; i < 3; i++) {
int j;
for (j = 0; j < QOS_QUEUE_NUM; j++) {
qos_parameters[i].cw_min[j] = cpu_to_le16(qos_parameters[i].cw_min[j]);
qos_parameters[i].cw_max[j] = cpu_to_le16(qos_parameters[i].cw_max[j]);
qos_parameters[i].tx_op_limit[j] = cpu_to_le16(qos_parameters[i].tx_op_limit[j]);
}
}
err = ipw_send_qos_params_command(priv,
(struct ieee80211_qos_parameters *)
&(qos_parameters[0]));
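The loop added above byte-swaps every 16-bit QoS field in place before the block is handed to the firmware, which expects little-endian data; on little-endian hosts the swap is a no-op, on big-endian hosts it is what makes the command valid. A standalone sketch of the same in-place conversion, assuming a hypothetical three-set parameter table and queue count:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define QOS_QUEUE_NUM 4 /* assumed queue count for this sketch */

struct qos_parameters {
        uint16_t cw_min[QOS_QUEUE_NUM];
        uint16_t cw_max[QOS_QUEUE_NUM];
        uint16_t tx_op_limit[QOS_QUEUE_NUM];
};

/* Portable cpu_to_le16(): store low byte first, whatever the host is. */
static uint16_t cpu_to_le16(uint16_t v)
{
        uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };
        uint16_t out;

        memcpy(&out, b, sizeof(out));
        return out;
}

int main(void)
{
        struct qos_parameters qp[3] = { { .cw_min = { 3, 7, 15, 31 } } };
        int i, j;

        for (i = 0; i < 3; i++) {
                for (j = 0; j < QOS_QUEUE_NUM; j++) {
                        qp[i].cw_min[j] = cpu_to_le16(qp[i].cw_min[j]);
                        qp[i].cw_max[j] = cpu_to_le16(qp[i].cw_max[j]);
                        qp[i].tx_op_limit[j] = cpu_to_le16(qp[i].tx_op_limit[j]);
                }
        }
        printf("cw_min[0] as stored: 0x%04x\n", qp[0].cw_min[0]);
        return 0;
}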
@ -7065,7 +7076,7 @@ static int ipw_qos_set_tx_queue_command(struct ipw_priv *priv,
if (priv->qos_data.qos_no_ack_mask & (1UL << tx_queue_id)) {
tfd->tx_flags &= ~DCT_FLAG_ACK_REQD;
tfd->tfd.tfd_26.mchdr.qos_ctrl |= CTRL_QOS_NO_ACK;
tfd->tfd.tfd_26.mchdr.qos_ctrl |= cpu_to_le16(CTRL_QOS_NO_ACK);
}
return 0;
}
@ -7646,7 +7657,6 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
/* Big bitfield of all the fields we provide in radiotap */
ipw_rt->rt_hdr.it_present =
((1 << IEEE80211_RADIOTAP_FLAGS) |
(1 << IEEE80211_RADIOTAP_TSFT) |
(1 << IEEE80211_RADIOTAP_RATE) |
(1 << IEEE80211_RADIOTAP_CHANNEL) |
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
@ -7655,6 +7665,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
/* Zero the flags, we'll add to them as we go */
ipw_rt->rt_flags = 0;
ipw_rt->rt_tsf = 0ULL;
/* Convert signal to DBM */
ipw_rt->rt_dbmsignal = antsignal;
@ -7773,7 +7784,6 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
s8 noise = frame->noise;
u8 rate = frame->rate;
short len = le16_to_cpu(pkt->u.frame.length);
u64 tsf = 0;
struct sk_buff *skb;
int hdr_only = 0;
u16 filter = priv->prom_priv->filter;
@ -7808,17 +7818,17 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
}
hdr = (void *)rxb->skb->data + IPW_RX_FRAME_SIZE;
if (ieee80211_is_management(hdr->frame_ctl)) {
if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
if (filter & IPW_PROM_NO_MGMT)
return;
if (filter & IPW_PROM_MGMT_HEADER_ONLY)
hdr_only = 1;
} else if (ieee80211_is_control(hdr->frame_ctl)) {
} else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
if (filter & IPW_PROM_NO_CTL)
return;
if (filter & IPW_PROM_CTL_HEADER_ONLY)
hdr_only = 1;
} else if (ieee80211_is_data(hdr->frame_ctl)) {
} else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
if (filter & IPW_PROM_NO_DATA)
return;
if (filter & IPW_PROM_DATA_HEADER_ONLY)
@ -7836,7 +7846,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
ipw_rt = (void *)skb->data;
if (hdr_only)
len = ieee80211_get_hdrlen(hdr->frame_ctl);
len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
memcpy(ipw_rt->payload, hdr, len);
@ -7859,7 +7869,6 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
/* Big bitfield of all the fields we provide in radiotap */
ipw_rt->rt_hdr.it_present =
((1 << IEEE80211_RADIOTAP_FLAGS) |
(1 << IEEE80211_RADIOTAP_TSFT) |
(1 << IEEE80211_RADIOTAP_RATE) |
(1 << IEEE80211_RADIOTAP_CHANNEL) |
(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
@ -7868,8 +7877,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
/* Zero the flags, we'll add to them as we go */
ipw_rt->rt_flags = 0;
ipw_rt->rt_tsf = tsf;
ipw_rt->rt_tsf = 0ULL;
/* Convert to DBM */
ipw_rt->rt_dbmsignal = signal;
@ -8142,8 +8150,7 @@ static void ipw_rx(struct ipw_priv *priv)
switch (pkt->header.message_type) {
case RX_FRAME_TYPE: /* 802.11 frame */ {
struct ieee80211_rx_stats stats = {
.rssi =
le16_to_cpu(pkt->u.frame.rssi_dbm) -
.rssi = pkt->u.frame.rssi_dbm -
IPW_RSSI_TO_DBM,
.signal =
le16_to_cpu(pkt->u.frame.rssi_dbm) -
@ -8578,9 +8585,26 @@ static int ipw_wx_get_freq(struct net_device *dev,
* configured CHANNEL then return that; otherwise return ANY */
mutex_lock(&priv->mutex);
if (priv->config & CFG_STATIC_CHANNEL ||
priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
wrqu->freq.m = priv->channel;
else
priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) {
int i;
i = ieee80211_channel_to_index(priv->ieee, priv->channel);
BUG_ON(i == -1);
wrqu->freq.e = 1;
switch (ieee80211_is_valid_channel(priv->ieee, priv->channel)) {
case IEEE80211_52GHZ_BAND:
wrqu->freq.m = priv->ieee->geo.a[i].freq * 100000;
break;
case IEEE80211_24GHZ_BAND:
wrqu->freq.m = priv->ieee->geo.bg[i].freq * 100000;
break;
default:
BUG();
}
} else
wrqu->freq.m = 0;
mutex_unlock(&priv->mutex);
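With the change above, get_freq reports a real frequency instead of a bare channel index. Wireless extensions encode frequency as m x 10^e, so the driver stores the geo table's MHz value times 100000 with e = 1; channel 1 at 2412 MHz becomes m = 241200000, giving 2.412 GHz. A sketch of the encoding; the 2412 MHz figure is a worked example, not taken from the hunk:

#include <stdio.h>
#include <stdint.h>

/* Models the m/e pair used by the wireless extensions. */
struct freq_enc { int32_t m; int16_t e; };

static struct freq_enc encode_mhz(int32_t mhz)
{
        struct freq_enc f = { .m = mhz * 100000, .e = 1 };
        return f;
}

static double decode_hz(struct freq_enc f)
{
        double v = f.m;
        int i;

        for (i = 0; i < f.e; i++)
                v *= 10.0;
        return v;
}

int main(void)
{
        struct freq_enc f = encode_mhz(2412); /* 2.4 GHz channel 1 */
        printf("m=%d e=%d -> %.0f Hz\n", f.m, f.e, decode_hz(f));
        return 0;
}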
@ -8836,42 +8860,38 @@ static int ipw_wx_set_essid(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct ipw_priv *priv = ieee80211_priv(dev);
char *essid = ""; /* ANY */
int length = 0;
int length;
mutex_lock(&priv->mutex);
if (wrqu->essid.flags && wrqu->essid.length) {
length = wrqu->essid.length - 1;
essid = extra;
}
if (length == 0) {
if (!wrqu->essid.flags)
{
IPW_DEBUG_WX("Setting ESSID to ANY\n");
if ((priv->config & CFG_STATIC_ESSID) &&
!(priv->status & (STATUS_ASSOCIATED |
STATUS_ASSOCIATING))) {
IPW_DEBUG_ASSOC("Attempting to associate with new "
"parameters.\n");
ipw_disassociate(priv);
priv->config &= ~CFG_STATIC_ESSID;
ipw_associate(priv);
}
mutex_unlock(&priv->mutex);
return 0;
}
length = min(length, IW_ESSID_MAX_SIZE);
length = min((int)wrqu->essid.length, IW_ESSID_MAX_SIZE);
if (!extra[length - 1])
length--;
priv->config |= CFG_STATIC_ESSID;
if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
if (priv->essid_len == length && !memcmp(priv->essid, extra, length)
&& (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) {
IPW_DEBUG_WX("ESSID set to current ESSID.\n");
mutex_unlock(&priv->mutex);
return 0;
}
IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(extra, length),
length);
priv->essid_len = length;
memcpy(priv->essid, essid, priv->essid_len);
memcpy(priv->essid, extra, priv->essid_len);
/* Network configuration changed -- force [re]association */
IPW_DEBUG_ASSOC("[re]association triggered due to ESSID change.\n");
@ -9252,7 +9272,7 @@ static int ipw_wx_set_retry(struct net_device *dev,
if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
return 0;
if (wrqu->retry.value < 0 || wrqu->retry.value > 255)
if (wrqu->retry.value < 0 || wrqu->retry.value >= 255)
return -EINVAL;
mutex_lock(&priv->mutex);
@ -9375,15 +9395,19 @@ static int ipw_wx_set_scan(struct net_device *dev,
union iwreq_data *wrqu, char *extra)
{
struct ipw_priv *priv = ieee80211_priv(dev);
struct iw_scan_req *req = NULL;
if (wrqu->data.length
&& wrqu->data.length == sizeof(struct iw_scan_req)) {
req = (struct iw_scan_req *)extra;
struct iw_scan_req *req = (struct iw_scan_req *)extra;
if (wrqu->data.length == sizeof(struct iw_scan_req)) {
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
ipw_request_direct_scan(priv, req->essid,
req->essid_len);
return 0;
}
if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
queue_work(priv->workqueue,
&priv->request_passive_scan);
return 0;
}
}
IPW_DEBUG_WX("Start scan\n");
@ -10092,7 +10116,7 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
switch (priv->ieee->sec.level) {
case SEC_LEVEL_3:
tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
IEEE80211_FCTL_PROTECTED;
cpu_to_le16(IEEE80211_FCTL_PROTECTED);
/* XXX: ACK flag must be set for CCMP even if it
* is a multicast/broadcast packet, because CCMP
* group communication encrypted by GTK is
@ -10107,14 +10131,14 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
break;
case SEC_LEVEL_2:
tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
IEEE80211_FCTL_PROTECTED;
cpu_to_le16(IEEE80211_FCTL_PROTECTED);
tfd->u.data.tx_flags &= ~DCT_FLAG_NO_WEP;
tfd->u.data.tx_flags_ext |= DCT_FLAG_EXT_SECURITY_TKIP;
tfd->u.data.key_index = DCT_WEP_INDEX_USE_IMMEDIATE;
break;
case SEC_LEVEL_1:
tfd->u.data.tfd.tfd_24.mchdr.frame_ctl |=
IEEE80211_FCTL_PROTECTED;
cpu_to_le16(IEEE80211_FCTL_PROTECTED);
tfd->u.data.key_index = priv->ieee->tx_keyidx;
if (priv->ieee->sec.key_sizes[priv->ieee->tx_keyidx] <=
40)
@ -10246,17 +10270,17 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
/* Filtering of fragment chains is done against the first fragment */
hdr = (void *)txb->fragments[0]->data;
if (ieee80211_is_management(hdr->frame_ctl)) {
if (ieee80211_is_management(le16_to_cpu(hdr->frame_ctl))) {
if (filter & IPW_PROM_NO_MGMT)
return;
if (filter & IPW_PROM_MGMT_HEADER_ONLY)
hdr_only = 1;
} else if (ieee80211_is_control(hdr->frame_ctl)) {
} else if (ieee80211_is_control(le16_to_cpu(hdr->frame_ctl))) {
if (filter & IPW_PROM_NO_CTL)
return;
if (filter & IPW_PROM_CTL_HEADER_ONLY)
hdr_only = 1;
} else if (ieee80211_is_data(hdr->frame_ctl)) {
} else if (ieee80211_is_data(le16_to_cpu(hdr->frame_ctl))) {
if (filter & IPW_PROM_NO_DATA)
return;
if (filter & IPW_PROM_DATA_HEADER_ONLY)
@ -10271,7 +10295,7 @@ static void ipw_handle_promiscuous_tx(struct ipw_priv *priv,
if (hdr_only) {
hdr = (void *)src->data;
len = ieee80211_get_hdrlen(hdr->frame_ctl);
len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
} else
len = src->len;
@ -10615,6 +10639,8 @@ static int ipw_setup_deferred_work(struct ipw_priv *priv)
INIT_WORK(&priv->down, (void (*)(void *))ipw_bg_down, priv);
INIT_WORK(&priv->request_scan,
(void (*)(void *))ipw_request_scan, priv);
INIT_WORK(&priv->request_passive_scan,
(void (*)(void *))ipw_request_passive_scan, priv);
INIT_WORK(&priv->gather_stats,
(void (*)(void *))ipw_bg_gather_stats, priv);
INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_bg_abort_scan, priv);
@ -11467,9 +11493,7 @@ static int ipw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
priv->net_dev = net_dev;
priv->pci_dev = pdev;
#ifdef CONFIG_IPW2200_DEBUG
ipw_debug_level = debug;
#endif
spin_lock_init(&priv->irq_lock);
spin_lock_init(&priv->lock);
for (i = 0; i < IPW_IBSS_MAC_HASH_SIZE; i++)
@ -11734,6 +11758,16 @@ static int ipw_pci_resume(struct pci_dev *pdev)
}
#endif
static void ipw_pci_shutdown(struct pci_dev *pdev)
{
struct ipw_priv *priv = pci_get_drvdata(pdev);
/* Take down the device; powers it off, etc. */
ipw_down(priv);
pci_disable_device(pdev);
}
/* driver initialization stuff */
static struct pci_driver ipw_driver = {
.name = DRV_NAME,
@ -11744,6 +11778,7 @@ static struct pci_driver ipw_driver = {
.suspend = ipw_pci_suspend,
.resume = ipw_pci_resume,
#endif
.shutdown = ipw_pci_shutdown,
};
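The new hook above gives the driver a say at system shutdown and kexec time: ipw_down() quiesces the hardware before pci_disable_device(), so the adapter cannot keep writing into memory owned by the next kernel. A skeletal sketch of wiring a .shutdown callback into a pci_driver; the mydrv_* names are hypothetical and the probe/remove plumbing is omitted:

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical teardown; stands in for ipw_down(). */
static void mydrv_quiesce(struct pci_dev *pdev)
{
        /* stop DMA engines, mask interrupts, cancel timers ... */
}

static void mydrv_shutdown(struct pci_dev *pdev)
{
        mydrv_quiesce(pdev);
        pci_disable_device(pdev);
}

static struct pci_driver mydrv_driver = {
        .name     = "mydrv",
        /* .id_table, .probe and .remove omitted from this sketch */
        .shutdown = mydrv_shutdown,
};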
static int __init ipw_init(void)
@ -11787,10 +11822,8 @@ MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
module_param(led, int, 0444);
MODULE_PARM_DESC(led, "enable led control on some systems (default 0 off)\n");
#ifdef CONFIG_IPW2200_DEBUG
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "debug output mask");
#endif
module_param(channel, int, 0444);
MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");


@ -713,7 +713,6 @@ struct ipw_rx_packet {
struct ipw_rx_mem_buffer {
dma_addr_t dma_addr;
struct ipw_rx_buffer *rxb;
struct sk_buff *skb;
struct list_head list;
}; /* Not transferred over network, so not __attribute__ ((packed)) */
@ -1297,6 +1296,7 @@ struct ipw_priv {
struct work_struct system_config;
struct work_struct rx_replenish;
struct work_struct request_scan;
struct work_struct request_passive_scan;
struct work_struct adapter_restart;
struct work_struct rf_kill;
struct work_struct up;
@ -1381,13 +1381,18 @@ BITC(x,19),BITC(x,18),BITC(x,17),BITC(x,16),\
BIT_ARG16(x)
#ifdef CONFIG_IPW2200_DEBUG
#define IPW_DEBUG(level, fmt, args...) \
do { if (ipw_debug_level & (level)) \
printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
#ifdef CONFIG_IPW2200_DEBUG
#define IPW_LL_DEBUG(level, fmt, args...) \
do { if (ipw_debug_level & (level)) \
printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
#else
#define IPW_DEBUG(level, fmt, args...) do {} while (0)
#define IPW_LL_DEBUG(level, fmt, args...) do {} while (0)
#endif /* CONFIG_IPW2200_DEBUG */
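The split above keeps IPW_DEBUG compiled in unconditionally while routing hot-path messages (Tx, Rx, ISR and friends, remapped a few lines below) through IPW_LL_DEBUG, which collapses to nothing unless CONFIG_IPW2200_DEBUG is set; ordinary builds keep the useful diagnostics without paying for the verbose ones. A userspace sketch of the same two-tier pattern, gated here by a hypothetical LL_DEBUG build switch:

#include <stdio.h>

static unsigned int debug_level = 0x1; /* runtime mask */

#define MY_DEBUG(level, fmt, ...) \
        do { if (debug_level & (level)) \
                printf("drv: " fmt, ##__VA_ARGS__); } while (0)

/* Hot-path variant: compiled away entirely unless LL_DEBUG is defined. */
#ifdef LL_DEBUG
#define MY_LL_DEBUG(level, fmt, ...) MY_DEBUG(level, fmt, ##__VA_ARGS__)
#else
#define MY_LL_DEBUG(level, fmt, ...) do { } while (0)
#endif

int main(void)
{
        MY_DEBUG(0x1, "always-available message (%d)\n", 42);
        MY_LL_DEBUG(0x2, "hot-path message, build-time gated\n");
        return 0;
}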
/*
@ -1457,28 +1462,27 @@ do { if (ipw_debug_level & (level)) \
#define IPW_DEBUG_WX(f, a...) IPW_DEBUG(IPW_DL_WX, f, ## a)
#define IPW_DEBUG_SCAN(f, a...) IPW_DEBUG(IPW_DL_SCAN, f, ## a)
#define IPW_DEBUG_STATUS(f, a...) IPW_DEBUG(IPW_DL_STATUS, f, ## a)
#define IPW_DEBUG_TRACE(f, a...) IPW_DEBUG(IPW_DL_TRACE, f, ## a)
#define IPW_DEBUG_RX(f, a...) IPW_DEBUG(IPW_DL_RX, f, ## a)
#define IPW_DEBUG_TX(f, a...) IPW_DEBUG(IPW_DL_TX, f, ## a)
#define IPW_DEBUG_ISR(f, a...) IPW_DEBUG(IPW_DL_ISR, f, ## a)
#define IPW_DEBUG_TRACE(f, a...) IPW_LL_DEBUG(IPW_DL_TRACE, f, ## a)
#define IPW_DEBUG_RX(f, a...) IPW_LL_DEBUG(IPW_DL_RX, f, ## a)
#define IPW_DEBUG_TX(f, a...) IPW_LL_DEBUG(IPW_DL_TX, f, ## a)
#define IPW_DEBUG_ISR(f, a...) IPW_LL_DEBUG(IPW_DL_ISR, f, ## a)
#define IPW_DEBUG_MANAGEMENT(f, a...) IPW_DEBUG(IPW_DL_MANAGE, f, ## a)
#define IPW_DEBUG_LED(f, a...) IPW_DEBUG(IPW_DL_LED, f, ## a)
#define IPW_DEBUG_WEP(f, a...) IPW_DEBUG(IPW_DL_WEP, f, ## a)
#define IPW_DEBUG_HC(f, a...) IPW_DEBUG(IPW_DL_HOST_COMMAND, f, ## a)
#define IPW_DEBUG_FRAG(f, a...) IPW_DEBUG(IPW_DL_FRAG, f, ## a)
#define IPW_DEBUG_FW(f, a...) IPW_DEBUG(IPW_DL_FW, f, ## a)
#define IPW_DEBUG_LED(f, a...) IPW_LL_DEBUG(IPW_DL_LED, f, ## a)
#define IPW_DEBUG_WEP(f, a...) IPW_LL_DEBUG(IPW_DL_WEP, f, ## a)
#define IPW_DEBUG_HC(f, a...) IPW_LL_DEBUG(IPW_DL_HOST_COMMAND, f, ## a)
#define IPW_DEBUG_FRAG(f, a...) IPW_LL_DEBUG(IPW_DL_FRAG, f, ## a)
#define IPW_DEBUG_FW(f, a...) IPW_LL_DEBUG(IPW_DL_FW, f, ## a)
#define IPW_DEBUG_RF_KILL(f, a...) IPW_DEBUG(IPW_DL_RF_KILL, f, ## a)
#define IPW_DEBUG_DROP(f, a...) IPW_DEBUG(IPW_DL_DROP, f, ## a)
#define IPW_DEBUG_IO(f, a...) IPW_DEBUG(IPW_DL_IO, f, ## a)
#define IPW_DEBUG_ORD(f, a...) IPW_DEBUG(IPW_DL_ORD, f, ## a)
#define IPW_DEBUG_FW_INFO(f, a...) IPW_DEBUG(IPW_DL_FW_INFO, f, ## a)
#define IPW_DEBUG_IO(f, a...) IPW_LL_DEBUG(IPW_DL_IO, f, ## a)
#define IPW_DEBUG_ORD(f, a...) IPW_LL_DEBUG(IPW_DL_ORD, f, ## a)
#define IPW_DEBUG_FW_INFO(f, a...) IPW_LL_DEBUG(IPW_DL_FW_INFO, f, ## a)
#define IPW_DEBUG_NOTIF(f, a...) IPW_DEBUG(IPW_DL_NOTIF, f, ## a)
#define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
#define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
#define IPW_DEBUG_STATS(f, a...) IPW_DEBUG(IPW_DL_STATS, f, ## a)
#define IPW_DEBUG_MERGE(f, a...) IPW_DEBUG(IPW_DL_MERGE, f, ## a)
#define IPW_DEBUG_QOS(f, a...) IPW_DEBUG(IPW_DL_QOS, f, ## a)
#define IPW_DEBUG_STATS(f, a...) IPW_LL_DEBUG(IPW_DL_STATS, f, ## a)
#define IPW_DEBUG_MERGE(f, a...) IPW_LL_DEBUG(IPW_DL_MERGE, f, ## a)
#define IPW_DEBUG_QOS(f, a...) IPW_LL_DEBUG(IPW_DL_QOS, f, ## a)
#include <linux/ctype.h>
@ -1947,10 +1951,17 @@ struct host_cmd {
u32 *param;
} __attribute__ ((packed));
struct cmdlog_host_cmd {
u8 cmd;
u8 len;
u16 reserved;
char param[124];
} __attribute__ ((packed));
struct ipw_cmd_log {
unsigned long jiffies;
int retcode;
struct host_cmd cmd;
struct cmdlog_host_cmd cmd;
};
/* SysConfig command parameters ... */


@ -82,6 +82,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <net/ieee80211.h>


@ -134,11 +134,7 @@ extern irqreturn_t orinoco_interrupt(int irq, void * dev_id, struct pt_regs *reg
/* Locking and synchronization functions */
/********************************************************************/
/* These functions *must* be inline or they will break horribly on
* SPARC, due to its weird semantics for save/restore flags. extern
* inline should prevent the kernel from linking or module from
* loading if they are not inlined. */
extern inline int orinoco_lock(struct orinoco_private *priv,
static inline int orinoco_lock(struct orinoco_private *priv,
unsigned long *flags)
{
spin_lock_irqsave(&priv->lock, *flags);
@ -151,7 +147,7 @@ extern inline int orinoco_lock(struct orinoco_private *priv,
return 0;
}
extern inline void orinoco_unlock(struct orinoco_private *priv,
static inline void orinoco_unlock(struct orinoco_private *priv,
unsigned long *flags)
{
spin_unlock_irqrestore(&priv->lock, *flags);


@ -271,6 +271,27 @@ static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
return 0;
}
/*
* Deal with sequence counter wrapping correctly;
* refer to time_after() for the analogous jiffies wrapping handling.
*/
static inline int ccmp_replay_check(u8 *pn_n, u8 *pn_o)
{
u32 iv32_n, iv16_n;
u32 iv32_o, iv16_o;
iv32_n = (pn_n[0] << 24) | (pn_n[1] << 16) | (pn_n[2] << 8) | pn_n[3];
iv16_n = (pn_n[4] << 8) | pn_n[5];
iv32_o = (pn_o[0] << 24) | (pn_o[1] << 16) | (pn_o[2] << 8) | pn_o[3];
iv16_o = (pn_o[4] << 8) | pn_o[5];
if ((s32)iv32_n - (s32)iv32_o < 0 ||
(iv32_n == iv32_o && iv16_n <= iv16_o))
return 1;
return 0;
}
static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct ieee80211_ccmp_data *key = priv;
@ -323,7 +344,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
pn[5] = pos[0];
pos += 8;
if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) {
if (ccmp_replay_check(pn, key->rx_pn)) {
if (net_ratelimit()) {
printk(KERN_DEBUG "CCMP: replay detected: STA=" MAC_FMT
" previous PN %02x%02x%02x%02x%02x%02x "

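ccmp_replay_check() above replaces the flat memcmp() on the 48-bit packet number with wrap-aware arithmetic, the same idea as the kernel's time_after() for jiffies: after splitting the PN into a 32-bit and a 16-bit half, a subtraction evaluated as signed stays correct across the 32-bit wrap point, where a byte-wise compare would misfire. A standalone sketch with a worked wrap case; the helper mirrors the logic above but uses an overflow-safe unsigned subtract:

#include <stdio.h>
#include <stdint.h>

/* Returns 1 if (iv32_n, iv16_n) is not strictly newer, i.e. a replay. */
static int replay_check(uint32_t iv32_n, uint32_t iv16_n,
                        uint32_t iv32_o, uint32_t iv16_o)
{
        if ((int32_t)(iv32_n - iv32_o) < 0 ||
            (iv32_n == iv32_o && iv16_n <= iv16_o))
                return 1;
        return 0;
}

int main(void)
{
        /* Old IV32 at the top of its range, new one just wrapped:
         * genuinely newer, so not a replay (a memcmp would say replay). */
        printf("wrap case  -> %d\n",
               replay_check(0x00000000, 1, 0xffffffff, 5)); /* 0 */
        /* Counter did not advance: replay. */
        printf("stale case -> %d\n",
               replay_check(7, 3, 7, 3));                   /* 1 */
        return 0;
}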

@ -52,8 +52,10 @@ struct ieee80211_tkip_data {
int key_idx;
struct crypto_tfm *tfm_arc4;
struct crypto_tfm *tfm_michael;
struct crypto_tfm *tx_tfm_arc4;
struct crypto_tfm *tx_tfm_michael;
struct crypto_tfm *rx_tfm_arc4;
struct crypto_tfm *rx_tfm_michael;
/* scratch buffers for virt_to_page() (crypto API) */
u8 rx_hdr[16], tx_hdr[16];
@ -85,15 +87,29 @@ static void *ieee80211_tkip_init(int key_idx)
priv->key_idx = key_idx;
priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0);
if (priv->tfm_arc4 == NULL) {
priv->tx_tfm_arc4 = crypto_alloc_tfm("arc4", 0);
if (priv->tx_tfm_arc4 == NULL) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API arc4\n");
goto fail;
}
priv->tfm_michael = crypto_alloc_tfm("michael_mic", 0);
if (priv->tfm_michael == NULL) {
priv->tx_tfm_michael = crypto_alloc_tfm("michael_mic", 0);
if (priv->tx_tfm_michael == NULL) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API michael_mic\n");
goto fail;
}
priv->rx_tfm_arc4 = crypto_alloc_tfm("arc4", 0);
if (priv->rx_tfm_arc4 == NULL) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API arc4\n");
goto fail;
}
priv->rx_tfm_michael = crypto_alloc_tfm("michael_mic", 0);
if (priv->rx_tfm_michael == NULL) {
printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
"crypto API michael_mic\n");
goto fail;
@ -103,10 +119,14 @@ static void *ieee80211_tkip_init(int key_idx)
fail:
if (priv) {
if (priv->tfm_michael)
crypto_free_tfm(priv->tfm_michael);
if (priv->tfm_arc4)
crypto_free_tfm(priv->tfm_arc4);
if (priv->tx_tfm_michael)
crypto_free_tfm(priv->tx_tfm_michael);
if (priv->tx_tfm_arc4)
crypto_free_tfm(priv->tx_tfm_arc4);
if (priv->rx_tfm_michael)
crypto_free_tfm(priv->rx_tfm_michael);
if (priv->rx_tfm_arc4)
crypto_free_tfm(priv->rx_tfm_arc4);
kfree(priv);
}
@ -116,10 +136,16 @@ static void *ieee80211_tkip_init(int key_idx)
static void ieee80211_tkip_deinit(void *priv)
{
struct ieee80211_tkip_data *_priv = priv;
if (_priv && _priv->tfm_michael)
crypto_free_tfm(_priv->tfm_michael);
if (_priv && _priv->tfm_arc4)
crypto_free_tfm(_priv->tfm_arc4);
if (_priv) {
if (_priv->tx_tfm_michael)
crypto_free_tfm(_priv->tx_tfm_michael);
if (_priv->tx_tfm_arc4)
crypto_free_tfm(_priv->tx_tfm_arc4);
if (_priv->rx_tfm_michael)
crypto_free_tfm(_priv->rx_tfm_michael);
if (_priv->rx_tfm_arc4)
crypto_free_tfm(_priv->rx_tfm_arc4);
}
kfree(priv);
}
@ -351,15 +377,28 @@ static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16);
crypto_cipher_setkey(tkey->tx_tfm_arc4, rc4key, 16);
sg.page = virt_to_page(pos);
sg.offset = offset_in_page(pos);
sg.length = len + 4;
crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4);
crypto_cipher_encrypt(tkey->tx_tfm_arc4, &sg, &sg, len + 4);
return 0;
}
/*
* Deal with sequence counter wrapping correctly;
* refer to time_after() for the analogous jiffies wrapping handling.
*/
static inline int tkip_replay_check(u32 iv32_n, u16 iv16_n,
u32 iv32_o, u16 iv16_o)
{
if ((s32)iv32_n - (s32)iv32_o < 0 ||
(iv32_n == iv32_o && iv16_n <= iv16_o))
return 1;
return 0;
}
static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
@ -414,8 +453,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
pos += 8;
if (iv32 < tkey->rx_iv32 ||
(iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) {
if (net_ratelimit()) {
printk(KERN_DEBUG "TKIP: replay detected: STA=" MAC_FMT
" previous TSC %08x%04x received TSC "
@ -434,11 +472,11 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
plen = skb->len - hdr_len - 12;
crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16);
crypto_cipher_setkey(tkey->rx_tfm_arc4, rc4key, 16);
sg.page = virt_to_page(pos);
sg.offset = offset_in_page(pos);
sg.length = plen + 4;
crypto_cipher_decrypt(tkey->tfm_arc4, &sg, &sg, plen + 4);
crypto_cipher_decrypt(tkey->rx_tfm_arc4, &sg, &sg, plen + 4);
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;
@ -472,12 +510,12 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
return keyidx;
}
static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr,
static int michael_mic(struct crypto_tfm *tfm_michael, u8 * key, u8 * hdr,
u8 * data, size_t data_len, u8 * mic)
{
struct scatterlist sg[2];
if (tkey->tfm_michael == NULL) {
if (tfm_michael == NULL) {
printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
return -1;
}
@ -489,10 +527,10 @@ static int michael_mic(struct ieee80211_tkip_data *tkey, u8 * key, u8 * hdr,
sg[1].offset = offset_in_page(data);
sg[1].length = data_len;
crypto_digest_init(tkey->tfm_michael);
crypto_digest_setkey(tkey->tfm_michael, key, 8);
crypto_digest_update(tkey->tfm_michael, sg, 2);
crypto_digest_final(tkey->tfm_michael, mic);
crypto_digest_init(tfm_michael);
crypto_digest_setkey(tfm_michael, key, 8);
crypto_digest_update(tfm_michael, sg, 2);
crypto_digest_final(tfm_michael, mic);
return 0;
}
@ -528,7 +566,7 @@ static void michael_mic_hdr(struct sk_buff *skb, u8 * hdr)
if (stype & IEEE80211_STYPE_QOS_DATA) {
const struct ieee80211_hdr_3addrqos *qoshdr =
(struct ieee80211_hdr_3addrqos *)skb->data;
hdr[12] = le16_to_cpu(qoshdr->qos_ctl) & IEEE80211_QCTL_TID;
hdr[12] = qoshdr->qos_ctl & cpu_to_le16(IEEE80211_QCTL_TID);
} else
hdr[12] = 0; /* priority */
@ -550,7 +588,7 @@ static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len,
michael_mic_hdr(skb, tkey->tx_hdr);
pos = skb_put(skb, 8);
if (michael_mic(tkey, &tkey->key[16], tkey->tx_hdr,
if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr,
skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
return -1;
@ -588,7 +626,7 @@ static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
return -1;
michael_mic_hdr(skb, tkey->rx_hdr);
if (michael_mic(tkey, &tkey->key[24], tkey->rx_hdr,
if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr,
skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
return -1;
if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
@ -618,14 +656,18 @@ static int ieee80211_tkip_set_key(void *key, int len, u8 * seq, void *priv)
{
struct ieee80211_tkip_data *tkey = priv;
int keyidx;
struct crypto_tfm *tfm = tkey->tfm_michael;
struct crypto_tfm *tfm2 = tkey->tfm_arc4;
struct crypto_tfm *tfm = tkey->tx_tfm_michael;
struct crypto_tfm *tfm2 = tkey->tx_tfm_arc4;
struct crypto_tfm *tfm3 = tkey->rx_tfm_michael;
struct crypto_tfm *tfm4 = tkey->rx_tfm_arc4;
keyidx = tkey->key_idx;
memset(tkey, 0, sizeof(*tkey));
tkey->key_idx = keyidx;
tkey->tfm_michael = tfm;
tkey->tfm_arc4 = tfm2;
tkey->tx_tfm_michael = tfm;
tkey->tx_tfm_arc4 = tfm2;
tkey->rx_tfm_michael = tfm3;
tkey->rx_tfm_arc4 = tfm4;
if (len == TKIP_KEY_LEN) {
memcpy(tkey->key, key, TKIP_KEY_LEN);
tkey->key_set = 1;

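The recurring change in the TKIP code above, repeated for WEP below, splits each shared crypto_tfm into a tx/rx pair. ARC4 state is keyed immediately before each use, so transmit and receive running concurrently on one transform can re-key it out from under each other; per-direction transforms remove the shared mutable state. A compact sketch of the ownership split, using a hypothetical cipher context in place of a crypto_tfm:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stream-cipher context standing in for a crypto_tfm. */
struct cipher_ctx { unsigned char key[16]; };

struct tkip_data {
        struct cipher_ctx *tx_arc4; /* touched only on the transmit path */
        struct cipher_ctx *rx_arc4; /* touched only on the receive path */
};

static struct cipher_ctx *cipher_alloc(void)
{
        return calloc(1, sizeof(struct cipher_ctx));
}

static void cipher_setkey(struct cipher_ctx *c, const unsigned char *key)
{
        memcpy(c->key, key, sizeof(c->key));
}

int main(void)
{
        struct tkip_data t = { cipher_alloc(), cipher_alloc() };
        unsigned char txkey[16] = { 1 }, rxkey[16] = { 2 };

        if (!t.tx_arc4 || !t.rx_arc4)
                return 1;
        /* Each direction keys its own context; no cross-talk. */
        cipher_setkey(t.tx_arc4, txkey);
        cipher_setkey(t.rx_arc4, rxkey);
        printf("tx key[0]=%u rx key[0]=%u\n",
               t.tx_arc4->key[0], t.rx_arc4->key[0]);
        free(t.tx_arc4);
        free(t.rx_arc4);
        return 0;
}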

@ -32,7 +32,8 @@ struct prism2_wep_data {
u8 key[WEP_KEY_LEN + 1];
u8 key_len;
u8 key_idx;
struct crypto_tfm *tfm;
struct crypto_tfm *tx_tfm;
struct crypto_tfm *rx_tfm;
};
static void *prism2_wep_init(int keyidx)
@ -44,13 +45,19 @@ static void *prism2_wep_init(int keyidx)
goto fail;
priv->key_idx = keyidx;
priv->tfm = crypto_alloc_tfm("arc4", 0);
if (priv->tfm == NULL) {
priv->tx_tfm = crypto_alloc_tfm("arc4", 0);
if (priv->tx_tfm == NULL) {
printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
"crypto API arc4\n");
goto fail;
}
priv->rx_tfm = crypto_alloc_tfm("arc4", 0);
if (priv->rx_tfm == NULL) {
printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
"crypto API arc4\n");
goto fail;
}
/* start WEP IV from a random value */
get_random_bytes(&priv->iv, 4);
@ -58,8 +65,10 @@ static void *prism2_wep_init(int keyidx)
fail:
if (priv) {
if (priv->tfm)
crypto_free_tfm(priv->tfm);
if (priv->tx_tfm)
crypto_free_tfm(priv->tx_tfm);
if (priv->rx_tfm)
crypto_free_tfm(priv->rx_tfm);
kfree(priv);
}
return NULL;
@ -68,8 +77,12 @@ static void *prism2_wep_init(int keyidx)
static void prism2_wep_deinit(void *priv)
{
struct prism2_wep_data *_priv = priv;
if (_priv && _priv->tfm)
crypto_free_tfm(_priv->tfm);
if (_priv) {
if (_priv->tx_tfm)
crypto_free_tfm(_priv->tx_tfm);
if (_priv->rx_tfm)
crypto_free_tfm(_priv->rx_tfm);
}
kfree(priv);
}
@ -151,11 +164,11 @@ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
icv[2] = crc >> 16;
icv[3] = crc >> 24;
crypto_cipher_setkey(wep->tfm, key, klen);
crypto_cipher_setkey(wep->tx_tfm, key, klen);
sg.page = virt_to_page(pos);
sg.offset = offset_in_page(pos);
sg.length = len + 4;
crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4);
crypto_cipher_encrypt(wep->tx_tfm, &sg, &sg, len + 4);
return 0;
}
@ -194,11 +207,11 @@ static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
/* Apply RC4 to data and compute CRC32 over decrypted data */
plen = skb->len - hdr_len - 8;
crypto_cipher_setkey(wep->tfm, key, klen);
crypto_cipher_setkey(wep->rx_tfm, key, klen);
sg.page = virt_to_page(pos);
sg.offset = offset_in_page(pos);
sg.length = plen + 4;
crypto_cipher_decrypt(wep->tfm, &sg, &sg, plen + 4);
crypto_cipher_decrypt(wep->rx_tfm, &sg, &sg, plen + 4);
crc = ~crc32_le(~0, pos, plen);
icv[0] = crc;


@ -1078,13 +1078,16 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element
while (length >= sizeof(*info_element)) {
if (sizeof(*info_element) + info_element->len > length) {
IEEE80211_DEBUG_MGMT("Info elem: parse failed: "
IEEE80211_ERROR("Info elem: parse failed: "
"info_element->len + 2 > left : "
"info_element->len+2=%zd left=%d, id=%d.\n",
info_element->len +
sizeof(*info_element),
length, info_element->id);
return 1;
/* We stop processing but don't return an error here
* because some misbehaving APs break this rule, e.g.
* the Orinoco AP1000. */
break;
}
switch (info_element->id) {
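The hunk above downgrades a truncated information element from a hard failure to a logged stop: as the new comment says, some APs (the Orinoco AP1000 among them) emit an element whose declared length overruns the frame, and rejecting the whole result would hide those networks entirely. A sketch of the bounds-checked walk with the same stop-rather-than-fail policy, assuming a plain id/len TLV layout:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct info_element {
        uint8_t id;
        uint8_t len; /* len bytes of data follow */
};

static void parse_ies(const uint8_t *buf, size_t length)
{
        while (length >= sizeof(struct info_element)) {
                const struct info_element *ie = (const void *)buf;

                if (sizeof(*ie) + ie->len > length) {
                        /* Truncated element from a misbehaving AP:
                         * stop here, keep what was already parsed. */
                        fprintf(stderr, "IE %u truncated (len %u, left %zu)\n",
                                ie->id, ie->len, length);
                        break;
                }
                printf("IE id=%u len=%u\n", ie->id, ie->len);
                buf += sizeof(*ie) + ie->len;
                length -= sizeof(*ie) + ie->len;
        }
}

int main(void)
{
        /* Second element claims 10 data bytes but only 2 remain. */
        const uint8_t frame[] = { 0, 3, 'a', 'b', 'c', 5, 10, 0xde, 0xad };

        parse_ies(frame, sizeof(frame));
        return 0;
}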


@ -337,7 +337,7 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
hdr_len += 2;
skb->priority = ieee80211_classify(skb);
header.qos_ctl |= skb->priority & IEEE80211_QCTL_TID;
header.qos_ctl |= cpu_to_le16(skb->priority & IEEE80211_QCTL_TID);
}
header.frame_ctl = cpu_to_le16(fc);
@ -532,13 +532,6 @@ int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
if (ret == NETDEV_TX_BUSY) {
printk(KERN_ERR "%s: NETDEV_TX_BUSY returned; "
"driver should report queue full via "
"ieee_device->is_queue_full.\n",
ieee->dev->name);
}
ieee80211_txb_free(txb);
}