Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (55 commits)
  net: fib_rules: fix error code for unsupported families
  netdevice: Fix wrong string handle in kernel command line parsing
  net: Tyop of sk_filter() comment
  netlink: Unneeded local variable
  net-sched: fix filter destruction in atm/hfsc qdisc destruction
  net-sched: change tcf_destroy_chain() to clear start of filter list
  ipv4: fix sysctl documentation of time related values
  mac80211: don't accept WEP keys other than WEP40 and WEP104
  hostap: fix sparse warnings
  hostap: don't report useless WDS frames by default
  textsearch: fix Boyer-Moore text search bug
  netfilter: nf_conntrack_tcp: fixing to check the lower bound of valid ACK
  ipv6 route: Convert rt6_device_match() to use RT6_LOOKUP_F_xxx flags.
  netlabel: Fix a problem when dumping the default IPv6 static labels
  net/inet_lro: remove setting skb->ip_summed when not LRO-able
  inet fragments: fix race between inet_frag_find and inet_frag_secret_rebuild
  CONNECTOR: add a proc entry to list connectors
  netlink: Fix some doc comments in net/netlink/attr.c
  tcp: /proc/net/tcp rto,ato values not scaled properly (v2)
  include/linux/netdevice.h: don't export MAX_HEADER to userspace
  ...
Linus Torvalds 2008-07-02 18:43:16 -07:00
Parent 3d25802e3b 2fe195cfe3
Commit 821b03ffac
76 changed files: 411 additions and 272 deletions


@ -81,23 +81,23 @@ inet_peer_minttl - INTEGER
Minimum time-to-live of entries. Should be enough to cover fragment
time-to-live on the reassembling side. This minimum time-to-live is
guaranteed if the pool size is less than inet_peer_threshold.
Measured in jiffies(1).
Measured in seconds.
inet_peer_maxttl - INTEGER
Maximum time-to-live of entries. Unused entries will expire after
this period of time if there is no memory pressure on the pool (i.e.
when the number of entries in the pool is very small).
Measured in jiffies(1).
Measured in seconds.
inet_peer_gc_mintime - INTEGER
Minimum interval between garbage collection passes. This interval is
in effect under high memory pressure on the pool.
Measured in jiffies(1).
Measured in seconds.
inet_peer_gc_maxtime - INTEGER
Minimum interval between garbage collection passes. This interval is
in effect under low (or absent) memory pressure on the pool.
Measured in jiffies(1).
Measured in seconds.
TCP variables:
@ -794,10 +794,6 @@ tag - INTEGER
Allows you to write a number, which can be used as required.
Default value is 0.
(1) Jiffie: internal timeunit for the kernel. On the i386 1/100s, on the
Alpha 1/1024s. See the HZ define in /usr/include/asm/param.h for the exact
value on your system.
Alexey Kuznetsov.
kuznet@ms2.inr.ac.ru
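
The hunks above change the documented unit for the inet_peer_* TTL and garbage-collection intervals from jiffies to seconds (and drop the jiffie footnote). As a quick illustration, a minimal user-space sketch that reads the now second-based values from their standard /proc/sys/net/ipv4 locations:

#include <stdio.h>

/* Read the peer-cache TTL/GC sysctls, now documented in seconds.
 * Paths are the standard /proc/sys/net/ipv4 locations for these knobs. */
int main(void)
{
	static const char *names[] = {
		"inet_peer_minttl", "inet_peer_maxttl",
		"inet_peer_gc_mintime", "inet_peer_gc_maxtime",
	};
	char path[96];
	long secs;
	int i;

	for (i = 0; i < 4; i++) {
		FILE *f;

		snprintf(path, sizeof(path), "/proc/sys/net/ipv4/%s", names[i]);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fscanf(f, "%ld", &secs) == 1)
			printf("%s = %ld seconds\n", names[i], secs);
		fclose(f);
	}
	return 0;
}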


@ -83,9 +83,9 @@ Valid range: Limited by memory on system
Default: 30
e. intr_type
Specifies interrupt type. Possible values 1(INTA), 2(MSI), 3(MSI-X)
Valid range: 1-3
Default: 1
Specifies interrupt type. Possible values 0(INTA), 2(MSI-X)
Valid values: 0, 2
Default: 2
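
intr_type above is a module load parameter; the following is only a hedged, illustrative sketch (not the actual s2io source) of how a driver typically declares such a knob with the documented values:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Illustrative only: interrupt-type parameter, 0 = INTA, 2 = MSI-X (default). */
static unsigned int intr_type = 2;
module_param(intr_type, uint, 0444);
MODULE_PARM_DESC(intr_type, "Interrupt type: 0 for INTA, 2 for MSI-X");

At load time the value would be overridden on the module command line, e.g. intr_type=0 to force INTA.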
5. Performance suggestions
General:


@ -27,6 +27,8 @@
#include <linux/moduleparam.h>
#include <linux/connector.h>
#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <net/sock.h>
@ -403,6 +405,40 @@ static void cn_callback(void *data)
mutex_unlock(&notify_lock);
}
static int cn_proc_show(struct seq_file *m, void *v)
{
struct cn_queue_dev *dev = cdev.cbdev;
struct cn_callback_entry *cbq;
seq_printf(m, "Name ID\n");
spin_lock_bh(&dev->queue_lock);
list_for_each_entry(cbq, &dev->queue_list, callback_entry) {
seq_printf(m, "%-15s %u:%u\n",
cbq->id.name,
cbq->id.id.idx,
cbq->id.id.val);
}
spin_unlock_bh(&dev->queue_lock);
return 0;
}
static int cn_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, cn_proc_show, NULL);
}
static const struct file_operations cn_file_ops = {
.owner = THIS_MODULE,
.open = cn_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release
};
static int __devinit cn_init(void)
{
struct cn_dev *dev = &cdev;
@ -434,6 +470,8 @@ static int __devinit cn_init(void)
return -EINVAL;
}
proc_net_fops_create(&init_net, "connector", S_IRUGO, &cn_file_ops);
return 0;
}
@ -443,6 +481,8 @@ static void __devexit cn_fini(void)
cn_already_initialized = 0;
proc_net_remove(&init_net, "connector");
cn_del_callback(&dev->id);
cn_queue_free_dev(dev->cbdev);
netlink_kernel_release(dev->nls);
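
The proc entry registered above via proc_net_fops_create(&init_net, "connector", ...) lists each registered callback as a name plus idx:val pair, as printed by cn_proc_show(). A minimal user-space sketch that dumps it:

#include <stdio.h>

/* Dump /proc/net/connector: a "Name ID" header followed by one
 * "name idx:val" line per registered connector callback. */
int main(void)
{
	char line[128];
	FILE *f = fopen("/proc/net/connector", "r");

	if (!f) {
		perror("/proc/net/connector");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}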


@ -1768,9 +1768,10 @@ vortex_timer(unsigned long data)
case XCVR_MII: case XCVR_NWAY:
{
ok = 1;
spin_lock_bh(&vp->lock);
/* Interrupts are already disabled */
spin_lock(&vp->lock);
vortex_check_media(dev, 0);
spin_unlock_bh(&vp->lock);
spin_unlock(&vp->lock);
}
break;
default: /* Other media types handled by Tx timeouts. */


@ -1803,6 +1803,8 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
if (rx->prev->skb) {
struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
sizeof(struct rfd), PCI_DMA_TODEVICE);
}
return 0;


@ -347,7 +347,7 @@ e1000_set_tso(struct net_device *netdev, u32 data)
else
netdev->features &= ~NETIF_F_TSO;
if (data)
if (data && (adapter->hw.mac_type > e1000_82547_rev_2))
netdev->features |= NETIF_F_TSO6;
else
netdev->features &= ~NETIF_F_TSO6;


@ -2535,7 +2535,8 @@ void e1000e_down(struct e1000_adapter *adapter)
adapter->link_speed = 0;
adapter->link_duplex = 0;
e1000e_reset(adapter);
if (!pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
e1000_clean_tx_ring(adapter);
e1000_clean_rx_ring(adapter);


@ -1077,8 +1077,6 @@ static inline void rx_off(struct scc_priv *priv)
static void start_timer(struct scc_priv *priv, int t, int r15)
{
unsigned long flags;
outb(priv->tmr_mode, priv->tmr_ctrl);
if (t == 0) {
tm_isr(priv);


@ -718,7 +718,8 @@ void igb_down(struct igb_adapter *adapter)
adapter->link_speed = 0;
adapter->link_duplex = 0;
igb_reset(adapter);
if (!pci_channel_offline(adapter->pdev))
igb_reset(adapter);
igb_clean_all_tx_rings(adapter);
igb_clean_all_rx_rings(adapter);
}


@ -1271,7 +1271,7 @@ static void ipg_nic_rx_with_end(struct net_device *dev,
framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
endframeLen = framelen - jumbo->current_size;
endframelen = framelen - jumbo->current_size;
/*
if (framelen > IPG_RXFRAG_SIZE)
framelen=IPG_RXFRAG_SIZE;
@ -1279,8 +1279,8 @@ static void ipg_nic_rx_with_end(struct net_device *dev,
if (framelen > IPG_RXSUPPORT_SIZE)
dev_kfree_skb_irq(jumbo->skb);
else {
memcpy(skb_put(jumbo->skb, endframeLen),
skb->data, endframeLen);
memcpy(skb_put(jumbo->skb, endframelen),
skb->data, endframelen);
jumbo->skb->protocol =
eth_type_trans(jumbo->skb, dev);
@ -1352,16 +1352,16 @@ static int ipg_nic_rx(struct net_device *dev)
switch (ipg_nic_rx_check_frame_type(dev)) {
case FRAME_WITH_START_WITH_END:
ipg_nic_rx_with_start_and_end(dev, tp, rxfd, entry);
ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
break;
case FRAME_WITH_START:
ipg_nic_rx_with_start(dev, tp, rxfd, entry);
ipg_nic_rx_with_start(dev, sp, rxfd, entry);
break;
case FRAME_WITH_END:
ipg_nic_rx_with_end(dev, tp, rxfd, entry);
ipg_nic_rx_with_end(dev, sp, rxfd, entry);
break;
case FRAME_NO_START_NO_END:
ipg_nic_rx_no_start_no_end(dev, tp, rxfd, entry);
ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
break;
}
}
@ -1808,7 +1808,7 @@ static int ipg_nic_open(struct net_device *dev)
/* initialize JUMBO Frame control variable */
sp->jumbo.found_start = 0;
sp->jumbo.current_size = 0;
sp->jumbo.skb = 0;
sp->jumbo.skb = NULL;
dev->mtu = IPG_TXFRAG_SIZE;
#endif


@ -1969,7 +1969,8 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_carrier_off(netdev);
netif_stop_queue(netdev);
ixgbe_reset(adapter);
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
ixgbe_clean_all_tx_rings(adapter);
ixgbe_clean_all_rx_rings(adapter);


@ -71,14 +71,18 @@ static irqreturn_t netxen_intr(int irq, void *data);
static irqreturn_t netxen_msi_intr(int irq, void *data);
/* PCI Device ID Table */
#define ENTRY(device) \
{PCI_DEVICE(0x4040, (device)), \
.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
{PCI_DEVICE(0x4040, 0x0001), PCI_DEVICE_CLASS(0x020000, ~0)},
{PCI_DEVICE(0x4040, 0x0002), PCI_DEVICE_CLASS(0x020000, ~0)},
{PCI_DEVICE(0x4040, 0x0003), PCI_DEVICE_CLASS(0x020000, ~0)},
{PCI_DEVICE(0x4040, 0x0004), PCI_DEVICE_CLASS(0x020000, ~0)},
{PCI_DEVICE(0x4040, 0x0005), PCI_DEVICE_CLASS(0x020000, ~0)},
{PCI_DEVICE(0x4040, 0x0024), PCI_DEVICE_CLASS(0x020000, ~0)},
{PCI_DEVICE(0x4040, 0x0025), PCI_DEVICE_CLASS(0x020000, ~0)},
ENTRY(0x0001),
ENTRY(0x0002),
ENTRY(0x0003),
ENTRY(0x0004),
ENTRY(0x0005),
ENTRY(0x0024),
ENTRY(0x0025),
{0,}
};


@ -525,12 +525,14 @@ static int axnet_open(struct net_device *dev)
int ret;
axnet_dev_t *info = PRIV(dev);
struct pcmcia_device *link = info->p_dev;
unsigned int nic_base = dev->base_addr;
DEBUG(2, "axnet_open('%s')\n", dev->name);
if (!pcmcia_dev_present(link))
return -ENODEV;
outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */
ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, "axnet_cs", dev);
if (ret)
return ret;


@ -969,6 +969,7 @@ static int pcnet_open(struct net_device *dev)
int ret;
pcnet_dev_t *info = PRIV(dev);
struct pcmcia_device *link = info->p_dev;
unsigned int nic_base = dev->base_addr;
DEBUG(2, "pcnet_open('%s')\n", dev->name);
@ -976,6 +977,8 @@ static int pcnet_open(struct net_device *dev)
return -ENODEV;
set_misc_reg(dev);
outb_p(0xFF, nic_base + EN0_ISR); /* Clear bogus intr. */
ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev_info, dev);
if (ret)
return ret;


@ -3701,7 +3701,9 @@ static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
printk(KERN_ERR PFX
"%s: Driver up/down cycle failed, "
"closing device\n",qdev->ndev->name);
rtnl_lock();
dev_close(qdev->ndev);
rtnl_unlock();
return -1;
}
return 0;


@ -273,7 +273,7 @@ static void r6040_init_ring_desc(struct r6040_descriptor *desc_ring,
dma_addr_t mapping = desc_dma;
while (size-- > 0) {
mapping += sizeof(sizeof(*desc));
mapping += sizeof(*desc);
desc->ndesc = cpu_to_le32(mapping);
desc->vndescp = desc + 1;
desc++;


@ -2625,9 +2625,7 @@ static int fill_rx_buffers(struct ring_info *ring)
rxdp1->Buffer0_ptr = pci_map_single
(ring->pdev, skb->data, size - NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
if( (rxdp1->Buffer0_ptr == 0) ||
(rxdp1->Buffer0_ptr ==
DMA_ERROR_CODE))
if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
goto pci_map_failed;
rxdp->Control_2 =
@ -2657,6 +2655,7 @@ static int fill_rx_buffers(struct ring_info *ring)
skb->data = (void *) (unsigned long)tmp;
skb_reset_tail_pointer(skb);
/* AK: check is wrong. 0 can be valid dma address */
if (!(rxdp3->Buffer0_ptr))
rxdp3->Buffer0_ptr =
pci_map_single(ring->pdev, ba->ba_0,
@ -2665,8 +2664,7 @@ static int fill_rx_buffers(struct ring_info *ring)
pci_dma_sync_single_for_device(ring->pdev,
(dma_addr_t) rxdp3->Buffer0_ptr,
BUF0_LEN, PCI_DMA_FROMDEVICE);
if( (rxdp3->Buffer0_ptr == 0) ||
(rxdp3->Buffer0_ptr == DMA_ERROR_CODE))
if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
goto pci_map_failed;
rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
@ -2681,18 +2679,17 @@ static int fill_rx_buffers(struct ring_info *ring)
(ring->pdev, skb->data, ring->mtu + 4,
PCI_DMA_FROMDEVICE);
if( (rxdp3->Buffer2_ptr == 0) ||
(rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
goto pci_map_failed;
/* AK: check is wrong */
if (!rxdp3->Buffer1_ptr)
rxdp3->Buffer1_ptr =
pci_map_single(ring->pdev,
ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE);
if( (rxdp3->Buffer1_ptr == 0) ||
(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
pci_unmap_single
(ring->pdev,
(dma_addr_t)(unsigned long)
@ -4264,16 +4261,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
txdp->Buffer_Pointer = pci_map_single(sp->pdev,
fifo->ufo_in_band_v,
sizeof(u64), PCI_DMA_TODEVICE);
if((txdp->Buffer_Pointer == 0) ||
(txdp->Buffer_Pointer == DMA_ERROR_CODE))
if (pci_dma_mapping_error(txdp->Buffer_Pointer))
goto pci_map_failed;
txdp++;
}
txdp->Buffer_Pointer = pci_map_single
(sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
if((txdp->Buffer_Pointer == 0) ||
(txdp->Buffer_Pointer == DMA_ERROR_CODE))
if (pci_dma_mapping_error(txdp->Buffer_Pointer))
goto pci_map_failed;
txdp->Host_Control = (unsigned long) skb;
@ -6884,10 +6879,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
pci_map_single( sp->pdev, (*skb)->data,
size - NET_IP_ALIGN,
PCI_DMA_FROMDEVICE);
if( (rxdp1->Buffer0_ptr == 0) ||
(rxdp1->Buffer0_ptr == DMA_ERROR_CODE)) {
if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
goto memalloc_failed;
}
rxdp->Host_Control = (unsigned long) (*skb);
}
} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
@ -6913,15 +6906,12 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
pci_map_single(sp->pdev, (*skb)->data,
dev->mtu + 4,
PCI_DMA_FROMDEVICE);
if( (rxdp3->Buffer2_ptr == 0) ||
(rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) {
if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
goto memalloc_failed;
}
rxdp3->Buffer0_ptr = *temp0 =
pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
PCI_DMA_FROMDEVICE);
if( (rxdp3->Buffer0_ptr == 0) ||
(rxdp3->Buffer0_ptr == DMA_ERROR_CODE)) {
if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
pci_unmap_single (sp->pdev,
(dma_addr_t)rxdp3->Buffer2_ptr,
dev->mtu + 4, PCI_DMA_FROMDEVICE);
@ -6933,8 +6923,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
rxdp3->Buffer1_ptr = *temp1 =
pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
PCI_DMA_FROMDEVICE);
if( (rxdp3->Buffer1_ptr == 0) ||
(rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
pci_unmap_single (sp->pdev,
(dma_addr_t)rxdp3->Buffer0_ptr,
BUF0_LEN, PCI_DMA_FROMDEVICE);


@ -75,10 +75,6 @@ static int debug_level = ERR_DBG;
/* DEBUG message print. */
#define DBG_PRINT(dbg_level, args...) if(!(debug_level<dbg_level)) printk(args)
#ifndef DMA_ERROR_CODE
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
#endif
/* Protocol assist features of the NIC */
#define L3_CKSUM_OK 0xFFFF
#define L4_CKSUM_OK 0xFFFF


@ -1394,6 +1394,7 @@ tc35815_open(struct net_device *dev)
tc35815_chip_init(dev);
spin_unlock_irq(&lp->lock);
netif_carrier_off(dev);
/* schedule a link state check */
phy_start(lp->phy_dev);
@ -1735,7 +1736,6 @@ tc35815_rx(struct net_device *dev)
skb = lp->rx_skbs[cur_bd].skb;
prefetch(skb->data);
lp->rx_skbs[cur_bd].skb = NULL;
lp->fbl_count--;
pci_unmap_single(lp->pci_dev,
lp->rx_skbs[cur_bd].skb_dma,
RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
@ -1791,6 +1791,7 @@ tc35815_rx(struct net_device *dev)
#ifdef TC35815_USE_PACKEDBUFFER
while (lp->fbl_curid != id)
#else
lp->fbl_count--;
while (lp->fbl_count < RX_BUF_NUM)
#endif
{
@ -2453,6 +2454,7 @@ static int tc35815_resume(struct pci_dev *pdev)
return 0;
pci_set_power_state(pdev, PCI_D0);
tc35815_restart(dev);
netif_carrier_off(dev);
if (lp->phy_dev)
phy_start(lp->phy_dev);
netif_device_attach(dev);


@ -32,6 +32,7 @@
#include <linux/x25.h>
#include <linux/lapb.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include "x25_asy.h"
#include <net/x25device.h>
@ -601,8 +602,10 @@ static void x25_asy_close_tty(struct tty_struct *tty)
if (!sl || sl->magic != X25_ASY_MAGIC)
return;
rtnl_lock();
if (sl->dev->flags & IFF_UP)
dev_close(sl->dev);
rtnl_unlock();
tty->disc_data = NULL;
sl->tty = NULL;


@ -72,6 +72,9 @@ static void b43_led_brightness_set(struct led_classdev *led_dev,
struct b43_wldev *dev = led->dev;
bool radio_enabled;
if (unlikely(b43_status(dev) < B43_STAT_INITIALIZED))
return;
/* Checking the radio-enabled status here is slightly racy,
* but we want to avoid the locking overhead and we don't care
* whether the LED has the wrong state for a second. */


@ -2883,12 +2883,11 @@ static int b43_op_tx(struct ieee80211_hw *hw,
if (unlikely(skb->len < 2 + 2 + 6)) {
/* Too short, this can't be a valid frame. */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
goto drop_packet;
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
if (unlikely(!dev))
return NETDEV_TX_BUSY;
goto drop_packet;
/* Transmissions on seperate queues can run concurrently. */
read_lock_irqsave(&wl->tx_lock, flags);
@ -2904,7 +2903,12 @@ static int b43_op_tx(struct ieee80211_hw *hw,
read_unlock_irqrestore(&wl->tx_lock, flags);
if (unlikely(err))
return NETDEV_TX_BUSY;
goto drop_packet;
return NETDEV_TX_OK;
drop_packet:
/* We can not transmit this packet. Drop it. */
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}


@ -876,6 +876,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
if (!ring)
goto out;
ring->type = type;
ring->dev = dev;
nr_slots = B43legacy_RXRING_SLOTS;
if (for_tx)
@ -922,7 +923,6 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
DMA_TO_DEVICE);
}
ring->dev = dev;
ring->nr_slots = nr_slots;
ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
ring->index = controller_index;


@ -2378,8 +2378,10 @@ static int b43legacy_op_tx(struct ieee80211_hw *hw,
} else
err = b43legacy_dma_tx(dev, skb, ctl);
out:
if (unlikely(err))
return NETDEV_TX_BUSY;
if (unlikely(err)) {
/* Drop the packet. */
dev_kfree_skb_any(skb);
}
return NETDEV_TX_OK;
}


@ -64,7 +64,7 @@ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
int hdrlen, phdrlen, head_need, tail_need;
u16 fc;
int prism_header, ret;
struct ieee80211_hdr_4addr *hdr;
struct ieee80211_hdr_4addr *fhdr;
iface = netdev_priv(dev);
local = iface->local;
@ -83,8 +83,8 @@ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb,
phdrlen = 0;
}
hdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(hdr->frame_ctl);
fhdr = (struct ieee80211_hdr_4addr *) skb->data;
fc = le16_to_cpu(fhdr->frame_ctl);
if (type == PRISM2_RX_MGMT && (fc & IEEE80211_FCTL_VERS)) {
printk(KERN_DEBUG "%s: dropped management frame with header "
@ -551,7 +551,7 @@ hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr_4addr *hdr,
hdr->addr1[2] != 0xff || hdr->addr1[3] != 0xff ||
hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) {
/* RA (or BSSID) is not ours - drop */
PDEBUG(DEBUG_EXTRA, "%s: received WDS frame with "
PDEBUG(DEBUG_EXTRA2, "%s: received WDS frame with "
"not own or broadcast %s=%s\n",
local->dev->name,
fc & IEEE80211_FCTL_FROMDS ? "RA" : "BSSID",


@ -1930,7 +1930,7 @@ static void handle_pspoll(local_info_t *local,
PDEBUG(DEBUG_PS, " PSPOLL and AID[15:14] not set\n");
return;
}
aid &= ~BIT(15) & ~BIT(14);
aid &= ~(BIT(15) | BIT(14));
if (aid == 0 || aid > MAX_AID_TABLE_SIZE) {
PDEBUG(DEBUG_PS, " invalid aid=%d\n", aid);
return;


@ -533,10 +533,10 @@ static void prism2_detach(struct pcmcia_device *link)
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
#define CFG_CHECK2(fn, retf) \
do { int ret = (retf); \
if (ret != 0) { \
PDEBUG(DEBUG_EXTRA, "CardServices(" #fn ") returned %d\n", ret); \
cs_error(link, fn, ret); \
do { int _ret = (retf); \
if (_ret != 0) { \
PDEBUG(DEBUG_EXTRA, "CardServices(" #fn ") returned %d\n", _ret); \
cs_error(link, fn, _ret); \
goto next_entry; \
} \
} while (0)


@ -2835,7 +2835,7 @@ static void hostap_passive_scan(unsigned long data)
{
local_info_t *local = (local_info_t *) data;
struct net_device *dev = local->dev;
u16 channel;
u16 chan;
if (local->passive_scan_interval <= 0)
return;
@ -2872,11 +2872,11 @@ static void hostap_passive_scan(unsigned long data)
printk(KERN_DEBUG "%s: passive scan channel %d\n",
dev->name, local->passive_scan_channel);
channel = local->passive_scan_channel;
chan = local->passive_scan_channel;
local->passive_scan_state = PASSIVE_SCAN_WAIT;
local->passive_scan_timer.expires = jiffies + HZ / 10;
} else {
channel = local->channel;
chan = local->channel;
local->passive_scan_state = PASSIVE_SCAN_LISTEN;
local->passive_scan_timer.expires = jiffies +
local->passive_scan_interval * HZ;
@ -2884,9 +2884,9 @@ static void hostap_passive_scan(unsigned long data)
if (hfa384x_cmd_callback(dev, HFA384X_CMDCODE_TEST |
(HFA384X_TEST_CHANGE_CHANNEL << 8),
channel, NULL, 0))
chan, NULL, 0))
printk(KERN_ERR "%s: passive scan channel set %d "
"failed\n", dev->name, channel);
"failed\n", dev->name, chan);
add_timer(&local->passive_scan_timer);
}


@ -594,7 +594,8 @@ void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx)
}
int hostap_80211_header_parse(const struct sk_buff *skb, unsigned char *haddr)
static int hostap_80211_header_parse(const struct sk_buff *skb,
unsigned char *haddr)
{
struct hostap_interface *iface = netdev_priv(skb->dev);
local_info_t *local = iface->local;
@ -857,7 +858,6 @@ const struct header_ops hostap_80211_ops = {
.rebuild = eth_rebuild_header,
.cache = eth_header_cache,
.cache_update = eth_header_cache_update,
.parse = hostap_80211_header_parse,
};
EXPORT_SYMBOL(hostap_80211_ops);
@ -1150,7 +1150,6 @@ EXPORT_SYMBOL(hostap_set_roaming);
EXPORT_SYMBOL(hostap_set_auth_algs);
EXPORT_SYMBOL(hostap_dump_rx_header);
EXPORT_SYMBOL(hostap_dump_tx_header);
EXPORT_SYMBOL(hostap_80211_header_parse);
EXPORT_SYMBOL(hostap_80211_get_hdrlen);
EXPORT_SYMBOL(hostap_get_stats);
EXPORT_SYMBOL(hostap_setup_dev);


@ -2227,7 +2227,10 @@ static int iwl3945_scan_initiate(struct iwl3945_priv *priv)
}
IWL_DEBUG_INFO("Starting scan...\n");
priv->scan_bands = 2;
if (priv->cfg->sku & IWL_SKU_G)
priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
if (priv->cfg->sku & IWL_SKU_A)
priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
set_bit(STATUS_SCANNING, &priv->status);
priv->scan_start = jiffies;
priv->scan_pass_start = priv->scan_start;
@ -3352,13 +3355,18 @@ static void iwl3945_rx_scan_complete_notif(struct iwl3945_priv *priv,
cancel_delayed_work(&priv->scan_check);
IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
(priv->scan_bands == 2) ? "2.4" : "5.2",
(priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
"2.4" : "5.2",
jiffies_to_msecs(elapsed_jiffies
(priv->scan_pass_start, jiffies)));
/* Remove this scanned band from the list
* of pending bands to scan */
priv->scan_bands--;
/* Remove this scanned band from the list of pending
* bands to scan, band G precedes A in order of scanning
* as seen in iwl3945_bg_request_scan */
if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
/* If a request to abort was given, or the scan did not succeed
* then we reset the scan state machine and terminate,
@ -4972,7 +4980,7 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel);
if (!is_channel_valid(ch_info)) {
IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n",
scan_ch->channel);
continue;
}
@ -6315,21 +6323,16 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
/* flags + rate selection */
switch (priv->scan_bands) {
case 2:
if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
scan->good_CRC_th = 0;
band = IEEE80211_BAND_2GHZ;
break;
case 1:
} else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
scan->good_CRC_th = IWL_GOOD_CRC_TH;
band = IEEE80211_BAND_5GHZ;
break;
default:
} else {
IWL_WARNING("Invalid scan band count\n");
goto done;
}
@ -6770,7 +6773,7 @@ static int iwl3945_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
ch_info = iwl3945_get_channel_info(priv, conf->channel->band,
conf->channel->hw_value);
if (!is_channel_valid(ch_info)) {
IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n",
IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this band.\n",
conf->channel->hw_value, conf->channel->band);
IWL_DEBUG_MAC80211("leave - invalid channel\n");
spin_unlock_irqrestore(&priv->lock, flags);


@ -1774,7 +1774,10 @@ static int iwl4965_scan_initiate(struct iwl_priv *priv)
}
IWL_DEBUG_INFO("Starting scan...\n");
priv->scan_bands = 2;
if (priv->cfg->sku & IWL_SKU_G)
priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
if (priv->cfg->sku & IWL_SKU_A)
priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
set_bit(STATUS_SCANNING, &priv->status);
priv->scan_start = jiffies;
priv->scan_pass_start = priv->scan_start;
@ -3023,8 +3026,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
if (index != -1) {
int freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
#ifdef CONFIG_IWL4965_HT
int freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
if (tid != MAX_TID_COUNT)
priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
@ -3276,13 +3280,18 @@ static void iwl4965_rx_scan_complete_notif(struct iwl_priv *priv,
cancel_delayed_work(&priv->scan_check);
IWL_DEBUG_INFO("Scan pass on %sGHz took %dms\n",
(priv->scan_bands == 2) ? "2.4" : "5.2",
(priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ?
"2.4" : "5.2",
jiffies_to_msecs(elapsed_jiffies
(priv->scan_pass_start, jiffies)));
/* Remove this scanned band from the list
* of pending bands to scan */
priv->scan_bands--;
/* Remove this scanned band from the list of pending
* bands to scan, band G precedes A in order of scanning
* as seen in iwl_bg_request_scan */
if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
/* If a request to abort was given, or the scan did not succeed
* then we reset the scan state machine and terminate,
@ -3292,7 +3301,7 @@ static void iwl4965_rx_scan_complete_notif(struct iwl_priv *priv,
clear_bit(STATUS_SCAN_ABORTING, &priv->status);
} else {
/* If there are more bands on this scan pass reschedule */
if (priv->scan_bands > 0)
if (priv->scan_bands)
goto reschedule;
}
@ -4635,10 +4644,9 @@ static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
scan_ch->channel = ieee80211_frequency_to_channel(channels[i].center_freq);
ch_info = iwl_get_channel_info(priv, band,
scan_ch->channel);
ch_info = iwl_get_channel_info(priv, band, scan_ch->channel);
if (!is_channel_valid(ch_info)) {
IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
IWL_DEBUG_SCAN("Channel %d is INVALID for this band.\n",
scan_ch->channel);
continue;
}
@ -5830,8 +5838,7 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
switch (priv->scan_bands) {
case 2:
if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
scan->tx_cmd.rate_n_flags =
iwl4965_hw_set_rate_n_flags(IWL_RATE_1M_PLCP,
@ -5839,17 +5846,13 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
scan->good_CRC_th = 0;
band = IEEE80211_BAND_2GHZ;
break;
case 1:
} else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
scan->tx_cmd.rate_n_flags =
iwl4965_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
RATE_MCS_ANT_B_MSK);
scan->good_CRC_th = IWL_GOOD_CRC_TH;
band = IEEE80211_BAND_5GHZ;
break;
default:
} else {
IWL_WARNING("Invalid scan band count\n");
goto done;
}


@ -290,7 +290,7 @@ islpci_monitor_rx(islpci_private *priv, struct sk_buff **skb)
avs->version = cpu_to_be32(P80211CAPTURE_VERSION);
avs->length = cpu_to_be32(sizeof (struct avs_80211_1_header));
avs->mactime = cpu_to_be64(le64_to_cpu(clock));
avs->mactime = cpu_to_be64(clock);
avs->hosttime = cpu_to_be64(jiffies);
avs->phytype = cpu_to_be32(6); /*OFDM: 6 for (g), 8 for (a) */
avs->channel = cpu_to_be32(channel_of_freq(freq));


@ -138,11 +138,8 @@ static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev,
* Wait until the BBP becomes ready.
*/
reg = rt2500usb_bbp_check(rt2x00dev);
if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) {
ERROR(rt2x00dev, "PHY_CSR8 register busy. Write failed.\n");
mutex_unlock(&rt2x00dev->usb_cache_mutex);
return;
}
if (rt2x00_get_field16(reg, PHY_CSR8_BUSY))
goto exit_fail;
/*
* Write the data into the BBP.
@ -155,6 +152,13 @@ static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev,
rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg);
mutex_unlock(&rt2x00dev->usb_cache_mutex);
return;
exit_fail:
mutex_unlock(&rt2x00dev->usb_cache_mutex);
ERROR(rt2x00dev, "PHY_CSR8 register busy. Write failed.\n");
}
static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
@ -168,10 +172,8 @@ static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
* Wait until the BBP becomes ready.
*/
reg = rt2500usb_bbp_check(rt2x00dev);
if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) {
ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n");
return;
}
if (rt2x00_get_field16(reg, PHY_CSR8_BUSY))
goto exit_fail;
/*
* Write the request into the BBP.
@ -186,17 +188,21 @@ static void rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev,
* Wait until the BBP becomes ready.
*/
reg = rt2500usb_bbp_check(rt2x00dev);
if (rt2x00_get_field16(reg, PHY_CSR8_BUSY)) {
ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n");
*value = 0xff;
mutex_unlock(&rt2x00dev->usb_cache_mutex);
return;
}
if (rt2x00_get_field16(reg, PHY_CSR8_BUSY))
goto exit_fail;
rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7, &reg);
*value = rt2x00_get_field16(reg, PHY_CSR7_DATA);
mutex_unlock(&rt2x00dev->usb_cache_mutex);
return;
exit_fail:
mutex_unlock(&rt2x00dev->usb_cache_mutex);
ERROR(rt2x00dev, "PHY_CSR8 register busy. Read failed.\n");
*value = 0xff;
}
static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev,


@ -821,6 +821,7 @@ struct rt2x00_dev {
/*
* Scheduled work.
*/
struct workqueue_struct *workqueue;
struct work_struct intf_work;
struct work_struct filter_work;


@ -75,7 +75,7 @@ static void rt2x00lib_start_link_tuner(struct rt2x00_dev *rt2x00dev)
rt2x00lib_reset_link_tuner(rt2x00dev);
queue_delayed_work(rt2x00dev->hw->workqueue,
queue_delayed_work(rt2x00dev->workqueue,
&rt2x00dev->link.work, LINK_TUNE_INTERVAL);
}
@ -136,14 +136,6 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
if (!__test_and_clear_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
return;
/*
* Stop all scheduled work.
*/
if (work_pending(&rt2x00dev->intf_work))
cancel_work_sync(&rt2x00dev->intf_work);
if (work_pending(&rt2x00dev->filter_work))
cancel_work_sync(&rt2x00dev->filter_work);
/*
* Stop the TX queues.
*/
@ -398,8 +390,8 @@ static void rt2x00lib_link_tuner(struct work_struct *work)
* Increase tuner counter, and reschedule the next link tuner run.
*/
rt2x00dev->link.count++;
queue_delayed_work(rt2x00dev->hw->workqueue, &rt2x00dev->link.work,
LINK_TUNE_INTERVAL);
queue_delayed_work(rt2x00dev->workqueue,
&rt2x00dev->link.work, LINK_TUNE_INTERVAL);
}
static void rt2x00lib_packetfilter_scheduled(struct work_struct *work)
@ -433,6 +425,15 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
spin_unlock(&intf->lock);
/*
* It is possible the radio was disabled while the work had been
* scheduled. If that happens we should return here immediately,
* note that in the spinlock protected area above the delayed_flags
* have been cleared correctly.
*/
if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
return;
if (delayed_flags & DELAYED_UPDATE_BEACON) {
skb = ieee80211_beacon_get(rt2x00dev->hw, vif, &control);
if (skb && rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw,
@ -441,7 +442,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
}
if (delayed_flags & DELAYED_CONFIG_ERP)
rt2x00lib_config_erp(rt2x00dev, intf, &intf->conf);
rt2x00lib_config_erp(rt2x00dev, intf, &conf);
if (delayed_flags & DELAYED_LED_ASSOC)
rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
@ -487,7 +488,7 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
rt2x00lib_beacondone_iter,
rt2x00dev);
queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work);
queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work);
}
EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
@ -1130,6 +1131,10 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
/*
* Initialize configuration work.
*/
rt2x00dev->workqueue = create_singlethread_workqueue("rt2x00lib");
if (!rt2x00dev->workqueue)
goto exit;
INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
INIT_WORK(&rt2x00dev->filter_work, rt2x00lib_packetfilter_scheduled);
INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00lib_link_tuner);
@ -1189,6 +1194,13 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
rt2x00rfkill_free(rt2x00dev);
rt2x00leds_unregister(rt2x00dev);
/*
* Stop all queued work. Note that most tasks will already be halted
* during rt2x00lib_disable_radio() and rt2x00lib_uninitialize().
*/
flush_workqueue(rt2x00dev->workqueue);
destroy_workqueue(rt2x00dev->workqueue);
/*
* Free ieee80211_hw memory.
*/


@ -428,7 +428,7 @@ void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
if (!test_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags))
rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags);
else
queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work);
queue_work(rt2x00dev->workqueue, &rt2x00dev->filter_work);
}
EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter);
@ -509,7 +509,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
memcpy(&intf->conf, bss_conf, sizeof(*bss_conf));
if (delayed) {
intf->delayed_flags |= delayed;
queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work);
queue_work(rt2x00dev->workqueue, &rt2x00dev->intf_work);
}
spin_unlock(&intf->lock);
}


@ -134,11 +134,8 @@ static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev,
* Wait until the BBP becomes ready.
*/
reg = rt73usb_bbp_check(rt2x00dev);
if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) {
ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n");
mutex_unlock(&rt2x00dev->usb_cache_mutex);
return;
}
if (rt2x00_get_field32(reg, PHY_CSR3_BUSY))
goto exit_fail;
/*
* Write the data into the BBP.
@ -151,6 +148,13 @@ static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev,
rt73usb_register_write_lock(rt2x00dev, PHY_CSR3, reg);
mutex_unlock(&rt2x00dev->usb_cache_mutex);
return;
exit_fail:
mutex_unlock(&rt2x00dev->usb_cache_mutex);
ERROR(rt2x00dev, "PHY_CSR3 register busy. Write failed.\n");
}
static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev,
@ -164,11 +168,8 @@ static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev,
* Wait until the BBP becomes ready.
*/
reg = rt73usb_bbp_check(rt2x00dev);
if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) {
ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n");
mutex_unlock(&rt2x00dev->usb_cache_mutex);
return;
}
if (rt2x00_get_field32(reg, PHY_CSR3_BUSY))
goto exit_fail;
/*
* Write the request into the BBP.
@ -184,14 +185,19 @@ static void rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev,
* Wait until the BBP becomes ready.
*/
reg = rt73usb_bbp_check(rt2x00dev);
if (rt2x00_get_field32(reg, PHY_CSR3_BUSY)) {
ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n");
*value = 0xff;
return;
}
if (rt2x00_get_field32(reg, PHY_CSR3_BUSY))
goto exit_fail;
*value = rt2x00_get_field32(reg, PHY_CSR3_VALUE);
mutex_unlock(&rt2x00dev->usb_cache_mutex);
return;
exit_fail:
mutex_unlock(&rt2x00dev->usb_cache_mutex);
ERROR(rt2x00dev, "PHY_CSR3 register busy. Read failed.\n");
*value = 0xff;
}
static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,


@ -84,7 +84,11 @@ struct net_lro_mgr {
from received packets and eth protocol
is still ETH_P_8021Q */
u32 ip_summed; /* Set in non generated SKBs in page mode */
/*
* Set for generated SKBs that are not added to
* the frag list in fragmented mode
*/
u32 ip_summed;
u32 ip_summed_aggr; /* Set in aggregated SKBs: CHECKSUM_UNNECESSARY
* or CHECKSUM_NONE */


@ -88,6 +88,8 @@ struct wireless_dev;
#define NETDEV_TX_BUSY 1 /* driver tx path was busy*/
#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */
#ifdef __KERNEL__
/*
* Compute the worst case header length according to the protocols
* used.
@ -114,6 +116,8 @@ struct wireless_dev;
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
#endif /* __KERNEL__ */
struct net_device_subqueue
{
/* Give a control state for each queue. This struct may contain


@ -595,6 +595,15 @@ enum ieee80211_key_alg {
ALG_CCMP,
};
/**
* enum ieee80211_key_len - key length
* @WEP40: WEP 5 byte long key
* @WEP104: WEP 13 byte long key
*/
enum ieee80211_key_len {
LEN_WEP40 = 5,
LEN_WEP104 = 13,
};
/**
* enum ieee80211_key_flags - key flags


@ -178,7 +178,7 @@ extern struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
struct Qdisc_ops *ops, u32 parentid);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto *fl);
extern void tcf_destroy_chain(struct tcf_proto **fl);
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff_head *list)


@ -63,7 +63,7 @@ static unsigned int bm_find(struct ts_config *conf, struct ts_state *state)
struct ts_bm *bm = ts_config_priv(conf);
unsigned int i, text_len, consumed = state->offset;
const u8 *text;
int shift = bm->patlen, bs;
int shift = bm->patlen - 1, bs;
for (;;) {
text_len = conf->get_next_block(consumed, &text, conf, state);
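
The shift initialization fix above starts the Boyer-Moore search at patlen - 1 instead of patlen, so the first window is actually compared rather than skipped. A hedged sketch of how this backend is reached through the kernel textsearch API (call signatures as recalled from include/linux/textsearch.h; treat them as assumptions):

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/textsearch.h>

/* Look for "needle" in a buffer using the "bm" (Boyer-Moore) backend.
 * Returns the match offset, or UINT_MAX when the pattern is absent. */
static unsigned int bm_find_needle(const void *buf, unsigned int len)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("bm", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = textsearch_find_continuous(conf, &state, buf, len);
	textsearch_destroy(conf);
	return pos;
}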


@ -454,7 +454,7 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
memset(s[i].name, 0, sizeof(s[i].name));
strcpy(s[i].name, name);
strlcpy(s[i].name, name, IFNAMSIZ);
memcpy(&s[i].map, map, sizeof(s[i].map));
break;
}
@ -479,7 +479,7 @@ int netdev_boot_setup_check(struct net_device *dev)
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
!strncmp(dev->name, s[i].name, strlen(s[i].name))) {
!strcmp(dev->name, s[i].name)) {
dev->irq = s[i].map.irq;
dev->base_addr = s[i].map.base_addr;
dev->mem_start = s[i].map.mem_start;
@ -2973,7 +2973,7 @@ EXPORT_SYMBOL(dev_unicast_delete);
/**
* dev_unicast_add - add a secondary unicast address
* @dev: device
* @addr: address to delete
* @addr: address to add
* @alen: length of @addr
*
* Add a secondary unicast address to the device or increase


@ -226,7 +226,7 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
ops = lookup_rules_ops(net, frh->family);
if (ops == NULL) {
err = EAFNOSUPPORT;
err = -EAFNOSUPPORT;
goto errout;
}
@ -365,7 +365,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
ops = lookup_rules_ops(net, frh->family);
if (ops == NULL) {
err = EAFNOSUPPORT;
err = -EAFNOSUPPORT;
goto errout;
}


@ -68,7 +68,6 @@ static inline void *load_pointer(struct sk_buff *skb, int k,
* sk_filter - run a packet through a socket filter
* @sk: sock associated with &sk_buff
* @skb: buffer to filter
* @needlock: set to 1 if the sock is not locked by caller.
*
* Run the filter code and then cut skb->data to correct size returned by
* sk_run_filter. If pkt_len is 0 we toss packet. If skb->len is smaller


@ -1292,12 +1292,14 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
{
unsigned int nr_pages = spd->nr_pages;
unsigned int poff, plen, len, toff, tlen;
int headlen, seg;
int headlen, seg, error = 0;
toff = *offset;
tlen = *total_len;
if (!tlen)
if (!tlen) {
error = 1;
goto err;
}
/*
* if the offset is greater than the linear part, go directly to
@ -1339,7 +1341,8 @@ static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
* just jump directly to update and return, no point
* in going over fragments when the output is full.
*/
if (spd_fill_page(spd, virt_to_page(p), plen, poff, skb))
error = spd_fill_page(spd, virt_to_page(p), plen, poff, skb);
if (error)
goto done;
tlen -= plen;
@ -1369,7 +1372,8 @@ map_frag:
if (!plen)
break;
if (spd_fill_page(spd, f->page, plen, poff, skb))
error = spd_fill_page(spd, f->page, plen, poff, skb);
if (error)
break;
tlen -= plen;
@ -1382,7 +1386,10 @@ done:
return 0;
}
err:
return 1;
/* update the offset to reflect the linear part skip, if any */
if (!error)
*offset = toff;
return error;
}
/*


@ -192,14 +192,21 @@ EXPORT_SYMBOL(inet_frag_evictor);
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
struct inet_frag_queue *qp_in, struct inet_frags *f,
unsigned int hash, void *arg)
void *arg)
{
struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
struct hlist_node *n;
#endif
unsigned int hash;
write_lock(&f->lock);
/*
* While we stayed w/o the lock other CPU could update
* the rnd seed, so we need to re-calculate the hash
* chain. Fortunatelly the qp_in can be used to get one.
*/
hash = f->hashfn(qp_in);
#ifdef CONFIG_SMP
/* With SMP race we have to recheck hash table, because
* such entry could be created on other cpu, while we
@ -247,7 +254,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
}
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
struct inet_frags *f, void *arg, unsigned int hash)
struct inet_frags *f, void *arg)
{
struct inet_frag_queue *q;
@ -255,7 +262,7 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
if (q == NULL)
return NULL;
return inet_frag_intern(nf, q, f, hash, arg);
return inet_frag_intern(nf, q, f, arg);
}
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
@ -264,7 +271,6 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
struct inet_frag_queue *q;
struct hlist_node *n;
read_lock(&f->lock);
hlist_for_each_entry(q, n, &f->hash[hash], list) {
if (q->net == nf && f->match(q, key)) {
atomic_inc(&q->refcnt);
@ -274,6 +280,6 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
}
read_unlock(&f->lock);
return inet_frag_create(nf, f, key, hash);
return inet_frag_create(nf, f, key);
}
EXPORT_SYMBOL(inet_frag_find);


@ -383,8 +383,7 @@ static int __lro_proc_skb(struct net_lro_mgr *lro_mgr, struct sk_buff *skb,
out2: /* send aggregated SKBs to stack */
lro_flush(lro_mgr, lro_desc);
out: /* Original SKB has to be posted to stack */
skb->ip_summed = lro_mgr->ip_summed;
out:
return 1;
}


@ -229,6 +229,8 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
arg.iph = iph;
arg.user = user;
read_lock(&ip4_frags.lock);
hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);


@ -260,6 +260,8 @@
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>
@ -2620,7 +2622,7 @@ __setup("thash_entries=", set_thash_entries);
void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
unsigned long limit;
unsigned long nr_pages, limit;
int order, i, max_share;
BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
@ -2689,8 +2691,9 @@ void __init tcp_init(void)
* is up to 1/2 at 256 MB, decreasing toward zero with the amount of
* memory, with a floor of 128 pages.
*/
limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
nr_pages = totalram_pages - totalhigh_pages;
limit = min(nr_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
limit = (limit * (nr_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
limit = max(limit, 128UL);
sysctl_tcp_mem[0] = limit / 4 * 3;
sysctl_tcp_mem[1] = limit;


@ -2291,7 +2291,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
}
seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
"%08X %5d %8d %lu %d %p %u %u %u %u %d%n",
"%08X %5d %8d %lu %d %p %lu %lu %u %u %d%n",
i, src, srcp, dest, destp, sk->sk_state,
tp->write_seq - tp->snd_una,
sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
@ -2303,8 +2303,8 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
icsk->icsk_probes_out,
sock_i_ino(sk),
atomic_read(&sk->sk_refcnt), sk,
icsk->icsk_rto,
icsk->icsk_ack.ato,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
tp->snd_cwnd,
tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,


@ -129,7 +129,7 @@ static struct nf_hook_ops ip6t_ops[] __read_mostly = {
.priority = NF_IP6_PRI_MANGLE,
},
{
.hook = ip6t_local_hook,
.hook = ip6t_route_hook,
.owner = THIS_MODULE,
.pf = PF_INET6,
.hooknum = NF_INET_LOCAL_IN,


@ -207,9 +207,10 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
arg.id = id;
arg.src = src;
arg.dst = dst;
read_lock_bh(&nf_frags.lock);
hash = ip6qhashfn(id, src, dst);
local_bh_disable();
q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash);
local_bh_enable();
if (q == NULL)


@ -247,6 +247,8 @@ fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst,
arg.id = id;
arg.src = src;
arg.dst = dst;
read_lock(&ip6_frags.lock);
hash = ip6qhashfn(id, src, dst);
q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);


@ -240,7 +240,7 @@ static inline int rt6_need_strict(struct in6_addr *daddr)
static inline struct rt6_info *rt6_device_match(struct net *net,
struct rt6_info *rt,
int oif,
int strict)
int flags)
{
struct rt6_info *local = NULL;
struct rt6_info *sprt;
@ -253,7 +253,7 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
if (dev->flags & IFF_LOOPBACK) {
if (sprt->rt6i_idev == NULL ||
sprt->rt6i_idev->dev->ifindex != oif) {
if (strict && oif)
if (flags & RT6_LOOKUP_F_IFACE && oif)
continue;
if (local && (!oif ||
local->rt6i_idev->dev->ifindex == oif))
@ -266,7 +266,7 @@ static inline struct rt6_info *rt6_device_match(struct net *net,
if (local)
return local;
if (strict)
if (flags & RT6_LOOKUP_F_IFACE)
return net->ipv6.ip6_null_entry;
}
return rt;


@ -2036,7 +2036,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %u %u %u %u %d\n",
"%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %lu %lu %u %u %d\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
@ -2052,8 +2052,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
icsk->icsk_probes_out,
sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
icsk->icsk_rto,
icsk->icsk_ack.ato,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
);


@ -380,6 +380,15 @@ void ieee80211_key_free(struct ieee80211_key *key)
if (!key)
return;
if (!key->sdata) {
/* The key has not been linked yet, simply free it
* and don't Oops */
if (key->conf.alg == ALG_CCMP)
ieee80211_aes_key_free(key->u.ccmp.tfm);
kfree(key);
return;
}
spin_lock_irqsave(&key->sdata->local->key_lock, flags);
__ieee80211_key_free(key);
spin_unlock_irqrestore(&key->sdata->local->key_lock, flags);


@ -95,6 +95,13 @@ static int ieee80211_set_encryption(struct net_device *dev, u8 *sta_addr,
}
}
if (alg == ALG_WEP &&
key_len != LEN_WEP40 && key_len != LEN_WEP104) {
ieee80211_key_free(key);
err = -EINVAL;
goto out_unlock;
}
ieee80211_key_link(key, sdata, sta);
if (set_tx_key || (!sta && !sdata->default_key && key))


@ -323,8 +323,7 @@ static void wme_qdiscop_destroy(struct Qdisc* qd)
struct ieee80211_hw *hw = &local->hw;
int queue;
tcf_destroy_chain(q->filter_list);
q->filter_list = NULL;
tcf_destroy_chain(&q->filter_list);
for (queue=0; queue < hw->queues; queue++) {
skb_queue_purge(&q->requeued[queue]);


@ -331,12 +331,13 @@ static unsigned int get_conntrack_index(const struct tcphdr *tcph)
I. Upper bound for valid data: seq <= sender.td_maxend
II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin
III. Upper bound for valid ack: sack <= receiver.td_end
IV. Lower bound for valid ack: ack >= receiver.td_end - MAXACKWINDOW
III. Upper bound for valid (s)ack: sack <= receiver.td_end
IV. Lower bound for valid (s)ack: sack >= receiver.td_end - MAXACKWINDOW
where sack is the highest right edge of sack block found in the packet.
where sack is the highest right edge of sack block found in the packet
or ack in the case of packet without SACK option.
The upper bound limit for a valid ack is not ignored -
The upper bound limit for a valid (s)ack is not ignored -
we doesn't have to deal with fragments.
*/
@ -606,12 +607,12 @@ static bool tcp_in_window(const struct nf_conn *ct,
before(seq, sender->td_maxend + 1),
after(end, sender->td_end - receiver->td_maxwin - 1),
before(sack, receiver->td_end + 1),
after(ack, receiver->td_end - MAXACKWINDOW(sender)));
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
if (before(seq, sender->td_maxend + 1) &&
after(end, sender->td_end - receiver->td_maxwin - 1) &&
before(sack, receiver->td_end + 1) &&
after(ack, receiver->td_end - MAXACKWINDOW(sender))) {
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
/*
* Take into account window scaling (RFC 1323).
*/


@ -1534,7 +1534,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,
}
}
list_for_each_entry_rcu(addr6, &iface->addr6_list, list) {
if (addr6->valid || iter_addr6++ < skip_addr6)
if (!addr6->valid || iter_addr6++ < skip_addr6)
continue;
if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,
iface,


@ -886,7 +886,7 @@ retry:
return netlink_unicast_kernel(sk, skb);
if (sk_filter(sk, skb)) {
int err = skb->len;
err = skb->len;
kfree_skb(skb);
sock_put(sk);
return err;


@ -132,6 +132,7 @@ errout:
* @maxtype: maximum attribute type to be expected
* @head: head of attribute stream
* @len: length of attribute stream
* @policy: validation policy
*
* Parses a stream of attributes and stores a pointer to each attribute in
* the tb array accessable via the attribute type. Attributes with a type
@ -194,7 +195,7 @@ struct nlattr *nla_find(struct nlattr *head, int len, int attrtype)
/**
* nla_strlcpy - Copy string attribute payload into a sized buffer
* @dst: where to copy the string to
* @src: attribute to copy the string from
* @nla: attribute to copy the string from
* @dstsize: size of destination buffer
*
* Copies at most dstsize - 1 bytes into the destination buffer.
@ -340,9 +341,9 @@ struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
}
/**
* nla_reserve - reserve room for attribute without header
* nla_reserve_nohdr - reserve room for attribute without header
* @skb: socket buffer to reserve room on
* @len: length of attribute payload
* @attrlen: length of attribute payload
*
* Reserves room for attribute payload without a header.
*


@ -106,17 +106,6 @@ config NET_SCH_PRIO
To compile this code as a module, choose M here: the
module will be called sch_prio.
config NET_SCH_RR
tristate "Multi Band Round Robin Queuing (RR)"
select NET_SCH_PRIO
---help---
Say Y here if you want to use an n-band round robin packet
scheduler.
The module uses sch_prio for its framework and is aliased as
sch_rr, so it will load sch_prio, although it is referred
to using sch_rr.
config NET_SCH_RED
tristate "Random Early Detection (RED)"
---help---


@ -1252,12 +1252,12 @@ void tcf_destroy(struct tcf_proto *tp)
kfree(tp);
}
void tcf_destroy_chain(struct tcf_proto *fl)
void tcf_destroy_chain(struct tcf_proto **fl)
{
struct tcf_proto *tp;
while ((tp = fl) != NULL) {
fl = tp->next;
while ((tp = *fl) != NULL) {
*fl = tp->next;
tcf_destroy(tp);
}
}


@ -160,7 +160,7 @@ static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
*prev = flow->next;
pr_debug("atm_tc_put: qdisc %p\n", flow->q);
qdisc_destroy(flow->q);
tcf_destroy_chain(flow->filter_list);
tcf_destroy_chain(&flow->filter_list);
if (flow->sock) {
pr_debug("atm_tc_put: f_count %d\n",
file_count(flow->sock->file));
@ -586,10 +586,11 @@ static void atm_tc_destroy(struct Qdisc *sch)
struct atm_flow_data *flow;
pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
for (flow = p->flows; flow; flow = flow->next)
tcf_destroy_chain(&flow->filter_list);
/* races ? */
while ((flow = p->flows)) {
tcf_destroy_chain(flow->filter_list);
flow->filter_list = NULL;
if (flow->ref > 1)
printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow,
flow->ref);


@ -1704,7 +1704,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
BUG_TRAP(!cl->filters);
tcf_destroy_chain(cl->filter_list);
tcf_destroy_chain(&cl->filter_list);
qdisc_destroy(cl->q);
qdisc_put_rtab(cl->R_tab);
gen_kill_estimator(&cl->bstats, &cl->rate_est);
@ -1728,10 +1728,8 @@ cbq_destroy(struct Qdisc* sch)
* be bound to classes which have been destroyed already. --TGR '04
*/
for (h = 0; h < 16; h++) {
for (cl = q->classes[h]; cl; cl = cl->next) {
tcf_destroy_chain(cl->filter_list);
cl->filter_list = NULL;
}
for (cl = q->classes[h]; cl; cl = cl->next)
tcf_destroy_chain(&cl->filter_list);
}
for (h = 0; h < 16; h++) {
struct cbq_class *next;


@ -416,7 +416,7 @@ static void dsmark_destroy(struct Qdisc *sch)
pr_debug("dsmark_destroy(sch %p,[qdisc %p])\n", sch, p);
tcf_destroy_chain(p->filter_list);
tcf_destroy_chain(&p->filter_list);
qdisc_destroy(p->q);
kfree(p->mask);
}


@ -468,7 +468,7 @@ struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
return sch;
errout:
return ERR_PTR(-err);
return ERR_PTR(err);
}
struct Qdisc * qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops,


@ -1123,7 +1123,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
{
struct hfsc_sched *q = qdisc_priv(sch);
tcf_destroy_chain(cl->filter_list);
tcf_destroy_chain(&cl->filter_list);
qdisc_destroy(cl->qdisc);
gen_kill_estimator(&cl->bstats, &cl->rate_est);
if (cl != &q->root)
@ -1540,6 +1540,10 @@ hfsc_destroy_qdisc(struct Qdisc *sch)
struct hfsc_class *cl, *next;
unsigned int i;
for (i = 0; i < HFSC_HSIZE; i++) {
list_for_each_entry(cl, &q->clhash[i], hlist)
tcf_destroy_chain(&cl->filter_list);
}
for (i = 0; i < HFSC_HSIZE; i++) {
list_for_each_entry_safe(cl, next, &q->clhash[i], hlist)
hfsc_destroy_class(sch, cl);


@ -1238,7 +1238,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
qdisc_put_rtab(cl->rate);
qdisc_put_rtab(cl->ceil);
tcf_destroy_chain(cl->filter_list);
tcf_destroy_chain(&cl->filter_list);
while (!list_empty(&cl->children))
htb_destroy_class(sch, list_entry(cl->children.next,
@ -1267,7 +1267,7 @@ static void htb_destroy(struct Qdisc *sch)
and surprisingly it worked in 2.4. But it must precede it
because filter need its target class alive to be able to call
unbind_filter on it (without Oops). */
tcf_destroy_chain(q->filter_list);
tcf_destroy_chain(&q->filter_list);
while (!list_empty(&q->root))
htb_destroy_class(sch, list_entry(q->root.next,

Просмотреть файл

@ -104,7 +104,7 @@ static void ingress_destroy(struct Qdisc *sch)
{
struct ingress_qdisc_data *p = qdisc_priv(sch);
tcf_destroy_chain(p->filter_list);
tcf_destroy_chain(&p->filter_list);
}
static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)


@ -219,7 +219,7 @@ prio_destroy(struct Qdisc* sch)
int prio;
struct prio_sched_data *q = qdisc_priv(sch);
tcf_destroy_chain(q->filter_list);
tcf_destroy_chain(&q->filter_list);
for (prio=0; prio<q->bands; prio++)
qdisc_destroy(q->queues[prio]);
}


@ -520,7 +520,7 @@ static void sfq_destroy(struct Qdisc *sch)
{
struct sfq_sched_data *q = qdisc_priv(sch);
tcf_destroy_chain(q->filter_list);
tcf_destroy_chain(&q->filter_list);
q->perturb_period = 0;
del_timer_sync(&q->perturb_timer);
}


@ -487,8 +487,8 @@ static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_datagram_poll(struct file *, struct socket *,
poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
@ -534,7 +534,7 @@ static const struct proto_ops unix_dgram_ops = {
.socketpair = unix_socketpair,
.accept = sock_no_accept,
.getname = unix_getname,
.poll = unix_datagram_poll,
.poll = unix_dgram_poll,
.ioctl = unix_ioctl,
.listen = sock_no_listen,
.shutdown = unix_shutdown,
@ -555,7 +555,7 @@ static const struct proto_ops unix_seqpacket_ops = {
.socketpair = unix_socketpair,
.accept = unix_accept,
.getname = unix_getname,
.poll = unix_datagram_poll,
.poll = unix_dgram_poll,
.ioctl = unix_ioctl,
.listen = unix_listen,
.shutdown = unix_shutdown,
@ -1994,29 +1994,13 @@ static unsigned int unix_poll(struct file * file, struct socket *sock, poll_tabl
return mask;
}
static unsigned int unix_datagram_poll(struct file *file, struct socket *sock,
poll_table *wait)
static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk, *peer;
unsigned int mask;
struct sock *sk = sock->sk, *other;
unsigned int mask, writable;
poll_wait(file, sk->sk_sleep, wait);
peer = unix_peer_get(sk);
if (peer) {
if (peer != sk) {
/*
* Writability of a connected socket additionally
* depends on the state of the receive queue of the
* peer.
*/
poll_wait(file, &unix_sk(peer)->peer_wait, wait);
} else {
sock_put(peer);
peer = NULL;
}
}
mask = 0;
/* exceptional events? */
@ -2042,14 +2026,26 @@ static unsigned int unix_datagram_poll(struct file *file, struct socket *sock,
}
/* writable? */
if (unix_writable(sk) && !(peer && unix_recvq_full(peer)))
writable = unix_writable(sk);
if (writable) {
other = unix_peer_get(sk);
if (other) {
if (unix_peer(other) != sk) {
poll_wait(file, &unix_sk(other)->peer_wait,
wait);
if (unix_recvq_full(other))
writable = 0;
}
sock_put(other);
}
}
if (writable)
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
if (peer)
sock_put(peer);
return mask;
}


@ -80,6 +80,23 @@ static const struct ieee80211_channel_range ieee80211_JP_channels[] = {
IEEE80211_CHAN_RADAR),
};
static const struct ieee80211_channel_range ieee80211_EU_channels[] = {
/* IEEE 802.11b/g, channels 1..13 */
RANGE_PWR(2412, 2472, 20, 6, 0),
/* IEEE 802.11a, channel 36*/
RANGE_PWR(5180, 5180, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN),
/* IEEE 802.11a, channel 40*/
RANGE_PWR(5200, 5200, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN),
/* IEEE 802.11a, channel 44*/
RANGE_PWR(5220, 5220, 23, 6, IEEE80211_CHAN_PASSIVE_SCAN),
/* IEEE 802.11a, channels 48..64 */
RANGE_PWR(5240, 5320, 23, 6, IEEE80211_CHAN_NO_IBSS |
IEEE80211_CHAN_RADAR),
/* IEEE 802.11a, channels 100..140 */
RANGE_PWR(5500, 5700, 30, 6, IEEE80211_CHAN_NO_IBSS |
IEEE80211_CHAN_RADAR),
};
#define REGDOM(_code) \
{ \
.code = __stringify(_code), \
@ -90,6 +107,7 @@ static const struct ieee80211_channel_range ieee80211_JP_channels[] = {
static const struct ieee80211_regdomain ieee80211_regdoms[] = {
REGDOM(US),
REGDOM(JP),
REGDOM(EU),
};