Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

David S. Miller 2009-07-09 20:18:24 -07:00
Parent bff38771e1 e594e96e8a
Commit e5a8a896f5
42 changed files with 224 additions and 103 deletions

View file

@ -184,8 +184,6 @@ usage should require reading the full document.
!Finclude/net/mac80211.h ieee80211_ctstoself_get
!Finclude/net/mac80211.h ieee80211_ctstoself_duration
!Finclude/net/mac80211.h ieee80211_generic_frame_duration
!Finclude/net/mac80211.h ieee80211_get_hdrlen_from_skb
!Finclude/net/mac80211.h ieee80211_hdrlen
!Finclude/net/mac80211.h ieee80211_wake_queue
!Finclude/net/mac80211.h ieee80211_stop_queue
!Finclude/net/mac80211.h ieee80211_wake_queues

View file

@ -302,4 +302,8 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
/* The {read|write|spin}_lock() on x86 are full memory barriers. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK
#endif /* _ASM_X86_SPINLOCK_H */
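This hunk is one half of a small override pattern; the matching generic fallback appears in the include/linux/spinlock.h hunk further down. A condensed sketch of how the two halves fit together, assuming kernel context (an illustration, not a copy of the headers):

/* Architecture side (x86 here): the lock instructions already act as a
 * full memory barrier, so the post-lock hook can be a no-op. */
static inline void smp_mb__after_lock(void) { }
#define ARCH_HAS_SMP_MB_AFTER_LOCK

/* Generic side: any architecture that does not advertise the macro above
 * falls back to a real barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif

Callers such as sk_has_sleeper() in the include/net/sock.h hunk below can then issue smp_mb__after_lock() right after taking a lock and pay nothing extra on x86.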

View file

@ -670,8 +670,7 @@ static int setup_sge_qsets(struct adapter *adap)
struct port_info *pi = netdev_priv(dev);
pi->qs = &adap->sge.qs[pi->first_qset];
for (j = pi->first_qset; j < pi->first_qset + pi->nqsets;
++j, ++qset_idx) {
for (j = 0; j < pi->nqsets; ++j, ++qset_idx) {
set_qset_lro(dev, qset_idx, pi->rx_offload & T3_LRO);
err = t3_sge_alloc_qset(adap, qset_idx, 1,
(adap->flags & USING_MSIX) ? qset_idx + 1 :

View file

@ -1820,11 +1820,19 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
struct device *emac_dev = &priv->ndev->dev;
struct sockaddr *sa = addr;
if (!is_valid_ether_addr(sa->sa_data))
return -EINVAL;
/* Store mac addr in priv and rx channel and set it in EMAC hw */
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
/* If the interface is down - rxch is NULL. */
/* MAC address is configured only after the interface is enabled. */
if (netif_running(ndev)) {
memcpy(rxch->mac_addr, sa->sa_data, ndev->addr_len);
emac_setmac(priv, EMAC_DEF_RX_CH, rxch->mac_addr);
}
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n",
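For reference, the general shape of an ndo_set_mac_address handler that has to cope with state which only exists while the interface is up; example_priv and example_hw_set_mac are hypothetical placeholders, not the DaVinci EMAC API:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

struct example_priv {
        u8 mac_addr[ETH_ALEN];
};

/* Placeholder for whatever programs the MAC filter registers. */
static void example_hw_set_mac(struct example_priv *priv, const u8 *mac)
{
        memcpy(priv->mac_addr, mac, ETH_ALEN);
}

static int example_set_mac_address(struct net_device *ndev, void *addr)
{
        struct example_priv *priv = netdev_priv(ndev);
        struct sockaddr *sa = addr;

        if (!is_valid_ether_addr(sa->sa_data))
                return -EINVAL;

        /* Always remember the new address so it can be applied at open time. */
        memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);

        /* Only touch the hardware while the interface is running; the RX
         * channel and similar resources are not set up before ndo_open. */
        if (netif_running(ndev))
                example_hw_set_mac(priv, ndev->dev_addr);

        return 0;
}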

View file

@ -46,12 +46,12 @@
#else
#define FEC_ECNTRL; 0x000 /* Ethernet control reg */
#define FEC_IEVENT; 0x004 /* Interrupt even reg */
#define FEC_IMASK; 0x008 /* Interrupt mask reg */
#define FEC_IVEC; 0x00c /* Interrupt vec status reg */
#define FEC_R_DES_ACTIVE; 0x010 /* Receive descriptor reg */
#define FEC_X_DES_ACTIVE; 0x01c /* Transmit descriptor reg */
#define FEC_ECNTRL 0x000 /* Ethernet control reg */
#define FEC_IEVENT 0x004 /* Interrupt even reg */
#define FEC_IMASK 0x008 /* Interrupt mask reg */
#define FEC_IVEC 0x00c /* Interrupt vec status reg */
#define FEC_R_DES_ACTIVE 0x010 /* Receive descriptor reg */
#define FEC_X_DES_ACTIVE 0x014 /* Transmit descriptor reg */
#define FEC_MII_DATA 0x040 /* MII manage frame reg */
#define FEC_MII_SPEED 0x044 /* MII speed control reg */
#define FEC_R_BOUND 0x08c /* FIFO receive bound reg */

View file

@ -190,6 +190,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
phy->ops.write_reg = igb_write_phy_reg_igp;
}
/* set lan id */
hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
E1000_STATUS_FUNC_SHIFT;
/* Set phy->phy_addr and phy->id. */
ret_val = igb_get_phy_id_82575(hw);
if (ret_val)

View file

@ -138,6 +138,10 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
adapter->hw.fc.requested_mode = ixgbe_fc_none;
}
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
}
adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
@ -154,6 +158,8 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
ixgbe_init_interrupt_scheme(adapter);
if (netif_running(netdev))
netdev->netdev_ops->ndo_open(netdev);

View file

@ -3130,7 +3130,11 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
#endif
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
DPRINTK(PROBE, INFO, "FCOE enabled with RSS \n");
ixgbe_set_rss_queues(adapter);
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
ixgbe_set_fdir_queues(adapter);
else
ixgbe_set_rss_queues(adapter);
}
/* adding FCoE rx rings to the end */
f->mask = adapter->num_rx_queues;
@ -3388,7 +3392,12 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
}
#endif /* CONFIG_IXGBE_DCB */
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
ixgbe_cache_ring_rss(adapter);
if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
ixgbe_cache_ring_fdir(adapter);
else
ixgbe_cache_ring_rss(adapter);
fcoe_i = f->mask;
}
for (i = 0; i < f->indices; i++, fcoe_i++)
@ -5578,12 +5587,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->features |= NETIF_F_FCOE_CRC;
netdev->features |= NETIF_F_FSO;
netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
DPRINTK(DRV, INFO, "FCoE enabled, "
"disabling Flow Director\n");
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &=
~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
} else {
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
}

View file

@ -49,8 +49,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
#define DRV_VERSION "0.23"
#define DRV_RELDATE "05May2009"
#define DRV_VERSION "0.24"
#define DRV_RELDATE "08Jul2009"
/* PHY CHIP Address */
#define PHY1_ADDR 1 /* For MAC1 */
@ -704,8 +704,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
/* Read MISR status and clear */
status = ioread16(ioaddr + MISR);
if (status == 0x0000 || status == 0xffff)
if (status == 0x0000 || status == 0xffff) {
/* Restore RDC MAC interrupt */
iowrite16(misr, ioaddr + MIER);
return IRQ_NONE;
}
/* RX interrupt request */
if (status & RX_INTS) {
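The restore added above only makes sense because the handler masks the chip's interrupts on entry (that part of r6040_interrupt is outside this hunk); without it, bailing out on a spurious or shared interrupt would leave the NIC silenced. A generic sketch of the save/mask/restore ISR pattern — the MIER/MISR names are borrowed from the hunk, while the offsets and example_* names are placeholders:

#include <linux/interrupt.h>
#include <linux/io.h>

#define MIER 0x40       /* interrupt enable register (placeholder offset) */
#define MISR 0x3c       /* interrupt status register (placeholder offset) */

struct example_priv {
        void __iomem *base;
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
        struct example_priv *lp = dev_id;
        u16 saved_mask, status;

        /* Save the current mask and silence the chip while we work. */
        saved_mask = ioread16(lp->base + MIER);
        iowrite16(0, lp->base + MIER);

        /* Reading the status register also clears it. */
        status = ioread16(lp->base + MISR);
        if (status == 0x0000 || status == 0xffff) {
                /* Not our interrupt (or the device is gone): restore the
                 * old mask before returning, or the NIC stays muted. */
                iowrite16(saved_mask, lp->base + MIER);
                return IRQ_NONE;
        }

        /* ... handle RX/TX events here ... */

        iowrite16(saved_mask, lp->base + MIER);
        return IRQ_HANDLED;
}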

View file

@ -486,12 +486,14 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
{
struct tun_file *tfile = file->private_data;
struct tun_struct *tun = __tun_get(tfile);
struct sock *sk = tun->sk;
struct sock *sk;
unsigned int mask = 0;
if (!tun)
return POLLERR;
sk = tun->sk;
DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
poll_wait(file, &tun->socket.wait, wait);

View file

@ -1,5 +1,6 @@
config ATH_COMMON
tristate "Atheros Wireless Cards"
depends on WLAN_80211
depends on ATH5K || ATH9K || AR9170_USB
source "drivers/net/wireless/ath/ath5k/Kconfig"

View file

@ -355,7 +355,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
if (bf_next == NULL) {
INIT_LIST_HEAD(&bf_head);
/*
* Make sure the last desc is reclaimed if it
* not a holding desc.
*/
if (!bf_last->bf_stale)
list_move_tail(&bf->list, &bf_head);
else
INIT_LIST_HEAD(&bf_head);
} else {
ASSERT(!list_empty(bf_q));
list_move_tail(&bf->list, &bf_head);

View file

@ -648,6 +648,7 @@ struct b43_wl {
u8 nr_devs;
bool radiotap_enabled;
bool radio_enabled;
/* The beacon we are currently using (AP or IBSS mode).
* This beacon stuff is protected by the irq_lock. */

View file

@ -3497,8 +3497,8 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
if (phy->ops->set_rx_antenna)
phy->ops->set_rx_antenna(dev, antenna);
if (!!conf->radio_enabled != phy->radio_on) {
if (conf->radio_enabled) {
if (wl->radio_enabled != phy->radio_on) {
if (wl->radio_enabled) {
b43_software_rfkill(dev, false);
b43info(dev->wl, "Radio turned on by software\n");
if (!dev->radio_hw_enable) {
@ -4339,6 +4339,7 @@ static int b43_op_start(struct ieee80211_hw *hw)
wl->beacon0_uploaded = 0;
wl->beacon1_uploaded = 0;
wl->beacon_templates_virgin = 1;
wl->radio_enabled = 1;
mutex_lock(&wl->mutex);
@ -4378,6 +4379,7 @@ static void b43_op_stop(struct ieee80211_hw *hw)
if (b43_status(dev) >= B43_STAT_STARTED)
b43_wireless_core_stop(dev);
b43_wireless_core_exit(dev);
wl->radio_enabled = 0;
mutex_unlock(&wl->mutex);
cancel_work_sync(&(wl->txpower_adjust_work));
@ -4560,6 +4562,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
B43_WARN_ON(1);
dev->phy.gmode = have_2ghz_phy;
dev->phy.radio_on = 1;
tmp = dev->phy.gmode ? B43_TMSLOW_GMODE : 0;
b43_wireless_core_reset(dev, tmp);

View file

@ -35,6 +35,7 @@
static /*const */ struct pcmcia_device_id b43_pcmcia_tbl[] = {
PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x448),
PCMCIA_DEVICE_MANF_CARD(0x2D0, 0x476),
PCMCIA_DEVICE_NULL,
};

View file

@ -607,6 +607,7 @@ struct b43legacy_wl {
u8 nr_devs;
bool radiotap_enabled;
bool radio_enabled;
/* The beacon we are currently using (AP or IBSS mode).
* This beacon stuff is protected by the irq_lock. */

View file

@ -2689,8 +2689,8 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
/* Antennas for RX and management frame TX. */
b43legacy_mgmtframe_txantenna(dev, antenna_tx);
if (!!conf->radio_enabled != phy->radio_on) {
if (conf->radio_enabled) {
if (wl->radio_enabled != phy->radio_on) {
if (wl->radio_enabled) {
b43legacy_radio_turn_on(dev);
b43legacyinfo(dev->wl, "Radio turned on by software\n");
if (!dev->radio_hw_enable)
@ -3441,6 +3441,7 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
wl->beacon0_uploaded = 0;
wl->beacon1_uploaded = 0;
wl->beacon_templates_virgin = 1;
wl->radio_enabled = 1;
mutex_lock(&wl->mutex);
@ -3479,6 +3480,7 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw)
if (b43legacy_status(dev) >= B43legacy_STAT_STARTED)
b43legacy_wireless_core_stop(dev);
b43legacy_wireless_core_exit(dev);
wl->radio_enabled = 0;
mutex_unlock(&wl->mutex);
}
@ -3620,6 +3622,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
have_bphy = 1;
dev->phy.gmode = (have_gphy || have_bphy);
dev->phy.radio_on = 1;
tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0;
b43legacy_wireless_core_reset(dev, tmp);

View file

@ -4,6 +4,15 @@ config IWM
depends on CFG80211
select WIRELESS_EXT
select FW_LOADER
help
The Intel Wireless Multicomm 3200 hardware is a combo
card with GPS, Bluetooth, WiMax and 802.11 radios. It
runs over SDIO and is typically found on Moorestown
based platform. This driver takes care of the 802.11
part, which is a fullmac one.
If you choose to build it as a module, it'll be called
iwmc3200wifi.ko.
config IWM_DEBUG
bool "Enable full debugging output in iwmc3200wifi"

View file

@ -418,6 +418,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
continue;
if (!data2->started || !hwsim_ps_rx_ok(data2, skb) ||
!data->channel || !data2->channel ||
data->channel->center_freq != data2->channel->center_freq ||
!(data->group & data2->group))
continue;

View file

@ -823,30 +823,30 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
struct p54_tx_info *range;
unsigned long flags;
if (unlikely(!skb || !dev || skb_queue_empty(&priv->tx_queue)))
if (unlikely(!skb || !dev || !skb_queue_len(&priv->tx_queue)))
return;
/* There used to be a check here to see if the SKB was on the
* TX queue or not. This can never happen because all SKBs we
* see here successfully went through p54_assign_address()
* which means the SKB is on the ->tx_queue.
/*
* don't try to free an already unlinked skb
*/
if (unlikely((!skb->next) || (!skb->prev)))
return;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
info = IEEE80211_SKB_CB(skb);
range = (void *)info->rate_driver_data;
if (!skb_queue_is_first(&priv->tx_queue, skb)) {
if (skb->prev != (struct sk_buff *)&priv->tx_queue) {
struct ieee80211_tx_info *ni;
struct p54_tx_info *mr;
ni = IEEE80211_SKB_CB(skb_queue_prev(&priv->tx_queue, skb));
ni = IEEE80211_SKB_CB(skb->prev);
mr = (struct p54_tx_info *)ni->rate_driver_data;
}
if (!skb_queue_is_last(&priv->tx_queue, skb)) {
if (skb->next != (struct sk_buff *)&priv->tx_queue) {
struct ieee80211_tx_info *ni;
struct p54_tx_info *mr;
ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue, skb));
ni = IEEE80211_SKB_CB(skb->next);
mr = (struct p54_tx_info *)ni->rate_driver_data;
}
__skb_unlink(skb, &priv->tx_queue);
@ -864,13 +864,15 @@ static struct sk_buff *p54_find_tx_entry(struct ieee80211_hw *dev,
unsigned long flags;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
skb_queue_walk(&priv->tx_queue, entry) {
entry = priv->tx_queue.next;
while (entry != (struct sk_buff *)&priv->tx_queue) {
struct p54_hdr *hdr = (struct p54_hdr *) entry->data;
if (hdr->req_id == req_id) {
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
return entry;
}
entry = entry->next;
}
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
return NULL;
@ -888,33 +890,36 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
int count, idx;
spin_lock_irqsave(&priv->tx_queue.lock, flags);
skb_queue_walk(&priv->tx_queue, entry) {
entry = (struct sk_buff *) priv->tx_queue.next;
while (entry != (struct sk_buff *)&priv->tx_queue) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(entry);
struct p54_hdr *entry_hdr;
struct p54_tx_data *entry_data;
unsigned int pad = 0, frame_len;
range = (void *)info->rate_driver_data;
if (range->start_addr != addr)
if (range->start_addr != addr) {
entry = entry->next;
continue;
}
if (!skb_queue_is_last(&priv->tx_queue, entry)) {
if (entry->next != (struct sk_buff *)&priv->tx_queue) {
struct ieee80211_tx_info *ni;
struct p54_tx_info *mr;
ni = IEEE80211_SKB_CB(skb_queue_next(&priv->tx_queue,
entry));
ni = IEEE80211_SKB_CB(entry->next);
mr = (struct p54_tx_info *)ni->rate_driver_data;
}
__skb_unlink(entry, &priv->tx_queue);
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
frame_len = entry->len;
entry_hdr = (struct p54_hdr *) entry->data;
entry_data = (struct p54_tx_data *) entry_hdr->data;
priv->tx_stats[entry_data->hw_queue].len--;
if (priv->tx_stats[entry_data->hw_queue].len)
priv->tx_stats[entry_data->hw_queue].len--;
priv->stats.dot11ACKFailureCount += payload->tries - 1;
spin_unlock_irqrestore(&priv->tx_queue.lock, flags);
/*
* Frames in P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are
@ -1164,21 +1169,23 @@ static int p54_assign_address(struct ieee80211_hw *dev, struct sk_buff *skb,
}
}
skb_queue_walk(&priv->tx_queue, entry) {
entry = priv->tx_queue.next;
while (left--) {
u32 hole_size;
info = IEEE80211_SKB_CB(entry);
range = (void *)info->rate_driver_data;
hole_size = range->start_addr - last_addr;
if (!target_skb && hole_size >= len) {
target_skb = skb_queue_prev(&priv->tx_queue, entry);
target_skb = entry->prev;
hole_size -= len;
target_addr = last_addr;
}
largest_hole = max(largest_hole, hole_size);
last_addr = range->end_addr;
entry = entry->next;
}
if (!target_skb && priv->rx_end - last_addr >= len) {
target_skb = skb_peek_tail(&priv->tx_queue);
target_skb = priv->tx_queue.prev;
largest_hole = max(largest_hole, priv->rx_end - last_addr - len);
if (!skb_queue_empty(&priv->tx_queue)) {
info = IEEE80211_SKB_CB(target_skb);
@ -2084,6 +2091,7 @@ out:
static void p54_stop(struct ieee80211_hw *dev)
{
struct p54_common *priv = dev->priv;
struct sk_buff *skb;
mutex_lock(&priv->conf_mutex);
priv->mode = NL80211_IFTYPE_UNSPECIFIED;
@ -2098,7 +2106,8 @@ static void p54_stop(struct ieee80211_hw *dev)
p54_tx_cancel(dev, priv->cached_beacon);
priv->stop(dev);
skb_queue_purge(&priv->tx_queue);
while ((skb = skb_dequeue(&priv->tx_queue)))
kfree_skb(skb);
priv->cached_beacon = NULL;
priv->tsf_high32 = priv->tsf_low32 = 0;
mutex_unlock(&priv->conf_mutex);

View file

@ -38,7 +38,6 @@ static struct usb_device_id usb_ids[] = {
/* ZD1211 */
{ USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0ace, 0xa211), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 },
@ -61,6 +60,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
/* ZD1211B */
{ USB_DEVICE(0x054c, 0x0257), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
@ -87,6 +87,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0df6, 0x0036), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211B },
/* "Driverless" devices that need ejecting */
{ USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
{ USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },

View file

@ -171,7 +171,7 @@ static int hp_wmi_tablet_state(void)
static int hp_wmi_set_block(void *data, bool blocked)
{
unsigned long b = (unsigned long) data;
int query = BIT(b + 8) | ((!!blocked) << b);
int query = BIT(b + 8) | ((!blocked) << b);
return hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, query);
}

View file

@ -678,7 +678,8 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
sprom->board_rev = tuple.TupleData[1];
break;
case SSB_PCMCIA_CIS_PA:
GOTO_ERROR_ON(tuple.TupleDataLen != 9,
GOTO_ERROR_ON((tuple.TupleDataLen != 9) &&
(tuple.TupleDataLen != 10),
"pa tpl size");
sprom->pa0b0 = tuple.TupleData[1] |
((u16)tuple.TupleData[2] << 8);
@ -718,7 +719,8 @@ int ssb_pcmcia_get_invariants(struct ssb_bus *bus,
sprom->antenna_gain.ghz5.a3 = tuple.TupleData[1];
break;
case SSB_PCMCIA_CIS_BFLAGS:
GOTO_ERROR_ON(tuple.TupleDataLen != 3,
GOTO_ERROR_ON((tuple.TupleDataLen != 3) &&
(tuple.TupleDataLen != 5),
"bfl tpl size");
sprom->boardflags_lo = tuple.TupleData[1] |
((u16)tuple.TupleData[2] << 8);

View file

@ -99,7 +99,6 @@ enum rfkill_user_states {
#undef RFKILL_STATE_UNBLOCKED
#undef RFKILL_STATE_HARD_BLOCKED
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>

View file

@ -132,6 +132,11 @@ do { \
#endif /*__raw_spin_is_contended*/
#endif
/* The lock does not imply full memory barrier. */
#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
static inline void smp_mb__after_lock(void) { smp_mb(); }
#endif
/**
* spin_unlock_wait - wait until the spinlock gets unlocked
* @lock: the spinlock in question.

View file

@ -54,6 +54,7 @@
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>
#include <asm/atomic.h>
#include <net/dst.h>
@ -1241,6 +1242,74 @@ static inline int sk_has_allocations(const struct sock *sk)
return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}
/**
* sk_has_sleeper - check if there are any waiting processes
* @sk: socket
*
* Returns true if socket has waiting processes
*
* The purpose of the sk_has_sleeper and sock_poll_wait is to wrap the memory
* barrier call. They were added due to the race found within the tcp code.
*
* Consider following tcp code paths:
*
* CPU1 CPU2
*
* sys_select receive packet
* ... ...
* __add_wait_queue update tp->rcv_nxt
* ... ...
* tp->rcv_nxt check sock_def_readable
* ... {
* schedule ...
* if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
* wake_up_interruptible(sk->sk_sleep)
* ...
* }
*
* The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
* in its cache, and so does the tp->rcv_nxt update on CPU2 side. The CPU1
* could then endup calling schedule and sleep forever if there are no more
* data on the socket.
*
* The sk_has_sleeper is always called right after a call to read_lock, so we
* can use smp_mb__after_lock barrier.
*/
static inline int sk_has_sleeper(struct sock *sk)
{
/*
* We need to be sure we are in sync with the
* add_wait_queue modifications to the wait queue.
*
* This memory barrier is paired in the sock_poll_wait.
*/
smp_mb__after_lock();
return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
}
/**
* sock_poll_wait - place memory barrier behind the poll_wait call.
* @filp: file
* @wait_address: socket wait queue
* @p: poll_table
*
* See the comments in the sk_has_sleeper function.
*/
static inline void sock_poll_wait(struct file *filp,
wait_queue_head_t *wait_address, poll_table *p)
{
if (p && wait_address) {
poll_wait(filp, wait_address, p);
/*
* We need to be sure we are in sync with the
* socket flags modification.
*
* This memory barrier is paired in the sk_has_sleeper.
*/
smp_mb();
}
}
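The two helpers above are designed to be used as a pair, and the conversions further down (datagram_poll, sock_def_readable, vcc_poll and friends) all follow the same shape: the poll side registers the waiter through sock_poll_wait() before checking socket state, and the wakeup side checks for waiters through sk_has_sleeper() right after taking sk_callback_lock. A condensed sketch of that pairing, with hypothetical example_* names standing in for a real protocol's callbacks:

#include <linux/poll.h>
#include <net/sock.h>

/* poll/select path: record the waiter first; the smp_mb() inside
 * sock_poll_wait() makes the wait-queue update visible before the
 * state checks below. */
static unsigned int example_poll(struct file *file, struct socket *sock,
                                 poll_table *wait)
{
        struct sock *sk = sock->sk;
        unsigned int mask = 0;

        sock_poll_wait(file, sk->sk_sleep, wait);
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= POLLIN | POLLRDNORM;
        return mask;
}

/* wakeup path: smp_mb__after_lock() inside sk_has_sleeper() pairs with
 * the barrier above, so a sleeper added on another CPU is not missed. */
static void example_data_ready(struct sock *sk, int len)
{
        read_lock(&sk->sk_callback_lock);
        if (sk_has_sleeper(sk))
                wake_up_interruptible(sk->sk_sleep);
        read_unlock(&sk->sk_callback_lock);
}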
/*
* Queue a received datagram if it will fit. Stream and sequenced
* protocols can't normally use this as they need to fit buffers in

View file

@ -92,7 +92,7 @@ static void vcc_sock_destruct(struct sock *sk)
static void vcc_def_wakeup(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up(sk->sk_sleep);
read_unlock(&sk->sk_callback_lock);
}
@ -110,7 +110,7 @@ static void vcc_write_space(struct sock *sk)
read_lock(&sk->sk_callback_lock);
if (vcc_writable(sk)) {
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible(sk->sk_sleep);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
@ -594,7 +594,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
struct atm_vcc *vcc;
unsigned int mask;
poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
mask = 0;
vcc = ATM_SD(sock);

View file

@ -712,7 +712,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
unsigned int mask;
poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
mask = 0;
/* exceptional events? */

View file

@ -744,7 +744,7 @@ int netpoll_setup(struct netpoll *np)
np->name);
break;
}
cond_resched();
msleep(1);
}
/* If carrier appears to come up instantly, we don't

View file

@ -1715,7 +1715,7 @@ EXPORT_SYMBOL(sock_no_sendpage);
static void sock_def_wakeup(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible_all(sk->sk_sleep);
read_unlock(&sk->sk_callback_lock);
}
@ -1723,7 +1723,7 @@ static void sock_def_wakeup(struct sock *sk)
static void sock_def_error_report(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible_poll(sk->sk_sleep, POLLERR);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
read_unlock(&sk->sk_callback_lock);
@ -1732,7 +1732,7 @@ static void sock_def_error_report(struct sock *sk)
static void sock_def_readable(struct sock *sk, int len)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN |
POLLRDNORM | POLLRDBAND);
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
@ -1747,7 +1747,7 @@ static void sock_def_write_space(struct sock *sk)
* progress. --DaveM
*/
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
POLLWRNORM | POLLWRBAND);

View file

@ -196,7 +196,7 @@ void dccp_write_space(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible(sk->sk_sleep);
/* Should agree with poll, otherwise some programs break */
if (sock_writeable(sk))

View file

@ -311,7 +311,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
unsigned int mask;
struct sock *sk = sock->sk;
poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
if (sk->sk_state == DCCP_LISTEN)
return inet_csk_listen_poll(sk);

View file

@ -316,8 +316,8 @@ static inline void check_tnode(const struct tnode *tn)
static const int halve_threshold = 25;
static const int inflate_threshold = 50;
static const int halve_threshold_root = 8;
static const int inflate_threshold_root = 15;
static const int halve_threshold_root = 15;
static const int inflate_threshold_root = 25;
static void __alias_free_mem(struct rcu_head *head)

View file

@ -339,7 +339,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
struct sock *sk = sock->sk;
struct tcp_sock *tp = tcp_sk(sk);
poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
if (sk->sk_state == TCP_LISTEN)
return inet_csk_listen_poll(sk);

View file

@ -306,7 +306,7 @@ static inline int iucv_below_msglim(struct sock *sk)
static void iucv_sock_wake_msglim(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible_all(sk->sk_sleep);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
read_unlock(&sk->sk_callback_lock);
@ -1256,7 +1256,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
unsigned int mask = 0;
poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
if (sk->sk_state == IUCV_LISTEN)
return iucv_accept_poll(sk);

View file

@ -637,7 +637,7 @@ static void mesh_queue_preq(struct mesh_path *mpath, u8 flags)
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_preq_queue *preq_node;
preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_KERNEL);
preq_node = kmalloc(sizeof(struct mesh_preq_queue), GFP_ATOMIC);
if (!preq_node) {
printk(KERN_DEBUG "Mesh HWMP: could not allocate PREQ node\n");
return;

View file

@ -66,7 +66,7 @@ rix_to_ndx(struct minstrel_sta_info *mi, int rix)
for (i = rix; i >= 0; i--)
if (mi->r[i].rix == rix)
break;
WARN_ON(mi->r[i].rix != rix);
WARN_ON(i < 0);
return i;
}
@ -181,6 +181,9 @@ minstrel_tx_status(void *priv, struct ieee80211_supported_band *sband,
break;
ndx = rix_to_ndx(mi, ar[i].idx);
if (ndx < 0)
continue;
mi->r[ndx].attempts += ar[i].count;
if ((i != IEEE80211_TX_MAX_RATES - 1) && (ar[i + 1].idx < 0))

View file

@ -63,7 +63,7 @@ static void rxrpc_write_space(struct sock *sk)
_enter("%p", sk);
read_lock(&sk->sk_callback_lock);
if (rxrpc_writable(sk)) {
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible(sk->sk_sleep);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
@ -588,7 +588,7 @@ static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
unsigned int mask;
struct sock *sk = sock->sk;
poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
mask = 0;
/* the socket is readable if there are any messages waiting on the Rx

View file

@ -6652,21 +6652,6 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
finish_wait(sk->sk_sleep, &wait);
}
static void sctp_sock_rfree_frag(struct sk_buff *skb)
{
struct sk_buff *frag;
if (!skb->data_len)
goto done;
/* Don't forget the fragments. */
skb_walk_frags(skb, frag)
sctp_sock_rfree_frag(frag);
done:
sctp_sock_rfree(skb);
}
static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
{
struct sk_buff *frag;
@ -6776,7 +6761,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
sctp_sock_rfree_frag(skb);
__skb_unlink(skb, &oldsk->sk_receive_queue);
__skb_queue_tail(&newsk->sk_receive_queue, skb);
sctp_skb_set_owner_r_frag(skb, newsk);
@ -6807,7 +6791,6 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) {
event = sctp_skb2event(skb);
if (event->asoc == assoc) {
sctp_sock_rfree_frag(skb);
__skb_unlink(skb, &oldsp->pd_lobby);
__skb_queue_tail(queue, skb);
sctp_skb_set_owner_r_frag(skb, newsk);
@ -6822,15 +6805,11 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
}
sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp) {
sctp_sock_rfree_frag(skb);
sctp_skb_for_each(skb, &assoc->ulpq.reasm, tmp)
sctp_skb_set_owner_r_frag(skb, newsk);
}
sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp) {
sctp_sock_rfree_frag(skb);
sctp_skb_for_each(skb, &assoc->ulpq.lobby, tmp)
sctp_skb_set_owner_r_frag(skb, newsk);
}
/* Set the type of socket to indicate that it is peeled off from the
* original UDP-style socket or created with the accept() call on a

View file

@ -315,7 +315,7 @@ static void unix_write_space(struct sock *sk)
{
read_lock(&sk->sk_callback_lock);
if (unix_writable(sk)) {
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
if (sk_has_sleeper(sk))
wake_up_interruptible_sync(sk->sk_sleep);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
@ -1985,7 +1985,7 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
struct sock *sk = sock->sk;
unsigned int mask;
poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
mask = 0;
/* exceptional events? */
@ -2022,7 +2022,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk, *other;
unsigned int mask, writable;
poll_wait(file, sk->sk_sleep, wait);
sock_poll_wait(file, sk->sk_sleep, wait);
mask = 0;
/* exceptional events? */
@ -2053,7 +2053,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
other = unix_peer_get(sk);
if (other) {
if (unix_peer(other) != sk) {
poll_wait(file, &unix_sk(other)->peer_wait,
sock_poll_wait(file, &unix_sk(other)->peer_wait,
wait);
if (unix_recvq_full(other))
writable = 0;

View file

@ -447,6 +447,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
rdev = __cfg80211_drv_from_info(info);
if (IS_ERR(rdev)) {
mutex_unlock(&cfg80211_mutex);
result = PTR_ERR(rdev);
goto unlock;
}

View file

@ -366,7 +366,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
found = rb_find_bss(dev, res);
if (found) {
kref_get(&found->ref);
found->pub.beacon_interval = res->pub.beacon_interval;
found->pub.tsf = res->pub.tsf;
found->pub.signal = res->pub.signal;