Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (94 commits)
  netfilter: ctnetlink: fix gcc warning during compilation
  net/netrom: Fix socket locking
  netlabel: Always remove the correct address selector
  ucc_geth.c: Fix upsmr setting in RMII mode
  8139too: fix HW initial flow
  af_iucv: Fix race when queuing incoming iucv messages
  af_iucv: Test additional sk states in iucv_sock_shutdown
  af_iucv: Reject incoming msgs if RECV_SHUTDOWN is set
  af_iucv: fix oops in iucv_sock_recvmsg() for MSG_PEEK flag
  af_iucv: consider state IUCV_CLOSING when closing a socket
  iwlwifi: DMA fixes
  iwlwifi: add debugging for TX path
  mwl8k: fix build warning.
  mac80211: fix alignment calculation bug
  mac80211: do not print WARN if config interface
  iwl3945: use cancel_delayed_work_sync to cancel rfkill_poll
  iwlwifi: fix EEPROM validation mask to include OTP only devices
  atmel: fix netdev ops conversion
  pcnet_cs: add cis(firmware) of the Allied Telesis LA-PCM
  mlx4_en: Fix cleanup if workqueue create in mlx4_en_add() fails
  ...
Linus Torvalds 2009-04-24 07:46:51 -07:00
Parent dc0046c758 29fe1b4812
Commit 3e241ff0c5
83 changed files with 849 additions and 485 deletions


@ -3991,8 +3991,8 @@ NETWORKING [GENERAL]
P: David S. Miller
M: davem@davemloft.net
L: netdev@vger.kernel.org
W: http://linux-net.osdl.org/
T: git kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6.git
W: http://www.linuxfoundation.org/en/Net
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
S: Maintained
F: net/
F: include/net/
@ -4033,6 +4033,13 @@ F: net/wireless/
F: include/net/ieee80211*
F: include/net/wireless.h
NETWORKING DRIVERS
L: netdev@vger.kernel.org
W: http://www.linuxfoundation.org/en/Net
T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
S: Odd Fixes
F: drivers/net/
NETXEN (1/10) GbE SUPPORT
P: Dhananjay Phadke
M: dhananjay@netxen.com


@ -1383,6 +1383,11 @@ static void rtl8139_hw_start (struct net_device *dev)
RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4)));
tp->cur_rx = 0;
/* init Rx ring buffer DMA address */
RTL_W32_F (RxBuf, tp->rx_ring_dma);
/* Must enable Tx/Rx before setting transfer thresholds! */
RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
@ -1390,8 +1395,6 @@ static void rtl8139_hw_start (struct net_device *dev)
RTL_W32 (RxConfig, tp->rx_config);
RTL_W32 (TxConfig, rtl8139_tx_config);
tp->cur_rx = 0;
rtl_check_media (dev, 1);
if (tp->chipset >= CH_8139B) {
@ -1406,9 +1409,6 @@ static void rtl8139_hw_start (struct net_device *dev)
/* Lock Config[01234] and BMCR register writes */
RTL_W8 (Cfg9346, Cfg9346_Lock);
/* init Rx ring buffer DMA address */
RTL_W32_F (RxBuf, tp->rx_ring_dma);
/* init Tx buffer DMA addresses */
for (i = 0; i < NUM_TX_DESC; i++)
RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));


@ -28,9 +28,9 @@ if NETDEVICES
config COMPAT_NET_DEV_OPS
default y
bool "Enable older network device API compatiablity"
bool "Enable older network device API compatibility"
---help---
This option enables kernel compatiability with older network devices
This option enables kernel compatibility with older network devices
that do not use net_device_ops interface.
If unsure, say Y.


@ -1117,8 +1117,8 @@ static void cxgb_down(struct adapter *adapter)
spin_unlock_irq(&adapter->work_lock);
free_irq_resources(adapter);
flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
quiesce_rx(adapter);
flush_workqueue(cxgb3_wq); /* wait for external IRQ handler */
}
static void schedule_chk_task(struct adapter *adap)
@ -1187,6 +1187,9 @@ static int offload_close(struct t3cdev *tdev)
sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
/* Flush work scheduled while releasing TIDs */
flush_scheduled_work();
tdev->lldev = NULL;
cxgb3_set_dummy_ops(tdev);
t3_tp_set_offload_mode(adapter, 0);
@ -1232,6 +1235,10 @@ static int cxgb_close(struct net_device *dev)
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
if (!adapter->open_device_map)
return 0;
/* Stop link fault interrupts */
t3_xgm_intr_disable(adapter, pi->port_id);
t3_read_reg(adapter, A_XGM_INT_STATUS + pi->mac.offset);
@ -1247,8 +1254,7 @@ static int cxgb_close(struct net_device *dev)
spin_unlock_irq(&adapter->work_lock);
if (!(adapter->open_device_map & PORT_MASK))
cancel_rearming_delayed_workqueue(cxgb3_wq,
&adapter->adap_check_task);
cancel_delayed_work_sync(&adapter->adap_check_task);
if (!adapter->open_device_map)
cxgb_down(adapter);
@ -2493,6 +2499,7 @@ static void check_link_status(struct adapter *adapter)
spin_lock_irq(&adapter->work_lock);
if (p->link_fault) {
t3_link_fault(adapter, i);
spin_unlock_irq(&adapter->work_lock);
continue;
}
@ -2554,9 +2561,7 @@ static void t3_adap_check_task(struct work_struct *work)
adapter->check_task_cnt++;
/* Check link status for PHYs without interrupts */
if (p->linkpoll_period)
check_link_status(adapter);
check_link_status(adapter);
/* Accumulate MAC stats if needed */
if (!p->linkpoll_period ||
@ -2680,21 +2685,6 @@ void t3_os_ext_intr_handler(struct adapter *adapter)
spin_unlock(&adapter->work_lock);
}
static void link_fault_task(struct work_struct *work)
{
struct adapter *adapter = container_of(work, struct adapter,
link_fault_handler_task);
int i;
for_each_port(adapter, i) {
struct net_device *netdev = adapter->port[i];
struct port_info *pi = netdev_priv(netdev);
if (pi->link_fault)
t3_link_fault(adapter, i);
}
}
void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
{
struct net_device *netdev = adapter->port[port_id];
@ -2702,7 +2692,6 @@ void t3_os_link_fault_handler(struct adapter *adapter, int port_id)
spin_lock(&adapter->work_lock);
pi->link_fault = 1;
queue_work(cxgb3_wq, &adapter->link_fault_handler_task);
spin_unlock(&adapter->work_lock);
}
@ -2838,6 +2827,9 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
struct adapter *adapter = pci_get_drvdata(pdev);
int ret;
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
ret = t3_adapter_error(adapter, 0);
/* Request a slot reset. */
@ -2932,8 +2924,13 @@ static int __devinit cxgb_enable_msix(struct adapter *adap)
while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
vectors = err;
if (!err && vectors < (adap->params.nports + 1))
if (err < 0)
pci_disable_msix(adap->pdev);
if (!err && vectors < (adap->params.nports + 1)) {
pci_disable_msix(adap->pdev);
err = -1;
}
if (!err) {
for (i = 0; i < vectors; ++i)
@ -3082,7 +3079,6 @@ static int __devinit init_one(struct pci_dev *pdev,
INIT_LIST_HEAD(&adapter->adapter_list);
INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
INIT_WORK(&adapter->link_fault_handler_task, link_fault_task);
INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);


@ -1202,7 +1202,6 @@ void t3_link_changed(struct adapter *adapter, int port_id)
struct cphy *phy = &pi->phy;
struct cmac *mac = &pi->mac;
struct link_config *lc = &pi->link_config;
int force_link_down = 0;
phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
@ -1218,14 +1217,9 @@ void t3_link_changed(struct adapter *adapter, int port_id)
status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
if (status & F_LINKFAULTCHANGE) {
mac->stats.link_faults++;
force_link_down = 1;
pi->link_fault = 1;
}
t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
if (force_link_down) {
t3_os_link_fault_handler(adapter, port_id);
return;
}
}
if (lc->requested_fc & PAUSE_AUTONEG)
@ -1292,9 +1286,6 @@ void t3_link_fault(struct adapter *adapter, int port_id)
/* Account link faults only when the phy reports a link up */
if (link_ok)
mac->stats.link_faults++;
msleep(1000);
t3_os_link_fault_handler(adapter, port_id);
} else {
if (link_ok)
t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,


@ -3834,7 +3834,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info;
unsigned int i, eop;
unsigned int count = 0;
bool cleaned = false;
unsigned int total_tx_bytes=0, total_tx_packets=0;
i = tx_ring->next_to_clean;
@ -3843,7 +3842,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
(count < tx_ring->count)) {
for (cleaned = false; !cleaned; count++) {
bool cleaned = false;
for ( ; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
@ -3871,7 +3871,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
tx_ring->next_to_clean = i;
#define TX_WAKE_THRESHOLD 32
if (unlikely(cleaned && netif_carrier_ok(netdev) &&
if (unlikely(count && netif_carrier_ok(netdev) &&
E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.


@ -621,7 +621,6 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
struct e1000_buffer *buffer_info;
unsigned int i, eop;
unsigned int count = 0;
bool cleaned = false;
unsigned int total_tx_bytes = 0, total_tx_packets = 0;
i = tx_ring->next_to_clean;
@ -630,7 +629,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
(count < tx_ring->count)) {
for (cleaned = 0; !cleaned; count++) {
bool cleaned = false;
for (; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
@ -661,8 +661,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
tx_ring->next_to_clean = i;
#define TX_WAKE_THRESHOLD 32
if (cleaned && netif_carrier_ok(netdev) &&
e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
if (count && netif_carrier_ok(netdev) &&
e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
/* Make sure that anybody stopping the queue after this
* sees the new next_to_clean.
*/


@ -290,7 +290,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw,
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
struct ixgbe_dcb_config *dcb_config)
{
u32 i, reg;
u32 i, reg, rx_pba_size;
/* If PFC is disabled globally then fall back to LFC. */
if (!dcb_config->pfc_mode_enable) {
@ -301,17 +301,23 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
/* Config and remember Tx */
if (dcb_config->rx_pba_cfg == pba_equal)
rx_pba_size = IXGBE_RXPBSIZE_64KB;
else
rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
: IXGBE_RXPBSIZE_48KB;
reg = ((rx_pba_size >> 5) & 0xFFE0);
if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) {
reg = hw->fc.high_water | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
reg = hw->fc.low_water | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
} else {
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
}
dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
reg |= IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
reg = ((rx_pba_size >> 2) & 0xFFE0);
if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
reg |= IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
}
/* Configure pause time (2 TCs per register) */


@ -2841,11 +2841,55 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
}
ret = true;
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
for (i = 0; i < dcb_i; i++) {
adapter->rx_ring[i].reg_idx = i << 4;
adapter->tx_ring[i].reg_idx = i << 4;
if (dcb_i == 8) {
/*
* Tx TC0 starts at: descriptor queue 0
* Tx TC1 starts at: descriptor queue 32
* Tx TC2 starts at: descriptor queue 64
* Tx TC3 starts at: descriptor queue 80
* Tx TC4 starts at: descriptor queue 96
* Tx TC5 starts at: descriptor queue 104
* Tx TC6 starts at: descriptor queue 112
* Tx TC7 starts at: descriptor queue 120
*
* Rx TC0-TC7 are offset by 16 queues each
*/
for (i = 0; i < 3; i++) {
adapter->tx_ring[i].reg_idx = i << 5;
adapter->rx_ring[i].reg_idx = i << 4;
}
for ( ; i < 5; i++) {
adapter->tx_ring[i].reg_idx =
((i + 2) << 4);
adapter->rx_ring[i].reg_idx = i << 4;
}
for ( ; i < dcb_i; i++) {
adapter->tx_ring[i].reg_idx =
((i + 8) << 3);
adapter->rx_ring[i].reg_idx = i << 4;
}
ret = true;
} else if (dcb_i == 4) {
/*
* Tx TC0 starts at: descriptor queue 0
* Tx TC1 starts at: descriptor queue 64
* Tx TC2 starts at: descriptor queue 96
* Tx TC3 starts at: descriptor queue 112
*
* Rx TC0-TC3 are offset by 32 queues each
*/
adapter->tx_ring[0].reg_idx = 0;
adapter->tx_ring[1].reg_idx = 64;
adapter->tx_ring[2].reg_idx = 96;
adapter->tx_ring[3].reg_idx = 112;
for (i = 0 ; i < dcb_i; i++)
adapter->rx_ring[i].reg_idx = i << 5;
ret = true;
} else {
ret = false;
}
ret = true;
} else {
ret = false;
}
@ -3946,7 +3990,7 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
}
hw->mac.ops.setup_sfp(hw);
if (!adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)
if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
/* This will also work for DA Twinax connections */
schedule_work(&adapter->multispeed_fiber_task);
adapter->flags &= ~IXGBE_FLAG_IN_SFP_MOD_TASK;


@ -316,10 +316,11 @@ static void macb_tx(struct macb *bp)
dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n",
(unsigned long)status);
if (status & MACB_BIT(UND)) {
if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
int i;
printk(KERN_ERR "%s: TX underrun, resetting buffers\n",
bp->dev->name);
printk(KERN_ERR "%s: TX %s, resetting buffers\n",
bp->dev->name, status & MACB_BIT(UND) ?
"underrun" : "retry limit exceeded");
/* Transfer ongoing, disable transmitter, to avoid confusion */
if (status & MACB_BIT(TGO))
@ -520,27 +521,10 @@ static int macb_poll(struct napi_struct *napi, int budget)
macb_writel(bp, RSR, status);
work_done = 0;
if (!status) {
/*
* This may happen if an interrupt was pending before
* this function was called last time, and no packets
* have been received since.
*/
napi_complete(napi);
goto out;
}
dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n",
(unsigned long)status, budget);
if (!(status & MACB_BIT(REC))) {
dev_warn(&bp->pdev->dev,
"No RX buffers complete, status = %02lx\n",
(unsigned long)status);
napi_complete(napi);
goto out;
}
work_done = macb_rx(bp, budget);
if (work_done < budget)
napi_complete(napi);
@ -549,7 +533,6 @@ static int macb_poll(struct napi_struct *napi, int budget)
* We've done what we can to clean the buffers. Make sure we
* get notified when new packets arrive.
*/
out:
macb_writel(bp, IER, MACB_RX_INT_FLAGS);
/* TODO: Handle errors */
@ -590,7 +573,8 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
}
if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND)))
if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) |
MACB_BIT(ISR_RLE)))
macb_tx(bp);
/*


@ -376,7 +376,8 @@ static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev)
const struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
if (lowerdev->ethtool_ops->get_rx_csum == NULL)
if (lowerdev->ethtool_ops == NULL ||
lowerdev->ethtool_ops->get_rx_csum == NULL)
return 0;
return lowerdev->ethtool_ops->get_rx_csum(lowerdev);
}
@ -387,7 +388,8 @@ static int macvlan_ethtool_get_settings(struct net_device *dev,
const struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
if (!lowerdev->ethtool_ops->get_settings)
if (!lowerdev->ethtool_ops ||
!lowerdev->ethtool_ops->get_settings)
return -EOPNOTSUPP;
return lowerdev->ethtool_ops->get_settings(lowerdev, cmd);
@ -398,7 +400,8 @@ static u32 macvlan_ethtool_get_flags(struct net_device *dev)
const struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
if (!lowerdev->ethtool_ops->get_flags)
if (!lowerdev->ethtool_ops ||
!lowerdev->ethtool_ops->get_flags)
return 0;
return lowerdev->ethtool_ops->get_flags(lowerdev);
}


@ -181,7 +181,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mdev->workqueue = create_singlethread_workqueue("mlx4_en");
if (!mdev->workqueue) {
err = -ENOMEM;
goto err_close_nic;
goto err_mr;
}
/* At this stage all non-port specific tasks are complete:
@ -214,9 +214,8 @@ err_free_netdev:
flush_workqueue(mdev->workqueue);
/* Stop event queue before we drop down to release shared SW state */
err_close_nic:
destroy_workqueue(mdev->workqueue);
err_mr:
mlx4_mr_free(dev, &mdev->mr);
err_uar:


@ -348,11 +348,9 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
if (netif_msg_timer(priv))
mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
if (netif_carrier_ok(dev)) {
priv->port_stats.tx_timeout++;
mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
queue_work(mdev->workqueue, &priv->watchdog_task);
}
priv->port_stats.tx_timeout++;
mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
queue_work(mdev->workqueue, &priv->watchdog_task);
}
@ -761,9 +759,14 @@ static void mlx4_en_restart(struct work_struct *work)
struct net_device *dev = priv->dev;
mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
mlx4_en_stop_port(dev);
if (mlx4_en_start_port(dev))
mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
mlx4_en_stop_port(dev);
if (mlx4_en_start_port(dev))
mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
}
mutex_unlock(&mdev->state_lock);
}
@ -1054,7 +1057,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
* Set driver features
*/
dev->features |= NETIF_F_SG;
dev->features |= NETIF_F_HW_CSUM;
dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
dev->features |= NETIF_F_HIGHDMA;
dev->features |= NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |


@ -151,6 +151,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
int i;
mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
if (IS_ERR(mailbox))
@ -165,38 +166,18 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
spin_lock_bh(&priv->stats_lock);
stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) -
be32_to_cpu(mlx4_en_stats->RDROP);
stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
be64_to_cpu(mlx4_en_stats->TTOT_novlan) +
be64_to_cpu(mlx4_en_stats->TTOT_loopbk);
stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
be64_to_cpu(mlx4_en_stats->ROCT_novlan);
stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) +
be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk);
stats->rx_packets = 0;
stats->rx_bytes = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
stats->rx_packets += priv->rx_ring[i].packets;
stats->rx_bytes += priv->rx_ring[i].bytes;
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
for (i = 0; i <= priv->tx_ring_num; i++) {
stats->tx_packets += priv->tx_ring[i].packets;
stats->tx_bytes += priv->tx_ring[i].bytes;
}
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
be32_to_cpu(mlx4_en_stats->RdropLength) +


@ -94,3 +94,9 @@ void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
vunmap(buf->direct.buf);
}
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
return;
}


@ -436,8 +436,9 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
/* Initialize page allocators */
err = mlx4_en_init_allocator(priv, ring);
if (err) {
mlx4_err(mdev, "Failed initializing ring allocator\n");
goto err_allocator;
mlx4_err(mdev, "Failed initializing ring allocator\n");
ring_ind--;
goto err_allocator;
}
/* Fill Rx buffers */
@ -467,6 +468,7 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
ring->wqres.db.dma, &ring->srq);
if (err){
mlx4_err(mdev, "Failed to allocate srq\n");
ring_ind--;
goto err_srq;
}
ring->srq.event = mlx4_en_srq_event;
@ -926,12 +928,6 @@ void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
}
}
static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
return;
}
static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
int qpn, int srqn, int cqn,
enum mlx4_qp_state *state,


@ -112,6 +112,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
goto err_reserve;
}
ring->qp.event = mlx4_en_sqp_event;
return 0;


@ -538,6 +538,7 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int srqn,
struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
int mlx4_en_map_buffer(struct mlx4_buf *buf);
void mlx4_en_unmap_buffer(struct mlx4_buf *buf);


@ -1758,7 +1758,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"),
PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "LA-PCM.cis"),
PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),


@ -2101,6 +2101,9 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
if (ret) {
pr_debug("%s: WEP/WPA setup failed %d\n", __func__,
ret);
ret = -EPERM;
gelic_wl_send_iwap_event(wl, NULL);
goto out;
}
/* start association */


@ -2190,7 +2190,14 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
if (!(tmp & EEPROM_ADDR_COMPLETE))
return -EBUSY;
*val = tr32(GRC_EEPROM_DATA);
tmp = tr32(GRC_EEPROM_DATA);
/*
* The data will always be opposite the native endian
* format. Perform a blind byteswap to compensate.
*/
*val = swab32(tmp);
return 0;
}
@ -10663,7 +10670,13 @@ static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
memcpy(&data, buf + i, 4);
tw32(GRC_EEPROM_DATA, be32_to_cpu(data));
/*
* The SEEPROM interface expects the data to always be opposite
* the native endian format. We accomplish this by reversing
* all the operations that would have been performed on the
* data from a call to tg3_nvram_read_be32().
*/
tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
val = tr32(GRC_EEPROM_ADDR);
tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
@ -12443,13 +12456,8 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
/* Next, try NVRAM. */
if (!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
!tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
dev->dev_addr[0] = ((hi >> 16) & 0xff);
dev->dev_addr[1] = ((hi >> 24) & 0xff);
dev->dev_addr[2] = ((lo >> 0) & 0xff);
dev->dev_addr[3] = ((lo >> 8) & 0xff);
dev->dev_addr[4] = ((lo >> 16) & 0xff);
dev->dev_addr[5] = ((lo >> 24) & 0xff);
memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
}
/* Finally just fetch it out of the MAC control regs. */
else {


@ -93,7 +93,6 @@ struct tun_file {
atomic_t count;
struct tun_struct *tun;
struct net *net;
wait_queue_head_t read_wait;
};
struct tun_sock;
@ -156,6 +155,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
tfile->tun = tun;
tun->tfile = tfile;
dev_hold(tun->dev);
sock_hold(tun->sk);
atomic_inc(&tfile->count);
out:
@ -165,11 +165,8 @@ out:
static void __tun_detach(struct tun_struct *tun)
{
struct tun_file *tfile = tun->tfile;
/* Detach from net device */
netif_tx_lock_bh(tun->dev);
tfile->tun = NULL;
tun->tfile = NULL;
netif_tx_unlock_bh(tun->dev);
@ -333,12 +330,19 @@ static void tun_net_uninit(struct net_device *dev)
/* Inform the methods they need to stop using the dev.
*/
if (tfile) {
wake_up_all(&tfile->read_wait);
wake_up_all(&tun->socket.wait);
if (atomic_dec_and_test(&tfile->count))
__tun_detach(tun);
}
}
static void tun_free_netdev(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
sock_put(tun->sk);
}
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
@ -393,7 +397,7 @@ static int tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Notify and wake up reader process */
if (tun->flags & TUN_FASYNC)
kill_fasync(&tun->fasync, SIGIO, POLL_IN);
wake_up_interruptible(&tun->tfile->read_wait);
wake_up_interruptible(&tun->socket.wait);
return 0;
drop:
@ -490,7 +494,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
poll_wait(file, &tfile->read_wait, wait);
poll_wait(file, &tun->socket.wait, wait);
if (!skb_queue_empty(&tun->readq))
mask |= POLLIN | POLLRDNORM;
@ -763,7 +767,7 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
goto out;
}
add_wait_queue(&tfile->read_wait, &wait);
add_wait_queue(&tun->socket.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
@ -794,7 +798,7 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
}
current->state = TASK_RUNNING;
remove_wait_queue(&tfile->read_wait, &wait);
remove_wait_queue(&tun->socket.wait, &wait);
out:
tun_put(tun);
@ -811,7 +815,7 @@ static void tun_setup(struct net_device *dev)
tun->group = -1;
dev->ethtool_ops = &tun_ethtool_ops;
dev->destructor = free_netdev;
dev->destructor = tun_free_netdev;
}
/* Trivial set of netlink ops to allow deleting tun or tap
@ -848,7 +852,7 @@ static void tun_sock_write_space(struct sock *sk)
static void tun_sock_destruct(struct sock *sk)
{
dev_put(container_of(sk, struct tun_sock, sk)->tun->dev);
free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev);
}
static struct proto tun_proto = {
@ -862,7 +866,6 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
struct sock *sk;
struct tun_struct *tun;
struct net_device *dev;
struct tun_file *tfile = file->private_data;
int err;
dev = __dev_get_by_name(net, ifr->ifr_name);
@ -920,13 +923,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (!sk)
goto err_free_dev;
/* This ref count is for tun->sk. */
dev_hold(dev);
init_waitqueue_head(&tun->socket.wait);
sock_init_data(&tun->socket, sk);
sk->sk_write_space = tun_sock_write_space;
sk->sk_destruct = tun_sock_destruct;
sk->sk_sndbuf = INT_MAX;
sk->sk_sleep = &tfile->read_wait;
tun->sk = sk;
container_of(sk, struct tun_sock, sk)->tun = tun;
@ -942,11 +942,13 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
err = -EINVAL;
err = register_netdevice(tun->dev);
if (err < 0)
goto err_free_dev;
goto err_free_sk;
sk->sk_destruct = tun_sock_destruct;
err = tun_attach(tun, file);
if (err < 0)
goto err_free_dev;
goto failed;
}
DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name);
@ -1266,7 +1268,6 @@ static int tun_chr_open(struct inode *inode, struct file * file)
atomic_set(&tfile->count, 0);
tfile->tun = NULL;
tfile->net = get_net(current->nsproxy->net_ns);
init_waitqueue_head(&tfile->read_wait);
file->private_data = tfile;
return 0;
}
@ -1284,14 +1285,16 @@ static int tun_chr_close(struct inode *inode, struct file *file)
__tun_detach(tun);
/* If desireable, unregister the netdevice. */
if (!(tun->flags & TUN_PERSIST)) {
sock_put(tun->sk);
if (!(tun->flags & TUN_PERSIST))
unregister_netdevice(tun->dev);
}
rtnl_unlock();
}
tun = tfile->tun;
if (tun)
sock_put(tun->sk);
put_net(tfile->net);
kfree(tfile);


@ -1394,7 +1394,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
upsmr |= UCC_GETH_UPSMR_RPM;
if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
upsmr |= UCC_GETH_UPSMR_RPM;
switch (ugeth->max_speed) {
case SPEED_10:
upsmr |= UCC_GETH_UPSMR_R10M;


@ -899,6 +899,7 @@ static int pegasus_start_xmit(struct sk_buff *skb, struct net_device *net)
/* cleanup should already have been scheduled */
break;
case -ENODEV: /* disconnect() upcoming */
case -EPERM:
netif_device_detach(pegasus->net);
break;
default:


@ -6713,11 +6713,11 @@ static int airo_set_auth(struct net_device *dev,
local->config.authType = AUTH_ENCRYPT;
} else
return -EINVAL;
break;
/* Commit the changes to flags if needed */
if (local->config.authType != currentAuthType)
set_bit (FLAG_COMMIT, &local->flags);
break;
}
case IW_AUTH_WPA_ENABLED:


@ -310,7 +310,7 @@ struct ar9170_tx_control {
struct ar9170_rx_head {
u8 plcp[12];
};
} __packed;
struct ar9170_rx_tail {
union {
@ -318,16 +318,16 @@ struct ar9170_rx_tail {
u8 rssi_ant0, rssi_ant1, rssi_ant2,
rssi_ant0x, rssi_ant1x, rssi_ant2x,
rssi_combined;
};
} __packed;
u8 rssi[7];
};
} __packed;
u8 evm_stream0[6], evm_stream1[6];
u8 phy_err;
u8 SAidx, DAidx;
u8 error;
u8 status;
};
} __packed;
#define AR9170_ENC_ALG_NONE 0x0
#define AR9170_ENC_ALG_WEP64 0x1


@ -59,6 +59,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0cf3, 0x9170) },
/* Atheros TG121N */
{ USB_DEVICE(0x0cf3, 0x1001) },
/* Cace Airpcap NX */
{ USB_DEVICE(0xcace, 0x0300) },
/* D-Link DWA 160A */
{ USB_DEVICE(0x07d1, 0x3c10) },
/* Netgear WNDA3100 */
@ -67,6 +69,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0846, 0x9001) },
/* Zydas ZD1221 */
{ USB_DEVICE(0x0ace, 0x1221) },
/* ZyXEL NWD271N */
{ USB_DEVICE(0x0586, 0x3417) },
/* Z-Com UB81 BG */
{ USB_DEVICE(0x0cde, 0x0023) },
/* Z-Com UB82 ABG */
@ -619,6 +623,39 @@ static int ar9170_usb_open(struct ar9170 *ar)
return 0;
}
static int ar9170_usb_init_device(struct ar9170_usb *aru)
{
int err;
err = ar9170_usb_alloc_rx_irq_urb(aru);
if (err)
goto err_out;
err = ar9170_usb_alloc_rx_bulk_urbs(aru);
if (err)
goto err_unrx;
err = ar9170_usb_upload_firmware(aru);
if (err) {
err = ar9170_echo_test(&aru->common, 0x60d43110);
if (err) {
/* force user invention, by disabling the device */
err = usb_driver_set_configuration(aru->udev, -1);
dev_err(&aru->udev->dev, "device is in a bad state. "
"please reconnect it!\n");
goto err_unrx;
}
}
return 0;
err_unrx:
ar9170_usb_cancel_urbs(aru);
err_out:
return err;
}
static int ar9170_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@ -654,32 +691,16 @@ static int ar9170_usb_probe(struct usb_interface *intf,
err = ar9170_usb_reset(aru);
if (err)
goto err_unlock;
goto err_freehw;
err = ar9170_usb_request_firmware(aru);
if (err)
goto err_unlock;
goto err_freehw;
err = ar9170_usb_alloc_rx_irq_urb(aru);
err = ar9170_usb_init_device(aru);
if (err)
goto err_freefw;
err = ar9170_usb_alloc_rx_bulk_urbs(aru);
if (err)
goto err_unrx;
err = ar9170_usb_upload_firmware(aru);
if (err) {
err = ar9170_echo_test(&aru->common, 0x60d43110);
if (err) {
/* force user invention, by disabling the device */
err = usb_driver_set_configuration(aru->udev, -1);
dev_err(&aru->udev->dev, "device is in a bad state. "
"please reconnect it!\n");
goto err_unrx;
}
}
err = ar9170_usb_open(ar);
if (err)
goto err_unrx;
@ -699,7 +720,7 @@ err_freefw:
release_firmware(aru->init_values);
release_firmware(aru->firmware);
err_unlock:
err_freehw:
usb_set_intfdata(intf, NULL);
usb_put_dev(udev);
ieee80211_free_hw(ar->hw);
@ -726,12 +747,65 @@ static void ar9170_usb_disconnect(struct usb_interface *intf)
ieee80211_free_hw(aru->common.hw);
}
#ifdef CONFIG_PM
static int ar9170_suspend(struct usb_interface *intf,
pm_message_t message)
{
struct ar9170_usb *aru = usb_get_intfdata(intf);
if (!aru)
return -ENODEV;
aru->common.state = AR9170_IDLE;
ar9170_usb_cancel_urbs(aru);
return 0;
}
static int ar9170_resume(struct usb_interface *intf)
{
struct ar9170_usb *aru = usb_get_intfdata(intf);
int err;
if (!aru)
return -ENODEV;
usb_unpoison_anchored_urbs(&aru->rx_submitted);
usb_unpoison_anchored_urbs(&aru->tx_submitted);
/*
* FIXME: firmware upload will fail on resume.
* but this is better than a hang!
*/
err = ar9170_usb_init_device(aru);
if (err)
goto err_unrx;
err = ar9170_usb_open(&aru->common);
if (err)
goto err_unrx;
return 0;
err_unrx:
aru->common.state = AR9170_IDLE;
ar9170_usb_cancel_urbs(aru);
return err;
}
#endif /* CONFIG_PM */
static struct usb_driver ar9170_driver = {
.name = "ar9170usb",
.probe = ar9170_usb_probe,
.disconnect = ar9170_usb_disconnect,
.id_table = ar9170_usb_ids,
.soft_unbind = 1,
#ifdef CONFIG_PM
.suspend = ar9170_suspend,
.resume = ar9170_resume,
#endif /* CONFIG_PM */
};
static int __init ar9170_init(void)


@ -250,6 +250,8 @@ static struct usb_device_id dev_table[] = {
{ USB_DEVICE(0x03eb, 0x7617), USB_DEVICE_DATA(BOARD_505A) },
/* Siemens Gigaset USB WLAN Adapter 11 */
{ USB_DEVICE(0x1690, 0x0701), USB_DEVICE_DATA(BOARD_505A) },
/* OQO Model 01+ Internal Wi-Fi */
{ USB_DEVICE(0x1557, 0x0002), USB_DEVICE_DATA(BOARD_505A) },
/*
* at76c505amx-rfmd
*/


@ -493,6 +493,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
int hdrlen, padsize, retval;
bool decrypt_error = false;
u8 keyix;
__le16 fc;
spin_lock_bh(&sc->rx.rxbuflock);
@ -606,6 +607,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
/* see if any padding is done by the hw and remove it */
hdr = (struct ieee80211_hdr *)skb->data;
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
fc = hdr->frame_control;
/* The MAC header is padded to have 32-bit boundary if the
* packet payload is non-zero. The general calculation for
@ -690,7 +692,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
sc->rx.rxotherant = 0;
}
if (ieee80211_is_beacon(hdr->frame_control) &&
if (ieee80211_is_beacon(fc) &&
(sc->sc_flags & SC_OP_WAIT_FOR_BEACON)) {
sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);


@ -1502,7 +1502,6 @@ static const struct net_device_ops atmel_netdev_ops = {
.ndo_set_mac_address = atmel_set_mac_address,
.ndo_start_xmit = start_tx,
.ndo_do_ioctl = atmel_ioctl,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};


@ -555,11 +555,32 @@ address_error:
return 1;
}
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
unsigned char *f = skb->data + ring->frameoffset;
return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}
static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
struct b43_rxhdr_fw4 *rxhdr;
unsigned char *frame;
/* This poisons the RX buffer to detect DMA failures. */
rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
rxhdr->frame_len = 0;
B43_WARN_ON(ring->rx_buffersize < ring->frameoffset + sizeof(struct b43_plcp_hdr6) + 2);
frame = skb->data + ring->frameoffset;
memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}
static int setup_rx_descbuffer(struct b43_dmaring *ring,
struct b43_dmadesc_generic *desc,
struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
struct b43_rxhdr_fw4 *rxhdr;
dma_addr_t dmaaddr;
struct sk_buff *skb;
@ -568,6 +589,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
if (unlikely(!skb))
return -ENOMEM;
b43_poison_rx_buffer(ring, skb);
dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
/* ugh. try to realloc in zone_dma */
@ -578,6 +600,7 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
if (unlikely(!skb))
return -ENOMEM;
b43_poison_rx_buffer(ring, skb);
dmaaddr = map_descbuffer(ring, skb->data,
ring->rx_buffersize, 0);
if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
@ -592,9 +615,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
ring->ops->fill_descriptor(ring, desc, dmaaddr,
ring->rx_buffersize, 0, 0, 0);
rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
rxhdr->frame_len = 0;
return 0;
}
@ -1483,12 +1503,17 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
len = le16_to_cpu(rxhdr->frame_len);
} while (len == 0 && i++ < 5);
if (unlikely(len == 0)) {
/* recycle the descriptor buffer. */
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize);
goto drop;
dmaaddr = meta->dmaaddr;
goto drop_recycle_buffer;
}
}
if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
/* Something went wrong with the DMA.
* The device did not touch the buffer and did not overwrite the poison. */
b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
dmaaddr = meta->dmaaddr;
goto drop_recycle_buffer;
}
if (unlikely(len > ring->rx_buffersize)) {
/* The data did not fit into one descriptor buffer
* and is split over multiple buffers.
@ -1501,6 +1526,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
while (1) {
desc = ops->idx2desc(ring, *slot, &meta);
/* recycle the descriptor buffer. */
b43_poison_rx_buffer(ring, meta->skb);
sync_descbuffer_for_device(ring, meta->dmaaddr,
ring->rx_buffersize);
*slot = next_slot(ring, *slot);
@ -1519,8 +1545,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
if (unlikely(err)) {
b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
goto drop;
goto drop_recycle_buffer;
}
unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
@ -1530,6 +1555,11 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
b43_rx(ring->dev, skb, rxhdr);
drop:
return;
drop_recycle_buffer:
/* Poison and recycle the RX buffer. */
b43_poison_rx_buffer(ring, skb);
sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
void b43_dma_rx(struct b43_dmaring *ring)


@ -3974,6 +3974,11 @@ static void setup_struct_phy_for_init(struct b43_wldev *dev,
phy->next_txpwr_check_time = jiffies;
/* PHY TX errors counter. */
atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
#if B43_DEBUG
phy->phy_locked = 0;
phy->radio_locked = 0;
#endif
}
static void setup_struct_wldev_for_init(struct b43_wldev *dev)


@ -131,12 +131,16 @@ void b43_radio_lock(struct b43_wldev *dev)
{
u32 macctl;
#if B43_DEBUG
B43_WARN_ON(dev->phy.radio_locked);
dev->phy.radio_locked = 1;
#endif
macctl = b43_read32(dev, B43_MMIO_MACCTL);
B43_WARN_ON(macctl & B43_MACCTL_RADIOLOCK);
macctl |= B43_MACCTL_RADIOLOCK;
b43_write32(dev, B43_MMIO_MACCTL, macctl);
/* Commit the write and wait for the device
* to exit any radio register access. */
/* Commit the write and wait for the firmware
* to finish any radio register access. */
b43_read32(dev, B43_MMIO_MACCTL);
udelay(10);
}
@ -145,11 +149,15 @@ void b43_radio_unlock(struct b43_wldev *dev)
{
u32 macctl;
#if B43_DEBUG
B43_WARN_ON(!dev->phy.radio_locked);
dev->phy.radio_locked = 0;
#endif
/* Commit any write */
b43_read16(dev, B43_MMIO_PHY_VER);
/* unlock */
macctl = b43_read32(dev, B43_MMIO_MACCTL);
B43_WARN_ON(!(macctl & B43_MACCTL_RADIOLOCK));
macctl &= ~B43_MACCTL_RADIOLOCK;
b43_write32(dev, B43_MMIO_MACCTL, macctl);
}


@ -245,8 +245,10 @@ struct b43_phy {
atomic_t txerr_cnt;
#ifdef CONFIG_B43_DEBUG
/* PHY registers locked by b43_phy_lock()? */
/* PHY registers locked (w.r.t. firmware) */
bool phy_locked;
/* Radio registers locked (w.r.t. firmware) */
bool radio_locked;
#endif /* B43_DEBUG */
};


@ -1192,7 +1192,7 @@ int iwl3945_hw_nic_init(struct iwl_priv *priv)
return -ENOMEM;
}
} else
iwl_rx_queue_reset(priv, rxq);
iwl3945_rx_queue_reset(priv, rxq);
iwl3945_rx_replenish(priv);


@ -215,6 +215,7 @@ extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
extern int iwl3945_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq, int count, u32 id);
extern void iwl3945_rx_replenish(void *data);
extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
extern void iwl3945_tx_queue_free(struct iwl_priv *priv, struct iwl_tx_queue *txq);
extern int iwl3945_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len,
const void *data);


@ -976,11 +976,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
rxq->queue[i] = NULL;
dma_sync_single_range_for_cpu(
&priv->pci_dev->dev, rxb->real_dma_addr,
rxb->aligned_dma_addr - rxb->real_dma_addr,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
pkt = (struct iwl_rx_packet *)rxb->skb->data;
/* Reclaim a command buffer only if this packet is a response
@ -1031,9 +1029,6 @@ void iwl_rx_handle(struct iwl_priv *priv)
rxb->skb = NULL;
}
pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
spin_lock_irqsave(&rxq->lock, flags);
list_add_tail(&rxb->list, &priv->rxq.rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);


@ -223,7 +223,7 @@
#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
/* EEPROM GP */
#define CSR_EEPROM_GP_VALID_MSK (0x00000006)
#define CSR_EEPROM_GP_VALID_MSK (0x00000007)
#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000)
#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)


@ -360,12 +360,16 @@ struct iwl_host_cmd {
/**
* struct iwl_rx_queue - Rx queue
* @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
* @dma_addr: bus address of buffer of receive buffer descriptors (rbd)
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
* @rx_free: list of free SKBs for use
* @rx_used: List of Rx buffers with no SKB
* @need_update: flag to indicate we need to update read/write index
* @rb_stts: driver's pointer to receive buffer status
* @rb_stts_dma: bus address of receive buffer status
*
* NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
*/


@ -799,6 +799,22 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdr_len);
/* Total # bytes to be transmitted */
len = (u16)skb->len;
tx_cmd->len = cpu_to_le16(len);
if (info->control.hw_key)
iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
/* TODO need this for burst mode later on */
iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
/* set is_hcca to 0; it probably will never be implemented */
iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
@ -819,21 +835,30 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
else
len_org = 0;
/* Tell NIC about any 2-byte padding after MAC header */
if (len_org)
tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
/* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */
txcmd_phys = pci_map_single(priv->pci_dev,
out_cmd, sizeof(struct iwl_cmd),
&out_cmd->hdr, len,
PCI_DMA_BIDIRECTIONAL);
pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
pci_unmap_len_set(&out_cmd->meta, len, len);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
txcmd_phys += offsetof(struct iwl_cmd, hdr);
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
txcmd_phys, len, 1, 0);
if (info->control.hw_key)
iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
if (qc)
priv->stations[sta_id].tid[tid].seq_number = seq_number;
} else {
wait_write_ptr = 1;
txq->need_update = 0;
}
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
@ -846,41 +871,29 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
0, 0);
}
/* Tell NIC about any 2-byte padding after MAC header */
if (len_org)
tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
/* Total # bytes to be transmitted */
len = (u16)skb->len;
tx_cmd->len = cpu_to_le16(len);
/* TODO need this for burst mode later on */
iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
/* set is_hcca to 0; it probably will never be implemented */
iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, sta_id, 0);
iwl_update_tx_stats(priv, le16_to_cpu(fc), len);
scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
offsetof(struct iwl_tx_cmd, scratch);
offsetof(struct iwl_tx_cmd, scratch);
len = sizeof(struct iwl_tx_cmd) +
sizeof(struct iwl_cmd_header) + hdr_len;
/* take back ownership of DMA buffer to enable update */
pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
len, PCI_DMA_BIDIRECTIONAL);
tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
if (qc)
priv->stations[sta_id].tid[tid].seq_number = seq_number;
} else {
wait_write_ptr = 1;
txq->need_update = 0;
}
IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
le16_to_cpu(out_cmd->hdr.sequence));
IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
/* Set up entry for this TFD in Tx byte-count array */
priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);
priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
le16_to_cpu(tx_cmd->len));
pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
len, PCI_DMA_BIDIRECTIONAL);
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@ -968,18 +981,9 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
INDEX_TO_SEQ(q->write_ptr));
if (out_cmd->meta.flags & CMD_SIZE_HUGE)
out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
len = (idx == TFD_CMD_SLOTS) ?
IWL_MAX_SCAN_SIZE : sizeof(struct iwl_cmd);
len = sizeof(struct iwl_cmd) - sizeof(struct iwl_cmd_meta);
len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;
phys_addr = pci_map_single(priv->pci_dev, out_cmd,
len, PCI_DMA_BIDIRECTIONAL);
pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
pci_unmap_len_set(&out_cmd->meta, len, len);
phys_addr += offsetof(struct iwl_cmd, hdr);
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
phys_addr, fix_size, 1,
U32_PAD(cmd->len));
#ifdef CONFIG_IWLWIFI_DEBUG
switch (out_cmd->hdr.cmd) {
@ -1007,6 +1011,15 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
/* Set up entry in queue's byte count circular buffer */
priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
fix_size, PCI_DMA_BIDIRECTIONAL);
pci_unmap_addr_set(&out_cmd->meta, mapping, phys_addr);
pci_unmap_len_set(&out_cmd->meta, len, fix_size);
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
phys_addr, fix_size, 1,
U32_PAD(cmd->len));
/* Increment and update queue's write index */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
ret = iwl_txq_update_write_ptr(priv, txq);


@ -972,7 +972,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
dma_addr_t phys_addr;
dma_addr_t txcmd_phys;
int txq_id = skb_get_queue_mapping(skb);
u16 len, idx, len_org, hdr_len;
u16 len, idx, len_org, hdr_len; /* TODO: len_org is not used */
u8 id;
u8 unicast;
u8 sta_id;
@ -1074,6 +1074,40 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Copy MAC header from skb into command buffer */
memcpy(tx->hdr, hdr, hdr_len);
if (info->control.hw_key)
iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
/* TODO need this for burst mode later on */
iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
/* set is_hcca to 0; it probably will never be implemented */
iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
/* Total # bytes to be transmitted */
len = (u16)skb->len;
tx->len = cpu_to_le16(len);
tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
if (qc)
priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
} else {
wait_write_ptr = 1;
txq->need_update = 0;
}
IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
le16_to_cpu(out_cmd->hdr.sequence));
IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags));
iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
ieee80211_hdrlen(fc));
/*
* Use the first empty entry in this queue's command buffer array
* to contain the Tx command and MAC header concatenated together
@ -1096,22 +1130,18 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Physical address of this Tx command's header (not MAC header!),
* within command buffer array. */
txcmd_phys = pci_map_single(priv->pci_dev,
out_cmd, sizeof(struct iwl_cmd),
PCI_DMA_TODEVICE);
txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
len, PCI_DMA_TODEVICE);
/* we do not map meta data ... so we can safely access address to
* provide to unmap command*/
pci_unmap_addr_set(&out_cmd->meta, mapping, txcmd_phys);
pci_unmap_len_set(&out_cmd->meta, len, sizeof(struct iwl_cmd));
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
txcmd_phys += offsetof(struct iwl_cmd, hdr);
pci_unmap_len_set(&out_cmd->meta, len, len);
/* Add buffer containing Tx command and MAC(!) header to TFD's
* first entry */
priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
txcmd_phys, len, 1, 0);
if (info->control.hw_key)
iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
/* Set up TFD's 2nd entry to point directly to remainder of skb,
* if any (802.11 null frames have no payload). */
@ -1124,32 +1154,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
0, U32_PAD(len));
}
/* Total # bytes to be transmitted */
len = (u16)skb->len;
tx->len = cpu_to_le16(len);
/* TODO need this for burst mode later on */
iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
/* set is_hcca to 0; it probably will never be implemented */
iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
if (!ieee80211_has_morefrags(hdr->frame_control)) {
txq->need_update = 1;
if (qc)
priv->stations_39[sta_id].tid[tid].seq_number = seq_number;
} else {
wait_write_ptr = 1;
txq->need_update = 0;
}
iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx));
iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr,
ieee80211_hdrlen(fc));
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
@ -1661,6 +1665,37 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv)
spin_unlock_irqrestore(&rxq->lock, flags);
}
void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
unsigned long flags;
int i;
spin_lock_irqsave(&rxq->lock, flags);
INIT_LIST_HEAD(&rxq->rx_free);
INIT_LIST_HEAD(&rxq->rx_used);
/* Fill the rx_used queue with _all_ of the Rx buffers */
for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
/* In the reset function, these buffers may have been allocated
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev,
rxq->pool[i].real_dma_addr,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
priv->alloc_rxb_skb--;
dev_kfree_skb(rxq->pool[i].skb);
rxq->pool[i].skb = NULL;
}
list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
}
/* Set us so that we have processed and used all buffers, but have
* not restocked the Rx queue with fresh buffers */
rxq->read = rxq->write = 0;
rxq->free_count = 0;
spin_unlock_irqrestore(&rxq->lock, flags);
}
EXPORT_SYMBOL(iwl3945_rx_queue_reset);
/*
* this should be called while priv->lock is locked
*/
@ -1685,6 +1720,34 @@ void iwl3945_rx_replenish(void *data)
spin_unlock_irqrestore(&priv->lock, flags);
}
/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
* If an SKB has been detached, the POOL needs to have its SKB set to NULL
* This free routine walks the list of POOL entries and if SKB is set to
* non NULL it is unmapped and freed
*/
static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
int i;
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev,
rxq->pool[i].real_dma_addr,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(rxq->pool[i].skb);
}
}
pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
rxq->dma_addr);
pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
rxq->rb_stts = NULL;
}
EXPORT_SYMBOL(iwl3945_rx_queue_free);
/* Convert linear signal-to-noise ratio into dB */
static u8 ratio2dB[100] = {
/* 0 1 2 3 4 5 6 7 8 9 */
@ -1802,9 +1865,9 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
rxq->queue[i] = NULL;
pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->real_dma_addr,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
pkt = (struct iwl_rx_packet *)rxb->skb->data;
/* Reclaim a command buffer only if this packet is a response
@ -1852,9 +1915,6 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
rxb->skb = NULL;
}
pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
priv->hw_params.rx_buf_size,
PCI_DMA_FROMDEVICE);
spin_lock_irqsave(&rxq->lock, flags);
list_add_tail(&rxb->list, &priv->rxq.rx_used);
spin_unlock_irqrestore(&rxq->lock, flags);
@ -4075,7 +4135,7 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (!static_key) {
sta_id = iwl3945_hw_find_station(priv, addr);
if (sta_id == IWL_INVALID_STATION) {
IWL_DEBUG_MAC80211(priv, "leave - %pMnot in station map.\n",
IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
addr);
return -EINVAL;
}
@ -4913,6 +4973,8 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
hw->wiphy->custom_regulatory = true;
hw->wiphy->max_scan_ssids = 1; /* WILL FIX */
/* Default value; 4 EDCA QOS priorities */
hw->queues = 4;
@ -5194,12 +5256,12 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
iwl_rfkill_unregister(priv);
cancel_delayed_work(&priv->rfkill_poll);
cancel_delayed_work_sync(&priv->rfkill_poll);
iwl3945_dealloc_ucode_pci(priv);
if (priv->rxq.bd)
iwl_rx_queue_free(priv, &priv->rxq);
iwl3945_rx_queue_free(priv, &priv->rxq);
iwl3945_hw_txq_ctx_free(priv);
iwl3945_unset_hw_params(priv);


@ -170,6 +170,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
lbs_deb_rx("rx err: frame received with bad length\n");
dev->stats.rx_length_errors++;
ret = 0;
dev_kfree_skb(skb);
goto done;
}
@ -181,6 +182,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
lbs_pr_alert("rxpd not ok\n");
dev->stats.rx_errors++;
ret = 0;
dev_kfree_skb(skb);
goto done;
}


@ -893,8 +893,7 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
rx_desc->next_rx_desc_phys_addr =
cpu_to_le32(rxq->rx_desc_dma
+ nexti * sizeof(*rx_desc));
rx_desc->rx_ctrl =
cpu_to_le32(MWL8K_RX_CTRL_OWNED_BY_HOST);
rx_desc->rx_ctrl = MWL8K_RX_CTRL_OWNED_BY_HOST;
}
return 0;
@ -3720,12 +3719,12 @@ err_free_reg:
return rc;
}
static void __devexit mwl8k_remove(struct pci_dev *pdev)
static void __devexit mwl8k_shutdown(struct pci_dev *pdev)
{
printk(KERN_ERR "===>%s(%u)\n", __func__, __LINE__);
}
static void __devexit mwl8k_shutdown(struct pci_dev *pdev)
static void __devexit mwl8k_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct mwl8k_priv *priv;

Просмотреть файл

@ -372,15 +372,13 @@ int __orinoco_hw_set_tkip_key(hermes_t *hw, int key_idx, int set_tx,
}
/* Wait upto 100ms for tx queue to empty */
k = 100;
do {
k--;
for (k = 100; k > 0; k--) {
udelay(1000);
ret = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_TXQUEUEEMPTY,
&xmitting);
if (ret)
if (ret || !xmitting)
break;
} while ((k > 0) && xmitting);
}
if (k == 0)
ret = -ETIMEDOUT;

Просмотреть файл

@ -14,9 +14,9 @@
* published by the Free Software Foundation.
*/
#ifdef CONFIG_MAC80211_LEDS
#ifdef CONFIG_P54_LEDS
#include <linux/leds.h>
#endif /* CONFIG_MAC80211_LEDS */
#endif /* CONFIG_P54_LEDS */
enum p54_control_frame_types {
P54_CONTROL_TYPE_SETUP = 0,
@ -116,7 +116,7 @@ enum fw_state {
FW_STATE_RESETTING,
};
#ifdef CONFIG_MAC80211_LEDS
#ifdef CONFIG_P54_LEDS
#define P54_LED_MAX_NAME_LEN 31
@ -129,7 +129,7 @@ struct p54_led_dev {
unsigned int registered;
};
#endif /* CONFIG_MAC80211_LEDS */
#endif /* CONFIG_P54_LEDS */
struct p54_common {
struct ieee80211_hw *hw;
@ -177,10 +177,10 @@ struct p54_common {
u8 privacy_caps;
u8 rx_keycache_size;
/* LED management */
#ifdef CONFIG_MAC80211_LEDS
#ifdef CONFIG_P54_LEDS
struct p54_led_dev assoc_led;
struct p54_led_dev tx_led;
#endif /* CONFIG_MAC80211_LEDS */
#endif /* CONFIG_P54_LEDS */
u16 softled_state; /* bit field of glowing LEDs */
};

Просмотреть файл

@ -2543,8 +2543,6 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
priv->basic_rate_mask = 0x15f;
skb_queue_head_init(&priv->tx_queue);
dev->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_NOISE_DBM;

Просмотреть файл

@ -457,9 +457,10 @@ static int p54spi_wq_tx(struct p54s_priv *priv)
struct ieee80211_tx_info *info;
struct p54_tx_info *minfo;
struct p54s_tx_info *dinfo;
unsigned long flags;
int ret = 0;
spin_lock_bh(&priv->tx_lock);
spin_lock_irqsave(&priv->tx_lock, flags);
while (!list_empty(&priv->tx_pending)) {
entry = list_entry(priv->tx_pending.next,
@ -467,7 +468,7 @@ static int p54spi_wq_tx(struct p54s_priv *priv)
list_del_init(&entry->tx_list);
spin_unlock_bh(&priv->tx_lock);
spin_unlock_irqrestore(&priv->tx_lock, flags);
dinfo = container_of((void *) entry, struct p54s_tx_info,
tx_list);
@ -479,16 +480,14 @@ static int p54spi_wq_tx(struct p54s_priv *priv)
ret = p54spi_tx_frame(priv, skb);
spin_lock_bh(&priv->tx_lock);
if (ret < 0) {
p54_free_skb(priv->hw, skb);
goto out;
return ret;
}
}
out:
spin_unlock_bh(&priv->tx_lock);
spin_lock_irqsave(&priv->tx_lock, flags);
}
spin_unlock_irqrestore(&priv->tx_lock, flags);
return ret;
}
@ -498,12 +497,13 @@ static void p54spi_op_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct p54_tx_info *mi = (struct p54_tx_info *) info->rate_driver_data;
struct p54s_tx_info *di = (struct p54s_tx_info *) mi->data;
unsigned long flags;
BUILD_BUG_ON(sizeof(*di) > sizeof((mi->data)));
spin_lock_bh(&priv->tx_lock);
spin_lock_irqsave(&priv->tx_lock, flags);
list_add_tail(&di->tx_list, &priv->tx_pending);
spin_unlock_bh(&priv->tx_lock);
spin_unlock_irqrestore(&priv->tx_lock, flags);
queue_work(priv->hw->workqueue, &priv->work);
}
@ -604,6 +604,7 @@ out:
static void p54spi_op_stop(struct ieee80211_hw *dev)
{
struct p54s_priv *priv = dev->priv;
unsigned long flags;
if (mutex_lock_interruptible(&priv->mutex)) {
/* FIXME: how to handle this error? */
@ -615,9 +616,9 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
cancel_work_sync(&priv->work);
p54spi_power_off(priv);
spin_lock_bh(&priv->tx_lock);
spin_lock_irqsave(&priv->tx_lock, flags);
INIT_LIST_HEAD(&priv->tx_pending);
spin_unlock_bh(&priv->tx_lock);
spin_unlock_irqrestore(&priv->tx_lock, flags);
priv->fw_state = FW_STATE_OFF;
mutex_unlock(&priv->mutex);

Просмотреть файл

@ -71,6 +71,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x1260, 0xee22)}, /* SMC 2862W-G version 2 */
{USB_DEVICE(0x13b1, 0x000a)}, /* Linksys WUSB54G ver 2 */
{USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */
{USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
{USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
{USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
{USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */

Просмотреть файл

@ -646,10 +646,8 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
* Register HW.
*/
status = ieee80211_register_hw(rt2x00dev->hw);
if (status) {
rt2x00lib_remove_hw(rt2x00dev);
if (status)
return status;
}
set_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags);

Просмотреть файл

@ -369,8 +369,6 @@ int rt2x00pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
if (retval)
return retval;
rt2x00pci_free_reg(rt2x00dev);
pci_save_state(pci_dev);
pci_disable_device(pci_dev);
return pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
@ -381,7 +379,6 @@ int rt2x00pci_resume(struct pci_dev *pci_dev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pci_dev);
struct rt2x00_dev *rt2x00dev = hw->priv;
int retval;
if (pci_set_power_state(pci_dev, PCI_D0) ||
pci_enable_device(pci_dev) ||
@ -390,20 +387,7 @@ int rt2x00pci_resume(struct pci_dev *pci_dev)
return -EIO;
}
retval = rt2x00pci_alloc_reg(rt2x00dev);
if (retval)
return retval;
retval = rt2x00lib_resume(rt2x00dev);
if (retval)
goto exit_free_reg;
return 0;
exit_free_reg:
rt2x00pci_free_reg(rt2x00dev);
return retval;
return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_resume);
#endif /* CONFIG_PM */

Просмотреть файл

@ -702,8 +702,6 @@ int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
if (retval)
return retval;
rt2x00usb_free_reg(rt2x00dev);
/*
* Decrease usbdev refcount.
*/
@ -717,24 +715,10 @@ int rt2x00usb_resume(struct usb_interface *usb_intf)
{
struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
struct rt2x00_dev *rt2x00dev = hw->priv;
int retval;
usb_get_dev(interface_to_usbdev(usb_intf));
retval = rt2x00usb_alloc_reg(rt2x00dev);
if (retval)
return retval;
retval = rt2x00lib_resume(rt2x00dev);
if (retval)
goto exit_free_reg;
return 0;
exit_free_reg:
rt2x00usb_free_reg(rt2x00dev);
return retval;
return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00usb_resume);
#endif /* CONFIG_PM */

Просмотреть файл

@ -2369,6 +2369,8 @@ static struct usb_device_id rt73usb_device_table[] = {
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) },
/* CNet */
{ USB_DEVICE(0x1371, 0x9022), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x1371, 0x9032), USB_DEVICE_DATA(&rt73usb_ops) },

Просмотреть файл

@ -46,6 +46,7 @@ fw-shipped-$(CONFIG_DVB_TTUSB_BUDGET) += ttusb-budget/dspbootcode.bin
fw-shipped-$(CONFIG_E100) += e100/d101m_ucode.bin e100/d101s_ucode.bin \
e100/d102e_ucode.bin
fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin
fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis
fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
advansys/3550.bin advansys/38C0800.bin

Просмотреть файл

@ -576,6 +576,16 @@ Found in hex form in kernel source.
--------------------------------------------------------------------------
Driver: PCMCIA_PCNET - NE2000 compatible PCMCIA adapter
File: cis/LA-PCM.cis
Licence: GPL
Originally developed by the pcmcia-cs project
--------------------------------------------------------------------------
Driver: PCMCIA_SMC91C92 - SMC 91Cxx PCMCIA
File: ositech/Xilinx7OD.bin

Просмотреть файл

@ -0,0 +1,20 @@
:100000000105D4F953E9FF17035338FF20040FC04B
:1000100002002102060315390401416C6C69656414
:100020002054656C657369732C4B2E4B00457468C6
:1000300065726E6574204C414E20436172640043CA
:10004000656E747265434F4D004C412D50434D0019
:10005000FF1A0602100000020B1B08810108E06075
:1000600000021F1B08820108E06020021F1B08839A
:100070000108E06040021F1B08840108E060600284
:100080001F1B08850108E06080021F1B088601080D
:10009000E060A0021F1B08870108E060C0021F1B70
:1000A00008880108E060E0021F1B08890108E06081
:1000B00000031F1B088A0108E06020031F1B088B38
:1000C0000108E06040031F1B088C0108E06060032A
:1000D0001F1B088D0108E06080031F1B088E0108AC
:1000E000E060A0031F1B088F0108E060C0031F1B16
:0D00F00008900108E060E0031F1400FF000D
:00000001FF
#
# Replacement CIS for Allied Telesis LA-PCM
#

Просмотреть файл

@ -18,6 +18,7 @@ enum nf_nat_manip_type
#define IP_NAT_RANGE_MAP_IPS 1
#define IP_NAT_RANGE_PROTO_SPECIFIED 2
#define IP_NAT_RANGE_PROTO_RANDOM 4
#define IP_NAT_RANGE_PERSISTENT 8
/* NAT sequence number modifications */
struct nf_nat_seq {

Просмотреть файл

@ -668,7 +668,8 @@ static int vlan_ethtool_get_settings(struct net_device *dev,
const struct vlan_dev_info *vlan = vlan_dev_info(dev);
struct net_device *real_dev = vlan->real_dev;
if (!real_dev->ethtool_ops->get_settings)
if (!real_dev->ethtool_ops ||
!real_dev->ethtool_ops->get_settings)
return -EOPNOTSUPP;
return real_dev->ethtool_ops->get_settings(real_dev, cmd);
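
The guard added above checks the ops table itself before its method: a device may have no ethtool_ops at all, and reaching for ->get_settings through a NULL table would oops. A minimal user-space sketch of the same call-through-an-optional-ops-table pattern, with made-up types:

#include <stdio.h>
#include <errno.h>

struct settings { int speed; };

struct dev_ops {
	int (*get_settings)(struct settings *s);
};

struct device {
	const struct dev_ops *ops;	/* may legitimately be NULL */
};

static int device_get_settings(const struct device *d, struct settings *s)
{
	/* Check the table pointer first, then the method pointer, in the
	 * same order as the hunk above. */
	if (!d->ops || !d->ops->get_settings)
		return -EOPNOTSUPP;
	return d->ops->get_settings(s);
}

static int fake_get_settings(struct settings *s)
{
	s->speed = 1000;
	return 0;
}

int main(void)
{
	const struct dev_ops ops = { .get_settings = fake_get_settings };
	struct device with_ops = { .ops = &ops };
	struct device without_ops = { .ops = NULL };
	struct settings s = { 0 };

	printf("%d\n", device_get_settings(&with_ops, &s));	/* 0 */
	printf("%d\n", device_get_settings(&without_ops, &s));	/* -EOPNOTSUPP */
	return 0;
}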

Просмотреть файл

@ -148,9 +148,13 @@ static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
{
struct ax25_uid_assoc *pt;
struct hlist_node *node;
int i = 0;
int i = 1;
read_lock(&ax25_uid_lock);
if (*pos == 0)
return SEQ_START_TOKEN;
ax25_uid_for_each(pt, node, &ax25_uid_list) {
if (i == *pos)
return pt;
@ -162,8 +166,10 @@ static void *ax25_uid_seq_start(struct seq_file *seq, loff_t *pos)
static void *ax25_uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
if (v == SEQ_START_TOKEN)
return ax25_uid_list.first;
else
return hlist_entry(((ax25_uid_assoc *)v)->uid_node.next,
ax25_uid_assoc, uid_node);
}
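
The rework above adopts the usual seq_file idiom: position 0 yields SEQ_START_TOKEN so the show callback can print a header row, and ->next steps from that token to the first real hash-list entry. A user-space sketch of that iteration shape, with invented data and names:

#include <stdio.h>

#define START_TOKEN ((const char *)1)	/* stand-in for SEQ_START_TOKEN */

static const char *entries[] = { "alpha", "bravo", "charlie" };
static const long nentries = 3;

static const char *seq_start(long pos)
{
	if (pos == 0)
		return START_TOKEN;	/* header row comes first */
	return pos <= nentries ? entries[pos - 1] : NULL;
}

static const char *seq_next(const char *v, long *pos)
{
	++*pos;
	if (v == START_TOKEN)		/* header just shown: first entry is next */
		return nentries ? entries[0] : NULL;
	long idx = *pos - 1;		/* otherwise step to the following entry */
	return idx < nentries ? entries[idx] : NULL;
}

int main(void)
{
	long pos = 0;

	for (const char *v = seq_start(pos); v; v = seq_next(v, &pos))
		puts(v == START_TOKEN ? "<header row>" : v);
	return 0;
}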

Просмотреть файл

@ -866,8 +866,16 @@ static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *s
hci_dev_lock(hdev);
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
if (!conn)
goto unlock;
if (!conn) {
if (ev->link_type != SCO_LINK)
goto unlock;
conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
if (!conn)
goto unlock;
conn->type = SCO_LINK;
}
if (!ev->status) {
conn->handle = __le16_to_cpu(ev->handle);
@ -1646,20 +1654,28 @@ static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_bu
conn->type = SCO_LINK;
}
if (conn->out && ev->status == 0x1c && conn->attempt < 2) {
conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
(hdev->esco_type & EDR_ESCO_MASK);
hci_setup_sync(conn, conn->link->handle);
goto unlock;
}
if (!ev->status) {
switch (ev->status) {
case 0x00:
conn->handle = __le16_to_cpu(ev->handle);
conn->state = BT_CONNECTED;
hci_conn_add_sysfs(conn);
} else
break;
case 0x1c: /* SCO interval rejected */
case 0x1f: /* Unspecified error */
if (conn->out && conn->attempt < 2) {
conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
(hdev->esco_type & EDR_ESCO_MASK);
hci_setup_sync(conn, conn->link->handle);
goto unlock;
}
/* fall through */
default:
conn->state = BT_CLOSED;
break;
}
hci_proto_connect_cfm(conn, ev->status);
if (ev->status)

Просмотреть файл

@ -1194,6 +1194,8 @@ void rfcomm_dlc_accept(struct rfcomm_dlc *d)
rfcomm_send_ua(d->session, d->dlci);
rfcomm_dlc_clear_timer(d);
rfcomm_dlc_lock(d);
d->state = BT_CONNECTED;
d->state_change(d, 0);

Просмотреть файл

@ -674,8 +674,8 @@ static int can_rcv(struct sk_buff *skb, struct net_device *dev,
rcu_read_unlock();
/* free the skbuff allocated by the netdevice driver */
kfree_skb(skb);
/* consume the skbuff allocated by the netdevice driver */
consume_skb(skb);
if (matches > 0) {
can_stats.matches++;

Просмотреть файл

@ -1336,7 +1336,12 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
{
struct packet_type *ptype;
#ifdef CONFIG_NET_CLS_ACT
if (!(skb->tstamp.tv64 && (G_TC_FROM(skb->tc_verd) & AT_INGRESS)))
net_timestamp(skb);
#else
net_timestamp(skb);
#endif
rcu_read_lock();
list_for_each_entry_rcu(ptype, &ptype_all, list) {
@ -2540,9 +2545,9 @@ struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
}
BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
frag = &info->frags[info->nr_frags - 1];
frag = info->frags;
for (i = skb_shinfo(skb)->nr_frags; i < info->nr_frags; i++) {
for (i = 0; i < info->nr_frags; i++) {
skb_fill_page_desc(skb, i, frag->page, frag->page_offset,
frag->size);
frag++;
@ -4400,7 +4405,7 @@ int register_netdevice(struct net_device *dev)
dev->iflink = -1;
#ifdef CONFIG_COMPAT_NET_DEV_OPS
/* Netdevice_ops API compatiability support.
/* Netdevice_ops API compatibility support.
* This is temporary until all network devices are converted.
*/
if (dev->netdev_ops) {
@ -4411,7 +4416,7 @@ int register_netdevice(struct net_device *dev)
dev->name, netdev_drivername(dev, drivername, 64));
/* This works only because net_device_ops and the
compatiablity structure are the same. */
compatibility structure are the same. */
dev->netdev_ops = (void *) &(dev->init);
}
#endif

Просмотреть файл

@ -211,7 +211,8 @@ find_best_ips_proto(struct nf_conntrack_tuple *tuple,
minip = ntohl(range->min_ip);
maxip = ntohl(range->max_ip);
j = jhash_2words((__force u32)tuple->src.u3.ip,
(__force u32)tuple->dst.u3.ip, 0);
range->flags & IP_NAT_RANGE_PERSISTENT ?
0 : (__force u32)tuple->dst.u3.ip, 0);
j = ((u64)j * (maxip - minip + 1)) >> 32;
*var_ipp = htonl(minip + j);
}
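
With IP_NAT_RANGE_PERSISTENT set, the destination is left out of the hash, so a given client is mapped onto the same address of the NAT pool no matter whom it talks to; the 32-bit hash is then scaled onto [minip, maxip] by a multiply-and-shift rather than a modulo. A user-space sketch of that selection, with addresses in host byte order and a stand-in mixing function in place of jhash_2words():

#include <stdio.h>
#include <stdint.h>

/* Stand-in mixing function; the kernel uses jhash_2words() instead. */
static uint32_t mix(uint32_t a, uint32_t b)
{
	uint32_t h = a * 2654435761u ^ b * 2246822519u;
	h ^= h >> 15;
	return h * 2654435761u;
}

/* Pick an address in [minip, maxip]: multiply the 32-bit hash by the pool
 * size and keep the high 32 bits - an even spread with no division. */
static uint32_t pick_addr(uint32_t src, uint32_t dst, int persistent,
			  uint32_t minip, uint32_t maxip)
{
	uint32_t j = mix(src, persistent ? 0 : dst);

	return minip + (uint32_t)(((uint64_t)j * (maxip - minip + 1)) >> 32);
}

int main(void)
{
	uint32_t minip = 0xc0a80001, maxip = 0xc0a80004;	/* 192.168.0.1-4 */
	uint32_t src = 0x0a000001;				/* 10.0.0.1 */

	/* Persistent: the same source always lands on the same pool address. */
	printf("%x %x\n",
	       (unsigned)pick_addr(src, 0x08080808, 1, minip, maxip),
	       (unsigned)pick_addr(src, 0x01010101, 1, minip, maxip));
	/* Non-persistent: the destination participates, so it may differ. */
	printf("%x %x\n",
	       (unsigned)pick_addr(src, 0x08080808, 0, minip, maxip),
	       (unsigned)pick_addr(src, 0x01010101, 0, minip, maxip));
	return 0;
}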

Просмотреть файл

@ -2511,6 +2511,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
struct sk_buff *p;
struct tcphdr *th;
struct tcphdr *th2;
unsigned int len;
unsigned int thlen;
unsigned int flags;
unsigned int mss = 1;
@ -2531,6 +2532,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
skb_gro_pull(skb, thlen);
len = skb_gro_len(skb);
flags = tcp_flag_word(th);
for (; (p = *head); head = &p->next) {
@ -2561,7 +2563,7 @@ found:
mss = skb_shinfo(p)->gso_size;
flush |= (skb_gro_len(skb) > mss) | !skb_gro_len(skb);
flush |= (len > mss) | !len;
flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
if (flush || skb_gro_receive(head, skb)) {
@ -2574,7 +2576,7 @@ found:
tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);
out_check_final:
flush = skb_gro_len(skb) < mss;
flush = len < mss;
flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST |
TCP_FLAG_SYN | TCP_FLAG_FIN);

Просмотреть файл

@ -778,7 +778,7 @@ static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
if (tp->lost_skb_hint &&
before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
(tcp_is_fack(tp) || TCP_SKB_CB(skb)->sacked))
(tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)))
tp->lost_cnt_hint -= decr;
tcp_verify_left_out(tp);

Просмотреть файл

@ -172,6 +172,7 @@ static void iucv_sock_close(struct sock *sk)
err = iucv_sock_wait_state(sk, IUCV_CLOSED, 0, timeo);
}
case IUCV_CLOSING: /* fall through */
sk->sk_state = IUCV_CLOSED;
sk->sk_state_change(sk);
@ -224,6 +225,8 @@ static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio)
spin_lock_init(&iucv_sk(sk)->message_q.lock);
skb_queue_head_init(&iucv_sk(sk)->backlog_skb_q);
iucv_sk(sk)->send_tag = 0;
iucv_sk(sk)->path = NULL;
memset(&iucv_sk(sk)->src_user_id , 0, 32);
sk->sk_destruct = iucv_sock_destruct;
sk->sk_sndtimeo = IUCV_CONN_TIMEOUT;
@ -811,6 +814,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
/* receive/dequeue next skb:
* the function understands MSG_PEEK and, thus, does not dequeue skb */
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb) {
if (sk->sk_shutdown & RCV_SHUTDOWN)
@ -858,9 +863,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
iucv_process_message_q(sk);
spin_unlock_bh(&iucv->message_q.lock);
}
} else
skb_queue_head(&sk->sk_receive_queue, skb);
}
done:
return err ? : copied;
@ -934,6 +937,9 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
lock_sock(sk);
switch (sk->sk_state) {
case IUCV_DISCONN:
case IUCV_CLOSING:
case IUCV_SEVERED:
case IUCV_CLOSED:
err = -ENOTCONN;
goto fail;
@ -1113,8 +1119,12 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
struct sock_msg_q *save_msg;
int len;
if (sk->sk_shutdown & RCV_SHUTDOWN)
if (sk->sk_shutdown & RCV_SHUTDOWN) {
iucv_message_reject(path, msg);
return;
}
spin_lock(&iucv->message_q.lock);
if (!list_empty(&iucv->message_q.list) ||
!skb_queue_empty(&iucv->backlog_skb_q))
@ -1129,9 +1139,8 @@ static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg)
if (!skb)
goto save_message;
spin_lock(&iucv->message_q.lock);
iucv_process_message(sk, skb, path, msg);
spin_unlock(&iucv->message_q.lock);
goto out_unlock;
return;
@ -1142,8 +1151,9 @@ save_message:
save_msg->path = path;
save_msg->msg = *msg;
spin_lock(&iucv->message_q.lock);
list_add_tail(&save_msg->list, &iucv->message_q.list);
out_unlock:
spin_unlock(&iucv->message_q.lock);
}

Просмотреть файл

@ -202,10 +202,3 @@ config MAC80211_DEBUG_COUNTERS
and show them in debugfs.
If unsure, say N.
config MAC80211_VERBOSE_SPECT_MGMT_DEBUG
bool "Verbose Spectrum Management (IEEE 802.11h)debugging"
depends on MAC80211_DEBUG_MENU
---help---
Say Y here to print out verbose Spectrum Management (IEEE 802.11h)
debug messages.

Просмотреть файл

@ -258,7 +258,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
(chan->max_power - local->power_constr_level) :
chan->max_power;
if (local->user_power_level)
if (local->user_power_level >= 0)
power = min(power, local->user_power_level);
if (local->hw.conf.power_level != power) {

Просмотреть файл

@ -441,6 +441,9 @@ static bool ieee80211_check_tim(struct ieee802_11_elems *elems, u16 aid)
u8 index, indexn1, indexn2;
struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *) elems->tim;
if (unlikely(!tim || elems->tim_len < 4))
return false;
aid &= 0x3fff;
index = aid / 8;
mask = 1 << (aid & 7);
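
The added check above bails out when the TIM element is missing or too short; the arithmetic that follows then locates a station's bit in the traffic-indication bitmap: low 14 bits of the AID, byte index aid/8, bit mask 1 << (aid & 7). A small user-space sketch of that bit addressing, using a full made-up bitmap (real beacons carry only a partial one, which is not modelled here):

#include <stdio.h>
#include <stdint.h>

/* Return nonzero if the bit for 'aid' is set in a full TIM virtual bitmap,
 * mirroring the index/mask arithmetic in the hunk above. */
static int tim_bit_set(const uint8_t *bitmap, uint16_t aid)
{
	aid &= 0x3fff;				/* AID is carried in the low 14 bits */
	return bitmap[aid / 8] & (1u << (aid & 7));
}

int main(void)
{
	uint8_t bitmap[2008 / 8] = { 0 };	/* room for AIDs up to 2007 */

	bitmap[5 / 8] |= 1u << (5 & 7);		/* pretend AID 5 has buffered frames */

	printf("aid 5: %d\n", !!tim_bit_set(bitmap, 5));	/* 1 */
	printf("aid 6: %d\n", !!tim_bit_set(bitmap, 6));	/* 0 */
	return 0;
}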
@ -945,9 +948,13 @@ void ieee80211_beacon_loss_work(struct work_struct *work)
u.mgd.beacon_loss_work);
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM "
"- sending probe request\n", sdata->dev->name,
sdata->u.mgd.bssid);
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: driver reports beacon loss from AP %pM "
"- sending probe request\n", sdata->dev->name,
sdata->u.mgd.bssid);
}
#endif
ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
@ -1007,9 +1014,13 @@ static void ieee80211_associated(struct ieee80211_sub_if_data *sdata)
(local->hw.conf.flags & IEEE80211_CONF_PS)) &&
time_after(jiffies,
ifmgd->last_beacon + IEEE80211_MONITORING_INTERVAL)) {
printk(KERN_DEBUG "%s: beacon loss from AP %pM "
"- sending probe request\n",
sdata->dev->name, ifmgd->bssid);
#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: beacon loss from AP %pM "
"- sending probe request\n",
sdata->dev->name, ifmgd->bssid);
}
#endif
ifmgd->flags |= IEEE80211_STA_PROBEREQ_POLL;
ieee80211_send_probe_req(sdata, ifmgd->bssid, ifmgd->ssid,
ifmgd->ssid_len, NULL, 0);
@ -1355,7 +1366,7 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
for (i = 0; i < elems.ext_supp_rates_len; i++) {
int rate = (elems.ext_supp_rates[i] & 0x7f) * 5;
bool is_basic = !!(elems.supp_rates[i] & 0x80);
bool is_basic = !!(elems.ext_supp_rates[i] & 0x80);
if (rate > 110)
have_higher_than_11mbit = true;
@ -1902,9 +1913,17 @@ static void ieee80211_sta_work(struct work_struct *work)
static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
{
if (sdata->vif.type == NL80211_IFTYPE_STATION)
if (sdata->vif.type == NL80211_IFTYPE_STATION) {
/*
* Need to update last_beacon to avoid beacon loss
* test to trigger.
*/
sdata->u.mgd.last_beacon = jiffies;
queue_work(sdata->local->hw.workqueue,
&sdata->u.mgd.work);
}
}
/* interface setup */
@ -2105,12 +2124,13 @@ void ieee80211_dynamic_ps_enable_work(struct work_struct *work)
struct ieee80211_local *local =
container_of(work, struct ieee80211_local,
dynamic_ps_enable_work);
/* XXX: using scan_sdata is completely broken! */
struct ieee80211_sub_if_data *sdata = local->scan_sdata;
if (local->hw.conf.flags & IEEE80211_CONF_PS)
return;
if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK)
if (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK && sdata)
ieee80211_send_nullfunc(local, sdata, 1);
local->hw.conf.flags |= IEEE80211_CONF_PS;

Просмотреть файл

@ -156,8 +156,19 @@ int __ieee80211_resume(struct ieee80211_hw *hw)
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
WARN_ON(ieee80211_if_config(sdata, changed));
ieee80211_bss_info_change_notify(sdata, ~0);
/*
* Driver's config_interface can fail if rfkill is
* enabled. Accommodate this return code.
* FIXME: When mac80211 has knowledge of rfkill
* state the code below can change back to:
* WARN(ieee80211_if_config(sdata, changed));
* ieee80211_bss_info_change_notify(sdata, ~0);
*/
if (ieee80211_if_config(sdata, changed))
printk(KERN_DEBUG "%s: failed to configure interface during resume\n",
sdata->dev->name);
else
ieee80211_bss_info_change_notify(sdata, ~0);
break;
case NL80211_IFTYPE_WDS:
break;

Просмотреть файл

@ -29,6 +29,7 @@
static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
struct tid_ampdu_rx *tid_agg_rx,
struct sk_buff *skb,
struct ieee80211_rx_status *status,
u16 mpdu_seq_num,
int bar_req);
/*
@ -1396,7 +1397,7 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
* mac80211. That also explains the __skb_push()
* below.
*/
align = (unsigned long)skb->data & 4;
align = (unsigned long)skb->data & 3;
if (align) {
if (WARN_ON(skb_headroom(skb) < 3)) {
dev_kfree_skb(skb);
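
The one-character change above fixes how the misalignment of the payload pointer is measured: the offset from a 4-byte boundary lives in the two low bits of the address, so the mask must be 3; masking with 4 merely tests bit 2, reporting an address ending in 2 as aligned and one ending in 4 as misaligned. A quick user-space illustration of the two masks:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Offsets from a 4-byte boundary for a few sample addresses. */
	uintptr_t addrs[] = { 0x1000, 0x1001, 0x1002, 0x1004, 0x1006 };

	for (unsigned i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++) {
		uintptr_t a = addrs[i];

		/* '& 3' keeps the two low bits: the true misalignment (0-3).
		 * '& 4' only tests bit 2, so 0x1002 looks aligned and
		 * 0x1004 looks misaligned - the bug being fixed above. */
		printf("%#lx: & 3 -> %lu, & 4 -> %lu\n",
		       (unsigned long)a,
		       (unsigned long)(a & 3), (unsigned long)(a & 4));
	}
	return 0;
}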
@ -1688,7 +1689,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx)
/* manage reordering buffer according to requested */
/* sequence number */
rcu_read_lock();
ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL,
ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, NULL, NULL,
start_seq_num, 1);
rcu_read_unlock();
return RX_DROP_UNUSABLE;
@ -2293,6 +2294,7 @@ static inline u16 seq_sub(u16 sq1, u16 sq2)
static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
struct tid_ampdu_rx *tid_agg_rx,
struct sk_buff *skb,
struct ieee80211_rx_status *rxstatus,
u16 mpdu_seq_num,
int bar_req)
{
@ -2374,6 +2376,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
/* put the frame in the reordering buffer */
tid_agg_rx->reorder_buf[index] = skb;
memcpy(tid_agg_rx->reorder_buf[index]->cb, rxstatus,
sizeof(*rxstatus));
tid_agg_rx->stored_mpdu_num++;
/* release the buffer until next missing frame */
index = seq_sub(tid_agg_rx->head_seq_num, tid_agg_rx->ssn)
@ -2399,7 +2403,8 @@ static u8 ieee80211_sta_manage_reorder_buf(struct ieee80211_hw *hw,
}
static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
struct sk_buff *skb)
struct sk_buff *skb,
struct ieee80211_rx_status *status)
{
struct ieee80211_hw *hw = &local->hw;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@ -2448,7 +2453,7 @@ static u8 ieee80211_rx_reorder_ampdu(struct ieee80211_local *local,
/* according to mpdu sequence number deal with reordering buffer */
mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb,
ret = ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, status,
mpdu_seq_num, 0);
end_reorder:
return ret;
@ -2512,7 +2517,7 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
return;
}
if (!ieee80211_rx_reorder_ampdu(local, skb))
if (!ieee80211_rx_reorder_ampdu(local, skb, status))
__ieee80211_rx_handle_packet(hw, skb, status, rate);
rcu_read_unlock();

Просмотреть файл

@ -417,6 +417,7 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
{
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
struct ieee80211_channel* chan = local->hw.conf.channel;
bool reconf = false;
u32 reconf_flags = 0;
int new_power_level;
@ -427,14 +428,38 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
if (!chan)
return -EINVAL;
if (data->txpower.fixed)
new_power_level = min(data->txpower.value, chan->max_power);
else /* Automatic power level setting */
new_power_level = chan->max_power;
/* only change when not disabling */
if (!data->txpower.disabled) {
if (data->txpower.fixed) {
if (data->txpower.value < 0)
return -EINVAL;
new_power_level = data->txpower.value;
/*
* Debatable, but we cannot do a fixed power
* level above the regulatory constraint.
* Use "iwconfig wlan0 txpower 15dBm" instead.
*/
if (new_power_level > chan->max_power)
return -EINVAL;
} else {
/*
* Automatic power level setting, max being the value
* passed in from userland.
*/
if (data->txpower.value < 0)
new_power_level = -1;
else
new_power_level = data->txpower.value;
}
local->user_power_level = new_power_level;
if (local->hw.conf.power_level != new_power_level)
reconf_flags |= IEEE80211_CONF_CHANGE_POWER;
reconf = true;
/*
* ieee80211_hw_config() will limit to the channel's
* max power and possibly power constraint from AP.
*/
local->user_power_level = new_power_level;
}
if (local->hw.conf.radio_enabled != !(data->txpower.disabled)) {
local->hw.conf.radio_enabled = !(data->txpower.disabled);
@ -442,7 +467,7 @@ static int ieee80211_ioctl_siwtxpower(struct net_device *dev,
ieee80211_led_radio(local, local->hw.conf.radio_enabled);
}
if (reconf_flags)
if (reconf || reconf_flags)
ieee80211_hw_config(local, reconf_flags);
return 0;
@ -530,7 +555,7 @@ static int ieee80211_ioctl_giwfrag(struct net_device *dev,
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
frag->value = local->fragmentation_threshold;
frag->disabled = (frag->value >= IEEE80211_MAX_RTS_THRESHOLD);
frag->disabled = (frag->value >= IEEE80211_MAX_FRAG_THRESHOLD);
frag->fixed = 1;
return 0;

Просмотреть файл

@ -176,7 +176,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me,
}
/* Get rid of expecteds, set helpers to NULL. */
hlist_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
hlist_nulls_for_each_entry(h, nn, &net->ct.unconfirmed, hnnode)
unhelp(h, me);
for (i = 0; i < nf_conntrack_htable_size; i++) {
hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)

Просмотреть файл

@ -512,7 +512,7 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
skb = ctnetlink_alloc_skb(tuple(ct, IP_CT_DIR_ORIGINAL), GFP_ATOMIC);
if (!skb)
return NOTIFY_DONE;
goto errout;
b = skb->tail;
@ -591,8 +591,9 @@ static int ctnetlink_conntrack_event(struct notifier_block *this,
nla_put_failure:
rcu_read_unlock();
nlmsg_failure:
nfnetlink_set_err(0, group, -ENOBUFS);
kfree_skb(skb);
errout:
nfnetlink_set_err(0, group, -ENOBUFS);
return NOTIFY_DONE;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
@ -987,7 +988,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
{
struct nf_conntrack_helper *helper;
struct nf_conn_help *help = nfct_help(ct);
char *helpname;
char *helpname = NULL;
int err;
/* don't change helper of sibling connections */
@ -1230,7 +1231,7 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
rcu_read_lock();
if (cda[CTA_HELP]) {
char *helpname;
char *helpname = NULL;
err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
if (err < 0)
@ -1564,7 +1565,7 @@ static int ctnetlink_expect_event(struct notifier_block *this,
skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!skb)
return NOTIFY_DONE;
goto errout;
b = skb->tail;
@ -1589,8 +1590,9 @@ static int ctnetlink_expect_event(struct notifier_block *this,
nla_put_failure:
rcu_read_unlock();
nlmsg_failure:
nfnetlink_set_err(0, 0, -ENOBUFS);
kfree_skb(skb);
errout:
nfnetlink_set_err(0, 0, -ENOBUFS);
return NOTIFY_DONE;
}
#endif

Просмотреть файл

@ -36,10 +36,14 @@ static struct nf_logger *__find_logger(int pf, const char *str_logger)
int nf_log_register(u_int8_t pf, struct nf_logger *logger)
{
const struct nf_logger *llog;
int i;
if (pf >= ARRAY_SIZE(nf_loggers))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(logger->list); i++)
INIT_LIST_HEAD(&logger->list[i]);
mutex_lock(&nf_log_mutex);
if (pf == NFPROTO_UNSPEC) {

Просмотреть файл

@ -203,7 +203,7 @@ static int __init nfnetlink_init(void)
nfnetlink_rcv, NULL, THIS_MODULE);
if (!nfnl) {
printk(KERN_ERR "cannot initialize nfnetlink!\n");
return -1;
return -ENOMEM;
}
return 0;

Просмотреть файл

@ -256,13 +256,11 @@ struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
{
struct netlbl_af4list *entry;
entry = netlbl_af4list_search(addr, head);
if (entry != NULL && entry->addr == addr && entry->mask == mask) {
netlbl_af4list_remove_entry(entry);
return entry;
}
return NULL;
entry = netlbl_af4list_search_exact(addr, mask, head);
if (entry == NULL)
return NULL;
netlbl_af4list_remove_entry(entry);
return entry;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@ -299,15 +297,11 @@ struct netlbl_af6list *netlbl_af6list_remove(const struct in6_addr *addr,
{
struct netlbl_af6list *entry;
entry = netlbl_af6list_search(addr, head);
if (entry != NULL &&
ipv6_addr_equal(&entry->addr, addr) &&
ipv6_addr_equal(&entry->mask, mask)) {
netlbl_af6list_remove_entry(entry);
return entry;
}
return NULL;
entry = netlbl_af6list_search_exact(addr, mask, head);
if (entry == NULL)
return NULL;
netlbl_af6list_remove_entry(entry);
return entry;
}
#endif /* IPv6 */

Просмотреть файл

@ -1084,8 +1084,10 @@ static int nr_sendmsg(struct kiocb *iocb, struct socket *sock,
/* Build a packet - the conventional user limit is 236 bytes. We can
do ludicrously large NetROM frames but must not overflow */
if (len > 65536)
return -EMSGSIZE;
if (len > 65536) {
err = -EMSGSIZE;
goto out;
}
SOCK_DEBUG(sk, "NET/ROM: sendto: building packet.\n");
size = len + NR_NETWORK_LEN + NR_TRANSPORT_LEN;

Просмотреть файл

@ -280,6 +280,14 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
if (unlikely(!skb))
return NULL;
#ifdef CONFIG_NET_CLS_ACT
/*
* If it's at ingress let's pretend the delay is
* from the network (tstamp will be updated).
*/
if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
skb->tstamp.tv64 = 0;
#endif
pr_debug("netem_dequeue: return skb=%p\n", skb);
sch->q.qlen--;
return skb;

Просмотреть файл

@ -90,7 +90,7 @@ struct cfg80211_internal_bss {
struct rb_node rbn;
unsigned long ts;
struct kref ref;
bool hold;
bool hold, ies_allocated;
/* must be last because of priv member */
struct cfg80211_bss pub;

Просмотреть файл

@ -3334,7 +3334,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
msg = nlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
if (!msg)
return;
@ -3353,7 +3353,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
return;
}
genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_KERNEL);
genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, GFP_ATOMIC);
return;
nla_put_failure:

Просмотреть файл

@ -2095,11 +2095,12 @@ int set_regdom(const struct ieee80211_regdomain *rd)
/* Caller must hold cfg80211_mutex */
void reg_device_remove(struct wiphy *wiphy)
{
struct wiphy *request_wiphy;
struct wiphy *request_wiphy = NULL;
assert_cfg80211_lock();
request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
if (last_request)
request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
kfree(wiphy->regd);
if (!last_request || !request_wiphy)

Просмотреть файл

@ -58,6 +58,10 @@ static void bss_release(struct kref *ref)
bss = container_of(ref, struct cfg80211_internal_bss, ref);
if (bss->pub.free_priv)
bss->pub.free_priv(&bss->pub);
if (bss->ies_allocated)
kfree(bss->pub.information_elements);
kfree(bss);
}
@ -360,19 +364,41 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
found = rb_find_bss(dev, res);
if (found && overwrite) {
list_replace(&found->list, &res->list);
rb_replace_node(&found->rbn, &res->rbn,
&dev->bss_tree);
kref_put(&found->ref, bss_release);
found = res;
} else if (found) {
if (found) {
kref_get(&found->ref);
found->pub.beacon_interval = res->pub.beacon_interval;
found->pub.tsf = res->pub.tsf;
found->pub.signal = res->pub.signal;
found->pub.capability = res->pub.capability;
found->ts = res->ts;
/* overwrite IEs */
if (overwrite) {
size_t used = dev->wiphy.bss_priv_size + sizeof(*res);
size_t ielen = res->pub.len_information_elements;
if (ksize(found) >= used + ielen) {
memcpy(found->pub.information_elements,
res->pub.information_elements, ielen);
found->pub.len_information_elements = ielen;
} else {
u8 *ies = found->pub.information_elements;
if (found->ies_allocated) {
if (ksize(ies) < ielen)
ies = krealloc(ies, ielen,
GFP_ATOMIC);
} else
ies = kmalloc(ielen, GFP_ATOMIC);
if (ies) {
memcpy(ies, res->pub.information_elements, ielen);
found->ies_allocated = true;
found->pub.information_elements = ies;
}
}
}
kref_put(&res->ref, bss_release);
} else {
/* this "consumes" the reference */