Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix namespace init and cleanup in phonet to fix some oopses, from Eric W. Biederman.
 2) Missing kfree_skb() in AF_KEY, from Julia Lawall.
 3) Refcount leak and source address handling fix in l2tp from James Chapman.
 4) Memory leak fix in CAIF from Tomasz Gregorek.
 5) When routes are cloned from ipv6 addrconf routes, we don't process expirations properly. Fix from Gao Feng.
 6) Fix panic on DMA errors in atl1 driver, from Tony Zelenoff.
 7) Only enable interrupts in 8139cp driver after we've registered the IRQ handler. From Jason Wang.
 8) Fix too many reads of KS_CIDER register in ks8851 during probe, fixing crashes on spurious interrupts. From Matt Renzelmann.
 9) Missing include in ath5k driver and missing iounmap on probe failure, from Jonathan Bither.
10) Fix RX packet handling in smsc911x driver, from Will Deacon.
11) Fix ixgbe WoL on fiber by leaving the laser on during shutdown.
12) ks8851 needs MAX_RECV_FRAMES increased otherwise the internal MAC buffers are easily overflown. Fix from Davide Cimingahi.
13) Fix memory leaks in peak_usb CAN driver, from Jesper Juhl.
14) gred packet scheduler can dump in WRED mode when doing a netlink dump. Fix from David Ward.
15) Fix MTU in USB smsc75xx driver, from Stephane Fillod.
16) Dummy device needs ->ndo_uninit handler to properly handle ->ndo_init failures. From Hiroaki SHIMODA.
17) Fix TX fragmentation in ath9k driver, from Sujith Manoharan.
18) Missing RTNL lock in ixgbe PM resume, from Benjamin Poirier.
19) Missing iounmap in farsync WAN driver, from Julia Lawall.
20) With LRO/GRO, tcp_grow_window() is easily tricked into not growing the receive window properly, and this hurts performance. Fix from Eric Dumazet.
21) Network namespace init failure can leak net_generic data, fix from Julian Anastasov.
22) Fix skb_over_panic due to mis-accounting in TCP for partially ACK'd SKBs. From Eric Dumazet.
23) New IDs for qmi_wwan driver, from Bjørn Mork.
24) Fix races in ax25_exit(), from Eric W. Biederman.
25) IPV6 TCP doesn't handle TCP_MAXSEG socket option properly, copy over logic from the IPV4 side. From Neal Cardwell.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (59 commits)
  tcp: fix TCP_MAXSEG for established IPv6 passive sockets
  drivers/net: Do not free an IRQ if its request failed
  drop_monitor: allow more events per second
  ks8851: Fix request_irq/free_irq mismatch
  net/hyperv: Adding cancellation to ensure rndis filter is closed
  ks8851: Fix mutex deadlock in ks8851_net_stop()
  net ax25: Reorder ax25_exit to remove races.
  icplus: fix interrupt for IC+ 101A/G and 1001LF
  net: qmi_wwan: support Sierra Wireless MC77xx devices in QMI mode
  bnx2x: off by one in bnx2x_ets_e3b0_sp_pri_to_cos_set()
  ksz884x: don't copy too much in netdev_set_mac_address()
  tcp: fix retransmit of partially acked frames
  netns: do not leak net_generic data on failed init
  net/sock.h: fix sk_peek_off kernel-doc warning
  tcp: fix tcp_grow_window() for large incoming frames
  drivers/net/wan/farsync.c: add missing iounmap
  davinci_mdio: Fix MDIO timeout check
  ipv6: clean up rt6_clean_expires
  ipv6: fix rt6_update_expires
  arcnet: rimi: Fix device name in debug output
  ...
@@ -3592,6 +3592,7 @@ S: Supported
F: drivers/net/wireless/iwlegacy/

INTEL WIRELESS WIFI LINK (iwlwifi)
M: Johannes Berg <johannes.berg@intel.com>
M: Wey-Yi Guy <wey-yi.w.guy@intel.com>
M: Intel Linux Wireless <ilw@linux.intel.com>
L: linux-wireless@vger.kernel.org

@@ -7578,8 +7579,8 @@ F: Documentation/filesystems/xfs.txt
F: fs/xfs/

XILINX AXI ETHERNET DRIVER
M: Ariane Keller <ariane.keller@tik.ee.ethz.ch>
M: Daniel Borkmann <daniel.borkmann@tik.ee.ethz.ch>
M: Anirudha Sarangi <anirudh@xilinx.com>
M: John Linn <John.Linn@xilinx.com>
S: Maintained
F: drivers/net/ethernet/xilinx/xilinx_axienet*
@@ -404,16 +404,19 @@ int bcma_sprom_get(struct bcma_bus *bus)
return -EOPNOTSUPP;

if (!bcma_sprom_ext_available(bus)) {
bool sprom_onchip;

/*
* External SPROM takes precedence so check
* on-chip OTP only when no external SPROM
* is present.
*/
if (bcma_sprom_onchip_available(bus)) {
sprom_onchip = bcma_sprom_onchip_available(bus);
if (sprom_onchip) {
/* determine offset */
offset = bcma_sprom_onchip_offset(bus);
}
if (!offset) {
if (!offset || !sprom_onchip) {
/*
* Maybe there is no SPROM on the device?
* Now we ask the arch code if there is some sprom
@@ -89,16 +89,16 @@ static int __init arcrimi_probe(struct net_device *dev)
BUGLVL(D_NORMAL) printk(VERSION);
BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n");

BUGMSG(D_NORMAL, "Given: node %02Xh, shmem %lXh, irq %d\n",
BUGLVL(D_NORMAL) printk("Given: node %02Xh, shmem %lXh, irq %d\n",
dev->dev_addr[0], dev->mem_start, dev->irq);

if (dev->mem_start <= 0 || dev->irq <= 0) {
BUGMSG(D_NORMAL, "No autoprobe for RIM I; you "
BUGLVL(D_NORMAL) printk("No autoprobe for RIM I; you "
"must specify the shmem and irq!\n");
return -ENODEV;
}
if (dev->dev_addr[0] == 0) {
BUGMSG(D_NORMAL, "You need to specify your card's station "
BUGLVL(D_NORMAL) printk("You need to specify your card's station "
"ID!\n");
return -ENODEV;
}
@@ -109,7 +109,7 @@ static int __init arcrimi_probe(struct net_device *dev)
* will be taken.
*/
if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) {
BUGMSG(D_NORMAL, "Card memory already allocated\n");
BUGLVL(D_NORMAL) printk("Card memory already allocated\n");
return -ENODEV;
}
return arcrimi_found(dev);
@@ -744,14 +744,14 @@ static void cfhsi_wake_up(struct work_struct *work)
size_t fifo_occupancy = 0;

/* Wakeup timeout */
dev_err(&cfhsi->ndev->dev, "%s: Timeout.\n",
dev_dbg(&cfhsi->ndev->dev, "%s: Timeout.\n",
__func__);

/* Check FIFO to check if modem has sent something. */
WARN_ON(cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
&fifo_occupancy));

dev_err(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
dev_dbg(&cfhsi->ndev->dev, "%s: Bytes in FIFO: %u.\n",
__func__, (unsigned) fifo_occupancy);

/* Check if we misssed the interrupt. */
@@ -1210,7 +1210,7 @@ int cfhsi_probe(struct platform_device *pdev)

static void cfhsi_shutdown(struct cfhsi *cfhsi)
{
u8 *tx_buf, *rx_buf;
u8 *tx_buf, *rx_buf, *flip_buf;

/* Stop TXing */
netif_tx_stop_all_queues(cfhsi->ndev);
@@ -1234,7 +1234,7 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi)
/* Store bufferes: will be freed later. */
tx_buf = cfhsi->tx_buf;
rx_buf = cfhsi->rx_buf;

flip_buf = cfhsi->rx_flip_buf;
/* Flush transmit queues. */
cfhsi_abort_tx(cfhsi);

@@ -1247,6 +1247,7 @@ static void cfhsi_shutdown(struct cfhsi *cfhsi)
/* Free buffers. */
kfree(tx_buf);
kfree(rx_buf);
kfree(flip_buf);
}

int cfhsi_remove(struct platform_device *pdev)
@@ -875,6 +875,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
PCAN_USBPRO_INFO_FW,
&fi, sizeof(fi));
if (err) {
kfree(usb_if);
dev_err(dev->netdev->dev.parent,
"unable to read %s firmware info (err %d)\n",
pcan_usb_pro.name, err);
@@ -885,6 +886,7 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
PCAN_USBPRO_INFO_BL,
&bi, sizeof(bi));
if (err) {
kfree(usb_if);
dev_err(dev->netdev->dev.parent,
"unable to read %s bootloader info (err %d)\n",
pcan_usb_pro.name, err);
@@ -107,14 +107,14 @@ static int dummy_dev_init(struct net_device *dev)
return 0;
}

static void dummy_dev_free(struct net_device *dev)
static void dummy_dev_uninit(struct net_device *dev)
{
free_percpu(dev->dstats);
free_netdev(dev);
}

static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
.ndo_uninit = dummy_dev_uninit,
.ndo_start_xmit = dummy_xmit,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = set_multicast_list,
@@ -128,7 +128,7 @@ static void dummy_setup(struct net_device *dev)

/* Initialize the device structure. */
dev->netdev_ops = &dummy_netdev_ops;
dev->destructor = dummy_dev_free;
dev->destructor = free_netdev;

/* Fill in device structure with ethernet-generic values. */
dev->tx_queue_len = 0;
@@ -2476,7 +2476,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
"pcie phy link down %x\n", status);
if (netif_running(adapter->netdev)) { /* reset MAC */
iowrite32(0, adapter->hw.hw_addr + REG_IMR);
schedule_work(&adapter->pcie_dma_to_rst_task);
schedule_work(&adapter->reset_dev_task);
return IRQ_HANDLED;
}
}
@@ -2488,7 +2488,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
"pcie DMA r/w error (status = 0x%x)\n",
status);
iowrite32(0, adapter->hw.hw_addr + REG_IMR);
schedule_work(&adapter->pcie_dma_to_rst_task);
schedule_work(&adapter->reset_dev_task);
return IRQ_HANDLED;
}

@@ -2633,10 +2633,10 @@ static void atl1_down(struct atl1_adapter *adapter)
atl1_clean_rx_ring(adapter);
}

static void atl1_tx_timeout_task(struct work_struct *work)
static void atl1_reset_dev_task(struct work_struct *work)
{
struct atl1_adapter *adapter =
container_of(work, struct atl1_adapter, tx_timeout_task);
container_of(work, struct atl1_adapter, reset_dev_task);
struct net_device *netdev = adapter->netdev;

netif_device_detach(netdev);
@@ -3038,12 +3038,10 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
(unsigned long)adapter);
adapter->phy_timer_pending = false;

INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);

INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);

INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);

err = register_netdev(netdev);
if (err)
goto err_common;
@@ -758,9 +758,8 @@ struct atl1_adapter {
u16 link_speed;
u16 link_duplex;
spinlock_t lock;
struct work_struct tx_timeout_task;
struct work_struct reset_dev_task;
struct work_struct link_chg_task;
struct work_struct pcie_dma_to_rst_task;

struct timer_list phy_config_timer;
bool phy_timer_pending;
@@ -194,7 +194,7 @@ static void atlx_tx_timeout(struct net_device *netdev)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
schedule_work(&adapter->tx_timeout_task);
schedule_work(&adapter->reset_dev_task);
}

/*
@ -942,6 +942,12 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
|
|||
const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
|
||||
DCBX_E3B0_MAX_NUM_COS_PORT0;
|
||||
|
||||
if (pri >= max_num_of_cos) {
|
||||
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
|
||||
"parameter Illegal strict priority\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
|
||||
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
|
||||
"parameter There can't be two COS's with "
|
||||
|
@ -949,12 +955,6 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (pri > max_num_of_cos) {
|
||||
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
|
||||
"parameter Illegal strict priority\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
sp_pri_to_cos[pri] = cos_entry;
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -1310,10 +1310,6 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
|
|||
|
||||
if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
|
||||
oem_reg |= HV_OEM_BITS_LPLU;
|
||||
|
||||
/* Set Restart auto-neg to activate the bits */
|
||||
if (!hw->phy.ops.check_reset_block(hw))
|
||||
oem_reg |= HV_OEM_BITS_RESTART_AN;
|
||||
} else {
|
||||
if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
|
||||
E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
|
||||
|
@ -1324,6 +1320,11 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
|
|||
oem_reg |= HV_OEM_BITS_LPLU;
|
||||
}
|
||||
|
||||
/* Set Restart auto-neg to activate the bits */
|
||||
if ((d0_state || (hw->mac.type != e1000_pchlan)) &&
|
||||
!hw->phy.ops.check_reset_block(hw))
|
||||
oem_reg |= HV_OEM_BITS_RESTART_AN;
|
||||
|
||||
ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
|
||||
|
||||
release:
|
||||
|
@ -3682,7 +3683,11 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
|
|||
|
||||
if (hw->mac.type >= e1000_pchlan) {
|
||||
e1000_oem_bits_config_ich8lan(hw, false);
|
||||
e1000_phy_hw_reset_ich8lan(hw);
|
||||
|
||||
/* Reset PHY to activate OEM bits on 82577/8 */
|
||||
if (hw->mac.type == e1000_pchlan)
|
||||
e1000e_phy_hw_reset_generic(hw);
|
||||
|
||||
ret_val = hw->phy.ops.acquire(hw);
|
||||
if (ret_val)
|
||||
return;
|
||||
|
|
|
@ -622,6 +622,16 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
|
|||
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
|
||||
set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
|
||||
|
||||
#ifdef IXGBE_FCOE
|
||||
if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
|
||||
struct ixgbe_ring_feature *f;
|
||||
f = &adapter->ring_feature[RING_F_FCOE];
|
||||
if ((rxr_idx >= f->mask) &&
|
||||
(rxr_idx < f->mask + f->indices))
|
||||
set_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state);
|
||||
}
|
||||
|
||||
#endif /* IXGBE_FCOE */
|
||||
/* apply Rx specific ring traits */
|
||||
ring->count = adapter->rx_ring_count;
|
||||
ring->queue_index = rxr_idx;
|
||||
|
|
|
@ -3154,14 +3154,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
|
|||
set_ring_rsc_enabled(rx_ring);
|
||||
else
|
||||
clear_ring_rsc_enabled(rx_ring);
|
||||
#ifdef IXGBE_FCOE
|
||||
if (netdev->features & NETIF_F_FCOE_MTU) {
|
||||
struct ixgbe_ring_feature *f;
|
||||
f = &adapter->ring_feature[RING_F_FCOE];
|
||||
if ((i >= f->mask) && (i < f->mask + f->indices))
|
||||
set_bit(__IXGBE_RX_FCOE_BUFSZ, &rx_ring->state);
|
||||
}
|
||||
#endif /* IXGBE_FCOE */
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -4836,7 +4828,9 @@ static int ixgbe_resume(struct pci_dev *pdev)
|
|||
|
||||
pci_wake_from_d3(pdev, false);
|
||||
|
||||
rtnl_lock();
|
||||
err = ixgbe_init_interrupt_scheme(adapter);
|
||||
rtnl_unlock();
|
||||
if (err) {
|
||||
e_dev_err("Cannot initialize interrupts for device\n");
|
||||
return err;
|
||||
|
@ -4893,6 +4887,16 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
|
|||
if (wufc) {
|
||||
ixgbe_set_rx_mode(netdev);
|
||||
|
||||
/*
|
||||
* enable the optics for both mult-speed fiber and
|
||||
* 82599 SFP+ fiber as we can WoL.
|
||||
*/
|
||||
if (hw->mac.ops.enable_tx_laser &&
|
||||
(hw->phy.multispeed_fiber ||
|
||||
(hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber &&
|
||||
hw->mac.type == ixgbe_mac_82599EB)))
|
||||
hw->mac.ops.enable_tx_laser(hw);
|
||||
|
||||
/* turn on all-multi mode if wake on multicast is enabled */
|
||||
if (wufc & IXGBE_WUFC_MC) {
|
||||
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
|
||||
|
|
|
@ -889,16 +889,17 @@ static int ks8851_net_stop(struct net_device *dev)
|
|||
netif_stop_queue(dev);
|
||||
|
||||
mutex_lock(&ks->lock);
|
||||
/* turn off the IRQs and ack any outstanding */
|
||||
ks8851_wrreg16(ks, KS_IER, 0x0000);
|
||||
ks8851_wrreg16(ks, KS_ISR, 0xffff);
|
||||
mutex_unlock(&ks->lock);
|
||||
|
||||
/* stop any outstanding work */
|
||||
flush_work(&ks->irq_work);
|
||||
flush_work(&ks->tx_work);
|
||||
flush_work(&ks->rxctrl_work);
|
||||
|
||||
/* turn off the IRQs and ack any outstanding */
|
||||
ks8851_wrreg16(ks, KS_IER, 0x0000);
|
||||
ks8851_wrreg16(ks, KS_ISR, 0xffff);
|
||||
|
||||
mutex_lock(&ks->lock);
|
||||
/* shutdown RX process */
|
||||
ks8851_wrreg16(ks, KS_RXCR1, 0x0000);
|
||||
|
||||
|
@ -907,6 +908,7 @@ static int ks8851_net_stop(struct net_device *dev)
|
|||
|
||||
/* set powermode to soft power down to save power */
|
||||
ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
|
||||
mutex_unlock(&ks->lock);
|
||||
|
||||
/* ensure any queued tx buffers are dumped */
|
||||
while (!skb_queue_empty(&ks->txq)) {
|
||||
|
@ -918,7 +920,6 @@ static int ks8851_net_stop(struct net_device *dev)
|
|||
dev_kfree_skb(txb);
|
||||
}
|
||||
|
||||
mutex_unlock(&ks->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1418,6 +1419,7 @@ static int __devinit ks8851_probe(struct spi_device *spi)
|
|||
struct net_device *ndev;
|
||||
struct ks8851_net *ks;
|
||||
int ret;
|
||||
unsigned cider;
|
||||
|
||||
ndev = alloc_etherdev(sizeof(struct ks8851_net));
|
||||
if (!ndev)
|
||||
|
@ -1484,8 +1486,8 @@ static int __devinit ks8851_probe(struct spi_device *spi)
|
|||
ks8851_soft_reset(ks, GRR_GSR);
|
||||
|
||||
/* simple check for a valid chip being connected to the bus */
|
||||
|
||||
if ((ks8851_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
|
||||
cider = ks8851_rdreg16(ks, KS_CIDER);
|
||||
if ((cider & ~CIDER_REV_MASK) != CIDER_ID) {
|
||||
dev_err(&spi->dev, "failed to read device ID\n");
|
||||
ret = -ENODEV;
|
||||
goto err_id;
|
||||
|
@ -1516,15 +1518,14 @@ static int __devinit ks8851_probe(struct spi_device *spi)
|
|||
}
|
||||
|
||||
netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
|
||||
CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
|
||||
ndev->dev_addr, ndev->irq,
|
||||
CIDER_REV_GET(cider), ndev->dev_addr, ndev->irq,
|
||||
ks->rc_ccr & CCR_EEPROM ? "has" : "no");
|
||||
|
||||
return 0;
|
||||
|
||||
|
||||
err_netdev:
|
||||
free_irq(ndev->irq, ndev);
|
||||
free_irq(ndev->irq, ks);
|
||||
|
||||
err_id:
|
||||
err_irq:
|
||||
|
|
|
@ -40,7 +40,7 @@
|
|||
#define DRV_NAME "ks8851_mll"
|
||||
|
||||
static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
|
||||
#define MAX_RECV_FRAMES 32
|
||||
#define MAX_RECV_FRAMES 255
|
||||
#define MAX_BUF_SIZE 2048
|
||||
#define TX_BUF_SIZE 2000
|
||||
#define RX_BUF_SIZE 2000
|
||||
|
|
|
@ -5675,7 +5675,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
|
|||
memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
|
||||
}
|
||||
|
||||
memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
|
||||
memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
|
||||
|
||||
interrupt = hw_block_intr(hw);
|
||||
|
||||
|
|
|
@ -958,6 +958,11 @@ static inline void cp_start_hw (struct cp_private *cp)
|
|||
cpw8(Cmd, RxOn | TxOn);
|
||||
}
|
||||
|
||||
static void cp_enable_irq(struct cp_private *cp)
|
||||
{
|
||||
cpw16_f(IntrMask, cp_intr_mask);
|
||||
}
|
||||
|
||||
static void cp_init_hw (struct cp_private *cp)
|
||||
{
|
||||
struct net_device *dev = cp->dev;
|
||||
|
@ -997,8 +1002,6 @@ static void cp_init_hw (struct cp_private *cp)
|
|||
|
||||
cpw16(MultiIntr, 0);
|
||||
|
||||
cpw16_f(IntrMask, cp_intr_mask);
|
||||
|
||||
cpw8_f(Cfg9346, Cfg9346_Lock);
|
||||
}
|
||||
|
||||
|
@ -1130,6 +1133,8 @@ static int cp_open (struct net_device *dev)
|
|||
if (rc)
|
||||
goto err_out_hw;
|
||||
|
||||
cp_enable_irq(cp);
|
||||
|
||||
netif_carrier_off(dev);
|
||||
mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
|
||||
netif_start_queue(dev);
|
||||
|
@ -2031,6 +2036,7 @@ static int cp_resume (struct pci_dev *pdev)
|
|||
/* FIXME: sh*t may happen if the Rx ring buffer is depleted */
|
||||
cp_init_rings_index (cp);
|
||||
cp_init_hw (cp);
|
||||
cp_enable_irq(cp);
|
||||
netif_start_queue (dev);
|
||||
|
||||
spin_lock_irqsave (&cp->lock, flags);
|
||||
|
|
|
@ -1166,10 +1166,8 @@ smsc911x_rx_counterrors(struct net_device *dev, unsigned int rxstat)
|
|||
|
||||
/* Quickly dumps bad packets */
|
||||
static void
|
||||
smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktbytes)
|
||||
smsc911x_rx_fastforward(struct smsc911x_data *pdata, unsigned int pktwords)
|
||||
{
|
||||
unsigned int pktwords = (pktbytes + NET_IP_ALIGN + 3) >> 2;
|
||||
|
||||
if (likely(pktwords >= 4)) {
|
||||
unsigned int timeout = 500;
|
||||
unsigned int val;
|
||||
|
@ -1233,7 +1231,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
|
|||
continue;
|
||||
}
|
||||
|
||||
skb = netdev_alloc_skb(dev, pktlength + NET_IP_ALIGN);
|
||||
skb = netdev_alloc_skb(dev, pktwords << 2);
|
||||
if (unlikely(!skb)) {
|
||||
SMSC_WARN(pdata, rx_err,
|
||||
"Unable to allocate skb for rx packet");
|
||||
|
@ -1243,14 +1241,12 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
|
|||
break;
|
||||
}
|
||||
|
||||
skb->data = skb->head;
|
||||
skb_reset_tail_pointer(skb);
|
||||
pdata->ops->rx_readfifo(pdata,
|
||||
(unsigned int *)skb->data, pktwords);
|
||||
|
||||
/* Align IP on 16B boundary */
|
||||
skb_reserve(skb, NET_IP_ALIGN);
|
||||
skb_put(skb, pktlength - 4);
|
||||
pdata->ops->rx_readfifo(pdata,
|
||||
(unsigned int *)skb->head, pktwords);
|
||||
skb->protocol = eth_type_trans(skb, dev);
|
||||
skb_checksum_none_assert(skb);
|
||||
netif_receive_skb(skb);
|
||||
|
@ -1565,7 +1561,7 @@ static int smsc911x_open(struct net_device *dev)
|
|||
smsc911x_reg_write(pdata, FIFO_INT, temp);
|
||||
|
||||
/* set RX Data offset to 2 bytes for alignment */
|
||||
smsc911x_reg_write(pdata, RX_CFG, (2 << 8));
|
||||
smsc911x_reg_write(pdata, RX_CFG, (NET_IP_ALIGN << 8));
|
||||
|
||||
/* enable NAPI polling before enabling RX interrupts */
|
||||
napi_enable(&pdata->napi);
|
||||
|
@ -2382,7 +2378,6 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
|
|||
SET_NETDEV_DEV(dev, &pdev->dev);
|
||||
|
||||
pdata = netdev_priv(dev);
|
||||
|
||||
dev->irq = irq_res->start;
|
||||
irq_flags = irq_res->flags & IRQF_TRIGGER_MASK;
|
||||
pdata->ioaddr = ioremap_nocache(res->start, res_size);
|
||||
|
@ -2446,7 +2441,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
|
|||
if (retval) {
|
||||
SMSC_WARN(pdata, probe,
|
||||
"Unable to claim requested irq: %d", dev->irq);
|
||||
goto out_free_irq;
|
||||
goto out_disable_resources;
|
||||
}
|
||||
|
||||
retval = register_netdev(dev);
|
||||
|
|
|
@ -181,6 +181,11 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
|
|||
__davinci_mdio_reset(data);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
reg = __raw_readl(®s->user[0].access);
|
||||
if ((reg & USERACCESS_GO) == 0)
|
||||
return 0;
|
||||
|
||||
dev_err(data->dev, "timed out waiting for user access\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
|
|
@ -2,9 +2,7 @@
|
|||
* Definitions for Xilinx Axi Ethernet device driver.
|
||||
*
|
||||
* Copyright (c) 2009 Secret Lab Technologies, Ltd.
|
||||
* Copyright (c) 2010 Xilinx, Inc. All rights reserved.
|
||||
* Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
|
||||
* Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
|
||||
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef XILINX_AXIENET_H
|
||||
|
|
|
@ -4,9 +4,9 @@
|
|||
* Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
|
||||
* Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
|
||||
* Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
|
||||
* Copyright (c) 2010 Xilinx, Inc. All rights reserved.
|
||||
* Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
|
||||
* Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
|
||||
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
|
||||
* Copyright (c) 2010 - 2011 PetaLogix
|
||||
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
|
||||
*
|
||||
* This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
|
||||
* and Spartan6.
|
||||
|
|
|
@ -2,9 +2,9 @@
|
|||
* MDIO bus driver for the Xilinx Axi Ethernet device
|
||||
*
|
||||
* Copyright (c) 2009 Secret Lab Technologies, Ltd.
|
||||
* Copyright (c) 2010 Xilinx, Inc. All rights reserved.
|
||||
* Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch>
|
||||
* Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch>
|
||||
* Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
|
||||
* Copyright (c) 2010 - 2011 PetaLogix
|
||||
* Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/of_address.h>
|
||||
|
|
|
@ -44,6 +44,7 @@ struct net_device_context {
|
|||
/* point back to our device context */
|
||||
struct hv_device *device_ctx;
|
||||
struct delayed_work dwork;
|
||||
struct work_struct work;
|
||||
};
|
||||
|
||||
|
||||
|
@ -51,30 +52,22 @@ static int ring_size = 128;
|
|||
module_param(ring_size, int, S_IRUGO);
|
||||
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
|
||||
|
||||
struct set_multicast_work {
|
||||
struct work_struct work;
|
||||
struct net_device *net;
|
||||
};
|
||||
|
||||
static void do_set_multicast(struct work_struct *w)
|
||||
{
|
||||
struct set_multicast_work *swk =
|
||||
container_of(w, struct set_multicast_work, work);
|
||||
struct net_device *net = swk->net;
|
||||
|
||||
struct net_device_context *ndevctx = netdev_priv(net);
|
||||
struct net_device_context *ndevctx =
|
||||
container_of(w, struct net_device_context, work);
|
||||
struct netvsc_device *nvdev;
|
||||
struct rndis_device *rdev;
|
||||
|
||||
nvdev = hv_get_drvdata(ndevctx->device_ctx);
|
||||
if (nvdev == NULL)
|
||||
goto out;
|
||||
if (nvdev == NULL || nvdev->ndev == NULL)
|
||||
return;
|
||||
|
||||
rdev = nvdev->extension;
|
||||
if (rdev == NULL)
|
||||
goto out;
|
||||
return;
|
||||
|
||||
if (net->flags & IFF_PROMISC)
|
||||
if (nvdev->ndev->flags & IFF_PROMISC)
|
||||
rndis_filter_set_packet_filter(rdev,
|
||||
NDIS_PACKET_TYPE_PROMISCUOUS);
|
||||
else
|
||||
|
@ -82,21 +75,13 @@ static void do_set_multicast(struct work_struct *w)
|
|||
NDIS_PACKET_TYPE_BROADCAST |
|
||||
NDIS_PACKET_TYPE_ALL_MULTICAST |
|
||||
NDIS_PACKET_TYPE_DIRECTED);
|
||||
|
||||
out:
|
||||
kfree(w);
|
||||
}
|
||||
|
||||
static void netvsc_set_multicast_list(struct net_device *net)
|
||||
{
|
||||
struct set_multicast_work *swk =
|
||||
kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC);
|
||||
if (swk == NULL)
|
||||
return;
|
||||
struct net_device_context *net_device_ctx = netdev_priv(net);
|
||||
|
||||
swk->net = net;
|
||||
INIT_WORK(&swk->work, do_set_multicast);
|
||||
schedule_work(&swk->work);
|
||||
schedule_work(&net_device_ctx->work);
|
||||
}
|
||||
|
||||
static int netvsc_open(struct net_device *net)
|
||||
|
@ -125,6 +110,8 @@ static int netvsc_close(struct net_device *net)
|
|||
|
||||
netif_tx_disable(net);
|
||||
|
||||
/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
|
||||
cancel_work_sync(&net_device_ctx->work);
|
||||
ret = rndis_filter_close(device_obj);
|
||||
if (ret != 0)
|
||||
netdev_err(net, "unable to close device (ret %d).\n", ret);
|
||||
|
@ -335,6 +322,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
|
|||
|
||||
nvdev->start_remove = true;
|
||||
cancel_delayed_work_sync(&ndevctx->dwork);
|
||||
cancel_work_sync(&ndevctx->work);
|
||||
netif_tx_disable(ndev);
|
||||
rndis_filter_device_remove(hdev);
|
||||
|
||||
|
@ -403,6 +391,7 @@ static int netvsc_probe(struct hv_device *dev,
|
|||
net_device_ctx->device_ctx = dev;
|
||||
hv_set_drvdata(dev, net);
|
||||
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
|
||||
INIT_WORK(&net_device_ctx->work, do_set_multicast);
|
||||
|
||||
net->netdev_ops = &device_ops;
|
||||
|
||||
|
@ -456,6 +445,7 @@ static int netvsc_remove(struct hv_device *dev)
|
|||
|
||||
ndev_ctx = netdev_priv(net);
|
||||
cancel_delayed_work_sync(&ndev_ctx->dwork);
|
||||
cancel_work_sync(&ndev_ctx->work);
|
||||
|
||||
/* Stop outbound asap */
|
||||
netif_tx_disable(net);
|
||||
|
|
|
@ -40,6 +40,7 @@ MODULE_LICENSE("GPL");
|
|||
#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */
|
||||
#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
|
||||
#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
|
||||
#define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */
|
||||
|
||||
static int ip175c_config_init(struct phy_device *phydev)
|
||||
{
|
||||
|
@ -185,6 +186,15 @@ static int ip175c_config_aneg(struct phy_device *phydev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int ip101a_g_ack_interrupt(struct phy_device *phydev)
|
||||
{
|
||||
int err = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct phy_driver ip175c_driver = {
|
||||
.phy_id = 0x02430d80,
|
||||
.name = "ICPlus IP175C",
|
||||
|
@ -204,7 +214,6 @@ static struct phy_driver ip1001_driver = {
|
|||
.phy_id_mask = 0x0ffffff0,
|
||||
.features = PHY_GBIT_FEATURES | SUPPORTED_Pause |
|
||||
SUPPORTED_Asym_Pause,
|
||||
.flags = PHY_HAS_INTERRUPT,
|
||||
.config_init = &ip1001_config_init,
|
||||
.config_aneg = &genphy_config_aneg,
|
||||
.read_status = &genphy_read_status,
|
||||
|
@ -220,6 +229,7 @@ static struct phy_driver ip101a_g_driver = {
|
|||
.features = PHY_BASIC_FEATURES | SUPPORTED_Pause |
|
||||
SUPPORTED_Asym_Pause,
|
||||
.flags = PHY_HAS_INTERRUPT,
|
||||
.ack_interrupt = ip101a_g_ack_interrupt,
|
||||
.config_init = &ip101a_g_config_init,
|
||||
.config_aneg = &genphy_config_aneg,
|
||||
.read_status = &genphy_read_status,
|
||||
|
|
|
@ -235,7 +235,7 @@ struct ppp_net {
|
|||
/* Prototypes. */
|
||||
static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
|
||||
struct file *file, unsigned int cmd, unsigned long arg);
|
||||
static int ppp_xmit_process(struct ppp *ppp);
|
||||
static void ppp_xmit_process(struct ppp *ppp);
|
||||
static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
|
||||
static void ppp_push(struct ppp *ppp);
|
||||
static void ppp_channel_push(struct channel *pch);
|
||||
|
@ -969,8 +969,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
put_unaligned_be16(proto, pp);
|
||||
|
||||
skb_queue_tail(&ppp->file.xq, skb);
|
||||
if (!ppp_xmit_process(ppp))
|
||||
netif_stop_queue(dev);
|
||||
ppp_xmit_process(ppp);
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
outf:
|
||||
|
@ -1048,11 +1047,10 @@ static void ppp_setup(struct net_device *dev)
|
|||
* Called to do any work queued up on the transmit side
|
||||
* that can now be done.
|
||||
*/
|
||||
static int
|
||||
static void
|
||||
ppp_xmit_process(struct ppp *ppp)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
int ret = 0;
|
||||
|
||||
ppp_xmit_lock(ppp);
|
||||
if (!ppp->closing) {
|
||||
|
@ -1062,13 +1060,12 @@ ppp_xmit_process(struct ppp *ppp)
|
|||
ppp_send_frame(ppp, skb);
|
||||
/* If there's no work left to do, tell the core net
|
||||
code that we can accept some more. */
|
||||
if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) {
|
||||
if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
|
||||
netif_wake_queue(ppp->dev);
|
||||
ret = 1;
|
||||
}
|
||||
else
|
||||
netif_stop_queue(ppp->dev);
|
||||
}
|
||||
ppp_xmit_unlock(ppp);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline struct sk_buff *
|
||||
|
|
|
@ -365,6 +365,27 @@ static const struct driver_info qmi_wwan_force_int4 = {
|
|||
.data = BIT(4), /* interface whitelist bitmap */
|
||||
};
|
||||
|
||||
/* Sierra Wireless provide equally useless interface descriptors
|
||||
* Devices in QMI mode can be switched between two different
|
||||
* configurations:
|
||||
* a) USB interface #8 is QMI/wwan
|
||||
* b) USB interfaces #8, #19 and #20 are QMI/wwan
|
||||
*
|
||||
* Both configurations provide a number of other interfaces (serial++),
|
||||
* some of which have the same endpoint configuration as we expect, so
|
||||
* a whitelist or blacklist is necessary.
|
||||
*
|
||||
* FIXME: The below whitelist should include BIT(20). It does not
|
||||
* because I cannot get it to work...
|
||||
*/
|
||||
static const struct driver_info qmi_wwan_sierra = {
|
||||
.description = "Sierra Wireless wwan/QMI device",
|
||||
.flags = FLAG_WWAN,
|
||||
.bind = qmi_wwan_bind_gobi,
|
||||
.unbind = qmi_wwan_unbind_shared,
|
||||
.manage_power = qmi_wwan_manage_power,
|
||||
.data = BIT(8) | BIT(19), /* interface whitelist bitmap */
|
||||
};
|
||||
|
||||
#define HUAWEI_VENDOR_ID 0x12D1
|
||||
#define QMI_GOBI_DEVICE(vend, prod) \
|
||||
|
@ -445,6 +466,15 @@ static const struct usb_device_id products[] = {
|
|||
.bInterfaceProtocol = 0xff,
|
||||
.driver_info = (unsigned long)&qmi_wwan_force_int4,
|
||||
},
|
||||
{ /* Sierra Wireless MC77xx in QMI mode */
|
||||
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,
|
||||
.idVendor = 0x1199,
|
||||
.idProduct = 0x68a2,
|
||||
.bInterfaceClass = 0xff,
|
||||
.bInterfaceSubClass = 0xff,
|
||||
.bInterfaceProtocol = 0xff,
|
||||
.driver_info = (unsigned long)&qmi_wwan_sierra,
|
||||
},
|
||||
{QMI_GOBI_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
|
||||
{QMI_GOBI_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
|
||||
{QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */
|
||||
|
|
|
@ -1051,6 +1051,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
|
|||
dev->net->ethtool_ops = &smsc75xx_ethtool_ops;
|
||||
dev->net->flags |= IFF_MULTICAST;
|
||||
dev->net->hard_header_len += SMSC75XX_TX_OVERHEAD;
|
||||
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -626,16 +626,15 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
/* This can happen with OOM and indirect buffers. */
|
||||
if (unlikely(capacity < 0)) {
|
||||
if (likely(capacity == -ENOMEM)) {
|
||||
if (net_ratelimit()) {
|
||||
if (net_ratelimit())
|
||||
dev_warn(&dev->dev,
|
||||
"TX queue failure: out of memory\n");
|
||||
} else {
|
||||
} else {
|
||||
dev->stats.tx_fifo_errors++;
|
||||
if (net_ratelimit())
|
||||
dev_warn(&dev->dev,
|
||||
"Unexpected TX queue failure: %d\n",
|
||||
capacity);
|
||||
}
|
||||
}
|
||||
dev->stats.tx_dropped++;
|
||||
kfree_skb(skb);
|
||||
|
|
|
@ -2483,6 +2483,7 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
pr_err("Control memory remap failed\n");
|
||||
pci_release_regions(pdev);
|
||||
pci_disable_device(pdev);
|
||||
iounmap(card->mem);
|
||||
kfree(card);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include <linux/nl80211.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/export.h>
|
||||
#include <ar231x_platform.h>
|
||||
#include "ath5k.h"
|
||||
#include "debug.h"
|
||||
|
@ -119,7 +120,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
|
|||
if (res == NULL) {
|
||||
dev_err(&pdev->dev, "no IRQ resource found\n");
|
||||
ret = -ENXIO;
|
||||
goto err_out;
|
||||
goto err_iounmap;
|
||||
}
|
||||
|
||||
irq = res->start;
|
||||
|
@ -128,7 +129,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
|
|||
if (hw == NULL) {
|
||||
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
|
||||
ret = -ENOMEM;
|
||||
goto err_out;
|
||||
goto err_iounmap;
|
||||
}
|
||||
|
||||
ah = hw->priv;
|
||||
|
@ -185,6 +186,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
|
|||
err_free_hw:
|
||||
ieee80211_free_hw(hw);
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
err_iounmap:
|
||||
iounmap(mem);
|
||||
err_out:
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -1548,6 +1548,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
|
|||
struct ath_hw *ah = sc->sc_ah;
|
||||
struct ath_common *common = ath9k_hw_common(ah);
|
||||
struct ieee80211_conf *conf = &hw->conf;
|
||||
bool reset_channel = false;
|
||||
|
||||
ath9k_ps_wakeup(sc);
|
||||
mutex_lock(&sc->mutex);
|
||||
|
@ -1556,6 +1557,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
|
|||
sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
|
||||
if (sc->ps_idle)
|
||||
ath_cancel_work(sc);
|
||||
else
|
||||
/*
|
||||
* The chip needs a reset to properly wake up from
|
||||
* full sleep
|
||||
*/
|
||||
reset_channel = ah->chip_fullsleep;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1584,7 +1591,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
|
|||
}
|
||||
}
|
||||
|
||||
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
|
||||
if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
|
||||
struct ieee80211_channel *curchan = hw->conf.channel;
|
||||
int pos = curchan->hw_value;
|
||||
int old_pos = -1;
|
||||
|
|
|
@ -1820,6 +1820,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
|
|||
struct ath_frame_info *fi = get_frame_info(skb);
|
||||
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
|
||||
struct ath_buf *bf;
|
||||
int fragno;
|
||||
u16 seqno;
|
||||
|
||||
bf = ath_tx_get_buffer(sc);
|
||||
|
@ -1831,9 +1832,16 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
|
|||
ATH_TXBUF_RESET(bf);
|
||||
|
||||
if (tid) {
|
||||
fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
|
||||
seqno = tid->seq_next;
|
||||
hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
|
||||
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
|
||||
|
||||
if (fragno)
|
||||
hdr->seq_ctrl |= cpu_to_le16(fragno);
|
||||
|
||||
if (!ieee80211_has_morefrags(hdr->frame_control))
|
||||
INCR(tid->seq_next, IEEE80211_SEQ_MAX);
|
||||
|
||||
bf->bf_state.seqno = seqno;
|
||||
}
|
||||
|
||||
|
|
|
@ -7614,6 +7614,7 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
|
|||
{
|
||||
int len_mpdu;
|
||||
struct ieee80211_rx_status rx_status;
|
||||
struct ieee80211_hdr *hdr;
|
||||
|
||||
memset(&rx_status, 0, sizeof(rx_status));
|
||||
prep_mac80211_status(wlc, rxh, p, &rx_status);
|
||||
|
@ -7623,6 +7624,13 @@ brcms_c_recvctl(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
|
|||
skb_pull(p, D11_PHY_HDR_LEN);
|
||||
__skb_trim(p, len_mpdu);
|
||||
|
||||
/* unmute transmit */
|
||||
if (wlc->hw->suspended_fifos) {
|
||||
hdr = (struct ieee80211_hdr *)p->data;
|
||||
if (ieee80211_is_beacon(hdr->frame_control))
|
||||
brcms_b_mute(wlc->hw, false);
|
||||
}
|
||||
|
||||
memcpy(IEEE80211_SKB_RXCB(p), &rx_status, sizeof(rx_status));
|
||||
ieee80211_rx_irqsafe(wlc->pub->ieee_hw, p);
|
||||
}
|
||||
|
|
|
@ -103,7 +103,7 @@ static const u32 cipher_suites[] = {
|
|||
* Convert NL80211's auth_type to the one from Libertas, see chapter 5.9.1
|
||||
* in the firmware spec
|
||||
*/
|
||||
static u8 lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
|
||||
static int lbs_auth_to_authtype(enum nl80211_auth_type auth_type)
|
||||
{
|
||||
int ret = -ENOTSUPP;
|
||||
|
||||
|
@ -1411,7 +1411,12 @@ static int lbs_cfg_connect(struct wiphy *wiphy, struct net_device *dev,
|
|||
goto done;
|
||||
}
|
||||
|
||||
lbs_set_authtype(priv, sme);
|
||||
ret = lbs_set_authtype(priv, sme);
|
||||
if (ret == -ENOTSUPP) {
|
||||
wiphy_err(wiphy, "unsupported authtype 0x%x\n", sme->auth_type);
|
||||
goto done;
|
||||
}
|
||||
|
||||
lbs_set_radio(priv, preamble, 1);
|
||||
|
||||
/* Do the actual association */
|
||||
|
|
|
@ -48,15 +48,15 @@
|
|||
#define PCIE_HOST_INT_STATUS_MASK 0xC3C
|
||||
#define PCIE_SCRATCH_2_REG 0xC40
|
||||
#define PCIE_SCRATCH_3_REG 0xC44
|
||||
#define PCIE_SCRATCH_4_REG 0xCC0
|
||||
#define PCIE_SCRATCH_5_REG 0xCC4
|
||||
#define PCIE_SCRATCH_6_REG 0xCC8
|
||||
#define PCIE_SCRATCH_7_REG 0xCCC
|
||||
#define PCIE_SCRATCH_8_REG 0xCD0
|
||||
#define PCIE_SCRATCH_9_REG 0xCD4
|
||||
#define PCIE_SCRATCH_10_REG 0xCD8
|
||||
#define PCIE_SCRATCH_11_REG 0xCDC
|
||||
#define PCIE_SCRATCH_12_REG 0xCE0
|
||||
#define PCIE_SCRATCH_4_REG 0xCD0
|
||||
#define PCIE_SCRATCH_5_REG 0xCD4
|
||||
#define PCIE_SCRATCH_6_REG 0xCD8
|
||||
#define PCIE_SCRATCH_7_REG 0xCDC
|
||||
#define PCIE_SCRATCH_8_REG 0xCE0
|
||||
#define PCIE_SCRATCH_9_REG 0xCE4
|
||||
#define PCIE_SCRATCH_10_REG 0xCE8
|
||||
#define PCIE_SCRATCH_11_REG 0xCEC
|
||||
#define PCIE_SCRATCH_12_REG 0xCF0
|
||||
|
||||
#define CPU_INTR_DNLD_RDY BIT(0)
|
||||
#define CPU_INTR_DOOR_BELL BIT(1)
|
||||
|
|
|
@ -238,7 +238,7 @@ static void handle_tx(struct vhost_net *net)
|
|||
|
||||
vq->heads[vq->upend_idx].len = len;
|
||||
ubuf->callback = vhost_zerocopy_callback;
|
||||
ubuf->arg = vq->ubufs;
|
||||
ubuf->ctx = vq->ubufs;
|
||||
ubuf->desc = vq->upend_idx;
|
||||
msg.msg_control = ubuf;
|
||||
msg.msg_controllen = sizeof(ubuf);
|
||||
|
|
|
@ -1598,10 +1598,9 @@ void vhost_ubuf_put_and_wait(struct vhost_ubuf_ref *ubufs)
|
|||
kfree(ubufs);
|
||||
}
|
||||
|
||||
void vhost_zerocopy_callback(void *arg)
|
||||
void vhost_zerocopy_callback(struct ubuf_info *ubuf)
|
||||
{
|
||||
struct ubuf_info *ubuf = arg;
|
||||
struct vhost_ubuf_ref *ubufs = ubuf->arg;
|
||||
struct vhost_ubuf_ref *ubufs = ubuf->ctx;
|
||||
struct vhost_virtqueue *vq = ubufs->vq;
|
||||
|
||||
/* set len = 1 to mark this desc buffers done DMA */
|
||||
|
|
|
@ -188,7 +188,7 @@ bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
|
|||
|
||||
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
|
||||
unsigned int log_num, u64 len);
|
||||
void vhost_zerocopy_callback(void *arg);
|
||||
void vhost_zerocopy_callback(struct ubuf_info *);
|
||||
int vhost_zerocopy_signal_used(struct vhost_virtqueue *vq);
|
||||
|
||||
#define vq_err(vq, fmt, ...) do { \
|
||||
|
|
|
@ -238,11 +238,12 @@ enum {
|
|||
/*
|
||||
* The callback notifies userspace to release buffers when skb DMA is done in
|
||||
* lower device, the skb last reference should be 0 when calling this.
|
||||
* The desc is used to track userspace buffer index.
|
||||
* The ctx field is used to track device context.
|
||||
* The desc field is used to track userspace buffer index.
|
||||
*/
|
||||
struct ubuf_info {
|
||||
void (*callback)(void *);
|
||||
void *arg;
|
||||
void (*callback)(struct ubuf_info *);
|
||||
void *ctx;
|
||||
unsigned long desc;
|
||||
};
|
||||
|
||||
|
|
|
@ -36,7 +36,11 @@ struct dst_entry {
|
|||
struct net_device *dev;
|
||||
struct dst_ops *ops;
|
||||
unsigned long _metrics;
|
||||
unsigned long expires;
|
||||
union {
|
||||
unsigned long expires;
|
||||
/* point to where the dst_entry copied from */
|
||||
struct dst_entry *from;
|
||||
};
|
||||
struct dst_entry *path;
|
||||
struct neighbour __rcu *_neighbour;
|
||||
#ifdef CONFIG_XFRM
|
||||
|
|
|
@ -123,6 +123,54 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
|
|||
return ((struct rt6_info *)dst)->rt6i_idev;
|
||||
}
|
||||
|
||||
static inline void rt6_clean_expires(struct rt6_info *rt)
|
||||
{
|
||||
if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
|
||||
dst_release(rt->dst.from);
|
||||
|
||||
rt->rt6i_flags &= ~RTF_EXPIRES;
|
||||
rt->dst.from = NULL;
|
||||
}
|
||||
|
||||
static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
|
||||
{
|
||||
if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
|
||||
dst_release(rt->dst.from);
|
||||
|
||||
rt->rt6i_flags |= RTF_EXPIRES;
|
||||
rt->dst.expires = expires;
|
||||
}
|
||||
|
||||
static inline void rt6_update_expires(struct rt6_info *rt, int timeout)
|
||||
{
|
||||
if (!(rt->rt6i_flags & RTF_EXPIRES)) {
|
||||
if (rt->dst.from)
|
||||
dst_release(rt->dst.from);
|
||||
/* dst_set_expires relies on expires == 0
|
||||
* if it has not been set previously.
|
||||
*/
|
||||
rt->dst.expires = 0;
|
||||
}
|
||||
|
||||
dst_set_expires(&rt->dst, timeout);
|
||||
rt->rt6i_flags |= RTF_EXPIRES;
|
||||
}
|
||||
|
||||
static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
|
||||
{
|
||||
struct dst_entry *new = (struct dst_entry *) from;
|
||||
|
||||
if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) {
|
||||
if (new == rt->dst.from)
|
||||
return;
|
||||
dst_release(rt->dst.from);
|
||||
}
|
||||
|
||||
rt->rt6i_flags &= ~RTF_EXPIRES;
|
||||
rt->dst.from = new;
|
||||
dst_hold(new);
|
||||
}
|
||||
|
||||
struct fib6_walker_t {
|
||||
struct list_head lh;
|
||||
struct fib6_node *root, *node;
|
||||
|
|
|
@ -245,7 +245,7 @@ static inline unsigned long red_calc_qavg_from_idle_time(const struct red_parms
|
|||
*
|
||||
* dummy packets as a burst after idle time, i.e.
|
||||
*
|
||||
* p->qavg *= (1-W)^m
|
||||
* v->qavg *= (1-W)^m
|
||||
*
|
||||
* This is an apparently overcomplicated solution (f.e. we have to
|
||||
* precompute a table to make this calculation in reasonable time)
|
||||
|
@ -279,7 +279,7 @@ static inline unsigned long red_calc_qavg_no_idle_time(const struct red_parms *p
|
|||
unsigned int backlog)
|
||||
{
|
||||
/*
|
||||
* NOTE: p->qavg is fixed point number with point at Wlog.
|
||||
* NOTE: v->qavg is fixed point number with point at Wlog.
|
||||
* The formula below is equvalent to floating point
|
||||
* version:
|
||||
*
|
||||
|
@ -390,7 +390,7 @@ static inline void red_adaptative_algo(struct red_parms *p, struct red_vars *v)
|
|||
if (red_is_idling(v))
|
||||
qavg = red_calc_qavg_from_idle_time(p, v);
|
||||
|
||||
/* p->qavg is fixed point number with point at Wlog */
|
||||
/* v->qavg is fixed point number with point at Wlog */
|
||||
qavg >>= p->Wlog;
|
||||
|
||||
if (qavg > p->target_max && p->max_P <= MAX_P_MAX)
|
||||
|
|
|
@ -246,6 +246,7 @@ struct cg_proto;
|
|||
* @sk_user_data: RPC layer private data
|
||||
* @sk_sndmsg_page: cached page for sendmsg
|
||||
* @sk_sndmsg_off: cached offset for sendmsg
|
||||
* @sk_peek_off: current peek_offset value
|
||||
* @sk_send_head: front of stuff to transmit
|
||||
* @sk_security: used by security modules
|
||||
* @sk_mark: generic packet mark
|
||||
|
|
|
@ -2011,16 +2011,17 @@ static void __exit ax25_exit(void)
|
|||
proc_net_remove(&init_net, "ax25_route");
|
||||
proc_net_remove(&init_net, "ax25");
|
||||
proc_net_remove(&init_net, "ax25_calls");
|
||||
ax25_rt_free();
|
||||
ax25_uid_free();
|
||||
ax25_dev_free();
|
||||
|
||||
ax25_unregister_sysctl();
|
||||
unregister_netdevice_notifier(&ax25_dev_notifier);
|
||||
ax25_unregister_sysctl();
|
||||
|
||||
dev_remove_pack(&ax25_packet_type);
|
||||
|
||||
sock_unregister(PF_AX25);
|
||||
proto_unregister(&ax25_proto);
|
||||
|
||||
ax25_rt_free();
|
||||
ax25_uid_free();
|
||||
ax25_dev_free();
|
||||
}
|
||||
module_exit(ax25_exit);
|
||||
|
|
|
@ -103,6 +103,7 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
|
|||
skb->protocol = htons(ETH_P_IPV6);
|
||||
break;
|
||||
default:
|
||||
kfree_skb(skb);
|
||||
priv->netdev->stats.rx_errors++;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -220,14 +221,16 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
|
||||
if (skb->len > priv->netdev->mtu) {
|
||||
pr_warn("Size of skb exceeded MTU\n");
|
||||
kfree_skb(skb);
|
||||
dev->stats.tx_errors++;
|
||||
return -ENOSPC;
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
if (!priv->flowenabled) {
|
||||
pr_debug("dropping packets flow off\n");
|
||||
kfree_skb(skb);
|
||||
dev->stats.tx_dropped++;
|
||||
return NETDEV_TX_BUSY;
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
|
||||
|
@ -242,7 +245,7 @@ static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
|||
result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
|
||||
if (result) {
|
||||
dev->stats.tx_dropped++;
|
||||
return result;
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
/* Update statistics. */
|
||||
|
|
|
@ -1409,14 +1409,34 @@ EXPORT_SYMBOL(register_netdevice_notifier);
|
|||
* register_netdevice_notifier(). The notifier is unlinked into the
|
||||
* kernel structures and may then be reused. A negative errno code
|
||||
* is returned on a failure.
|
||||
*
|
||||
* After unregistering unregister and down device events are synthesized
|
||||
* for all devices on the device list to the removed notifier to remove
|
||||
* the need for special case cleanup code.
|
||||
*/
|
||||
|
||||
int unregister_netdevice_notifier(struct notifier_block *nb)
|
||||
{
|
||||
struct net_device *dev;
|
||||
struct net *net;
|
||||
int err;
|
||||
|
||||
rtnl_lock();
|
||||
err = raw_notifier_chain_unregister(&netdev_chain, nb);
|
||||
if (err)
|
||||
goto unlock;
|
||||
|
||||
for_each_net(net) {
|
||||
for_each_netdev(net, dev) {
|
||||
if (dev->flags & IFF_UP) {
|
||||
nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
|
||||
nb->notifier_call(nb, NETDEV_DOWN, dev);
|
||||
}
|
||||
nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
|
||||
nb->notifier_call(nb, NETDEV_UNREGISTER_BATCH, dev);
|
||||
}
|
||||
}
|
||||
unlock:
|
||||
rtnl_unlock();
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -150,6 +150,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
|
|||
for (i = 0; i < msg->entries; i++) {
|
||||
if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
|
||||
msg->points[i].count++;
|
||||
atomic_inc(&data->dm_hit_count);
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -83,21 +83,29 @@ assign:
|
|||
|
||||
static int ops_init(const struct pernet_operations *ops, struct net *net)
|
||||
{
|
||||
int err;
|
||||
int err = -ENOMEM;
|
||||
void *data = NULL;
|
||||
|
||||
if (ops->id && ops->size) {
|
||||
void *data = kzalloc(ops->size, GFP_KERNEL);
|
||||
data = kzalloc(ops->size, GFP_KERNEL);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
goto out;
|
||||
|
||||
err = net_assign_generic(net, *ops->id, data);
|
||||
if (err) {
|
||||
kfree(data);
|
||||
return err;
|
||||
}
|
||||
if (err)
|
||||
goto cleanup;
|
||||
}
|
||||
err = 0;
|
||||
if (ops->init)
|
||||
return ops->init(net);
|
||||
return 0;
|
||||
err = ops->init(net);
|
||||
if (!err)
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
kfree(data);
|
||||
|
||||
out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static void ops_free(const struct pernet_operations *ops, struct net *net)
|
||||
|
@ -448,12 +456,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
|
|||
static int __register_pernet_operations(struct list_head *list,
|
||||
struct pernet_operations *ops)
|
||||
{
|
||||
int err = 0;
|
||||
err = ops_init(ops, &init_net);
|
||||
if (err)
|
||||
ops_free(ops, &init_net);
|
||||
return err;
|
||||
|
||||
return ops_init(ops, &init_net);
|
||||
}
|
||||
|
||||
static void __unregister_pernet_operations(struct pernet_operations *ops)
|
||||
|
|
|
@ -335,6 +335,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
|
|||
incr = __tcp_grow_window(sk, skb);
|
||||
|
||||
if (incr) {
|
||||
incr = max_t(int, incr, 2 * skb->len);
|
||||
tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
|
||||
tp->window_clamp);
|
||||
inet_csk(sk)->icsk_ack.quick |= 1;
|
||||
|
|
|
@ -1096,6 +1096,7 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)
|
|||
eat = min_t(int, len, skb_headlen(skb));
|
||||
if (eat) {
|
||||
__skb_pull(skb, eat);
|
||||
skb->avail_size -= eat;
|
||||
len -= eat;
|
||||
if (!len)
|
||||
return;
|
||||
|
|
|
@ -803,8 +803,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
|
|||
ip6_del_rt(rt);
|
||||
rt = NULL;
|
||||
} else if (!(rt->rt6i_flags & RTF_EXPIRES)) {
|
||||
rt->dst.expires = expires;
|
||||
rt->rt6i_flags |= RTF_EXPIRES;
|
||||
rt6_set_expires(rt, expires);
|
||||
}
|
||||
}
|
||||
dst_release(&rt->dst);
|
||||
|
@ -1887,11 +1886,9 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
|
|||
rt = NULL;
|
||||
} else if (addrconf_finite_timeout(rt_expires)) {
|
||||
/* not infinity */
|
||||
rt->dst.expires = jiffies + rt_expires;
|
||||
rt->rt6i_flags |= RTF_EXPIRES;
|
||||
rt6_set_expires(rt, jiffies + rt_expires);
|
||||
} else {
|
||||
rt->rt6i_flags &= ~RTF_EXPIRES;
|
||||
rt->dst.expires = 0;
|
||||
rt6_clean_expires(rt);
|
||||
}
|
||||
} else if (valid_lft) {
|
||||
clock_t expires = 0;
|
||||
|
|
|
@ -673,11 +673,10 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
|
|||
&rt->rt6i_gateway)) {
|
||||
if (!(iter->rt6i_flags & RTF_EXPIRES))
|
||||
return -EEXIST;
|
||||
iter->dst.expires = rt->dst.expires;
|
||||
if (!(rt->rt6i_flags & RTF_EXPIRES)) {
|
||||
iter->rt6i_flags &= ~RTF_EXPIRES;
|
||||
iter->dst.expires = 0;
|
||||
}
|
||||
if (!(rt->rt6i_flags & RTF_EXPIRES))
|
||||
rt6_clean_expires(iter);
|
||||
else
|
||||
rt6_set_expires(iter, rt->dst.expires);
|
||||
return -EEXIST;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1264,8 +1264,7 @@ static void ndisc_router_discovery(struct sk_buff *skb)
|
|||
}
|
||||
|
||||
if (rt)
|
||||
rt->dst.expires = jiffies + (HZ * lifetime);
|
||||
|
||||
rt6_set_expires(rt, jiffies + (HZ * lifetime));
|
||||
if (ra_msg->icmph.icmp6_hop_limit) {
|
||||
in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
|
||||
if (rt)
|
||||
|
|
|
@@ -62,7 +62,7 @@
 #include <linux/sysctl.h>
 #endif
 
-static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
+static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
 				    const struct in6_addr *dest);
 static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
@@ -285,6 +285,10 @@ static void ip6_dst_destroy(struct dst_entry *dst)
 		rt->rt6i_idev = NULL;
 		in6_dev_put(idev);
 	}
+
+	if (!(rt->rt6i_flags & RTF_EXPIRES) && dst->from)
+		dst_release(dst->from);
+
 	if (peer) {
 		rt->rt6i_peer = NULL;
 		inet_putpeer(peer);
@@ -329,8 +333,17 @@ static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 
 static __inline__ int rt6_check_expired(const struct rt6_info *rt)
 {
-	return (rt->rt6i_flags & RTF_EXPIRES) &&
-		time_after(jiffies, rt->dst.expires);
+	struct rt6_info *ort = NULL;
+
+	if (rt->rt6i_flags & RTF_EXPIRES) {
+		if (time_after(jiffies, rt->dst.expires))
+			return 1;
+	} else if (rt->dst.from) {
+		ort = (struct rt6_info *) rt->dst.from;
+		return (ort->rt6i_flags & RTF_EXPIRES) &&
+			time_after(jiffies, ort->dst.expires);
+	}
+	return 0;
 }
 
 static inline int rt6_need_strict(const struct in6_addr *daddr)
@@ -620,12 +633,11 @@ int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 			 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
 
 	if (rt) {
-		if (!addrconf_finite_timeout(lifetime)) {
-			rt->rt6i_flags &= ~RTF_EXPIRES;
-		} else {
-			rt->dst.expires = jiffies + HZ * lifetime;
-			rt->rt6i_flags |= RTF_EXPIRES;
-		}
+		if (!addrconf_finite_timeout(lifetime))
+			rt6_clean_expires(rt);
+		else
+			rt6_set_expires(rt, jiffies + HZ * lifetime);
+
 		dst_release(&rt->dst);
 	}
 	return 0;
@@ -730,7 +742,7 @@ int ip6_ins_rt(struct rt6_info *rt)
 	return __ip6_ins_rt(rt, &info);
 }
 
-static struct rt6_info *rt6_alloc_cow(const struct rt6_info *ort,
+static struct rt6_info *rt6_alloc_cow(struct rt6_info *ort,
 				      const struct in6_addr *daddr,
 				      const struct in6_addr *saddr)
 {
@@ -954,10 +966,10 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori
 		rt->rt6i_idev = ort->rt6i_idev;
 		if (rt->rt6i_idev)
 			in6_dev_hold(rt->rt6i_idev);
-		rt->dst.expires = 0;
 
 		rt->rt6i_gateway = ort->rt6i_gateway;
-		rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
+		rt->rt6i_flags = ort->rt6i_flags;
+		rt6_clean_expires(rt);
 		rt->rt6i_metric = 0;
 
 		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
@@ -1019,10 +1031,9 @@ static void ip6_link_failure(struct sk_buff *skb)
 
 	rt = (struct rt6_info *) skb_dst(skb);
 	if (rt) {
-		if (rt->rt6i_flags & RTF_CACHE) {
-			dst_set_expires(&rt->dst, 0);
-			rt->rt6i_flags |= RTF_EXPIRES;
-		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
+		if (rt->rt6i_flags & RTF_CACHE)
+			rt6_update_expires(rt, 0);
+		else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT))
 			rt->rt6i_node->fn_sernum = -1;
 	}
 }
@@ -1289,9 +1300,12 @@ int ip6_route_add(struct fib6_config *cfg)
 	}
 
 	rt->dst.obsolete = -1;
-	rt->dst.expires = (cfg->fc_flags & RTF_EXPIRES) ?
-				jiffies + clock_t_to_jiffies(cfg->fc_expires) :
-				0;
+
+	if (cfg->fc_flags & RTF_EXPIRES)
+		rt6_set_expires(rt, jiffies +
+				clock_t_to_jiffies(cfg->fc_expires));
+	else
+		rt6_clean_expires(rt);
 
 	if (cfg->fc_protocol == RTPROT_UNSPEC)
 		cfg->fc_protocol = RTPROT_BOOT;
@@ -1736,8 +1750,8 @@
 			features |= RTAX_FEATURE_ALLFRAG;
 			dst_metric_set(&rt->dst, RTAX_FEATURES, features);
 		}
-		dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
-		rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
+		rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
+		rt->rt6i_flags |= RTF_MODIFIED;
 		goto out;
 	}
 
@@ -1765,9 +1779,8 @@
 		 * which is 10 mins. After 10 mins the decreased pmtu is expired
 		 * and detecting PMTU increase will be automatically happened.
 		 */
-		dst_set_expires(&nrt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
-		nrt->rt6i_flags |= RTF_DYNAMIC|RTF_EXPIRES;
-
+		rt6_update_expires(nrt, net->ipv6.sysctl.ip6_rt_mtu_expires);
+		nrt->rt6i_flags |= RTF_DYNAMIC;
 		ip6_ins_rt(nrt);
 	}
 out:
@@ -1799,7 +1812,7 @@ void rt6_pmtu_discovery(const struct in6_addr *daddr, const struct in6_addr *sad
  *	Misc support functions
  */
 
-static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
+static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
 				    const struct in6_addr *dest)
 {
 	struct net *net = dev_net(ort->dst.dev);
@@ -1819,10 +1832,14 @@ static struct rt6_info *ip6_rt_copy(const struct rt6_info *ort,
 		if (rt->rt6i_idev)
 			in6_dev_hold(rt->rt6i_idev);
 		rt->dst.lastuse = jiffies;
-		rt->dst.expires = 0;
 
 		rt->rt6i_gateway = ort->rt6i_gateway;
-		rt->rt6i_flags = ort->rt6i_flags & ~RTF_EXPIRES;
+		rt->rt6i_flags = ort->rt6i_flags;
+		if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
+		    (RTF_DEFAULT | RTF_ADDRCONF))
+			rt6_set_from(rt, ort);
+		else
+			rt6_clean_expires(rt);
 		rt->rt6i_metric = 0;
 
 #ifdef CONFIG_IPV6_SUBTREES

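Note: the two groups of hunks above come from net/ipv6/ndisc.c and net/ipv6/route.c. Instead of poking rt->dst.expires and the RTF_EXPIRES flag directly, the code now goes through the rt6_set_expires()/rt6_clean_expires()/rt6_update_expires()/rt6_set_from() helpers, so a route cloned from an addrconf route can inherit its parent's expiry through dst.from instead of carrying a stale copy. A rough sketch of the semantics these helpers need, inferred only from the call sites above (not necessarily the exact in-tree definitions):

/* Sketch only: expiry helpers as implied by the call sites above. */
static inline void rt6_clean_expires(struct rt6_info *rt)
{
	if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
		dst_release(rt->dst.from);	/* stop tracking the parent route */

	rt->rt6i_flags &= ~RTF_EXPIRES;
	rt->dst.from = NULL;
}

static inline void rt6_set_expires(struct rt6_info *rt, unsigned long expires)
{
	if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from)
		dst_release(rt->dst.from);

	rt->rt6i_flags |= RTF_EXPIRES;
	rt->dst.expires = expires;		/* absolute time in jiffies */
}

static inline void rt6_update_expires(struct rt6_info *rt, int timeout)
{
	if (!(rt->rt6i_flags & RTF_EXPIRES) && rt->dst.from) {
		dst_release(rt->dst.from);
		rt->dst.from = NULL;
		rt->dst.expires = 0;		/* dst_set_expires() relies on 0 meaning "unset" */
	}

	dst_set_expires(&rt->dst, timeout);	/* relative timeout in jiffies */
	rt->rt6i_flags |= RTF_EXPIRES;
}

static inline void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
{
	/* the clone tracks its parent's expiry instead of keeping its own */
	rt->rt6i_flags &= ~RTF_EXPIRES;
	dst_hold(&from->dst);
	rt->dst.from = &from->dst;
}

Under that scheme, the new rt6_check_expired() above treats a clone with no expiry of its own as expired exactly when the route it was copied from (dst.from) expires, which is the addrconf-clone expiration fix called out in the pull message.
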
@@ -1383,6 +1383,10 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
 	tcp_mtup_init(newsk);
 	tcp_sync_mss(newsk, dst_mtu(dst));
 	newtp->advmss = dst_metric_advmss(dst);
+	if (tcp_sk(sk)->rx_opt.user_mss &&
+	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
+		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
+
 	tcp_initialize_rcv_mss(newsk);
 	if (tcp_rsk(req)->snt_synack)
 		tcp_valid_rtt_meas(newsk,

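Note: the hunk above is from net/ipv6/tcp_ipv6.c (tcp_v6_syn_recv_sock). It copies the IPv4 behaviour: an MSS configured with TCP_MAXSEG on the listening socket (stored in rx_opt.user_mss) now also caps the advertised MSS of sockets created on the IPv6 accept path. A minimal userspace illustration of where user_mss comes from; the port and MSS values are made up and error handling is trimmed:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET6, SOCK_STREAM, 0);
	int mss = 1000;				/* example value, below the path-MTU derived MSS */
	struct sockaddr_in6 addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin6_family = AF_INET6;
	addr.sin6_addr = in6addr_any;
	addr.sin6_port = htons(8080);

	/* Cap the MSS this listener advertises; with the fix above, sockets
	 * returned by accept() on this IPv6 listener honour the limit too. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_MAXSEG, &mss, sizeof(mss)) < 0)
		perror("setsockopt(TCP_MAXSEG)");

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 || listen(fd, 16) < 0)
		perror("bind/listen");

	/* accept() loop omitted */
	close(fd);
	return 0;
}
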
@@ -3480,7 +3480,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
 
 	/* Addresses to be used by KM for negotiation, if ext is available */
 	if (k != NULL && (set_sadb_kmaddress(skb, k) < 0))
-		return -EINVAL;
+		goto err;
 
 	/* selector src */
 	set_sadb_address(skb, sasize_sel, SADB_EXT_ADDRESS_SRC, sel);

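Note: the hunk above is from net/key/af_key.c. Returning -EINVAL directly from pfkey_send_migrate() leaked the already-allocated skb; branching to the function's error label lets the existing cleanup run (this is the missing kfree_skb() fix from the pull message). The surrounding pattern is presumably along these lines; the err label itself lies outside the quoted hunk, so this is a hedged sketch rather than the verbatim source:

	/* Hedged sketch of the error path; only the "goto err" line is in the hunk. */
	if (k != NULL && (set_sadb_kmaddress(skb, k) < 0))
		goto err;			/* was: return -EINVAL, leaking skb */

	/* ... rest of the message construction and delivery ... */

err:
	kfree_skb(skb);				/* free the partially built message */
	return -EINVAL;
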
@@ -232,7 +232,7 @@ static void l2tp_ip_close(struct sock *sk, long timeout)
 {
 	write_lock_bh(&l2tp_ip_lock);
 	hlist_del_init(&sk->sk_bind_node);
-	hlist_del_init(&sk->sk_node);
+	sk_del_node_init(sk);
 	write_unlock_bh(&l2tp_ip_lock);
 	sk_common_release(sk);
 }
@@ -271,7 +271,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	    chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
 		goto out;
 
-	inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
+	if (addr->l2tp_addr.s_addr)
+		inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
 	if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
 		inet->inet_saddr = 0;  /* Use device */
 	sk_dst_reset(sk);

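Note: the two hunks above are from net/l2tp/l2tp_ip.c, matching the refcount-leak and source-address fixes in the pull message. In l2tp_ip_close(), unhashing with a bare hlist_del_init() left behind the reference taken when the socket was hashed, so the socket could never be freed; sk_del_node_init() drops that reference as part of unlinking. In l2tp_ip_bind(), a source address is now only recorded when the caller actually supplied one, so binding to the wildcard address no longer pins a zero saddr on the socket. A simplified sketch of what sk_del_node_init() does (not the exact sock.h definition):

/* Simplified sketch: unhash the socket and drop the table's reference. */
static inline void sk_del_node_init_sketch(struct sock *sk)
{
	if (!hlist_unhashed(&sk->sk_node)) {
		__hlist_del(&sk->sk_node);
		INIT_HLIST_NODE(&sk->sk_node);
		sock_put(sk);		/* the reference hlist_del_init() used to leak */
	}
}
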
@@ -457,8 +457,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 				 * fall back to HT20 if we don't use or use
 				 * the other extension channel
 				 */
-				if ((channel_type == NL80211_CHAN_HT40MINUS ||
-				     channel_type == NL80211_CHAN_HT40PLUS) &&
+				if (!(channel_type == NL80211_CHAN_HT40MINUS ||
+				      channel_type == NL80211_CHAN_HT40PLUS) ||
 				    channel_type != sdata->u.ibss.channel_type)
 					sta_ht_cap_new.cap &=
 						~IEEE80211_HT_CAP_SUP_WIDTH_20_40;

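Note: the hunk above is from net/mac80211/ibss.c. The old condition only dropped the peer's 40 MHz capability when the peer advertised HT40 on a mismatching extension channel; a peer that was not using HT40 at all kept the capability by mistake. The corrected decision, expressed as a small helper (a sketch of the logic, not code from mac80211):

/* Sketch of the corrected decision: keep the 40 MHz capability only if the
 * peer runs HT40 on the same HT40+/HT40- configuration as this IBSS. */
static bool keep_40mhz_cap(enum nl80211_channel_type peer_chan,
			   enum nl80211_channel_type our_chan)
{
	bool peer_is_ht40 = peer_chan == NL80211_CHAN_HT40MINUS ||
			    peer_chan == NL80211_CHAN_HT40PLUS;

	return peer_is_ht40 && peer_chan == our_chan;
}

The hunk clears IEEE80211_HT_CAP_SUP_WIDTH_20_40 in exactly the negated case.
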
@@ -103,7 +103,7 @@ static void
 ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 				 struct sk_buff *skb,
 				 struct ieee80211_rate *rate,
-				 int rtap_len)
+				 int rtap_len, bool has_fcs)
 {
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 	struct ieee80211_radiotap_header *rthdr;
@@ -134,7 +134,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 	}
 
 	/* IEEE80211_RADIOTAP_FLAGS */
-	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
+	if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))
 		*pos |= IEEE80211_RADIOTAP_F_FCS;
 	if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
 		*pos |= IEEE80211_RADIOTAP_F_BADFCS;
@@ -294,7 +294,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
 	}
 
 	/* prepend radiotap information */
-	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
+	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
+					 true);
 
 	skb_reset_mac_header(skb);
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -2571,7 +2572,8 @@ static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
 		goto out_free_skb;
 
 	/* prepend radiotap information */
-	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom);
+	ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
+					 false);
 
 	skb_set_mac_header(skb, 0);
 	skb->ip_summed = CHECKSUM_UNNECESSARY;

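Note: the hunks above are from net/mac80211/rx.c. ieee80211_add_rx_radiotap_header() gains a has_fcs argument: the raw monitor path (ieee80211_rx_monitor) passes true because those frames may still end in the hardware FCS, while the cooked-monitor path passes false because the FCS has already been stripped there. The flag decision then reduces to something like this sketch:

/* Sketch: the F_FCS radiotap flag is only reported when this particular
 * frame still ends in the hardware FCS. */
static u8 radiotap_fcs_flag(bool frame_has_fcs, bool hw_includes_fcs)
{
	return (frame_has_fcs && hw_includes_fcs) ? IEEE80211_RADIOTAP_F_FCS : 0;
}
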
@@ -331,23 +331,6 @@ static int __net_init phonet_init_net(struct net *net)
 
 static void __net_exit phonet_exit_net(struct net *net)
 {
-	struct phonet_net *pnn = phonet_pernet(net);
-	struct net_device *dev;
-	unsigned i;
-
-	rtnl_lock();
-	for_each_netdev(net, dev)
-		phonet_device_destroy(dev);
-
-	for (i = 0; i < 64; i++) {
-		dev = pnn->routes.table[i];
-		if (dev) {
-			rtm_phonet_notify(RTM_DELROUTE, dev, i);
-			dev_put(dev);
-		}
-	}
-	rtnl_unlock();
-
 	proc_net_remove(net, "phonet");
 }
 
@@ -361,7 +344,7 @@ static struct pernet_operations phonet_net_ops = {
 /* Initialize Phonet devices list */
 int __init phonet_device_init(void)
 {
-	int err = register_pernet_device(&phonet_net_ops);
+	int err = register_pernet_subsys(&phonet_net_ops);
 	if (err)
 		return err;
 
@@ -377,7 +360,7 @@ void phonet_device_exit(void)
 {
 	rtnl_unregister_all(PF_PHONET);
 	unregister_netdevice_notifier(&phonet_device_notifier);
-	unregister_pernet_device(&phonet_net_ops);
+	unregister_pernet_subsys(&phonet_net_ops);
 	proc_net_remove(&init_net, "pnresource");
 }
 

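Note: the hunks above are from net/phonet/pn_dev.c and correspond to the phonet namespace init/cleanup fixes in the pull message. Registering the per-namespace state with register_pernet_subsys() instead of register_pernet_device() changes where phonet sits in the namespace teardown ordering relative to the pernet device handlers, so by the time phonet_exit_net() runs the namespace's devices have already been cleaned up through the netdev notifier; that is what allows the manual for_each_netdev()/route-table teardown to be deleted. The general registration pattern looks like this sketch (all example_* names are made up):

/* Sketch of the pernet registration pattern; example_* names are hypothetical. */
struct example_net {
	int dummy;				/* per-namespace state */
};

static int example_net_id __read_mostly;

static int __net_init example_init_net(struct net *net)
{
	return 0;				/* allocate/initialise per-namespace state */
}

static void __net_exit example_exit_net(struct net *net)
{
	/* per-device cleanup has already run for a subsys registration */
}

static struct pernet_operations example_net_ops = {
	.init = example_init_net,
	.exit = example_exit_net,
	.id   = &example_net_id,
	.size = sizeof(struct example_net),
};

static int __init example_init(void)
{
	return register_pernet_subsys(&example_net_ops);
}

static void __exit example_exit(void)
{
	unregister_pernet_subsys(&example_net_ops);
}
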
@@ -565,11 +565,8 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 		opt.packets	= q->packetsin;
 		opt.bytesin	= q->bytesin;
 
-		if (gred_wred_mode(table)) {
-			q->vars.qidlestart =
-				table->tab[table->def]->vars.qidlestart;
-			q->vars.qavg = table->tab[table->def]->vars.qavg;
-		}
+		if (gred_wred_mode(table))
+			gred_load_wred_set(table, q);
 
 		opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);
 

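Note: the hunk above is from net/sched/sch_gred.c (the gred-dump-in-WRED-mode fix from the pull message). The dump path no longer reaches into table->tab[table->def] by hand, which duplicated the enqueue-path logic; it reuses the gred_load_wred_set() helper instead. That helper presumably just copies the shared WRED averaging state into the virtual queue being dumped, roughly:

/* Presumably along these lines: copy the table-wide WRED state into the
 * virtual queue being dumped. */
static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg       = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}
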
@@ -989,7 +989,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
 		if (rdev->wiphy.software_iftypes & BIT(iftype))
 			continue;
 		for (j = 0; j < c->n_limits; j++) {
-			if (!(limits[j].types & iftype))
+			if (!(limits[j].types & BIT(iftype)))
 				continue;
 			if (limits[j].max < num[iftype])
 				goto cont;

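Note: the hunk above is from net/wireless/util.c. The interface-combination limits store interface types as a bitmask, so testing the mask against the raw enum value almost never matched; the membership check has to use BIT(iftype), just as the software_iftypes test two lines earlier already does. In isolation the test is simply:

/* limits[].types is a bitmask of interface-type bits, not enum values. */
static bool limit_covers_iftype(u16 types, enum nl80211_iftype iftype)
{
	return types & BIT(iftype);
}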