Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (52 commits)
  vlan: Use bitmask of feature flags instead of seperate feature bits
  fmvj18x_cs: add NextCom NC5310 rev B support
  xirc2ps_cs: re-initialize the multicast address in do_reset
  3C509: rx_bytes should not be increased when alloc_skb failed
  NETFRONT: Use __skb_queue_purge()
  VIRTIO: Use __skb_queue_purge()
  phylib: do EXPORT_SYMBOL on get_phy_id
  netlink: Fix nla_parse_nested_compat() to call nla_parse() directly
  WAN: protect HDLC proto list while insmod/rmmod
  drivers/net/fs_enet: remove null pointer dereference
  S2io: Version update for napi and MSI-X patches
  S2io: Added napi support when MSIX is enabled.
  S2io: Move all the transmit completions to a single msi-x (alarm) vector
  drivers/net/ehea - remove unnecessary memset after kzalloc
  au1000_eth: remove useless check
  Blackfin EMAC Driver: Removed duplicated include <linux/ethtool.h>
  cpmac bugfixes and enhancements
  e1000e: use resource_size_t, not unsigned long, for phys addrs
  net/usb: add support for Apple USB Ethernet Adapter
  uli526x: add support for netpoll
  ...
Linus Torvalds 2008-05-26 10:14:02 -07:00
Parents: b3733034f1 289c79a4bd
Commit: c5e6fd28e5
53 changed files with 866 additions and 598 deletions

View file

@@ -1062,7 +1062,6 @@ el3_rx(struct net_device *dev)
         struct sk_buff *skb;
 
         skb = dev_alloc_skb(pkt_len+5);
-        dev->stats.rx_bytes += pkt_len;
         if (el3_debug > 4)
             printk("Receiving packet size %d status %4.4x.\n",
                    pkt_len, rx_status);
@@ -1077,6 +1076,7 @@ el3_rx(struct net_device *dev)
             skb->protocol = eth_type_trans(skb,dev);
             netif_rx(skb);
             dev->last_rx = jiffies;
+            dev->stats.rx_bytes += pkt_len;
             dev->stats.rx_packets++;
             continue;
         }
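
The hunk above defers the rx_bytes update until the frame has actually been handed up the stack, so a failed dev_alloc_skb() no longer inflates the byte count. A minimal, self-contained C sketch of that accounting order (the struct, pool and helper names here are illustrative, not the driver's own):

#include <stddef.h>
#include <stdio.h>

struct stats { unsigned long rx_packets, rx_bytes, rx_dropped; };

/* stand-in for dev_alloc_skb(): fails for oversized requests */
static char pool[2048];
static void *alloc_buf(size_t len) { return len > sizeof(pool) ? NULL : pool; }

static void rx_one(struct stats *st, size_t pkt_len)
{
    void *buf = alloc_buf(pkt_len + 5);
    if (!buf) {                 /* allocation failed: record a drop only */
        st->rx_dropped++;
        return;
    }
    /* ... hand buf up the stack ... then account for the frame */
    st->rx_packets++;
    st->rx_bytes += pkt_len;
}

int main(void)
{
    struct stats st = {0};
    rx_one(&st, 1500);
    rx_one(&st, 4000);          /* simulated allocation failure */
    printf("packets=%lu bytes=%lu dropped=%lu\n",
           st.rx_packets, st.rx_bytes, st.rx_dropped);
    return 0;
}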

View file

@@ -1239,12 +1239,7 @@ static int au1000_rx(struct net_device *dev)
  */
 static irqreturn_t au1000_interrupt(int irq, void *dev_id)
 {
-    struct net_device *dev = (struct net_device *) dev_id;
-
-    if (dev == NULL) {
-        printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name);
-        return IRQ_RETVAL(1);
-    }
+    struct net_device *dev = dev_id;
 
     /* Handle RX interrupts first to minimize chance of overrun */
View file

@@ -22,7 +22,6 @@
 #include <linux/crc32.h>
 #include <linux/device.h>
 #include <linux/spinlock.h>
-#include <linux/ethtool.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
 #include <linux/netdevice.h>

View file

@@ -38,6 +38,7 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <asm/gpio.h>
+#include <asm/atomic.h>
 
 MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
@@ -187,6 +188,7 @@ struct cpmac_desc {
 #define CPMAC_EOQ  0x1000
     struct sk_buff *skb;
     struct cpmac_desc *next;
+    struct cpmac_desc *prev;
     dma_addr_t mapping;
     dma_addr_t data_mapping;
 };
@@ -208,6 +210,7 @@ struct cpmac_priv {
     struct work_struct reset_work;
     struct platform_device *pdev;
     struct napi_struct napi;
+    atomic_t reset_pending;
 };
 
 static irqreturn_t cpmac_irq(int, void *);
@@ -241,6 +244,16 @@ static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
     printk("\n");
 }
 
+static void cpmac_dump_all_desc(struct net_device *dev)
+{
+    struct cpmac_priv *priv = netdev_priv(dev);
+    struct cpmac_desc *dump = priv->rx_head;
+    do {
+        cpmac_dump_desc(dev, dump);
+        dump = dump->next;
+    } while (dump != priv->rx_head);
+}
+
 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
 {
     int i;
@@ -412,21 +425,42 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
 static int cpmac_poll(struct napi_struct *napi, int budget)
 {
     struct sk_buff *skb;
-    struct cpmac_desc *desc;
-    int received = 0;
+    struct cpmac_desc *desc, *restart;
     struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
+    int received = 0, processed = 0;
 
     spin_lock(&priv->rx_lock);
     if (unlikely(!priv->rx_head)) {
         if (netif_msg_rx_err(priv) && net_ratelimit())
             printk(KERN_WARNING "%s: rx: polling, but no queue\n",
                    priv->dev->name);
+        spin_unlock(&priv->rx_lock);
         netif_rx_complete(priv->dev, napi);
         return 0;
     }
 
     desc = priv->rx_head;
+    restart = NULL;
     while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
+        processed++;
+
+        if ((desc->dataflags & CPMAC_EOQ) != 0) {
+            /* The last update to eoq->hw_next didn't happen
+             * soon enough, and the receiver stopped here.
+             * Remember this descriptor so we can restart
+             * the receiver after freeing some space.
+             */
+            if (unlikely(restart)) {
+                if (netif_msg_rx_err(priv))
+                    printk(KERN_ERR "%s: poll found a"
+                           " duplicate EOQ: %p and %p\n",
+                           priv->dev->name, restart, desc);
+                goto fatal_error;
+            }
+
+            restart = desc->next;
+        }
+
         skb = cpmac_rx_one(priv, desc);
         if (likely(skb)) {
             netif_receive_skb(skb);
@@ -435,19 +469,90 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
         desc = desc->next;
     }
 
+    if (desc != priv->rx_head) {
+        /* We freed some buffers, but not the whole ring,
+         * add what we did free to the rx list */
+        desc->prev->hw_next = (u32)0;
+        priv->rx_head->prev->hw_next = priv->rx_head->mapping;
+    }
+
+    /* Optimization: If we did not actually process an EOQ (perhaps because
+     * of quota limits), check to see if the tail of the queue has EOQ set.
+     * We should immediately restart in that case so that the receiver can
+     * restart and run in parallel with more packet processing.
+     * This lets us handle slightly larger bursts before running
+     * out of ring space (assuming dev->weight < ring_size) */
+
+    if (!restart &&
+         (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
+            == CPMAC_EOQ &&
+         (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
+        /* reset EOQ so the poll loop (above) doesn't try to
+         * restart this when it eventually gets to this descriptor.
+         */
+        priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
+        restart = priv->rx_head;
+    }
+
+    if (restart) {
+        priv->dev->stats.rx_errors++;
+        priv->dev->stats.rx_fifo_errors++;
+        if (netif_msg_rx_err(priv) && net_ratelimit())
+            printk(KERN_WARNING "%s: rx dma ring overrun\n",
+                   priv->dev->name);
+
+        if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
+            if (netif_msg_drv(priv))
+                printk(KERN_ERR "%s: cpmac_poll is trying to "
+                       "restart rx from a descriptor that's "
+                       "not free: %p\n",
+                       priv->dev->name, restart);
+            goto fatal_error;
+        }
+
+        cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
+    }
+
     priv->rx_head = desc;
     spin_unlock(&priv->rx_lock);
     if (unlikely(netif_msg_rx_status(priv)))
         printk(KERN_DEBUG "%s: poll processed %d packets\n",
                priv->dev->name, received);
 
-    if (desc->dataflags & CPMAC_OWN) {
+    if (processed == 0) {
+        /* we ran out of packets to read,
+         * revert to interrupt-driven mode */
         netif_rx_complete(priv->dev, napi);
-        cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping);
         cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
         return 0;
     }
 
     return 1;
+
+fatal_error:
+    /* Something went horribly wrong.
+     * Reset hardware to try to recover rather than wedging. */
+
+    if (netif_msg_drv(priv)) {
+        printk(KERN_ERR "%s: cpmac_poll is confused. "
+               "Resetting hardware\n", priv->dev->name);
+        cpmac_dump_all_desc(priv->dev);
+        printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
+               priv->dev->name,
+               cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
+               cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
+    }
+
+    spin_unlock(&priv->rx_lock);
+    netif_rx_complete(priv->dev, napi);
+    netif_stop_queue(priv->dev);
+    napi_disable(&priv->napi);
+
+    atomic_inc(&priv->reset_pending);
+    cpmac_hw_stop(priv->dev);
+    if (!schedule_work(&priv->reset_work))
+        atomic_dec(&priv->reset_pending);
+    return 0;
+
 }
 
 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -456,6 +561,9 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
     struct cpmac_desc *desc;
     struct cpmac_priv *priv = netdev_priv(dev);
 
+    if (unlikely(atomic_read(&priv->reset_pending)))
+        return NETDEV_TX_BUSY;
+
     if (unlikely(skb_padto(skb, ETH_ZLEN)))
         return NETDEV_TX_OK;
 
@@ -621,8 +729,10 @@ static void cpmac_clear_rx(struct net_device *dev)
             desc->dataflags = CPMAC_OWN;
             dev->stats.rx_dropped++;
         }
+        desc->hw_next = desc->next->mapping;
         desc = desc->next;
     }
+    priv->rx_head->prev->hw_next = 0;
 }
 
 static void cpmac_clear_tx(struct net_device *dev)
@@ -635,14 +745,14 @@ static void cpmac_clear_tx(struct net_device *dev)
         priv->desc_ring[i].dataflags = 0;
         if (priv->desc_ring[i].skb) {
             dev_kfree_skb_any(priv->desc_ring[i].skb);
-            if (netif_subqueue_stopped(dev, i))
-                netif_wake_subqueue(dev, i);
+            priv->desc_ring[i].skb = NULL;
         }
     }
 }
 
 static void cpmac_hw_error(struct work_struct *work)
 {
+    int i;
     struct cpmac_priv *priv =
         container_of(work, struct cpmac_priv, reset_work);
 
@@ -651,8 +761,48 @@ static void cpmac_hw_error(struct work_struct *work)
     spin_unlock(&priv->rx_lock);
     cpmac_clear_tx(priv->dev);
     cpmac_hw_start(priv->dev);
-    napi_enable(&priv->napi);
-    netif_start_queue(priv->dev);
+    barrier();
+    atomic_dec(&priv->reset_pending);
+
+    for (i = 0; i < CPMAC_QUEUES; i++)
+        netif_wake_subqueue(priv->dev, i);
+    netif_wake_queue(priv->dev);
+    cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
+}
+
+static void cpmac_check_status(struct net_device *dev)
+{
+    struct cpmac_priv *priv = netdev_priv(dev);
+
+    u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
+    int rx_channel = (macstatus >> 8) & 7;
+    int rx_code = (macstatus >> 12) & 15;
+    int tx_channel = (macstatus >> 16) & 7;
+    int tx_code = (macstatus >> 20) & 15;
+
+    if (rx_code || tx_code) {
+        if (netif_msg_drv(priv) && net_ratelimit()) {
+            /* Can't find any documentation on what these
+             * error codes actually are. So just log them and hope..
+             */
+            if (rx_code)
+                printk(KERN_WARNING "%s: host error %d on rx "
+                       "channel %d (macstatus %08x), resetting\n",
+                       dev->name, rx_code, rx_channel, macstatus);
+            if (tx_code)
+                printk(KERN_WARNING "%s: host error %d on tx "
+                       "channel %d (macstatus %08x), resetting\n",
+                       dev->name, tx_code, tx_channel, macstatus);
+        }
+
+        netif_stop_queue(dev);
+        cpmac_hw_stop(dev);
+        if (schedule_work(&priv->reset_work))
+            atomic_inc(&priv->reset_pending);
+        if (unlikely(netif_msg_hw(priv)))
+            cpmac_dump_regs(dev);
+    }
+    cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
 }
 
 static irqreturn_t cpmac_irq(int irq, void *dev_id)
@@ -683,49 +833,32 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
 
     cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
 
-    if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) {
-        if (netif_msg_drv(priv) && net_ratelimit())
-            printk(KERN_ERR "%s: hw error, resetting...\n",
-                   dev->name);
-        netif_stop_queue(dev);
-        napi_disable(&priv->napi);
-        cpmac_hw_stop(dev);
-        schedule_work(&priv->reset_work);
-        if (unlikely(netif_msg_hw(priv)))
-            cpmac_dump_regs(dev);
-    }
+    if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
+        cpmac_check_status(dev);
 
     return IRQ_HANDLED;
 }
 
 static void cpmac_tx_timeout(struct net_device *dev)
 {
-    struct cpmac_priv *priv = netdev_priv(dev);
     int i;
+    struct cpmac_priv *priv = netdev_priv(dev);
 
     spin_lock(&priv->lock);
     dev->stats.tx_errors++;
     spin_unlock(&priv->lock);
     if (netif_msg_tx_err(priv) && net_ratelimit())
         printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
-    /*
-     * FIXME: waking up random queue is not the best thing to
-     * do... on the other hand why we got here at all?
-     */
-#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+
+    atomic_inc(&priv->reset_pending);
+    barrier();
+    cpmac_clear_tx(dev);
+    barrier();
+    atomic_dec(&priv->reset_pending);
+
+    netif_wake_queue(priv->dev);
     for (i = 0; i < CPMAC_QUEUES; i++)
-        if (priv->desc_ring[i].skb) {
-            priv->desc_ring[i].dataflags = 0;
-            dev_kfree_skb_any(priv->desc_ring[i].skb);
-            netif_wake_subqueue(dev, i);
-            break;
-        }
-#else
-    priv->desc_ring[0].dataflags = 0;
-    if (priv->desc_ring[0].skb)
-        dev_kfree_skb_any(priv->desc_ring[0].skb);
-    netif_wake_queue(dev);
-#endif
+        netif_wake_subqueue(dev, i);
 }
 
 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -901,9 +1034,12 @@ static int cpmac_open(struct net_device *dev)
         desc->buflen = CPMAC_SKB_SIZE;
         desc->dataflags = CPMAC_OWN;
         desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
+        desc->next->prev = desc;
         desc->hw_next = (u32)desc->next->mapping;
     }
 
+    priv->rx_head->prev->hw_next = (u32)0;
+
     if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
                            dev->name, dev))) {
         if (netif_msg_drv(priv))
@@ -912,6 +1048,7 @@ static int cpmac_open(struct net_device *dev)
         goto fail_irq;
     }
 
+    atomic_set(&priv->reset_pending, 0);
     INIT_WORK(&priv->reset_work, cpmac_hw_error);
     cpmac_hw_start(dev);
 
@@ -1007,21 +1144,10 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
 
     if (phy_id == PHY_MAX_ADDR) {
         if (external_switch || dumb_switch) {
-            struct fixed_phy_status status = {};
-
-            /*
-             * FIXME: this should be in the platform code!
-             * Since there is not platform code at all (that is,
-             * no mainline users of that driver), place it here
-             * for now.
-             */
-            phy_id = 0;
-            status.link = 1;
-            status.duplex = 1;
-            status.speed = 100;
-            fixed_phy_add(PHY_POLL, phy_id, &status);
+            mdio_bus_id = 0; /* fixed phys bus */
+            phy_id = pdev->id;
         } else {
-            printk(KERN_ERR "cpmac: no PHY present\n");
+            dev_err(&pdev->dev, "no PHY present\n");
             return -ENODEV;
         }
     }
@@ -1064,10 +1190,8 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
     priv->msg_enable = netif_msg_init(debug_level, 0xff);
     memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr));
 
-    snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
-
-    priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
-                            PHY_INTERFACE_MODE_MII);
+    priv->phy = phy_connect(dev, cpmac_mii.phy_map[phy_id]->dev.bus_id,
+                            &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII);
     if (IS_ERR(priv->phy)) {
         if (netif_msg_drv(priv))
             printk(KERN_ERR "%s: Could not attach to PHY\n",

View file

@@ -903,7 +903,7 @@ dm9000_stop(struct net_device *ndev)
     if (netif_msg_ifdown(db))
         dev_dbg(db->dev, "shutting down %s\n", ndev->name);
 
-    cancel_delayed_work(&db->phy_poll);
+    cancel_delayed_work_sync(&db->phy_poll);
 
     netif_stop_queue(ndev);
     netif_carrier_off(ndev);
View file

@@ -4201,8 +4201,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
     struct e1000_adapter *adapter;
     struct e1000_hw *hw;
     const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
-    unsigned long mmio_start, mmio_len;
-    unsigned long flash_start, flash_len;
+    resource_size_t mmio_start, mmio_len;
+    resource_size_t flash_start, flash_len;
     static int cards_found;
     int i, err, pci_using_dac;

View file

@@ -2213,8 +2213,6 @@ static void ehea_vlan_rx_register(struct net_device *dev,
         goto out;
     }
 
-    memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter));
-
     hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
                                    H_PORT_CB1, H_PORT_CB1_ALL, cb1);
     if (hret != H_SUCCESS)
@@ -3178,11 +3176,12 @@ out_err:
 
 static void ehea_shutdown_single_port(struct ehea_port *port)
 {
+    struct ehea_adapter *adapter = port->adapter;
     unregister_netdev(port->netdev);
     ehea_unregister_port(port);
     kfree(port->mc_list);
     free_netdev(port->netdev);
-    port->adapter->active_ports--;
+    adapter->active_ports--;
 }
 
 static int ehea_setup_ports(struct ehea_adapter *adapter)

View file

@@ -5823,6 +5823,7 @@ static int nv_resume(struct pci_dev *pdev)
     writel(txreg, base + NvRegTransmitPoll);
 
     rc = nv_open(dev);
+    nv_set_multicast(dev);
 out:
     return rc;
 }

View file

@@ -1093,7 +1093,7 @@ err:
     if (registered)
         unregister_netdev(ndev);
 
-    if (fep != NULL) {
+    if (fep && fep->ops) {
         (*fep->ops->free_bd)(ndev);
         (*fep->ops->cleanup_data)(ndev);
     }

View file

@@ -1340,9 +1340,10 @@ static unsigned int scc_set_param(struct scc_channel *scc, unsigned int cmd, uns
     case PARAM_RTS:
         if ( !(scc->wreg[R5] & RTS) )
         {
-            if (arg != TX_OFF)
+            if (arg != TX_OFF) {
                 scc_key_trx(scc, TX_ON);
                 scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay);
+            }
         } else {
             if (arg == TX_OFF)
             {

View file

@@ -631,7 +631,7 @@ static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
     return status;
 }
 
-int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
+static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
 {
     struct myri10ge_cmd cmd;
     int status;

View file

@@ -391,7 +391,9 @@ static int fmvj18x_config(struct pcmcia_device *link)
             cardtype = CONTEC;
             break;
         case MANFID_FUJITSU:
-            if (link->card_id == PRODID_FUJITSU_MBH10302)
+            if (link->conf.ConfigBase == 0x0fe0)
+                cardtype = MBH10302;
+            else if (link->card_id == PRODID_FUJITSU_MBH10302)
                 /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302),
                    but these are MBH10304 based card. */
                 cardtype = MBH10304;

View file

@@ -1461,22 +1461,25 @@ static void
 set_multicast_list(struct net_device *dev)
 {
     unsigned int ioaddr = dev->base_addr;
+    unsigned value;
 
     SelectPage(0x42);
+    value = GetByte(XIRCREG42_SWC1) & 0xC0;
+
     if (dev->flags & IFF_PROMISC) { /* snoop */
-        PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */
+        PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
     } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
-        PutByte(XIRCREG42_SWC1, 0x02); /* set MPE */
+        PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
     } else if (dev->mc_count) {
         /* the chip can filter 9 addresses perfectly */
-        PutByte(XIRCREG42_SWC1, 0x01);
+        PutByte(XIRCREG42_SWC1, value | 0x01);
         SelectPage(0x40);
         PutByte(XIRCREG40_CMD0, Offline);
         set_addresses(dev);
         SelectPage(0x40);
         PutByte(XIRCREG40_CMD0, EnableRecv | Online);
     } else { /* standard usage */
-        PutByte(XIRCREG42_SWC1, 0x00);
+        PutByte(XIRCREG42_SWC1, value | 0x00);
     }
     SelectPage(0);
 }
@@ -1722,6 +1725,7 @@ do_reset(struct net_device *dev, int full)
 
     /* enable receiver and put the mac online */
     if (full) {
+        set_multicast_list(dev);
         SelectPage(0x40);
         PutByte(XIRCREG40_CMD0, EnableRecv | Online);
     }

View file

@@ -325,7 +325,7 @@ static int pcnet32_get_regs_len(struct net_device *dev);
 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                              void *ptr);
 static void pcnet32_purge_tx_ring(struct net_device *dev);
-static int pcnet32_alloc_ring(struct net_device *dev, char *name);
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
 static void pcnet32_free_ring(struct net_device *dev);
 static void pcnet32_check_media(struct net_device *dev, int verbose);
 
@@ -1983,7 +1983,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 }
 
 /* if any allocation fails, caller must also call pcnet32_free_ring */
-static int pcnet32_alloc_ring(struct net_device *dev, char *name)
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
 {
     struct pcnet32_private *lp = netdev_priv(dev);

View file

@@ -5,7 +5,7 @@
 menuconfig PHYLIB
 	tristate "PHY Device support and infrastructure"
 	depends on !S390
-	depends on NET_ETHERNET && (BROKEN || !S390)
+	depends on NET_ETHERNET
 	help
 	  Ethernet controllers are usually attached to PHY
 	  devices. This option provides infrastructure for

View file

@@ -207,6 +207,7 @@ int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
 
     return 0;
 }
+EXPORT_SYMBOL(get_phy_id);
 
 /**
  * get_phy_device - reads the specified PHY device and returns its @phy_device struct

View file

@@ -250,7 +250,7 @@ struct XENA_dev_config {
     u64 tx_mat0_n[0x8];
 #define TX_MAT_SET(fifo, msi)   vBIT(msi, (8 * fifo), 8)
 
-    u8 unused_1[0x8];
+    u64 xmsi_mask_reg;
     u64 stat_byte_cnt;
 #define STAT_BC(n)  vBIT(n,4,12)

View file

@@ -86,7 +86,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.26.23"
+#define DRV_VERSION "2.0.26.24"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -1113,9 +1113,10 @@ static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
     struct pci_dev *tdev = NULL;
     while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
         if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
-            if (tdev->bus == s2io_pdev->bus->parent)
+            if (tdev->bus == s2io_pdev->bus->parent) {
                 pci_dev_put(tdev);
                 return 1;
+            }
         }
     }
     return 0;
@@ -1219,15 +1220,33 @@ static int init_tti(struct s2io_nic *nic, int link)
             TTI_DATA1_MEM_TX_URNG_B(0x10) |
             TTI_DATA1_MEM_TX_URNG_C(0x30) |
             TTI_DATA1_MEM_TX_TIMER_AC_EN;
+        if (i == 0)
             if (use_continuous_tx_intrs && (link == LINK_UP))
                 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
         writeq(val64, &bar0->tti_data1_mem);
 
-        val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
-                TTI_DATA2_MEM_TX_UFC_B(0x20) |
-                TTI_DATA2_MEM_TX_UFC_C(0x40) |
-                TTI_DATA2_MEM_TX_UFC_D(0x80);
+        if (nic->config.intr_type == MSI_X) {
+            val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
+                TTI_DATA2_MEM_TX_UFC_B(0x100) |
+                TTI_DATA2_MEM_TX_UFC_C(0x200) |
+                TTI_DATA2_MEM_TX_UFC_D(0x300);
+        } else {
+            if ((nic->config.tx_steering_type ==
+                TX_DEFAULT_STEERING) &&
+                (config->tx_fifo_num > 1) &&
+                (i >= nic->udp_fifo_idx) &&
+                (i < (nic->udp_fifo_idx +
+                nic->total_udp_fifos)))
+                val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) |
+                    TTI_DATA2_MEM_TX_UFC_B(0x80) |
+                    TTI_DATA2_MEM_TX_UFC_C(0x100) |
+                    TTI_DATA2_MEM_TX_UFC_D(0x120);
+            else
+                val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
+                    TTI_DATA2_MEM_TX_UFC_B(0x20) |
+                    TTI_DATA2_MEM_TX_UFC_C(0x40) |
+                    TTI_DATA2_MEM_TX_UFC_D(0x80);
+        }
 
         writeq(val64, &bar0->tti_data2_mem);
@@ -2813,6 +2832,15 @@ static void free_rx_buffers(struct s2io_nic *sp)
     }
 }
 
+static int s2io_chk_rx_buffers(struct ring_info *ring)
+{
+    if (fill_rx_buffers(ring) == -ENOMEM) {
+        DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
+        DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
+    }
+    return 0;
+}
+
 /**
  * s2io_poll - Rx interrupt handler for NAPI support
  * @napi : pointer to the napi structure.
@@ -2826,57 +2854,72 @@ static void free_rx_buffers(struct s2io_nic *sp)
  * 0 on success and 1 if there are No Rx packets to be processed.
  */
 
-static int s2io_poll(struct napi_struct *napi, int budget)
+static int s2io_poll_msix(struct napi_struct *napi, int budget)
+{
+    struct ring_info *ring = container_of(napi, struct ring_info, napi);
+    struct net_device *dev = ring->dev;
+    struct config_param *config;
+    struct mac_info *mac_control;
+    int pkts_processed = 0;
+    u8 *addr = NULL, val8 = 0;
+    struct s2io_nic *nic = dev->priv;
+    struct XENA_dev_config __iomem *bar0 = nic->bar0;
+    int budget_org = budget;
+
+    config = &nic->config;
+    mac_control = &nic->mac_control;
+
+    if (unlikely(!is_s2io_card_up(nic)))
+        return 0;
+
+    pkts_processed = rx_intr_handler(ring, budget);
+    s2io_chk_rx_buffers(ring);
+
+    if (pkts_processed < budget_org) {
+        netif_rx_complete(dev, napi);
+        /*Re Enable MSI-Rx Vector*/
+        addr = (u8 *)&bar0->xmsi_mask_reg;
+        addr += 7 - ring->ring_no;
+        val8 = (ring->ring_no == 0) ? 0x3f : 0xbf;
+        writeb(val8, addr);
+        val8 = readb(addr);
+    }
+    return pkts_processed;
+}
+
+static int s2io_poll_inta(struct napi_struct *napi, int budget)
 {
     struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi);
+    struct ring_info *ring;
     struct net_device *dev = nic->dev;
-    int pkt_cnt = 0, org_pkts_to_process;
-    struct mac_info *mac_control;
     struct config_param *config;
+    struct mac_info *mac_control;
+    int pkts_processed = 0;
+    int ring_pkts_processed, i;
     struct XENA_dev_config __iomem *bar0 = nic->bar0;
-    int i;
+    int budget_org = budget;
 
-    mac_control = &nic->mac_control;
     config = &nic->config;
+    mac_control = &nic->mac_control;
 
-    nic->pkts_to_process = budget;
-    org_pkts_to_process = nic->pkts_to_process;
-
-    writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
-    readl(&bar0->rx_traffic_int);
+    if (unlikely(!is_s2io_card_up(nic)))
+        return 0;
 
     for (i = 0; i < config->rx_ring_num; i++) {
-        rx_intr_handler(&mac_control->rings[i]);
-        pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
-        if (!nic->pkts_to_process) {
-            /* Quota for the current iteration has been met */
-            goto no_rx;
-        }
-    }
-
-    netif_rx_complete(dev, napi);
-
-    for (i = 0; i < config->rx_ring_num; i++) {
-        if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
-            DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
-            DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
+        ring = &mac_control->rings[i];
+        ring_pkts_processed = rx_intr_handler(ring, budget);
+        s2io_chk_rx_buffers(ring);
+        pkts_processed += ring_pkts_processed;
+        budget -= ring_pkts_processed;
+        if (budget <= 0)
             break;
-        }
     }
-    /* Re enable the Rx interrupts. */
-    writeq(0x0, &bar0->rx_traffic_mask);
-    readl(&bar0->rx_traffic_mask);
-    return pkt_cnt;
-
-no_rx:
-    for (i = 0; i < config->rx_ring_num; i++) {
-        if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
-            DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
-            DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
-            break;
-        }
+    if (pkts_processed < budget_org) {
+        netif_rx_complete(dev, napi);
+        /* Re enable the Rx interrupts for the ring */
+        writeq(0, &bar0->rx_traffic_mask);
+        readl(&bar0->rx_traffic_mask);
     }
-    return pkt_cnt;
+    return pkts_processed;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2918,7 +2961,7 @@ static void s2io_netpoll(struct net_device *dev)
 
     /* check for received packet and indicate up to network */
     for (i = 0; i < config->rx_ring_num; i++)
-        rx_intr_handler(&mac_control->rings[i]);
+        rx_intr_handler(&mac_control->rings[i], 0);
 
     for (i = 0; i < config->rx_ring_num; i++) {
         if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
@@ -2934,7 +2977,8 @@ static void s2io_netpoll(struct net_device *dev)
 
 /**
  *  rx_intr_handler - Rx interrupt handler
- *  @nic: device private variable.
+ *  @ring_info: per ring structure.
+ *  @budget: budget for napi processing.
  *  Description:
  *  If the interrupt is because of a received frame or if the
  *  receive ring contains fresh as yet un-processed frames,this function is
@@ -2942,15 +2986,15 @@ static void s2io_netpoll(struct net_device *dev)
  *  stopped and sends the skb to the OSM's Rx handler and then increments
  *  the offset.
  *  Return Value:
- *  NONE.
+ *  No. of napi packets processed.
  */
-static void rx_intr_handler(struct ring_info *ring_data)
+static int rx_intr_handler(struct ring_info *ring_data, int budget)
 {
     int get_block, put_block;
     struct rx_curr_get_info get_info, put_info;
     struct RxD_t *rxdp;
     struct sk_buff *skb;
-    int pkt_cnt = 0;
+    int pkt_cnt = 0, napi_pkts = 0;
     int i;
     struct RxD1* rxdp1;
     struct RxD3* rxdp3;
@@ -2977,7 +3021,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
             DBG_PRINT(ERR_DBG, "%s: The skb is ",
                       ring_data->dev->name);
             DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
-            return;
+            return 0;
         }
         if (ring_data->rxd_mode == RXD_MODE_1) {
             rxdp1 = (struct RxD1*)rxdp;
@@ -3014,9 +3058,10 @@ static void rx_intr_handler(struct ring_info *ring_data)
             rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
         }
 
-        if(ring_data->nic->config.napi){
-            ring_data->nic->pkts_to_process -= 1;
-            if (!ring_data->nic->pkts_to_process)
+        if (ring_data->nic->config.napi) {
+            budget--;
+            napi_pkts++;
+            if (!budget)
                 break;
         }
         pkt_cnt++;
@@ -3034,6 +3079,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
             }
         }
     }
+    return(napi_pkts);
 }
 
 /**
@@ -3730,14 +3776,19 @@ static void restore_xmsi_data(struct s2io_nic *nic)
 {
     struct XENA_dev_config __iomem *bar0 = nic->bar0;
     u64 val64;
-    int i;
+    int i, msix_index;
+
+    if (nic->device_type == XFRAME_I_DEVICE)
+        return;
 
     for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
+        msix_index = (i) ? ((i-1) * 8 + 1): 0;
         writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
         writeq(nic->msix_info[i].data, &bar0->xmsi_data);
-        val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6));
+        val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6));
         writeq(val64, &bar0->xmsi_access);
-        if (wait_for_msix_trans(nic, i)) {
+        if (wait_for_msix_trans(nic, msix_index)) {
             DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
             continue;
         }
@@ -3748,13 +3799,17 @@ static void store_xmsi_data(struct s2io_nic *nic)
 {
     struct XENA_dev_config __iomem *bar0 = nic->bar0;
     u64 val64, addr, data;
-    int i;
+    int i, msix_index;
+
+    if (nic->device_type == XFRAME_I_DEVICE)
+        return;
 
     /* Store and display */
     for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
-        val64 = (s2BIT(15) | vBIT(i, 26, 6));
+        msix_index = (i) ? ((i-1) * 8 + 1): 0;
+        val64 = (s2BIT(15) | vBIT(msix_index, 26, 6));
         writeq(val64, &bar0->xmsi_access);
-        if (wait_for_msix_trans(nic, i)) {
+        if (wait_for_msix_trans(nic, msix_index)) {
             DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
             continue;
         }
@@ -3770,11 +3825,11 @@ static void store_xmsi_data(struct s2io_nic *nic)
 static int s2io_enable_msi_x(struct s2io_nic *nic)
 {
     struct XENA_dev_config __iomem *bar0 = nic->bar0;
-    u64 tx_mat, rx_mat;
+    u64 rx_mat;
     u16 msi_control; /* Temp variable */
     int ret, i, j, msix_indx = 1;
 
-    nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry),
+    nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry),
                            GFP_KERNEL);
     if (!nic->entries) {
         DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \
@@ -3783,10 +3838,12 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
         return -ENOMEM;
     }
     nic->mac_control.stats_info->sw_stat.mem_allocated
-        += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+        += (nic->num_entries * sizeof(struct msix_entry));
+    memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry));
+
     nic->s2io_entries =
-        kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry),
+        kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry),
                 GFP_KERNEL);
     if (!nic->s2io_entries) {
         DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n",
@@ -3794,60 +3851,52 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
         nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++;
         kfree(nic->entries);
         nic->mac_control.stats_info->sw_stat.mem_freed
-            += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+            += (nic->num_entries * sizeof(struct msix_entry));
         return -ENOMEM;
     }
     nic->mac_control.stats_info->sw_stat.mem_allocated
-        += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
+        += (nic->num_entries * sizeof(struct s2io_msix_entry));
+    memset(nic->s2io_entries, 0,
+        nic->num_entries * sizeof(struct s2io_msix_entry));
 
-    for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
-        nic->entries[i].entry = i;
-        nic->s2io_entries[i].entry = i;
+    nic->entries[0].entry = 0;
+    nic->s2io_entries[0].entry = 0;
+    nic->s2io_entries[0].in_use = MSIX_FLG;
+    nic->s2io_entries[0].type = MSIX_ALARM_TYPE;
+    nic->s2io_entries[0].arg = &nic->mac_control.fifos;
+
+    for (i = 1; i < nic->num_entries; i++) {
+        nic->entries[i].entry = ((i - 1) * 8) + 1;
+        nic->s2io_entries[i].entry = ((i - 1) * 8) + 1;
         nic->s2io_entries[i].arg = NULL;
         nic->s2io_entries[i].in_use = 0;
     }
 
-    tx_mat = readq(&bar0->tx_mat0_n[0]);
-    for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
-        tx_mat |= TX_MAT_SET(i, msix_indx);
-        nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
-        nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
-        nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
-    }
-    writeq(tx_mat, &bar0->tx_mat0_n[0]);
-
     rx_mat = readq(&bar0->rx_mat);
-    for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) {
+    for (j = 0; j < nic->config.rx_ring_num; j++) {
         rx_mat |= RX_MAT_SET(j, msix_indx);
-        nic->s2io_entries[msix_indx].arg
-            = &nic->mac_control.rings[j];
-        nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
-        nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
+        nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
+        nic->s2io_entries[j+1].type = MSIX_RING_TYPE;
+        nic->s2io_entries[j+1].in_use = MSIX_FLG;
+        msix_indx += 8;
     }
     writeq(rx_mat, &bar0->rx_mat);
+    readq(&bar0->rx_mat);
 
-    nic->avail_msix_vectors = 0;
-    ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
-
+    ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
     /* We fail init if error or we get less vectors than min required */
-    if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
-        nic->avail_msix_vectors = ret;
-        ret = pci_enable_msix(nic->pdev, nic->entries, ret);
-    }
     if (ret) {
         DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
         kfree(nic->entries);
         nic->mac_control.stats_info->sw_stat.mem_freed
-            += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+            += (nic->num_entries * sizeof(struct msix_entry));
         kfree(nic->s2io_entries);
         nic->mac_control.stats_info->sw_stat.mem_freed
-            += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
+            += (nic->num_entries * sizeof(struct s2io_msix_entry));
         nic->entries = NULL;
         nic->s2io_entries = NULL;
-        nic->avail_msix_vectors = 0;
         return -ENOMEM;
     }
-    if (!nic->avail_msix_vectors)
-        nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
 
     /*
      * To enable MSI-X, MSI also needs to be enabled, due to a bug
@@ -3919,7 +3968,7 @@ static void remove_msix_isr(struct s2io_nic *sp)
     int i;
     u16 msi_control;
 
-    for (i = 0; i < MAX_REQUESTED_MSI_X; i++) {
+    for (i = 0; i < sp->num_entries; i++) {
         if (sp->s2io_entries[i].in_use ==
             MSIX_REGISTERED_SUCCESS) {
             int vector = sp->entries[i].vector;
@@ -3975,29 +4024,6 @@ static int s2io_open(struct net_device *dev)
     netif_carrier_off(dev);
     sp->last_link_state = 0;
 
-    if (sp->config.intr_type == MSI_X) {
-        int ret = s2io_enable_msi_x(sp);
-
-        if (!ret) {
-            ret = s2io_test_msi(sp);
-            /* rollback MSI-X, will re-enable during add_isr() */
-            remove_msix_isr(sp);
-        }
-        if (ret) {
-            DBG_PRINT(ERR_DBG,
-              "%s: MSI-X requested but failed to enable\n",
-              dev->name);
-            sp->config.intr_type = INTA;
-        }
-    }
-
-    /* NAPI doesn't work well with MSI(X) */
-    if (sp->config.intr_type != INTA) {
-        if(sp->config.napi)
-            sp->config.napi = 0;
-    }
-
     /* Initialize H/W and enable interrupts */
     err = s2io_card_up(sp);
     if (err) {
@@ -4020,12 +4046,12 @@ hw_init_failed:
         if (sp->entries) {
             kfree(sp->entries);
             sp->mac_control.stats_info->sw_stat.mem_freed
-            += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
+            += (sp->num_entries * sizeof(struct msix_entry));
         }
         if (sp->s2io_entries) {
             kfree(sp->s2io_entries);
             sp->mac_control.stats_info->sw_stat.mem_freed
-            += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
+            += (sp->num_entries * sizeof(struct s2io_msix_entry));
         }
     }
     return err;
@@ -4327,40 +4353,64 @@ s2io_alarm_handle(unsigned long data)
     mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
 }
 
-static int s2io_chk_rx_buffers(struct ring_info *ring)
-{
-    if (fill_rx_buffers(ring) == -ENOMEM) {
-        DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
-        DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
-    }
-    return 0;
-}
-
 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
 {
     struct ring_info *ring = (struct ring_info *)dev_id;
     struct s2io_nic *sp = ring->nic;
+    struct XENA_dev_config __iomem *bar0 = sp->bar0;
+    struct net_device *dev = sp->dev;
 
-    if (!is_s2io_card_up(sp))
+    if (unlikely(!is_s2io_card_up(sp)))
         return IRQ_HANDLED;
 
-    rx_intr_handler(ring);
-    s2io_chk_rx_buffers(ring);
+    if (sp->config.napi) {
+        u8 *addr = NULL, val8 = 0;
+
+        addr = (u8 *)&bar0->xmsi_mask_reg;
+        addr += (7 - ring->ring_no);
+        val8 = (ring->ring_no == 0) ? 0x7f : 0xff;
+        writeb(val8, addr);
+        val8 = readb(addr);
+        netif_rx_schedule(dev, &ring->napi);
+    } else {
+        rx_intr_handler(ring, 0);
+        s2io_chk_rx_buffers(ring);
+    }
 
     return IRQ_HANDLED;
 }
 
 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
 {
-    struct fifo_info *fifo = (struct fifo_info *)dev_id;
-    struct s2io_nic *sp = fifo->nic;
+    int i;
+    struct fifo_info *fifos = (struct fifo_info *)dev_id;
+    struct s2io_nic *sp = fifos->nic;
+    struct XENA_dev_config __iomem *bar0 = sp->bar0;
+    struct config_param *config = &sp->config;
+    u64 reason;
 
-    if (!is_s2io_card_up(sp))
+    if (unlikely(!is_s2io_card_up(sp)))
+        return IRQ_NONE;
+
+    reason = readq(&bar0->general_int_status);
+    if (unlikely(reason == S2IO_MINUS_ONE))
+        /* Nothing much can be done. Get out */
         return IRQ_HANDLED;
 
-    tx_intr_handler(fifo);
+    writeq(S2IO_MINUS_ONE, &bar0->general_int_mask);
+
+    if (reason & GEN_INTR_TXTRAFFIC)
+        writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
+
+    for (i = 0; i < config->tx_fifo_num; i++)
+        tx_intr_handler(&fifos[i]);
+
+    writeq(sp->general_int_mask, &bar0->general_int_mask);
+    readl(&bar0->general_int_status);
+
     return IRQ_HANDLED;
 }
 
 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
 {
     struct XENA_dev_config __iomem *bar0 = sp->bar0;
@@ -4762,14 +4812,10 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
 
         if (config->napi) {
             if (reason & GEN_INTR_RXTRAFFIC) {
-                if (likely(netif_rx_schedule_prep(dev,
-                            &sp->napi))) {
-                    __netif_rx_schedule(dev, &sp->napi);
-                    writeq(S2IO_MINUS_ONE,
-                           &bar0->rx_traffic_mask);
-                } else
-                    writeq(S2IO_MINUS_ONE,
-                           &bar0->rx_traffic_int);
+                netif_rx_schedule(dev, &sp->napi);
+                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
+                writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
+                readl(&bar0->rx_traffic_int);
             }
         } else {
             /*
@@ -4781,7 +4827,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
             writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
 
             for (i = 0; i < config->rx_ring_num; i++)
-                rx_intr_handler(&mac_control->rings[i]);
+                rx_intr_handler(&mac_control->rings[i], 0);
         }
 
         /*
@@ -6984,62 +7030,62 @@ static int s2io_add_isr(struct s2io_nic * sp)
 
     /* After proper initialization of H/W, register ISR */
     if (sp->config.intr_type == MSI_X) {
-        int i, msix_tx_cnt=0,msix_rx_cnt=0;
-
-        for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
-            if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
-                sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
-                    dev->name, i);
-                err = request_irq(sp->entries[i].vector,
-                    s2io_msix_fifo_handle, 0, sp->desc[i],
-                    sp->s2io_entries[i].arg);
-                /* If either data or addr is zero print it */
-                if(!(sp->msix_info[i].addr &&
-                    sp->msix_info[i].data)) {
-                    DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
-                        "Data:0x%llx\n",sp->desc[i],
-                        (unsigned long long)
-                        sp->msix_info[i].addr,
-                        (unsigned long long)
-                        sp->msix_info[i].data);
-                } else {
-                    msix_tx_cnt++;
-                }
-            } else {
-                sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
-                    dev->name, i);
-                err = request_irq(sp->entries[i].vector,
-                    s2io_msix_ring_handle, 0, sp->desc[i],
-                    sp->s2io_entries[i].arg);
-                /* If either data or addr is zero print it */
-                if(!(sp->msix_info[i].addr &&
-                    sp->msix_info[i].data)) {
-                    DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
-                        "Data:0x%llx\n",sp->desc[i],
-                        (unsigned long long)
-                        sp->msix_info[i].addr,
-                        (unsigned long long)
-                        sp->msix_info[i].data);
-                } else {
-                    msix_rx_cnt++;
-                }
-            }
-            if (err) {
-                remove_msix_isr(sp);
-                DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
-                    "failed\n", dev->name, i);
-                DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n",
-                    dev->name);
-                sp->config.intr_type = INTA;
-                break;
-            }
-            sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
-        }
-        if (!err) {
-            printk(KERN_INFO "MSI-X-TX %d entries enabled\n",
-                msix_tx_cnt);
-            printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
-                msix_rx_cnt);
-        }
-    }
+        int i, msix_rx_cnt = 0;
+
+        for (i = 0; i < sp->num_entries; i++) {
+            if (sp->s2io_entries[i].in_use == MSIX_FLG) {
+                if (sp->s2io_entries[i].type ==
+                    MSIX_RING_TYPE) {
+                    sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
+                        dev->name, i);
+                    err = request_irq(sp->entries[i].vector,
+                        s2io_msix_ring_handle, 0,
+                        sp->desc[i],
+                        sp->s2io_entries[i].arg);
+                } else if (sp->s2io_entries[i].type ==
+                    MSIX_ALARM_TYPE) {
+                    sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
+                        dev->name, i);
+                    err = request_irq(sp->entries[i].vector,
+                        s2io_msix_fifo_handle, 0,
+                        sp->desc[i],
+                        sp->s2io_entries[i].arg);
+                }
+                /* if either data or addr is zero print it. */
+                if (!(sp->msix_info[i].addr &&
+                    sp->msix_info[i].data)) {
+                    DBG_PRINT(ERR_DBG,
+                        "%s @Addr:0x%llx Data:0x%llx\n",
+                        sp->desc[i],
+                        (unsigned long long)
+                        sp->msix_info[i].addr,
+                        (unsigned long long)
+                        ntohl(sp->msix_info[i].data));
+                } else
+                    msix_rx_cnt++;
+                if (err) {
+                    remove_msix_isr(sp);
+
+                    DBG_PRINT(ERR_DBG,
+                        "%s:MSI-X-%d registration "
+                        "failed\n", dev->name, i);
+
+                    DBG_PRINT(ERR_DBG,
+                        "%s: Defaulting to INTA\n",
+                        dev->name);
+                    sp->config.intr_type = INTA;
+                    break;
+                }
+                sp->s2io_entries[i].in_use =
+                    MSIX_REGISTERED_SUCCESS;
+            }
+        }
+        if (!err) {
+            printk(KERN_INFO "MSI-X-RX %d entries enabled\n",
+                --msix_rx_cnt);
+            DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled"
+                " through alarm vector\n");
+        }
+    }
     if (sp->config.intr_type == INTA) {
@@ -7080,8 +7126,15 @@ static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
     clear_bit(__S2IO_STATE_CARD_UP, &sp->state);
 
     /* Disable napi */
-    if (config->napi)
-        napi_disable(&sp->napi);
+    if (sp->config.napi) {
+        int off = 0;
+        if (config->intr_type == MSI_X) {
+            for (; off < sp->config.rx_ring_num; off++)
+                napi_disable(&sp->mac_control.rings[off].napi);
+        }
+        else
+            napi_disable(&sp->napi);
+    }
 
     /* disable Tx and Rx traffic on the NIC */
     if (do_io)
@@ -7173,8 +7226,15 @@ static int s2io_card_up(struct s2io_nic * sp)
     }
 
     /* Initialise napi */
-    if (config->napi)
-        napi_enable(&sp->napi);
+    if (config->napi) {
+        int i;
+        if (config->intr_type == MSI_X) {
+            for (i = 0; i < sp->config.rx_ring_num; i++)
+                napi_enable(&sp->mac_control.rings[i].napi);
+        } else {
+            napi_enable(&sp->napi);
+        }
+    }
 
     /* Maintain the state prior to the open */
     if (sp->promisc_flg)
@@ -7217,7 +7277,7 @@ static int s2io_card_up(struct s2io_nic * sp)
     /* Enable select interrupts */
     en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS);
     if (sp->config.intr_type != INTA)
-        en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
+        en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS);
     else {
         interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
         interruptible |= TX_PIC_INTR;
@@ -7615,9 +7675,6 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
         rx_ring_num = MAX_RX_RINGS;
     }
 
-    if (*dev_intr_type != INTA)
-        napi = 0;
-
     if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) {
         DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
                   "Defaulting to INTA\n");
@@ -7918,8 +7975,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
      *  will use eth_mac_addr() for dev->set_mac_address
      *  mac address will be set every time dev->open() is called
      */
-    netif_napi_add(dev, &sp->napi, s2io_poll, 32);
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
     dev->poll_controller = s2io_netpoll;
 #endif
@@ -7963,6 +8018,32 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
         }
     }
 
+    if (sp->config.intr_type == MSI_X) {
+        sp->num_entries = config->rx_ring_num + 1;
+        ret = s2io_enable_msi_x(sp);
+
+        if (!ret) {
+            ret = s2io_test_msi(sp);
+            /* rollback MSI-X, will re-enable during add_isr() */
+            remove_msix_isr(sp);
+        }
+        if (ret) {
+            DBG_PRINT(ERR_DBG,
+              "%s: MSI-X requested but failed to enable\n",
+              dev->name);
+            sp->config.intr_type = INTA;
+        }
+    }
+
+    if (config->intr_type == MSI_X) {
+        for (i = 0; i < config->rx_ring_num ; i++)
+            netif_napi_add(dev, &mac_control->rings[i].napi,
+                s2io_poll_msix, 64);
+    } else {
+        netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64);
+    }
+
     /* Not needed for Herc */
     if (sp->device_type & XFRAME_I_DEVICE) {
         /*
@@ -8013,6 +8094,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
     /* store mac addresses from CAM to s2io_nic structure */
     do_s2io_store_unicast_mc(sp);
 
+    /* Configure MSIX vector for number of rings configured plus one */
+    if ((sp->device_type == XFRAME_II_DEVICE) &&
+        (config->intr_type == MSI_X))
+        sp->num_entries = config->rx_ring_num + 1;
+
     /* Store the values of the MSIX table in the s2io_nic structure */
     store_xmsi_data(sp);
     /* reset Nic and bring it to known state */
@@ -8078,8 +8164,14 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
         break;
     }
 
-    if (napi)
+    switch (sp->config.napi) {
+    case 0:
+        DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name);
+        break;
+    case 1:
         DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
+        break;
+    }
 
     DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
               sp->config.tx_fifo_num);

View file

@@ -706,7 +706,7 @@ struct ring_info {
 /* per-ring buffer counter */
 u32 rx_bufs_left;
 #define MAX_LRO_SESSIONS 32
 struct lro lro0_n[MAX_LRO_SESSIONS];
 u8 lro;
@@ -725,6 +725,11 @@ struct ring_info {
 /* copy of sp->pdev pointer */
 struct pci_dev *pdev;
+/* Per ring napi struct */
+struct napi_struct napi;
+unsigned long interrupt_count;
 /*
 * Place holders for the virtual and physical addresses of
 * all the Rx Blocks
@@ -841,7 +846,7 @@ struct usr_addr {
 * Structure to keep track of the MSI-X vectors and the corresponding
 * argument registered against each vector
 */
-#define MAX_REQUESTED_MSI_X 17
+#define MAX_REQUESTED_MSI_X 9
 struct s2io_msix_entry
 {
 u16 vector;
@@ -849,8 +854,8 @@ struct s2io_msix_entry
 void *arg;
 u8 type;
-#define MSIX_FIFO_TYPE 1
+#define MSIX_ALARM_TYPE 1
 #define MSIX_RING_TYPE 2
 u8 in_use;
 #define MSIX_REGISTERED_SUCCESS 0xAA
@@ -877,7 +882,6 @@ struct s2io_nic {
 */
 int pkts_to_process;
 struct net_device *dev;
-struct napi_struct napi;
 struct mac_info mac_control;
 struct config_param config;
 struct pci_dev *pdev;
@@ -948,6 +952,7 @@ struct s2io_nic {
 */
 u8 other_fifo_idx;
+struct napi_struct napi;
 /* after blink, the adapter must be restored with original
 * values.
 */
@@ -962,6 +967,7 @@ struct s2io_nic {
 unsigned long long start_time;
 struct vlan_group *vlgrp;
 #define MSIX_FLG 0xA5
+int num_entries;
 struct msix_entry *entries;
 int msi_detected;
 wait_queue_head_t msi_wait;
@@ -982,6 +988,7 @@ struct s2io_nic {
 u16 lro_max_aggr_per_sess;
 volatile unsigned long state;
 u64 general_int_mask;
 #define VPD_STRING_LEN 80
 u8 product_name[VPD_STRING_LEN];
 u8 serial_num[VPD_STRING_LEN];
@@ -1103,7 +1110,7 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev);
 static int init_shared_mem(struct s2io_nic *sp);
 static void free_shared_mem(struct s2io_nic *sp);
 static int init_nic(struct s2io_nic *nic);
-static void rx_intr_handler(struct ring_info *ring_data);
+static int rx_intr_handler(struct ring_info *ring_data, int budget);
 static void tx_intr_handler(struct fifo_info *fifo_data);
 static void s2io_handle_errors(void * dev_id);
@@ -1114,7 +1121,8 @@ static void s2io_set_multicast(struct net_device *dev);
 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp);
 static void s2io_link(struct s2io_nic * sp, int link);
 static void s2io_reset(struct s2io_nic * sp);
-static int s2io_poll(struct napi_struct *napi, int budget);
+static int s2io_poll_msix(struct napi_struct *napi, int budget);
+static int s2io_poll_inta(struct napi_struct *napi, int budget);
 static void s2io_init_pci(struct s2io_nic * sp);
 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr);
 static void s2io_alarm_handle(unsigned long data);
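The s2io.h hunks above move the napi_struct out of struct s2io_nic and into each ring_info, and change rx_intr_handler() to take and honour a budget, which is the shape the NAPI core expects when every MSI-X RX vector polls only its own ring. Below is a minimal sketch of that pattern; it mirrors the s2io_poll_msix()/rx_intr_handler() prototypes shown above but is illustrative only, not the driver's actual implementation, and assumes the netif_rx_complete() API of this kernel era.

#include <linux/netdevice.h>

/* Illustrative per-ring NAPI poll: one napi_struct embedded in each
 * ring_info, as declared in the hunk above. */
static int s2io_poll_msix(struct napi_struct *napi, int budget)
{
        struct ring_info *ring = container_of(napi, struct ring_info, napi);
        int pkts_processed;

        /* rx_intr_handler() returns how many packets it completed,
         * never more than budget (see the changed prototype above). */
        pkts_processed = rx_intr_handler(ring, budget);

        if (pkts_processed < budget) {
                /* Ring went idle: leave polling mode and re-enable the
                 * ring's MSI-X vector (interrupt unmasking omitted here). */
                netif_rx_complete(napi->dev, napi);
        }
        return pkts_processed;
}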


@@ -179,8 +179,7 @@ enum sbmac_state {
 #define SBMAC_MAX_TXDESCR 256
 #define SBMAC_MAX_RXDESCR 256
-#define ETHER_ALIGN 2
 #define ETHER_ADDR_LEN 6
 #define ENET_PACKET_SIZE 1518
 /*#define ENET_PACKET_SIZE 9216 */
@@ -262,8 +261,6 @@ struct sbmac_softc {
 spinlock_t sbm_lock; /* spin lock */
 int sbm_devflags; /* current device flags */
-int sbm_buffersize;
 /*
 * Controller-specific things
 */
@@ -305,10 +302,11 @@ struct sbmac_softc {
 static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
 int txrx, int maxdescr);
 static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
-static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m);
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+struct sk_buff *m);
 static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
 static void sbdma_emptyring(struct sbmacdma *d);
-static void sbdma_fillring(struct sbmacdma *d);
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
 static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
 int work_to_do, int poll);
 static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
@@ -777,16 +775,13 @@ static void sbdma_channel_stop(struct sbmacdma *d)
 d->sbdma_remptr = NULL;
 }
-static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
+static inline void sbdma_align_skb(struct sk_buff *skb,
+unsigned int power2, unsigned int offset)
 {
-unsigned long addr;
-unsigned long newaddr;
-addr = (unsigned long) skb->data;
-newaddr = (addr + power2 - 1) & ~(power2 - 1);
-skb_reserve(skb,newaddr-addr+offset);
+unsigned char *addr = skb->data;
+unsigned char *newaddr = PTR_ALIGN(addr, power2);
+skb_reserve(skb, newaddr - addr + offset);
 }
@@ -797,7 +792,8 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
 * this queues a buffer for inbound packets.
 *
 * Input parameters:
-* d - DMA channel descriptor
+* sc - softc structure
+* d - DMA channel descriptor
 * sb - sk_buff to add, or NULL if we should allocate one
 *
 * Return value:
@@ -806,8 +802,10 @@ static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset)
 ********************************************************************* */
-static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+struct sk_buff *sb)
 {
+struct net_device *dev = sc->sbm_dev;
 struct sbdmadscr *dsc;
 struct sbdmadscr *nextdsc;
 struct sk_buff *sb_new = NULL;
@@ -848,14 +846,16 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
 */
 if (sb == NULL) {
-sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN);
+sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
+SMP_CACHE_BYTES * 2 +
+NET_IP_ALIGN);
 if (sb_new == NULL) {
 pr_info("%s: sk_buff allocation failed\n",
 d->sbdma_eth->sbm_dev->name);
 return -ENOBUFS;
 }
-sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN);
+sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
 }
 else {
 sb_new = sb;
@@ -874,10 +874,10 @@ static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb)
 * Do not interrupt per DMA transfer.
 */
 dsc->dscr_a = virt_to_phys(sb_new->data) |
-V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0;
+V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
 #else
 dsc->dscr_a = virt_to_phys(sb_new->data) |
-V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
+V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
 M_DMA_DSCRA_INTERRUPT;
 #endif
@@ -1032,18 +1032,19 @@ static void sbdma_emptyring(struct sbmacdma *d)
 * with sk_buffs
 *
 * Input parameters:
-* d - DMA channel
+* sc - softc structure
+* d - DMA channel
 *
 * Return value:
 * nothing
 ********************************************************************* */
-static void sbdma_fillring(struct sbmacdma *d)
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
 {
 int idx;
-for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) {
-if (sbdma_add_rcvbuffer(d,NULL) != 0)
+for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) {
+if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
 break;
 }
 }
@@ -1159,10 +1160,11 @@ again:
 * packet and put it right back on the receive ring.
 */
-if (unlikely (sbdma_add_rcvbuffer(d,NULL) ==
+if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
 -ENOBUFS)) {
 dev->stats.rx_dropped++;
-sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */
+/* Re-add old buffer */
+sbdma_add_rcvbuffer(sc, d, sb);
 /* No point in continuing at the moment */
 printk(KERN_ERR "dropped packet (1)\n");
 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
@@ -1212,7 +1214,7 @@ again:
 * put it back on the receive ring.
 */
 dev->stats.rx_errors++;
-sbdma_add_rcvbuffer(d,sb);
+sbdma_add_rcvbuffer(sc, d, sb);
 }
@@ -1570,7 +1572,7 @@ static void sbmac_channel_start(struct sbmac_softc *s)
 * Fill the receive ring
 */
-sbdma_fillring(&(s->sbm_rxdma));
+sbdma_fillring(s, &(s->sbm_rxdma));
 /*
 * Turn on the rest of the bits in the enable register
@@ -2312,13 +2314,6 @@ static int sbmac_init(struct platform_device *pldev, long long base)
 dev->dev_addr[i] = eaddr[i];
 }
-/*
-* Init packet size
-*/
-sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN;
 /*
 * Initialize context (get pointers to registers and stuff), then
 * allocate the memory for the descriptor tables.
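The rewritten sbdma_align_skb() above swaps the open-coded rounding arithmetic for PTR_ALIGN() and reserves NET_IP_ALIGN instead of the driver-private ETHER_ALIGN constant, so the IP header still lands on a naturally aligned boundary behind the 14-byte Ethernet header. A hedged, stand-alone sketch of the same idiom follows; the helper names are illustrative, not part of the driver.

#include <linux/kernel.h>       /* PTR_ALIGN() */
#include <linux/cache.h>        /* SMP_CACHE_BYTES */
#include <linux/skbuff.h>
#include <linux/netdevice.h>    /* netdev_alloc_skb(), NET_IP_ALIGN */

/* Align skb->data to "align" (a power of two), then leave "offset"
 * extra bytes of headroom - the same recipe as sbdma_align_skb(). */
static void align_rx_skb(struct sk_buff *skb, unsigned int align,
                         unsigned int offset)
{
        unsigned char *addr = skb->data;
        unsigned char *aligned = PTR_ALIGN(addr, align);

        skb_reserve(skb, aligned - addr + offset);
}

/* Typical use, in the style of sbdma_add_rcvbuffer() above: allocate with
 * slack for cache-line alignment, then align and add NET_IP_ALIGN. */
static struct sk_buff *alloc_rx_skb(struct net_device *dev, unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, len + SMP_CACHE_BYTES * 2 +
                                               NET_IP_ALIGN);

        if (skb)
                align_rx_skb(skb, SMP_CACHE_BYTES, NET_IP_ALIGN);
        return skb;
}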


@@ -953,9 +953,6 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
 unsigned entry;
 u32 tx_status;
-if (skb_padto(skb, ETH_ZLEN))
-return NETDEV_TX_OK;
 if (unlikely(skb->len > TX_BUF_SIZE)) {
 dev->stats.tx_dropped++;
 goto out;
@@ -975,6 +972,11 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
 len = skb->len;
+if (unlikely(len < ETH_ZLEN)) {
+memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
+0, ETH_ZLEN - len);
+len = ETH_ZLEN;
+}
 wmb();
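sc92031 copies every outgoing frame into a per-descriptor bounce buffer (the skb_copy_and_csum_dev() call above), so the hunk pads runt frames by zeroing the tail of that buffer up to ETH_ZLEN instead of calling skb_padto() on the skb, which avoids a possible reallocation of the skb itself. A small sketch of the idea in isolation; the buffer and helper names are illustrative.

#include <linux/if_ether.h>     /* ETH_ZLEN */
#include <linux/string.h>
#include <linux/types.h>

/* tx_buf points at the bounce buffer the frame was already copied into;
 * returns the length actually handed to the hardware. */
static unsigned int pad_short_frame(u8 *tx_buf, unsigned int len)
{
        if (len < ETH_ZLEN) {
                memset(tx_buf + len, 0, ETH_ZLEN - len);
                len = ETH_ZLEN;
        }
        return len;
}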


@@ -483,7 +483,7 @@ typedef union efx_oword {
 #endif
 #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
-if (FALCON_REV(efx) >= FALCON_REV_B0) { \
+if (falcon_rev(efx) >= FALCON_REV_B0) { \
 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
 } else { \
 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
@@ -491,7 +491,7 @@ typedef union efx_oword {
 } while (0)
 #define EFX_QWORD_FIELD_VER(efx, qword, field) \
-(FALCON_REV(efx) >= FALCON_REV_B0 ? \
+(falcon_rev(efx) >= FALCON_REV_B0 ? \
 EFX_QWORD_FIELD((qword), field##_B0) : \
 EFX_QWORD_FIELD((qword), field##_A1))
@@ -501,8 +501,5 @@ typedef union efx_oword {
 #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
 #define EFX_DMA_TYPE_WIDTH(width) \
 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
-#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
-~((u64) 0) : ~((u32) 0))
-#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
 #endif /* EFX_BITFIELD_H */


@@ -27,10 +27,8 @@ static void blink_led_timer(unsigned long context)
 struct efx_blinker *bl = &efx->board_info.blinker;
 efx->board_info.set_fault_led(efx, bl->state);
 bl->state = !bl->state;
-if (bl->resubmit) {
-bl->timer.expires = jiffies + BLINK_INTERVAL;
-add_timer(&bl->timer);
-}
+if (bl->resubmit)
+mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
 }
 static void board_blink(struct efx_nic *efx, int blink)
@@ -44,8 +42,7 @@ static void board_blink(struct efx_nic *efx, int blink)
 blinker->state = 0;
 setup_timer(&blinker->timer, blink_led_timer,
 (unsigned long)efx);
-blinker->timer.expires = jiffies + BLINK_INTERVAL;
-add_timer(&blinker->timer);
+mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
 } else {
 blinker->resubmit = 0;
 if (blinker->timer.function)
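mod_timer() sets the new expiry and (re)activates the timer in one step, so the expires/add_timer pairs above collapse into a single call that is also safe if the timer happens to be pending already. A minimal self-rearming blink timer in the same style; the names and period are illustrative, and current kernels would use timer_setup() rather than the setup_timer() of this era.

#include <linux/timer.h>
#include <linux/jiffies.h>

#define BLINK_INTERVAL (HZ / 3)         /* illustrative period */

static struct timer_list blink_timer;
static int blink_state;

static void blink_timer_fn(unsigned long context)
{
        blink_state = !blink_state;
        /* drive the LED from blink_state here */
        mod_timer(&blink_timer, jiffies + BLINK_INTERVAL);      /* re-arm */
}

static void blink_start(void)
{
        setup_timer(&blink_timer, blink_timer_fn, 0);
        mod_timer(&blink_timer, jiffies + BLINK_INTERVAL);
}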


@@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
 */
 static inline void efx_channel_processed(struct efx_channel *channel)
 {
-/* Write to EVQ_RPTR_REG. If a new event arrived in a race
-* with finishing processing, a new interrupt will be raised.
-*/
+/* The interrupt handler for this channel may set work_pending
+* as soon as we acknowledge the events we've seen. Make sure
+* it's cleared before then. */
 channel->work_pending = 0;
-smp_wmb(); /* Ensure channel updated before any new interrupt. */
+smp_wmb();
 falcon_eventq_read_ack(channel);
 }
@@ -265,7 +266,7 @@ void efx_process_channel_now(struct efx_channel *channel)
 napi_disable(&channel->napi_str);
 /* Poll the channel */
-(void) efx_process_channel(channel, efx->type->evq_size);
+efx_process_channel(channel, efx->type->evq_size);
 /* Ack the eventq. This may cause an interrupt to be generated
 * when they are reenabled */
@@ -317,26 +318,6 @@ static void efx_remove_eventq(struct efx_channel *channel)
 *
 *************************************************************************/
-/* Setup per-NIC RX buffer parameters.
-* Calculate the rx buffer allocation parameters required to support
-* the current MTU, including padding for header alignment and overruns.
-*/
-static void efx_calc_rx_buffer_params(struct efx_nic *efx)
-{
-unsigned int order, len;
-len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
-EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
-efx->type->rx_buffer_padding);
-/* Calculate page-order */
-for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
-;
-efx->rx_buffer_len = len;
-efx->rx_buffer_order = order;
-}
 static int efx_probe_channel(struct efx_channel *channel)
 {
 struct efx_tx_queue *tx_queue;
@@ -387,7 +368,14 @@ static int efx_init_channels(struct efx_nic *efx)
 struct efx_channel *channel;
 int rc = 0;
-efx_calc_rx_buffer_params(efx);
+/* Calculate the rx buffer allocation parameters required to
+* support the current MTU, including padding for header
+* alignment and overruns.
+*/
+efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
+EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+efx->type->rx_buffer_padding);
+efx->rx_buffer_order = get_order(efx->rx_buffer_len);
 /* Initialise the channels */
 efx_for_each_channel(channel, efx) {
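get_order() returns the smallest page order whose size covers a given length, which is exactly what the removed efx_calc_rx_buffer_params() loop computed by hand, so the calculation now sits inline in efx_init_channels(). A quick illustration of the equivalence; the values assume 4 KiB pages and are only examples.

#include <asm/page.h>           /* PAGE_SIZE, get_order() */

/* get_order(1500) == 0 (one page) and get_order(9000) == 2 (four pages):
 * the same results as the removed
 * "for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order);" loop. */
static unsigned int rx_buffer_order(unsigned int rx_buffer_len)
{
        return get_order(rx_buffer_len);
}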
@@ -440,9 +428,12 @@ static void efx_start_channel(struct efx_channel *channel)
 netif_napi_add(channel->napi_dev, &channel->napi_str,
 efx_poll, napi_weight);
+/* The interrupt handler for this channel may set work_pending
+* as soon as we enable it. Make sure it's cleared before
+* then. Similarly, make sure it sees the enabled flag set. */
 channel->work_pending = 0;
 channel->enabled = 1;
-smp_wmb(); /* ensure channel updated before first interrupt */
+smp_wmb();
 napi_enable(&channel->napi_str);
@@ -704,7 +695,7 @@ static void efx_stop_port(struct efx_nic *efx)
 mutex_unlock(&efx->mac_lock);
 /* Serialise against efx_set_multicast_list() */
-if (NET_DEV_REGISTERED(efx)) {
+if (efx_dev_registered(efx)) {
 netif_tx_lock_bh(efx->net_dev);
 netif_tx_unlock_bh(efx->net_dev);
 }
@@ -791,22 +782,23 @@ static int efx_init_io(struct efx_nic *efx)
 efx->membase = ioremap_nocache(efx->membase_phys,
 efx->type->mem_map_size);
 if (!efx->membase) {
-EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n",
-efx->type->mem_bar, efx->membase_phys,
+EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
+efx->type->mem_bar,
+(unsigned long long)efx->membase_phys,
 efx->type->mem_map_size);
 rc = -ENOMEM;
 goto fail4;
 }
-EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n",
-efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size,
-efx->membase);
+EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
+efx->type->mem_bar, (unsigned long long)efx->membase_phys,
+efx->type->mem_map_size, efx->membase);
 return 0;
 fail4:
 release_mem_region(efx->membase_phys, efx->type->mem_map_size);
 fail3:
-efx->membase_phys = 0UL;
+efx->membase_phys = 0;
 fail2:
 pci_disable_device(efx->pci_dev);
 fail1:
@@ -824,7 +816,7 @@ static void efx_fini_io(struct efx_nic *efx)
 if (efx->membase_phys) {
 pci_release_region(efx->pci_dev, efx->type->mem_bar);
-efx->membase_phys = 0UL;
+efx->membase_phys = 0;
 }
 pci_disable_device(efx->pci_dev);
@@ -1043,7 +1035,7 @@ static void efx_start_all(struct efx_nic *efx)
 return;
 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
 return;
-if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
+if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
 return;
 /* Mark the port as enabled so port reconfigurations can start, then
@@ -1073,9 +1065,8 @@ static void efx_flush_all(struct efx_nic *efx)
 cancel_delayed_work_sync(&efx->monitor_work);
 /* Ensure that all RX slow refills are complete. */
-efx_for_each_rx_queue(rx_queue, efx) {
+efx_for_each_rx_queue(rx_queue, efx)
 cancel_delayed_work_sync(&rx_queue->work);
-}
 /* Stop scheduled port reconfigurations */
 cancel_work_sync(&efx->reconfigure_work);
@@ -1101,9 +1092,10 @@ static void efx_stop_all(struct efx_nic *efx)
 falcon_disable_interrupts(efx);
 if (efx->legacy_irq)
 synchronize_irq(efx->legacy_irq);
-efx_for_each_channel_with_interrupt(channel, efx)
+efx_for_each_channel_with_interrupt(channel, efx) {
 if (channel->irq)
 synchronize_irq(channel->irq);
+}
 /* Stop all NAPI processing and synchronous rx refills */
 efx_for_each_channel(channel, efx)
@@ -1125,7 +1117,7 @@ static void efx_stop_all(struct efx_nic *efx)
 /* Stop the kernel transmit interface late, so the watchdog
 * timer isn't ticking over the flush */
 efx_stop_queue(efx);
-if (NET_DEV_REGISTERED(efx)) {
+if (efx_dev_registered(efx)) {
 netif_tx_lock_bh(efx->net_dev);
 netif_tx_unlock_bh(efx->net_dev);
 }
@@ -1344,13 +1336,17 @@ static int efx_net_stop(struct net_device *net_dev)
 return 0;
 }
-/* Context: process, dev_base_lock held, non-blocking. */
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
 static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
 {
 struct efx_nic *efx = net_dev->priv;
 struct efx_mac_stats *mac_stats = &efx->mac_stats;
 struct net_device_stats *stats = &net_dev->stats;
+/* Update stats if possible, but do not wait if another thread
+* is updating them (or resetting the NIC); slightly stale
+* stats are acceptable.
+*/
 if (!spin_trylock(&efx->stats_lock))
 return stats;
 if (efx->state == STATE_RUNNING) {
@@ -1494,7 +1490,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
 static int efx_netdev_event(struct notifier_block *this,
 unsigned long event, void *ptr)
 {
-struct net_device *net_dev = (struct net_device *)ptr;
+struct net_device *net_dev = ptr;
 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
 struct efx_nic *efx = net_dev->priv;
@@ -1563,7 +1559,7 @@ static void efx_unregister_netdev(struct efx_nic *efx)
 efx_for_each_tx_queue(tx_queue, efx)
 efx_release_tx_buffers(tx_queue);
-if (NET_DEV_REGISTERED(efx)) {
+if (efx_dev_registered(efx)) {
 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
 unregister_netdev(efx->net_dev);
 }
@@ -1688,7 +1684,7 @@ static int efx_reset(struct efx_nic *efx)
 if (method == RESET_TYPE_DISABLE) {
 /* Reinitialise the device anyway so the driver unload sequence
 * can talk to the external SRAM */
-(void) falcon_init_nic(efx);
+falcon_init_nic(efx);
 rc = -EIO;
 goto fail4;
 }
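membase_phys becomes resource_size_t (see the net_driver.h hunk further below), whose width follows the platform's resource configuration rather than sizeof(long), so the log statements above switch to a %llx format with an explicit cast. A minimal sketch of that printing convention; the values are illustrative.

#include <linux/ioport.h>       /* resource_size_t */
#include <linux/kernel.h>

static void log_bar(resource_size_t base, unsigned int size)
{
        /* The cast keeps the format string correct whether resource_size_t
         * is 32 or 64 bits wide on this configuration. */
        printk(KERN_INFO "memory BAR at %llx+%x\n",
               (unsigned long long)base, size);
}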


@@ -116,17 +116,8 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 **************************************************************************
 */
-/* DMA address mask (up to 46-bit, avoiding compiler warnings)
-*
-* Note that it is possible to have a platform with 64-bit longs and
-* 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the
-* platform DMA mask.
-*/
-#if BITS_PER_LONG == 64
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
-#else
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
-#endif
+/* DMA address mask */
+#define FALCON_DMA_MASK DMA_BIT_MASK(46)
 /* TX DMA length mask (13-bit) */
 #define FALCON_TX_DMA_MASK (4096 - 1)
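DMA_BIT_MASK(46) expands to a 46-bit all-ones constant, so the BITS_PER_LONG ladder removed here and the EFX_DMA_MASK()/EFX_DMA_MAX_MASK clamping removed from bitfield.h are no longer needed. A small illustration of the equivalence; the wrapper name is made up for the example.

#include <linux/dma-mapping.h>  /* DMA_BIT_MASK() */
#include <linux/types.h>

/* DMA_BIT_MASK(n) is ((1ULL << n) - 1) for n < 64, so DMA_BIT_MASK(46)
 * equals 0x00003fffffffffffULL - the literal the old FALCON_DMA_MASK
 * definitions spelled out by hand. */
static inline u64 example_dma_mask_46(void)
{
        return DMA_BIT_MASK(46);
}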
@@ -145,7 +136,7 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 #define PCI_EXP_LNKSTA_LNK_WID_LBN 4
 #define FALCON_IS_DUAL_FUNC(efx) \
-(FALCON_REV(efx) < FALCON_REV_B0)
+(falcon_rev(efx) < FALCON_REV_B0)
 /**************************************************************************
 *
@@ -465,7 +456,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
 TX_DESCQ_TYPE, 0,
 TX_NON_IP_DROP_DIS_B0, 1);
-if (FALCON_REV(efx) >= FALCON_REV_B0) {
+if (falcon_rev(efx) >= FALCON_REV_B0) {
 int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
@@ -474,7 +465,7 @@ int falcon_init_tx(struct efx_tx_queue *tx_queue)
 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
 tx_queue->queue);
-if (FALCON_REV(efx) < FALCON_REV_B0) {
+if (falcon_rev(efx) < FALCON_REV_B0) {
 efx_oword_t reg;
 BUG_ON(tx_queue->queue >= 128); /* HW limit */
@@ -635,7 +626,7 @@ int falcon_init_rx(struct efx_rx_queue *rx_queue)
 efx_oword_t rx_desc_ptr;
 struct efx_nic *efx = rx_queue->efx;
 int rc;
-int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
+int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
 int iscsi_digest_en = is_b0;
 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
@@ -822,10 +813,10 @@ static inline void falcon_handle_tx_event(struct efx_channel *channel,
 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
 tx_queue = &efx->tx_queue[tx_ev_q_label];
-if (NET_DEV_REGISTERED(efx))
+if (efx_dev_registered(efx))
 netif_tx_lock(efx->net_dev);
 falcon_notify_tx_desc(tx_queue);
-if (NET_DEV_REGISTERED(efx))
+if (efx_dev_registered(efx))
 netif_tx_unlock(efx->net_dev);
 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
 EFX_WORKAROUND_10727(efx)) {
@@ -884,7 +875,7 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
 RX_EV_TCP_UDP_CHKSUM_ERR);
 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
-rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
+rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
@@ -1065,7 +1056,7 @@ static void falcon_handle_global_event(struct efx_channel *channel,
 EFX_QWORD_FIELD(*event, XG_PHY_INTR))
 is_phy_event = 1;
-if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
+if ((falcon_rev(efx) >= FALCON_REV_B0) &&
 EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
 is_phy_event = 1;
@@ -1405,7 +1396,7 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
 static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
 {
 struct falcon_nic_data *nic_data = efx->nic_data;
-efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+efx_oword_t *int_ker = efx->irq_status.addr;
 efx_oword_t fatal_intr;
 int error, mem_perr;
 static int n_int_errors;
@@ -1451,8 +1442,8 @@ out:
 */
 static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 {
-struct efx_nic *efx = (struct efx_nic *)dev_id;
-efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+struct efx_nic *efx = dev_id;
+efx_oword_t *int_ker = efx->irq_status.addr;
 struct efx_channel *channel;
 efx_dword_t reg;
 u32 queues;
@@ -1489,8 +1480,8 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 {
-struct efx_nic *efx = (struct efx_nic *)dev_id;
-efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+struct efx_nic *efx = dev_id;
+efx_oword_t *int_ker = efx->irq_status.addr;
 struct efx_channel *channel;
 int syserr;
 int queues;
@@ -1542,9 +1533,9 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 */
 static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
 {
-struct efx_channel *channel = (struct efx_channel *)dev_id;
+struct efx_channel *channel = dev_id;
 struct efx_nic *efx = channel->efx;
-efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+efx_oword_t *int_ker = efx->irq_status.addr;
 int syserr;
 efx->last_irq_cpu = raw_smp_processor_id();
@@ -1572,7 +1563,7 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
 unsigned long offset;
 efx_dword_t dword;
-if (FALCON_REV(efx) < FALCON_REV_B0)
+if (falcon_rev(efx) < FALCON_REV_B0)
 return;
 for (offset = RX_RSS_INDIR_TBL_B0;
@@ -1595,7 +1586,7 @@ int falcon_init_interrupt(struct efx_nic *efx)
 if (!EFX_INT_MODE_USE_MSI(efx)) {
 irq_handler_t handler;
-if (FALCON_REV(efx) >= FALCON_REV_B0)
+if (falcon_rev(efx) >= FALCON_REV_B0)
 handler = falcon_legacy_interrupt_b0;
 else
 handler = falcon_legacy_interrupt_a1;
@@ -1636,12 +1627,13 @@ void falcon_fini_interrupt(struct efx_nic *efx)
 efx_oword_t reg;
 /* Disable MSI/MSI-X interrupts */
-efx_for_each_channel_with_interrupt(channel, efx)
+efx_for_each_channel_with_interrupt(channel, efx) {
 if (channel->irq)
 free_irq(channel->irq, channel);
+}
 /* ACK legacy interrupt */
-if (FALCON_REV(efx) >= FALCON_REV_B0)
+if (falcon_rev(efx) >= FALCON_REV_B0)
 falcon_read(efx, &reg, INT_ISR0_B0);
 else
 falcon_irq_ack_a1(efx);
@@ -1732,7 +1724,7 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
 efx_oword_t temp;
 int count;
-if ((FALCON_REV(efx) < FALCON_REV_B0) ||
+if ((falcon_rev(efx) < FALCON_REV_B0) ||
 (efx->loopback_mode != LOOPBACK_NONE))
 return;
@@ -1785,7 +1777,7 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
 {
 efx_oword_t temp;
-if (FALCON_REV(efx) < FALCON_REV_B0)
+if (falcon_rev(efx) < FALCON_REV_B0)
 return;
 /* Isolate the MAC -> RX */
@@ -1823,7 +1815,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 MAC_SPEED, link_speed);
 /* On B0, MAC backpressure can be disabled and packets get
 * discarded. */
-if (FALCON_REV(efx) >= FALCON_REV_B0) {
+if (falcon_rev(efx) >= FALCON_REV_B0) {
 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
 !efx->link_up);
 }
@@ -1841,7 +1833,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
 /* Unisolate the MAC -> RX */
-if (FALCON_REV(efx) >= FALCON_REV_B0)
+if (falcon_rev(efx) >= FALCON_REV_B0)
 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
 falcon_write(efx, &reg, RX_CFG_REG_KER);
 }
@@ -1856,7 +1848,7 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
 return 0;
 /* Statistics fetch will fail if the MAC is in TX drain */
-if (FALCON_REV(efx) >= FALCON_REV_B0) {
+if (falcon_rev(efx) >= FALCON_REV_B0) {
 efx_oword_t temp;
 falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
@@ -1940,7 +1932,7 @@ static int falcon_gmii_wait(struct efx_nic *efx)
 static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
 int addr, int value)
 {
-struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+struct efx_nic *efx = net_dev->priv;
 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
 efx_oword_t reg;
@@ -2008,7 +2000,7 @@ static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
 * could be read, -1 will be returned. */
 static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
 {
-struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+struct efx_nic *efx = net_dev->priv;
 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
 efx_oword_t reg;
 int value = -1;
@@ -2113,7 +2105,7 @@ int falcon_probe_port(struct efx_nic *efx)
 falcon_init_mdio(&efx->mii);
 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
-if (FALCON_REV(efx) >= FALCON_REV_B0)
+if (falcon_rev(efx) >= FALCON_REV_B0)
 efx->flow_control = EFX_FC_RX | EFX_FC_TX;
 else
 efx->flow_control = EFX_FC_RX;
@@ -2373,7 +2365,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
 return -ENODEV;
 }
-switch (FALCON_REV(efx)) {
+switch (falcon_rev(efx)) {
 case FALCON_REV_A0:
 case 0xff:
 EFX_ERR(efx, "Falcon rev A0 not supported\n");
@@ -2399,7 +2391,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
 break;
 default:
-EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx));
+EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
 return -ENODEV;
 }
@@ -2419,7 +2411,7 @@ int falcon_probe_nic(struct efx_nic *efx)
 /* Allocate storage for hardware specific data */
 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
-efx->nic_data = (void *) nic_data;
+efx->nic_data = nic_data;
 /* Determine number of ports etc. */
 rc = falcon_probe_nic_variant(efx);
@@ -2489,13 +2481,10 @@ int falcon_probe_nic(struct efx_nic *efx)
 */
 int falcon_init_nic(struct efx_nic *efx)
 {
-struct falcon_nic_data *data;
 efx_oword_t temp;
 unsigned thresh;
 int rc;
-data = (struct falcon_nic_data *)efx->nic_data;
 /* Set up the address region register. This is only needed
 * for the B0 FPGA, but since we are just pushing in the
 * reset defaults this may as well be unconditional. */
@@ -2562,7 +2551,7 @@ int falcon_init_nic(struct efx_nic *efx)
 /* Set number of RSS queues for receive path. */
 falcon_read(efx, &temp, RX_FILTER_CTL_REG);
-if (FALCON_REV(efx) >= FALCON_REV_B0)
+if (falcon_rev(efx) >= FALCON_REV_B0)
 EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
 else
 EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
@@ -2600,7 +2589,7 @@ int falcon_init_nic(struct efx_nic *efx)
 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
 /* Squash TX of packets of 16 bytes or less */
-if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
+if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
 falcon_write(efx, &temp, TX_CFG2_REG_KER);
@@ -2617,7 +2606,7 @@ int falcon_init_nic(struct efx_nic *efx)
 if (EFX_WORKAROUND_7575(efx))
 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
 (3 * 4096) / 32);
-if (FALCON_REV(efx) >= FALCON_REV_B0)
+if (falcon_rev(efx) >= FALCON_REV_B0)
 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
 /* RX FIFO flow control thresholds */
@@ -2633,7 +2622,7 @@ int falcon_init_nic(struct efx_nic *efx)
 falcon_write(efx, &temp, RX_CFG_REG_KER);
 /* Set destination of both TX and RX Flush events */
-if (FALCON_REV(efx) >= FALCON_REV_B0) {
+if (falcon_rev(efx) >= FALCON_REV_B0) {
 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
 falcon_write(efx, &temp, DP_CTRL_REG);
 }
@@ -2647,7 +2636,7 @@ void falcon_remove_nic(struct efx_nic *efx)
 falcon_free_buffer(efx, &efx->irq_status);
-(void) falcon_reset_hw(efx, RESET_TYPE_ALL);
+falcon_reset_hw(efx, RESET_TYPE_ALL);
 /* Release the second function after the reset */
 if (nic_data->pci_dev2) {


@@ -23,7 +23,10 @@ enum falcon_revision {
 FALCON_REV_B0 = 2,
 };
-#define FALCON_REV(efx) ((efx)->pci_dev->revision)
+static inline int falcon_rev(struct efx_nic *efx)
+{
+return efx->pci_dev->revision;
+}
 extern struct efx_nic_type falcon_a_nic_type;
 extern struct efx_nic_type falcon_b_nic_type;
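Turning FALCON_REV() into the static inline falcon_rev() keeps every call site textually identical (as the falcon.c and falcon_xmac.c hunks show) while giving the argument a real type, so passing anything other than a struct efx_nic pointer fails at compile time instead of expanding silently. A generic sketch of the same macro-to-inline pattern; the structure and helper names are made up for illustration.

#include <linux/pci.h>

struct example_nic {
        struct pci_dev *pci_dev;
};

/* Inline accessor in the style of falcon_rev(): unlike a macro, the
 * parameter is type-checked and evaluated exactly once. */
static inline int example_rev(struct example_nic *nic)
{
        return nic->pci_dev->revision;
}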


@@ -1125,7 +1125,7 @@ struct falcon_nvconfig_board_v2 {
 u8 port1_phy_type;
 __le16 asic_sub_revision;
 __le16 board_revision;
-} __attribute__ ((packed));
+} __packed;
 #define NVCONFIG_BASE 0x300
 #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
@@ -1144,6 +1144,6 @@ struct falcon_nvconfig {
 __le16 board_struct_ver;
 __le16 board_checksum;
 struct falcon_nvconfig_board_v2 board_v2;
-} __attribute__ ((packed));
+} __packed;
 #endif /* EFX_FALCON_HWDEFS_H */


@@ -56,14 +56,27 @@
 #define FALCON_USE_QWORD_IO 1
 #endif
-#define _falcon_writeq(efx, value, reg) \
-__raw_writeq((__force u64) (value), (efx)->membase + (reg))
-#define _falcon_writel(efx, value, reg) \
-__raw_writel((__force u32) (value), (efx)->membase + (reg))
-#define _falcon_readq(efx, reg) \
-((__force __le64) __raw_readq((efx)->membase + (reg)))
-#define _falcon_readl(efx, reg) \
-((__force __le32) __raw_readl((efx)->membase + (reg)))
+#ifdef FALCON_USE_QWORD_IO
+static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
+unsigned int reg)
+{
+__raw_writeq((__force u64)value, efx->membase + reg);
+}
+static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
+{
+return (__force __le64)__raw_readq(efx->membase + reg);
+}
+#endif
+static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
+unsigned int reg)
+{
+__raw_writel((__force u32)value, efx->membase + reg);
+}
+static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
+{
+return (__force __le32)__raw_readl(efx->membase + reg);
+}
 /* Writes to a normal 16-byte Falcon register, locking as appropriate. */
 static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
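Rewriting the _falcon_read/_falcon_write macros as static inlines gives the value parameters explicit __le32/__le64 types, so sparse can verify that callers pass little-endian values while the __force casts stay confined to the one line that touches the bus. A hedged sketch of calling such a typed accessor; the register offset is made up for illustration.

#include <linux/types.h>
#include <asm/byteorder.h>      /* cpu_to_le32() */

/* Illustrative only: convert a CPU-order value once, then hand the
 * typed __le32 to a helper shaped like _falcon_writel() above. */
static void example_write_reg(struct efx_nic *efx, u32 value)
{
        _falcon_writel(efx, cpu_to_le32(value), 0x10 /* example offset */);
}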


@@ -221,7 +221,7 @@ static int falcon_xgmii_status(struct efx_nic *efx)
 {
 efx_dword_t reg;
-if (FALCON_REV(efx) < FALCON_REV_B0)
+if (falcon_rev(efx) < FALCON_REV_B0)
 return 1;
 /* The ISR latches, so clear it and re-read */
@@ -241,7 +241,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
 {
 efx_dword_t reg;
-if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
+if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx))
 return;
 /* Flush the ISR */
@@ -454,7 +454,7 @@ static int falcon_check_xaui_link_up(struct efx_nic *efx)
 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
 __func__, tries);
-(void) falcon_reset_xaui(efx);
+falcon_reset_xaui(efx);
 udelay(200);
 tries--;
 }
@@ -572,7 +572,7 @@ int falcon_check_xmac(struct efx_nic *efx)
 xaui_link_ok = falcon_xaui_link_ok(efx);
 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
-(void) falcon_reset_xaui(efx);
+falcon_reset_xaui(efx);
 /* Call the PHY check_hw routine */
 rc = efx->phy_op->check_hw(efx);
@@ -639,7 +639,7 @@ int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
 reset = ((flow_control & EFX_FC_TX) &&
 !(efx->flow_control & EFX_FC_TX));
 if (EFX_WORKAROUND_11482(efx) && reset) {
-if (FALCON_REV(efx) >= FALCON_REV_B0) {
+if (falcon_rev(efx) >= FALCON_REV_B0) {
 /* Recover by resetting the EM block */
 if (efx->link_up)
 falcon_drain_tx_fifo(efx);


@@ -42,7 +42,7 @@
 #ifndef EFX_DRIVER_NAME
 #define EFX_DRIVER_NAME "sfc"
 #endif
-#define EFX_DRIVER_VERSION "2.2.0136"
+#define EFX_DRIVER_VERSION "2.2"
 #ifdef EFX_ENABLE_DEBUG
 #define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -52,28 +52,19 @@
 #define EFX_WARN_ON_PARANOID(x) do {} while (0)
 #endif
-#define NET_DEV_REGISTERED(efx) \
-((efx)->net_dev->reg_state == NETREG_REGISTERED)
-/* Include net device name in log messages if it has been registered.
-* Use efx->name not efx->net_dev->name so that races with (un)registration
-* are harmless.
-*/
-#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
 /* Un-rate-limited logging */
 #define EFX_ERR(efx, fmt, args...) \
-dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args)
+dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args)
 #define EFX_INFO(efx, fmt, args...) \
-dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args)
+dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args)
 #ifdef EFX_ENABLE_DEBUG
 #define EFX_LOG(efx, fmt, args...) \
-dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
+dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
 #else
 #define EFX_LOG(efx, fmt, args...) \
-dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
+dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args)
 #endif
 #define EFX_TRACE(efx, fmt, args...) do {} while (0)
@@ -90,11 +81,6 @@ do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
 #define EFX_LOG_RL(efx, fmt, args...) \
 do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
-/* Kernel headers may redefine inline anyway */
-#ifndef inline
-#define inline inline __attribute__ ((always_inline))
-#endif
 /**************************************************************************
 *
 * Efx data structures
@@ -695,7 +681,7 @@ struct efx_nic {
 struct workqueue_struct *workqueue;
 struct work_struct reset_work;
 struct delayed_work monitor_work;
-unsigned long membase_phys;
+resource_size_t membase_phys;
 void __iomem *membase;
 spinlock_t biu_lock;
 enum efx_int_mode interrupt_mode;
@@ -719,7 +705,7 @@ struct efx_nic {
 unsigned n_rx_nodesc_drop_cnt;
-void *nic_data;
+struct falcon_nic_data *nic_data;
 struct mutex mac_lock;
 int port_enabled;
@@ -760,6 +746,20 @@ struct efx_nic {
 void *loopback_selftest;
 };
+static inline int efx_dev_registered(struct efx_nic *efx)
+{
+return efx->net_dev->reg_state == NETREG_REGISTERED;
+}
+/* Net device name, for inclusion in log messages if it has been registered.
+* Use efx->name not efx->net_dev->name so that races with (un)registration
+* are harmless.
+*/
+static inline const char *efx_dev_name(struct efx_nic *efx)
+{
+return efx_dev_registered(efx) ? efx->name : "";
+}
 /**
 * struct efx_nic_type - Efx device type definition
 * @mem_bar: Memory BAR number
@@ -795,7 +795,7 @@ struct efx_nic_type {
 unsigned int txd_ring_mask;
 unsigned int rxd_ring_mask;
 unsigned int evq_size;
-dma_addr_t max_dma_mask;
+u64 max_dma_mask;
 unsigned int tx_dma_mask;
 unsigned bug5391_mask;


@@ -86,14 +86,17 @@ static unsigned int rx_refill_limit = 95;
 */
 #define EFX_RXD_HEAD_ROOM 2
-/* Macros for zero-order pages (potentially) containing multiple RX buffers */
-#define RX_DATA_OFFSET(_data) \
-(((unsigned long) (_data)) & (PAGE_SIZE-1))
-#define RX_BUF_OFFSET(_rx_buf) \
-RX_DATA_OFFSET((_rx_buf)->data)
-#define RX_PAGE_SIZE(_efx) \
-(PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
+static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf)
+{
+/* Offset is always within one page, so we don't need to consider
+* the page order.
+*/
+return (__force unsigned long) buf->data & (PAGE_SIZE - 1);
+}
+static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
+{
+return PAGE_SIZE << efx->rx_buffer_order;
+}
 /**************************************************************************
@@ -106,7 +109,7 @@ static unsigned int rx_refill_limit = 95;
 static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
 void **tcpudp_hdr, u64 *hdr_flags, void *priv)
 {
-struct efx_channel *channel = (struct efx_channel *)priv;
+struct efx_channel *channel = priv;
 struct iphdr *iph;
 struct tcphdr *th;
@@ -131,12 +134,12 @@ static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
 void *priv)
 {
-struct efx_channel *channel = (struct efx_channel *)priv;
+struct efx_channel *channel = priv;
 struct ethhdr *eh;
 struct iphdr *iph;
 /* We support EtherII and VLAN encapsulated IPv4 */
-eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
+eh = page_address(frag->page) + frag->page_offset;
 *mac_hdr = eh;
 if (eh->h_proto == htons(ETH_P_IP)) {
@@ -269,7 +272,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 return -ENOMEM;
 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
-0, RX_PAGE_SIZE(efx),
+0, efx_rx_buf_size(efx),
 PCI_DMA_FROMDEVICE);
 if (unlikely(pci_dma_mapping_error(dma_addr))) {
@@ -280,14 +283,14 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 rx_queue->buf_page = rx_buf->page;
 rx_queue->buf_dma_addr = dma_addr;
-rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
+rx_queue->buf_data = (page_address(rx_buf->page) +
 EFX_PAGE_IP_ALIGN);
 }
-offset = RX_DATA_OFFSET(rx_queue->buf_data);
 rx_buf->len = bytes;
-rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
 rx_buf->data = rx_queue->buf_data;
+offset = efx_rx_buf_offset(rx_buf);
+rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
 /* Try to pack multiple buffers per page */
 if (efx->rx_buffer_order == 0) {
@@ -295,7 +298,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
 offset += ((bytes + 0x1ff) & ~0x1ff);
-space = RX_PAGE_SIZE(efx) - offset;
+space = efx_rx_buf_size(efx) - offset;
 if (space >= bytes) {
 /* Refs dropped on kernel releasing each skb */
 get_page(rx_queue->buf_page);
@@ -344,7 +347,8 @@ static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
 EFX_BUG_ON_PARANOID(rx_buf->skb);
 if (rx_buf->unmap_addr) {
 pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
-RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
+efx_rx_buf_size(efx),
+PCI_DMA_FROMDEVICE);
 rx_buf->unmap_addr = 0;
 }
 } else if (likely(rx_buf->skb)) {
@@ -400,9 +404,10 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 return 0;
 /* Record minimum fill level */
-if (unlikely(fill_level < rx_queue->min_fill))
+if (unlikely(fill_level < rx_queue->min_fill)) {
 if (fill_level)
 rx_queue->min_fill = fill_level;
+}
 /* Acquire RX add lock. If this lock is contended, then a fast
 * fill must already be in progress (e.g. in the refill
@@ -552,7 +557,7 @@ static inline void efx_rx_packet_lro(struct efx_channel *channel,
 struct skb_frag_struct frags;
 frags.page = rx_buf->page;
-frags.page_offset = RX_BUF_OFFSET(rx_buf);
+frags.page_offset = efx_rx_buf_offset(rx_buf);
 frags.size = rx_buf->len;
 lro_receive_frags(lro_mgr, &frags, rx_buf->len,
@@ -597,7 +602,7 @@ static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
 if (unlikely(rx_buf->len > hdr_len)) {
 struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
 frag->page = rx_buf->page;
-frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
+frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len;
 frag->size = skb->len - hdr_len;
 skb_shinfo(skb)->nr_frags = 1;
 skb->data_len = frag->size;
@@ -851,7 +856,8 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 /* For a page that is part-way through splitting into RX buffers */
 if (rx_queue->buf_page != NULL) {
 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
-RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
+efx_rx_buf_size(rx_queue->efx),
+PCI_DMA_FROMDEVICE);
 __free_pages(rx_queue->buf_page,
 rx_queue->efx->rx_buffer_order);
 rx_queue->buf_page = NULL;
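efx_rx_buf_size() is simply PAGE_SIZE << rx_buffer_order and efx_rx_buf_offset() is the buffer's offset inside its page, and together they drive the "pack multiple buffers per page" branch above, which rounds each buffer up to a 512-byte boundary and packs another one only if the remaining space still holds a whole buffer. A small worked sketch of that arithmetic; the sizes are illustrative and the helper is not part of the driver.

#include <asm/page.h>           /* PAGE_SIZE */

/* With 4 KiB pages, order 0 and 1700-byte buffers: the stride rounds up
 * to 2048, so a second buffer fits (4096 - 2048 >= 1700) but a third
 * would not - the same test as "space >= bytes" above. */
static int can_pack_another_buffer(unsigned int order, unsigned int offset,
                                   unsigned int bytes)
{
        unsigned int page_bytes = PAGE_SIZE << order;   /* efx_rx_buf_size() */
        unsigned int stride = (bytes + 0x1ff) & ~0x1ff; /* 512-byte rounding */

        if (offset + stride > page_bytes)
                return 0;
        return page_bytes - (offset + stride) >= bytes;
}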


@@ -290,7 +290,7 @@ void efx_loopback_rx_packet(struct efx_nic *efx,
 payload = &state->payload;
-received = (struct efx_loopback_payload *)(char *) buf_ptr;
+received = (struct efx_loopback_payload *) buf_ptr;
 received->ip.saddr = payload->ip.saddr;
 received->ip.check = payload->ip.check;
@@ -424,10 +424,10 @@ static int efx_tx_loopback(struct efx_tx_queue *tx_queue)
 * interrupt handler. */
 smp_wmb();
-if (NET_DEV_REGISTERED(efx))
+if (efx_dev_registered(efx))
 netif_tx_lock_bh(efx->net_dev);
 rc = efx_xmit(efx, tx_queue, skb);
-if (NET_DEV_REGISTERED(efx))
+if (efx_dev_registered(efx))
 netif_tx_unlock_bh(efx->net_dev);
 if (rc != NETDEV_TX_OK) {
@@ -453,7 +453,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
 int tx_done = 0, rx_good, rx_bad;
 int i, rc = 0;
-if (NET_DEV_REGISTERED(efx))
+if (efx_dev_registered(efx))
 netif_tx_lock_bh(efx->net_dev);
 /* Count the number of tx completions, and decrement the refcnt. Any
@@ -465,7 +465,7 @@ static int efx_rx_loopback(struct efx_tx_queue *tx_queue,
 dev_kfree_skb_any(skb);
 }
-if (NET_DEV_REGISTERED(efx))
+if (efx_dev_registered(efx))
 netif_tx_unlock_bh(efx->net_dev);
 /* Check TX completion and received packet counts */
@@ -517,6 +517,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
 state->packet_count = min(1 << (i << 2), state->packet_count);
 state->skbs = kzalloc(sizeof(state->skbs[0]) *
 state->packet_count, GFP_KERNEL);
+if (!state->skbs)
+return -ENOMEM;
 state->flush = 0;
 EFX_LOG(efx, "TX queue %d testing %s loopback with %d "
@@ -700,7 +702,7 @@ int efx_offline_test(struct efx_nic *efx,
 * "flushing" so all inflight packets are dropped */
 BUG_ON(efx->loopback_selftest);
 state->flush = 1;
-efx->loopback_selftest = (void *)state;
+efx->loopback_selftest = state;
 rc = efx_test_loopbacks(efx, tests, loopback_modes);


@@ -116,18 +116,18 @@ void sfe4001_poweroff(struct efx_nic *efx)
 /* Turn off all power rails */
 out = 0xff;
-(void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
+efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
 /* Disable port 1 outputs on IO expander */
 cfg = 0xff;
-(void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
+efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
 /* Disable port 0 outputs on IO expander */
 cfg = 0xff;
-(void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
+efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
 /* Clear any over-temperature alert */
-(void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
+efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
 }
 /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected
@@ -253,14 +253,14 @@ done:
 fail3:
 /* Turn off all power rails */
 out = 0xff;
-(void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
+efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
 /* Disable port 1 outputs on IO expander */
 out = 0xff;
-(void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
+efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
 fail2:
 /* Disable port 0 outputs on IO expander */
 out = 0xff;
-(void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
+efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
 fail1:
 return rc;
 }


@ -211,6 +211,8 @@ static int tenxpress_phy_init(struct efx_nic *efx)
int rc = 0; int rc = 0;
phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data; efx->phy_data = phy_data;
tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
@ -376,7 +378,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
* perform a special software reset */ * perform a special software reset */
if ((phy_data->tx_disabled && !efx->tx_disabled) || if ((phy_data->tx_disabled && !efx->tx_disabled) ||
loop_change) { loop_change) {
(void) tenxpress_special_reset(efx); tenxpress_special_reset(efx);
falcon_reset_xaui(efx); falcon_reset_xaui(efx);
} }


@ -387,7 +387,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
if (unlikely(tx_queue->stopped)) { if (unlikely(tx_queue->stopped)) {
fill_level = tx_queue->insert_count - tx_queue->read_count; fill_level = tx_queue->insert_count - tx_queue->read_count;
if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx)); EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
/* Do this under netif_tx_lock(), to avoid racing /* Do this under netif_tx_lock(), to avoid racing
* with efx_xmit(). */ * with efx_xmit(). */
@ -639,11 +639,12 @@ static void efx_tsoh_block_free(struct efx_tx_queue *tx_queue,
base_dma = tsoh->dma_addr & PAGE_MASK; base_dma = tsoh->dma_addr & PAGE_MASK;
p = &tx_queue->tso_headers_free; p = &tx_queue->tso_headers_free;
while (*p != NULL) while (*p != NULL) {
if (((unsigned long)*p & PAGE_MASK) == base_kva) if (((unsigned long)*p & PAGE_MASK) == base_kva)
*p = (*p)->next; *p = (*p)->next;
else else
p = &(*p)->next; p = &(*p)->next;
}
pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma);
} }
@ -939,9 +940,10 @@ static inline int tso_start_new_packet(struct efx_tx_queue *tx_queue,
/* Allocate a DMA-mapped header buffer. */ /* Allocate a DMA-mapped header buffer. */
if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) {
if (tx_queue->tso_headers_free == NULL) if (tx_queue->tso_headers_free == NULL) {
if (efx_tsoh_block_alloc(tx_queue)) if (efx_tsoh_block_alloc(tx_queue))
return -1; return -1;
}
EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free);
tsoh = tx_queue->tso_headers_free; tsoh = tx_queue->tso_headers_free;
tx_queue->tso_headers_free = tsoh->next; tx_queue->tso_headers_free = tsoh->next;
@ -1106,9 +1108,10 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
{ {
unsigned i; unsigned i;
if (tx_queue->buffer) if (tx_queue->buffer) {
for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i)
efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
}
while (tx_queue->tso_headers_free != NULL) while (tx_queue->tso_headers_free != NULL)
efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free, efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,


@ -16,7 +16,7 @@
*/ */
#define EFX_WORKAROUND_ALWAYS(efx) 1 #define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1) #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
/* XAUI resets if link not detected */ /* XAUI resets if link not detected */
#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS


@ -85,7 +85,9 @@ static int xfp_phy_init(struct efx_nic *efx)
int rc; int rc;
phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL);
efx->phy_data = (void *) phy_data; if (!phy_data)
return -ENOMEM;
efx->phy_data = phy_data;
EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
" %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),


@ -1159,17 +1159,9 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
} }
#ifdef SKY2_VLAN_TAG_USED #ifdef SKY2_VLAN_TAG_USED
static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
{ {
struct sky2_port *sky2 = netdev_priv(dev); if (onoff) {
struct sky2_hw *hw = sky2->hw;
u16 port = sky2->port;
netif_tx_lock_bh(dev);
napi_disable(&hw->napi);
sky2->vlgrp = grp;
if (grp) {
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
RX_VLAN_STRIP_ON); RX_VLAN_STRIP_ON);
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
@ -1180,6 +1172,19 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
TX_VLAN_TAG_OFF); TX_VLAN_TAG_OFF);
} }
}
static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
u16 port = sky2->port;
netif_tx_lock_bh(dev);
napi_disable(&hw->napi);
sky2->vlgrp = grp;
sky2_set_vlan_mode(hw, port, grp != NULL);
sky2_read32(hw, B0_Y2_SP_LISR); sky2_read32(hw, B0_Y2_SP_LISR);
napi_enable(&hw->napi); napi_enable(&hw->napi);
@ -1418,6 +1423,10 @@ static int sky2_up(struct net_device *dev)
sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
TX_RING_SIZE - 1); TX_RING_SIZE - 1);
#ifdef SKY2_VLAN_TAG_USED
sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
#endif
err = sky2_rx_start(sky2); err = sky2_rx_start(sky2);
if (err) if (err)
goto err_out; goto err_out;


@ -264,7 +264,7 @@ struct xl_private {
u16 asb; u16 asb;
u8 __iomem *xl_mmio; u8 __iomem *xl_mmio;
char *xl_card_name; const char *xl_card_name;
struct pci_dev *pdev ; struct pci_dev *pdev ;
spinlock_t xl_lock ; spinlock_t xl_lock ;


@ -254,7 +254,7 @@ struct olympic_private {
u8 __iomem *olympic_mmio; u8 __iomem *olympic_mmio;
u8 __iomem *olympic_lap; u8 __iomem *olympic_lap;
struct pci_dev *pdev ; struct pci_dev *pdev ;
char *olympic_card_name ; const char *olympic_card_name;
spinlock_t olympic_lock ; spinlock_t olympic_lock ;


@ -225,6 +225,9 @@ static void uli526x_set_filter_mode(struct net_device *);
static const struct ethtool_ops netdev_ethtool_ops; static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(long, int); static u16 read_srom_word(long, int);
static irqreturn_t uli526x_interrupt(int, void *); static irqreturn_t uli526x_interrupt(int, void *);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void uli526x_poll(struct net_device *dev);
#endif
static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
static void allocate_rx_buffer(struct uli526x_board_info *); static void allocate_rx_buffer(struct uli526x_board_info *);
static void update_cr6(u32, unsigned long); static void update_cr6(u32, unsigned long);
@ -339,6 +342,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
dev->get_stats = &uli526x_get_stats; dev->get_stats = &uli526x_get_stats;
dev->set_multicast_list = &uli526x_set_filter_mode; dev->set_multicast_list = &uli526x_set_filter_mode;
dev->ethtool_ops = &netdev_ethtool_ops; dev->ethtool_ops = &netdev_ethtool_ops;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = &uli526x_poll;
#endif
spin_lock_init(&db->lock); spin_lock_init(&db->lock);
@ -681,8 +687,9 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
db->cr5_data = inl(ioaddr + DCR5); db->cr5_data = inl(ioaddr + DCR5);
outl(db->cr5_data, ioaddr + DCR5); outl(db->cr5_data, ioaddr + DCR5);
if ( !(db->cr5_data & 0x180c1) ) { if ( !(db->cr5_data & 0x180c1) ) {
spin_unlock_irqrestore(&db->lock, flags); /* Restore CR7 to enable interrupt mask */
outl(db->cr7_data, ioaddr + DCR7); outl(db->cr7_data, ioaddr + DCR7);
spin_unlock_irqrestore(&db->lock, flags);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
@ -715,6 +722,13 @@ static irqreturn_t uli526x_interrupt(int irq, void *dev_id)
return IRQ_HANDLED; return IRQ_HANDLED;
} }
#ifdef CONFIG_NET_POLL_CONTROLLER
static void uli526x_poll(struct net_device *dev)
{
/* ISR grabs the irqsave lock, so this should be safe */
uli526x_interrupt(dev->irq, dev);
}
#endif
/* /*
* Free TX resource after TX complete * Free TX resource after TX complete
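For reference, a minimal netpoll hook in the style the uli526x hunk adds; the foo_* names are placeholders, not part of this patch. Re-using the ISR is safe here because, as the comment in the hunk notes, the handler takes its own irqsave spinlock; some drivers additionally bracket the call with disable_irq()/enable_irq().

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Called by netconsole and friends when normal interrupt delivery
 * cannot be relied on; simply run the interrupt handler by hand. */
static void foo_poll(struct net_device *dev)
{
	foo_interrupt(dev->irq, dev);
}
#endif

/* ...and in probe, on a pre-net_device_ops kernel of this era:
 *	dev->poll_controller = foo_poll;
 */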


@ -237,7 +237,7 @@ static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
skb->dev = ugeth->dev; skb->dev = ugeth->dev;
out_be32(&((struct qe_bd __iomem *)bd)->buf, out_be32(&((struct qe_bd __iomem *)bd)->buf,
dma_map_single(NULL, dma_map_single(&ugeth->dev->dev,
skb->data, skb->data,
ugeth->ug_info->uf_info.max_rx_buf_length + ugeth->ug_info->uf_info.max_rx_buf_length +
UCC_GETH_RX_DATA_BUF_ALIGNMENT, UCC_GETH_RX_DATA_BUF_ALIGNMENT,
@ -2158,7 +2158,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
continue; continue;
for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
if (ugeth->tx_skbuff[i][j]) { if (ugeth->tx_skbuff[i][j]) {
dma_unmap_single(NULL, dma_unmap_single(&ugeth->dev->dev,
in_be32(&((struct qe_bd __iomem *)bd)->buf), in_be32(&((struct qe_bd __iomem *)bd)->buf),
(in_be32((u32 __iomem *)bd) & (in_be32((u32 __iomem *)bd) &
BD_LENGTH_MASK), BD_LENGTH_MASK),
@ -2186,7 +2186,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
bd = ugeth->p_rx_bd_ring[i]; bd = ugeth->p_rx_bd_ring[i];
for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
if (ugeth->rx_skbuff[i][j]) { if (ugeth->rx_skbuff[i][j]) {
dma_unmap_single(NULL, dma_unmap_single(&ugeth->dev->dev,
in_be32(&((struct qe_bd __iomem *)bd)->buf), in_be32(&((struct qe_bd __iomem *)bd)->buf),
ugeth->ug_info-> ugeth->ug_info->
uf_info.max_rx_buf_length + uf_info.max_rx_buf_length +
@ -3406,7 +3406,8 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set up the buffer descriptor */ /* set up the buffer descriptor */
out_be32(&((struct qe_bd __iomem *)bd)->buf, out_be32(&((struct qe_bd __iomem *)bd)->buf,
dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); dma_map_single(&ugeth->dev->dev, skb->data,
skb->len, DMA_TO_DEVICE));
/* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */
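The ucc_geth hunks replace dma_map_single(NULL, ...) with a real struct device. A minimal sketch of the map/unmap pairing, assuming illustrative foo_* helpers and that ndev->dev is the device the buffers are mapped for, as in the patch:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static dma_addr_t foo_map_rx(struct net_device *ndev, struct sk_buff *skb,
			     unsigned int len)
{
	/* a real device, never NULL, so the right DMA ops and mask apply */
	return dma_map_single(&ndev->dev, skb->data, len, DMA_FROM_DEVICE);
}

static void foo_unmap_rx(struct net_device *ndev, dma_addr_t addr,
			 unsigned int len)
{
	/* unmap with the same device, size and direction used for the map */
	dma_unmap_single(&ndev->dev, addr, len, DMA_FROM_DEVICE);
}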


@ -1440,6 +1440,10 @@ static const struct usb_device_id products [] = {
// Belkin F5D5055 // Belkin F5D5055
USB_DEVICE(0x050d, 0x5055), USB_DEVICE(0x050d, 0x5055),
.driver_info = (unsigned long) &ax88178_info, .driver_info = (unsigned long) &ax88178_info,
}, {
// Apple USB Ethernet Adapter
USB_DEVICE(0x05ac, 0x1402),
.driver_info = (unsigned long) &ax88772_info,
}, },
{ }, // END { }, // END
}; };


@ -194,7 +194,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf)
dev_dbg(&info->control->dev, dev_dbg(&info->control->dev,
"rndis response error, code %d\n", retval); "rndis response error, code %d\n", retval);
} }
msleep(2); msleep(20);
} }
dev_dbg(&info->control->dev, "rndis response timeout\n"); dev_dbg(&info->control->dev, "rndis response timeout\n");
return -ETIMEDOUT; return -ETIMEDOUT;


@ -470,8 +470,7 @@ static void virtnet_remove(struct virtio_device *vdev)
kfree_skb(skb); kfree_skb(skb);
vi->num--; vi->num--;
} }
while ((skb = __skb_dequeue(&vi->send)) != NULL) __skb_queue_purge(&vi->send);
kfree_skb(skb);
BUG_ON(vi->num != 0); BUG_ON(vi->num != 0);
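The virtnet_remove() hunk above and the xennet hunks further down make the same substitution. The two forms are equivalent; a small sketch (the queue name is illustrative):

#include <linux/skbuff.h>

static void drain_open_coded(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(q)) != NULL)
		kfree_skb(skb);
}

static void drain_with_helper(struct sk_buff_head *q)
{
	/* same walk-and-free; like __skb_dequeue() it takes no lock,
	 * so the caller must already own the queue */
	__skb_queue_purge(q);
}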


@ -43,8 +43,7 @@ static const char* version = "HDLC support module revision 1.22";
#undef DEBUG_LINK #undef DEBUG_LINK
static struct hdlc_proto *first_proto = NULL; static struct hdlc_proto *first_proto;
static int hdlc_change_mtu(struct net_device *dev, int new_mtu) static int hdlc_change_mtu(struct net_device *dev, int new_mtu)
{ {
@ -314,21 +313,25 @@ void detach_hdlc_protocol(struct net_device *dev)
void register_hdlc_protocol(struct hdlc_proto *proto) void register_hdlc_protocol(struct hdlc_proto *proto)
{ {
rtnl_lock();
proto->next = first_proto; proto->next = first_proto;
first_proto = proto; first_proto = proto;
rtnl_unlock();
} }
void unregister_hdlc_protocol(struct hdlc_proto *proto) void unregister_hdlc_protocol(struct hdlc_proto *proto)
{ {
struct hdlc_proto **p = &first_proto; struct hdlc_proto **p;
while (*p) {
if (*p == proto) { rtnl_lock();
*p = proto->next; p = &first_proto;
return; while (*p != proto) {
} BUG_ON(!*p);
p = &((*p)->next); p = &((*p)->next);
} }
*p = proto->next;
rtnl_unlock();
} }


@ -56,6 +56,7 @@ struct cisco_state {
cisco_proto settings; cisco_proto settings;
struct timer_list timer; struct timer_list timer;
spinlock_t lock;
unsigned long last_poll; unsigned long last_poll;
int up; int up;
int request_sent; int request_sent;
@ -158,6 +159,7 @@ static int cisco_rx(struct sk_buff *skb)
{ {
struct net_device *dev = skb->dev; struct net_device *dev = skb->dev;
hdlc_device *hdlc = dev_to_hdlc(dev); hdlc_device *hdlc = dev_to_hdlc(dev);
struct cisco_state *st = state(hdlc);
struct hdlc_header *data = (struct hdlc_header*)skb->data; struct hdlc_header *data = (struct hdlc_header*)skb->data;
struct cisco_packet *cisco_data; struct cisco_packet *cisco_data;
struct in_device *in_dev; struct in_device *in_dev;
@ -220,11 +222,12 @@ static int cisco_rx(struct sk_buff *skb)
goto rx_error; goto rx_error;
case CISCO_KEEPALIVE_REQ: case CISCO_KEEPALIVE_REQ:
state(hdlc)->rxseq = ntohl(cisco_data->par1); spin_lock(&st->lock);
if (state(hdlc)->request_sent && st->rxseq = ntohl(cisco_data->par1);
ntohl(cisco_data->par2) == state(hdlc)->txseq) { if (st->request_sent &&
state(hdlc)->last_poll = jiffies; ntohl(cisco_data->par2) == st->txseq) {
if (!state(hdlc)->up) { st->last_poll = jiffies;
if (!st->up) {
u32 sec, min, hrs, days; u32 sec, min, hrs, days;
sec = ntohl(cisco_data->time) / 1000; sec = ntohl(cisco_data->time) / 1000;
min = sec / 60; sec -= min * 60; min = sec / 60; sec -= min * 60;
@ -232,12 +235,12 @@ static int cisco_rx(struct sk_buff *skb)
days = hrs / 24; hrs -= days * 24; days = hrs / 24; hrs -= days * 24;
printk(KERN_INFO "%s: Link up (peer " printk(KERN_INFO "%s: Link up (peer "
"uptime %ud%uh%um%us)\n", "uptime %ud%uh%um%us)\n",
dev->name, days, hrs, dev->name, days, hrs, min, sec);
min, sec);
netif_dormant_off(dev); netif_dormant_off(dev);
state(hdlc)->up = 1; st->up = 1;
} }
} }
spin_unlock(&st->lock);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return NET_RX_SUCCESS; return NET_RX_SUCCESS;
@ -261,24 +264,25 @@ static void cisco_timer(unsigned long arg)
{ {
struct net_device *dev = (struct net_device *)arg; struct net_device *dev = (struct net_device *)arg;
hdlc_device *hdlc = dev_to_hdlc(dev); hdlc_device *hdlc = dev_to_hdlc(dev);
struct cisco_state *st = state(hdlc);
if (state(hdlc)->up && spin_lock(&st->lock);
time_after(jiffies, state(hdlc)->last_poll + if (st->up &&
state(hdlc)->settings.timeout * HZ)) { time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) {
state(hdlc)->up = 0; st->up = 0;
printk(KERN_INFO "%s: Link down\n", dev->name); printk(KERN_INFO "%s: Link down\n", dev->name);
netif_dormant_on(dev); netif_dormant_on(dev);
} }
cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
htonl(++state(hdlc)->txseq), htonl(st->rxseq));
htonl(state(hdlc)->rxseq)); st->request_sent = 1;
state(hdlc)->request_sent = 1; spin_unlock(&st->lock);
state(hdlc)->timer.expires = jiffies +
state(hdlc)->settings.interval * HZ; st->timer.expires = jiffies + st->settings.interval * HZ;
state(hdlc)->timer.function = cisco_timer; st->timer.function = cisco_timer;
state(hdlc)->timer.data = arg; st->timer.data = arg;
add_timer(&state(hdlc)->timer); add_timer(&st->timer);
} }
@ -286,15 +290,20 @@ static void cisco_timer(unsigned long arg)
static void cisco_start(struct net_device *dev) static void cisco_start(struct net_device *dev)
{ {
hdlc_device *hdlc = dev_to_hdlc(dev); hdlc_device *hdlc = dev_to_hdlc(dev);
state(hdlc)->up = 0; struct cisco_state *st = state(hdlc);
state(hdlc)->request_sent = 0; unsigned long flags;
state(hdlc)->txseq = state(hdlc)->rxseq = 0;
init_timer(&state(hdlc)->timer); spin_lock_irqsave(&st->lock, flags);
state(hdlc)->timer.expires = jiffies + HZ; /*First poll after 1s*/ st->up = 0;
state(hdlc)->timer.function = cisco_timer; st->request_sent = 0;
state(hdlc)->timer.data = (unsigned long)dev; st->txseq = st->rxseq = 0;
add_timer(&state(hdlc)->timer); spin_unlock_irqrestore(&st->lock, flags);
init_timer(&st->timer);
st->timer.expires = jiffies + HZ; /* First poll after 1 s */
st->timer.function = cisco_timer;
st->timer.data = (unsigned long)dev;
add_timer(&st->timer);
} }
@ -302,10 +311,16 @@ static void cisco_start(struct net_device *dev)
static void cisco_stop(struct net_device *dev) static void cisco_stop(struct net_device *dev)
{ {
hdlc_device *hdlc = dev_to_hdlc(dev); hdlc_device *hdlc = dev_to_hdlc(dev);
del_timer_sync(&state(hdlc)->timer); struct cisco_state *st = state(hdlc);
unsigned long flags;
del_timer_sync(&st->timer);
spin_lock_irqsave(&st->lock, flags);
netif_dormant_on(dev); netif_dormant_on(dev);
state(hdlc)->up = 0; st->up = 0;
state(hdlc)->request_sent = 0; st->request_sent = 0;
spin_unlock_irqrestore(&st->lock, flags);
} }
@ -367,6 +382,7 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
return result; return result;
memcpy(&state(hdlc)->settings, &new_settings, size); memcpy(&state(hdlc)->settings, &new_settings, size);
spin_lock_init(&state(hdlc)->lock);
dev->hard_start_xmit = hdlc->xmit; dev->hard_start_xmit = hdlc->xmit;
dev->header_ops = &cisco_header_ops; dev->header_ops = &cisco_header_ops;
dev->type = ARPHRD_CISCO; dev->type = ARPHRD_CISCO;
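A rough sketch of the locking split the cisco HDLC hunks introduce, with placeholder foo_* names: as the patch structures it, the keepalive timer and the RX handler run in softirq context and take the plain lock against each other, while start/stop run in process context and use the interrupt-disabling variant as the conservative choice.

#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct foo_state {
	spinlock_t	lock;
	int		up;
	unsigned long	last_poll;
};

static void foo_rx_keepalive(struct foo_state *st)	/* softirq context */
{
	spin_lock(&st->lock);
	st->last_poll = jiffies;
	spin_unlock(&st->lock);
}

static void foo_stop(struct foo_state *st)		/* process context */
{
	unsigned long flags;

	spin_lock_irqsave(&st->lock, flags);
	st->up = 0;
	spin_unlock_irqrestore(&st->lock, flags);
}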


@ -946,8 +946,7 @@ err:
work_done++; work_done++;
} }
while ((skb = __skb_dequeue(&errq))) __skb_queue_purge(&errq);
kfree_skb(skb);
work_done -= handle_incoming_queue(dev, &rxq); work_done -= handle_incoming_queue(dev, &rxq);
@ -1079,8 +1078,7 @@ static void xennet_release_rx_bufs(struct netfront_info *np)
} }
} }
while ((skb = __skb_dequeue(&free_list)) != NULL) __skb_queue_purge(&free_list);
dev_kfree_skb(skb);
spin_unlock_bh(&np->rx_lock); spin_unlock_bh(&np->rx_lock);
} }


@ -514,12 +514,10 @@ struct net_device
#define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */ #define NETIF_F_NETNS_LOCAL 8192 /* Does not change network namespaces */
#define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */ #define NETIF_F_MULTI_QUEUE 16384 /* Has multiple TX/RX queues */
#define NETIF_F_LRO 32768 /* large receive offload */ #define NETIF_F_LRO 32768 /* large receive offload */
#define NETIF_F_VLAN_TSO 65536 /* Supports TSO for VLANs */
#define NETIF_F_VLAN_CSUM 131072 /* Supports TX checksumming for VLANs */
/* Segmentation offload features */ /* Segmentation offload features */
#define NETIF_F_GSO_SHIFT 20 #define NETIF_F_GSO_SHIFT 16
#define NETIF_F_GSO_MASK 0xfff00000 #define NETIF_F_GSO_MASK 0xffff0000
#define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT) #define NETIF_F_TSO (SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT) #define NETIF_F_UFO (SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT) #define NETIF_F_GSO_ROBUST (SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
@ -747,6 +745,9 @@ struct net_device
/* rtnetlink link ops */ /* rtnetlink link ops */
const struct rtnl_link_ops *rtnl_link_ops; const struct rtnl_link_ops *rtnl_link_ops;
/* VLAN feature mask */
unsigned long vlan_features;
/* for setting kernel sock attribute on TCP connection setup */ /* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE 65536 #define GSO_MAX_SIZE 65536
unsigned int gso_max_size; unsigned int gso_max_size;
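With the two per-VLAN feature bits dropped, bits 16-17 of dev->features are free again and the GSO block slides back down. Assuming SKB_GSO_TCPV4 and SKB_GSO_UDP keep their usual values of 1 and 2, the defines above work out as:

/*
 *   NETIF_F_TSO = SKB_GSO_TCPV4 << 16 = 0x00010000   (was 1 << 20)
 *   NETIF_F_UFO = SKB_GSO_UDP   << 16 = 0x00020000
 *
 * and NETIF_F_GSO_MASK = 0xffff0000 now covers the whole upper
 * half-word, so every SKB_GSO_* flag has a NETIF_F_* counterpart.
 */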


@ -772,12 +772,13 @@ static inline int __nla_parse_nested_compat(struct nlattr *tb[], int maxtype,
const struct nla_policy *policy, const struct nla_policy *policy,
int len) int len)
{ {
if (nla_len(nla) < len) int nested_len = nla_len(nla) - NLA_ALIGN(len);
if (nested_len < 0)
return -1; return -1;
if (nla_len(nla) >= NLA_ALIGN(len) + sizeof(struct nlattr)) if (nested_len >= nla_attr_size(0))
return nla_parse_nested(tb, maxtype, return nla_parse(tb, maxtype, nla_data(nla) + NLA_ALIGN(len),
nla_data(nla) + NLA_ALIGN(len), nested_len, policy);
policy);
memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
return 0; return 0;
} }
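The rewritten helper follows from the layout of a nested-compat attribute: a fixed structure of len bytes, padded to NLA_ALIGN(len), optionally followed by ordinary nested attributes. A sketch of that layout and the derived length:

/*
 *   |<----- len ----->|pad|<------- nested_len ------->|
 *   +-----------------+---+----------------------------+
 *   |  fixed payload  |   |  nla  |  nla  |  nla  | ...|
 *   +-----------------+---+----------------------------+
 *
 * nested_len = nla_len(nla) - NLA_ALIGN(len): a negative value means
 * the attribute cannot even hold the fixed part, and anything of at
 * least nla_attr_size(0) bytes is handed to nla_parse() as attributes.
 */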


@ -387,14 +387,8 @@ static void vlan_transfer_features(struct net_device *dev,
{ {
unsigned long old_features = vlandev->features; unsigned long old_features = vlandev->features;
if (dev->features & NETIF_F_VLAN_TSO) { vlandev->features &= ~dev->vlan_features;
vlandev->features &= ~VLAN_TSO_FEATURES; vlandev->features |= dev->features & dev->vlan_features;
vlandev->features |= dev->features & VLAN_TSO_FEATURES;
}
if (dev->features & NETIF_F_VLAN_CSUM) {
vlandev->features &= ~NETIF_F_ALL_CSUM;
vlandev->features |= dev->features & NETIF_F_ALL_CSUM;
}
if (old_features != vlandev->features) if (old_features != vlandev->features)
netdev_features_change(vlandev); netdev_features_change(vlandev);

Просмотреть файл

@ -7,8 +7,6 @@
#define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT) #define VLAN_GRP_HASH_SIZE (1 << VLAN_GRP_HASH_SHIFT)
#define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1) #define VLAN_GRP_HASH_MASK (VLAN_GRP_HASH_SIZE - 1)
#define VLAN_TSO_FEATURES (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG)
/* Find a VLAN device by the MAC address of its Ethernet device, and /* Find a VLAN device by the MAC address of its Ethernet device, and
* it's VLAN ID. The default configuration is to have VLAN's scope * it's VLAN ID. The default configuration is to have VLAN's scope
* to be box-wide, so the MAC will be ignored. The mac will only be * to be box-wide, so the MAC will be ignored. The mac will only be


@ -663,10 +663,7 @@ static int vlan_dev_init(struct net_device *dev)
(1<<__LINK_STATE_DORMANT))) | (1<<__LINK_STATE_DORMANT))) |
(1<<__LINK_STATE_PRESENT); (1<<__LINK_STATE_PRESENT);
if (real_dev->features & NETIF_F_VLAN_TSO) dev->features |= real_dev->features & real_dev->vlan_features;
dev->features |= real_dev->features & VLAN_TSO_FEATURES;
if (real_dev->features & NETIF_F_VLAN_CSUM)
dev->features |= real_dev->features & NETIF_F_ALL_CSUM;
/* ipv6 shared card related stuff */ /* ipv6 shared card related stuff */
dev->dev_id = real_dev->dev_id; dev->dev_id = real_dev->dev_id;