Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) If an IPVS tunnel is created with a mixed-family destination
    address, it cannot be removed. Fix from Alexey Andriyanov.

 2) Fix module refcount underflow in netfilter's nft_compat, from Pablo
    Neira Ayuso.

 3) Generic statistics infrastructure can reference variables sitting on
    a released function stack, therefore use dynamic allocation always.
    Fix from Ignacy Gawędzki.

 4) skb_copy_bits() return value test is inverted in ip_check_defrag().

 5) Fix network namespace exit in openvswitch, we have to release all of
    the per-net vports. From Pravin B Shelar.

 6) Fix signedness bug in CAIF's cfpkt_iterate(), from Dan Carpenter.

 7) Fix rhashtable grow/shrink behavior, only expand during inserts and
    shrink during deletes. From Daniel Borkmann.

 8) Netdevice names with semicolons should never be allowed, because
    they serve as a separator. From Matthew Thode.

 9) Use {,__}set_current_state() where appropriate, from Fabian
    Frederick.

10) Revert byte queue limits support in r8169 driver, it's causing
    regressions we can't figure out.

11) tcp_should_expand_sndbuf() erroneously uses tp->packets_out to
    measure packets in flight, properly use tcp_packets_in_flight()
    instead. From Neal Cardwell.

12) Fix accidental removal of support for bluetooth in CSR based Intel
    wireless cards. From Marcel Holtmann.

13) We accidentally added a behavioral change between native and compat
    tasks, wrt testing the MSG_CMSG_COMPAT bit. Just ignore it if the
    user happened to set it in a native binary as that was always the
    behavior we had. From Catalin Marinas.

14) Check genlmsg_unicast() return value in hwsim netlink tx frame
    handling, from Bob Copeland.

15) Fix stale ->radar_required setting in mac80211 that can prevent
    starting new scans, from Eliad Peller.

16) Fix memory leak in nl80211 monitor, from Johannes Berg.

17) Fix race in TX index handling in xen-netback, from David Vrabel.

18) Don't enable interrupts in amx-xgbe driver until all software et al.
    state is ready for the interrupt handler to run. From Thomas
    Lendacky.

19) Add missing netlink_ns_capable() checks to rtnl_newlink(), from Eric
    W Biederman.

20) The amount of header space needed in macvtap was not calculated
    properly, fix it otherwise we splat past the beginning of the
    packet. From Eric Dumazet.

21) Fix bcmgenet TCP TX perf regression, from Jaedon Shin.

22) Don't raw initialize or mod timers, use setup_timer() and
    mod_timer() instead. From Vaishali Thakkar.

23) Fix software maintained statistics in bcmgenet and systemport
    drivers, from Florian Fainelli.

24) DMA descriptor updates in sh_eth need proper memory barriers, from
    Ben Hutchings.

25) Don't do UDP Fragmentation Offload on RAW sockets, from Michal
    Kubecek.

26) Openvswitch's non-masked set actions aren't constructed properly
    into netlink messages, fix from Joe Stringer.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (116 commits)
  openvswitch: Fix serialization of non-masked set actions.
  gianfar: Reduce logging noise seen due to phy polling if link is down
  ibmveth: Add function to enable live MAC address changes
  net: bridge: add compile-time assert for cb struct size
  udp: only allow UFO for packets from SOCK_DGRAM sockets
  sh_eth: Really fix padding of short frames on TX
  Revert "sh_eth: Enable Rx descriptor word 0 shift for r8a7790"
  sh_eth: Fix RX recovery on R-Car in case of RX ring underrun
  sh_eth: Ensure proper ordering of descriptor active bit write/read
  net/mlx4_en: Disbale GRO for incoming loopback/selftest packets
  net/mlx4_core: Fix wrong mask and error flow for the update-qp command
  net: systemport: fix software maintained statistics
  net: bcmgenet: fix software maintained statistics
  rxrpc: don't multiply with HZ twice
  rxrpc: terminate retrans loop when sending of skb fails
  net/hsr: Fix NULL pointer dereference and refcnt bugs when deleting a HSR interface.
  net: pasemi: Use setup_timer and mod_timer
  net: stmmac: Use setup_timer and mod_timer
  net: 8390: axnet_cs: Use setup_timer and mod_timer
  net: 8390: pcnet_cs: Use setup_timer and mod_timer
  ...
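Items 9 and 22 above are mechanical cleanups with the same shape: open-coded timer arming collapses into setup_timer() plus mod_timer(), which several of the driver patches below (axnet_cs, pcnet_cs) apply verbatim. A minimal sketch of the conversion, using the pre-4.15 timer API as these drivers did:

	/* before: five statements to arm a watchdog timer */
	init_timer(&info->watchdog);
	info->watchdog.function = ei_watchdog;
	info->watchdog.data = (u_long)dev;
	info->watchdog.expires = jiffies + HZ;
	add_timer(&info->watchdog);

	/* after: bind callback and data once, then (re)arm */
	setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
	mod_timer(&info->watchdog, jiffies + HZ);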
This commit is contained in: 789d7f60cd
@@ -27,6 +27,8 @@ property is used.
 - amd,serdes-cdr-rate: CDR rate speed selection
 - amd,serdes-pq-skew: PQ (data sampling) skew
 - amd,serdes-tx-amp: TX amplitude boost
+- amd,serdes-dfe-tap-config: DFE taps available to run
+- amd,serdes-dfe-tap-enable: DFE taps to enable
 
 Example:
 	xgbe_phy@e1240800 {
@@ -41,4 +43,6 @@ Example:
 		amd,serdes-cdr-rate = <2>, <2>, <7>;
 		amd,serdes-pq-skew = <10>, <10>, <30>;
 		amd,serdes-tx-amp = <15>, <15>, <10>;
+		amd,serdes-dfe-tap-config = <3>, <3>, <1>;
+		amd,serdes-dfe-tap-enable = <0>, <0>, <127>;
 	};

@@ -2065,7 +2065,7 @@ F:	include/net/bluetooth/
 BONDING DRIVER
 M:	Jay Vosburgh <j.vosburgh@gmail.com>
 M:	Veaceslav Falico <vfalico@gmail.com>
-M:	Andy Gospodarek <andy@greyhouse.net>
+M:	Andy Gospodarek <gospo@cumulusnetworks.com>
 L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
 S:	Supported

@@ -20,6 +20,7 @@
 #include <linux/input.h>
 #include <linux/io.h>
 #include <linux/delay.h>
+#include <linux/smc91x.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -46,15 +47,20 @@ static struct resource smc91x_resources[] = {
 	[1] = {
 		.start	= MSM_GPIO_TO_INT(49),
 		.end	= MSM_GPIO_TO_INT(49),
-		.flags	= IORESOURCE_IRQ,
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
 	},
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
 	.name		= "smc91x",
 	.id		= 0,
 	.num_resources	= ARRAY_SIZE(smc91x_resources),
 	.resource	= smc91x_resources,
+	.dev.platform_data = &smc91x_platdata,
 };
 
 static struct platform_device *devices[] __initdata = {

@@ -22,6 +22,7 @@
 #include <linux/usb/msm_hsusb.h>
 #include <linux/err.h>
 #include <linux/clkdev.h>
+#include <linux/smc91x.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -49,15 +50,20 @@ static struct resource smc91x_resources[] = {
 		.flags = IORESOURCE_MEM,
 	},
 	[1] = {
-		.flags = IORESOURCE_IRQ,
+		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
 	},
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
 	.name          = "smc91x",
 	.id            = 0,
 	.num_resources = ARRAY_SIZE(smc91x_resources),
 	.resource      = smc91x_resources,
+	.dev.platform_data = &smc91x_platdata,
 };
 
 static int __init msm_init_smc91x(void)

@@ -81,11 +81,16 @@ static struct resource smc91x_resources[] = {
 	}
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+	.flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
 	.name		= "smc91x",
 	.id		= 0,
 	.num_resources	= ARRAY_SIZE(smc91x_resources),
 	.resource	= smc91x_resources,
+	.dev.platform_data = &smc91x_platdata,
 };
 
 static void idp_backlight_power(int on)

@@ -24,6 +24,7 @@
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/partitions.h>
 #include <linux/pwm_backlight.h>
+#include <linux/smc91x.h>
 
 #include <asm/types.h>
 #include <asm/setup.h>
@@ -189,15 +190,20 @@ static struct resource smc91x_resources[] = {
 	[1] = {
 		.start	= LPD270_ETHERNET_IRQ,
 		.end	= LPD270_ETHERNET_IRQ,
-		.flags	= IORESOURCE_IRQ,
+		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
 	},
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
 	.name		= "smc91x",
 	.id		= 0,
 	.num_resources	= ARRAY_SIZE(smc91x_resources),
 	.resource	= smc91x_resources,
+	.dev.platform_data = &smc91x_platdata,
 };
 
 static struct resource lpd270_flash_resources[] = {

@@ -28,6 +28,7 @@
 #include <linux/platform_data/video-clcd-versatile.h>
 #include <linux/io.h>
 #include <linux/smsc911x.h>
+#include <linux/smc91x.h>
 #include <linux/ata_platform.h>
 #include <linux/amba/mmci.h>
 #include <linux/gfp.h>
@@ -94,6 +95,10 @@ static struct smsc911x_platform_config smsc911x_config = {
 	.phy_interface	= PHY_INTERFACE_MODE_MII,
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+	.flags = SMC91X_USE_32BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device realview_eth_device = {
 	.name		= "smsc911x",
 	.id		= 0,
@@ -107,6 +112,8 @@ int realview_eth_register(const char *name, struct resource *res)
 	realview_eth_device.resource = res;
 	if (strcmp(realview_eth_device.name, "smsc911x") == 0)
 		realview_eth_device.dev.platform_data = &smsc911x_config;
+	else
+		realview_eth_device.dev.platform_data = &smc91x_platdata;
 
 	return platform_device_register(&realview_eth_device);
 }

@@ -234,7 +234,7 @@ static struct resource realview_eb_eth_resources[] = {
 	[1] = {
 		.start		= IRQ_EB_ETH,
 		.end		= IRQ_EB_ETH,
-		.flags		= IORESOURCE_IRQ,
+		.flags		= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
 	},
 };
 

@@ -12,6 +12,7 @@
 #include <linux/pm.h>
 #include <linux/serial_core.h>
 #include <linux/slab.h>
+#include <linux/smc91x.h>
 
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
@@ -258,12 +259,17 @@ static int neponset_probe(struct platform_device *dev)
 			       0x02000000, "smc91x-attrib"),
 		{ .flags = IORESOURCE_IRQ },
 	};
+	struct smc91x_platdata smc91x_platdata = {
+		.flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT,
+	};
 	struct platform_device_info smc91x_devinfo = {
 		.parent = &dev->dev,
 		.name = "smc91x",
 		.id = 0,
 		.res = smc91x_resources,
 		.num_res = ARRAY_SIZE(smc91x_resources),
+		.data = &smc91x_platdata,
+		.size_data = sizeof(smc91x_platdata),
 	};
 	int ret, irq;
 

@@ -11,6 +11,7 @@
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/mtd/partitions.h>
+#include <linux/smc91x.h>
 
 #include <mach/hardware.h>
 #include <asm/setup.h>
@@ -43,12 +44,18 @@ static struct resource smc91x_resources[] = {
 #endif
 };
 
+static struct smc91x_platdata smc91x_platdata = {
+	.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
+};
+
 static struct platform_device smc91x_device = {
 	.name		= "smc91x",
 	.id		= 0,
 	.num_resources	= ARRAY_SIZE(smc91x_resources),
 	.resource	= smc91x_resources,
+	.dev = {
+		.platform_data = &smc91x_platdata,
+	},
 };
 
 static struct platform_device *devices[] __initdata = {

@@ -272,6 +272,7 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL },
 
 	/* Intel Bluetooth devices */
+	{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
 	{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
 	{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
 	{ USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },

@@ -1755,7 +1755,7 @@ init_card(struct hfc_pci *hc)
 	enable_hwirq(hc);
 	spin_unlock_irqrestore(&hc->lock, flags);
 	/* Timeout 80ms */
-	current->state = TASK_UNINTERRUPTIBLE;
+	set_current_state(TASK_UNINTERRUPTIBLE);
 	schedule_timeout((80 * HZ) / 1000);
 	printk(KERN_INFO "HFC PCI: IRQ %d count %d\n",
 	       hc->irq, hc->irqcnt);

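The hunk above swaps a direct assignment to current->state for set_current_state(), which inserts the memory barrier the scheduler expects before a sleep. A minimal sketch of the canonical timed-sleep pattern (illustrative only, not taken from this driver):

	set_current_state(TASK_UNINTERRUPTIBLE);	/* barrier before sleeping */
	schedule_timeout(msecs_to_jiffies(80));		/* ~80ms regardless of HZ */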
@@ -157,7 +157,7 @@ config IPVLAN
 	  making it transparent to the connected L2 switch.
 
 	  Ipvlan devices can be added using the "ip" command from the
-	  iproute2 package starting with the iproute2-X.Y.ZZ release:
+	  iproute2 package starting with the iproute2-3.19 release:
 
 	  "ip link add link <main-dev> [ NAME ] type ipvlan"
 

@@ -40,7 +40,7 @@ config DEV_APPLETALK
 
 config LTPC
 	tristate "Apple/Farallon LocalTalk PC support"
-	depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API
+	depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS
 	help
 	  This allows you to use the AppleTalk PC card to connect to LocalTalk
 	  networks. The card is also known as the Farallon PhoneNet PC card.

@@ -105,8 +105,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off)	\
 {									\
 	u32 indir, dir;							\
 	spin_lock(&priv->indir_lock);					\
-	indir = reg_readl(priv, REG_DIR_DATA_READ);			\
 	dir = __raw_readl(priv->name + off);				\
+	indir = reg_readl(priv, REG_DIR_DATA_READ);			\
 	spin_unlock(&priv->indir_lock);					\
 	return (u64)indir << 32 | dir;					\
 }									\

@@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev)
 	link->open++;
 
 	info->link_status = 0x00;
-	init_timer(&info->watchdog);
-	info->watchdog.function = ei_watchdog;
-	info->watchdog.data = (u_long)dev;
-	info->watchdog.expires = jiffies + HZ;
-	add_timer(&info->watchdog);
+	setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+	mod_timer(&info->watchdog, jiffies + HZ);
 
 	return ax_open(dev);
 } /* axnet_open */

@@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev)
 
 	info->phy_id = info->eth_phy;
 	info->link_status = 0x00;
-	init_timer(&info->watchdog);
-	info->watchdog.function = ei_watchdog;
-	info->watchdog.data = (u_long)dev;
-	info->watchdog.expires = jiffies + HZ;
-	add_timer(&info->watchdog);
+	setup_timer(&info->watchdog, ei_watchdog, (u_long)dev);
+	mod_timer(&info->watchdog, jiffies + HZ);
 
 	return ei_open(dev);
 } /* pcnet_open */

@@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
 	u16 pktlength;
 	u16 pktstatus;
 
-	while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) {
+	while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) &&
+	       (count < limit)) {
 		pktstatus = rxstatus >> 16;
 		pktlength = rxstatus & 0xffff;
 
@@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget)
 	struct altera_tse_private *priv =
 			container_of(napi, struct altera_tse_private, napi);
 	int rxcomplete = 0;
-	int txcomplete = 0;
 	unsigned long int flags;
 
-	txcomplete = tse_tx_complete(priv);
+	tse_tx_complete(priv);
 
 	rxcomplete = tse_rx(priv, budget);
 
-	if (rxcomplete >= budget || txcomplete > 0)
-		return rxcomplete;
+	if (rxcomplete < budget) {
 
-	napi_gro_flush(napi, false);
-	__napi_complete(napi);
+		napi_gro_flush(napi, false);
+		__napi_complete(napi);
 
-	netdev_dbg(priv->dev,
-		   "NAPI Complete, did %d packets with budget %d\n",
-		   txcomplete+rxcomplete, budget);
+		netdev_dbg(priv->dev,
+			   "NAPI Complete, did %d packets with budget %d\n",
+			   rxcomplete, budget);
 
-	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
-	priv->dmaops->enable_rxirq(priv);
-	priv->dmaops->enable_txirq(priv);
-	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
-	return rxcomplete + txcomplete;
+		spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
+		priv->dmaops->enable_rxirq(priv);
+		priv->dmaops->enable_txirq(priv);
+		spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
+	}
+	return rxcomplete;
 }
 
 /* DMA TX & RX FIFO interrupt routing
@@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
 {
 	struct net_device *dev = dev_id;
 	struct altera_tse_private *priv;
-	unsigned long int flags;
 
 	if (unlikely(!dev)) {
 		pr_err("%s: invalid dev pointer\n", __func__);
@@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
 	}
 	priv = netdev_priv(dev);
 
-	/* turn off desc irqs and enable napi rx */
-	spin_lock_irqsave(&priv->rxdma_irq_lock, flags);
-
-	if (likely(napi_schedule_prep(&priv->napi))) {
-		priv->dmaops->disable_rxirq(priv);
-		priv->dmaops->disable_txirq(priv);
-		__napi_schedule(&priv->napi);
-	}
-
+	spin_lock(&priv->rxdma_irq_lock);
 	/* reset IRQs */
 	priv->dmaops->clear_rxirq(priv);
 	priv->dmaops->clear_txirq(priv);
+	spin_unlock(&priv->rxdma_irq_lock);
+
+	if (likely(napi_schedule_prep(&priv->napi))) {
+		spin_lock(&priv->rxdma_irq_lock);
+		priv->dmaops->disable_rxirq(priv);
+		priv->dmaops->disable_txirq(priv);
+		spin_unlock(&priv->rxdma_irq_lock);
+		__napi_schedule(&priv->napi);
+	}
 
-	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
-
 	return IRQ_HANDLED;
 }
@@ -1399,7 +1398,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	}
 
 	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
-				 &priv->rx_fifo_depth)) {
+				 &priv->tx_fifo_depth)) {
 		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
 		ret = -ENXIO;
 		goto err_free_netdev;

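Both altera_tse fixes above converge on the standard NAPI contract: a poll routine may only complete NAPI and re-enable device interrupts when it consumed less than its budget. A minimal sketch of that contract (names are illustrative, not the driver's):

	static int my_poll(struct napi_struct *napi, int budget)
	{
		int done = my_rx(napi, budget);	/* hypothetical: process at most 'budget' packets */

		if (done < budget) {		/* ring drained: stop polling */
			napi_complete(napi);
			my_enable_irqs(napi);	/* hypothetical helper */
		}
		return done;			/* never more than budget */
	}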
@@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
 	}
 }
 
+static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel;
+	struct net_device *netdev = pdata->netdev;
+	unsigned int i;
+	int ret;
+
+	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
+			       netdev->name, pdata);
+	if (ret) {
+		netdev_alert(netdev, "error requesting irq %d\n",
+			     pdata->dev_irq);
+		return ret;
+	}
+
+	if (!pdata->per_channel_irq)
+		return 0;
+
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++) {
+		snprintf(channel->dma_irq_name,
+			 sizeof(channel->dma_irq_name) - 1,
+			 "%s-TxRx-%u", netdev_name(netdev),
+			 channel->queue_index);
+
+		ret = devm_request_irq(pdata->dev, channel->dma_irq,
+				       xgbe_dma_isr, 0,
+				       channel->dma_irq_name, channel);
+		if (ret) {
+			netdev_alert(netdev, "error requesting irq %d\n",
+				     channel->dma_irq);
+			goto err_irq;
+		}
+	}
+
+	return 0;
+
+err_irq:
+	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
+	for (i--, channel--; i < pdata->channel_count; i--, channel--)
+		devm_free_irq(pdata->dev, channel->dma_irq, channel);
+
+	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+	return ret;
+}
+
+static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
+{
+	struct xgbe_channel *channel;
+	unsigned int i;
+
+	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+
+	if (!pdata->per_channel_irq)
+		return;
+
+	channel = pdata->channel;
+	for (i = 0; i < pdata->channel_count; i++, channel++)
+		devm_free_irq(pdata->dev, channel->dma_irq, channel);
+}
+
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
@@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
 		return -EINVAL;
 	}
 
-	phy_stop(pdata->phydev);
-
 	spin_lock_irqsave(&pdata->lock, flags);
 
 	if (caller == XGMAC_DRIVER_CONTEXT)
 		netif_device_detach(netdev);
 
 	netif_tx_stop_all_queues(netdev);
-	xgbe_napi_disable(pdata, 0);
 
-	/* Powerdown Tx/Rx */
 	hw_if->powerdown_tx(pdata);
 	hw_if->powerdown_rx(pdata);
 
+	xgbe_napi_disable(pdata, 0);
+
+	phy_stop(pdata->phydev);
+
 	pdata->power_down = 1;
 
 	spin_unlock_irqrestore(&pdata->lock, flags);
@@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
 
 	phy_start(pdata->phydev);
 
 	/* Enable Tx/Rx */
-	xgbe_napi_enable(pdata, 0);
-
 	hw_if->powerup_tx(pdata);
 	hw_if->powerup_rx(pdata);
 
 	if (caller == XGMAC_DRIVER_CONTEXT)
 		netif_device_attach(netdev);
 
+	xgbe_napi_enable(pdata, 0);
 	netif_tx_start_all_queues(netdev);
 
 	spin_unlock_irqrestore(&pdata->lock, flags);
@@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 {
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct net_device *netdev = pdata->netdev;
+	int ret;
 
 	DBGPR("-->xgbe_start\n");
 
@@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
 	phy_start(pdata->phydev);
 
+	xgbe_napi_enable(pdata, 1);
+
+	ret = xgbe_request_irqs(pdata);
+	if (ret)
+		goto err_napi;
+
 	hw_if->enable_tx(pdata);
 	hw_if->enable_rx(pdata);
 
 	xgbe_init_tx_timers(pdata);
 
-	xgbe_napi_enable(pdata, 1);
 	netif_tx_start_all_queues(netdev);
 
 	DBGPR("<--xgbe_start\n");
 
 	return 0;
+
+err_napi:
+	xgbe_napi_disable(pdata, 1);
+
+	phy_stop(pdata->phydev);
+
+	hw_if->exit(pdata);
+
+	return ret;
 }
 
 static void xgbe_stop(struct xgbe_prv_data *pdata)
@@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
 	DBGPR("-->xgbe_stop\n");
 
-	phy_stop(pdata->phydev);
-
 	netif_tx_stop_all_queues(netdev);
-	xgbe_napi_disable(pdata, 1);
 
 	xgbe_stop_tx_timers(pdata);
 
 	hw_if->disable_tx(pdata);
 	hw_if->disable_rx(pdata);
 
+	xgbe_free_irqs(pdata);
+
+	xgbe_napi_disable(pdata, 1);
+
+	phy_stop(pdata->phydev);
+
+	hw_if->exit(pdata);
+
 	channel = pdata->channel;
 	for (i = 0; i < pdata->channel_count; i++, channel++) {
 		if (!channel->tx_ring)
@@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
 
 static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 {
-	struct xgbe_channel *channel;
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	unsigned int i;
-
 	DBGPR("-->xgbe_restart_dev\n");
 
 	/* If not running, "restart" will happen on open */
@@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata)
 		return;
 
 	xgbe_stop(pdata);
-	synchronize_irq(pdata->dev_irq);
-	if (pdata->per_channel_irq) {
-		channel = pdata->channel;
-		for (i = 0; i < pdata->channel_count; i++, channel++)
-			synchronize_irq(channel->dma_irq);
-	}
 
 	xgbe_free_tx_data(pdata);
 	xgbe_free_rx_data(pdata);
 
-	/* Issue software reset to device */
-	hw_if->exit(pdata);
-
 	xgbe_start(pdata);
 
 	DBGPR("<--xgbe_restart_dev\n");
@@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata,
 static int xgbe_open(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
-	struct xgbe_channel *channel = NULL;
-	unsigned int i = 0;
 	int ret;
 
 	DBGPR("-->xgbe_open\n");
@@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev)
 	INIT_WORK(&pdata->restart_work, xgbe_restart);
 	INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
 
-	/* Request interrupts */
-	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
-			       netdev->name, pdata);
-	if (ret) {
-		netdev_alert(netdev, "error requesting irq %d\n",
-			     pdata->dev_irq);
-		goto err_rings;
-	}
-
-	if (pdata->per_channel_irq) {
-		channel = pdata->channel;
-		for (i = 0; i < pdata->channel_count; i++, channel++) {
-			snprintf(channel->dma_irq_name,
-				 sizeof(channel->dma_irq_name) - 1,
-				 "%s-TxRx-%u", netdev_name(netdev),
-				 channel->queue_index);
-
-			ret = devm_request_irq(pdata->dev, channel->dma_irq,
-					       xgbe_dma_isr, 0,
-					       channel->dma_irq_name, channel);
-			if (ret) {
-				netdev_alert(netdev,
-					     "error requesting irq %d\n",
-					     channel->dma_irq);
-				goto err_irq;
-			}
-		}
-	}
-
 	ret = xgbe_start(pdata);
 	if (ret)
-		goto err_start;
+		goto err_rings;
 
 	DBGPR("<--xgbe_open\n");
 
 	return 0;
 
-err_start:
-	hw_if->exit(pdata);
-
-err_irq:
-	if (pdata->per_channel_irq) {
-		/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
-		for (i--, channel--; i < pdata->channel_count; i--, channel--)
-			devm_free_irq(pdata->dev, channel->dma_irq, channel);
-	}
-
-	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-
 err_rings:
 	desc_if->free_ring_resources(pdata);
 
@@ -1399,30 +1424,16 @@ err_phy_init:
 static int xgbe_close(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
-	struct xgbe_hw_if *hw_if = &pdata->hw_if;
 	struct xgbe_desc_if *desc_if = &pdata->desc_if;
-	struct xgbe_channel *channel;
-	unsigned int i;
 
 	DBGPR("-->xgbe_close\n");
 
 	/* Stop the device */
 	xgbe_stop(pdata);
 
-	/* Issue software reset to device */
-	hw_if->exit(pdata);
-
 	/* Free the ring descriptors and buffers */
 	desc_if->free_ring_resources(pdata);
 
-	/* Release the interrupts */
-	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
-	if (pdata->per_channel_irq) {
-		channel = pdata->channel;
-		for (i = 0; i < pdata->channel_count; i++, channel++)
-			devm_free_irq(pdata->dev, channel->dma_irq, channel);
-	}
-
 	/* Free the channel and ring structures */
 	xgbe_free_channels(pdata);
 

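The unwind loop in xgbe_request_irqs() above relies on unsigned wraparound: decrementing i past zero yields UINT_MAX, which fails the i < channel_count test and terminates the walk-back. A standalone sketch of the idiom (setup_one/undo_one are hypothetical helpers, not driver code):

	static int setup_all(unsigned int count)
	{
		unsigned int i;

		for (i = 0; i < count; i++) {
			if (setup_one(i))	/* hypothetical: nonzero on failure */
				goto err;
		}
		return 0;

	err:
		/* entries [0, i) succeeded; step back and undo them.  After
		 * undoing entry 0, i-- wraps to UINT_MAX and i < count fails. */
		for (i--; i < count; i--)
			undo_one(i);		/* hypothetical cleanup helper */

		return -1;
	}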
@@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
 	/* RBUF misc statistics */
 	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
 	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
-	STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
-	STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
-	STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
+	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)
@@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
 		s = &bcm_sysport_gstrings_stats[i];
 		switch (s->type) {
 		case BCM_SYSPORT_STAT_NETDEV:
+		case BCM_SYSPORT_STAT_SOFT:
 			continue;
 		case BCM_SYSPORT_STAT_MIB_RX:
 		case BCM_SYSPORT_STAT_MIB_TX:

@@ -570,6 +570,7 @@ enum bcm_sysport_stat_type {
 	BCM_SYSPORT_STAT_RUNT,
 	BCM_SYSPORT_STAT_RXCHK,
 	BCM_SYSPORT_STAT_RBUF,
+	BCM_SYSPORT_STAT_SOFT,
 };
 
 /* Macros to help define ethtool statistics */
@@ -590,6 +591,7 @@ enum bcm_sysport_stat_type {
 #define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
 #define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
 #define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT)
 
 #define STAT_RXCHK(str, m, ofs) { \
 	.stat_string = str, \

@@ -487,6 +487,7 @@ enum bcmgenet_stat_type {
 	BCMGENET_STAT_MIB_TX,
 	BCMGENET_STAT_RUNT,
 	BCMGENET_STAT_MISC,
+	BCMGENET_STAT_SOFT,
 };
 
 struct bcmgenet_stats {
@@ -515,6 +516,7 @@ struct bcmgenet_stats {
 #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
 #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
 #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
 
 #define STAT_GENET_MISC(str, m, offset) { \
 	.stat_string = str, \
@@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
 			UMAC_RBUF_OVFL_CNT),
 	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
-	STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
-	STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed),
-	STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed),
+	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
+	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
+	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
 };
 
 #define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
@@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
 		s = &bcmgenet_gstrings_stats[i];
 		switch (s->type) {
 		case BCMGENET_STAT_NETDEV:
+		case BCMGENET_STAT_SOFT:
 			continue;
 		case BCMGENET_STAT_MIB_RX:
 		case BCMGENET_STAT_MIB_TX:
@@ -971,13 +974,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
 }
 
 /* Unlocked version of the reclaim routine */
-static void __bcmgenet_tx_reclaim(struct net_device *dev,
-				  struct bcmgenet_tx_ring *ring)
+static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
+					  struct bcmgenet_tx_ring *ring)
 {
 	struct bcmgenet_priv *priv = netdev_priv(dev);
 	int last_tx_cn, last_c_index, num_tx_bds;
 	struct enet_cb *tx_cb_ptr;
 	struct netdev_queue *txq;
+	unsigned int pkts_compl = 0;
 	unsigned int bds_compl;
 	unsigned int c_index;
 
@@ -1005,6 +1009,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 		tx_cb_ptr = ring->cbs + last_c_index;
 		bds_compl = 0;
 		if (tx_cb_ptr->skb) {
+			pkts_compl++;
 			bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1;
 			dev->stats.tx_bytes += tx_cb_ptr->skb->len;
 			dma_unmap_single(&dev->dev,
@@ -1028,23 +1033,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 		last_c_index &= (num_tx_bds - 1);
 	}
 
-	if (ring->free_bds > (MAX_SKB_FRAGS + 1))
-		ring->int_disable(priv, ring);
-
-	if (netif_tx_queue_stopped(txq))
-		netif_tx_wake_queue(txq);
+	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+		if (netif_tx_queue_stopped(txq))
+			netif_tx_wake_queue(txq);
+	}
 
 	ring->c_index = c_index;
+
+	return pkts_compl;
 }
 
-static void bcmgenet_tx_reclaim(struct net_device *dev,
-				struct bcmgenet_tx_ring *ring)
+static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
+					struct bcmgenet_tx_ring *ring)
 {
+	unsigned int released;
 	unsigned long flags;
 
 	spin_lock_irqsave(&ring->lock, flags);
-	__bcmgenet_tx_reclaim(dev, ring);
+	released = __bcmgenet_tx_reclaim(dev, ring);
 	spin_unlock_irqrestore(&ring->lock, flags);
+
+	return released;
+}
+
+static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
+{
+	struct bcmgenet_tx_ring *ring =
+		container_of(napi, struct bcmgenet_tx_ring, napi);
+	unsigned int work_done = 0;
+
+	work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+
+	if (work_done == 0) {
+		napi_complete(napi);
+		ring->int_enable(ring->priv, ring);
+
+		return 0;
+	}
+
+	return budget;
 }
 
 static void bcmgenet_tx_reclaim_all(struct net_device *dev)
@@ -1302,10 +1329,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
 	bcmgenet_tdma_ring_writel(priv, ring->index,
 				  ring->prod_index, TDMA_PROD_INDEX);
 
-	if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
 		netif_tx_stop_queue(txq);
-		ring->int_enable(priv, ring);
-	}
 
 out:
 	spin_unlock_irqrestore(&ring->lock, flags);
@@ -1621,6 +1646,7 @@ static int init_umac(struct bcmgenet_priv *priv)
 	struct device *kdev = &priv->pdev->dev;
 	int ret;
 	u32 reg, cpu_mask_clear;
+	int index;
 
 	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
 
@@ -1647,7 +1673,7 @@ static int init_umac(struct bcmgenet_priv *priv)
 
 	bcmgenet_intr_disable(priv);
 
-	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+	cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE;
 
 	dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
 
@@ -1674,6 +1700,10 @@ static int init_umac(struct bcmgenet_priv *priv)
 
 	bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
 
+	for (index = 0; index < priv->hw_params->tx_queues; index++)
+		bcmgenet_intrl2_1_writel(priv, (1 << index),
+					 INTRL2_CPU_MASK_CLEAR);
+
 	/* Enable rx/tx engine.*/
 	dev_dbg(kdev, "done init umac\n");
 
@@ -1693,6 +1723,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
 	unsigned int first_bd;
 
 	spin_lock_init(&ring->lock);
+	ring->priv = priv;
+	netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64);
 	ring->index = index;
 	if (index == DESC_INDEX) {
 		ring->queue = 0;
@@ -1738,6 +1770,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
 				  TDMA_WRITE_PTR);
 	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
 				  DMA_END_ADDR);
+
+	napi_enable(&ring->napi);
+}
+
+static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv,
+				  unsigned int index)
+{
+	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+
+	napi_disable(&ring->napi);
+	netif_napi_del(&ring->napi);
 }
 
 /* Initialize a RDMA ring */
@@ -1907,7 +1950,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
 	return ret;
 }
 
-static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 {
 	int i;
 
@@ -1926,6 +1969,18 @@ static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv)
 	kfree(priv->tx_cbs);
 }
 
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+	int i;
+
+	bcmgenet_fini_tx_ring(priv, DESC_INDEX);
+
+	for (i = 0; i < priv->hw_params->tx_queues; i++)
+		bcmgenet_fini_tx_ring(priv, i);
+
+	__bcmgenet_fini_dma(priv);
+}
+
 /* init_edma: Initialize DMA control register */
 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 {
@@ -1952,7 +2007,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
 			       GFP_KERNEL);
 	if (!priv->tx_cbs) {
-		bcmgenet_fini_dma(priv);
+		__bcmgenet_fini_dma(priv);
 		return -ENOMEM;
 	}
 
@@ -1975,9 +2030,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
 					 struct bcmgenet_priv, napi);
 	unsigned int work_done;
 
-	/* tx reclaim */
-	bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
-
 	work_done = bcmgenet_desc_rx(priv, budget);
 
 	/* Advancing our consumer index*/
@@ -2022,28 +2074,34 @@ static void bcmgenet_irq_task(struct work_struct *work)
 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
 {
 	struct bcmgenet_priv *priv = dev_id;
+	struct bcmgenet_tx_ring *ring;
 	unsigned int index;
 
 	/* Save irq status for bottom-half processing. */
 	priv->irq1_stat =
 		bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
-		~priv->int1_mask;
+		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
 	/* clear interrupts */
 	bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 
 	netif_dbg(priv, intr, priv->dev,
 		  "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+
 	/* Check the MBDONE interrupts.
 	 * packet is done, reclaim descriptors
 	 */
-	if (priv->irq1_stat & 0x0000ffff) {
-		index = 0;
-		for (index = 0; index < 16; index++) {
-			if (priv->irq1_stat & (1 << index))
-				bcmgenet_tx_reclaim(priv->dev,
-						    &priv->tx_rings[index]);
+	for (index = 0; index < priv->hw_params->tx_queues; index++) {
+		if (!(priv->irq1_stat & BIT(index)))
+			continue;
+
+		ring = &priv->tx_rings[index];
+
+		if (likely(napi_schedule_prep(&ring->napi))) {
+			ring->int_disable(priv, ring);
+			__napi_schedule(&ring->napi);
 		}
 	}
+
 	return IRQ_HANDLED;
 }
@@ -2075,8 +2133,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
 	}
 	if (priv->irq0_stat &
 			(UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
-		/* Tx reclaim */
-		bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+		struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX];
+
+		if (likely(napi_schedule_prep(&ring->napi))) {
+			ring->int_disable(priv, ring);
+			__napi_schedule(&ring->napi);
+		}
 	}
 	if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
 			       UMAC_IRQ_PHY_DET_F |

@@ -520,6 +520,7 @@ struct bcmgenet_hw_params {
 
 struct bcmgenet_tx_ring {
 	spinlock_t	lock;		/* ring lock */
+	struct napi_struct napi;	/* NAPI per tx queue */
 	unsigned int	index;		/* ring index */
 	unsigned int	queue;		/* queue index */
 	struct enet_cb	*cbs;		/* tx ring buffer control block*/
@@ -534,6 +535,7 @@ struct bcmgenet_tx_ring {
 			   struct bcmgenet_tx_ring *);
 	void (*int_disable)(struct bcmgenet_priv *priv,
 			    struct bcmgenet_tx_ring *);
+	struct bcmgenet_priv *priv;
 };
 
 /* device context */

@@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key)
 }
 
 static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr,
-				   int addr_len)
+				   u8 v6)
 {
-	return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) :
-			ipv6_clip_hash(ctbl, addr);
+	return v6 ? ipv6_clip_hash(ctbl, addr) :
+			ipv4_clip_hash(ctbl, addr);
 }
 
 static int clip6_get_mbox(const struct net_device *dev,
@@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
 	struct clip_entry *ce, *cte;
 	u32 *addr = (u32 *)lip;
 	int hash;
-	int addr_len;
-	int ret = 0;
+	int ret = -1;
 
 	if (!ctbl)
 		return 0;
 
-	if (v6)
-		addr_len = 16;
-	else
-		addr_len = 4;
-
-	hash = clip_addr_hash(ctbl, addr, addr_len);
+	hash = clip_addr_hash(ctbl, addr, v6);
 
 	read_lock_bh(&ctbl->lock);
 	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
-		if (addr_len == cte->addr_len &&
-		    memcmp(lip, cte->addr, cte->addr_len) == 0) {
+		if (cte->addr6.sin6_family == AF_INET6 && v6)
+			ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+				     sizeof(struct in6_addr));
+		else if (cte->addr.sin_family == AF_INET && !v6)
+			ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+				     sizeof(struct in_addr));
+		if (!ret) {
 			ce = cte;
 			read_unlock_bh(&ctbl->lock);
 			goto found;
@@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6)
 		spin_lock_init(&ce->lock);
 		atomic_set(&ce->refcnt, 0);
 		atomic_dec(&ctbl->nfree);
-		ce->addr_len = addr_len;
-		memcpy(ce->addr, lip, addr_len);
 		list_add_tail(&ce->list, &ctbl->hash_list[hash]);
 		if (v6) {
+			ce->addr6.sin6_family = AF_INET6;
+			memcpy(ce->addr6.sin6_addr.s6_addr,
+			       lip, sizeof(struct in6_addr));
 			ret = clip6_get_mbox(dev, (const struct in6_addr *)lip);
 			if (ret) {
 				write_unlock_bh(&ctbl->lock);
 				return ret;
 			}
+		} else {
+			ce->addr.sin_family = AF_INET;
+			memcpy((char *)(&ce->addr.sin_addr), lip,
+			       sizeof(struct in_addr));
 		}
 	} else {
 		write_unlock_bh(&ctbl->lock);
@@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6)
 	struct clip_entry *ce, *cte;
 	u32 *addr = (u32 *)lip;
 	int hash;
-	int addr_len;
-
-	if (v6)
-		addr_len = 16;
-	else
-		addr_len = 4;
+	int ret = -1;
 
-	hash = clip_addr_hash(ctbl, addr, addr_len);
+	hash = clip_addr_hash(ctbl, addr, v6);
 
 	read_lock_bh(&ctbl->lock);
 	list_for_each_entry(cte, &ctbl->hash_list[hash], list) {
-		if (addr_len == cte->addr_len &&
-		    memcmp(lip, cte->addr, cte->addr_len) == 0) {
+		if (cte->addr6.sin6_family == AF_INET6 && v6)
+			ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr,
+				     sizeof(struct in6_addr));
+		else if (cte->addr.sin_family == AF_INET && !v6)
+			ret = memcmp(lip, (char *)(&cte->addr.sin_addr),
+				     sizeof(struct in_addr));
+		if (!ret) {
 			ce = cte;
 			read_unlock_bh(&ctbl->lock);
 			goto found;
@@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v)
 	for (i = 0 ; i < ctbl->clipt_size; ++i) {
 		list_for_each_entry(ce, &ctbl->hash_list[i], list) {
 			ip[0] = '\0';
-			if (ce->addr_len == 16)
-				sprintf(ip, "%pI6c", ce->addr);
-			else
-				sprintf(ip, "%pI4c", ce->addr);
+			sprintf(ip, "%pISc", &ce->addr);
 			seq_printf(seq, "%-25s %u\n", ip,
 				   atomic_read(&ce->refcnt));
 		}

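The seq-file change above leans on the kernel's %pISc format specifier, which prints a struct sockaddr as IPv4 or IPv6 text based on its family field, so the entry no longer needs an addr_len discriminator. A hedged sketch of the pattern (the union mirrors the header change that follows):

	union {
		struct sockaddr_in  addr;	/* sin_family == AF_INET */
		struct sockaddr_in6 addr6;	/* sin6_family == AF_INET6 */
	} u;
	char ip[60];

	/* %pISc picks the textual form from the embedded family field */
	sprintf(ip, "%pISc", &u.addr);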
@@ -14,8 +14,10 @@ struct clip_entry {
 	spinlock_t lock;	/* Hold while modifying clip reference */
 	atomic_t refcnt;
 	struct list_head list;
-	u32 addr[4];
-	int addr_len;
+	union {
+		struct sockaddr_in addr;
+		struct sockaddr_in6 addr6;
+	};
 };
 
 struct clip_tbl {

@@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
 #define T4_MEMORY_WRITE	0
 #define T4_MEMORY_READ	1
 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
-		 __be32 *buf, int dir);
+		 void *buf, int dir);
 static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
 				  u32 len, __be32 *buf)
 {

@@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
  *	@mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
  *	@addr: address within indicated memory type
  *	@len: amount of memory to transfer
- *	@buf: host memory buffer
+ *	@hbuf: host memory buffer
  *	@dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
  *
  *	Reads/writes an [almost] arbitrary memory region in the firmware: the
@@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
  *	caller's responsibility to perform appropriate byte order conversions.
  */
 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
-		 u32 len, __be32 *buf, int dir)
+		 u32 len, void *hbuf, int dir)
 {
 	u32 pos, offset, resid, memoffset;
 	u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+	u32 *buf;
 
 	/* Argument sanity checks ...
 	 */
-	if (addr & 0x3)
+	if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
 		return -EINVAL;
+	buf = (u32 *)hbuf;
 
 	/* It's convenient to be able to handle lengths which aren't a
 	 * multiple of 32-bits because we often end up transferring files to
@@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
 
 	/* Transfer data to/from the adapter as long as there's an integral
 	 * number of 32-bit transfers to complete.
+	 *
+	 * A note on Endianness issues:
+	 *
+	 * The "register" reads and writes below from/to the PCI-E Memory
+	 * Window invoke the standard adapter Big-Endian to PCI-E Link
+	 * Little-Endian "swizzel."  As a result, if we have the following
+	 * data in adapter memory:
+	 *
+	 *     Memory:  ... | b0 | b1 | b2 | b3 | ...
+	 *     Address:      i+0  i+1  i+2  i+3
+	 *
+	 * Then a read of the adapter memory via the PCI-E Memory Window
+	 * will yield:
+	 *
+	 *     x = readl(i)
+	 *         31                  0
+	 *         [ b3 | b2 | b1 | b0 ]
+	 *
+	 * If this value is stored into local memory on a Little-Endian system
+	 * it will show up correctly in local memory as:
+	 *
+	 *     ( ..., b0, b1, b2, b3, ... )
+	 *
+	 * But on a Big-Endian system, the store will show up in memory
+	 * incorrectly swizzled as:
+	 *
+	 *     ( ..., b3, b2, b1, b0, ... )
+	 *
+	 * So we need to account for this in the reads and writes to the
+	 * PCI-E Memory Window below by undoing the register read/write
+	 * swizzels.
 	 */
 	while (len > 0) {
 		if (dir == T4_MEMORY_READ)
-			*buf++ = (__force __be32) t4_read_reg(adap,
-							      mem_base + offset);
+			*buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
+						mem_base + offset));
 		else
 			t4_write_reg(adap, mem_base + offset,
-				     (__force u32) *buf++);
+				     (__force u32)cpu_to_le32(*buf++));
 		offset += sizeof(__be32);
 		len -= sizeof(__be32);
 
@@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
 	 */
 	if (resid) {
 		union {
-			__be32 word;
+			u32 word;
 			char byte[4];
 		} last;
 		unsigned char *bp;
 		int i;
 
 		if (dir == T4_MEMORY_READ) {
-			last.word = (__force __be32) t4_read_reg(adap,
-								 mem_base + offset);
+			last.word = le32_to_cpu(
+					(__force __le32)t4_read_reg(adap,
+						mem_base + offset));
 			for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
 				bp[i] = last.byte[i];
 		} else {
@@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
 			for (i = resid; i < 4; i++)
 				last.byte[i] = 0;
 			t4_write_reg(adap, mem_base + offset,
-				     (__force u32) last.word);
+				     (__force u32)cpu_to_le32(last.word));
 		}
 	}
 

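The conversion pair above makes the byte order host-independent: the window readl() already byte-swapped the adapter's big-endian data, so the result is effectively a little-endian word on every host and must be treated as one. A hedged, self-contained illustration of the portable little-endian load/store the le32 helpers perform:

	#include <stdint.h>

	/* Illustrative only: reading adapter bytes b0..b3 through a 32-bit
	 * LE window and storing with these helpers keeps b0 first in host
	 * memory on both little- and big-endian machines. */
	static uint32_t load_le32(const unsigned char *p)
	{
		return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
		       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	}

	static void store_le32(unsigned char *p, uint32_t v)
	{
		p[0] = v;
		p[1] = v >> 8;
		p[2] = v >> 16;
		p[3] = v >> 24;
	}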
@@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data)
 	}
 
 	if (ENIC_TEST_INTR(pba, notify_intr)) {
-		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
 		enic_notify_check(enic);
+		vnic_intr_return_all_credits(&enic->intr[notify_intr]);
 	}
 
 	if (ENIC_TEST_INTR(pba, err_intr)) {
@@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
 	struct enic *enic = data;
 	unsigned int intr = enic_msix_notify_intr(enic);
 
-	vnic_intr_return_all_credits(&enic->intr[intr]);
 	enic_notify_check(enic);
+	vnic_intr_return_all_credits(&enic->intr[intr]);
 
 	return IRQ_HANDLED;
 }

@@ -3162,8 +3162,8 @@ static void adjust_link(struct net_device *dev)
 	struct phy_device *phydev = priv->phydev;
 
 	if (unlikely(phydev->link != priv->oldlink ||
-		     phydev->duplex != priv->oldduplex ||
-		     phydev->speed != priv->oldspeed))
+		     (phydev->link && (phydev->duplex != priv->oldduplex ||
+				       phydev->speed != priv->oldspeed))))
 		gfar_update_link_state(priv);
 }
 

@@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev)
 	device_remove_file(&dev->dev, &dev_attr_remove_port);
 }
 
+static int ehea_reboot_notifier(struct notifier_block *nb,
+				unsigned long action, void *unused)
+{
+	if (action == SYS_RESTART) {
+		pr_info("Reboot: freeing all eHEA resources\n");
+		ibmebus_unregister_driver(&ehea_driver);
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ehea_reboot_nb = {
+	.notifier_call = ehea_reboot_notifier,
+};
+
+static int ehea_mem_notifier(struct notifier_block *nb,
+			     unsigned long action, void *data)
+{
+	int ret = NOTIFY_BAD;
+	struct memory_notify *arg = data;
+
+	mutex_lock(&dlpar_mem_lock);
+
+	switch (action) {
+	case MEM_CANCEL_OFFLINE:
+		pr_info("memory offlining canceled");
+		/* Fall through: re-add canceled memory block */
+
+	case MEM_ONLINE:
+		pr_info("memory is going online");
+		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
+			goto out_unlock;
+		ehea_rereg_mrs();
+		break;
+
+	case MEM_GOING_OFFLINE:
+		pr_info("memory is going offline");
+		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
+		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
+			goto out_unlock;
+		ehea_rereg_mrs();
+		break;
+
+	default:
+		break;
+	}
+
+	ehea_update_firmware_handles();
+	ret = NOTIFY_OK;
+
+out_unlock:
+	mutex_unlock(&dlpar_mem_lock);
+	return ret;
+}
+
+static struct notifier_block ehea_mem_nb = {
+	.notifier_call = ehea_mem_notifier,
+};
+
+static void ehea_crash_handler(void)
+{
+	int i;
+
+	if (ehea_fw_handles.arr)
+		for (i = 0; i < ehea_fw_handles.num_entries; i++)
+			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
+					     ehea_fw_handles.arr[i].fwh,
+					     FORCE_FREE);
+
+	if (ehea_bcmc_regs.arr)
+		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
+			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
+					      ehea_bcmc_regs.arr[i].port_id,
+					      ehea_bcmc_regs.arr[i].reg_type,
+					      ehea_bcmc_regs.arr[i].macaddr,
+					      0, H_DEREG_BCMC);
+}
+
+static atomic_t ehea_memory_hooks_registered;
+
+/* Register memory hooks on probe of first adapter */
+static int ehea_register_memory_hooks(void)
+{
+	int ret = 0;
+
+	if (atomic_inc_and_test(&ehea_memory_hooks_registered))
+		return 0;
+
+	ret = ehea_create_busmap();
+	if (ret) {
+		pr_info("ehea_create_busmap failed\n");
+		goto out;
+	}
+
+	ret = register_reboot_notifier(&ehea_reboot_nb);
+	if (ret) {
+		pr_info("register_reboot_notifier failed\n");
+		goto out;
+	}
+
+	ret = register_memory_notifier(&ehea_mem_nb);
+	if (ret) {
+		pr_info("register_memory_notifier failed\n");
+		goto out2;
+	}
+
+	ret = crash_shutdown_register(ehea_crash_handler);
+	if (ret) {
+		pr_info("crash_shutdown_register failed\n");
+		goto out3;
+	}
+
+	return 0;
+
+out3:
+	unregister_memory_notifier(&ehea_mem_nb);
+out2:
+	unregister_reboot_notifier(&ehea_reboot_nb);
+out:
+	return ret;
+}
+
+static void ehea_unregister_memory_hooks(void)
+{
+	if (atomic_read(&ehea_memory_hooks_registered))
+		return;
+
+	unregister_reboot_notifier(&ehea_reboot_nb);
+	if (crash_shutdown_unregister(ehea_crash_handler))
+		pr_info("failed unregistering crash handler\n");
+	unregister_memory_notifier(&ehea_mem_nb);
+}
+
 static int ehea_probe_adapter(struct platform_device *dev)
 {
 	struct ehea_adapter *adapter;
@@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev)
 	int ret;
 	int i;
 
+	ret = ehea_register_memory_hooks();
+	if (ret)
+		return ret;
+
 	if (!dev || !dev->dev.of_node) {
 		pr_err("Invalid ibmebus device probed\n");
 		return -EINVAL;
@@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev)
 	return 0;
 }
 
-static void ehea_crash_handler(void)
-{
-	int i;
-
-	if (ehea_fw_handles.arr)
-		for (i = 0; i < ehea_fw_handles.num_entries; i++)
-			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
-					     ehea_fw_handles.arr[i].fwh,
-					     FORCE_FREE);
-
-	if (ehea_bcmc_regs.arr)
-		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
-			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
-					      ehea_bcmc_regs.arr[i].port_id,
-					      ehea_bcmc_regs.arr[i].reg_type,
-					      ehea_bcmc_regs.arr[i].macaddr,
-					      0, H_DEREG_BCMC);
-}
-
-static int ehea_mem_notifier(struct notifier_block *nb,
-			     unsigned long action, void *data)
-{
-	int ret = NOTIFY_BAD;
-	struct memory_notify *arg = data;
-
-	mutex_lock(&dlpar_mem_lock);
-
-	switch (action) {
-	case MEM_CANCEL_OFFLINE:
-		pr_info("memory offlining canceled");
-		/* Readd canceled memory block */
-	case MEM_ONLINE:
-		pr_info("memory is going online");
-		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
-		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
-			goto out_unlock;
-		ehea_rereg_mrs();
-		break;
-	case MEM_GOING_OFFLINE:
-		pr_info("memory is going offline");
-		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
-		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
-			goto out_unlock;
-		ehea_rereg_mrs();
-		break;
-	default:
-		break;
-	}
-
-	ehea_update_firmware_handles();
-	ret = NOTIFY_OK;
-
-out_unlock:
-	mutex_unlock(&dlpar_mem_lock);
-	return ret;
-}
-
-static struct notifier_block ehea_mem_nb = {
-	.notifier_call = ehea_mem_notifier,
-};
-
-static int ehea_reboot_notifier(struct notifier_block *nb,
-				unsigned long action, void *unused)
-{
-	if (action == SYS_RESTART) {
-		pr_info("Reboot: freeing all eHEA resources\n");
-		ibmebus_unregister_driver(&ehea_driver);
-	}
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block ehea_reboot_nb = {
-	.notifier_call = ehea_reboot_notifier,
-};
-
 static int check_module_parm(void)
 {
 	int ret = 0;
@@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void)
 	if (ret)
 		goto out;
 
-	ret = ehea_create_busmap();
-	if (ret)
-		goto out;
-
-	ret = register_reboot_notifier(&ehea_reboot_nb);
-	if (ret)
-		pr_info("failed registering reboot notifier\n");
-
-	ret = register_memory_notifier(&ehea_mem_nb);
-	if (ret)
-		pr_info("failed registering memory remove notifier\n");
-
-	ret = crash_shutdown_register(ehea_crash_handler);
-	if (ret)
-		pr_info("failed registering crash handler\n");
-
 	ret = ibmebus_register_driver(&ehea_driver);
 	if (ret) {
 		pr_err("failed registering eHEA device driver on ebus\n");
-		goto out2;
+		goto out;
 	}
 
 	ret = driver_create_file(&ehea_driver.driver,
@@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void)
 	if (ret) {
 		pr_err("failed to register capabilities attribute, ret=%d\n",
 		       ret);
-		goto out3;
+		goto out2;
 	}
 
 	return ret;
 
-out3:
-	ibmebus_unregister_driver(&ehea_driver);
 out2:
-	unregister_memory_notifier(&ehea_mem_nb);
-	unregister_reboot_notifier(&ehea_reboot_nb);
-	crash_shutdown_unregister(ehea_crash_handler);
+	ibmebus_unregister_driver(&ehea_driver);
 out:
 	return ret;
 }
 
 static void __exit ehea_module_exit(void)
 {
-	int ret;
-
 	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
 	ibmebus_unregister_driver(&ehea_driver);
-	unregister_reboot_notifier(&ehea_reboot_nb);
-	ret = crash_shutdown_unregister(ehea_crash_handler);
-	if (ret)
-		pr_info("failed unregistering crash handler\n");
-	unregister_memory_notifier(&ehea_mem_nb);
+	ehea_unregister_memory_hooks();
 	kfree(ehea_fw_handles.arr);
 	kfree(ehea_bcmc_regs.arr);
 	ehea_destroy_busmap();

@@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
     return ret;
 }

+static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
+{
+    struct ibmveth_adapter *adapter = netdev_priv(dev);
+    struct sockaddr *addr = p;
+    u64 mac_address;
+    int rc;
+
+    if (!is_valid_ether_addr(addr->sa_data))
+        return -EADDRNOTAVAIL;
+
+    mac_address = ibmveth_encode_mac_addr(addr->sa_data);
+    rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
+    if (rc) {
+        netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
+        return rc;
+    }
+
+    ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+    return 0;
+}
+
 static const struct net_device_ops ibmveth_netdev_ops = {
     .ndo_open = ibmveth_open,
     .ndo_stop = ibmveth_close,

@@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
     .ndo_fix_features = ibmveth_fix_features,
     .ndo_set_features = ibmveth_set_features,
     .ndo_validate_addr = eth_validate_addr,
-    .ndo_set_mac_address = eth_mac_addr,
+    .ndo_set_mac_address = ibmveth_set_mac_addr,
 #ifdef CONFIG_NET_POLL_CONTROLLER
     .ndo_poll_controller = ibmveth_poll_controller,
 #endif

@@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
      * The grst delay value is in 100ms units, and we'll wait a
      * couple counts longer to be sure we don't just miss the end.
      */
-    grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK
-            >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
+    grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
+            I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
+            I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
     for (cnt = 0; cnt < grst_del + 2; cnt++) {
         reg = rd32(hw, I40E_GLGEN_RSTAT);
         if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))

@@ -2846,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,

     status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);

-    if (!status)
+    if (!status && filter_index)
         *filter_index = resp->index;

     return status;

@@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay)
     u32 val;

     val = rd32(hw, I40E_PRTDCB_GENC);
-    *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >>
+    *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >>
                I40E_PRTDCB_GENC_PFCLDA_SHIFT);
 }

@@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
     if (!cmd_buf)
         return count;
     bytes_not_copied = copy_from_user(cmd_buf, buffer, count);
-    if (bytes_not_copied < 0)
+    if (bytes_not_copied < 0) {
+        kfree(cmd_buf);
         return bytes_not_copied;
+    }
     if (bytes_not_copied > 0)
         count -= bytes_not_copied;
     cmd_buf[count] = '\0';

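The debugfs hunk above closes a simple leak: an early error return after a successful allocation must free that allocation. A minimal standalone sketch of the same pattern, assuming hypothetical foo_* names rather than the driver's own:

#include <linux/slab.h>
#include <linux/uaccess.h>

/* Illustrative only; mirrors the shape of i40e_dbg_command_write(). */
static ssize_t foo_write(const char __user *buffer, size_t count)
{
    char *buf = kmalloc(count + 1, GFP_KERNEL);
    size_t not_copied;

    if (!buf)
        return -ENOMEM;

    not_copied = copy_from_user(buf, buffer, count);
    if (not_copied == count) {  /* nothing usable arrived */
        kfree(buf);             /* the fix: don't leak buf on the error path */
        return -EFAULT;
    }
    buf[count - not_copied] = '\0';

    /* ... parse the command held in buf ... */

    kfree(buf);
    return count;
}
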
@@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
     vsi->tc_config.numtc = numtc;
     vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
     /* Number of queues per enabled TC */
-    num_tc_qps = vsi->alloc_queue_pairs/numtc;
+    /* In MFP case we can have a much lower count of MSIx
+     * vectors available and so we need to lower the used
+     * q count.
+     */
+    qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
+    num_tc_qps = qcount / numtc;
     num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);

     /* Setup queue offset/count for all TCs for given VSI */

@@ -2684,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
     u16 qoffset, qcount;
     int i, n;

-    if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED))
-        return;
+    if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
+        /* Reset the TC information */
+        for (i = 0; i < vsi->num_queue_pairs; i++) {
+            rx_ring = vsi->rx_rings[i];
+            tx_ring = vsi->tx_rings[i];
+            rx_ring->dcb_tc = 0;
+            tx_ring->dcb_tc = 0;
+        }
+    }

     for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
         if (!(vsi->tc_config.enabled_tc & (1 << n)))

@@ -3830,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
 {
     int i;

+    i40e_stop_misc_vector(pf);
+    if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+        synchronize_irq(pf->msix_entries[0].vector);
+        free_irq(pf->msix_entries[0].vector, pf);
+    }
+
     i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
     for (i = 0; i < pf->num_alloc_vsi; i++)
         if (pf->vsi[i])

@@ -5254,8 +5272,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,

     /* Wait for the PF's Tx queues to be disabled */
     ret = i40e_pf_wait_txq_disabled(pf);
-    if (!ret)
+    if (ret) {
+        /* Schedule PF reset to recover */
+        set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+        i40e_service_event_schedule(pf);
+    } else {
         i40e_pf_unquiesce_all_vsi(pf);
+    }

 exit:
     return ret;
 }

@@ -5587,7 +5611,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
     int i, v;

     /* If we're down or resetting, just bail */
-    if (test_bit(__I40E_CONFIG_BUSY, &pf->state))
+    if (test_bit(__I40E_DOWN, &pf->state) ||
+        test_bit(__I40E_CONFIG_BUSY, &pf->state))
         return;

     /* for each VSI/netdev

@@ -9533,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev)
     set_bit(__I40E_DOWN, &pf->state);
     del_timer_sync(&pf->service_timer);
     cancel_work_sync(&pf->service_task);
+    i40e_fdir_teardown(pf);

     if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
         i40e_free_vfs(pf);

@@ -9559,12 +9585,6 @@ static void i40e_remove(struct pci_dev *pdev)
     if (pf->vsi[pf->lan_vsi])
         i40e_vsi_release(pf->vsi[pf->lan_vsi]);

-    i40e_stop_misc_vector(pf);
-    if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
-        synchronize_irq(pf->msix_entries[0].vector);
-        free_irq(pf->msix_entries[0].vector, pf);
-    }
-
     /* shutdown and destroy the HMC */
     if (pf->hw.hmc.hmc_obj) {
         ret_code = i40e_shutdown_lan_hmc(&pf->hw);

@@ -9718,6 +9738,8 @@ static void i40e_shutdown(struct pci_dev *pdev)
     wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
     wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

+    i40e_clear_interrupt_scheme(pf);
+
     if (system_state == SYSTEM_POWER_OFF) {
         pci_wake_from_d3(pdev, pf->wol_en);
         pci_set_power_state(pdev, PCI_D3hot);

@@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
 {
     i40e_status status;
     enum i40e_nvmupd_cmd upd_cmd;
+    bool retry_attempt = false;

     upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);

+retry:
     switch (upd_cmd) {
     case I40E_NVMUPD_WRITE_CON:
         status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);

@@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
         *errno = -ESRCH;
         break;
     }
+
+    /* In some circumstances, a multi-write transaction takes longer
+     * than the default 3 minute timeout on the write semaphore. If
+     * the write failed with an EBUSY status, this is likely the problem,
+     * so here we try to reacquire the semaphore then retry the write.
+     * We only do one retry, then give up.
+     */
+    if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+        !retry_attempt) {
+        i40e_status old_status = status;
+        u32 old_asq_status = hw->aq.asq_last_status;
+        u32 gtime;
+
+        gtime = rd32(hw, I40E_GLVFGEN_TIMER);
+        if (gtime >= hw->nvm.hw_semaphore_timeout) {
+            i40e_debug(hw, I40E_DEBUG_ALL,
+                   "NVMUPD: write semaphore expired (%d >= %lld), retrying\n",
+                   gtime, hw->nvm.hw_semaphore_timeout);
+            i40e_release_nvm(hw);
+            status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+            if (status) {
+                i40e_debug(hw, I40E_DEBUG_ALL,
+                       "NVMUPD: write semaphore reacquire failed aq_err = %d\n",
+                       hw->aq.asq_last_status);
+                status = old_status;
+                hw->aq.asq_last_status = old_asq_status;
+            } else {
+                retry_attempt = true;
+                goto retry;
+            }
+        }
+    }

     return status;
 }

@@ -585,6 +585,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
     }
 }

+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+    void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+    return le32_to_cpu(*(volatile __le32 *)head);
+}
+
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors

@@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 **/
 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
-    u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
-            ? ring->next_to_use
-            : ring->next_to_use + ring->count);
-    return ntu - ring->next_to_clean;
+    u32 head, tail;
+
+    head = i40e_get_head(ring);
+    tail = readl(ring->tail);
+
+    if (head != tail)
+        return (head < tail) ?
+            tail - head : (tail + ring->count - head);
+
+    return 0;
 }

 /**

@@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 **/
 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 {
+    u32 tx_done = tx_ring->stats.packets;
+    u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
     u32 tx_pending = i40e_get_tx_pending(tx_ring);
     struct i40e_pf *pf = tx_ring->vsi->back;
     bool ret = false;

@@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
      * run the check_tx_hang logic with a transmit completion
      * pending but without time to complete it yet.
      */
-    if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-        (tx_pending >= I40E_MIN_DESC_PENDING)) {
+    if ((tx_done_old == tx_done) && tx_pending) {
         /* make sure it is true for two checks in a row */
         ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                        &tx_ring->state);
-    } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-           (tx_pending < I40E_MIN_DESC_PENDING) &&
-           (tx_pending > 0)) {
+    } else if (tx_done_old == tx_done &&
+           (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
         if (I40E_DEBUG_FLOW & pf->hw.debug_mask)
             dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. tx_pending %d, queue %d",
                  tx_pending, tx_ring->queue_index);
         pf->tx_sluggish_count++;
     } else {
         /* update completed stats and disarm the hang check */
-        tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+        tx_ring->tx_stats.tx_done_old = tx_done;
         clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
     }

     return ret;
 }

-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-    void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-    return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 #define WB_STRIDE 0x3

 /**

@@ -2139,6 +2145,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
     return __i40e_maybe_stop_tx(tx_ring, size);
 }

+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+                   const u8 hdr_len)
+{
+    struct skb_frag_struct *frag;
+    bool linearize = false;
+    unsigned int size = 0;
+    u16 num_frags;
+    u16 gso_segs;
+
+    num_frags = skb_shinfo(skb)->nr_frags;
+    gso_segs = skb_shinfo(skb)->gso_segs;
+
+    if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+        u16 j = 1;
+
+        if (num_frags < (I40E_MAX_BUFFER_TXD))
+            goto linearize_chk_done;
+        /* try the simple math, if we have too many frags per segment */
+        if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+            I40E_MAX_BUFFER_TXD) {
+            linearize = true;
+            goto linearize_chk_done;
+        }
+        frag = &skb_shinfo(skb)->frags[0];
+        size = hdr_len;
+        /* we might still have more fragments per segment */
+        do {
+            size += skb_frag_size(frag);
+            frag++; j++;
+            if (j == I40E_MAX_BUFFER_TXD) {
+                if (size < skb_shinfo(skb)->gso_size) {
+                    linearize = true;
+                    break;
+                }
+                j = 1;
+                size -= skb_shinfo(skb)->gso_size;
+                if (size)
+                    j++;
+                size += hdr_len;
+            }
+            num_frags--;
+        } while (num_frags);
+    } else {
+        if (num_frags >= I40E_MAX_BUFFER_TXD)
+            linearize = true;
+    }
+
+linearize_chk_done:
+    return linearize;
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on

@@ -2396,6 +2463,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
     if (tsyn)
         tx_flags |= I40E_TX_FLAGS_TSYN;

+    if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+        if (skb_linearize(skb))
+            goto out_drop;
+
     skb_tx_timestamp(skb);

     /* always enable CRC insertion offload */

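The i40e_get_tx_pending() rewrite above switches from next_to_clean/next_to_use bookkeeping to the hardware head write-back plus the software tail, with a wrap-aware subtraction. A small userspace sketch of just that arithmetic (names and values are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

/* head: HW consumer index (from head write-back); tail: SW producer
 * index; count: ring size. Same math as the new i40e_get_tx_pending(). */
static uint32_t pending(uint32_t head, uint32_t tail, uint32_t count)
{
    if (head != tail)
        return (head < tail) ?
            tail - head : (tail + count - head);
    return 0;
}

int main(void)
{
    printf("%u\n", pending(5, 9, 512));     /* 4: no wrap */
    printf("%u\n", pending(510, 3, 512));   /* 5: producer wrapped past the end */
    printf("%u\n", pending(7, 7, 512));     /* 0: ring idle */
    return 0;
}
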
@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {

 #define i40e_rx_desc i40e_32byte_rx_desc

+#define I40E_MAX_BUFFER_TXD    8
 #define I40E_MIN_TX_LEN        17
 #define I40E_MAX_DATA_PER_TXD  8192

@@ -125,6 +125,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
     }
 }

+/**
+ * i40e_get_head - Retrieve head from head writeback
+ * @tx_ring: tx ring to fetch head of
+ *
+ * Returns value of Tx ring head based on value stored
+ * in head write-back location
+ **/
+static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
+{
+    void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
+
+    return le32_to_cpu(*(volatile __le32 *)head);
+}
+
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors

@@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
 **/
 static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 {
-    u32 ntu = ((ring->next_to_clean <= ring->next_to_use)
-            ? ring->next_to_use
-            : ring->next_to_use + ring->count);
-    return ntu - ring->next_to_clean;
+    u32 head, tail;
+
+    head = i40e_get_head(ring);
+    tail = readl(ring->tail);
+
+    if (head != tail)
+        return (head < tail) ?
+            tail - head : (tail + ring->count - head);
+
+    return 0;
 }

 /**

@@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring)
 **/
 static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
 {
+    u32 tx_done = tx_ring->stats.packets;
+    u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
     u32 tx_pending = i40e_get_tx_pending(tx_ring);
     bool ret = false;

@@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring)
      * run the check_tx_hang logic with a transmit completion
      * pending but without time to complete it yet.
      */
-    if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) &&
-        (tx_pending >= I40E_MIN_DESC_PENDING)) {
+    if ((tx_done_old == tx_done) && tx_pending) {
         /* make sure it is true for two checks in a row */
         ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED,
                        &tx_ring->state);
-    } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) ||
-           !(tx_pending < I40E_MIN_DESC_PENDING) ||
-           !(tx_pending > 0)) {
+    } else if (tx_done_old == tx_done &&
+           (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) {
         /* update completed stats and disarm the hang check */
-        tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets;
+        tx_ring->tx_stats.tx_done_old = tx_done;
         clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state);
     }

     return ret;
 }

-/**
- * i40e_get_head - Retrieve head from head writeback
- * @tx_ring: tx ring to fetch head of
- *
- * Returns value of Tx ring head based on value stored
- * in head write-back location
- **/
-static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
-{
-    void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;
-
-    return le32_to_cpu(*(volatile __le32 *)head);
-}
-
 #define WB_STRIDE 0x3

 /**

@@ -1206,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
     if (err < 0)
         return err;

-    if (protocol == htons(ETH_P_IP)) {
-        iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+    iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+    ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+    if (iph->version == 4) {
         tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
         iph->tot_len = 0;
         iph->check = 0;
         tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
                          0, IPPROTO_TCP, 0);
-    } else if (skb_is_gso_v6(skb)) {
-
-        ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
-                       : ipv6_hdr(skb);
+    } else if (ipv6h->version == 6) {
         tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
         ipv6h->payload_len = 0;
         tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,

@@ -1274,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
             I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
         }
     } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
-        if (tx_flags & I40E_TX_FLAGS_TSO) {
-            *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+        *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+        if (tx_flags & I40E_TX_FLAGS_TSO)
             ip_hdr(skb)->check = 0;
-        } else {
-            *cd_tunneling |=
-                 I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
-        }
     }

     /* Now set the ctx descriptor fields */

@@ -1290,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
             ((skb_inner_network_offset(skb) -
               skb_transport_offset(skb)) >> 1) <<
             I40E_TXD_CTX_QW0_NATLEN_SHIFT;
+        if (this_ip_hdr->version == 6) {
+            tx_flags &= ~I40E_TX_FLAGS_IPV4;
+            tx_flags |= I40E_TX_FLAGS_IPV6;
+        }
+

     } else {
         network_hdr_len = skb_network_header_len(skb);

@@ -1380,6 +1386,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
     context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }

+/**
+ * i40e_chk_linearize - Check if there are more than 8 fragments per packet
+ * @skb: send buffer
+ * @tx_flags: collected send information
+ * @hdr_len: size of the packet header
+ *
+ * Note: Our HW can't scatter-gather more than 8 fragments to build
+ * a packet on the wire and so we need to figure out the cases where we
+ * need to linearize the skb.
+ **/
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
+                   const u8 hdr_len)
+{
+    struct skb_frag_struct *frag;
+    bool linearize = false;
+    unsigned int size = 0;
+    u16 num_frags;
+    u16 gso_segs;
+
+    num_frags = skb_shinfo(skb)->nr_frags;
+    gso_segs = skb_shinfo(skb)->gso_segs;
+
+    if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
+        u16 j = 1;
+
+        if (num_frags < (I40E_MAX_BUFFER_TXD))
+            goto linearize_chk_done;
+        /* try the simple math, if we have too many frags per segment */
+        if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) >
+            I40E_MAX_BUFFER_TXD) {
+            linearize = true;
+            goto linearize_chk_done;
+        }
+        frag = &skb_shinfo(skb)->frags[0];
+        size = hdr_len;
+        /* we might still have more fragments per segment */
+        do {
+            size += skb_frag_size(frag);
+            frag++; j++;
+            if (j == I40E_MAX_BUFFER_TXD) {
+                if (size < skb_shinfo(skb)->gso_size) {
+                    linearize = true;
+                    break;
+                }
+                j = 1;
+                size -= skb_shinfo(skb)->gso_size;
+                if (size)
+                    j++;
+                size += hdr_len;
+            }
+            num_frags--;
+        } while (num_frags);
+    } else {
+        if (num_frags >= I40E_MAX_BUFFER_TXD)
+            linearize = true;
+    }
+
+linearize_chk_done:
+    return linearize;
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring:  ring to send buffer on

@@ -1654,6 +1721,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
     else if (tso)
         tx_flags |= I40E_TX_FLAGS_TSO;

+    if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+        if (skb_linearize(skb))
+            goto out_drop;
+
     skb_tx_timestamp(skb);

     /* always enable CRC insertion offload */

@@ -112,6 +112,7 @@ enum i40e_dyn_idx_t {

 #define i40e_rx_desc i40e_32byte_rx_desc

+#define I40E_MAX_BUFFER_TXD    8
 #define I40E_MIN_TX_LEN        17
 #define I40E_MAX_DATA_PER_TXD  8192

@@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
 {
     u32 loopback_ok = 0;
     int i;
+    bool gro_enabled;

     priv->loopback_ok = 0;
     priv->validate_loopback = 1;
+    gro_enabled = priv->dev->features & NETIF_F_GRO;

     mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
+    priv->dev->features &= ~NETIF_F_GRO;

     /* xmit */
     if (mlx4_en_test_loopback_xmit(priv)) {

@@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv)
 mlx4_en_test_loopback_exit:

     priv->validate_loopback = 0;
+
+    if (gro_enabled)
+        priv->dev->features |= NETIF_F_GRO;
+
     mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
     return !loopback_ok;
 }

@@ -412,7 +412,6 @@ err_icm:

 EXPORT_SYMBOL_GPL(mlx4_qp_alloc);

-#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC
 int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
            enum mlx4_update_qp_attr attr,
            struct mlx4_update_qp_params *params)

@@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
     struct mlx4_vport_oper_state *vp_oper;
     struct mlx4_priv *priv;
     u32 qp_type;
-    int port;
+    int port, err = 0;

     port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
     priv = mlx4_priv(dev);

@@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
         } else {
             struct mlx4_update_qp_params params = {.flags = 0};

-            mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+            err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
+            if (err)
+                goto out;
         }
     }

@@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
         qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
         qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
     }
-    return 0;
+out:
+    return err;
 }

 static int mpt_mask(struct mlx4_dev *dev)

@@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev)
     if (mac->phydev)
         phy_start(mac->phydev);

-    init_timer(&mac->tx->clean_timer);
-    mac->tx->clean_timer.function = pasemi_mac_tx_timer;
-    mac->tx->clean_timer.data = (unsigned long)mac->tx;
-    mac->tx->clean_timer.expires = jiffies+HZ;
-    add_timer(&mac->tx->clean_timer);
+    setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
+            (unsigned long)mac->tx);
+    mod_timer(&mac->tx->clean_timer, jiffies + HZ);

     return 0;

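The pasemi hunk above (and the smc91c92 and stmmac hunks further down) all apply the same mechanical conversion from open-coded timer setup to setup_timer()/mod_timer(). A minimal standalone sketch of the conversion, using a hypothetical foo_dev struct rather than any of these drivers' types:

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical driver-private state; stands in for pasemi's mac->tx. */
struct foo_dev {
    struct timer_list clean_timer;
};

static void foo_clean_timer_fn(unsigned long data)
{
    struct foo_dev *foo = (struct foo_dev *)data;

    /* ... periodic cleanup work ... */
    mod_timer(&foo->clean_timer, jiffies + HZ);  /* re-arm for one second out */
}

static void foo_start(struct foo_dev *foo)
{
    /* setup_timer() replaces init_timer() plus the separate
     * .function and .data member stores. */
    setup_timer(&foo->clean_timer, foo_clean_timer_fn,
            (unsigned long)foo);
    /* mod_timer() both sets .expires and arms the timer, so the
     * manual .expires store and add_timer() call go away too. */
    mod_timer(&foo->clean_timer, jiffies + HZ);
}
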
@@ -354,7 +354,7 @@ struct cmd_desc_type0 {

 } __attribute__ ((aligned(64)));

-/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
 struct rcv_desc {
     __le16 reference_handle;
     __le16 reserved;

@@ -499,7 +499,7 @@ struct uni_data_desc{
 #define NETXEN_IMAGE_START      0x43000   /* compressed image */
 #define NETXEN_SECONDARY_START  0x200000  /* backup images */
 #define NETXEN_PXE_START        0x3E0000  /* PXE boot rom */
-#define NETXEN_USER_START       0x3E8000  /* Firmare info */
+#define NETXEN_USER_START       0x3E8000  /* Firmware info */
 #define NETXEN_FIXED_START      0x3F0000  /* backup of crbinit */
 #define NETXEN_USER_START_OLD   NETXEN_PXE_START /* very old flash */

@@ -314,7 +314,7 @@ struct qlcnic_fdt {
 #define QLCNIC_BRDCFG_START     0x4000    /* board config */
 #define QLCNIC_BOOTLD_START     0x10000   /* bootld */
 #define QLCNIC_IMAGE_START      0x43000   /* compressed image */
-#define QLCNIC_USER_START       0x3E8000  /* Firmare info */
+#define QLCNIC_USER_START       0x3E8000  /* Firmware info */

 #define QLCNIC_FW_VERSION_OFFSET  (QLCNIC_USER_START+0x408)
 #define QLCNIC_FW_SIZE_OFFSET     (QLCNIC_USER_START+0x40c)

@@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
     int rc = -EINVAL;

     if (!rtl_fw_format_ok(tp, rtl_fw)) {
-        netif_err(tp, ifup, dev, "invalid firwmare\n");
+        netif_err(tp, ifup, dev, "invalid firmware\n");
         goto out;
     }

@@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp)
     RTL_W8(ChipCmd, CmdReset);

     rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
-
-    netdev_reset_queue(tp->dev);
 }

 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)

@@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
     u32 status, len;
     u32 opts[2];
     int frags;
-    bool stop_queue;

     if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
         netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");

@@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,

     txd->opts2 = cpu_to_le32(opts[1]);

-    netdev_sent_queue(dev, skb->len);
-
     skb_tx_timestamp(skb);

     /* Force memory writes to complete before releasing descriptor */

@@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,

     tp->cur_tx += frags + 1;

-    stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS);
+    RTL_W8(TxPoll, NPQ);

-    if (!skb->xmit_more || stop_queue ||
-        netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) {
-        RTL_W8(TxPoll, NPQ);
-
-        mmiowb();
-    }
+    mmiowb();

-    if (stop_queue) {
+    if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
         /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
          * not miss a ring update when it notices a stopped queue.
          */

@@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
 {
     unsigned int dirty_tx, tx_left;
-    unsigned int bytes_compl = 0, pkts_compl = 0;

     dirty_tx = tp->dirty_tx;
     smp_rmb();

@@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
         rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
                      tp->TxDescArray + entry);
         if (status & LastFrag) {
-            pkts_compl++;
-            bytes_compl += tx_skb->skb->len;
+            u64_stats_update_begin(&tp->tx_stats.syncp);
+            tp->tx_stats.packets++;
+            tp->tx_stats.bytes += tx_skb->skb->len;
+            u64_stats_update_end(&tp->tx_stats.syncp);
             dev_kfree_skb_any(tx_skb->skb);
             tx_skb->skb = NULL;
         }

@@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
     }

     if (tp->dirty_tx != dirty_tx) {
-        netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
-
-        u64_stats_update_begin(&tp->tx_stats.syncp);
-        tp->tx_stats.packets += pkts_compl;
-        tp->tx_stats.bytes += bytes_compl;
-        u64_stats_update_end(&tp->tx_stats.syncp);
-
         tp->dirty_tx = dirty_tx;
         /* Sync with rtl8169_start_xmit:
          * - publish dirty_tx ring index (write barrier)

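The r8169 revert above moves the u64_stats accounting back to per-completed-frame updates. For reference, a minimal standalone sketch of the u64_stats_sync writer/reader discipline the driver relies on; the foo_* struct mirrors the shape of tp->tx_stats but is illustrative only:

#include <linux/u64_stats_sync.h>

struct foo_tx_stats {
    u64                    packets;
    u64                    bytes;
    struct u64_stats_sync  syncp;
};

/* Writer side: what the reverted rtl_tx() now does per completed frame. */
static void foo_account_tx(struct foo_tx_stats *stats, unsigned int len)
{
    u64_stats_update_begin(&stats->syncp);
    stats->packets++;
    stats->bytes += len;
    u64_stats_update_end(&stats->syncp);
}

/* Reader side: retry until no writer raced with us, so 64-bit counters
 * read consistently even on 32-bit SMP. */
static void foo_read_tx(struct foo_tx_stats *stats, u64 *packets, u64 *bytes)
{
    unsigned int start;

    do {
        start = u64_stats_fetch_begin_irq(&stats->syncp);
        *packets = stats->packets;
        *bytes = stats->bytes;
    } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
}
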
@@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = {
     .tpauser    = 1,
     .hw_swap    = 1,
     .rmiimode   = 1,
-    .shift_rd0  = 1,
 };

 static void sh_eth_set_rate_sh7724(struct net_device *ndev)

@@ -1392,6 +1391,9 @@ static void sh_eth_dev_exit(struct net_device *ndev)
     msleep(2); /* max frame time at 10 Mbps < 1250 us */
     sh_eth_get_stats(ndev);
     sh_eth_reset(ndev);
+
+    /* Set MAC address again */
+    update_mac_address(ndev);
 }

 /* free Tx skb function */

@@ -1407,6 +1409,8 @@ static int sh_eth_txfree(struct net_device *ndev)
         txdesc = &mdp->tx_ring[entry];
         if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
             break;
+        /* TACT bit must be checked before all the following reads */
+        rmb();
         /* Free the original skb. */
         if (mdp->tx_skbuff[entry]) {
             dma_unmap_single(&ndev->dev, txdesc->addr,

@@ -1444,6 +1448,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
     limit = boguscnt;
     rxdesc = &mdp->rx_ring[entry];
     while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
+        /* RACT bit must be checked before all the following reads */
+        rmb();
         desc_status = edmac_to_cpu(mdp, rxdesc->status);
         pkt_len = rxdesc->frame_length;

@@ -1455,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)

         /* In case of almost all GETHER/ETHERs, the Receive Frame State
          * (RFS) bits in the Receive Descriptor 0 are from bit 9 to
-         * bit 0. However, in case of the R8A7740, R8A779x, and
-         * R7S72100 the RFS bits are from bit 25 to bit 16. So, the
+         * bit 0. However, in case of the R8A7740 and R7S72100
+         * the RFS bits are from bit 25 to bit 16. So, the
          * driver needs right shifting by 16.
          */
         if (mdp->cd->shift_rd0)

@@ -1523,6 +1529,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
             skb_checksum_none_assert(skb);
             rxdesc->addr = dma_addr;
         }
+        wmb(); /* RACT bit must be set after all the above writes */
         if (entry >= mdp->num_rx_ring - 1)
             rxdesc->status |=
                 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);

@@ -1535,7 +1542,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
     /* If we don't need to check status, don't. -KDU */
     if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
         /* fix the values for the next receiving if RDE is set */
-        if (intr_status & EESR_RDE) {
+        if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) {
             u32 count = (sh_eth_read(ndev, RDFAR) -
                      sh_eth_read(ndev, RDLAR)) >> 4;

@@ -2174,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
     }
     spin_unlock_irqrestore(&mdp->lock, flags);

-    if (skb_padto(skb, ETH_ZLEN))
+    if (skb_put_padto(skb, ETH_ZLEN))
         return NETDEV_TX_OK;

     entry = mdp->cur_tx % mdp->num_tx_ring;

@@ -2192,6 +2199,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
     }
     txdesc->buffer_length = skb->len;

+    wmb(); /* TACT bit must be set after all the above writes */
     if (entry >= mdp->num_tx_ring - 1)
         txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
     else

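The sh_eth barrier hunks above all enforce one rule: the descriptor's active/ownership bit must be the last thing written by the producer and the first thing read by the consumer. A standalone sketch of that discipline with a toy descriptor (the foo_* names and the bit layout are illustrative, not sh_eth's):

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <asm/barrier.h>

/* Toy DMA descriptor with an OWN bit, standing in for sh_eth's rings. */
struct foo_desc {
    u32 status;   /* bit 31 = OWN: hardware owns the entry */
    u32 len;
    u32 addr;
};
#define FOO_OWN  BIT(31)

/* Producer: publish every field, then the OWN bit, in that order. */
static void foo_post(struct foo_desc *d, u32 addr, u32 len)
{
    d->addr = addr;
    d->len = len;
    wmb();               /* field writes must land before OWN is set */
    d->status |= FOO_OWN;
}

/* Consumer: test OWN first, then fence, then read the other fields. */
static int foo_reap(struct foo_desc *d, u32 *len)
{
    if (d->status & FOO_OWN)
        return -EBUSY;   /* hardware still owns it */
    rmb();               /* OWN check must precede the field reads */
    *len = d->len;
    return 0;
}
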
@@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
     u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);

     if (enable)
-        val |= 1 << rocker_port->lport;
+        val |= 1ULL << rocker_port->lport;
     else
-        val &= ~(1 << rocker_port->lport);
+        val &= ~(1ULL << rocker_port->lport);
     rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
 }

@@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker)

     alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
     rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+    if (!rocker->ports)
+        return -ENOMEM;
     for (i = 0; i < rocker->port_count; i++) {
         err = rocker_probe_port(rocker, i);
         if (err)

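The rocker 1ULL change matters because `1 << n` is evaluated in 32-bit int, so shifts of 31 or more are undefined and can never reach the high half of a u64. A small userspace illustration of the pitfall (values are made up for the demo):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t val = 0;
    int lport = 40;

    /* "1 << lport" would be computed as int here: for lport >= 31 the
     * shift is undefined behaviour and the upper 32 bits of val could
     * never be set, which is exactly the rocker bug. */

    /* Widening the constant first keeps the whole shift in 64 bits. */
    val |= 1ULL << lport;
    printf("set:   %016llx\n", (unsigned long long)val);

    val &= ~(1ULL << lport);
    printf("clear: %016llx\n", (unsigned long long)val);
    return 0;
}
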
@@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev)
     smc->packets_waiting = 0;

     smc_reset(dev);
-    init_timer(&smc->media);
-    smc->media.function = media_check;
-    smc->media.data = (u_long) dev;
-    smc->media.expires = jiffies + HZ;
-    add_timer(&smc->media);
+    setup_timer(&smc->media, media_check, (u_long)dev);
+    mod_timer(&smc->media, jiffies + HZ);

     return 0;
 } /* smc_open */

@@ -91,6 +91,10 @@ static const char version[] =

 #include "smc91x.h"

+#if defined(CONFIG_ASSABET_NEPONSET)
+#include <mach/neponset.h>
+#endif
+
 #ifndef SMC_NOWAIT
 # define SMC_NOWAIT 0
 #endif

@@ -2355,8 +2359,9 @@ static int smc_drv_probe(struct platform_device *pdev)
     ret = smc_request_attrib(pdev, ndev);
     if (ret)
         goto out_release_io;
-#if defined(CONFIG_SA1100_ASSABET)
-    neponset_ncr_set(NCR_ENET_OSC_EN);
+#if defined(CONFIG_ASSABET_NEPONSET)
+    if (machine_is_assabet() && machine_has_neponset())
+        neponset_ncr_set(NCR_ENET_OSC_EN);
 #endif
     platform_set_drvdata(pdev, ndev);
     ret = smc_enable_device(pdev);

@@ -39,14 +39,7 @@
 * Define your architecture specific bus configuration parameters here.
 */

-#if defined(CONFIG_ARCH_LUBBOCK) ||\
-    defined(CONFIG_MACH_MAINSTONE) ||\
-    defined(CONFIG_MACH_ZYLONITE) ||\
-    defined(CONFIG_MACH_LITTLETON) ||\
-    defined(CONFIG_MACH_ZYLONITE2) ||\
-    defined(CONFIG_ARCH_VIPER) ||\
-    defined(CONFIG_MACH_STARGATE2) ||\
-    defined(CONFIG_ARCH_VERSATILE)
+#if defined(CONFIG_ARM)

 #include <asm/mach-types.h>

@@ -74,95 +67,8 @@
-/* We actually can't write halfwords properly if not word aligned */
-static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
-{
-    if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) {
-        unsigned int v = val << 16;
-        v |= readl(ioaddr + (reg & ~2)) & 0xffff;
-        writel(v, ioaddr + (reg & ~2));
-    } else {
-        writew(val, ioaddr + reg);
-    }
-}
-
-#elif defined(CONFIG_SA1100_PLEB)
-/* We can only do 16-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT    1
-#define SMC_CAN_USE_16BIT   1
-#define SMC_CAN_USE_32BIT   0
-#define SMC_IO_SHIFT        0
-#define SMC_NOWAIT          1
-
-#define SMC_inb(a, r)        readb((a) + (r))
-#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
-#define SMC_inw(a, r)        readw((a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outb(v, a, r)    writeb(v, (a) + (r))
-#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
-#define SMC_outw(v, a, r)    writew(v, (a) + (r))
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS       (-1)
-
-#elif defined(CONFIG_SA1100_ASSABET)
-
-#include <mach/neponset.h>
-
-/* We can only do 8-bit reads and writes in the static memory space. */
-#define SMC_CAN_USE_8BIT    1
-#define SMC_CAN_USE_16BIT   0
-#define SMC_CAN_USE_32BIT   0
-#define SMC_NOWAIT          1
-
-/* The first two address lines aren't connected... */
-#define SMC_IO_SHIFT        2
-
-#define SMC_inb(a, r)        readb((a) + (r))
-#define SMC_outb(v, a, r)    writeb(v, (a) + (r))
-#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l))
-#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l))
-#define SMC_IRQ_FLAGS       (-1) /* from resource */
-
-#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \
-      defined(CONFIG_MACH_NOMADIK_8815NHK)
-
-#define SMC_CAN_USE_8BIT    0
-#define SMC_CAN_USE_16BIT   1
-#define SMC_CAN_USE_32BIT   0
-#define SMC_IO_SHIFT        0
-#define SMC_NOWAIT          1
-
-#define SMC_inw(a, r)        readw((a) + (r))
-#define SMC_outw(v, a, r)    writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#elif defined(CONFIG_ARCH_INNOKOM) || \
-      defined(CONFIG_ARCH_PXA_IDP) || \
-      defined(CONFIG_ARCH_RAMSES) || \
-      defined(CONFIG_ARCH_PCM027)
-
-#define SMC_CAN_USE_8BIT    1
-#define SMC_CAN_USE_16BIT   1
-#define SMC_CAN_USE_32BIT   1
-#define SMC_IO_SHIFT        0
-#define SMC_NOWAIT          1
-#define SMC_USE_PXA_DMA     1
-
-#define SMC_inb(a, r)        readb((a) + (r))
-#define SMC_inw(a, r)        readw((a) + (r))
-#define SMC_inl(a, r)        readl((a) + (r))
-#define SMC_outb(v, a, r)    writeb(v, (a) + (r))
-#define SMC_outl(v, a, r)    writel(v, (a) + (r))
-#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
-#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-#define SMC_IRQ_FLAGS       (-1) /* from resource */
-
 /* We actually can't write halfwords properly if not word aligned */
 static inline void
 SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 {
-    if (reg & 2) {
+    if ((machine_is_mainstone() || machine_is_stargate2() ||
+         machine_is_pxa_idp()) && reg & 2) {
         unsigned int v = val << 16;
         v |= readl(ioaddr + (reg & ~2)) & 0xffff;
         writel(v, ioaddr + (reg & ~2));

@@ -237,20 +143,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
 #define RPC_LSA_DEFAULT     RPC_LED_100_10
 #define RPC_LSB_DEFAULT     RPC_LED_TX_RX

-#elif defined(CONFIG_ARCH_MSM)
-
-#define SMC_CAN_USE_8BIT    0
-#define SMC_CAN_USE_16BIT   1
-#define SMC_CAN_USE_32BIT   0
-#define SMC_NOWAIT          1
-
-#define SMC_inw(a, r)        readw((a) + (r))
-#define SMC_outw(v, a, r)    writew(v, (a) + (r))
-#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
-#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
-
-#define SMC_IRQ_FLAGS       IRQF_TRIGGER_HIGH
-
 #elif defined(CONFIG_COLDFIRE)

 #define SMC_CAN_USE_8BIT    0

@@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
         spin_lock_irqsave(&priv->lock, flags);
         if (!priv->eee_active) {
             priv->eee_active = 1;
-            init_timer(&priv->eee_ctrl_timer);
-            priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
-            priv->eee_ctrl_timer.data = (unsigned long)priv;
-            priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
-            add_timer(&priv->eee_ctrl_timer);
+            setup_timer(&priv->eee_ctrl_timer,
+                    stmmac_eee_ctrl_timer,
+                    (unsigned long)priv);
+            mod_timer(&priv->eee_ctrl_timer,
+                  STMMAC_LPI_T(eee_timer));

             priv->hw->mac->set_eee_timer(priv->hw,
                              STMMAC_DEFAULT_LIT_LS,

@@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type)
         *flow_type = IP_USER_FLOW;
         break;
     default:
-        return 0;
+        return -EINVAL;
     }

-    return 1;
+    return 0;
 }

 static int niu_ethflow_to_class(int flow_type, u64 *class)

@@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
     class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >>
         TCAM_V4KEY0_CLASS_CODE_SHIFT;
     ret = niu_class_to_ethflow(class, &fsp->flow_type);
-
     if (ret < 0) {
         netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
                 parent->index);
-        ret = -EINVAL;
         goto out;
     }

@@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
     cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
                port_mask, ALE_VLAN, slave->port_vlan, 0);
     cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
-               priv->host_port, ALE_VLAN, slave->port_vlan);
+               priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
 }

 static void soft_reset_slave(struct cpsw_slave *slave)

@@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev)
     return 0;
 }

+#ifdef CONFIG_PM_SLEEP
 static int cpsw_suspend(struct device *dev)
 {
     struct platform_device *pdev = to_platform_device(dev);

@@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev)
     }
     return 0;
 }
+#endif

-static const struct dev_pm_ops cpsw_pm_ops = {
-    .suspend = cpsw_suspend,
-    .resume = cpsw_resume,
-};
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);

 static const struct of_device_id cpsw_of_mtable[] = {
     { .compatible = "ti,cpsw", },

@@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev)
     return 0;
 }

+#ifdef CONFIG_PM_SLEEP
 static int davinci_mdio_suspend(struct device *dev)
 {
     struct davinci_mdio_data *data = dev_get_drvdata(dev);

@@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev)

     return 0;
 }
+#endif

 static const struct dev_pm_ops davinci_mdio_pm_ops = {
-    .suspend_late = davinci_mdio_suspend,
-    .resume_early = davinci_mdio_resume,
+    SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
 };

 #if IS_ENABLED(CONFIG_OF)

@@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev)
     int i;
     static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };

-    if (dev->flags & IFF_ALLMULTI) {
+    if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) {
         for (i = 0; i < ETH_ALEN; i++) {
             __raw_writel(allmulti[i], &port->regs->mcast_addr[i]);
             __raw_writel(allmulti[i], &port->regs->mcast_mask[i]);

@@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q,
     } /* else everything is zero */
 }

+/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
+#define MACVTAP_RESERVE     HH_DATA_OFF(ETH_HLEN)
+
 /* Get packet from user space buffer */
 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
                 struct iov_iter *from, int noblock)
 {
-    int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN);
+    int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
     struct sk_buff *skb;
     struct macvlan_dev *vlan;
     unsigned long total_len = iov_iter_count(from);

@@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
         linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
     }

-    skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen,
+    skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
                 linear, noblock, &err);
     if (!skb)
         goto err;

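The macvtap fix reserves headroom derived from the neighbour code's HH_DATA_MOD alignment rather than the NET_IP_ALIGN magic constant, so hard-header writes cannot run off the front of the skb. A tiny userspace sketch of the arithmetic, using the kernel's own formula (HH_DATA_MOD and HH_DATA_OFF come from include/linux/netdevice.h; the program itself is illustrative):

#include <stdio.h>

#define HH_DATA_MOD  16  /* kernel constant */
#define ETH_HLEN     14

/* Headroom needed so a header of the given length ends on an
 * HH_DATA_MOD boundary - the kernel's HH_DATA_OFF(). */
#define HH_DATA_OFF(len) \
    (HH_DATA_MOD - ((((len) - 1) & (HH_DATA_MOD - 1)) + 1))

int main(void)
{
    /* For a 14-byte Ethernet header this reserves 2 bytes, which also
     * leaves the following IP header 4-byte aligned - the effect
     * NET_IP_ALIGN had, but now derived from the neighbour code's
     * actual assumption. */
    printf("MACVTAP_RESERVE = %d\n", HH_DATA_OFF(ETH_HLEN));
    return 0;
}
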
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define XGBE_PHY_CDR_RATE_PROPERTY  "amd,serdes-cdr-rate"
 #define XGBE_PHY_PQ_SKEW_PROPERTY   "amd,serdes-pq-skew"
 #define XGBE_PHY_TX_AMP_PROPERTY    "amd,serdes-tx-amp"
+#define XGBE_PHY_DFE_CFG_PROPERTY   "amd,serdes-dfe-tap-config"
+#define XGBE_PHY_DFE_ENA_PROPERTY   "amd,serdes-dfe-tap-enable"

 #define XGBE_PHY_SPEEDS         3
 #define XGBE_PHY_SPEED_1000     0

@@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SPEED_10000_BLWC        0
 #define SPEED_10000_CDR         0x7
 #define SPEED_10000_PLL         0x1
-#define SPEED_10000_PQ          0x1e
+#define SPEED_10000_PQ          0x12
 #define SPEED_10000_RATE        0x0
 #define SPEED_10000_TXAMP       0xa
 #define SPEED_10000_WORD        0x7
+#define SPEED_10000_DFE_TAP_CONFIG  0x1
+#define SPEED_10000_DFE_TAP_ENABLE  0x7f

 #define SPEED_2500_BLWC         1
 #define SPEED_2500_CDR          0x2

@@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SPEED_2500_RATE         0x1
 #define SPEED_2500_TXAMP        0xf
 #define SPEED_2500_WORD         0x1
+#define SPEED_2500_DFE_TAP_CONFIG   0x3
+#define SPEED_2500_DFE_TAP_ENABLE   0x0

 #define SPEED_1000_BLWC         1
 #define SPEED_1000_CDR          0x2

@@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define SPEED_1000_RATE         0x3
 #define SPEED_1000_TXAMP        0xf
 #define SPEED_1000_WORD         0x1
+#define SPEED_1000_DFE_TAP_CONFIG   0x3
+#define SPEED_1000_DFE_TAP_ENABLE   0x0

 /* SerDes RxTx register offsets */
+#define RXTX_REG6               0x0018
 #define RXTX_REG20              0x0050
+#define RXTX_REG22              0x0058
 #define RXTX_REG114             0x01c8
+#define RXTX_REG129             0x0204

 /* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX      8
+#define RXTX_REG6_RESETB_RXD_WIDTH      1
 #define RXTX_REG20_BLWC_ENA_INDEX       2
 #define RXTX_REG20_BLWC_ENA_WIDTH       1
 #define RXTX_REG114_PQ_REG_INDEX        9
 #define RXTX_REG114_PQ_REG_WIDTH        7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX  14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH  2

 /* Bit setting and getting macros
  * The get macro will extract the current bit field value from within

@@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = {
     SPEED_10000_TXAMP,
 };

+static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = {
+    SPEED_1000_DFE_TAP_CONFIG,
+    SPEED_2500_DFE_TAP_CONFIG,
+    SPEED_10000_DFE_TAP_CONFIG,
+};
+
+static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = {
+    SPEED_1000_DFE_TAP_ENABLE,
+    SPEED_2500_DFE_TAP_ENABLE,
+    SPEED_10000_DFE_TAP_ENABLE,
+};
+
 enum amd_xgbe_phy_an {
     AMD_XGBE_AN_READY = 0,
     AMD_XGBE_AN_PAGE_RECEIVED,

@@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv {
     u32 serdes_cdr_rate[XGBE_PHY_SPEEDS];
     u32 serdes_pq_skew[XGBE_PHY_SPEEDS];
     u32 serdes_tx_amp[XGBE_PHY_SPEEDS];
+    u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS];
+    u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS];

     /* Auto-negotiation state machine support */
     struct mutex an_mutex;

@@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
         status = XSIR0_IOREAD(priv, SIR0_STATUS);
         if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
             XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
-            return;
+            goto rx_reset;
     }

     netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
            status);
+
+rx_reset:
+    /* Perform Rx reset for the DFE changes */
+    XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0);
+    XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1);
 }

 static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)

@@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
                priv->serdes_blwc[XGBE_PHY_SPEED_10000]);
     XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
                priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]);
+    XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+               priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]);
+    XRXTX_IOWRITE(priv, RXTX_REG22,
+              priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]);

     amd_xgbe_phy_serdes_complete_ratechange(phydev);

@@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev)
                priv->serdes_blwc[XGBE_PHY_SPEED_2500]);
     XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
                priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]);
+    XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+               priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]);
+    XRXTX_IOWRITE(priv, RXTX_REG22,
+              priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]);

     amd_xgbe_phy_serdes_complete_ratechange(phydev);

@@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev)
                priv->serdes_blwc[XGBE_PHY_SPEED_1000]);
     XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG,
                priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]);
+    XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG,
+               priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]);
+    XRXTX_IOWRITE(priv, RXTX_REG22,
+              priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]);

     amd_xgbe_phy_serdes_complete_ratechange(phydev);

@@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
             sizeof(priv->serdes_tx_amp));
     }

+    if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) {
+        ret = device_property_read_u32_array(phy_dev,
+                             XGBE_PHY_DFE_CFG_PROPERTY,
+                             priv->serdes_dfe_tap_cfg,
+                             XGBE_PHY_SPEEDS);
+        if (ret) {
+            dev_err(dev, "invalid %s property\n",
+                XGBE_PHY_DFE_CFG_PROPERTY);
+            goto err_sir1;
+        }
+    } else {
+        memcpy(priv->serdes_dfe_tap_cfg,
+               amd_xgbe_phy_serdes_dfe_tap_cfg,
+               sizeof(priv->serdes_dfe_tap_cfg));
+    }
+
+    if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) {
+        ret = device_property_read_u32_array(phy_dev,
+                             XGBE_PHY_DFE_ENA_PROPERTY,
+                             priv->serdes_dfe_tap_ena,
+                             XGBE_PHY_SPEEDS);
+        if (ret) {
+            dev_err(dev, "invalid %s property\n",
+                XGBE_PHY_DFE_ENA_PROPERTY);
+            goto err_sir1;
+        }
+    } else {
+        memcpy(priv->serdes_dfe_tap_ena,
+               amd_xgbe_phy_serdes_dfe_tap_ena,
+               sizeof(priv->serdes_dfe_tap_ena));
+    }
+
     phydev->priv = priv;

     if (!priv->adev || acpi_disabled)

@@ -235,6 +235,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
     return idx < MAX_NUM_SETTINGS ? idx : MAX_NUM_SETTINGS - 1;
 }

+/**
+ * phy_check_valid - check if there is a valid PHY setting which matches
+ *                   speed, duplex, and feature mask
+ * @speed: speed to match
+ * @duplex: duplex to match
+ * @features: A mask of the valid settings
+ *
+ * Description: Returns true if there is a valid setting, false otherwise.
+ */
+static inline bool phy_check_valid(int speed, int duplex, u32 features)
+{
+    unsigned int idx;
+
+    idx = phy_find_valid(phy_find_setting(speed, duplex), features);
+
+    return settings[idx].speed == speed && settings[idx].duplex == duplex &&
+        (settings[idx].setting & features);
+}
+
 /**
  * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex
  * @phydev: the target phy_device struct

@@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
     int eee_lp, eee_cap, eee_adv;
     u32 lp, cap, adv;
     int status;
-    unsigned int idx;

     /* Read phy status to properly get the right settings */
     status = phy_read_status(phydev);

@@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)

     adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
     lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
-    idx = phy_find_setting(phydev->speed, phydev->duplex);
-    if (!(lp & adv & settings[idx].setting))
+    if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv))
         goto eee_exit_err;

     if (clk_stop_enable) {

@@ -43,9 +43,7 @@

 static struct team_port *team_port_get_rcu(const struct net_device *dev)
 {
-    struct team_port *port = rcu_dereference(dev->rx_handler_data);
-
-    return team_port_exists(dev) ? port : NULL;
+    return rcu_dereference(dev->rx_handler_data);
 }

 static struct team_port *team_port_get_rtnl(const struct net_device *dev)

@@ -161,6 +161,7 @@ config USB_NET_AX8817X
         * Linksys USB200M
         * Netgear FA120
         * Sitecom LN-029
+        * Sitecom LN-028
         * Intellinet USB 2.0 Ethernet
         * ST Lab USB 2.0 Ethernet
         * TrendNet TU2-ET100

@@ -978,6 +978,10 @@ static const struct usb_device_id products [] = {
     // Sitecom LN-031 "USB 2.0 10/100/1000 Ethernet adapter"
     USB_DEVICE (0x0df6, 0x0056),
     .driver_info = (unsigned long) &ax88178_info,
+}, {
+    // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter"
+    USB_DEVICE (0x0df6, 0x061c),
+    .driver_info = (unsigned long) &ax88178_info,
 }, {
     // corega FEther USB2-TX
     USB_DEVICE (0x07aa, 0x0017),

@@ -1594,7 +1594,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg)
         }
         cprev = cnow;
     }
-    current->state = TASK_RUNNING;
+    __set_current_state(TASK_RUNNING);
     remove_wait_queue(&tiocmget->waitq, &wait);

     return ret;

@@ -134,6 +134,11 @@ static const struct usb_device_id products [] = {
 }, {
 	USB_DEVICE(0x050d, 0x258a),     /* Belkin F5U258/F5U279 (PL-25A1) */
 	.driver_info = (unsigned long) &prolific_info,
+}, {
+	USB_DEVICE(0x3923, 0x7825),     /* National Instruments USB
+					 * Host-to-Host Cable
+					 */
+	.driver_info = (unsigned long) &prolific_info,
 },
 
 	{ },		// END

@@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file,
 	spin_lock_irqsave(&cosa->lock, flags);
 	add_wait_queue(&chan->rxwaitq, &wait);
 	while (!chan->rx_status) {
-		current->state = TASK_INTERRUPTIBLE;
+		set_current_state(TASK_INTERRUPTIBLE);
 		spin_unlock_irqrestore(&cosa->lock, flags);
 		schedule();
 		spin_lock_irqsave(&cosa->lock, flags);
 		if (signal_pending(current) && chan->rx_status == 0) {
 			chan->rx_status = 1;
 			remove_wait_queue(&chan->rxwaitq, &wait);
-			current->state = TASK_RUNNING;
+			__set_current_state(TASK_RUNNING);
 			spin_unlock_irqrestore(&cosa->lock, flags);
 			mutex_unlock(&chan->rlock);
 			return -ERESTARTSYS;
 		}
 	}
 	remove_wait_queue(&chan->rxwaitq, &wait);
-	current->state = TASK_RUNNING;
+	__set_current_state(TASK_RUNNING);
 	kbuf = chan->rxdata;
 	count = chan->rxsize;
 	spin_unlock_irqrestore(&cosa->lock, flags);
@@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file,
 	spin_lock_irqsave(&cosa->lock, flags);
 	add_wait_queue(&chan->txwaitq, &wait);
 	while (!chan->tx_status) {
-		current->state = TASK_INTERRUPTIBLE;
+		set_current_state(TASK_INTERRUPTIBLE);
 		spin_unlock_irqrestore(&cosa->lock, flags);
 		schedule();
 		spin_lock_irqsave(&cosa->lock, flags);
 		if (signal_pending(current) && chan->tx_status == 0) {
 			chan->tx_status = 1;
 			remove_wait_queue(&chan->txwaitq, &wait);
-			current->state = TASK_RUNNING;
+			__set_current_state(TASK_RUNNING);
 			chan->tx_status = 1;
 			spin_unlock_irqrestore(&cosa->lock, flags);
 			up(&chan->wsem);
@@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file,
 		}
 	}
 	remove_wait_queue(&chan->txwaitq, &wait);
-	current->state = TASK_RUNNING;
+	__set_current_state(TASK_RUNNING);
 	up(&chan->wsem);
 	spin_unlock_irqrestore(&cosa->lock, flags);
 	kfree(kbuf);

@@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
 		goto nla_put_failure;
 
 	genlmsg_end(skb, msg_head);
-	genlmsg_unicast(&init_net, skb, dst_portid);
+	if (genlmsg_unicast(&init_net, skb, dst_portid))
+		goto err_free_txskb;
 
 	/* Enqueue the packet */
 	skb_queue_tail(&data->pending, my_skb);
@@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
 	return;
 
 nla_put_failure:
+	nlmsg_free(skb);
+err_free_txskb:
 	printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__);
 	ieee80211_free_txskb(hw, my_skb);
 	data->tx_failed++;

@@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
 	unsigned long flags;
 
 	do {
+		int notify;
+
 		spin_lock_irqsave(&queue->response_lock, flags);
 		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
+		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
 		spin_unlock_irqrestore(&queue->response_lock, flags);
+		if (notify)
+			notify_remote_via_irq(queue->tx_irq);
+
 		if (cons == end)
 			break;
 		txp = RING_GET_REQUEST(&queue->tx, cons++);
@@ -1649,17 +1655,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
 {
 	struct pending_tx_info *pending_tx_info;
 	pending_ring_idx_t index;
+	int notify;
 	unsigned long flags;
 
 	pending_tx_info = &queue->pending_tx_info[pending_idx];
 
 	spin_lock_irqsave(&queue->response_lock, flags);
 
 	make_tx_response(queue, &pending_tx_info->req, status);
-	index = pending_index(queue->pending_prod);
+
+	/* Release the pending index before pusing the Tx response so
+	 * its available before a new Tx request is pushed by the
+	 * frontend.
+	 */
+	index = pending_index(queue->pending_prod++);
 	queue->pending_ring[index] = pending_idx;
-	/* TX shouldn't use the index before we give it back here */
-	mb();
-	queue->pending_prod++;
+
+	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
 
 	spin_unlock_irqrestore(&queue->response_lock, flags);
+
+	if (notify)
+		notify_remote_via_irq(queue->tx_irq);
 }
 
 
@@ -1669,7 +1686,6 @@ static void make_tx_response(struct xenvif_queue *queue,
 {
 	RING_IDX i = queue->tx.rsp_prod_pvt;
 	struct xen_netif_tx_response *resp;
-	int notify;
 
 	resp = RING_GET_RESPONSE(&queue->tx, i);
 	resp->id = txp->id;
@@ -1679,9 +1695,6 @@ static void make_tx_response(struct xenvif_queue *queue,
 		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
 
 	queue->tx.rsp_prod_pvt = ++i;
-	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
-	if (notify)
-		notify_remote_via_irq(queue->tx_irq);
 }
 
 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,

@@ -591,11 +591,6 @@ static void handle_rx(struct vhost_net *net)
 			 * TODO: support TSO.
 			 */
 			iov_iter_advance(&msg.msg_iter, vhost_hlen);
-		} else {
-			/* It'll come from socket; we'll need to patch
-			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
-			 */
-			iov_iter_advance(&fixup, sizeof(hdr));
 		}
 		err = sock->ops->recvmsg(NULL, sock, &msg,
 					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
@@ -609,17 +604,25 @@ static void handle_rx(struct vhost_net *net)
 			continue;
 		}
 		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
-		if (unlikely(vhost_hlen) &&
-		    copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) {
-			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
-			       vq->iov->iov_base);
-			break;
+		if (unlikely(vhost_hlen)) {
+			if (copy_to_iter(&hdr, sizeof(hdr),
+					 &fixup) != sizeof(hdr)) {
+				vq_err(vq, "Unable to write vnet_hdr "
+				       "at addr %p\n", vq->iov->iov_base);
+				break;
+			}
+		} else {
+			/* Header came from socket; we'll need to patch
+			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
+			 */
+			iov_iter_advance(&fixup, sizeof(hdr));
 		}
 		/* TODO: Should check and handle checksum. */
 
 		num_buffers = cpu_to_vhost16(vq, headcount);
 		if (likely(mergeable) &&
-		    copy_to_iter(&num_buffers, 2, &fixup) != 2) {
+		    copy_to_iter(&num_buffers, sizeof num_buffers,
+				 &fixup) != sizeof num_buffers) {
 			vq_err(vq, "Failed num_buffers write");
 			vhost_discard_vq_desc(vq, headcount);
 			break;

@@ -427,7 +427,7 @@ struct mlx4_wqe_inline_seg {
 
 enum mlx4_update_qp_attr {
 	MLX4_UPDATE_QP_SMAC		= 1 << 0,
-	MLX4_UPDATE_QP_VSD		= 1 << 2,
+	MLX4_UPDATE_QP_VSD		= 1 << 1,
 	MLX4_UPDATE_QP_SUPPORTED_ATTRS	= (1 << 2) - 1
 };
 

@@ -2342,6 +2342,7 @@ struct gro_remcsum {
 
 static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
 {
+	grc->offset = 0;
 	grc->delta = 0;
 }
 

@@ -54,10 +54,11 @@ struct rhash_head {
  * @buckets: size * hash buckets
  */
 struct bucket_table {
-	size_t			size;
-	unsigned int		locks_mask;
-	spinlock_t		*locks;
-	struct rhash_head __rcu	*buckets[];
+	size_t			size;
+	unsigned int		locks_mask;
+	spinlock_t		*locks;
+
+	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
 };
 
 typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
@@ -78,12 +79,6 @@ struct rhashtable;
  * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
  * @hashfn: Function to hash key
  * @obj_hashfn: Function to hash object
- * @grow_decision: If defined, may return true if table should expand
- * @shrink_decision: If defined, may return true if table should shrink
- *
- * Note: when implementing the grow and shrink decision function, min/max
- * shift must be enforced, otherwise, resizing watermarks they set may be
- * useless.
  */
 struct rhashtable_params {
 	size_t			nelem_hint;
@@ -97,10 +92,6 @@ struct rhashtable_params {
 	size_t			locks_mul;
 	rht_hashfn_t		hashfn;
 	rht_obj_hashfn_t	obj_hashfn;
-	bool			(*grow_decision)(const struct rhashtable *ht,
-						 size_t new_size);
-	bool			(*shrink_decision)(const struct rhashtable *ht,
-						   size_t new_size);
 };
 
 /**
@@ -192,9 +183,6 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
 void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
 bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);
 
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
-
 int rhashtable_expand(struct rhashtable *ht);
 int rhashtable_shrink(struct rhashtable *ht);
 

@@ -171,7 +171,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos);
  * @return Checksum of buffer.
  */
 
-u16 cfpkt_iterate(struct cfpkt *pkt,
+int cfpkt_iterate(struct cfpkt *pkt,
 		  u16 (*iter_func)(u16 chks, void *buf, u16 len),
 		  u16 data);
 

@@ -9,3 +9,4 @@ header-y += tc_pedit.h
 header-y += tc_skbedit.h
 header-y += tc_vlan.h
 header-y += tc_bpf.h
+header-y += tc_connmark.h

@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/mm.h>
@@ -217,15 +218,15 @@ static void bucket_table_free(const struct bucket_table *tbl)
 static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
 					       size_t nbuckets)
 {
-	struct bucket_table *tbl;
+	struct bucket_table *tbl = NULL;
 	size_t size;
 	int i;
 
 	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+		tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
 	if (tbl == NULL)
 		tbl = vzalloc(size);
-
 	if (tbl == NULL)
 		return NULL;
 
@@ -247,26 +248,24 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
  * @ht:		hash table
  * @new_size:	new table size
  */
-bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
 {
 	/* Expand table when exceeding 75% load */
 	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
-	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
+	       (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift);
 }
-EXPORT_SYMBOL_GPL(rht_grow_above_75);
 
 /**
  * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
  * @ht:		hash table
  * @new_size:	new table size
  */
-bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
 {
 	/* Shrink table beneath 30% load */
 	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
 	       (atomic_read(&ht->shift) > ht->p.min_shift);
 }
-EXPORT_SYMBOL_GPL(rht_shrink_below_30);
 
 static void lock_buckets(struct bucket_table *new_tbl,
 			 struct bucket_table *old_tbl, unsigned int hash)
@@ -414,6 +413,7 @@ int rhashtable_expand(struct rhashtable *ht)
 			}
 		}
 		unlock_buckets(new_tbl, old_tbl, new_hash);
+		cond_resched();
 	}
 
 	/* Unzip interleaved hash chains */
@@ -437,6 +437,7 @@ int rhashtable_expand(struct rhashtable *ht)
 				complete = false;
 
 			unlock_buckets(new_tbl, old_tbl, old_hash);
+			cond_resched();
 		}
 	}
 
@@ -495,6 +496,7 @@ int rhashtable_shrink(struct rhashtable *ht)
 				   tbl->buckets[new_hash + new_tbl->size]);
 
 		unlock_buckets(new_tbl, tbl, new_hash);
+		cond_resched();
 	}
 
 	/* Publish the new, valid hash table */
@@ -528,31 +530,19 @@ static void rht_deferred_worker(struct work_struct *work)
 	list_for_each_entry(walker, &ht->walkers, list)
 		walker->resize = true;
 
-	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
+	if (rht_grow_above_75(ht, tbl->size))
 		rhashtable_expand(ht);
-	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
+	else if (rht_shrink_below_30(ht, tbl->size))
 		rhashtable_shrink(ht);
-
 unlock:
 	mutex_unlock(&ht->mutex);
 }
 
-static void rhashtable_wakeup_worker(struct rhashtable *ht)
-{
-	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
-	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
-	size_t size = tbl->size;
-
-	/* Only adjust the table if no resizing is currently in progress. */
-	if (tbl == new_tbl &&
-	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
-	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
-		schedule_work(&ht->run_work);
-}
-
 static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
-				struct bucket_table *tbl, u32 hash)
+				struct bucket_table *tbl,
+				const struct bucket_table *old_tbl, u32 hash)
 {
+	bool no_resize_running = tbl == old_tbl;
 	struct rhash_head *head;
 
 	hash = rht_bucket_index(tbl, hash);
@@ -568,8 +558,8 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
 	rcu_assign_pointer(tbl->buckets[hash], obj);
 
 	atomic_inc(&ht->nelems);
-
-	rhashtable_wakeup_worker(ht);
+	if (no_resize_running && rht_grow_above_75(ht, tbl->size))
+		schedule_work(&ht->run_work);
 }
 
 /**
@@ -599,7 +589,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
 	hash = obj_raw_hashfn(ht, rht_obj(ht, obj));
 
 	lock_buckets(tbl, old_tbl, hash);
-	__rhashtable_insert(ht, obj, tbl, hash);
+	__rhashtable_insert(ht, obj, tbl, old_tbl, hash);
 	unlock_buckets(tbl, old_tbl, hash);
 
 	rcu_read_unlock();
@@ -681,8 +671,11 @@ found:
 	unlock_buckets(new_tbl, old_tbl, new_hash);
 
 	if (ret) {
+		bool no_resize_running = new_tbl == old_tbl;
+
 		atomic_dec(&ht->nelems);
-		rhashtable_wakeup_worker(ht);
+		if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size))
+			schedule_work(&ht->run_work);
 	}
 
 	rcu_read_unlock();
@@ -852,7 +845,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
 		goto exit;
 	}
 
-	__rhashtable_insert(ht, obj, new_tbl, new_hash);
+	__rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash);
 
 exit:
 	unlock_buckets(new_tbl, old_tbl, new_hash);
@@ -894,6 +887,9 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter)
 	if (!iter->walker)
 		return -ENOMEM;
 
+	INIT_LIST_HEAD(&iter->walker->list);
+	iter->walker->resize = false;
+
 	mutex_lock(&ht->mutex);
 	list_add(&iter->walker->list, &ht->walkers);
 	mutex_unlock(&ht->mutex);
@@ -1111,8 +1107,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
 	if (!ht->p.hash_rnd)
 		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
 
-	if (ht->p.grow_decision || ht->p.shrink_decision)
-		INIT_WORK(&ht->run_work, rht_deferred_worker);
+	INIT_WORK(&ht->run_work, rht_deferred_worker);
 
 	return 0;
 }
@@ -1130,8 +1125,7 @@ void rhashtable_destroy(struct rhashtable *ht)
 {
 	ht->being_destroyed = true;
 
-	if (ht->p.grow_decision || ht->p.shrink_decision)
-		cancel_work_sync(&ht->run_work);
+	cancel_work_sync(&ht->run_work);
 
 	mutex_lock(&ht->mutex);
 	bucket_table_free(rht_dereference(ht->tbl, ht));

@@ -191,18 +191,18 @@ error:
 	return err;
 }
 
+static struct rhashtable ht;
+
 static int __init test_rht_init(void)
 {
-	struct rhashtable ht;
 	struct rhashtable_params params = {
 		.nelem_hint = TEST_HT_SIZE,
 		.head_offset = offsetof(struct test_obj, node),
 		.key_offset = offsetof(struct test_obj, value),
 		.key_len = sizeof(int),
 		.hashfn = jhash,
+		.max_shift = 1, /* we expand/shrink manually here */
 		.nulls_base = (3U << RHT_BASE_SHIFT),
-		.grow_decision = rht_grow_above_75,
-		.shrink_decision = rht_shrink_below_30,
 	};
 	int err;
 
@@ -222,6 +222,11 @@ static int __init test_rht_init(void)
 	return err;
 }
 
+static void __exit test_rht_exit(void)
+{
+}
+
 module_init(test_rht_init);
+module_exit(test_rht_exit);
 
 MODULE_LICENSE("GPL v2");

@@ -190,6 +190,8 @@ static int __init br_init(void)
 {
 	int err;
 
+	BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
+
 	err = stp_proto_register(&br_stp_proto);
 	if (err < 0) {
 		pr_err("bridge: can't register sap for STP\n");

@@ -84,7 +84,7 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
 	u16 tmp;
 	u16 len;
 	u16 hdrchks;
-	u16 pktchks;
+	int pktchks;
 	struct cffrml *this;
 	this = container_obj(layr);
 

@@ -255,9 +255,9 @@ inline u16 cfpkt_getlen(struct cfpkt *pkt)
 	return skb->len;
 }
 
-inline u16 cfpkt_iterate(struct cfpkt *pkt,
-			 u16 (*iter_func)(u16, void *, u16),
-			 u16 data)
+int cfpkt_iterate(struct cfpkt *pkt,
+		  u16 (*iter_func)(u16, void *, u16),
+		  u16 data)
 {
 	/*
 	 * Don't care about the performance hit of linearizing,

@@ -711,24 +711,18 @@ static unsigned char nas[21] = {
 
 COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
 {
-	if (flags & MSG_CMSG_COMPAT)
-		return -EINVAL;
 	return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
 COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
 		       unsigned int, vlen, unsigned int, flags)
 {
-	if (flags & MSG_CMSG_COMPAT)
-		return -EINVAL;
 	return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
 			      flags | MSG_CMSG_COMPAT);
 }
 
 COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags)
 {
-	if (flags & MSG_CMSG_COMPAT)
-		return -EINVAL;
 	return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT);
 }
 
@@ -751,9 +745,6 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg,
 	int datagrams;
 	struct timespec ktspec;
 
-	if (flags & MSG_CMSG_COMPAT)
-		return -EINVAL;
-
 	if (timeout == NULL)
 		return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen,
 				      flags | MSG_CMSG_COMPAT, NULL);

@@ -946,7 +946,7 @@ bool dev_valid_name(const char *name)
 		return false;
 
 	while (*name) {
-		if (*name == '/' || isspace(*name))
+		if (*name == '/' || *name == ':' || isspace(*name))
 			return false;
 		name++;
 	}

@@ -98,6 +98,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN]
 	[NETIF_F_RXALL_BIT] =            "rx-all",
 	[NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
 	[NETIF_F_BUSY_POLL_BIT] =        "busy-poll",
+	[NETIF_F_HW_SWITCH_OFFLOAD_BIT] = "hw-switch-offload",
 };
 
 static const char

@@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size)
 	return 0;
 
 nla_put_failure:
+	kfree(d->xstats);
+	d->xstats = NULL;
+	d->xstats_len = 0;
 	spin_unlock_bh(d->lock);
 	return -1;
 }
@@ -305,7 +308,9 @@ int
 gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
 {
 	if (d->compat_xstats) {
-		d->xstats = st;
+		d->xstats = kmemdup(st, len, GFP_ATOMIC);
+		if (!d->xstats)
+			goto err_out;
 		d->xstats_len = len;
 	}
 
@@ -313,6 +318,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
 		return gnet_stats_copy(d, TCA_STATS_APP, st, len);
 
 	return 0;
+
+err_out:
+	d->xstats_len = 0;
+	spin_unlock_bh(d->lock);
+	return -1;
 }
 EXPORT_SYMBOL(gnet_stats_copy_app);
 
@@ -345,6 +355,9 @@ gnet_stats_finish_copy(struct gnet_dump *d)
 			return -1;
 	}
 
+	kfree(d->xstats);
+	d->xstats = NULL;
+	d->xstats_len = 0;
 	spin_unlock_bh(d->lock);
 	return 0;
 }

@@ -1134,6 +1134,9 @@ static ssize_t pktgen_if_write(struct file *file,
 			return len;
 
 		i += len;
+		if ((value > 1) &&
+		    (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING)))
+			return -ENOTSUPP;
 		pkt_dev->burst = value < 1 ? 1 : value;
 		sprintf(pg_result, "OK: burst=%d", pkt_dev->burst);
 		return count;

@@ -1300,7 +1300,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	s_h = cb->args[0];
 	s_idx = cb->args[1];
 
-	rcu_read_lock();
 	cb->seq = net->dev_base_seq;
 
 	/* A hack to preserve kernel<->userspace interface.
@@ -1322,7 +1321,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
 	for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
 		idx = 0;
 		head = &net->dev_index_head[h];
-		hlist_for_each_entry_rcu(dev, head, index_hlist) {
+		hlist_for_each_entry(dev, head, index_hlist) {
 			if (idx < s_idx)
 				goto cont;
 			err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -1344,7 +1343,6 @@ cont:
 		}
 	}
 out:
-	rcu_read_unlock();
 	cb->args[1] = idx;
 	cb->args[0] = h;
 
@@ -2012,8 +2010,8 @@ replay:
 	}
 
 	if (1) {
-		struct nlattr *attr[ops ? ops->maxtype + 1 : 0];
-		struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 0];
+		struct nlattr *attr[ops ? ops->maxtype + 1 : 1];
+		struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 1];
 		struct nlattr **data = NULL;
 		struct nlattr **slave_data = NULL;
 		struct net *dest_net, *link_net = NULL;
@@ -2122,6 +2120,10 @@ replay:
 		if (IS_ERR(dest_net))
 			return PTR_ERR(dest_net);
 
+		err = -EPERM;
+		if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN))
+			goto out;
+
 		if (tb[IFLA_LINK_NETNSID]) {
 			int id = nla_get_s32(tb[IFLA_LINK_NETNSID]);
 
@@ -2130,6 +2132,9 @@ replay:
 				err = -EINVAL;
 				goto out;
 			}
+			err = -EPERM;
+			if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN))
+				goto out;
 		}
 
 		dev = rtnl_create_link(link_net ? : dest_net, ifname,

@@ -3621,13 +3621,14 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
 {
 	struct sk_buff_head *q = &sk->sk_error_queue;
 	struct sk_buff *skb, *skb_next;
+	unsigned long flags;
 	int err = 0;
 
-	spin_lock_bh(&q->lock);
+	spin_lock_irqsave(&q->lock, flags);
 	skb = __skb_dequeue(q);
 	if (skb && (skb_next = skb_peek(q)))
 		err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
-	spin_unlock_bh(&q->lock);
+	spin_unlock_irqrestore(&q->lock, flags);
 
 	sk->sk_err = err;
 	if (err)

@@ -1062,7 +1062,7 @@ source_ok:
 	if (decnet_debug_level & 16)
 		printk(KERN_DEBUG
 		       "dn_route_output_slow: initial checks complete."
-		       " dst=%o4x src=%04x oif=%d try_hard=%d\n",
+		       " dst=%04x src=%04x oif=%d try_hard=%d\n",
 		       le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
 		       fld.flowidn_oif, try_hard);
 

@@ -359,8 +359,11 @@ static void hsr_dev_destroy(struct net_device *hsr_dev)
 	struct hsr_port *port;
 
 	hsr = netdev_priv(hsr_dev);
+
+	rtnl_lock();
 	hsr_for_each_port(hsr, port)
 		hsr_del_port(port);
+	rtnl_unlock();
 
 	del_timer_sync(&hsr->prune_timer);
 	del_timer_sync(&hsr->announce_timer);

@@ -36,6 +36,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
 			return NOTIFY_DONE;	/* Not an HSR device */
 		hsr = netdev_priv(dev);
 		port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+		if (port == NULL) {
+			/* Resend of notification concerning removed device? */
+			return NOTIFY_DONE;
+		}
 	} else {
 		hsr = port->hsr;
 	}

@@ -181,8 +181,10 @@ void hsr_del_port(struct hsr_port *port)
 	list_del_rcu(&port->port_list);
 
 	if (port != master) {
-		netdev_update_features(master->dev);
-		dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+		if (master != NULL) {
+			netdev_update_features(master->dev);
+			dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+		}
 		netdev_rx_handler_unregister(port->dev);
 		dev_set_promiscuity(port->dev, -1);
 	}
@@ -192,5 +194,7 @@ void hsr_del_port(struct hsr_port *port)
 	 */
 
 	synchronize_rcu();
-	dev_put(port->dev);
+
+	if (port != master)
+		dev_put(port->dev);
 }

@@ -664,7 +664,7 @@ struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
 	if (skb->protocol != htons(ETH_P_IP))
 		return skb;
 
-	if (!skb_copy_bits(skb, 0, &iph, sizeof(iph)))
+	if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
 		return skb;
 
 	if (iph.ihl < 5 || iph.version != 4)

@@ -888,7 +888,8 @@ static int __ip_append_data(struct sock *sk,
 	cork->length += length;
 	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
+	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
+	    (sk->sk_type == SOCK_DGRAM)) {
 		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
 					 hh_len, fragheaderlen, transhdrlen,
 					 maxfraglen, flags);

@@ -4770,7 +4770,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
 		return false;
 
 	/* If we filled the congestion window, do not expand.  */
-	if (tp->packets_out >= tp->snd_cwnd)
+	if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
 		return false;
 
 	return true;

@@ -4903,6 +4903,21 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
 	return ret;
 }
 
+static
+int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
+			void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	struct inet6_dev *idev = ctl->extra1;
+	int min_mtu = IPV6_MIN_MTU;
+	struct ctl_table lctl;
+
+	lctl = *ctl;
+	lctl.extra1 = &min_mtu;
+	lctl.extra2 = idev ? &idev->dev->mtu : NULL;
+
+	return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
+}
+
 static void dev_disable_change(struct inet6_dev *idev)
 {
 	struct netdev_notifier_info info;
@@ -5054,7 +5069,7 @@ static struct addrconf_sysctl_table
 		.data		= &ipv6_devconf.mtu6,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= addrconf_sysctl_mtu,
 	},
 	{
 		.procname	= "accept_ra",

@@ -1298,7 +1298,8 @@ emsgsize:
 	if (((length > mtu) ||
 	     (skb && skb_is_gso(skb))) &&
 	    (sk->sk_protocol == IPPROTO_UDP) &&
-	    (rt->dst.dev->features & NETIF_F_UFO)) {
+	    (rt->dst.dev->features & NETIF_F_UFO) &&
+	    (sk->sk_type == SOCK_DGRAM)) {
 		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
 					  hh_len, fragheaderlen,
 					  transhdrlen, mtu, flags, rt);

@@ -811,7 +811,7 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout)
 			break;
 		}
 		spin_unlock_irqrestore(&self->spinlock, flags);
-		current->state = TASK_RUNNING;
+		__set_current_state(TASK_RUNNING);
 	}
 
 	/*

@@ -305,7 +305,7 @@ irnet_ctrl_read(irnet_socket * ap,
 
   /* Put ourselves on the wait queue to be woken up */
   add_wait_queue(&irnet_events.rwait, &wait);
-  current->state = TASK_INTERRUPTIBLE;
+  set_current_state(TASK_INTERRUPTIBLE);
   for(;;)
     {
       /* If there is unread events */
@@ -321,7 +321,7 @@ irnet_ctrl_read(irnet_socket * ap,
       /* Yield and wait to be woken up */
       schedule();
     }
-  current->state = TASK_RUNNING;
+  __set_current_state(TASK_RUNNING);
   remove_wait_queue(&irnet_events.rwait, &wait);
 
   /* Did we got it ? */

@@ -1508,6 +1508,8 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata)
 	if (ieee80211_chanctx_refcount(local, ctx) == 0)
 		ieee80211_free_chanctx(local, ctx);
 
+	sdata->radar_required = false;
+
 	/* Unreserving may ready an in-place reservation. */
 	if (use_reserved_switch)
 		ieee80211_vif_use_reserved_switch(local);
@@ -1566,6 +1568,9 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
 	ieee80211_recalc_smps_chanctx(local, ctx);
 	ieee80211_recalc_radar_chanctx(local, ctx);
  out:
+	if (ret)
+		sdata->radar_required = false;
+
 	mutex_unlock(&local->chanctx_mtx);
 	return ret;
 }

@@ -373,7 +373,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
 			rate++;
 		mi->sample_deferred++;
 	} else {
-		if (!msr->sample_limit != 0)
+		if (!msr->sample_limit)
 			return;
 
 		mi->sample_packets++;

@@ -566,6 +566,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
 		if (tx->sdata->control_port_no_encrypt)
 			info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
 		info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+		info->flags |= IEEE80211_TX_CTL_USE_MINRATE;
 	}
 
 	return TX_CONTINUE;