Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) e1000e computes header length incorrectly wrt vlans, fix from Vlad Yasevich.

 2) ns_capable() check in sock_diag netlink code, from Andrew Lutomirski.

 3) Fix invalid queue pairs handling in virtio_net, from Amos Kong.

 4) Checksum offloading busted in sxgbe driver due to incorrect descriptor layout, fix from Byungho An.

 5) Fix build failure with SMC_DEBUG set to 2 or larger, from Zi Shen Lim.

 6) Fix uninitialized A and X registers in BPF interpreter, from Alexei Starovoitov.

 7) Fix arch dependencies of the Cadence driver.

 8) Fix netlink capabilities checking tree-wide, from Eric W. Biederman.

 9) Don't dump IFLA_VF_PORTS if the netlink request didn't ask for it in IFLA_EXT_MASK, from David Gibson.

10) IPv6 FIB dump restart doesn't handle table changes that happen in the meantime, causing the code to loop forever or emit duplicates, fix from Kumar Sundararajan.

11) Memory leak on VF removal in bnx2x, from Yuval Mintz.

12) Bug fixes for the new Altera TSE driver from Vince Bridgers.

13) Fix route lookup key in SCTP, from Xugeng Zhang.

14) Use BH-blocking spinlocks in SLIP, as per a similar fix to the CAN/SLCAN driver. From Oliver Hartkopp.

15) TCP doesn't bump retransmit counters in some code paths, fix from Eric Dumazet.

16) Clamp delayed_ack in tcp_cubic to prevent theoretical divides by zero. Fix from Liu Yu.

17) Fix locking imbalance in error paths of the HHF packet scheduler, from John Fastabend.

18) Properly reference the transport module when vsock_core_init() runs, from Andy King.

19) Fix buffer overflow in the cdc_ncm driver, from Bjørn Mork.

20) IP_ECN_decapsulate() doesn't see a correct SKB network header in ip_tunnel_rcv(), fix from Ying Cai.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (132 commits)
  net: macb: Fix race between HW and driver
  net: macb: Remove 'unlikely' optimization
  net: macb: Re-enable RX interrupt only when RX is done
  net: macb: Clear interrupt flags
  net: macb: Pass same size to DMA_UNMAP as used for DMA_MAP
  ip_tunnel: Set network header properly for IP_ECN_decapsulate()
  e1000e: Restrict MDIO Slow Mode workaround to relevant parts
  e1000e: Fix issue with link flap on 82579
  e1000e: Expand workaround for 10Mb HD throughput bug
  e1000e: Workaround for dropped packets in Gig/100 speeds on 82579
  net/mlx4_core: Don't issue PCIe speed/width checks for VFs
  net/mlx4_core: Load the Eth driver first
  net/mlx4_core: Fix slave id computation for single port VF
  net/mlx4_core: Adjust port number in qp_attach wrapper when detaching
  net: cdc_ncm: fix buffer overflow
  Altera TSE: ALTERA_TSE should depend on HAS_DMA
  vsock: Make transport the proto owner
  net: sched: lock imbalance in hhf qdisc
  net: mvmdio: Check for a valid interrupt instead of an error
  net phy: Check for aneg completion before setting state to PHY_RUNNING
  ...
This commit is contained in:
commit 2080cee435
@@ -4,11 +4,15 @@ Required properties:
 - compatible: Should be "snps,arc-emac"
 - reg: Address and length of the register set for the device
 - interrupts: Should contain the EMAC interrupts
-- clock-frequency: CPU frequency. It is needed to calculate and set polling
-period of EMAC.
 - max-speed: see ethernet.txt file in the same directory.
 - phy: see ethernet.txt file in the same directory.
+
+Clock handling:
+The clock frequency is needed to calculate and set polling period of EMAC.
+It must be provided by one of:
+- clock-frequency: CPU frequency.
+- clocks: reference to the clock supplying the EMAC.

 Child nodes of the driver are the individual PHY devices connected to the
 MDIO bus. They must have a "reg" property given the PHY address on the MDIO bus.

@@ -19,7 +23,11 @@ Examples:
 	reg = <0xc0fc2000 0x3c>;
 	interrupts = <6>;
 	mac-address = [ 00 11 22 33 44 55 ];
+
 	clock-frequency = <80000000>;
+	/* or */
+	clocks = <&emac_clock>;
+
 	max-speed = <100>;
 	phy = <&phy0>;
@@ -429,7 +429,7 @@ RPS and RFS were introduced in kernel 2.6.35. XPS was incorporated into
 (therbert@google.com)

 Accelerated RFS was introduced in 2.6.35. Original patches were
-submitted by Ben Hutchings (bhutchings@solarflare.com)
+submitted by Ben Hutchings (bwh@kernel.org)

 Authors:
 Tom Herbert (therbert@google.com)
@@ -7288,7 +7288,6 @@ F:	drivers/video/aty/aty128fb.c
 RALINK RT2X00 WIRELESS LAN DRIVER
-P:	rt2x00 project
 M:	Ivo van Doorn <IvDoorn@gmail.com>
 M:	Gertjan van Wingerde <gwingerde@gmail.com>
 M:	Helmut Schaa <helmut.schaa@googlemail.com>
 L:	linux-wireless@vger.kernel.org
 L:	users@rt2x00.serialmonkey.com (moderated for non-subscribers)

@@ -7685,7 +7684,6 @@ F:	drivers/clk/samsung/
 SAMSUNG SXGBE DRIVERS
 M:	Byungho An <bh74.an@samsung.com>
 M:	Girish K S <ks.giri@samsung.com>
-M:	Siva Reddy Kallam <siva.kallam@samsung.com>
 M:	Vipul Pandya <vipul.pandya@samsung.com>
 S:	Supported
 L:	netdev@vger.kernel.org
@@ -466,7 +466,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	type -= CRYPTO_MSG_BASE;
 	link = &crypto_dispatch[type];

-	if (!capable(CAP_NET_ADMIN))
+	if (!netlink_capable(skb, CAP_NET_ADMIN))
 		return -EPERM;

 	if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
@@ -82,6 +82,7 @@ static const struct usb_device_id ath3k_table[] = {
 	{ USB_DEVICE(0x04CA, 0x3004) },
 	{ USB_DEVICE(0x04CA, 0x3005) },
 	{ USB_DEVICE(0x04CA, 0x3006) },
+	{ USB_DEVICE(0x04CA, 0x3007) },
 	{ USB_DEVICE(0x04CA, 0x3008) },
 	{ USB_DEVICE(0x04CA, 0x300b) },
 	{ USB_DEVICE(0x0930, 0x0219) },

@@ -131,6 +132,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
@@ -152,6 +152,7 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 },
+	{ USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
 	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },

@@ -1485,10 +1486,8 @@ static int btusb_probe(struct usb_interface *intf,
 	if (id->driver_info & BTUSB_BCM92035)
 		hdev->setup = btusb_setup_bcm92035;

-	if (id->driver_info & BTUSB_INTEL) {
-		usb_enable_autosuspend(data->udev);
+	if (id->driver_info & BTUSB_INTEL)
 		hdev->setup = btusb_setup_intel;
-	}

 	/* Interface numbers are hardcoded in the specification */
 	data->isoc = usb_ifnum_to_if(data->udev, 1);
@@ -369,7 +369,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
 		return;

 	/* Can only change if privileged. */
-	if (!capable(CAP_NET_ADMIN)) {
+	if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
 		err = EPERM;
 		goto out;
 	}
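Both capability hunks above apply the same rule from the tree-wide netlink
capabilities fix (items 2 and 8 in the pull message): authorize a netlink
request against the socket that sent it, not against the process that happens
to be running when the message is dispatched. A minimal sketch of the pattern;
my_doit() is hypothetical, netlink_capable() is the real helper this series
introduces:

/* sketch only, not code from this merge */
#include <linux/capability.h>
#include <linux/netlink.h>

static int my_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* Pre-fix pattern, exploitable: capable() checks current's
	 * credentials, which may belong to a privileged kernel context
	 * rather than the unprivileged opener of the socket:
	 *
	 *	if (!capable(CAP_NET_ADMIN))
	 *		return -EPERM;
	 */

	/* Post-fix pattern: check the capabilities recorded from the
	 * socket that actually issued this request. */
	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* ... privileged work ... */
	return 0;
}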
@@ -425,7 +425,7 @@ afterXPR:
 			if (cs->debug & L1_DEB_MONITOR)
 				debugl1(cs, "ICC %02x -> MOX1", cs->dc.icc.mon_tx[cs->dc.icc.mon_txp - 1]);
 		}
-	AfterMOX1:
+	AfterMOX1: ;
 #endif
 	}
 }
@@ -534,7 +534,7 @@ static ssize_t bonding_show_min_links(struct device *d,
 {
 	struct bonding *bond = to_bond(d);

-	return sprintf(buf, "%d\n", bond->params.min_links);
+	return sprintf(buf, "%u\n", bond->params.min_links);
 }

 static ssize_t bonding_store_min_links(struct device *d,
@@ -14,6 +14,13 @@ config CAN_C_CAN_PLATFORM
 	  SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
 	  boards like am335x, dm814x, dm813x and dm811x.

+config CAN_C_CAN_STRICT_FRAME_ORDERING
+	bool "Force a strict RX CAN frame order (may cause frame loss)"
+	---help---
+	  The RX split buffer prevents packet reordering but can cause packet
+	  loss. Only enable this option when you accept to lose CAN frames
+	  in favour of getting the received CAN frames in the correct order.
+
 config CAN_C_CAN_PCI
 	tristate "Generic PCI Bus based C_CAN/D_CAN driver"
 	depends on PCI

(The diff for one file is not shown because of its large size.)
@@ -22,14 +22,6 @@
 #ifndef C_CAN_H
 #define C_CAN_H

-/*
- * IFx register masks:
- * allow easy operation on 16-bit registers when the
- * argument is 32-bit instead
- */
-#define IFX_WRITE_LOW_16BIT(x)	((x) & 0xFFFF)
-#define IFX_WRITE_HIGH_16BIT(x)	(((x) & 0xFFFF0000) >> 16)
-
 /* message object split */
 #define C_CAN_NO_OF_OBJECTS	32
 #define C_CAN_MSG_OBJ_RX_NUM	16

@@ -45,8 +37,6 @@

 #define C_CAN_MSG_OBJ_RX_SPLIT	9
 #define C_CAN_MSG_RX_LOW_LAST	(C_CAN_MSG_OBJ_RX_SPLIT - 1)

-#define C_CAN_NEXT_MSG_OBJ_MASK	(C_CAN_MSG_OBJ_TX_NUM - 1)
-#define RECEIVE_OBJECT_BITS	0x0000ffff

 enum reg {

@@ -183,23 +173,20 @@ struct c_can_priv {
 	struct napi_struct napi;
 	struct net_device *dev;
 	struct device *device;
-	spinlock_t xmit_lock;
-	int tx_object;
-	int current_status;
+	atomic_t tx_active;
+	unsigned long tx_dir;
 	int last_status;
 	u16 (*read_reg) (struct c_can_priv *priv, enum reg index);
 	void (*write_reg) (struct c_can_priv *priv, enum reg index, u16 val);
 	void __iomem *base;
 	const u16 *regs;
 	unsigned long irq_flags; /* for request_irq() */
-	unsigned int tx_next;
-	unsigned int tx_echo;
 	void *priv;		/* for board-specific data */
 	u16 irqstatus;
 	enum c_can_dev_id type;
 	u32 __iomem *raminit_ctrlreg;
-	unsigned int instance;
+	int instance;
 	void (*raminit) (const struct c_can_priv *priv, bool enable);
+	u32 comm_rcv_high;
+	u32 rxmasked;
+	u32 dlc[C_CAN_MSG_OBJ_TX_NUM];
 };
@@ -84,8 +84,11 @@ static int c_can_pci_probe(struct pci_dev *pdev,
 		goto out_disable_device;
 	}

-	pci_set_master(pdev);
-	pci_enable_msi(pdev);
+	ret = pci_enable_msi(pdev);
+	if (!ret) {
+		dev_info(&pdev->dev, "MSI enabled\n");
+		pci_set_master(pdev);
+	}

 	addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
 	if (!addr) {

@@ -132,6 +135,8 @@ static int c_can_pci_probe(struct pci_dev *pdev,
 		goto out_free_c_can;
 	}

+	priv->type = c_can_pci_data->type;
+
 	/* Configure access to registers */
 	switch (c_can_pci_data->reg_align) {
 	case C_CAN_REG_ALIGN_32:
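The first c_can_pci hunk above also shows a general rule: pci_enable_msi() can
fail, so its return value must be checked instead of assuming MSI is on. A
condensed sketch of the shape the fix adopts (probe name is hypothetical):

#include <linux/pci.h>

static int my_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* MSI is optional here: on failure the device simply keeps its
	 * legacy line-based interrupt. MSI delivery is an inbound memory
	 * write, so bus mastering is enabled together with it, mirroring
	 * the hunk above. */
	ret = pci_enable_msi(pdev);
	if (!ret) {
		dev_info(&pdev->dev, "MSI enabled\n");
		pci_set_master(pdev);
	}

	return 0;
}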
@@ -222,7 +222,7 @@ static int c_can_plat_probe(struct platform_device *pdev)

 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0)
+	if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
 		dev_info(&pdev->dev, "control memory is not used for raminit\n");
 	else
 		priv->raminit = c_can_hw_raminit;
@@ -256,7 +256,7 @@ static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,

 	/* Check if the CAN device has bit-timing parameters */
 	if (!btc)
-		return -ENOTSUPP;
+		return -EOPNOTSUPP;

 	/*
 	 * Depending on the given can_bittiming parameter structure the CAN
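The errno swap above matters because ENOTSUPP (524) is kernel-internal and has
no userspace string, while EOPNOTSUPP (95) is the POSIX-visible "Operation not
supported". A small userspace illustration of the difference:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	printf("%d: %s\n", EOPNOTSUPP, strerror(EOPNOTSUPP)); /* meaningful */
	printf("%d: %s\n", 524, strerror(524)); /* "Unknown error 524" */
	return 0;
}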
@@ -46,6 +46,7 @@ static int clk[MAXDEV];
 static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
 static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
 static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+static spinlock_t indirect_lock[MAXDEV];  /* lock for indirect access mode */

 module_param_array(port, ulong, NULL, S_IRUGO);
 MODULE_PARM_DESC(port, "I/O port number");

@@ -101,19 +102,26 @@ static void sja1000_isa_port_write_reg(const struct sja1000_priv *priv,
 static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv,
 					     int reg)
 {
-	unsigned long base = (unsigned long)priv->reg_base;
+	unsigned long flags, base = (unsigned long)priv->reg_base;
+	u8 readval;

+	spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
 	outb(reg, base);
-	return inb(base + 1);
+	readval = inb(base + 1);
+	spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
+
+	return readval;
 }

 static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
 						int reg, u8 val)
 {
-	unsigned long base = (unsigned long)priv->reg_base;
+	unsigned long flags, base = (unsigned long)priv->reg_base;

+	spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags);
 	outb(reg, base);
 	outb(val, base + 1);
+	spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags);
 }

 static int sja1000_isa_probe(struct platform_device *pdev)

@@ -169,6 +177,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)
 	if (iosize == SJA1000_IOSIZE_INDIRECT) {
 		priv->read_reg = sja1000_isa_port_read_reg_indirect;
 		priv->write_reg = sja1000_isa_port_write_reg_indirect;
+		spin_lock_init(&indirect_lock[idx]);
 	} else {
 		priv->read_reg = sja1000_isa_port_read_reg;
 		priv->write_reg = sja1000_isa_port_write_reg;

@@ -198,6 +207,7 @@ static int sja1000_isa_probe(struct platform_device *pdev)

 	platform_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
+	dev->dev_id = idx;

 	err = register_sja1000dev(dev);
 	if (err) {
@@ -322,13 +322,13 @@ static void slcan_write_wakeup(struct tty_struct *tty)
 	if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
 		return;

-	spin_lock(&sl->lock);
+	spin_lock_bh(&sl->lock);
 	if (sl->xleft <= 0) {
 		/* Now serial buffer is almost free & we can start
 		 * transmission of another packet */
 		sl->dev->stats.tx_packets++;
 		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-		spin_unlock(&sl->lock);
+		spin_unlock_bh(&sl->lock);
 		netif_wake_queue(sl->dev);
 		return;
 	}

@@ -336,7 +336,7 @@ static void slcan_write_wakeup(struct tty_struct *tty)
 	actual = tty->ops->write(tty, sl->xhead, sl->xleft);
 	sl->xleft -= actual;
 	sl->xhead += actual;
-	spin_unlock(&sl->lock);
+	spin_unlock_bh(&sl->lock);
 }

 /* Send a can_frame to a TTY queue. */
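The slcan hunks enforce the rule behind item 14 of the pull message: a lock
that is also taken from softirq (BH) context must be taken with BH disabled on
the process-context side, or the softirq can deadlock against its own CPU. A
minimal sketch with hypothetical names:

#include <linux/spinlock.h>

struct my_chan {
	spinlock_t lock;
	int xleft;			/* bytes left to transmit */
};

/* process / TTY context: block BH while holding the lock */
static void my_write_wakeup(struct my_chan *ch)
{
	spin_lock_bh(&ch->lock);
	ch->xleft = 0;
	spin_unlock_bh(&ch->lock);
}

/* softirq (xmit) context: already in BH, a plain spin_lock suffices */
static void my_xmit(struct my_chan *ch)
{
	spin_lock(&ch->lock);
	ch->xleft += 8;
	spin_unlock(&ch->lock);
}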
@@ -1,5 +1,6 @@
 config ALTERA_TSE
 	tristate "Altera Triple-Speed Ethernet MAC support"
+	depends on HAS_DMA
 	select PHYLIB
 	---help---
 	  This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
@@ -18,6 +18,7 @@
 #include "altera_utils.h"
 #include "altera_tse.h"
 #include "altera_msgdmahw.h"
+#include "altera_msgdma.h"

 /* No initialization work to do for MSGDMA */
 int msgdma_initialize(struct altera_tse_private *priv)

@@ -29,6 +30,10 @@ void msgdma_uninitialize(struct altera_tse_private *priv)
 {
 }

+void msgdma_start_rxdma(struct altera_tse_private *priv)
+{
+}
+
 void msgdma_reset(struct altera_tse_private *priv)
 {
 	int counter;

@@ -154,7 +159,7 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)

 /* Put buffer to the mSGDMA RX FIFO
  */
-int msgdma_add_rx_desc(struct altera_tse_private *priv,
+void msgdma_add_rx_desc(struct altera_tse_private *priv,
 			struct tse_buffer *rxbuffer)
 {
 	struct msgdma_extended_desc *desc = priv->rx_dma_desc;

@@ -175,7 +180,6 @@ int msgdma_add_rx_desc(struct altera_tse_private *priv,
 	iowrite32(0, &desc->burst_seq_num);
 	iowrite32(0x00010001, &desc->stride);
 	iowrite32(control, &desc->control);
-	return 1;
 }

 /* status is returned on upper 16 bits,
@@ -25,10 +25,11 @@ void msgdma_disable_txirq(struct altera_tse_private *);
 void msgdma_clear_rxirq(struct altera_tse_private *);
 void msgdma_clear_txirq(struct altera_tse_private *);
 u32 msgdma_tx_completions(struct altera_tse_private *);
-int msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
+void msgdma_add_rx_desc(struct altera_tse_private *, struct tse_buffer *);
 int msgdma_tx_buffer(struct altera_tse_private *, struct tse_buffer *);
 u32 msgdma_rx_status(struct altera_tse_private *);
 int msgdma_initialize(struct altera_tse_private *);
 void msgdma_uninitialize(struct altera_tse_private *);
+void msgdma_start_rxdma(struct altera_tse_private *);

 #endif /* __ALTERA_MSGDMA_H__ */
@@ -20,15 +20,15 @@
 #include "altera_sgdmahw.h"
 #include "altera_sgdma.h"

-static void sgdma_descrip(struct sgdma_descrip *desc,
-			  struct sgdma_descrip *ndesc,
-			  dma_addr_t ndesc_phys,
-			  dma_addr_t raddr,
-			  dma_addr_t waddr,
-			  u16 length,
-			  int generate_eop,
-			  int rfixed,
-			  int wfixed);
+static void sgdma_setup_descrip(struct sgdma_descrip *desc,
+				struct sgdma_descrip *ndesc,
+				dma_addr_t ndesc_phys,
+				dma_addr_t raddr,
+				dma_addr_t waddr,
+				u16 length,
+				int generate_eop,
+				int rfixed,
+				int wfixed);

 static int sgdma_async_write(struct altera_tse_private *priv,
 			     struct sgdma_descrip *desc);

@@ -64,11 +64,15 @@ queue_rx_peekhead(struct altera_tse_private *priv);

 int sgdma_initialize(struct altera_tse_private *priv)
 {
-	priv->txctrlreg = SGDMA_CTRLREG_ILASTD;
+	priv->txctrlreg = SGDMA_CTRLREG_ILASTD |
+		      SGDMA_CTRLREG_INTEN;

 	priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
+		      SGDMA_CTRLREG_INTEN |
 		      SGDMA_CTRLREG_ILASTD;

+	priv->sgdmadesclen = sizeof(struct sgdma_descrip);
+
 	INIT_LIST_HEAD(&priv->txlisthd);
 	INIT_LIST_HEAD(&priv->rxlisthd);

@@ -93,6 +97,16 @@ int sgdma_initialize(struct altera_tse_private *priv)
 		return -EINVAL;
 	}

+	/* Initialize descriptor memory to all 0's, sync memory to cache */
+	memset(priv->tx_dma_desc, 0, priv->txdescmem);
+	memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+
+	dma_sync_single_for_device(priv->device, priv->txdescphys,
+				   priv->txdescmem, DMA_TO_DEVICE);
+
+	dma_sync_single_for_device(priv->device, priv->rxdescphys,
+				   priv->rxdescmem, DMA_TO_DEVICE);
+
 	return 0;
 }

@@ -130,26 +144,23 @@ void sgdma_reset(struct altera_tse_private *priv)
 	iowrite32(0, &prxsgdma->control);
 }

+/* For SGDMA, interrupts remain enabled after initially enabling,
+ * so no need to provide implementations for abstract enable
+ * and disable
+ */
+
 void sgdma_enable_rxirq(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-	priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
-	tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
 }

 void sgdma_enable_txirq(struct altera_tse_private *priv)
 {
-	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-	priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
-	tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
 }

-/* for SGDMA, RX interrupts remain enabled after enabling */
 void sgdma_disable_rxirq(struct altera_tse_private *priv)
 {
 }

-/* for SGDMA, TX interrupts remain enabled after enabling */
 void sgdma_disable_txirq(struct altera_tse_private *priv)
 {
 }

@@ -184,15 +195,15 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 	if (sgdma_txbusy(priv))
 		return 0;

-	sgdma_descrip(cdesc,			/* current descriptor */
-		      ndesc,			/* next descriptor */
-		      sgdma_txphysaddr(priv, ndesc),
-		      buffer->dma_addr,		/* address of packet to xmit */
-		      0,			/* write addr 0 for tx dma */
-		      buffer->len,		/* length of packet */
-		      SGDMA_CONTROL_EOP,	/* Generate EOP */
-		      0,			/* read fixed */
-		      SGDMA_CONTROL_WR_FIXED);	/* Generate SOP */
+	sgdma_setup_descrip(cdesc,			/* current descriptor */
+			    ndesc,			/* next descriptor */
+			    sgdma_txphysaddr(priv, ndesc),
+			    buffer->dma_addr,		/* address of packet to xmit */
+			    0,				/* write addr 0 for tx dma */
+			    buffer->len,		/* length of packet */
+			    SGDMA_CONTROL_EOP,		/* Generate EOP */
+			    0,				/* read fixed */
+			    SGDMA_CONTROL_WR_FIXED);	/* Generate SOP */

 	pktstx = sgdma_async_write(priv, cdesc);

@@ -219,11 +230,15 @@ u32 sgdma_tx_completions(struct altera_tse_private *priv)
 	return ready;
 }

-int sgdma_add_rx_desc(struct altera_tse_private *priv,
-		      struct tse_buffer *rxbuffer)
+void sgdma_start_rxdma(struct altera_tse_private *priv)
+{
+	sgdma_async_read(priv);
+}
+
+void sgdma_add_rx_desc(struct altera_tse_private *priv,
+		       struct tse_buffer *rxbuffer)
 {
 	queue_rx(priv, rxbuffer);
-	return sgdma_async_read(priv);
 }

 /* status is returned on upper 16 bits,

@@ -240,28 +255,52 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
 	unsigned int pktstatus = 0;
 	struct tse_buffer *rxbuffer = NULL;

-	dma_sync_single_for_cpu(priv->device,
-				priv->rxdescphys,
-				priv->rxdescmem,
-				DMA_BIDIRECTIONAL);
+	u32 sts = ioread32(&csr->status);

 	desc = &base[0];
-	if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
-	    (desc->status & SGDMA_STATUS_EOP)) {
+	if (sts & SGDMA_STSREG_EOP) {
+		dma_sync_single_for_cpu(priv->device,
+					priv->rxdescphys,
+					priv->sgdmadesclen,
+					DMA_FROM_DEVICE);
+
 		pktlength = desc->bytes_xferred;
 		pktstatus = desc->status & 0x3f;
 		rxstatus = pktstatus;
 		rxstatus = rxstatus << 16;
 		rxstatus |= (pktlength & 0xffff);

-		desc->status = 0;
-
-		rxbuffer = dequeue_rx(priv);
-		if (rxbuffer == NULL)
-			netdev_err(priv->dev,
-				   "sgdma rx and rx queue empty!\n");
-
-		/* kick the rx sgdma after reaping this descriptor */
-		pktsrx = sgdma_async_read(priv);
+		if (rxstatus) {
+			desc->status = 0;
+
+			rxbuffer = dequeue_rx(priv);
+			if (rxbuffer == NULL)
+				netdev_info(priv->dev,
+					    "sgdma rx and rx queue empty!\n");
+
+			/* Clear control */
+			iowrite32(0, &csr->control);
+			/* clear status */
+			iowrite32(0xf, &csr->status);
+
+			/* kick the rx sgdma after reaping this descriptor */
+			pktsrx = sgdma_async_read(priv);
+
+		} else {
+			/* If the SGDMA indicated an end of packet on recv,
+			 * then it's expected that the rxstatus from the
+			 * descriptor is non-zero - meaning a valid packet
+			 * with a nonzero length, or an error has been
+			 * indicated. if not, then all we can do is signal
+			 * an error and return no packet received. Most likely
+			 * there is a system design error, or an error in the
+			 * underlying kernel (cache or cache management problem)
+			 */
+			netdev_err(priv->dev,
+				   "SGDMA RX Error Info: %x, %x, %x\n",
+				   sts, desc->status, rxstatus);
+		}
+	} else if (sts == 0) {
+		pktsrx = sgdma_async_read(priv);
 	}

@@ -270,15 +309,15 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)


 /* Private functions */
-static void sgdma_descrip(struct sgdma_descrip *desc,
-			  struct sgdma_descrip *ndesc,
-			  dma_addr_t ndesc_phys,
-			  dma_addr_t raddr,
-			  dma_addr_t waddr,
-			  u16 length,
-			  int generate_eop,
-			  int rfixed,
-			  int wfixed)
+static void sgdma_setup_descrip(struct sgdma_descrip *desc,
+				struct sgdma_descrip *ndesc,
+				dma_addr_t ndesc_phys,
+				dma_addr_t raddr,
+				dma_addr_t waddr,
+				u16 length,
+				int generate_eop,
+				int rfixed,
+				int wfixed)
 {
 	/* Clear the next descriptor as not owned by hardware */
 	u32 ctrl = ndesc->control;

@@ -319,35 +358,29 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 	struct sgdma_descrip *cdesc = &descbase[0];
 	struct sgdma_descrip *ndesc = &descbase[1];

-	unsigned int sts = ioread32(&csr->status);
 	struct tse_buffer *rxbuffer = NULL;

 	if (!sgdma_rxbusy(priv)) {
 		rxbuffer = queue_rx_peekhead(priv);
-		if (rxbuffer == NULL)
+		if (rxbuffer == NULL) {
+			netdev_err(priv->dev, "no rx buffers available\n");
 			return 0;
+		}

-		sgdma_descrip(cdesc,			/* current descriptor */
-			      ndesc,			/* next descriptor */
-			      sgdma_rxphysaddr(priv, ndesc),
-			      0,			/* read addr 0 for rx dma */
-			      rxbuffer->dma_addr,	/* write addr for rx dma */
-			      0,			/* read 'til EOP */
-			      0,			/* EOP: NA for rx dma */
-			      0,			/* read fixed: NA for rx dma */
-			      0);			/* SOP: NA for rx DMA */
-
-		/* clear control and status */
-		iowrite32(0, &csr->control);
-
-		/* If status available, clear those bits */
-		if (sts & 0xf)
-			iowrite32(0xf, &csr->status);
+		sgdma_setup_descrip(cdesc,		/* current descriptor */
+				    ndesc,		/* next descriptor */
+				    sgdma_rxphysaddr(priv, ndesc),
+				    0,			/* read addr 0 for rx dma */
+				    rxbuffer->dma_addr,	/* write addr for rx dma */
+				    0,			/* read 'til EOP */
+				    0,			/* EOP: NA for rx dma */
+				    0,			/* read fixed: NA for rx dma */
+				    0);			/* SOP: NA for rx DMA */

 		dma_sync_single_for_device(priv->device,
 					   priv->rxdescphys,
-					   priv->rxdescmem,
-					   DMA_BIDIRECTIONAL);
+					   priv->sgdmadesclen,
+					   DMA_TO_DEVICE);

 		iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
 			  &csr->next_descrip);

@@ -374,7 +407,7 @@ static int sgdma_async_write(struct altera_tse_private *priv,
 	iowrite32(0x1f, &csr->status);

 	dma_sync_single_for_device(priv->device, priv->txdescphys,
-				   priv->txdescmem, DMA_TO_DEVICE);
+				   priv->sgdmadesclen, DMA_TO_DEVICE);

 	iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
 		  &csr->next_descrip);
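A recurring change in this file: DMA syncs now cover exactly one descriptor
(priv->sgdmadesclen) with a precise direction instead of the whole descriptor
region with DMA_BIDIRECTIONAL. The general ownership hand-off looks like this
(sketch, hypothetical helper names):

#include <linux/dma-mapping.h>

/* CPU finished writing a descriptor; hand ownership to the device */
static void publish_desc(struct device *dev, dma_addr_t desc_phys, size_t len)
{
	dma_sync_single_for_device(dev, desc_phys, len, DMA_TO_DEVICE);
}

/* device completed; take ownership back before reading the status */
static void reap_desc(struct device *dev, dma_addr_t desc_phys, size_t len)
{
	dma_sync_single_for_cpu(dev, desc_phys, len, DMA_FROM_DEVICE);
}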
@@ -26,10 +26,11 @@ void sgdma_clear_rxirq(struct altera_tse_private *);
 void sgdma_clear_txirq(struct altera_tse_private *);
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *);
 u32 sgdma_tx_completions(struct altera_tse_private *);
-int sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
+void sgdma_add_rx_desc(struct altera_tse_private *priv, struct tse_buffer *);
 void sgdma_status(struct altera_tse_private *);
 u32 sgdma_rx_status(struct altera_tse_private *);
 int sgdma_initialize(struct altera_tse_private *);
 void sgdma_uninitialize(struct altera_tse_private *);
+void sgdma_start_rxdma(struct altera_tse_private *priv);

 #endif /* __ALTERA_SGDMA_H__ */
@@ -58,6 +58,8 @@
 /* MAC function configuration default settings */
 #define ALTERA_TSE_TX_IPG_LENGTH	12

+#define ALTERA_TSE_PAUSE_QUANTA		0xffff
+
 #define GET_BIT_VALUE(v, bit)	(((v) >> (bit)) & 0x1)

 /* MAC Command_Config Register Bit Definitions

@@ -390,10 +392,11 @@ struct altera_dmaops {
 	void (*clear_rxirq)(struct altera_tse_private *);
 	int (*tx_buffer)(struct altera_tse_private *, struct tse_buffer *);
 	u32 (*tx_completions)(struct altera_tse_private *);
-	int (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
+	void (*add_rx_desc)(struct altera_tse_private *, struct tse_buffer *);
 	u32 (*get_rx_status)(struct altera_tse_private *);
 	int (*init_dma)(struct altera_tse_private *);
 	void (*uninit_dma)(struct altera_tse_private *);
+	void (*start_rxdma)(struct altera_tse_private *);
 };

 /* This structure is private to each device.

@@ -453,6 +456,7 @@ struct altera_tse_private {
 	u32 rxctrlreg;
 	dma_addr_t rxdescphys;
 	dma_addr_t txdescphys;
+	size_t sgdmadesclen;

 	struct list_head txlisthd;
 	struct list_head rxlisthd;
@@ -77,7 +77,7 @@ static void tse_get_drvinfo(struct net_device *dev,
 	struct altera_tse_private *priv = netdev_priv(dev);
 	u32 rev = ioread32(&priv->mac_dev->megacore_revision);

-	strcpy(info->driver, "Altera TSE MAC IP Driver");
+	strcpy(info->driver, "altera_tse");
 	strcpy(info->version, "v8.0");
 	snprintf(info->fw_version, ETHTOOL_FWVERS_LEN, "v%d.%d",
 		 rev & 0xFFFF, (rev & 0xFFFF0000) >> 16);

@@ -185,6 +185,12 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	 * how to do any special formatting of this data.
 	 * This version number will need to change if and
 	 * when this register table is changed.
+	 *
+	 * version[31:0] = 1: Dump the first 128 TSE Registers
+	 *      Upper bits are all 0 by default
+	 *
+	 * Upper 16-bits will indicate feature presence for
+	 * Ethtool register decoding in future version.
 	 */

 	regs->version = 1;
@@ -224,6 +224,7 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
 		dev_kfree_skb_any(rxbuffer->skb);
 		return -EINVAL;
 	}
+	rxbuffer->dma_addr &= (dma_addr_t)~3;
 	rxbuffer->len = len;
 	return 0;
 }

@@ -425,9 +426,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
 		priv->dev->stats.rx_bytes += pktlength;

 		entry = next_entry;
+
+		tse_rx_refill(priv);
 	}

-	tse_rx_refill(priv);
 	return count;
 }

@@ -520,7 +522,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
 	struct altera_tse_private *priv;
 	unsigned long int flags;

-
 	if (unlikely(!dev)) {
 		pr_err("%s: invalid dev pointer\n", __func__);
 		return IRQ_NONE;

@@ -868,13 +869,13 @@ static int init_mac(struct altera_tse_private *priv)
 	/* Disable RX/TX shift 16 for alignment of all received frames on 16-bit
 	 * start address
 	 */
-	tse_clear_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
+	tse_set_bit(&mac->rx_cmd_stat, ALTERA_TSE_RX_CMD_STAT_RX_SHIFT16);
 	tse_clear_bit(&mac->tx_cmd_stat, ALTERA_TSE_TX_CMD_STAT_TX_SHIFT16 |
 		      ALTERA_TSE_TX_CMD_STAT_OMIT_CRC);

 	/* Set the MAC options */
 	cmd = ioread32(&mac->command_config);
-	cmd |= MAC_CMDCFG_PAD_EN;	/* Padding Removal on Receive */
+	cmd &= ~MAC_CMDCFG_PAD_EN;	/* No padding Removal on Receive */
 	cmd &= ~MAC_CMDCFG_CRC_FWD;	/* CRC Removal */
 	cmd |= MAC_CMDCFG_RX_ERR_DISC;	/* Automatically discard frames
 					 * with CRC errors

@@ -882,8 +883,16 @@ static int init_mac(struct altera_tse_private *priv)
 	cmd |= MAC_CMDCFG_CNTL_FRM_ENA;
 	cmd &= ~MAC_CMDCFG_TX_ENA;
 	cmd &= ~MAC_CMDCFG_RX_ENA;
+
+	/* Default speed and duplex setting, full/100 */
+	cmd &= ~MAC_CMDCFG_HD_ENA;
+	cmd &= ~MAC_CMDCFG_ETH_SPEED;
+	cmd &= ~MAC_CMDCFG_ENA_10;
+
 	iowrite32(cmd, &mac->command_config);

+	iowrite32(ALTERA_TSE_PAUSE_QUANTA, &mac->pause_quanta);
+
 	if (netif_msg_hw(priv))
 		dev_dbg(priv->device,
 			"MAC post-initialization: CMD_CONFIG = 0x%08x\n", cmd);

@@ -1085,17 +1094,19 @@ static int tse_open(struct net_device *dev)

 	spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);

-	/* Start MAC Rx/Tx */
-	spin_lock(&priv->mac_cfg_lock);
-	tse_set_mac(priv, true);
-	spin_unlock(&priv->mac_cfg_lock);
-
 	if (priv->phydev)
 		phy_start(priv->phydev);

 	napi_enable(&priv->napi);
 	netif_start_queue(dev);

+	priv->dmaops->start_rxdma(priv);
+
+	/* Start MAC Rx/Tx */
+	spin_lock(&priv->mac_cfg_lock);
+	tse_set_mac(priv, true);
+	spin_unlock(&priv->mac_cfg_lock);
+
 	return 0;

 tx_request_irq_error:

@@ -1167,7 +1178,6 @@ static struct net_device_ops altera_tse_netdev_ops = {
 	.ndo_validate_addr	= eth_validate_addr,
 };

-
 static int request_and_map(struct platform_device *pdev, const char *name,
 			   struct resource **res, void __iomem **ptr)
 {

@@ -1235,7 +1245,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	/* Get the mapped address to the SGDMA descriptor memory */
 	ret = request_and_map(pdev, "s1", &dma_res, &descmap);
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;

 	/* Start of that memory is for transmit descriptors */
 	priv->tx_dma_desc = descmap;

@@ -1254,24 +1264,24 @@ static int altera_tse_probe(struct platform_device *pdev)
 		if (upper_32_bits(priv->rxdescmem_busaddr)) {
 			dev_dbg(priv->device,
 				"SGDMA bus addresses greater than 32-bits\n");
-			goto out_free;
+			goto err_free_netdev;
 		}
 		if (upper_32_bits(priv->txdescmem_busaddr)) {
 			dev_dbg(priv->device,
 				"SGDMA bus addresses greater than 32-bits\n");
-			goto out_free;
+			goto err_free_netdev;
 		}
 	} else if (priv->dmaops &&
 		   priv->dmaops->altera_dtype == ALTERA_DTYPE_MSGDMA) {
 		ret = request_and_map(pdev, "rx_resp", &dma_res,
 				      &priv->rx_dma_resp);
 		if (ret)
-			goto out_free;
+			goto err_free_netdev;

 		ret = request_and_map(pdev, "tx_desc", &dma_res,
 				      &priv->tx_dma_desc);
 		if (ret)
-			goto out_free;
+			goto err_free_netdev;

 		priv->txdescmem = resource_size(dma_res);
 		priv->txdescmem_busaddr = dma_res->start;

@@ -1279,13 +1289,13 @@ static int altera_tse_probe(struct platform_device *pdev)
 		ret = request_and_map(pdev, "rx_desc", &dma_res,
 				      &priv->rx_dma_desc);
 		if (ret)
-			goto out_free;
+			goto err_free_netdev;

 		priv->rxdescmem = resource_size(dma_res);
 		priv->rxdescmem_busaddr = dma_res->start;

 	} else {
-		goto out_free;
+		goto err_free_netdev;
 	}

 	if (!dma_set_mask(priv->device, DMA_BIT_MASK(priv->dmaops->dmamask)))

@@ -1294,26 +1304,26 @@ static int altera_tse_probe(struct platform_device *pdev)
 	else if (!dma_set_mask(priv->device, DMA_BIT_MASK(32)))
 		dma_set_coherent_mask(priv->device, DMA_BIT_MASK(32));
 	else
-		goto out_free;
+		goto err_free_netdev;

 	/* MAC address space */
 	ret = request_and_map(pdev, "control_port", &control_port,
 			      (void __iomem **)&priv->mac_dev);
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;

 	/* xSGDMA Rx Dispatcher address space */
 	ret = request_and_map(pdev, "rx_csr", &dma_res,
 			      &priv->rx_dma_csr);
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;


 	/* xSGDMA Tx Dispatcher address space */
 	ret = request_and_map(pdev, "tx_csr", &dma_res,
 			      &priv->tx_dma_csr);
 	if (ret)
-		goto out_free;
+		goto err_free_netdev;


 	/* Rx IRQ */

@@ -1321,7 +1331,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	if (priv->rx_irq == -ENXIO) {
 		dev_err(&pdev->dev, "cannot obtain Rx IRQ\n");
 		ret = -ENXIO;
-		goto out_free;
+		goto err_free_netdev;
 	}

 	/* Tx IRQ */

@@ -1329,7 +1339,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	if (priv->tx_irq == -ENXIO) {
 		dev_err(&pdev->dev, "cannot obtain Tx IRQ\n");
 		ret = -ENXIO;
-		goto out_free;
+		goto err_free_netdev;
 	}

 	/* get FIFO depths from device tree */

@@ -1337,14 +1347,14 @@ static int altera_tse_probe(struct platform_device *pdev)
 				 &priv->rx_fifo_depth)) {
 		dev_err(&pdev->dev, "cannot obtain rx-fifo-depth\n");
 		ret = -ENXIO;
-		goto out_free;
+		goto err_free_netdev;
 	}

 	if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
 				 &priv->rx_fifo_depth)) {
 		dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n");
 		ret = -ENXIO;
-		goto out_free;
+		goto err_free_netdev;
 	}

 	/* get hash filter settings for this instance */

@@ -1393,7 +1403,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	      ((priv->phy_addr >= 0) && (priv->phy_addr < PHY_MAX_ADDR)))) {
 		dev_err(&pdev->dev, "invalid phy-addr specified %d\n",
 			priv->phy_addr);
-		goto out_free;
+		goto err_free_netdev;
 	}

 	/* Create/attach to MDIO bus */

@@ -1401,7 +1411,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 				     atomic_add_return(1, &instance_count));

 	if (ret)
-		goto out_free;
+		goto err_free_netdev;

 	/* initialize netdev */
 	ether_setup(ndev);

@@ -1438,7 +1448,7 @@ static int altera_tse_probe(struct platform_device *pdev)
 	ret = register_netdev(ndev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed to register TSE net device\n");
-		goto out_free_mdio;
+		goto err_register_netdev;
 	}

 	platform_set_drvdata(pdev, ndev);

@@ -1455,13 +1465,16 @@ static int altera_tse_probe(struct platform_device *pdev)
 	ret = init_phy(ndev);
 	if (ret != 0) {
 		netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
-		goto out_free_mdio;
+		goto err_init_phy;
 	}
 	return 0;

-out_free_mdio:
+err_init_phy:
+	unregister_netdev(ndev);
+err_register_netdev:
+	netif_napi_del(&priv->napi);
 	altera_tse_mdio_destroy(ndev);
-out_free:
+err_free_netdev:
 	free_netdev(ndev);
 	return ret;
 }

@@ -1496,6 +1509,7 @@ struct altera_dmaops altera_dtype_sgdma = {
 	.get_rx_status = sgdma_rx_status,
 	.init_dma = sgdma_initialize,
 	.uninit_dma = sgdma_uninitialize,
+	.start_rxdma = sgdma_start_rxdma,
 };

 struct altera_dmaops altera_dtype_msgdma = {

@@ -1514,6 +1528,7 @@ struct altera_dmaops altera_dtype_msgdma = {
 	.get_rx_status = msgdma_rx_status,
 	.init_dma = msgdma_initialize,
 	.uninit_dma = msgdma_uninitialize,
+	.start_rxdma = msgdma_start_rxdma,
 };

 static struct of_device_id altera_tse_ids[] = {
@@ -11,6 +11,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
+#include <linux/clk.h>

 /* STATUS and ENABLE Register bit masks */
 #define TXINT_MASK	(1<<0)	/* Transmit interrupt */

@@ -131,6 +132,7 @@ struct arc_emac_priv {
 	struct mii_bus *bus;

 	void __iomem *regs;
+	struct clk *clk;

 	struct napi_struct napi;
 	struct net_device_stats stats;
@@ -574,6 +574,18 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 	return NETDEV_TX_OK;
 }

+static void arc_emac_set_address_internal(struct net_device *ndev)
+{
+	struct arc_emac_priv *priv = netdev_priv(ndev);
+	unsigned int addr_low, addr_hi;
+
+	addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
+	addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
+
+	arc_reg_set(priv, R_ADDRL, addr_low);
+	arc_reg_set(priv, R_ADDRH, addr_hi);
+}
+
 /**
  * arc_emac_set_address - Set the MAC address for this device.
  * @ndev:	Pointer to net_device structure.

@@ -587,9 +599,7 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
  */
 static int arc_emac_set_address(struct net_device *ndev, void *p)
 {
-	struct arc_emac_priv *priv = netdev_priv(ndev);
 	struct sockaddr *addr = p;
-	unsigned int addr_low, addr_hi;

 	if (netif_running(ndev))
 		return -EBUSY;

@@ -599,11 +609,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)

 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);

-	addr_low = le32_to_cpu(*(__le32 *) &ndev->dev_addr[0]);
-	addr_hi = le16_to_cpu(*(__le16 *) &ndev->dev_addr[4]);
-
-	arc_reg_set(priv, R_ADDRL, addr_low);
-	arc_reg_set(priv, R_ADDRH, addr_hi);
+	arc_emac_set_address_internal(ndev);

 	return 0;
 }

@@ -643,13 +649,6 @@ static int arc_emac_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}

-	/* Get CPU clock frequency from device tree */
-	if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
-				 &clock_frequency)) {
-		dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
-		return -EINVAL;
-	}
-
 	/* Get IRQ from device tree */
 	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
 	if (!irq) {

@@ -677,17 +676,36 @@ static int arc_emac_probe(struct platform_device *pdev)
 	priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
 	if (IS_ERR(priv->regs)) {
 		err = PTR_ERR(priv->regs);
-		goto out;
+		goto out_netdev;
 	}
 	dev_dbg(&pdev->dev, "Registers base address is 0x%p\n", priv->regs);

+	priv->clk = of_clk_get(pdev->dev.of_node, 0);
+	if (IS_ERR(priv->clk)) {
+		/* Get CPU clock frequency from device tree */
+		if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+					 &clock_frequency)) {
+			dev_err(&pdev->dev, "failed to retrieve <clock-frequency> from device tree\n");
+			err = -EINVAL;
+			goto out_netdev;
+		}
+	} else {
+		err = clk_prepare_enable(priv->clk);
+		if (err) {
+			dev_err(&pdev->dev, "failed to enable clock\n");
+			goto out_clkget;
+		}
+
+		clock_frequency = clk_get_rate(priv->clk);
+	}
+
 	id = arc_reg_get(priv, R_ID);

 	/* Check for EMAC revision 5 or 7, magic number */
 	if (!(id == 0x0005fd02 || id == 0x0007fd02)) {
 		dev_err(&pdev->dev, "ARC EMAC not detected, id=0x%x\n", id);
 		err = -ENODEV;
-		goto out;
+		goto out_clken;
 	}
 	dev_info(&pdev->dev, "ARC EMAC detected with id: 0x%x\n", id);

@@ -702,7 +720,7 @@ static int arc_emac_probe(struct platform_device *pdev)
 			       ndev->name, ndev);
 	if (err) {
 		dev_err(&pdev->dev, "could not allocate IRQ\n");
-		goto out;
+		goto out_clken;
 	}

 	/* Get MAC address from device tree */

@@ -713,6 +731,7 @@ static int arc_emac_probe(struct platform_device *pdev)
 	else
 		eth_hw_addr_random(ndev);

+	arc_emac_set_address_internal(ndev);
 	dev_info(&pdev->dev, "MAC address is now %pM\n", ndev->dev_addr);

 	/* Do 1 allocation instead of 2 separate ones for Rx and Tx BD rings */

@@ -722,7 +741,7 @@ static int arc_emac_probe(struct platform_device *pdev)
 	if (!priv->rxbd) {
 		dev_err(&pdev->dev, "failed to allocate data buffers\n");
 		err = -ENOMEM;
-		goto out;
+		goto out_clken;
 	}

 	priv->txbd = priv->rxbd + RX_BD_NUM;

@@ -734,7 +753,7 @@ static int arc_emac_probe(struct platform_device *pdev)
 	err = arc_mdio_probe(pdev, priv);
 	if (err) {
 		dev_err(&pdev->dev, "failed to probe MII bus\n");
-		goto out;
+		goto out_clken;
 	}

 	priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,

@@ -742,7 +761,7 @@ static int arc_emac_probe(struct platform_device *pdev)
 	if (!priv->phy_dev) {
 		dev_err(&pdev->dev, "of_phy_connect() failed\n");
 		err = -ENODEV;
-		goto out;
+		goto out_mdio;
 	}

 	dev_info(&pdev->dev, "connected to %s phy with id 0x%x\n",

@@ -752,14 +771,25 @@ static int arc_emac_probe(struct platform_device *pdev)

 	err = register_netdev(ndev);
 	if (err) {
-		netif_napi_del(&priv->napi);
 		dev_err(&pdev->dev, "failed to register network device\n");
-		goto out;
+		goto out_netif_api;
 	}

 	return 0;

-out:
+out_netif_api:
+	netif_napi_del(&priv->napi);
+	phy_disconnect(priv->phy_dev);
+	priv->phy_dev = NULL;
+out_mdio:
+	arc_mdio_remove(priv);
+out_clken:
+	if (!IS_ERR(priv->clk))
+		clk_disable_unprepare(priv->clk);
+out_clkget:
+	if (!IS_ERR(priv->clk))
+		clk_put(priv->clk);
+out_netdev:
 	free_netdev(ndev);
 	return err;
 }

@@ -774,6 +804,12 @@ static int arc_emac_remove(struct platform_device *pdev)
 	arc_mdio_remove(priv);
 	unregister_netdev(ndev);
 	netif_napi_del(&priv->napi);
+
+	if (!IS_ERR(priv->clk)) {
+		clk_disable_unprepare(priv->clk);
+		clk_put(priv->clk);
+	}
+
 	free_netdev(ndev);

 	return 0;
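The probe rework above implements "clock by phandle if present, else fixed
clock-frequency property", matching the updated arc_emac binding earlier in
this diff. Condensed into one helper (sketch, hypothetical function name):

#include <linux/clk.h>
#include <linux/of.h>

static int emac_get_clock_rate(struct device_node *np, struct clk **clkp,
			       u32 *freq)
{
	int err;

	*clkp = of_clk_get(np, 0);
	if (IS_ERR(*clkp))
		/* no "clocks" phandle: fall back to the fixed property */
		return of_property_read_u32(np, "clock-frequency", freq);

	err = clk_prepare_enable(*clkp);
	if (err)
		return err;
	*freq = clk_get_rate(*clkp);
	return 0;
}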
@@ -13233,6 +13233,8 @@ static void __bnx2x_remove(struct pci_dev *pdev,
 		iounmap(bp->doorbells);

 		bnx2x_release_firmware(bp);
+	} else {
+		bnx2x_vf_pci_dealloc(bp);
 	}
 	bnx2x_free_mem_bp(bp);
@@ -427,7 +427,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
 	if (filter->add && filter->type == BNX2X_VF_FILTER_VLAN &&
 	    (atomic_read(&bnx2x_vfq(vf, qid, vlan_count)) >=
 	     vf_vlan_rules_cnt(vf))) {
-		BNX2X_ERR("No credits for vlan\n");
+		BNX2X_ERR("No credits for vlan [%d >= %d]\n",
+			  atomic_read(&bnx2x_vfq(vf, qid, vlan_count)),
+			  vf_vlan_rules_cnt(vf));
 		return -ENOMEM;
 	}

@@ -610,6 +612,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	}

 	/* add new mcasts */
+	mcast.mcast_list_len = mc_num;
 	rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_ADD);
 	if (rc)
 		BNX2X_ERR("Faled to add multicasts\n");

@@ -837,6 +840,29 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
 	return 0;
 }

+static void bnx2x_iov_re_set_vlan_filters(struct bnx2x *bp,
+					  struct bnx2x_virtf *vf,
+					  int new)
+{
+	int num = vf_vlan_rules_cnt(vf);
+	int diff = new - num;
+	bool rc = true;
+
+	DP(BNX2X_MSG_IOV, "vf[%d] - %d vlan filter credits [previously %d]\n",
+	   vf->abs_vfid, new, num);
+
+	if (diff > 0)
+		rc = bp->vlans_pool.get(&bp->vlans_pool, diff);
+	else if (diff < 0)
+		rc = bp->vlans_pool.put(&bp->vlans_pool, -diff);
+
+	if (rc)
+		vf_vlan_rules_cnt(vf) = new;
+	else
+		DP(BNX2X_MSG_IOV, "vf[%d] - Failed to configure vlan filter credits change\n",
+		   vf->abs_vfid);
+}
+
 /* must be called after the number of PF queues and the number of VFs are
  * both known
  */

@@ -854,9 +880,11 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
 	resc->num_mac_filters = 1;

 	/* divvy up vlan rules */
+	bnx2x_iov_re_set_vlan_filters(bp, vf, 0);
 	vlan_count = bp->vlans_pool.check(&bp->vlans_pool);
 	vlan_count = 1 << ilog2(vlan_count);
-	resc->num_vlan_filters = vlan_count / BNX2X_NR_VIRTFN(bp);
+	bnx2x_iov_re_set_vlan_filters(bp, vf,
+				      vlan_count / BNX2X_NR_VIRTFN(bp));

 	/* no real limitation */
 	resc->num_mc_filters = 0;

@@ -1478,10 +1506,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
 		bnx2x_iov_static_resc(bp, vf);

 		/* queues are initialized during VF-ACQUIRE */
-
-		/* reserve the vf vlan credit */
-		bp->vlans_pool.get(&bp->vlans_pool, vf_vlan_rules_cnt(vf));
-
 		vf->filter_state = 0;
 		vf->sp_cl_id = bnx2x_fp(bp, 0, cl_id);

@@ -1912,11 +1936,12 @@ int bnx2x_vf_chk_avail_resc(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	u8 rxq_cnt = vf_rxq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);
 	u8 txq_cnt = vf_txq_count(vf) ? : bnx2x_vf_max_queue_cnt(bp, vf);

+	/* Save a vlan filter for the Hypervisor */
 	return ((req_resc->num_rxqs <= rxq_cnt) &&
 		(req_resc->num_txqs <= txq_cnt) &&
 		(req_resc->num_sbs <= vf_sb_count(vf)) &&
 		(req_resc->num_mac_filters <= vf_mac_rules_cnt(vf)) &&
-		(req_resc->num_vlan_filters <= vf_vlan_rules_cnt(vf)));
+		(req_resc->num_vlan_filters <= vf_vlan_rules_visible_cnt(vf)));
 }

 /* CORE VF API */

@@ -1972,14 +1997,14 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
 	vf_txq_count(vf) = resc->num_txqs ? : bnx2x_vf_max_queue_cnt(bp, vf);
 	if (resc->num_mac_filters)
 		vf_mac_rules_cnt(vf) = resc->num_mac_filters;
-	if (resc->num_vlan_filters)
-		vf_vlan_rules_cnt(vf) = resc->num_vlan_filters;
+	/* Add an additional vlan filter credit for the hypervisor */
+	bnx2x_iov_re_set_vlan_filters(bp, vf, resc->num_vlan_filters + 1);

 	DP(BNX2X_MSG_IOV,
 	   "Fulfilling vf request: sb count %d, tx_count %d, rx_count %d, mac_rules_count %d, vlan_rules_count %d\n",
 	   vf_sb_count(vf), vf_rxq_count(vf),
 	   vf_txq_count(vf), vf_mac_rules_cnt(vf),
-	   vf_vlan_rules_cnt(vf));
+	   vf_vlan_rules_visible_cnt(vf));

 	/* Initialize the queues */
 	if (!vf->vfqs) {

@@ -2896,6 +2921,14 @@ void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
 	return bp->regview + PXP_VF_ADDR_DB_START;
 }

+void bnx2x_vf_pci_dealloc(struct bnx2x *bp)
+{
+	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+		       sizeof(struct bnx2x_vf_mbx_msg));
+	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
+		       sizeof(union pf_vf_bulletin));
+}
+
 int bnx2x_vf_pci_alloc(struct bnx2x *bp)
 {
 	mutex_init(&bp->vf2pf_mutex);

@@ -2915,10 +2948,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
 	return 0;

 alloc_mem_err:
-	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
-		       sizeof(struct bnx2x_vf_mbx_msg));
-	BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
-		       sizeof(union pf_vf_bulletin));
+	bnx2x_vf_pci_dealloc(bp);
 	return -ENOMEM;
 }
|
|
@ -159,6 +159,8 @@ struct bnx2x_virtf {
|
|||
#define vf_mac_rules_cnt(vf) ((vf)->alloc_resc.num_mac_filters)
|
||||
#define vf_vlan_rules_cnt(vf) ((vf)->alloc_resc.num_vlan_filters)
|
||||
#define vf_mc_rules_cnt(vf) ((vf)->alloc_resc.num_mc_filters)
|
||||
/* Hide a single vlan filter credit for the hypervisor */
|
||||
#define vf_vlan_rules_visible_cnt(vf) (vf_vlan_rules_cnt(vf) - 1)
|
||||
|
||||
u8 sb_count; /* actual number of SBs */
|
||||
u8 igu_base_id; /* base igu status block id */
|
||||
|
@ -502,6 +504,7 @@ static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
|
|||
enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
|
||||
void bnx2x_timer_sriov(struct bnx2x *bp);
|
||||
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp);
|
||||
void bnx2x_vf_pci_dealloc(struct bnx2x *bp);
|
||||
int bnx2x_vf_pci_alloc(struct bnx2x *bp);
|
||||
int bnx2x_enable_sriov(struct bnx2x *bp);
|
||||
void bnx2x_disable_sriov(struct bnx2x *bp);
|
||||
|
@ -568,6 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
|
|||
return NULL;
|
||||
}
|
||||
|
||||
static inline void bnx2x_vf_pci_dealloc(struct bnx2 *bp) {return 0; }
|
||||
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
|
||||
static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
|
||||
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
|
||||
|
|
|
@@ -1163,7 +1163,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
 			bnx2x_vf_max_queue_cnt(bp, vf);
 	resc->num_sbs = vf_sb_count(vf);
 	resc->num_mac_filters = vf_mac_rules_cnt(vf);
-	resc->num_vlan_filters = vf_vlan_rules_cnt(vf);
+	resc->num_vlan_filters = vf_vlan_rules_visible_cnt(vf);
 	resc->num_mc_filters = 0;

 	if (status == PFVF_STATUS_SUCCESS) {
|
|||
|
||||
config NET_CADENCE
|
||||
bool "Cadence devices"
|
||||
depends on HAS_IOMEM && (ARM || AVR32 || COMPILE_TEST)
|
||||
depends on HAS_IOMEM && (ARM || AVR32 || MICROBLAZE || COMPILE_TEST)
|
||||
default y
|
||||
---help---
|
||||
If you have a network (Ethernet) card belonging to this class, say Y.
|
||||
|
@ -30,7 +30,7 @@ config ARM_AT91_ETHER
|
|||
|
||||
config MACB
|
||||
tristate "Cadence MACB/GEM support"
|
||||
depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || COMPILE_TEST)
|
||||
depends on HAS_DMA && (PLATFORM_AT32AP || ARCH_AT91 || ARCH_PICOXCELL || ARCH_ZYNQ || MICROBLAZE || COMPILE_TEST)
|
||||
select PHYLIB
|
||||
---help---
|
||||
The Cadence MACB ethernet interface is found on many Atmel AT32 and
|
||||
|
|
|
@@ -599,25 +599,16 @@ static void gem_rx_refill(struct macb *bp)
 {
	unsigned int entry;
	struct sk_buff *skb;
	struct macb_dma_desc *desc;
	dma_addr_t paddr;

	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
-		u32 addr, ctrl;
-
		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
-		desc = &bp->rx_ring[entry];

		/* Make hw descriptor updates visible to CPU */
		rmb();

-		addr = desc->addr;
-		ctrl = desc->ctrl;
		bp->rx_prepared_head++;

-		if ((addr & MACB_BIT(RX_USED)))
-			continue;
-
		if (bp->rx_skbuff[entry] == NULL) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);

@@ -698,7 +689,6 @@ static int gem_rx(struct macb *bp, int budget)
		if (!(addr & MACB_BIT(RX_USED)))
			break;

-		desc->addr &= ~MACB_BIT(RX_USED);
		bp->rx_tail++;
		count++;

@@ -891,16 +881,15 @@ static int macb_poll(struct napi_struct *napi, int budget)
	if (work_done < budget) {
		napi_complete(napi);

-		/*
-		 * We've done what we can to clean the buffers. Make sure we
-		 * get notified when new packets arrive.
-		 */
-		macb_writel(bp, IER, MACB_RX_INT_FLAGS);
-
		/* Packets received while interrupts were disabled */
		status = macb_readl(bp, RSR);
-		if (unlikely(status))
+		if (status) {
			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
				macb_writel(bp, ISR, MACB_BIT(RCOMP));
			napi_reschedule(napi);
+		} else {
+			macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+		}
	}

	/* TODO: Handle errors */

@@ -951,6 +940,10 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
			macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
			schedule_work(&bp->tx_error_task);
+
+			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+				macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
+
			break;
		}

@@ -968,6 +961,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
				bp->hw_stats.gem.rx_overruns++;
			else
				bp->hw_stats.macb.rx_overruns++;
+
+			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+				macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
		}

		if (status & MACB_BIT(HRESP)) {

@@ -977,6 +973,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
			 * (work queue?)
			 */
			netdev_err(dev, "DMA bus error: HRESP not OK\n");
+
+			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+				macb_writel(bp, ISR, MACB_BIT(HRESP));
		}

		status = macb_readl(bp, ISR);

@@ -1113,7 +1112,7 @@ static void gem_free_rx_buffers(struct macb *bp)
		desc = &bp->rx_ring[i];
		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-		dma_unmap_single(&bp->pdev->dev, addr, skb->len,
+		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		skb = NULL;
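
Aside: the macb refill loop above leans on the classic power-of-two ring arithmetic from linux/circ_buf.h, where free space is computed from free-running producer/consumer indices and wrapping is done with a mask. A minimal userspace sketch of the same indexing (sizes and names here are illustrative, not the driver's):

    #include <stdio.h>

    #define RING_SIZE 8                      /* must be a power of two */
    #define RING_MASK (RING_SIZE - 1)
    /* free slots between producer head and consumer tail,
     * as in CIRC_SPACE() from linux/circ_buf.h */
    #define CIRC_SPACE(head, tail) (((tail) - ((head) + 1)) & RING_MASK)

    int main(void)
    {
        unsigned int head = 0, tail = 0;     /* producer / consumer indices */
        int ring[RING_SIZE];

        /* producer: fill while there is space, wrapping with the mask */
        while (CIRC_SPACE(head, tail) > 0) {
            ring[head & RING_MASK] = (int)head;
            head++;
        }
        /* one slot is always left empty to tell full from empty */
        printf("filled %u slots, first entry %d\n",
               head - tail, ring[tail & RING_MASK]);
        return 0;
    }
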
@@ -67,13 +67,13 @@ config CHELSIO_T3
	  will be called cxgb3.

 config CHELSIO_T4
-	tristate "Chelsio Communications T4 Ethernet support"
+	tristate "Chelsio Communications T4/T5 Ethernet support"
	depends on PCI
	select FW_LOADER
	select MDIO
	---help---
-	  This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
-	  adapters.
+	  This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
+	  adapter and T5 based 40Gb Ethernet adapter.

	  For general information about Chelsio and our products, visit
	  our website at <http://www.chelsio.com>.

@@ -87,11 +87,12 @@ config CHELSIO_T4
	  will be called cxgb4.

 config CHELSIO_T4VF
-	tristate "Chelsio Communications T4 Virtual Function Ethernet support"
+	tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
	depends on PCI
	---help---
-	  This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
-	  adapters with PCI-E SR-IOV Virtual Functions.
+	  This driver supports Chelsio T4 and T5 based gigabit, 10Gb Ethernet
+	  adapters and T5 based 40Gb Ethernet adapters with PCI-E SR-IOV Virtual
+	  Functions.

	  For general information about Chelsio and our products, visit
	  our website at <http://www.chelsio.com>.
@@ -5870,6 +5870,8 @@ static void print_port_info(const struct net_device *dev)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
+	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
+		spd = " 8 GT/s";

	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
		bufp += sprintf(bufp, "100/");
@@ -121,6 +121,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
 static irqreturn_t gfar_transmit(int irq, void *dev_id);
 static irqreturn_t gfar_interrupt(int irq, void *dev_id);
 static void adjust_link(struct net_device *dev);
+static noinline void gfar_update_link_state(struct gfar_private *priv);
 static int init_phy(struct net_device *dev);
 static int gfar_probe(struct platform_device *ofdev);
 static int gfar_remove(struct platform_device *ofdev);

@@ -3076,41 +3077,6 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
	return IRQ_HANDLED;
 }

-static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
-{
-	struct phy_device *phydev = priv->phydev;
-	u32 val = 0;
-
-	if (!phydev->duplex)
-		return val;
-
-	if (!priv->pause_aneg_en) {
-		if (priv->tx_pause_en)
-			val |= MACCFG1_TX_FLOW;
-		if (priv->rx_pause_en)
-			val |= MACCFG1_RX_FLOW;
-	} else {
-		u16 lcl_adv, rmt_adv;
-		u8 flowctrl;
-		/* get link partner capabilities */
-		rmt_adv = 0;
-		if (phydev->pause)
-			rmt_adv = LPA_PAUSE_CAP;
-		if (phydev->asym_pause)
-			rmt_adv |= LPA_PAUSE_ASYM;
-
-		lcl_adv = mii_advertise_flowctrl(phydev->advertising);
-
-		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
-		if (flowctrl & FLOW_CTRL_TX)
-			val |= MACCFG1_TX_FLOW;
-		if (flowctrl & FLOW_CTRL_RX)
-			val |= MACCFG1_RX_FLOW;
-	}
-
-	return val;
-}
-
 /* Called every time the controller might need to be made
  * aware of new link state. The PHY code conveys this
  * information through variables in the phydev structure, and this

@@ -3120,83 +3086,12 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
 static void adjust_link(struct net_device *dev)
 {
	struct gfar_private *priv = netdev_priv(dev);
-	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct phy_device *phydev = priv->phydev;
-	int new_state = 0;
-
-	if (test_bit(GFAR_RESETTING, &priv->state))
-		return;
-
-	if (phydev->link) {
-		u32 tempval1 = gfar_read(&regs->maccfg1);
-		u32 tempval = gfar_read(&regs->maccfg2);
-		u32 ecntrl = gfar_read(&regs->ecntrl);
-
-		/* Now we make sure that we can be in full duplex mode.
-		 * If not, we operate in half-duplex mode.
-		 */
-		if (phydev->duplex != priv->oldduplex) {
-			new_state = 1;
-			if (!(phydev->duplex))
-				tempval &= ~(MACCFG2_FULL_DUPLEX);
-			else
-				tempval |= MACCFG2_FULL_DUPLEX;
-
-			priv->oldduplex = phydev->duplex;
-		}
-
-		if (phydev->speed != priv->oldspeed) {
-			new_state = 1;
-			switch (phydev->speed) {
-			case 1000:
-				tempval =
-				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
-
-				ecntrl &= ~(ECNTRL_R100);
-				break;
-			case 100:
-			case 10:
-				tempval =
-				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
-
-				/* Reduced mode distinguishes
-				 * between 10 and 100
-				 */
-				if (phydev->speed == SPEED_100)
-					ecntrl |= ECNTRL_R100;
-				else
-					ecntrl &= ~(ECNTRL_R100);
-				break;
-			default:
-				netif_warn(priv, link, dev,
-					   "Ack! Speed (%d) is not 10/100/1000!\n",
-					   phydev->speed);
-				break;
-			}
-
-			priv->oldspeed = phydev->speed;
-		}
-
-		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
-		tempval1 |= gfar_get_flowctrl_cfg(priv);
-
-		gfar_write(&regs->maccfg1, tempval1);
-		gfar_write(&regs->maccfg2, tempval);
-		gfar_write(&regs->ecntrl, ecntrl);
-
-		if (!priv->oldlink) {
-			new_state = 1;
-			priv->oldlink = 1;
-		}
-	} else if (priv->oldlink) {
-		new_state = 1;
-		priv->oldlink = 0;
-		priv->oldspeed = 0;
-		priv->oldduplex = -1;
-	}

-	if (new_state && netif_msg_link(priv))
-		phy_print_status(phydev);
+	if (unlikely(phydev->link != priv->oldlink ||
+		     phydev->duplex != priv->oldduplex ||
+		     phydev->speed != priv->oldspeed))
+		gfar_update_link_state(priv);
 }

 /* Update the hash table based on the current list of multicast

@@ -3442,6 +3337,114 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
	return IRQ_HANDLED;
 }

+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+	struct phy_device *phydev = priv->phydev;
+	u32 val = 0;
+
+	if (!phydev->duplex)
+		return val;
+
+	if (!priv->pause_aneg_en) {
+		if (priv->tx_pause_en)
+			val |= MACCFG1_TX_FLOW;
+		if (priv->rx_pause_en)
+			val |= MACCFG1_RX_FLOW;
+	} else {
+		u16 lcl_adv, rmt_adv;
+		u8 flowctrl;
+		/* get link partner capabilities */
+		rmt_adv = 0;
+		if (phydev->pause)
+			rmt_adv = LPA_PAUSE_CAP;
+		if (phydev->asym_pause)
+			rmt_adv |= LPA_PAUSE_ASYM;
+
+		lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+
+		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+		if (flowctrl & FLOW_CTRL_TX)
+			val |= MACCFG1_TX_FLOW;
+		if (flowctrl & FLOW_CTRL_RX)
+			val |= MACCFG1_RX_FLOW;
+	}
+
+	return val;
+}
+
+static noinline void gfar_update_link_state(struct gfar_private *priv)
+{
+	struct gfar __iomem *regs = priv->gfargrp[0].regs;
+	struct phy_device *phydev = priv->phydev;
+
+	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
+		return;
+
+	if (phydev->link) {
+		u32 tempval1 = gfar_read(&regs->maccfg1);
+		u32 tempval = gfar_read(&regs->maccfg2);
+		u32 ecntrl = gfar_read(&regs->ecntrl);
+
+		if (phydev->duplex != priv->oldduplex) {
+			if (!(phydev->duplex))
+				tempval &= ~(MACCFG2_FULL_DUPLEX);
+			else
+				tempval |= MACCFG2_FULL_DUPLEX;
+
+			priv->oldduplex = phydev->duplex;
+		}
+
+		if (phydev->speed != priv->oldspeed) {
+			switch (phydev->speed) {
+			case 1000:
+				tempval =
+				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);
+
+				ecntrl &= ~(ECNTRL_R100);
+				break;
+			case 100:
+			case 10:
+				tempval =
+				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);
+
+				/* Reduced mode distinguishes
+				 * between 10 and 100
+				 */
+				if (phydev->speed == SPEED_100)
+					ecntrl |= ECNTRL_R100;
+				else
+					ecntrl &= ~(ECNTRL_R100);
+				break;
+			default:
+				netif_warn(priv, link, priv->ndev,
+					   "Ack! Speed (%d) is not 10/100/1000!\n",
+					   phydev->speed);
+				break;
+			}
+
+			priv->oldspeed = phydev->speed;
+		}
+
+		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+		tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+		gfar_write(&regs->maccfg1, tempval1);
+		gfar_write(&regs->maccfg2, tempval);
+		gfar_write(&regs->ecntrl, ecntrl);
+
+		if (!priv->oldlink)
+			priv->oldlink = 1;
+
+	} else if (priv->oldlink) {
+		priv->oldlink = 0;
+		priv->oldspeed = 0;
+		priv->oldduplex = -1;
+	}
+
+	if (netif_msg_link(priv))
+		phy_print_status(phydev);
+}
+
 static struct of_device_id gfar_match[] =
 {
	{
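
Aside: gfar_get_flowctrl_cfg() above resolves pause frames from the local and link-partner advertisements via mii_resolve_flowctrl_fdx(). A standalone sketch of the same resolution rule (symmetric pause on both sides, or asymmetric pause paired with one symmetric side, per IEEE 802.3 Annex 28B); the types and names are illustrative:

    #include <stdio.h>
    #include <stdbool.h>

    struct pause_adv { bool sym; bool asym; };

    static void resolve(struct pause_adv lcl, struct pause_adv rmt,
                        bool *tx_pause, bool *rx_pause)
    {
        *tx_pause = *rx_pause = false;
        if (lcl.sym && rmt.sym) {
            *tx_pause = *rx_pause = true;   /* symmetric pause both ways */
        } else if (lcl.asym && rmt.asym) {
            if (lcl.sym)
                *rx_pause = true;           /* we pause, partner doesn't */
            else if (rmt.sym)
                *tx_pause = true;           /* partner pauses, we don't */
        }
    }

    int main(void)
    {
        bool tx, rx;
        struct pause_adv lcl = { .sym = false, .asym = true };
        struct pause_adv rmt = { .sym = true,  .asym = true };

        resolve(lcl, rmt, &tx, &rx);
        printf("tx_pause=%d rx_pause=%d\n", tx, rx);
        return 0;
    }
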
@@ -533,6 +533,9 @@ static int gfar_spauseparam(struct net_device *dev,
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 oldadv, newadv;

+	if (!phydev)
+		return -ENODEV;
+
	if (!(phydev->supported & SUPPORTED_Pause) ||
	    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
	     (epause->rx_pause != epause->tx_pause)))
@@ -186,7 +186,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
 {
	u16 phy_reg = 0;
	u32 phy_id = 0;
-	s32 ret_val;
+	s32 ret_val = 0;
	u16 retry_count;
	u32 mac_reg = 0;

@@ -217,11 +217,13 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
	/* In case the PHY needs to be in mdio slow mode,
	 * set slow mode and try to get the PHY id again.
	 */
-	hw->phy.ops.release(hw);
-	ret_val = e1000_set_mdio_slow_mode_hv(hw);
-	if (!ret_val)
-		ret_val = e1000e_get_phy_id(hw);
-	hw->phy.ops.acquire(hw);
+	if (hw->mac.type < e1000_pch_lpt) {
+		hw->phy.ops.release(hw);
+		ret_val = e1000_set_mdio_slow_mode_hv(hw);
+		if (!ret_val)
+			ret_val = e1000e_get_phy_id(hw);
+		hw->phy.ops.acquire(hw);
+	}

	if (ret_val)
		return false;

@@ -842,6 +844,17 @@ s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
		}
	}

+	if (hw->phy.type == e1000_phy_82579) {
+		ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+						    &data);
+		if (ret_val)
+			goto release;
+
+		data &= ~I82579_LPI_100_PLL_SHUT;
+		ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT,
+						     data);
+	}
+
	/* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */
	ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data);
	if (ret_val)

@@ -1314,14 +1327,17 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
		return ret_val;
	}

-	/* When connected at 10Mbps half-duplex, 82579 parts are excessively
+	/* When connected at 10Mbps half-duplex, some parts are excessively
	 * aggressive resulting in many collisions. To avoid this, increase
	 * the IPG and reduce Rx latency in the PHY.
	 */
-	if ((hw->mac.type == e1000_pch2lan) && link) {
+	if (((hw->mac.type == e1000_pch2lan) ||
+	     (hw->mac.type == e1000_pch_lpt)) && link) {
		u32 reg;
		reg = er32(STATUS);
		if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
+			u16 emi_addr;
+
			reg = er32(TIPG);
			reg &= ~E1000_TIPG_IPGT_MASK;
			reg |= 0xFF;

@@ -1332,8 +1348,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
			if (ret_val)
				return ret_val;

-			ret_val =
-			    e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0);
+			if (hw->mac.type == e1000_pch2lan)
+				emi_addr = I82579_RX_CONFIG;
+			else
+				emi_addr = I217_RX_CONFIG;
+
+			ret_val = e1000_write_emi_reg_locked(hw, emi_addr, 0);

			hw->phy.ops.release(hw);

@@ -2493,51 +2513,44 @@ release:
 * e1000_k1_gig_workaround_lv - K1 Si workaround
 * @hw: pointer to the HW structure
 *
- * Workaround to set the K1 beacon duration for 82579 parts
+ * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps
+ * Disable K1 in 1000Mbps and 100Mbps
 **/
 static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
 {
	s32 ret_val = 0;
	u16 status_reg = 0;
-	u32 mac_reg;
-	u16 phy_reg;

	if (hw->mac.type != e1000_pch2lan)
		return 0;

-	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
+	/* Set K1 beacon duration based on 10Mbs speed */
	ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
	if (ret_val)
		return ret_val;

	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
-		mac_reg = er32(FEXTNVM4);
-		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
-
-		ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
-		if (ret_val)
-			return ret_val;
-
-		if (status_reg & HV_M_STATUS_SPEED_1000) {
+		if (status_reg &
+		    (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) {
			u16 pm_phy_reg;

-			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
-			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
-			/* LV 1G Packet drop issue wa */
+			/* LV 1G/100 Packet drop issue wa */
			ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg);
			if (ret_val)
				return ret_val;
-			pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA;
+			pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE;
			ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg);
			if (ret_val)
				return ret_val;
		} else {
+			u32 mac_reg;
+
+			mac_reg = er32(FEXTNVM4);
+			mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
-			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+			ew32(FEXTNVM4, mac_reg);
		}
-		ew32(FEXTNVM4, mac_reg);
-		ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
	}

	return ret_val;
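
Aside: the `s32 ret_val;` → `s32 ret_val = 0;` change above pairs with the new `if (hw->mac.type < e1000_pch_lpt)` guard: once the only assignment becomes conditional, the fallthrough path would otherwise test garbage. A tiny sketch of the hazard, with toy names:

    #include <stdio.h>

    static int do_optional_step(int hw_is_old)
    {
        int ret = 0;        /* was: int ret; -- broken once the
                             * assignment below became conditional */
        if (hw_is_old)
            ret = -5;       /* pretend the slow-mode path failed */
        return ret;         /* new hardware: ret must read as 0 */
    }

    int main(void)
    {
        printf("old hw: %d, new hw: %d\n",
               do_optional_step(1), do_optional_step(0));
        return 0;
    }
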
@@ -232,16 +232,19 @@
 #define I82577_MSE_THRESHOLD	0x0887	/* 82577 Mean Square Error Threshold */
 #define I82579_MSE_LINK_DOWN	0x2411	/* MSE count before dropping link */
 #define I82579_RX_CONFIG	0x3412	/* Receive configuration */
+#define I82579_LPI_PLL_SHUT	0x4412	/* LPI PLL Shut Enable */
 #define I82579_EEE_PCS_STATUS	0x182E	/* IEEE MMD Register 3.1 >> 8 */
 #define I82579_EEE_CAPABILITY	0x0410	/* IEEE MMD Register 3.20 */
 #define I82579_EEE_ADVERTISEMENT	0x040E	/* IEEE MMD Register 7.60 */
 #define I82579_EEE_LP_ABILITY	0x040F	/* IEEE MMD Register 7.61 */
 #define I82579_EEE_100_SUPPORTED	(1 << 1)	/* 100BaseTx EEE */
 #define I82579_EEE_1000_SUPPORTED	(1 << 2)	/* 1000BaseTx EEE */
+#define I82579_LPI_100_PLL_SHUT	(1 << 2)	/* 100M LPI PLL Shut Enabled */
 #define I217_EEE_PCS_STATUS	0x9401	/* IEEE MMD Register 3.1 */
 #define I217_EEE_CAPABILITY	0x8000	/* IEEE MMD Register 3.20 */
 #define I217_EEE_ADVERTISEMENT	0x8001	/* IEEE MMD Register 7.60 */
 #define I217_EEE_LP_ABILITY	0x8002	/* IEEE MMD Register 7.61 */
+#define I217_RX_CONFIG	0xB20C	/* Receive configuration */

 #define E1000_EEE_RX_LPI_RCVD	0x0400	/* Tx LP idle received */
 #define E1000_EEE_TX_LPI_RCVD	0x0800	/* Rx LP idle received */
@@ -1165,7 +1165,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
		dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
		adapter->tx_hwtstamp_skb = NULL;
		adapter->tx_hwtstamp_timeouts++;
-		e_warn("clearing Tx timestamp hang");
+		e_warn("clearing Tx timestamp hang\n");
	} else {
		/* reschedule to check later */
		schedule_work(&adapter->tx_hwtstamp_work);

@@ -5687,7 +5687,7 @@ struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 {
	struct e1000_adapter *adapter = netdev_priv(netdev);
-	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+	int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;

	/* Jumbo frame support */
	if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&

@@ -6235,6 +6235,7 @@ static int __e1000_resume(struct pci_dev *pdev)
	return 0;
 }

+#ifdef CONFIG_PM_SLEEP
 static int e1000e_pm_thaw(struct device *dev)
 {
	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));

@@ -6255,7 +6256,6 @@ static int e1000e_pm_thaw(struct device *dev)
	return 0;
 }

-#ifdef CONFIG_PM_SLEEP
 static int e1000e_pm_suspend(struct device *dev)
 {
	struct pci_dev *pdev = to_pci_dev(dev);
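
Aside: the e1000_change_mtu hunk above is pure frame-size arithmetic; the 802.1Q tag adds 4 bytes that buffer sizing must account for. A sketch with the standard 802.3 constants (values hard-coded here, not read from the driver):

    #include <stdio.h>

    #define ETH_HLEN    14  /* dst mac + src mac + ethertype */
    #define VLAN_HLEN    4  /* 802.1Q tag */
    #define ETH_FCS_LEN  4  /* trailing CRC */

    int main(void)
    {
        int mtu = 1500;
        int old_max = mtu + ETH_HLEN + ETH_FCS_LEN;             /* 1518 */
        int new_max = mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN; /* 1522 */

        printf("without tag: %d, with 802.1Q tag: %d\n", old_max, new_max);
        return 0;
    }
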
@@ -164,6 +164,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
 #define HV_M_STATUS_AUTONEG_COMPLETE	0x1000
 #define HV_M_STATUS_SPEED_MASK		0x0300
 #define HV_M_STATUS_SPEED_1000		0x0200
+#define HV_M_STATUS_SPEED_100		0x0100
 #define HV_M_STATUS_LINK_UP		0x0040

 #define IGP01E1000_PHY_PCS_INIT_REG	0x00B4
@@ -2897,12 +2897,9 @@ static irqreturn_t i40e_intr(int irq, void *data)
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
-			ena_mask &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
+			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
-			prttsyn_stat &= ~I40E_PRTTSYN_STAT_0_TXTIME_MASK;
		}
-
-		wr32(hw, I40E_PRTTSYN_STAT_0, prttsyn_stat);
	}

	/* If a critical error is pending we have no choice but to reset the

@@ -4271,6 +4268,14 @@ static int i40e_open(struct net_device *netdev)
	if (err)
		return err;

+	/* configure global TSO hardware offload settings */
+	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
+						       TCP_FLAG_FIN) >> 16);
+	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
+						       TCP_FLAG_FIN |
+						       TCP_FLAG_CWR) >> 16);
+	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
+
 #ifdef CONFIG_I40E_VXLAN
	vxlan_get_rx_port(netdev);
 #endif

@@ -6712,6 +6717,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_IPV6_CSUM	       |
			   NETIF_F_TSO		       |
+			   NETIF_F_TSO_ECN	       |
			   NETIF_F_TSO6		       |
			   NETIF_F_RXCSUM	       |
			   NETIF_F_NTUPLE	       |

@@ -160,7 +160,7 @@ static i40e_status i40e_poll_sr_srctl_done_bit(struct i40e_hw *hw)
		udelay(5);
	}
	if (ret_code == I40E_ERR_TIMEOUT)
-		hw_dbg(hw, "Done bit in GLNVM_SRCTL not set");
+		hw_dbg(hw, "Done bit in GLNVM_SRCTL not set\n");
	return ret_code;
 }

@@ -239,7 +239,7 @@ static void i40e_ptp_tx_work(struct work_struct *work)
		dev_kfree_skb_any(pf->ptp_tx_skb);
		pf->ptp_tx_skb = NULL;
		pf->tx_hwtstamp_timeouts++;
-		dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang");
+		dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
		return;
	}

@@ -321,7 +321,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
		pf->last_rx_ptp_check = jiffies;
		pf->rx_hwtstamp_cleared++;
		dev_warn(&vsi->back->pdev->dev,
-			 "%s: clearing Rx timestamp hang",
+			 "%s: clearing Rx timestamp hang\n",
			 __func__);
	}
 }

@@ -418,7 +418,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
		}
		break;
	default:
-		dev_info(&pf->pdev->dev, "Could not specify spec type %d",
+		dev_info(&pf->pdev->dev, "Could not specify spec type %d\n",
			 input->flow_type);
		ret = -EINVAL;
	}

@@ -478,7 +478,7 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
				pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
			}
		} else {
-			dev_info(&pdev->dev, "FD filter programming error");
+			dev_info(&pdev->dev, "FD filter programming error\n");
		}
	} else if (error ==
		   (0x1 << I40E_RX_PROG_STATUS_DESC_NO_FD_ENTRY_SHIFT)) {

@@ -1713,9 +1713,11 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
		 I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
	if (tx_flags & I40E_TX_FLAGS_SW_VLAN) {
		struct vlan_ethhdr *vhdr;
-		if (skb_header_cloned(skb) &&
-		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
-			return -ENOMEM;
+		int rc;
+
+		rc = skb_cow_head(skb, 0);
+		if (rc < 0)
+			return rc;
		vhdr = (struct vlan_ethhdr *)skb->data;
		vhdr->h_vlan_TCI = htons(tx_flags >>
					 I40E_TX_FLAGS_VLAN_SHIFT);

@@ -1743,20 +1745,18 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
		    u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
 {
	u32 cd_cmd, cd_tso_len, cd_mss;
+	struct ipv6hdr *ipv6h;
	struct tcphdr *tcph;
	struct iphdr *iph;
	u32 l4len;
	int err;
-	struct ipv6hdr *ipv6h;

	if (!skb_is_gso(skb))
		return 0;

-	if (skb_header_cloned(skb)) {
-		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
-		if (err)
-			return err;
-	}
+	err = skb_cow_head(skb, 0);
+	if (err < 0)
+		return err;

	if (protocol == htons(ETH_P_IP)) {
		iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
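
Aside: the two i40e hunks above replace the open-coded "is the header cloned? then reallocate" test with skb_cow_head(), which centralizes copy-on-write before the driver scribbles on the header. A userspace analogue of the idea, with toy stand-ins for the skb machinery:

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>

    struct buf {
        char *data;
        int refcount;   /* >1 means shared, like a cloned skb header */
    };

    /* take a private copy if shared; 0 on success, negative on failure */
    static int buf_cow(struct buf *b, size_t len)
    {
        char *copy;

        if (b->refcount == 1)
            return 0;           /* already private, nothing to do */
        copy = malloc(len);
        if (!copy)
            return -12;         /* -ENOMEM */
        memcpy(copy, b->data, len);
        b->refcount--;          /* drop our reference to the shared copy */
        b->data = copy;
        b->refcount = 1;
        return 0;
    }

    int main(void)
    {
        char shared[] = "vlan-tag-goes-here";
        struct buf b = { .data = shared, .refcount = 2 }; /* shared on entry */

        if (buf_cow(&b, sizeof(shared)) < 0)
            return 1;
        b.data[0] = 'V';        /* safe: private copy now */
        printf("%s (refcount %d)\n", b.data, b.refcount);
        free(b.data);           /* frees the malloc'd private copy */
        return 0;
    }
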
@@ -365,7 +365,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
			if (word_address == address) {
				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
-				hw_dbg("Read INVM Word 0x%02x = %x",
+				hw_dbg("Read INVM Word 0x%02x = %x\n",
					address, *data);
				status = E1000_SUCCESS;
				break;

@@ -929,11 +929,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
		 */
		if (hw->fc.requested_mode == e1000_fc_full) {
			hw->fc.current_mode = e1000_fc_full;
-			hw_dbg("Flow Control = FULL.\r\n");
+			hw_dbg("Flow Control = FULL.\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
-			hw_dbg("Flow Control = "
-			       "RX PAUSE frames only.\r\n");
+			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}
	}
	/* For receiving PAUSE frames ONLY.

@@ -948,7 +947,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_tx_pause;
-			hw_dbg("Flow Control = TX PAUSE frames only.\r\n");
+			hw_dbg("Flow Control = TX PAUSE frames only.\n");
		}
		/* For transmitting PAUSE frames ONLY.
		 *

@@ -962,7 +961,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
		    !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
		    (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
			hw->fc.current_mode = e1000_fc_rx_pause;
-			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}
		/* Per the IEEE spec, at this point flow control should be
		 * disabled. However, we want to consider that we could

@@ -988,10 +987,10 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
		    (hw->fc.requested_mode == e1000_fc_tx_pause) ||
		    (hw->fc.strict_ieee)) {
			hw->fc.current_mode = e1000_fc_none;
-			hw_dbg("Flow Control = NONE.\r\n");
+			hw_dbg("Flow Control = NONE.\n");
		} else {
			hw->fc.current_mode = e1000_fc_rx_pause;
-			hw_dbg("Flow Control = RX PAUSE frames only.\r\n");
+			hw_dbg("Flow Control = RX PAUSE frames only.\n");
		}

		/* Now we need to do one last check... If we auto-

@@ -5193,8 +5193,10 @@ void igb_update_stats(struct igb_adapter *adapter,

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
-		u32 rqdpc = rd32(E1000_RQDPC(i));
		struct igb_ring *ring = adapter->rx_ring[i];
+		u32 rqdpc = rd32(E1000_RQDPC(i));
+		if (hw->mac.type >= e1000_i210)
+			wr32(E1000_RQDPC(i), 0);

		if (rqdpc) {
			ring->rx_stats.drops += rqdpc;

@@ -389,7 +389,7 @@ static void igb_ptp_tx_work(struct work_struct *work)
		adapter->ptp_tx_skb = NULL;
		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
		adapter->tx_hwtstamp_timeouts++;
-		dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang");
+		dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
		return;
	}

@@ -451,7 +451,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
		rd32(E1000_RXSTMPH);
		adapter->last_rx_ptp_check = jiffies;
		adapter->rx_hwtstamp_cleared++;
-		dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang");
+		dev_warn(&adapter->pdev->dev, "clearing Rx timestamp hang\n");
	}
 }
@@ -256,7 +256,6 @@ struct ixgbe_ring {
		struct ixgbe_tx_buffer *tx_buffer_info;
		struct ixgbe_rx_buffer *rx_buffer_info;
	};
-	unsigned long last_rx_timestamp;
	unsigned long state;
	u8 __iomem *tail;
	dma_addr_t dma;			/* phys. address of descriptor ring */

@@ -770,6 +769,7 @@ struct ixgbe_adapter {
	unsigned long ptp_tx_start;
	unsigned long last_overflow_check;
	unsigned long last_rx_ptp_check;
+	unsigned long last_rx_timestamp;
	spinlock_t tmreg_lock;
	struct cyclecounter cc;
	struct timecounter tc;

@@ -944,24 +944,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
-void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-			     struct sk_buff *skb);
-static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
-					 union ixgbe_adv_rx_desc *rx_desc,
-					 struct sk_buff *skb)
-{
-	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
-		return;
-
-	__ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
-
-	/*
-	 * Update the last_rx_timestamp timer in order to enable watchdog check
-	 * for error case of latched timestamp on a dropped packet.
-	 */
-	rx_ring->last_rx_timestamp = jiffies;
-}
-
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb);
 int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
 void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);

@@ -1195,7 +1195,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
	 */
	hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0];

-	hw_dbg(hw, "Detected EEPROM page size = %d words.",
+	hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
	       hw->eeprom.word_page_size);
 out:
	return status;

@@ -1664,7 +1664,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
	ixgbe_rx_checksum(rx_ring, rx_desc, skb);

-	ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);
+	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
+		ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {

@@ -536,7 +536,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)

	if (time_out == max_time_out) {
		status = IXGBE_ERR_LINK_SETUP;
-		hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out");
+		hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
	}

	return status;

@@ -745,7 +745,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)

	if (time_out == max_time_out) {
		status = IXGBE_ERR_LINK_SETUP;
-		hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out");
+		hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
	}

	return status;

@@ -1175,7 +1175,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
			status = 0;
		} else {
			if (hw->allow_unsupported_sfp) {
-				e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.");
+				e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
				status = 0;
			} else {
				hw_dbg(hw,

@@ -435,10 +435,8 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
 void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
 {
	struct ixgbe_hw *hw = &adapter->hw;
-	struct ixgbe_ring *rx_ring;
	u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	unsigned long rx_event;
-	int n;

	/* if we don't have a valid timestamp in the registers, just update the
	 * timeout counter and exit

@@ -450,18 +448,15 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)

	/* determine the most recent watchdog or rx_timestamp event */
	rx_event = adapter->last_rx_ptp_check;
-	for (n = 0; n < adapter->num_rx_queues; n++) {
-		rx_ring = adapter->rx_ring[n];
-		if (time_after(rx_ring->last_rx_timestamp, rx_event))
-			rx_event = rx_ring->last_rx_timestamp;
-	}
+	if (time_after(adapter->last_rx_timestamp, rx_event))
+		rx_event = adapter->last_rx_timestamp;

	/* only need to read the high RXSTMP register to clear the lock */
	if (time_is_before_jiffies(rx_event + 5*HZ)) {
		IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
		adapter->last_rx_ptp_check = jiffies;

-		e_warn(drv, "clearing RX Timestamp hang");
+		e_warn(drv, "clearing RX Timestamp hang\n");
	}
 }

@@ -517,7 +512,7 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
		dev_kfree_skb_any(adapter->ptp_tx_skb);
		adapter->ptp_tx_skb = NULL;
		clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
-		e_warn(drv, "clearing Tx Timestamp hang");
+		e_warn(drv, "clearing Tx Timestamp hang\n");
		return;
	}

@@ -530,35 +525,22 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
 }

 /**
- * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
- * @q_vector: structure containing interrupt and ring information
+ * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
+ * @adapter: pointer to adapter struct
  * @skb: particular skb to send timestamp with
  *
  * if the timestamp is valid, we convert it into the timecounter ns
  * value, then store that result into the shhwtstamps structure which
  * is passed up the network stack
  */
-void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
-			     struct sk_buff *skb)
+void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 {
-	struct ixgbe_adapter *adapter;
-	struct ixgbe_hw *hw;
+	struct ixgbe_hw *hw = &adapter->hw;
	struct skb_shared_hwtstamps *shhwtstamps;
	u64 regval = 0, ns;
	u32 tsyncrxctl;
	unsigned long flags;

-	/* we cannot process timestamps on a ring without a q_vector */
-	if (!q_vector || !q_vector->adapter)
-		return;
-
-	adapter = q_vector->adapter;
-	hw = &adapter->hw;
-
	/*
	 * Read the tsyncrxctl register afterwards in order to prevent taking an
	 * I/O hit on every packet.
	 */
	tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
		return;

@@ -566,13 +548,17 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;

	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, regval);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
+
+	/* Update the last_rx_timestamp timer in order to enable watchdog check
+	 * for error case of latched timestamp on a dropped packet.
+	 */
+	adapter->last_rx_timestamp = jiffies;
 }

 int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
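
Aside: the ixgbe rx-hang logic above is a latch watchdog: remember when the last good RX timestamp was consumed, and if the hardware latch stays "valid but unread" past a deadline, read the register once to unlock it. A plain-C sketch of just the deadline test, with jiffies replaced by a counter:

    #include <stdio.h>
    #include <stdbool.h>

    #define HZ 100
    static unsigned long jiffies;               /* fake clock tick */
    static unsigned long last_rx_timestamp;     /* updated on every good read */

    static bool latch_stuck(void)
    {
        /* 5 seconds without consuming a latched timestamp means stuck */
        return jiffies - last_rx_timestamp > 5UL * HZ;
    }

    int main(void)
    {
        last_rx_timestamp = 0;
        jiffies = 6 * HZ;
        if (latch_stuck())
            printf("clearing RX timestamp hang at tick %lu\n", jiffies);
        return 0;
    }
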
@@ -232,7 +232,7 @@ static int orion_mdio_probe(struct platform_device *pdev)
	clk_prepare_enable(dev->clk);

	dev->err_interrupt = platform_get_irq(pdev, 0);
-	if (dev->err_interrupt != -ENXIO) {
+	if (dev->err_interrupt > 0) {
		ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
				       orion_mdio_err_irq,
				       IRQF_SHARED, pdev->name, dev);

@@ -241,6 +241,9 @@ static int orion_mdio_probe(struct platform_device *pdev)

		writel(MVMDIO_ERR_INT_SMI_DONE,
		       dev->regs + MVMDIO_ERR_INT_MASK);
+
+	} else if (dev->err_interrupt == -EPROBE_DEFER) {
+		return -EPROBE_DEFER;
	}

	mutex_init(&dev->lock);
@@ -754,10 +754,10 @@ static void mlx4_request_modules(struct mlx4_dev *dev)
			has_eth_port = true;
	}

-	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
-		request_module_nowait(IB_DRV_NAME);
	if (has_eth_port)
		request_module_nowait(EN_DRV_NAME);
+	if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+		request_module_nowait(IB_DRV_NAME);
 }

 /*

@@ -2440,7 +2440,8 @@ slave_start:
	 * No return code for this call, just warn the user in case of PCI
	 * express device capabilities are under-satisfied by the bus.
	 */
-	mlx4_check_pcie_caps(dev);
+	if (!mlx4_is_slave(dev))
+		mlx4_check_pcie_caps(dev);

	/* In master functions, the communication channel must be initialized
	 * after obtaining its address from fw */

@@ -1106,6 +1106,9 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
	}

	if (found_ix >= 0) {
+		/* Calculate a slave_gid which is the slave number in the gid
+		 * table and not a globally unique slave number.
+		 */
		if (found_ix < MLX4_ROCE_PF_GIDS)
			slave_gid = 0;
		else if (found_ix < MLX4_ROCE_PF_GIDS + (vf_gids % num_vfs) *

@@ -1118,41 +1121,43 @@ int mlx4_get_slave_from_roce_gid(struct mlx4_dev *dev, int port, u8 *gid,
			((vf_gids % num_vfs) * ((vf_gids / num_vfs + 1)))) /
			(vf_gids / num_vfs)) + vf_gids % num_vfs + 1;

+		/* Calculate the globally unique slave id */
		if (slave_gid) {
			struct mlx4_active_ports exclusive_ports;
			struct mlx4_active_ports actv_ports;
			struct mlx4_slaves_pport slaves_pport_actv;
			unsigned max_port_p_one;
-			int num_slaves_before = 1;
+			int num_vfs_before = 0;
+			int candidate_slave_gid;

+			/* Calculate how many VFs are on the previous port, if exists */
			for (i = 1; i < port; i++) {
				bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
-				set_bit(i, exclusive_ports.ports);
+				set_bit(i - 1, exclusive_ports.ports);
				slaves_pport_actv =
					mlx4_phys_to_slaves_pport_actv(
							dev, &exclusive_ports);
-				num_slaves_before += bitmap_weight(
+				num_vfs_before += bitmap_weight(
						slaves_pport_actv.slaves,
						dev->num_vfs + 1);
			}

-			if (slave_gid < num_slaves_before) {
-				bitmap_zero(exclusive_ports.ports, dev->caps.num_ports);
-				set_bit(port - 1, exclusive_ports.ports);
-				slaves_pport_actv =
-					mlx4_phys_to_slaves_pport_actv(
-							dev, &exclusive_ports);
-				slave_gid += bitmap_weight(
-						slaves_pport_actv.slaves,
-						dev->num_vfs + 1) -
-						num_slaves_before;
-			}
-			actv_ports = mlx4_get_active_ports(dev, slave_gid);
+			/* candidate_slave_gid isn't necessarily the correct slave, but
+			 * it has the same number of ports and is assigned to the same
+			 * ports as the real slave we're looking for. On dual port VF,
+			 * slave_gid = [single port VFs on port <port>] +
+			 * [offset of the current slave from the first dual port VF] +
+			 * 1 (for the PF).
+			 */
+			candidate_slave_gid = slave_gid + num_vfs_before;
+
+			actv_ports = mlx4_get_active_ports(dev, candidate_slave_gid);
			max_port_p_one = find_first_bit(
				actv_ports.ports, dev->caps.num_ports) +
				bitmap_weight(actv_ports.ports,
					      dev->caps.num_ports) + 1;

			/* Calculate the real slave number */
			for (i = 1; i < max_port_p_one; i++) {
				if (i == port)
					continue;

@@ -3733,6 +3733,25 @@ static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
	}
 }

+static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
+			    u8 *gid, enum mlx4_protocol prot)
+{
+	int real_port;
+
+	if (prot != MLX4_PROT_ETH)
+		return 0;
+
+	if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
+	    dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+		real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
+		if (real_port < 0)
+			return -EINVAL;
+		gid[5] = real_port;
+	}
+
+	return 0;
+}
+
 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,

@@ -3768,6 +3787,10 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
		if (err)
			goto ex_detach;
	} else {
+		err = mlx4_adjust_port(dev, slave, gid, prot);
+		if (err)
+			goto ex_put;
+
		err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
		if (err)
			goto ex_put;
@@ -2374,6 +2374,14 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
	qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
 }

+/* Reset firmware API lock */
+static void qlcnic_reset_api_lock(struct qlcnic_adapter *adapter)
+{
+	qlcnic_api_lock(adapter);
+	qlcnic_api_unlock(adapter);
+}
+
+
 static int
 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {

@@ -2476,6 +2484,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	if (qlcnic_82xx_check(adapter)) {
		qlcnic_check_vf(adapter, ent);
		adapter->portnum = adapter->ahw->pci_func;
+		qlcnic_reset_api_lock(adapter);
		err = qlcnic_start_firmware(adapter);
		if (err) {
			dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"

@@ -1370,7 +1370,7 @@ static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,

	rsp = qlcnic_sriov_alloc_bc_trans(&trans);
	if (rsp)
-		return rsp;
+		goto free_cmd;

	rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
	if (rsp)

@@ -1425,6 +1425,13 @@ err_out:

 cleanup_transaction:
	qlcnic_sriov_cleanup_transaction(trans);

+free_cmd:
+	if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+		qlcnic_free_mbx_args(cmd);
+		kfree(cmd);
+	}
+
	return rsp;
 }
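
Aside: the qlcnic_sriov_issue_cmd change above is the standard goto-unwind fix: the early `return rsp` skipped the cleanup that frees the command object for "no wait" callers, leaking it. A toy sketch of the pattern (stand-in types, not the driver's):

    #include <stdio.h>
    #include <stdlib.h>

    struct cmd { int no_wait; };

    static int issue(struct cmd *cmd, int fail_alloc)
    {
        int rsp = 0;

        if (fail_alloc) {
            rsp = -1;
            goto free_cmd;      /* was: return rsp; leaking cmd */
        }
        /* ... send, wait, clean up the transaction ... */
    free_cmd:
        if (cmd->no_wait)
            free(cmd);          /* fire-and-forget callers never free it */
        return rsp;
    }

    int main(void)
    {
        struct cmd *c = malloc(sizeof(*c));

        c->no_wait = 1;
        printf("rsp=%d\n", issue(c, 1));
        return 0;
    }
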
@@ -358,6 +358,8 @@ struct sxgbe_core_ops {
	/* Enable disable checksum offload operations */
	void (*enable_rx_csum)(void __iomem *ioaddr);
	void (*disable_rx_csum)(void __iomem *ioaddr);
+	void (*enable_rxqueue)(void __iomem *ioaddr, int queue_num);
+	void (*disable_rxqueue)(void __iomem *ioaddr, int queue_num);
 };

 const struct sxgbe_core_ops *sxgbe_get_core_ops(void);

@@ -165,6 +165,26 @@ static void sxgbe_core_set_speed(void __iomem *ioaddr, unsigned char speed)
	writel(tx_cfg, ioaddr + SXGBE_CORE_TX_CONFIG_REG);
 }

+static void sxgbe_core_enable_rxqueue(void __iomem *ioaddr, int queue_num)
+{
+	u32 reg_val;
+
+	reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
+	reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
+	reg_val |= SXGBE_CORE_RXQ_ENABLE;
+	writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
+}
+
+static void sxgbe_core_disable_rxqueue(void __iomem *ioaddr, int queue_num)
+{
+	u32 reg_val;
+
+	reg_val = readl(ioaddr + SXGBE_CORE_RX_CTL0_REG);
+	reg_val &= ~(SXGBE_CORE_RXQ_ENABLE_MASK << queue_num);
+	reg_val |= SXGBE_CORE_RXQ_DISABLE;
+	writel(reg_val, ioaddr + SXGBE_CORE_RX_CTL0_REG);
+}
+
 static void sxgbe_set_eee_mode(void __iomem *ioaddr)
 {
	u32 ctrl;

@@ -254,6 +274,8 @@ static const struct sxgbe_core_ops core_ops = {
	.set_eee_pls = sxgbe_set_eee_pls,
	.enable_rx_csum = sxgbe_enable_rx_csum,
	.disable_rx_csum = sxgbe_disable_rx_csum,
+	.enable_rxqueue = sxgbe_core_enable_rxqueue,
+	.disable_rxqueue = sxgbe_core_disable_rxqueue,
 };

 const struct sxgbe_core_ops *sxgbe_get_core_ops(void)
@@ -45,10 +45,10 @@ static void sxgbe_prepare_tx_desc(struct sxgbe_tx_norm_desc *p, u8 is_fd,
	p->tdes23.tx_rd_des23.first_desc = is_fd;
	p->tdes23.tx_rd_des23.buf1_size = buf1_len;

-	p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.total_pkt_len = pkt_len;
+	p->tdes23.tx_rd_des23.tx_pkt_len.pkt_len.total_pkt_len = pkt_len;

	if (cksum)
-		p->tdes23.tx_rd_des23.tx_pkt_len.cksum_pktlen.cksum_ctl = cic_full;
+		p->tdes23.tx_rd_des23.cksum_ctl = cic_full;
 }

 /* Set VLAN control information */

@@ -233,6 +233,12 @@ static void sxgbe_set_rx_owner(struct sxgbe_rx_norm_desc *p)
	p->rdes23.rx_rd_des23.own_bit = 1;
 }

+/* Set Interrupt on completion bit */
+static void sxgbe_set_rx_int_on_com(struct sxgbe_rx_norm_desc *p)
+{
+	p->rdes23.rx_rd_des23.int_on_com = 1;
+}
+
 /* Get the receive frame size */
 static int sxgbe_get_rx_frame_len(struct sxgbe_rx_norm_desc *p)
 {

@@ -498,6 +504,7 @@ static const struct sxgbe_desc_ops desc_ops = {
	.init_rx_desc = sxgbe_init_rx_desc,
	.get_rx_owner = sxgbe_get_rx_owner,
	.set_rx_owner = sxgbe_set_rx_owner,
+	.set_rx_int_on_com = sxgbe_set_rx_int_on_com,
	.get_rx_frame_len = sxgbe_get_rx_frame_len,
	.get_rx_fd_status = sxgbe_get_rx_fd_status,
	.get_rx_ld_status = sxgbe_get_rx_ld_status,

@@ -39,22 +39,22 @@ struct sxgbe_tx_norm_desc {
		u32 int_on_com:1;
		/* TDES3 */
		union {
-			u32 tcp_payload_len:18;
+			u16 tcp_payload_len;
			struct {
				u32 total_pkt_len:15;
				u32 reserved1:1;
-				u32 cksum_ctl:2;
-			} cksum_pktlen;
+			} pkt_len;
		} tx_pkt_len;

-		u32 tse_bit:1;
-		u32 tcp_hdr_len:4;
-		u32 sa_insert_ctl:3;
-		u32 crc_pad_ctl:2;
-		u32 last_desc:1;
-		u32 first_desc:1;
-		u32 ctxt_bit:1;
-		u32 own_bit:1;
+		u16 cksum_ctl:2;
+		u16 tse_bit:1;
+		u16 tcp_hdr_len:4;
+		u16 sa_insert_ctl:3;
+		u16 crc_pad_ctl:2;
+		u16 last_desc:1;
+		u16 first_desc:1;
+		u16 ctxt_bit:1;
+		u16 own_bit:1;
	} tx_rd_des23;

	/* tx write back Desc 2,3 */

@@ -70,25 +70,20 @@ struct sxgbe_tx_norm_desc {

 struct sxgbe_rx_norm_desc {
	union {
-		u32 rdes0; /* buf1 address */
-		struct {
+		u64 rdes01; /* buf1 address */
+		union {
			u32 out_vlan_tag:16;
			u32 in_vlan_tag:16;
-		} wb_rx_des0;
-	} rd_wb_des0;
-
-	union {
-		u32 rdes1;	/* buf2 address or buf1[63:32] */
-		u32 rss_hash;	/* Write-back RX */
-	} rd_wb_des1;
+			u32 rss_hash;
+		} rx_wb_des01;
+	} rdes01;

	union {
		/* RX Read format Desc 2,3 */
		struct{
			/* RDES2 */
-			u32 buf2_addr;
+			u64 buf2_addr:62;
			/* RDES3 */
-			u32 buf2_hi_addr:30;
			u32 int_on_com:1;
			u32 own_bit:1;
		} rx_rd_des23;

@@ -263,6 +258,9 @@ struct sxgbe_desc_ops {
	/* Set own bit */
	void (*set_rx_owner)(struct sxgbe_rx_norm_desc *p);

+	/* Set Interrupt on completion bit */
+	void (*set_rx_int_on_com)(struct sxgbe_rx_norm_desc *p);
+
	/* Get the receive frame size */
	int (*get_rx_frame_len)(struct sxgbe_rx_norm_desc *p);
@@ -23,21 +23,8 @@
 /* DMA core initialization */
 static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
 {
-	int retry_count = 10;
	u32 reg_val;

-	/* reset the DMA */
-	writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
-	while (retry_count--) {
-		if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
-		      SXGBE_DMA_SOFT_RESET))
-			break;
-		mdelay(10);
-	}
-
-	if (retry_count < 0)
-		return -EBUSY;
-
	reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	/* if fix_burst = 0, Set UNDEF = 1 of DMA_Sys_Mode Register.

@@ -1076,6 +1076,9 @@ static int sxgbe_open(struct net_device *dev)

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->ioaddr);
+	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+		priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num);
+	}

	/* Request the IRQ lines */
	ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt,

@@ -1453,6 +1456,7 @@ static void sxgbe_rx_refill(struct sxgbe_priv_data *priv)
		/* Added memory barrier for RX descriptor modification */
		wmb();
		priv->hw->desc->set_rx_owner(p);
+		priv->hw->desc->set_rx_int_on_com(p);
		/* Added memory barrier for RX descriptor modification */
		wmb();
	}

@@ -2070,6 +2074,24 @@ static int sxgbe_hw_init(struct sxgbe_priv_data * const priv)
	return 0;
 }

+static int sxgbe_sw_reset(void __iomem *addr)
+{
+	int retry_count = 10;
+
+	writel(SXGBE_DMA_SOFT_RESET, addr + SXGBE_DMA_MODE_REG);
+	while (retry_count--) {
+		if (!(readl(addr + SXGBE_DMA_MODE_REG) &
+		      SXGBE_DMA_SOFT_RESET))
+			break;
+		mdelay(10);
+	}
+
+	if (retry_count < 0)
+		return -EBUSY;
+
+	return 0;
+}
+
 /**
  * sxgbe_drv_probe
  * @device: device pointer

@@ -2102,6 +2124,10 @@ struct sxgbe_priv_data *sxgbe_drv_probe(struct device *device,
	priv->plat = plat_dat;
	priv->ioaddr = addr;

+	ret = sxgbe_sw_reset(priv->ioaddr);
+	if (ret)
+		goto error_free_netdev;
+
	/* Verify driver arguments */
	sxgbe_verify_args();

@@ -2218,9 +2244,14 @@ error_free_netdev:
 int sxgbe_drv_remove(struct net_device *ndev)
 {
	struct sxgbe_priv_data *priv = netdev_priv(ndev);
+	u8 queue_num;

	netdev_info(ndev, "%s: removing driver\n", __func__);

+	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, queue_num) {
+		priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num);
+	}
+
	priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES);
	priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES);
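
Aside: sxgbe_sw_reset(), hoisted out of the DMA init above, is the classic bounded-poll reset: write the self-clearing reset bit, then poll until the hardware clears it or the retry budget runs out. A userspace sketch with the register replaced by a plain variable:

    #include <stdio.h>

    #define SOFT_RESET 0x1

    static unsigned int fake_reg = SOFT_RESET;  /* pretend HW clears it */
    static int polls_until_hw_done = 3;

    static unsigned int read_reg(void)
    {
        if (polls_until_hw_done-- <= 0)
            fake_reg &= ~SOFT_RESET;            /* hardware finished */
        return fake_reg;
    }

    static int sw_reset(void)
    {
        int retry_count = 10;

        fake_reg |= SOFT_RESET;                 /* "write" the reset bit */
        while (retry_count--) {
            if (!(read_reg() & SOFT_RESET))
                return 0;                       /* bit self-cleared: done */
            /* a real driver would mdelay(10) here */
        }
        return -16;                             /* -EBUSY: stuck in reset */
    }

    int main(void)
    {
        printf("sw_reset() = %d\n", sw_reset());
        return 0;
    }
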
@@ -27,7 +27,7 @@
 #define SXGBE_SMA_PREAD_CMD	0x02 /* post read increament address */
 #define SXGBE_SMA_READ_CMD	0x03 /* read command */
 #define SXGBE_SMA_SKIP_ADDRFRM	0x00040000 /* skip the address frame */
-#define SXGBE_MII_BUSY		0x00800000 /* mii busy */
+#define SXGBE_MII_BUSY		0x00400000 /* mii busy */

 static int sxgbe_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_data)
 {

@@ -147,6 +147,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
	struct sxgbe_mdio_bus_data *mdio_data = priv->plat->mdio_bus_data;
	int err, phy_addr;
	int *irqlist;
+	bool phy_found = false;
	bool act;

	/* allocate the new mdio bus */

@@ -162,7 +163,7 @@ int sxgbe_mdio_register(struct net_device *ndev)
	irqlist = priv->mii_irq;

	/* assign mii bus fields */
-	mdio_bus->name = "samsxgbe";
+	mdio_bus->name = "sxgbe";
	mdio_bus->read = &sxgbe_mdio_read;
	mdio_bus->write = &sxgbe_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%x",

@@ -216,13 +217,22 @@ int sxgbe_mdio_register(struct net_device *ndev)
			netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
				    phy->phy_id, phy_addr, irq_str,
				    dev_name(&phy->dev), act ? " active" : "");
+			phy_found = true;
		}
	}

+	if (!phy_found) {
+		netdev_err(ndev, "PHY not found\n");
+		goto phyfound_err;
+	}
+
	priv->mii = mdio_bus;

	return 0;

+phyfound_err:
+	err = -ENODEV;
+	mdiobus_unregister(mdio_bus);
 mdiobus_err:
	mdiobus_free(mdio_bus);
	return err;
@@ -52,6 +52,10 @@
 #define SXGBE_CORE_RX_CTL2_REG		0x00A8
 #define SXGBE_CORE_RX_CTL3_REG		0x00AC

+#define SXGBE_CORE_RXQ_ENABLE_MASK	0x0003
+#define SXGBE_CORE_RXQ_ENABLE		0x0002
+#define SXGBE_CORE_RXQ_DISABLE		0x0000
+
 /* Interrupt Registers */
 #define SXGBE_CORE_INT_STATUS_REG	0x00B0
 #define SXGBE_CORE_INT_ENABLE_REG	0x00B4
@@ -147,18 +147,19 @@ MODULE_ALIAS("platform:smc91x");
 */
 #define MII_DELAY		1

-#if SMC_DEBUG > 0
-#define DBG(n, dev, args...)				\
-	do {						\
-		if (SMC_DEBUG >= (n))			\
-			netdev_dbg(dev, args);		\
+#define DBG(n, dev, fmt, ...)					\
+	do {							\
+		if (SMC_DEBUG >= (n))				\
+			netdev_dbg(dev, fmt, ##__VA_ARGS__);	\
	} while (0)

-#define PRINTK(dev, args...)   netdev_info(dev, args)
-#else
-#define DBG(n, dev, args...)   do { } while (0)
-#define PRINTK(dev, args...)   netdev_dbg(dev, args)
-#endif
+#define PRINTK(dev, fmt, ...)					\
+	do {							\
+		if (SMC_DEBUG > 0)				\
+			netdev_info(dev, fmt, ##__VA_ARGS__);	\
+		else						\
+			netdev_dbg(dev, fmt, ##__VA_ARGS__);	\
+	} while (0)

 #if SMC_DEBUG > 3
 static void PRINT_PKT(u_char *buf, int length)

@@ -191,7 +192,7 @@ static void PRINT_PKT(u_char *buf, int length)
	pr_cont("\n");
 }
 #else
-#define PRINT_PKT(x...)  do { } while (0)
+static inline void PRINT_PKT(u_char *buf, int length) { }
 #endif

@@ -1781,7 +1782,7 @@ static int smc_findirq(struct smc_local *lp)
	int timeout = 20;
	unsigned long cookie;

-	DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
+	DBG(2, lp->dev, "%s: %s\n", CARDNAME, __func__);

	cookie = probe_irq_on();
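
Aside: the smc91x DBG/PRINTK rework above switches from GCC's old `args...` named-variadic form to a named format plus GNU-style `##__VA_ARGS__`, which lets a macro mention the format separately and still forward an empty argument list. A compile-ready sketch of the same shape (names here are illustrative):

    #include <stdio.h>

    #define LOG_LEVEL 2
    #define LOG(n, fmt, ...)                                    \
        do {                                                    \
            if (LOG_LEVEL >= (n))                               \
                printf("log%d: " fmt, (n), ##__VA_ARGS__);      \
        } while (0)

    int main(void)
    {
        LOG(1, "no extra args\n");      /* ## swallows the trailing comma */
        LOG(2, "value = %d\n", 42);
        LOG(3, "suppressed below threshold\n");
        return 0;
    }
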
@@ -382,6 +382,10 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
	if (skb_is_gso(skb))
		goto do_lso;

+	if ((skb->ip_summed == CHECKSUM_NONE) ||
+	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
+		goto do_send;
+
	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);
@@ -263,11 +263,9 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
	const struct macvlan_dev *vlan = netdev_priv(dev);
	const struct macvlan_port *port = vlan->port;
	const struct macvlan_dev *dest;
-	__u8 ip_summed = skb->ip_summed;

	if (vlan->mode == MACVLAN_MODE_BRIDGE) {
		const struct ethhdr *eth = (void *)skb->data;
-		skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* send to other bridge ports directly */
		if (is_multicast_ether_addr(eth->h_dest)) {

@@ -285,7 +283,6 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
	}

 xmit_world:
-	skb->ip_summed = ip_summed;
	skb->dev = vlan->lowerdev;
	return dev_queue_xmit(skb);
 }
@@ -322,6 +322,15 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
			segs = nskb;
		}
	} else {
+		/* If we receive a partial checksum and the tap side
+		 * doesn't support checksum offload, compute the checksum.
+		 * Note: it doesn't matter which checksum feature to
+		 * check, we either support them all or none.
+		 */
+		if (skb->ip_summed == CHECKSUM_PARTIAL &&
+		    !(features & NETIF_F_ALL_CSUM) &&
+		    skb_checksum_help(skb))
+			goto drop;
		skb_queue_tail(&q->sk.sk_receive_queue, skb);
	}
@@ -246,13 +246,13 @@ static int ksz9021_load_values_from_of(struct phy_device *phydev,
	if (val1 != -1)
		newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0);

-	if (val2 != -1)
+	if (val2 != -2)
		newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4);

-	if (val3 != -1)
+	if (val3 != -3)
		newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8);

-	if (val4 != -1)
+	if (val4 != -4)
		newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12);

	return kszphy_extended_write(phydev, reg, newval);
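
Aside: the micrel fix above is a sentinel-default bug. Each optional value is initialized to its own distinct "not set" sentinel (-1, -2, -3, -4), so the presence test must compare against the matching sentinel, not a single shared one. A toy demonstration:

    #include <stdio.h>

    static int lookup(const char *key)  /* pretend DT lookup; <0 == absent */
    {
        (void)key;
        return -1;                      /* nothing found in this toy */
    }

    int main(void)
    {
        int val2 = -2;                  /* this field's "unset" sentinel */
        int v;

        v = lookup("txen-skew-ps");
        if (v >= 0)
            val2 = v;

        if (val2 != -1)                 /* BUG: -2 passes while unset */
            printf("bogus: would program unset val2=%d\n", val2);
        if (val2 != -2)                 /* fixed: the right sentinel */
            printf("programming val2=%d\n", val2);
        else
            printf("val2 left at hardware default\n");
        return 0;
    }
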
@@ -765,6 +765,17 @@ void phy_state_machine(struct work_struct *work)
			break;

		if (phydev->link) {
+			if (AUTONEG_ENABLE == phydev->autoneg) {
+				err = phy_aneg_done(phydev);
+				if (err < 0)
+					break;
+
+				if (!err) {
+					phydev->state = PHY_AN;
+					phydev->link_timeout = PHY_AN_TIMEOUT;
+					break;
+				}
+			}
			phydev->state = PHY_RUNNING;
			netif_carrier_on(phydev->attached_dev);
			phydev->adjust_link(phydev->attached_dev);
@@ -429,13 +429,13 @@ static void slip_write_wakeup(struct tty_struct *tty)
	if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
		return;

-	spin_lock(&sl->lock);
+	spin_lock_bh(&sl->lock);
	if (sl->xleft <= 0) {
		/* Now serial buffer is almost free & we can start
		 * transmission of another packet */
		sl->dev->stats.tx_packets++;
		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-		spin_unlock(&sl->lock);
+		spin_unlock_bh(&sl->lock);
		sl_unlock(sl);
		return;
	}

@@ -443,7 +443,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
	actual = tty->ops->write(tty, sl->xhead, sl->xleft);
	sl->xleft -= actual;
	sl->xhead += actual;
-	spin_unlock(&sl->lock);
+	spin_unlock_bh(&sl->lock);
 }

 static void sl_tx_timeout(struct net_device *dev)
@@ -2834,8 +2834,10 @@ static int team_device_event(struct notifier_block *unused,
 	case NETDEV_UP:
+		if (netif_carrier_ok(dev))
 			team_port_change_check(port, true);
 		break;
 	case NETDEV_DOWN:
 		team_port_change_check(port, false);
 		break;
 	case NETDEV_CHANGE:
+		if (netif_running(port->dev))
 			team_port_change_check(port,

@@ -785,7 +785,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
 	    skb_out->len > CDC_NCM_MIN_TX_PKT)
 		memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0,
 		       ctx->tx_max - skb_out->len);
-	else if ((skb_out->len % dev->maxpacket) == 0)
+	else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0)
 		*skb_put(skb_out, 1) = 0;	/* force short packet */
 
 	/* set final frame length */

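The overflow fixed here: appending the "force short packet" byte is only valid while the NTB is still below tx_max; at exactly tx_max, adding even one byte overruns the buffer. A minimal sketch of the corrected decision, with invented sizes and a simplified threshold constant:

/* Illustrative only: models the fixed NTB padding decision as pure
 * length arithmetic, not the real usbnet/cdc_ncm API. */
#include <stdio.h>
#include <stdbool.h>

#define CDC_NCM_MIN_TX_PKT 512	/* assumed threshold for the demo */

/* returns bytes to append: pad-to-max, one forced short byte, or none */
static unsigned int tx_pad_bytes(unsigned int len, unsigned int tx_max,
				 unsigned int maxpacket, bool can_pad)
{
	if (can_pad && len > CDC_NCM_MIN_TX_PKT && len < tx_max)
		return tx_max - len;	/* zero-pad to a full NTB */
	if (len < tx_max && (len % maxpacket) == 0)
		return 1;		/* force a short USB packet */
	return 0;
}

int main(void)
{
	printf("%u\n", tx_pad_bytes(1024, 16384, 512, false));	/* 1 */
	printf("%u\n", tx_pad_bytes(16384, 16384, 512, false));	/* 0: no overflow */
	return 0;
}
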
@@ -669,6 +669,22 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
 	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
 	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
+	{QMI_FIXED_INTF(0x16d8, 0x6003, 0)},	/* CMOTech 6003 */
+	{QMI_FIXED_INTF(0x16d8, 0x6007, 0)},	/* CMOTech CHE-628S */
+	{QMI_FIXED_INTF(0x16d8, 0x6008, 0)},	/* CMOTech CMU-301 */
+	{QMI_FIXED_INTF(0x16d8, 0x6280, 0)},	/* CMOTech CHU-628 */
+	{QMI_FIXED_INTF(0x16d8, 0x7001, 0)},	/* CMOTech CHU-720S */
+	{QMI_FIXED_INTF(0x16d8, 0x7002, 0)},	/* CMOTech 7002 */
+	{QMI_FIXED_INTF(0x16d8, 0x7003, 4)},	/* CMOTech CHU-629K */
+	{QMI_FIXED_INTF(0x16d8, 0x7004, 3)},	/* CMOTech 7004 */
+	{QMI_FIXED_INTF(0x16d8, 0x7006, 5)},	/* CMOTech CGU-629 */
+	{QMI_FIXED_INTF(0x16d8, 0x700a, 4)},	/* CMOTech CHU-629S */
+	{QMI_FIXED_INTF(0x16d8, 0x7211, 0)},	/* CMOTech CHU-720I */
+	{QMI_FIXED_INTF(0x16d8, 0x7212, 0)},	/* CMOTech 7212 */
+	{QMI_FIXED_INTF(0x16d8, 0x7213, 0)},	/* CMOTech 7213 */
+	{QMI_FIXED_INTF(0x16d8, 0x7251, 1)},	/* CMOTech 7251 */
+	{QMI_FIXED_INTF(0x16d8, 0x7252, 1)},	/* CMOTech 7252 */
+	{QMI_FIXED_INTF(0x16d8, 0x7253, 1)},	/* CMOTech 7253 */
 	{QMI_FIXED_INTF(0x19d2, 0x0002, 1)},
 	{QMI_FIXED_INTF(0x19d2, 0x0012, 1)},
 	{QMI_FIXED_INTF(0x19d2, 0x0017, 3)},
@@ -730,16 +746,28 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x114f, 0x68a2, 8)},	/* Sierra Wireless MC7750 */
 	{QMI_FIXED_INTF(0x1199, 0x68a2, 8)},	/* Sierra Wireless MC7710 in QMI mode */
+	{QMI_FIXED_INTF(0x1199, 0x68a2, 19)},	/* Sierra Wireless MC7710 in QMI mode */
 	{QMI_FIXED_INTF(0x1199, 0x68c0, 8)},	/* Sierra Wireless MC73xx */
+	{QMI_FIXED_INTF(0x1199, 0x68c0, 10)},	/* Sierra Wireless MC73xx */
+	{QMI_FIXED_INTF(0x1199, 0x68c0, 11)},	/* Sierra Wireless MC73xx */
 	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},	/* Sierra Wireless EM7700 */
 	{QMI_FIXED_INTF(0x1199, 0x901f, 8)},	/* Sierra Wireless EM7355 */
 	{QMI_FIXED_INTF(0x1199, 0x9041, 8)},	/* Sierra Wireless MC7305/MC7355 */
 	{QMI_FIXED_INTF(0x1199, 0x9051, 8)},	/* Netgear AirCard 340U */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
+	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
 	{QMI_FIXED_INTF(0x2357, 0x9000, 4)},	/* TP-LINK MA260 */
 	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
+	{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},	/* Telit LE920 */
 	{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},	/* Olivetti Olicard 200 */
+	{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)},	/* Olivetti Olicard 500 */
 	{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},	/* Cinterion PLxx */
+	{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},	/* Cinterion PHxx,PXxx */
+	{QMI_FIXED_INTF(0x413c, 0x81a2, 8)},	/* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
+	{QMI_FIXED_INTF(0x413c, 0x81a3, 8)},	/* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
+	{QMI_FIXED_INTF(0x413c, 0x81a4, 8)},	/* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
+	{QMI_FIXED_INTF(0x413c, 0x81a8, 8)},	/* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
+	{QMI_FIXED_INTF(0x413c, 0x81a9, 8)},	/* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */

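These entries pin the driver to one specific USB interface per device, since QMI modems expose several functions on distinct interface numbers. A rough sketch of how such a fixed-interface whitelist is matched; the struct and lookup are demo stand-ins, though the sample IDs come from the entries above:

/* Illustrative only: a simplified model of matching a products[] table. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct qmi_id { uint16_t vid, pid; uint8_t iface; };

static const struct qmi_id products[] = {
	{ 0x16d8, 0x6003, 0 },	/* CMOTech 6003 */
	{ 0x1199, 0x68c0, 10 },	/* Sierra Wireless MC73xx */
	{ 0x413c, 0x81a2, 8 },	/* Dell Wireless 5806 */
};

static int match(uint16_t vid, uint16_t pid, uint8_t iface)
{
	size_t i;

	for (i = 0; i < sizeof(products) / sizeof(products[0]); i++)
		if (products[i].vid == vid && products[i].pid == pid &&
		    products[i].iface == iface)
			return 1;	/* bind the QMI driver */
	return 0;			/* leave the interface alone */
}

int main(void)
{
	printf("%d\n", match(0x1199, 0x68c0, 10));	/* 1 */
	printf("%d\n", match(0x1199, 0x68c0, 9));	/* 0 */
	return 0;
}
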
@@ -1285,7 +1285,7 @@ static int virtnet_set_channels(struct net_device *dev,
 	if (channels->rx_count || channels->tx_count || channels->other_count)
 		return -EINVAL;
 
-	if (queue_pairs > vi->max_queue_pairs)
+	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
 		return -EINVAL;
 
 	get_online_cpus();

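The essential check, extracted: a request for zero queue pairs would leave the device with no usable queues, so it must be rejected along with out-of-range counts. A plain C model, not the virtio API:

/* Illustrative only: the bounds check from the fix as a pure function. */
#include <stdio.h>
#include <errno.h>

static int check_queue_pairs(unsigned int queue_pairs, unsigned int max)
{
	if (queue_pairs > max || queue_pairs == 0)
		return -EINVAL;	/* reject 0 and out-of-range requests */
	return 0;
}

int main(void)
{
	printf("%d\n", check_queue_pairs(0, 8));	/* -EINVAL */
	printf("%d\n", check_queue_pairs(4, 8));	/* 0 */
	return 0;
}
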
@@ -389,8 +389,8 @@ static inline size_t vxlan_nlmsg_size(void)
 		+ nla_total_size(sizeof(struct nda_cacheinfo));
 }
 
-static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
-			     struct vxlan_fdb *fdb, int type)
+static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb,
+			     struct vxlan_rdst *rd, int type)
 {
 	struct net *net = dev_net(vxlan->dev);
 	struct sk_buff *skb;
@@ -400,8 +400,7 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
 	if (skb == NULL)
 		goto errout;
 
-	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0,
-			     first_remote_rtnl(fdb));
+	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, rd);
 	if (err < 0) {
 		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
 		WARN_ON(err == -EMSGSIZE);
@@ -427,10 +426,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
 		.remote_vni = VXLAN_N_VID,
 	};
 
-	INIT_LIST_HEAD(&f.remotes);
-	list_add_rcu(&remote.list, &f.remotes);
-
-	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
 }
 
 static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
@@ -438,11 +434,11 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
 	struct vxlan_fdb f = {
 		.state = NUD_STALE,
 	};
+	struct vxlan_rdst remote = { };
 
-	INIT_LIST_HEAD(&f.remotes);
 	memcpy(f.eth_addr, eth_addr, ETH_ALEN);
 
-	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
+	vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH);
 }
 
 /* Hash Ethernet address */
@@ -533,7 +529,8 @@ static int vxlan_fdb_replace(struct vxlan_fdb *f,
 
 /* Add/update destinations for multicast */
 static int vxlan_fdb_append(struct vxlan_fdb *f,
-			    union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
+			    union vxlan_addr *ip, __be16 port, __u32 vni,
+			    __u32 ifindex, struct vxlan_rdst **rdp)
 {
 	struct vxlan_rdst *rd;
 
@@ -551,6 +548,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
 
 	list_add_tail_rcu(&rd->list, &f->remotes);
 
+	*rdp = rd;
 	return 1;
 }
 
@@ -690,6 +688,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
 			    __be16 port, __u32 vni, __u32 ifindex,
 			    __u8 ndm_flags)
 {
+	struct vxlan_rdst *rd = NULL;
 	struct vxlan_fdb *f;
 	int notify = 0;
 
@@ -726,7 +725,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
 		if ((flags & NLM_F_APPEND) &&
 		    (is_multicast_ether_addr(f->eth_addr) ||
 		     is_zero_ether_addr(f->eth_addr))) {
-			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex);
+			int rc = vxlan_fdb_append(f, ip, port, vni, ifindex,
+						  &rd);
 
 			if (rc < 0)
 				return rc;
@@ -756,15 +756,18 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
 		INIT_LIST_HEAD(&f->remotes);
 		memcpy(f->eth_addr, mac, ETH_ALEN);
 
-		vxlan_fdb_append(f, ip, port, vni, ifindex);
+		vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
 
 		++vxlan->addrcnt;
 		hlist_add_head_rcu(&f->hlist,
 				   vxlan_fdb_head(vxlan, mac));
 	}
 
-	if (notify)
-		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
+	if (notify) {
+		if (rd == NULL)
+			rd = first_remote_rtnl(f);
+		vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH);
+	}
 
 	return 0;
 }
@@ -785,7 +788,7 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
 		   "delete %pM\n", f->eth_addr);
 
 	--vxlan->addrcnt;
-	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);
+	vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
 
 	hlist_del_rcu(&f->hlist);
 	call_rcu(&f->rcu, vxlan_fdb_free);
@@ -919,6 +922,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 	 */
 	if (rd && !list_is_singular(&f->remotes)) {
 		list_del_rcu(&rd->list);
+		vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH);
 		kfree_rcu(rd, rcu);
 		goto out;
 	}
@@ -993,7 +997,7 @@ static bool vxlan_snoop(struct net_device *dev,
 
 		rdst->remote_ip = *src_ip;
 		f->updated = jiffies;
-		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
+		vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH);
 	} else {
 		/* learned new entry */
 		spin_lock(&vxlan->hash_lock);

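The refactor's core idea: the append helper now hands the remote it created back through an out parameter, so notifications describe the exact remote that changed instead of defaulting to the first one on the list. A minimal sketch with a plain singly linked list (not the kernel's RCU list API):

/* Illustrative only: out-parameter append, modelled on vxlan_fdb_append(). */
#include <stdio.h>
#include <stdlib.h>

struct rdst {
	int remote_ip;
	struct rdst *next;
};

/* returns 1 and sets *rdp on success, -1 on allocation failure */
static int fdb_append(struct rdst **head, int ip, struct rdst **rdp)
{
	struct rdst *rd = malloc(sizeof(*rd));

	if (!rd)
		return -1;
	rd->remote_ip = ip;
	rd->next = *head;
	*head = rd;
	*rdp = rd;	/* hand the new entry back to the caller */
	return 1;
}

int main(void)
{
	struct rdst *head = NULL, *rd = NULL;

	if (fdb_append(&head, 42, &rd) == 1)
		printf("notify remote %d\n", rd->remote_ip);
	free(rd);
	return 0;
}
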
@@ -86,7 +86,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
 	int irq;
 	int ret = 0;
 	struct ath_hw *ah;
-	struct ath_common *common;
 	char hw_name[64];
 
 	if (!dev_get_platdata(&pdev->dev)) {
@@ -146,9 +145,6 @@ static int ath_ahb_probe(struct platform_device *pdev)
 	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
 		   hw_name, (unsigned long)mem, irq);
 
-	common = ath9k_hw_common(sc->sc_ah);
-	/* Will be cleared in ath9k_start() */
-	set_bit(ATH_OP_INVALID, &common->op_flags);
 	return 0;
 
 err_irq:

@@ -155,6 +155,9 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel,
 			     ATH9K_ANI_RSSI_THR_LOW,
 			     ATH9K_ANI_RSSI_THR_HIGH);
 
+	if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_OFDM_DEF_LEVEL)
+		immunityLevel = ATH9K_ANI_OFDM_DEF_LEVEL;
+
 	if (!scan)
 		aniState->ofdmNoiseImmunityLevel = immunityLevel;
 
@@ -235,6 +238,9 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel,
 			     BEACON_RSSI(ah), ATH9K_ANI_RSSI_THR_LOW,
 			     ATH9K_ANI_RSSI_THR_HIGH);
 
+	if (AR_SREV_9100(ah) && immunityLevel < ATH9K_ANI_CCK_DEF_LEVEL)
+		immunityLevel = ATH9K_ANI_CCK_DEF_LEVEL;
+
 	if (ah->opmode == NL80211_IFTYPE_STATION &&
 	    BEACON_RSSI(ah) <= ATH9K_ANI_RSSI_THR_LOW &&
 	    immunityLevel > ATH9K_ANI_CCK_MAX_LEVEL_LOW_RSSI)

@@ -251,7 +251,6 @@ struct ath_atx_tid {
 
 	s8 bar_index;
 	bool sched;
-	bool paused;
 	bool active;
 };
 

@@ -72,7 +72,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
 		ath_txq_lock(sc, txq);
 		if (tid->active) {
 			len += scnprintf(buf + len, size - len,
-					 "%3d%11d%10d%10d%10d%10d%9d%6d%8d\n",
+					 "%3d%11d%10d%10d%10d%10d%9d%6d\n",
 					 tid->tidno,
 					 tid->seq_start,
 					 tid->seq_next,
@@ -80,8 +80,7 @@ static ssize_t read_file_node_aggr(struct file *file, char __user *user_buf,
 					 tid->baw_head,
 					 tid->baw_tail,
 					 tid->bar_index,
-					 tid->sched,
-					 tid->paused);
+					 tid->sched);
 		}
 		ath_txq_unlock(sc, txq);
 	}

@@ -783,6 +783,9 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
 	common = ath9k_hw_common(ah);
 	ath9k_set_hw_capab(sc, hw);
 
+	/* Will be cleared in ath9k_start() */
+	set_bit(ATH_OP_INVALID, &common->op_flags);
+
 	/* Initialize regulatory */
 	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
 			      ath9k_reg_notifier);

@@ -784,7 +784,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct ath_softc *sc;
 	struct ieee80211_hw *hw;
-	struct ath_common *common;
 	u8 csz;
 	u32 val;
 	int ret = 0;
@@ -877,10 +876,6 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
 		   hw_name, (unsigned long)sc->mem, pdev->irq);
 
-	/* Will be cleared in ath9k_start() */
-	common = ath9k_hw_common(sc->sc_ah);
-	set_bit(ATH_OP_INVALID, &common->op_flags);
-
 	return 0;
 
 err_init:

@@ -975,6 +975,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
 	u64 tsf = 0;
 	unsigned long flags;
 	dma_addr_t new_buf_addr;
+	unsigned int budget = 512;
 
 	if (edma)
 		dma_type = DMA_BIDIRECTIONAL;
@@ -1113,15 +1114,17 @@ requeue_drop_frag:
 		}
 requeue:
 		list_add_tail(&bf->list, &sc->rx.rxbuf);
-		if (flush)
-			continue;
 
 		if (edma) {
 			ath_rx_edma_buf_link(sc, qtype);
 		} else {
 			ath_rx_buf_relink(sc, bf);
-			ath9k_hw_rxena(ah);
+			if (!flush)
+				ath9k_hw_rxena(ah);
 		}
+
+		if (!budget--)
+			break;
 	} while (1);
 
 	if (!(ah->imask & ATH9K_INT_RXEOL)) {

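The budget added here bounds how long one tasklet invocation can stay in the RX loop, so a continuously full ring cannot monopolize the CPU. A rough standalone model of that pattern; the frame source and the numbers are invented:

/* Illustrative only: the bounded-iteration (budget) pattern. */
#include <stdio.h>

static int more_frames(unsigned int i)	/* stand-in: ring never empties */
{
	return 1;
}

int main(void)
{
	unsigned int budget = 512;
	unsigned int processed = 0;

	do {
		if (!more_frames(processed))
			break;
		processed++;		/* handle one frame */
		if (!budget--)		/* bail out once the budget is spent */
			break;
	} while (1);

	printf("processed %u frames before yielding\n", processed);
	return 0;
}
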
@@ -107,9 +107,6 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
 {
 	struct ath_atx_ac *ac = tid->ac;
 
-	if (tid->paused)
-		return;
-
 	if (tid->sched)
 		return;
 
@@ -1407,7 +1404,6 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
 	ath_tx_tid_change_state(sc, txtid);
 
 	txtid->active = true;
-	txtid->paused = true;
 	*ssn = txtid->seq_start = txtid->seq_next;
 	txtid->bar_index = -1;
 
@@ -1427,7 +1423,6 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
 
 	ath_txq_lock(sc, txq);
 	txtid->active = false;
-	txtid->paused = false;
 	ath_tx_flush_tid(sc, txtid);
 	ath_tx_tid_change_state(sc, txtid);
 	ath_txq_unlock_complete(sc, txq);
@@ -1487,7 +1482,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
 		ath_txq_lock(sc, txq);
 		ac->clear_ps_filter = true;
 
-		if (!tid->paused && ath_tid_has_buffered(tid)) {
+		if (ath_tid_has_buffered(tid)) {
 			ath_tx_queue_tid(txq, tid);
 			ath_txq_schedule(sc, txq);
 		}
@@ -1510,7 +1505,6 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
 	ath_txq_lock(sc, txq);
 
 	tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
-	tid->paused = false;
 
 	if (ath_tid_has_buffered(tid)) {
 		ath_tx_queue_tid(txq, tid);
@@ -1544,8 +1538,6 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
 			continue;
 
 		tid = ATH_AN_2_TID(an, i);
-		if (tid->paused)
-			continue;
 
 		ath_txq_lock(sc, tid->ac->txq);
 		while (nframes > 0) {
@@ -1844,9 +1836,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
 		list_del(&tid->list);
 		tid->sched = false;
 
-		if (tid->paused)
-			continue;
-
 		if (ath_tx_sched_aggr(sc, txq, tid, &stop))
 			sent = true;
 
@@ -2698,7 +2687,6 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
 		tid->baw_size  = WME_MAX_BA;
 		tid->baw_head  = tid->baw_tail = 0;
 		tid->sched     = false;
-		tid->paused    = false;
 		tid->active    = false;
 		__skb_queue_head_init(&tid->buf_q);
 		__skb_queue_head_init(&tid->retry_q);

@@ -303,10 +303,10 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
 
 	ci = core->chip;
 
-	/* if core is already in reset, just return */
+	/* if core is already in reset, skip reset */
 	regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
 	if ((regdata & BCMA_RESET_CTL_RESET) != 0)
-		return;
+		goto in_reset_configure;
 
 	/* configure reset */
 	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
@@ -322,6 +322,7 @@ static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
 	SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
 		 BCMA_RESET_CTL_RESET, 300);
 
+in_reset_configure:
 	/* in-reset configure */
 	ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
 			 reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);

@@ -620,21 +620,19 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
 		rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
 				      bss_conf->bssid);
 
-	/*
-	 * Update the beacon. This is only required on USB devices. PCI
-	 * devices fetch beacons periodically.
-	 */
-	if (changes & BSS_CHANGED_BEACON && rt2x00_is_usb(rt2x00dev))
-		rt2x00queue_update_beacon(rt2x00dev, vif);
-
 	/*
 	 * Start/stop beaconing.
 	 */
 	if (changes & BSS_CHANGED_BEACON_ENABLED) {
 		if (!bss_conf->enable_beacon && intf->enable_beacon) {
-			rt2x00queue_clear_beacon(rt2x00dev, vif);
 			rt2x00dev->intf_beaconing--;
 			intf->enable_beacon = false;
+			/*
+			 * Clear beacon in the H/W for this vif. This is needed
+			 * to disable beaconing on this particular interface
+			 * and keep it running on other interfaces.
+			 */
+			rt2x00queue_clear_beacon(rt2x00dev, vif);
 
 			if (rt2x00dev->intf_beaconing == 0) {
 				/*
@@ -645,11 +643,15 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
 				rt2x00queue_stop_queue(rt2x00dev->bcn);
 				mutex_unlock(&intf->beacon_skb_mutex);
 			}
-
-
 		} else if (bss_conf->enable_beacon && !intf->enable_beacon) {
 			rt2x00dev->intf_beaconing++;
 			intf->enable_beacon = true;
+			/*
+			 * Upload beacon to the H/W. This is only required on
+			 * USB devices. PCI devices fetch beacons periodically.
+			 */
+			if (rt2x00_is_usb(rt2x00dev))
+				rt2x00queue_update_beacon(rt2x00dev, vif);
 
 			if (rt2x00dev->intf_beaconing == 1) {
 				/*

@@ -293,7 +293,7 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
 	u8 *psaddr;
 	__le16 fc;
 	u16 type, ufc;
-	bool match_bssid, packet_toself, packet_beacon, addr;
+	bool match_bssid, packet_toself, packet_beacon = false, addr;
 
 	tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift;
 

@@ -1001,7 +1001,7 @@ int rtl92cu_hw_init(struct ieee80211_hw *hw)
 	err = _rtl92cu_init_mac(hw);
 	if (err) {
 		RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "init mac failed!\n");
-		return err;
+		goto exit;
 	}
 	err = rtl92c_download_fw(hw);
 	if (err) {

@@ -49,6 +49,12 @@ static u8 _rtl92se_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 skb_queue)
 	if (ieee80211_is_nullfunc(fc))
 		return QSLT_HIGH;
 
+	/* Kernel commit 1bf4bbb4024dcdab changed EAPOL packets to use
+	 * queue V0 at priority 7; however, the RTL8192SE appears to have
+	 * that queue at priority 6
+	 */
+	if (skb->priority == 7)
+		return QSLT_VO;
 	return skb->priority;
 }
 

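The remap above routes priority-7 EAPOL frames to the queue this hardware actually treats as VO. A trivial standalone model of that translation; the QSLT_* values are assumptions for the demo, not taken from the driver headers:

/* Illustrative only: the EAPOL queue remap as a pure function. */
#include <stdio.h>

#define QSLT_VO   6	/* assumed hardware VO queue index */
#define QSLT_HIGH 7	/* assumed high-priority queue index */

static int map_priority_to_fw_queue(int priority, int is_nullfunc)
{
	if (is_nullfunc)
		return QSLT_HIGH;
	if (priority == 7)	/* EAPOL: route to the VO queue */
		return QSLT_VO;
	return priority;
}

int main(void)
{
	printf("%d\n", map_priority_to_fw_queue(7, 0));	/* 6 */
	printf("%d\n", map_priority_to_fw_queue(3, 0));	/* 3 */
	return 0;
}
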
@@ -77,7 +77,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
 			goto next_msg;
 		}
 
-		if (!capable(CAP_SYS_ADMIN)) {
+		if (!netlink_capable(skb, CAP_SYS_ADMIN)) {
 			err = -EPERM;
 			goto next_msg;
 		}

@@ -169,4 +169,11 @@ struct netlink_tap {
 extern int netlink_add_tap(struct netlink_tap *nt);
 extern int netlink_remove_tap(struct netlink_tap *nt);
 
+bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
+			  struct user_namespace *ns, int cap);
+bool netlink_ns_capable(const struct sk_buff *skb,
+			struct user_namespace *ns, int cap);
+bool netlink_capable(const struct sk_buff *skb, int cap);
+bool netlink_net_capable(const struct sk_buff *skb, int cap);
+
 #endif	/* __LINUX_NETLINK_H */

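These helpers take the skb because a netlink request must be judged by the capabilities its sender had when the socket was opened, not by whoever happens to be running when the message is processed. A rough userspace model of that distinction; the bitmask plumbing and the capability bit number are invented for the demo:

/* Illustrative only: why netlink_capable() consults the skb's stored
 * sender credentials instead of the current task (plain capable()). */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define CAP_AUDIT_CONTROL 30	/* assumed bit position for the demo */

struct nl_skb {			/* stand-in for netlink_skb_parms */
	uint64_t eff_cap;	/* sender capabilities, captured at open */
};

static bool netlink_capable_model(const struct nl_skb *skb, int cap)
{
	return (skb->eff_cap >> cap) & 1;	/* judge the sender, not us */
}

int main(void)
{
	struct nl_skb root_req = { .eff_cap = ~0ULL };
	struct nl_skb user_req = { .eff_cap = 0 };

	printf("%d\n", netlink_capable_model(&root_req, CAP_AUDIT_CONTROL));
	printf("%d\n", netlink_capable_model(&user_req, CAP_AUDIT_CONTROL));
	return 0;
}
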
@@ -23,7 +23,7 @@ int sock_diag_check_cookie(void *sk, __u32 *cookie);
 void sock_diag_save_cookie(void *sk, __u32 *cookie);
 
 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
-int sock_diag_put_filterinfo(struct user_namespace *user_ns, struct sock *sk,
+int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
 			     struct sk_buff *skb, int attrtype);
 
 #endif

@@ -155,7 +155,11 @@ struct vsock_transport {
 
 /**** CORE ****/
 
-int vsock_core_init(const struct vsock_transport *t);
+int __vsock_core_init(const struct vsock_transport *t, struct module *owner);
+static inline int vsock_core_init(const struct vsock_transport *t)
+{
+	return __vsock_core_init(t, THIS_MODULE);
+}
 void vsock_core_exit(void);
 
 /**** UTILS ****/

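Turning vsock_core_init() into an inline wrapper lets the core record which module backs the transport (THIS_MODULE expands in the caller), so it can pin that module before calling into it. A loose userspace model of the idea, with a plain counter standing in for try_module_get()/module_put():

/* Illustrative only: registration records an owner; each use takes and
 * drops a reference so the owner cannot vanish mid-call. */
#include <stdio.h>
#include <stddef.h>

struct module { int refcount; };

struct transport {
	struct module *owner;	/* recorded at registration time */
};

static struct transport *core_transport;

static int core_init(struct transport *t, struct module *owner)
{
	t->owner = owner;	/* what __vsock_core_init(t, THIS_MODULE) records */
	core_transport = t;
	return 0;
}

static int core_use(void)
{
	if (!core_transport)
		return -1;
	core_transport->owner->refcount++;	/* try_module_get() analog */
	/* ... call into the transport safely ... */
	core_transport->owner->refcount--;	/* module_put() analog */
	return 0;
}

int main(void)
{
	struct module me = { 0 };
	struct transport t;

	core_init(&t, &me);
	printf("%d\n", core_use());	/* 0 */
	return 0;
}
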
@@ -2255,6 +2255,11 @@ int sock_get_timestampns(struct sock *, struct timespec __user *);
 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
 		       int type);
 
+bool sk_ns_capable(const struct sock *sk,
+		   struct user_namespace *user_ns, int cap);
+bool sk_capable(const struct sock *sk, int cap);
+bool sk_net_capable(const struct sock *sk, int cap);
+
 /*
  * Enable debug/info messages
  */

@@ -643,13 +643,13 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
 		if ((task_active_pid_ns(current) != &init_pid_ns))
 			return -EPERM;
 
-		if (!capable(CAP_AUDIT_CONTROL))
+		if (!netlink_capable(skb, CAP_AUDIT_CONTROL))
 			err = -EPERM;
 		break;
 	case AUDIT_USER:
 	case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
 	case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
-		if (!capable(CAP_AUDIT_WRITE))
+		if (!netlink_capable(skb, CAP_AUDIT_WRITE))
 			err = -EPERM;
 		break;
 	default:  /* bad msg */

@@ -819,14 +819,17 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
 	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
 		struct hci_cp_auth_requested cp;
 
-		/* encrypt must be pending if auth is also pending */
-		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
-
 		cp.handle = cpu_to_le16(conn->handle);
 		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
 			     sizeof(cp), &cp);
+
+		/* If we're already encrypted set the REAUTH_PEND flag,
+		 * otherwise set the ENCRYPT_PEND.
+		 */
+		if (conn->key_type != 0xff)
+			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
+		else
+			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
 	}
 
 	return 0;

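The fix above distinguishes a first-time authentication from a re-authentication on an already-keyed link: only the latter should set REAUTH_PEND. A minimal standalone model of that flag choice; the bit positions are invented, only the key_type sentinel (0xff meaning "no key") comes from the hunk:

/* Illustrative only: the pending-flag selection as a pure function. */
#include <stdio.h>
#include <stdint.h>

#define HCI_CONN_REAUTH_PEND  (1u << 0)	/* demo bit positions */
#define HCI_CONN_ENCRYPT_PEND (1u << 1)

static uint32_t auth_pending_flag(uint8_t key_type)
{
	if (key_type != 0xff)		/* key present: this is a re-auth */
		return HCI_CONN_REAUTH_PEND;
	return HCI_CONN_ENCRYPT_PEND;	/* first-time encryption pending */
}

int main(void)
{
	printf("0x%x\n", auth_pending_flag(0x04));	/* REAUTH_PEND */
	printf("0x%x\n", auth_pending_flag(0xff));	/* ENCRYPT_PEND */
	return 0;
}
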
Some files were not shown because too many files changed in this diff.