Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller: 1) Sanity check URB networking device parameters to avoid divide by zero, from Oliver Neukum. 2) Disable global multicast filter in NCSI, otherwise LLDP and IPV6 don't work properly. Longer term this needs a better fix tho. From Vijay Khemka. 3) Small fixes to selftests (use ping when ping6 is not present, etc.) from David Ahern. 4) Bring back rt_uses_gateway member of struct rtable, its semantics were not well understood and trying to remove it broke things. From David Ahern. 5) Move usbnet sanity checking, ignore endpoints with invalid wMaxPacketSize. From Bjørn Mork. 6) Missing Kconfig deps for sja1105 driver, from Mao Wenan. 7) Various small fixes to the mlx5 DR steering code, from Alaa Hleihel, Alex Vesker, and Yevgeny Kliteynik 8) Missing CAP_NET_RAW checks in various places, from Ori Nimron. 9) Fix crash when removing sch_cbs entry while offloading is enabled, from Vinicius Costa Gomes. 10) Signedness bug fixes, generally in looking at the result given by of_get_phy_mode() and friends. From Dan Carpenter. 11) Disable preemption around BPF_PROG_RUN() calls, from Eric Dumazet. 12) Don't create VRF ipv6 rules if ipv6 is disabled, from David Ahern. 13) Fix quantization code in tcp_bbr, from Kevin Yang. 
* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (127 commits) net: tap: clean up an indentation issue nfp: abm: fix memory leak in nfp_abm_u32_knode_replace tcp: better handle TCP_USER_TIMEOUT in SYN_SENT state sk_buff: drop all skb extensions on free and skb scrubbing tcp_bbr: fix quantization code to not raise cwnd if not probing bandwidth mlxsw: spectrum_flower: Fail in case user specifies multiple mirror actions Documentation: Clarify trap's description mlxsw: spectrum: Clear VLAN filters during port initialization net: ena: clean up indentation issue NFC: st95hf: clean up indentation issue net: phy: micrel: add Asym Pause workaround for KSZ9021 net: socionext: ave: Avoid using netdev_err() before calling register_netdev() ptp: correctly disable flags on old ioctls lib: dimlib: fix help text typos net: dsa: microchip: Always set regmap stride to 1 nfp: flower: fix memory leak in nfp_flower_spawn_vnic_reprs nfp: flower: prevent memory leak in nfp_flower_spawn_phy_reprs net/sched: Set default of CONFIG_NET_TC_SKB_EXT to N vrf: Do not attempt to create IPv6 mcast rule if IPv6 is disabled net: sched: sch_sfb: don't call qdisc_put() while holding tree lock ...
This commit is contained in:
Коммит
02dc96ef6c
|
@ -36,12 +36,6 @@ properties:
|
|||
enum: [ 4, 8, 12, 16, 20, 24 ]
|
||||
default: 8
|
||||
|
||||
adi,disable-energy-detect:
|
||||
description: |
|
||||
Disables Energy Detect Powerdown Mode (default disabled, i.e energy detect
|
||||
is enabled if this property is unspecified)
|
||||
type: boolean
|
||||
|
||||
examples:
|
||||
- |
|
||||
ethernet {
|
||||
|
@ -68,6 +62,5 @@ examples:
|
|||
reg = <1>;
|
||||
|
||||
adi,fifo-depth-bits = <16>;
|
||||
adi,disable-energy-detect;
|
||||
};
|
||||
};
|
||||
|
|
|
@ -12,8 +12,36 @@ and therefore may overwrite them.
|
|||
KSZ9021:
|
||||
|
||||
All skew control options are specified in picoseconds. The minimum
|
||||
value is 0, the maximum value is 3000, and it is incremented by 200ps
|
||||
steps.
|
||||
value is 0, the maximum value is 3000, and it can be specified in 200ps
|
||||
steps, *but* these values are not in fact what you get because this chip's
|
||||
skew values actually increase in 120ps steps, starting from -840ps. The
|
||||
incorrect values came from an error in the original KSZ9021 datasheet
|
||||
before it was corrected in revision 1.2 (Feb 2014), but it is too late to
|
||||
change the driver now because of the many existing device trees that have
|
||||
been created using values that go up in increments of 200.
|
||||
|
||||
The following table shows the actual skew delay you will get for each of the
|
||||
possible devicetree values, and the number that will be programmed into the
|
||||
corresponding pad skew register:
|
||||
|
||||
Device Tree Value Delay Pad Skew Register Value
|
||||
-----------------------------------------------------
|
||||
0 -840ps 0000
|
||||
200 -720ps 0001
|
||||
400 -600ps 0010
|
||||
600 -480ps 0011
|
||||
800 -360ps 0100
|
||||
1000 -240ps 0101
|
||||
1200 -120ps 0110
|
||||
1400 0ps 0111
|
||||
1600 120ps 1000
|
||||
1800 240ps 1001
|
||||
2000 360ps 1010
|
||||
2200 480ps 1011
|
||||
2400 600ps 1100
|
||||
2600 720ps 1101
|
||||
2800 840ps 1110
|
||||
3000 960ps 1111
|
||||
|
||||
Optional properties:
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@ Required properties:
|
|||
R-Car Gen2 and RZ/G1 devices.
|
||||
|
||||
- "renesas,etheravb-r8a774a1" for the R8A774A1 SoC.
|
||||
- "renesas,etheravb-r8a774b1" for the R8A774B1 SoC.
|
||||
- "renesas,etheravb-r8a774c0" for the R8A774C0 SoC.
|
||||
- "renesas,etheravb-r8a7795" for the R8A7795 SoC.
|
||||
- "renesas,etheravb-r8a7796" for the R8A7796 SoC.
|
||||
|
|
|
@ -113,7 +113,7 @@ properties:
|
|||
const: stmmaceth
|
||||
|
||||
mac-mode:
|
||||
maxItems: 1
|
||||
$ref: ethernet-controller.yaml#/properties/phy-connection-type
|
||||
description:
|
||||
The property is identical to 'phy-mode', and assumes that there is mode
|
||||
converter in-between the MAC & PHY (e.g. GMII-to-RGMII). This converter
|
||||
|
|
|
@ -143,7 +143,8 @@ be added to the following table:
|
|||
* - ``port_list_is_empty``
|
||||
- ``drop``
|
||||
- Traps packets that the device decided to drop in case they need to be
|
||||
flooded and the flood list is empty
|
||||
flooded (e.g., unknown unicast, unregistered multicast) and there are
|
||||
no ports the packets should be flooded to
|
||||
* - ``port_loopback_filter``
|
||||
- ``drop``
|
||||
- Traps packets that the device decided to drop in case after layer 2
|
||||
|
|
|
@ -643,6 +643,7 @@ F: drivers/net/ethernet/alacritech/*
|
|||
|
||||
FORCEDETH GIGABIT ETHERNET DRIVER
|
||||
M: Rain River <rain.1986.08.12@gmail.com>
|
||||
M: Zhu Yanjun <yanjun.zhu@oracle.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/net/ethernet/nvidia/*
|
||||
|
|
|
@ -1690,7 +1690,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
|
|||
|
||||
if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
|
||||
hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
|
||||
atomic_inc(&vcc->stats->rx_drop);
|
||||
atomic_inc(&vcc->stats->rx_drop);
|
||||
goto return_host_buffers;
|
||||
}
|
||||
|
||||
|
|
|
@ -352,7 +352,7 @@ static bool has_gateway(const struct dst_entry *dst, sa_family_t family)
|
|||
|
||||
if (family == AF_INET) {
|
||||
rt = container_of(dst, struct rtable, dst);
|
||||
return rt->rt_gw_family == AF_INET;
|
||||
return rt->rt_uses_gateway;
|
||||
}
|
||||
|
||||
rt6 = container_of(dst, struct rt6_info, dst);
|
||||
|
|
|
@ -754,6 +754,8 @@ base_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
|
|||
|
||||
if (sock->type != SOCK_RAW)
|
||||
return -ESOCKTNOSUPPORT;
|
||||
if (!capable(CAP_NET_RAW))
|
||||
return -EPERM;
|
||||
|
||||
sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
|
||||
if (!sk)
|
||||
|
|
|
@ -487,7 +487,7 @@ config FUJITSU_ES
|
|||
depends on ACPI
|
||||
help
|
||||
This driver provides support for Extended Socket network device
|
||||
on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
|
||||
on Extended Partitioning of FUJITSU PRIMEQUEST 2000 E2 series.
|
||||
|
||||
config THUNDERBOLT_NET
|
||||
tristate "Networking over Thunderbolt cable"
|
||||
|
|
|
@ -56,19 +56,19 @@ config ARCNET_CAP
|
|||
tristate "Enable CAP mode packet interface"
|
||||
help
|
||||
ARCnet "cap mode" packet encapsulation. Used to get the hardware
|
||||
acknowledge back to userspace. After the initial protocol byte every
|
||||
packet is stuffed with an extra 4 byte "cookie" which doesn't
|
||||
actually appear on the network. After transmit the driver will send
|
||||
back a packet with protocol byte 0 containing the status of the
|
||||
transmission:
|
||||
0=no hardware acknowledge
|
||||
1=excessive nak
|
||||
2=transmission accepted by the receiver hardware
|
||||
acknowledge back to userspace. After the initial protocol byte every
|
||||
packet is stuffed with an extra 4 byte "cookie" which doesn't
|
||||
actually appear on the network. After transmit the driver will send
|
||||
back a packet with protocol byte 0 containing the status of the
|
||||
transmission:
|
||||
0=no hardware acknowledge
|
||||
1=excessive nak
|
||||
2=transmission accepted by the receiver hardware
|
||||
|
||||
Received packets are also stuffed with the extra 4 bytes but it will
|
||||
be random data.
|
||||
Received packets are also stuffed with the extra 4 bytes but it will
|
||||
be random data.
|
||||
|
||||
Cap only listens to protocol 1-8.
|
||||
Cap only listens to protocol 1-8.
|
||||
|
||||
config ARCNET_COM90xx
|
||||
tristate "ARCnet COM90xx (normal) chipset driver"
|
||||
|
|
|
@ -1063,31 +1063,34 @@ EXPORT_SYMBOL(arcnet_interrupt);
|
|||
static void arcnet_rx(struct net_device *dev, int bufnum)
|
||||
{
|
||||
struct arcnet_local *lp = netdev_priv(dev);
|
||||
struct archdr pkt;
|
||||
union {
|
||||
struct archdr pkt;
|
||||
char buf[512];
|
||||
} rxdata;
|
||||
struct arc_rfc1201 *soft;
|
||||
int length, ofs;
|
||||
|
||||
soft = &pkt.soft.rfc1201;
|
||||
soft = &rxdata.pkt.soft.rfc1201;
|
||||
|
||||
lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
|
||||
if (pkt.hard.offset[0]) {
|
||||
ofs = pkt.hard.offset[0];
|
||||
lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE);
|
||||
if (rxdata.pkt.hard.offset[0]) {
|
||||
ofs = rxdata.pkt.hard.offset[0];
|
||||
length = 256 - ofs;
|
||||
} else {
|
||||
ofs = pkt.hard.offset[1];
|
||||
ofs = rxdata.pkt.hard.offset[1];
|
||||
length = 512 - ofs;
|
||||
}
|
||||
|
||||
/* get the full header, if possible */
|
||||
if (sizeof(pkt.soft) <= length) {
|
||||
lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft));
|
||||
if (sizeof(rxdata.pkt.soft) <= length) {
|
||||
lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft));
|
||||
} else {
|
||||
memset(&pkt.soft, 0, sizeof(pkt.soft));
|
||||
memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft));
|
||||
lp->hw.copy_from_card(dev, bufnum, ofs, soft, length);
|
||||
}
|
||||
|
||||
arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n",
|
||||
bufnum, pkt.hard.source, pkt.hard.dest, length);
|
||||
bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length);
|
||||
|
||||
dev->stats.rx_packets++;
|
||||
dev->stats.rx_bytes += length + ARC_HDR_SIZE;
|
||||
|
@ -1096,13 +1099,13 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
|
|||
if (arc_proto_map[soft->proto]->is_ip) {
|
||||
if (BUGLVL(D_PROTO)) {
|
||||
struct ArcProto
|
||||
*oldp = arc_proto_map[lp->default_proto[pkt.hard.source]],
|
||||
*oldp = arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]],
|
||||
*newp = arc_proto_map[soft->proto];
|
||||
|
||||
if (oldp != newp) {
|
||||
arc_printk(D_PROTO, dev,
|
||||
"got protocol %02Xh; encap for host %02Xh is now '%c' (was '%c')\n",
|
||||
soft->proto, pkt.hard.source,
|
||||
soft->proto, rxdata.pkt.hard.source,
|
||||
newp->suffix, oldp->suffix);
|
||||
}
|
||||
}
|
||||
|
@ -1111,10 +1114,10 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
|
|||
lp->default_proto[0] = soft->proto;
|
||||
|
||||
/* in striking contrast, the following isn't a hack. */
|
||||
lp->default_proto[pkt.hard.source] = soft->proto;
|
||||
lp->default_proto[rxdata.pkt.hard.source] = soft->proto;
|
||||
}
|
||||
/* call the protocol-specific receiver. */
|
||||
arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length);
|
||||
arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length);
|
||||
}
|
||||
|
||||
static void null_rx(struct net_device *dev, int bufnum,
|
||||
|
|
|
@ -15,10 +15,10 @@ config CAN_EMS_USB
|
|||
from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
|
||||
|
||||
config CAN_ESD_USB2
|
||||
tristate "ESD USB/2 CAN/USB interface"
|
||||
---help---
|
||||
This driver supports the CAN-USB/2 interface
|
||||
from esd electronic system design gmbh (http://www.esd.eu).
|
||||
tristate "ESD USB/2 CAN/USB interface"
|
||||
---help---
|
||||
This driver supports the CAN-USB/2 interface
|
||||
from esd electronic system design gmbh (http://www.esd.eu).
|
||||
|
||||
config CAN_GS_USB
|
||||
tristate "Geschwister Schneider UG interfaces"
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
|
||||
*
|
||||
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
|
||||
/*
|
||||
* Northstar Plus switch SerDes/SGMII PHY definitions
|
||||
*
|
||||
* Copyright (C) 2018 Florian Fainelli <f.fainelli@gmail.com>
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/*
|
||||
* PCE microcode extracted from UGW 7.1.1 switch api
|
||||
*
|
||||
|
|
|
@ -303,7 +303,7 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
|
|||
{ \
|
||||
.name = #width, \
|
||||
.val_bits = (width), \
|
||||
.reg_stride = (width) / 8, \
|
||||
.reg_stride = 1, \
|
||||
.reg_bits = (regbits) + (regalign), \
|
||||
.pad_bits = (regpad), \
|
||||
.max_register = BIT(regbits) - 1, \
|
||||
|
|
|
@ -936,6 +936,9 @@ qca8k_port_enable(struct dsa_switch *ds, int port,
|
|||
{
|
||||
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
|
||||
|
||||
if (!dsa_is_user_port(ds, port))
|
||||
return 0;
|
||||
|
||||
qca8k_port_set_status(priv, port, 1);
|
||||
priv->port_sts[port].enabled = 1;
|
||||
|
||||
|
|
|
@ -27,6 +27,7 @@ config NET_DSA_SJA1105_PTP
|
|||
config NET_DSA_SJA1105_TAS
|
||||
bool "Support for the Time-Aware Scheduler on NXP SJA1105"
|
||||
depends on NET_DSA_SJA1105
|
||||
depends on NET_SCH_TAPRIO
|
||||
help
|
||||
This enables support for the TTEthernet-based egress scheduling
|
||||
engine in the SJA1105 DSA driver, which is controlled using a
|
||||
|
|
|
@ -140,17 +140,6 @@ source "drivers/net/ethernet/neterion/Kconfig"
|
|||
source "drivers/net/ethernet/netronome/Kconfig"
|
||||
source "drivers/net/ethernet/ni/Kconfig"
|
||||
source "drivers/net/ethernet/8390/Kconfig"
|
||||
|
||||
config NET_NETX
|
||||
tristate "NetX Ethernet support"
|
||||
select MII
|
||||
depends on ARCH_NETX
|
||||
---help---
|
||||
This is support for the Hilscher netX builtin Ethernet ports
|
||||
|
||||
To compile this driver as a module, choose M here. The module
|
||||
will be called netx-eth.
|
||||
|
||||
source "drivers/net/ethernet/nvidia/Kconfig"
|
||||
source "drivers/net/ethernet/nxp/Kconfig"
|
||||
source "drivers/net/ethernet/oki-semi/Kconfig"
|
||||
|
|
|
@ -64,7 +64,6 @@ obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
|
|||
obj-$(CONFIG_NET_VENDOR_NETERION) += neterion/
|
||||
obj-$(CONFIG_NET_VENDOR_NETRONOME) += netronome/
|
||||
obj-$(CONFIG_NET_VENDOR_NI) += ni/
|
||||
obj-$(CONFIG_NET_NETX) += netx-eth.o
|
||||
obj-$(CONFIG_NET_VENDOR_NVIDIA) += nvidia/
|
||||
obj-$(CONFIG_LPC_ENET) += nxp/
|
||||
obj-$(CONFIG_NET_VENDOR_OKI) += oki-semi/
|
||||
|
|
|
@ -21,17 +21,17 @@ config NET_VENDOR_ALLWINNER
|
|||
if NET_VENDOR_ALLWINNER
|
||||
|
||||
config SUN4I_EMAC
|
||||
tristate "Allwinner A10 EMAC support"
|
||||
tristate "Allwinner A10 EMAC support"
|
||||
depends on ARCH_SUNXI
|
||||
depends on OF
|
||||
select CRC32
|
||||
select MII
|
||||
select PHYLIB
|
||||
select MDIO_SUN4I
|
||||
---help---
|
||||
Support for Allwinner A10 EMAC ethernet driver.
|
||||
---help---
|
||||
Support for Allwinner A10 EMAC ethernet driver.
|
||||
|
||||
To compile this driver as a module, choose M here. The module
|
||||
will be called sun4i-emac.
|
||||
To compile this driver as a module, choose M here. The module
|
||||
will be called sun4i-emac.
|
||||
|
||||
endif # NET_VENDOR_ALLWINNER
|
||||
|
|
|
@ -19,6 +19,7 @@ if NET_VENDOR_AMAZON
|
|||
config ENA_ETHERNET
|
||||
tristate "Elastic Network Adapter (ENA) support"
|
||||
depends on PCI_MSI && !CPU_BIG_ENDIAN
|
||||
select DIMLIB
|
||||
---help---
|
||||
This driver supports Elastic Network Adapter (ENA)"
|
||||
|
||||
|
|
|
@ -211,8 +211,8 @@ static int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
|
|||
|
||||
pkt_ctrl->curr_bounce_buf =
|
||||
ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
|
||||
memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
|
||||
0x0, llq_info->desc_list_entry_size);
|
||||
memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
|
||||
0x0, llq_info->desc_list_entry_size);
|
||||
|
||||
pkt_ctrl->idx = 0;
|
||||
if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
|
||||
|
|
|
@ -306,15 +306,13 @@ irqreturn_t aq_vec_isr_legacy(int irq, void *private)
|
|||
{
|
||||
struct aq_vec_s *self = private;
|
||||
u64 irq_mask = 0U;
|
||||
irqreturn_t err = 0;
|
||||
int err;
|
||||
|
||||
if (!self) {
|
||||
err = -EINVAL;
|
||||
goto err_exit;
|
||||
}
|
||||
if (!self)
|
||||
return IRQ_NONE;
|
||||
err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
|
||||
if (err < 0)
|
||||
goto err_exit;
|
||||
return IRQ_NONE;
|
||||
|
||||
if (irq_mask) {
|
||||
self->aq_hw_ops->hw_irq_disable(self->aq_hw,
|
||||
|
@ -322,11 +320,10 @@ irqreturn_t aq_vec_isr_legacy(int irq, void *private)
|
|||
napi_schedule(&self->napi);
|
||||
} else {
|
||||
self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
|
||||
err = IRQ_NONE;
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
err_exit:
|
||||
return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
|
||||
|
|
|
@ -2481,7 +2481,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
|
|||
|
||||
priv->phy_interface = of_get_phy_mode(dn);
|
||||
/* Default to GMII interface mode */
|
||||
if (priv->phy_interface < 0)
|
||||
if ((int)priv->phy_interface < 0)
|
||||
priv->phy_interface = PHY_INTERFACE_MODE_GMII;
|
||||
|
||||
/* In the case of a fixed PHY, the DT node associated
|
||||
|
|
|
@ -165,9 +165,8 @@ static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx
|
|||
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
|
||||
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
|
||||
{
|
||||
if (bp->hw_dma_cap & HW_DMA_CAP_64B)
|
||||
return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
|
||||
return NULL;
|
||||
return (struct macb_dma_desc_64 *)((void *)desc
|
||||
+ sizeof(struct macb_dma_desc));
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -5701,7 +5701,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
|
|||
whoami = t4_read_reg(adapter, PL_WHOAMI_A);
|
||||
pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
|
||||
chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
|
||||
if (chip < 0) {
|
||||
if ((int)chip < 0) {
|
||||
dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
|
||||
err = chip;
|
||||
goto out_free_adapter;
|
||||
|
|
|
@ -48,5 +48,5 @@ config BE2NET_SKYHAWK
|
|||
chipsets. (e.g. OneConnect OCe14xxx)
|
||||
|
||||
comment "WARNING: be2net is useless without any enabled chip"
|
||||
depends on BE2NET_BE2=n && BE2NET_BE3=n && BE2NET_LANCER=n && \
|
||||
depends on BE2NET_BE2=n && BE2NET_BE3=n && BE2NET_LANCER=n && \
|
||||
BE2NET_SKYHAWK=n && BE2NET
|
||||
|
|
|
@ -785,7 +785,7 @@ static int enetc_of_get_phy(struct enetc_ndev_priv *priv)
|
|||
}
|
||||
|
||||
priv->if_mode = of_get_phy_mode(np);
|
||||
if (priv->if_mode < 0) {
|
||||
if ((int)priv->if_mode < 0) {
|
||||
dev_err(priv->dev, "missing phy type\n");
|
||||
of_node_put(priv->phy_node);
|
||||
if (of_phy_is_fixed_link(np))
|
||||
|
|
|
@ -2067,7 +2067,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void reset_gfar(struct net_device *ndev)
|
||||
static void reset_gfar(struct net_device *ndev)
|
||||
{
|
||||
struct gfar_private *priv = netdev_priv(ndev);
|
||||
|
||||
|
|
|
@ -1194,7 +1194,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
|
|||
goto err_free_mdio;
|
||||
|
||||
priv->phy_mode = of_get_phy_mode(node);
|
||||
if (priv->phy_mode < 0) {
|
||||
if ((int)priv->phy_mode < 0) {
|
||||
netdev_err(ndev, "not find phy-mode\n");
|
||||
ret = -EINVAL;
|
||||
goto err_mdiobus;
|
||||
|
|
|
@ -1207,7 +1207,7 @@ static void ibmvnic_cleanup(struct net_device *netdev)
|
|||
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
/* ensure that transmissions are stopped if called by do_reset */
|
||||
if (adapter->resetting)
|
||||
if (test_bit(0, &adapter->resetting))
|
||||
netif_tx_disable(netdev);
|
||||
else
|
||||
netif_tx_stop_all_queues(netdev);
|
||||
|
@ -1428,7 +1428,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
|
|||
u8 proto = 0;
|
||||
netdev_tx_t ret = NETDEV_TX_OK;
|
||||
|
||||
if (adapter->resetting) {
|
||||
if (test_bit(0, &adapter->resetting)) {
|
||||
if (!netif_subqueue_stopped(netdev, skb))
|
||||
netif_stop_subqueue(netdev, queue_num);
|
||||
dev_kfree_skb_any(skb);
|
||||
|
@ -1723,6 +1723,86 @@ static int ibmvnic_set_mac(struct net_device *netdev, void *p)
|
|||
return rc;
|
||||
}
|
||||
|
||||
/**
|
||||
* do_change_param_reset returns zero if we are able to keep processing reset
|
||||
* events, or non-zero if we hit a fatal error and must halt.
|
||||
*/
|
||||
static int do_change_param_reset(struct ibmvnic_adapter *adapter,
|
||||
struct ibmvnic_rwi *rwi,
|
||||
u32 reset_state)
|
||||
{
|
||||
struct net_device *netdev = adapter->netdev;
|
||||
int i, rc;
|
||||
|
||||
netdev_dbg(adapter->netdev, "Change param resetting driver (%d)\n",
|
||||
rwi->reset_reason);
|
||||
|
||||
netif_carrier_off(netdev);
|
||||
adapter->reset_reason = rwi->reset_reason;
|
||||
|
||||
ibmvnic_cleanup(netdev);
|
||||
|
||||
if (reset_state == VNIC_OPEN) {
|
||||
rc = __ibmvnic_close(netdev);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
release_resources(adapter);
|
||||
release_sub_crqs(adapter, 1);
|
||||
release_crq_queue(adapter);
|
||||
|
||||
adapter->state = VNIC_PROBED;
|
||||
|
||||
rc = init_crq_queue(adapter);
|
||||
|
||||
if (rc) {
|
||||
netdev_err(adapter->netdev,
|
||||
"Couldn't initialize crq. rc=%d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = ibmvnic_reset_init(adapter);
|
||||
if (rc)
|
||||
return IBMVNIC_INIT_FAILED;
|
||||
|
||||
/* If the adapter was in PROBE state prior to the reset,
|
||||
* exit here.
|
||||
*/
|
||||
if (reset_state == VNIC_PROBED)
|
||||
return 0;
|
||||
|
||||
rc = ibmvnic_login(netdev);
|
||||
if (rc) {
|
||||
adapter->state = reset_state;
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = init_resources(adapter);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
ibmvnic_disable_irqs(adapter);
|
||||
|
||||
adapter->state = VNIC_CLOSED;
|
||||
|
||||
if (reset_state == VNIC_CLOSED)
|
||||
return 0;
|
||||
|
||||
rc = __ibmvnic_open(netdev);
|
||||
if (rc)
|
||||
return IBMVNIC_OPEN_FAILED;
|
||||
|
||||
/* refresh device's multicast list */
|
||||
ibmvnic_set_multi(netdev);
|
||||
|
||||
/* kick napi */
|
||||
for (i = 0; i < adapter->req_rx_queues; i++)
|
||||
napi_schedule(&adapter->napi[i]);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* do_reset returns zero if we are able to keep processing reset events, or
|
||||
* non-zero if we hit a fatal error and must halt.
|
||||
|
@ -1738,6 +1818,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
|
|||
netdev_dbg(adapter->netdev, "Re-setting driver (%d)\n",
|
||||
rwi->reset_reason);
|
||||
|
||||
rtnl_lock();
|
||||
|
||||
netif_carrier_off(netdev);
|
||||
adapter->reset_reason = rwi->reset_reason;
|
||||
|
||||
|
@ -1751,16 +1833,25 @@ static int do_reset(struct ibmvnic_adapter *adapter,
|
|||
if (reset_state == VNIC_OPEN &&
|
||||
adapter->reset_reason != VNIC_RESET_MOBILITY &&
|
||||
adapter->reset_reason != VNIC_RESET_FAILOVER) {
|
||||
rc = __ibmvnic_close(netdev);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
adapter->state = VNIC_CLOSING;
|
||||
|
||||
if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
|
||||
adapter->wait_for_reset) {
|
||||
release_resources(adapter);
|
||||
release_sub_crqs(adapter, 1);
|
||||
release_crq_queue(adapter);
|
||||
/* Release the RTNL lock before link state change and
|
||||
* re-acquire after the link state change to allow
|
||||
* linkwatch_event to grab the RTNL lock and run during
|
||||
* a reset.
|
||||
*/
|
||||
rtnl_unlock();
|
||||
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
|
||||
rtnl_lock();
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
if (adapter->state != VNIC_CLOSING) {
|
||||
rc = -1;
|
||||
goto out;
|
||||
}
|
||||
|
||||
adapter->state = VNIC_CLOSED;
|
||||
}
|
||||
|
||||
if (adapter->reset_reason != VNIC_RESET_NON_FATAL) {
|
||||
|
@ -1769,9 +1860,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
|
|||
*/
|
||||
adapter->state = VNIC_PROBED;
|
||||
|
||||
if (adapter->wait_for_reset) {
|
||||
rc = init_crq_queue(adapter);
|
||||
} else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
|
||||
if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
|
||||
rc = ibmvnic_reenable_crq_queue(adapter);
|
||||
release_sub_crqs(adapter, 1);
|
||||
} else {
|
||||
|
@ -1783,36 +1872,35 @@ static int do_reset(struct ibmvnic_adapter *adapter,
|
|||
if (rc) {
|
||||
netdev_err(adapter->netdev,
|
||||
"Couldn't initialize crq. rc=%d\n", rc);
|
||||
return rc;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = ibmvnic_reset_init(adapter);
|
||||
if (rc)
|
||||
return IBMVNIC_INIT_FAILED;
|
||||
if (rc) {
|
||||
rc = IBMVNIC_INIT_FAILED;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* If the adapter was in PROBE state prior to the reset,
|
||||
* exit here.
|
||||
*/
|
||||
if (reset_state == VNIC_PROBED)
|
||||
return 0;
|
||||
if (reset_state == VNIC_PROBED) {
|
||||
rc = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = ibmvnic_login(netdev);
|
||||
if (rc) {
|
||||
adapter->state = reset_state;
|
||||
return rc;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
|
||||
adapter->wait_for_reset) {
|
||||
rc = init_resources(adapter);
|
||||
if (rc)
|
||||
return rc;
|
||||
} else if (adapter->req_rx_queues != old_num_rx_queues ||
|
||||
adapter->req_tx_queues != old_num_tx_queues ||
|
||||
adapter->req_rx_add_entries_per_subcrq !=
|
||||
old_num_rx_slots ||
|
||||
adapter->req_tx_entries_per_subcrq !=
|
||||
old_num_tx_slots) {
|
||||
if (adapter->req_rx_queues != old_num_rx_queues ||
|
||||
adapter->req_tx_queues != old_num_tx_queues ||
|
||||
adapter->req_rx_add_entries_per_subcrq !=
|
||||
old_num_rx_slots ||
|
||||
adapter->req_tx_entries_per_subcrq !=
|
||||
old_num_tx_slots) {
|
||||
release_rx_pools(adapter);
|
||||
release_tx_pools(adapter);
|
||||
release_napi(adapter);
|
||||
|
@ -1820,32 +1908,30 @@ static int do_reset(struct ibmvnic_adapter *adapter,
|
|||
|
||||
rc = init_resources(adapter);
|
||||
if (rc)
|
||||
return rc;
|
||||
goto out;
|
||||
|
||||
} else {
|
||||
rc = reset_tx_pools(adapter);
|
||||
if (rc)
|
||||
return rc;
|
||||
goto out;
|
||||
|
||||
rc = reset_rx_pools(adapter);
|
||||
if (rc)
|
||||
return rc;
|
||||
goto out;
|
||||
}
|
||||
ibmvnic_disable_irqs(adapter);
|
||||
}
|
||||
adapter->state = VNIC_CLOSED;
|
||||
|
||||
if (reset_state == VNIC_CLOSED)
|
||||
return 0;
|
||||
if (reset_state == VNIC_CLOSED) {
|
||||
rc = 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = __ibmvnic_open(netdev);
|
||||
if (rc) {
|
||||
if (list_empty(&adapter->rwi_list))
|
||||
adapter->state = VNIC_CLOSED;
|
||||
else
|
||||
adapter->state = reset_state;
|
||||
|
||||
return 0;
|
||||
rc = IBMVNIC_OPEN_FAILED;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* refresh device's multicast list */
|
||||
|
@ -1855,11 +1941,15 @@ static int do_reset(struct ibmvnic_adapter *adapter,
|
|||
for (i = 0; i < adapter->req_rx_queues; i++)
|
||||
napi_schedule(&adapter->napi[i]);
|
||||
|
||||
if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
|
||||
adapter->reset_reason != VNIC_RESET_CHANGE_PARAM)
|
||||
if (adapter->reset_reason != VNIC_RESET_FAILOVER)
|
||||
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
|
||||
|
||||
return 0;
|
||||
rc = 0;
|
||||
|
||||
out:
|
||||
rtnl_unlock();
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int do_hard_reset(struct ibmvnic_adapter *adapter,
|
||||
|
@ -1919,14 +2009,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
|
|||
return 0;
|
||||
|
||||
rc = __ibmvnic_open(netdev);
|
||||
if (rc) {
|
||||
if (list_empty(&adapter->rwi_list))
|
||||
adapter->state = VNIC_CLOSED;
|
||||
else
|
||||
adapter->state = reset_state;
|
||||
|
||||
return 0;
|
||||
}
|
||||
if (rc)
|
||||
return IBMVNIC_OPEN_FAILED;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1965,20 +2049,17 @@ static void __ibmvnic_reset(struct work_struct *work)
|
|||
{
|
||||
struct ibmvnic_rwi *rwi;
|
||||
struct ibmvnic_adapter *adapter;
|
||||
bool we_lock_rtnl = false;
|
||||
u32 reset_state;
|
||||
int rc = 0;
|
||||
|
||||
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
|
||||
|
||||
/* netif_set_real_num_xx_queues needs to take rtnl lock here
|
||||
* unless wait_for_reset is set, in which case the rtnl lock
|
||||
* has already been taken before initializing the reset
|
||||
*/
|
||||
if (!adapter->wait_for_reset) {
|
||||
rtnl_lock();
|
||||
we_lock_rtnl = true;
|
||||
if (test_and_set_bit_lock(0, &adapter->resetting)) {
|
||||
schedule_delayed_work(&adapter->ibmvnic_delayed_reset,
|
||||
IBMVNIC_RESET_DELAY);
|
||||
return;
|
||||
}
|
||||
|
||||
reset_state = adapter->state;
|
||||
|
||||
rwi = get_next_rwi(adapter);
|
||||
|
@ -1990,22 +2071,43 @@ static void __ibmvnic_reset(struct work_struct *work)
|
|||
break;
|
||||
}
|
||||
|
||||
if (adapter->force_reset_recovery) {
|
||||
adapter->force_reset_recovery = false;
|
||||
rc = do_hard_reset(adapter, rwi, reset_state);
|
||||
if (rwi->reset_reason == VNIC_RESET_CHANGE_PARAM) {
|
||||
/* CHANGE_PARAM requestor holds rtnl_lock */
|
||||
rc = do_change_param_reset(adapter, rwi, reset_state);
|
||||
} else if (adapter->force_reset_recovery) {
|
||||
/* Transport event occurred during previous reset */
|
||||
if (adapter->wait_for_reset) {
|
||||
/* Previous was CHANGE_PARAM; caller locked */
|
||||
adapter->force_reset_recovery = false;
|
||||
rc = do_hard_reset(adapter, rwi, reset_state);
|
||||
} else {
|
||||
rtnl_lock();
|
||||
adapter->force_reset_recovery = false;
|
||||
rc = do_hard_reset(adapter, rwi, reset_state);
|
||||
rtnl_unlock();
|
||||
}
|
||||
} else {
|
||||
rc = do_reset(adapter, rwi, reset_state);
|
||||
}
|
||||
kfree(rwi);
|
||||
if (rc && rc != IBMVNIC_INIT_FAILED &&
|
||||
if (rc == IBMVNIC_OPEN_FAILED) {
|
||||
if (list_empty(&adapter->rwi_list))
|
||||
adapter->state = VNIC_CLOSED;
|
||||
else
|
||||
adapter->state = reset_state;
|
||||
rc = 0;
|
||||
} else if (rc && rc != IBMVNIC_INIT_FAILED &&
|
||||
!adapter->force_reset_recovery)
|
||||
break;
|
||||
|
||||
rwi = get_next_rwi(adapter);
|
||||
|
||||
if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
|
||||
rwi->reset_reason == VNIC_RESET_MOBILITY))
|
||||
adapter->force_reset_recovery = true;
|
||||
}
|
||||
|
||||
if (adapter->wait_for_reset) {
|
||||
adapter->wait_for_reset = false;
|
||||
adapter->reset_done_rc = rc;
|
||||
complete(&adapter->reset_done);
|
||||
}
|
||||
|
@ -2015,9 +2117,16 @@ static void __ibmvnic_reset(struct work_struct *work)
|
|||
free_all_rwi(adapter);
|
||||
}
|
||||
|
||||
adapter->resetting = false;
|
||||
if (we_lock_rtnl)
|
||||
rtnl_unlock();
|
||||
clear_bit_unlock(0, &adapter->resetting);
|
||||
}
|
||||
|
||||
static void __ibmvnic_delayed_reset(struct work_struct *work)
|
||||
{
|
||||
struct ibmvnic_adapter *adapter;
|
||||
|
||||
adapter = container_of(work, struct ibmvnic_adapter,
|
||||
ibmvnic_delayed_reset.work);
|
||||
__ibmvnic_reset(&adapter->ibmvnic_reset);
|
||||
}
|
||||
|
||||
static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
|
||||
|
@ -2072,14 +2181,11 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
|
|||
rwi->reset_reason = reason;
|
||||
list_add_tail(&rwi->list, &adapter->rwi_list);
|
||||
spin_unlock_irqrestore(&adapter->rwi_lock, flags);
|
||||
adapter->resetting = true;
|
||||
netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
|
||||
schedule_work(&adapter->ibmvnic_reset);
|
||||
|
||||
return 0;
|
||||
err:
|
||||
if (adapter->wait_for_reset)
|
||||
adapter->wait_for_reset = false;
|
||||
return -ret;
|
||||
}
|
||||
|
||||
|
@ -2119,7 +2225,7 @@ restart_poll:
|
|||
u16 offset;
|
||||
u8 flags = 0;
|
||||
|
||||
if (unlikely(adapter->resetting &&
|
||||
if (unlikely(test_bit(0, &adapter->resetting) &&
|
||||
adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
|
||||
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
|
||||
napi_complete_done(napi, frames_processed);
|
||||
|
@ -2770,7 +2876,7 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
|
|||
return 1;
|
||||
}
|
||||
|
||||
if (adapter->resetting &&
|
||||
if (test_bit(0, &adapter->resetting) &&
|
||||
adapter->reset_reason == VNIC_RESET_MOBILITY) {
|
||||
u64 val = (0xff000000) | scrq->hw_irq;
|
||||
|
||||
|
@ -3320,7 +3426,7 @@ static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
|
|||
if (rc) {
|
||||
if (rc == H_CLOSED) {
|
||||
dev_warn(dev, "CRQ Queue closed\n");
|
||||
if (adapter->resetting)
|
||||
if (test_bit(0, &adapter->resetting))
|
||||
ibmvnic_reset(adapter, VNIC_RESET_FATAL);
|
||||
}
|
||||
|
||||
|
@ -4312,13 +4418,14 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
|
|||
{
|
||||
struct net_device *netdev = adapter->netdev;
|
||||
int rc;
|
||||
__be32 rspeed = cpu_to_be32(crq->query_phys_parms_rsp.speed);
|
||||
|
||||
rc = crq->query_phys_parms_rsp.rc.code;
|
||||
if (rc) {
|
||||
netdev_err(netdev, "Error %d in QUERY_PHYS_PARMS\n", rc);
|
||||
return rc;
|
||||
}
|
||||
switch (cpu_to_be32(crq->query_phys_parms_rsp.speed)) {
|
||||
switch (rspeed) {
|
||||
case IBMVNIC_10MBPS:
|
||||
adapter->speed = SPEED_10;
|
||||
break;
|
||||
|
@ -4344,8 +4451,8 @@ static int handle_query_phys_parms_rsp(union ibmvnic_crq *crq,
|
|||
adapter->speed = SPEED_100000;
|
||||
break;
|
||||
default:
|
||||
netdev_warn(netdev, "Unknown speed 0x%08x\n",
|
||||
cpu_to_be32(crq->query_phys_parms_rsp.speed));
|
||||
if (netif_carrier_ok(netdev))
|
||||
netdev_warn(netdev, "Unknown speed 0x%08x\n", rspeed);
|
||||
adapter->speed = SPEED_UNKNOWN;
|
||||
}
|
||||
if (crq->query_phys_parms_rsp.flags1 & IBMVNIC_FULL_DUPLEX)
|
||||
|
@ -4395,7 +4502,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
|
|||
case IBMVNIC_CRQ_XPORT_EVENT:
|
||||
netif_carrier_off(netdev);
|
||||
adapter->crq.active = false;
|
||||
if (adapter->resetting)
|
||||
if (test_bit(0, &adapter->resetting))
|
||||
adapter->force_reset_recovery = true;
|
||||
if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
|
||||
dev_info(dev, "Migrated, re-enabling adapter\n");
|
||||
|
@ -4733,7 +4840,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
|
|||
return -1;
|
||||
}
|
||||
|
||||
if (adapter->resetting && !adapter->wait_for_reset &&
|
||||
if (test_bit(0, &adapter->resetting) && !adapter->wait_for_reset &&
|
||||
adapter->reset_reason != VNIC_RESET_MOBILITY) {
|
||||
if (adapter->req_rx_queues != old_num_rx_queues ||
|
||||
adapter->req_tx_queues != old_num_tx_queues) {
|
||||
|
@ -4845,10 +4952,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
|
|||
spin_lock_init(&adapter->stats_lock);
|
||||
|
||||
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
|
||||
INIT_DELAYED_WORK(&adapter->ibmvnic_delayed_reset,
|
||||
__ibmvnic_delayed_reset);
|
||||
INIT_LIST_HEAD(&adapter->rwi_list);
|
||||
spin_lock_init(&adapter->rwi_lock);
|
||||
init_completion(&adapter->init_done);
|
||||
adapter->resetting = false;
|
||||
clear_bit(0, &adapter->resetting);
|
||||
|
||||
do {
|
||||
rc = init_crq_queue(adapter);
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
#define IBMVNIC_INVALID_MAP -1
|
||||
#define IBMVNIC_STATS_TIMEOUT 1
|
||||
#define IBMVNIC_INIT_FAILED 2
|
||||
#define IBMVNIC_OPEN_FAILED 3
|
||||
|
||||
/* basic structures plus 100 2k buffers */
|
||||
#define IBMVNIC_IO_ENTITLEMENT_DEFAULT 610305
|
||||
|
@ -38,6 +39,8 @@
|
|||
#define IBMVNIC_MAX_LTB_SIZE ((1 << (MAX_ORDER - 1)) * PAGE_SIZE)
|
||||
#define IBMVNIC_BUFFER_HLEN 500
|
||||
|
||||
#define IBMVNIC_RESET_DELAY 100
|
||||
|
||||
static const char ibmvnic_priv_flags[][ETH_GSTRING_LEN] = {
|
||||
#define IBMVNIC_USE_SERVER_MAXES 0x1
|
||||
"use-server-maxes"
|
||||
|
@ -1076,7 +1079,8 @@ struct ibmvnic_adapter {
|
|||
spinlock_t rwi_lock;
|
||||
struct list_head rwi_list;
|
||||
struct work_struct ibmvnic_reset;
|
||||
bool resetting;
|
||||
struct delayed_work ibmvnic_delayed_reset;
|
||||
unsigned long resetting;
|
||||
bool napi_enabled, from_passive_init;
|
||||
|
||||
bool failover_pending;
|
||||
|
|
|
@ -3108,7 +3108,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
|
|||
skb_put(skb, len);
|
||||
|
||||
if (dev->features & NETIF_F_RXCSUM) {
|
||||
skb->csum = csum;
|
||||
skb->csum = le16_to_cpu(csum);
|
||||
skb->ip_summed = CHECKSUM_COMPLETE;
|
||||
}
|
||||
|
||||
|
|
|
@ -20,15 +20,15 @@ config MLX5_ACCEL
|
|||
bool
|
||||
|
||||
config MLX5_FPGA
|
||||
bool "Mellanox Technologies Innova support"
|
||||
depends on MLX5_CORE
|
||||
bool "Mellanox Technologies Innova support"
|
||||
depends on MLX5_CORE
|
||||
select MLX5_ACCEL
|
||||
---help---
|
||||
Build support for the Innova family of network cards by Mellanox
|
||||
Technologies. Innova network cards are comprised of a ConnectX chip
|
||||
and an FPGA chip on one board. If you select this option, the
|
||||
mlx5_core driver will include the Innova FPGA core and allow building
|
||||
sandbox-specific client drivers.
|
||||
---help---
|
||||
Build support for the Innova family of network cards by Mellanox
|
||||
Technologies. Innova network cards are comprised of a ConnectX chip
|
||||
and an FPGA chip on one board. If you select this option, the
|
||||
mlx5_core driver will include the Innova FPGA core and allow building
|
||||
sandbox-specific client drivers.
|
||||
|
||||
config MLX5_CORE_EN
|
||||
bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support"
|
||||
|
@ -58,14 +58,14 @@ config MLX5_EN_RXNFC
|
|||
API.
|
||||
|
||||
config MLX5_MPFS
|
||||
bool "Mellanox Technologies MLX5 MPFS support"
|
||||
depends on MLX5_CORE_EN
|
||||
bool "Mellanox Technologies MLX5 MPFS support"
|
||||
depends on MLX5_CORE_EN
|
||||
default y
|
||||
---help---
|
||||
---help---
|
||||
Mellanox Technologies Ethernet Multi-Physical Function Switch (MPFS)
|
||||
support in ConnectX NIC. MPFs is required for when multi-PF configuration
|
||||
is enabled to allow passing user configured unicast MAC addresses to the
|
||||
requesting PF.
|
||||
support in ConnectX NIC. MPFs is required for when multi-PF configuration
|
||||
is enabled to allow passing user configured unicast MAC addresses to the
|
||||
requesting PF.
|
||||
|
||||
config MLX5_ESWITCH
|
||||
bool "Mellanox Technologies MLX5 SRIOV E-Switch support"
|
||||
|
@ -73,10 +73,10 @@ config MLX5_ESWITCH
|
|||
default y
|
||||
---help---
|
||||
Mellanox Technologies Ethernet SRIOV E-Switch support in ConnectX NIC.
|
||||
E-Switch provides internal SRIOV packet steering and switching for the
|
||||
enabled VFs and PF in two available modes:
|
||||
Legacy SRIOV mode (L2 mac vlan steering based).
|
||||
Switchdev mode (eswitch offloads).
|
||||
E-Switch provides internal SRIOV packet steering and switching for the
|
||||
enabled VFs and PF in two available modes:
|
||||
Legacy SRIOV mode (L2 mac vlan steering based).
|
||||
Switchdev mode (eswitch offloads).
|
||||
|
||||
config MLX5_CORE_EN_DCB
|
||||
bool "Data Center Bridging (DCB) Support"
|
||||
|
|
|
@ -399,10 +399,10 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv,
|
|||
struct mlx5_flow_table *ft,
|
||||
struct ethtool_rx_flow_spec *fs)
|
||||
{
|
||||
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
|
||||
struct mlx5_flow_destination *dst = NULL;
|
||||
struct mlx5_flow_act flow_act = {0};
|
||||
struct mlx5_flow_spec *spec;
|
||||
struct mlx5_flow_handle *rule;
|
||||
struct mlx5_flow_spec *spec;
|
||||
int err = 0;
|
||||
|
||||
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
|
||||
|
|
|
@ -1664,46 +1664,63 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
|
|||
return err;
|
||||
}
|
||||
|
||||
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
|
||||
struct flow_match_ipv4_addrs match;
|
||||
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
|
||||
struct flow_match_control match;
|
||||
u16 addr_type;
|
||||
|
||||
flow_rule_match_enc_ipv4_addrs(rule, &match);
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
|
||||
src_ipv4_src_ipv6.ipv4_layout.ipv4,
|
||||
ntohl(match.mask->src));
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
|
||||
src_ipv4_src_ipv6.ipv4_layout.ipv4,
|
||||
ntohl(match.key->src));
|
||||
flow_rule_match_enc_control(rule, &match);
|
||||
addr_type = match.key->addr_type;
|
||||
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
|
||||
dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
|
||||
ntohl(match.mask->dst));
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
|
||||
dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
|
||||
ntohl(match.key->dst));
|
||||
/* For tunnel addr_type used same key id`s as for non-tunnel */
|
||||
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
|
||||
struct flow_match_ipv4_addrs match;
|
||||
|
||||
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
|
||||
} else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
|
||||
struct flow_match_ipv6_addrs match;
|
||||
flow_rule_match_enc_ipv4_addrs(rule, &match);
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
|
||||
src_ipv4_src_ipv6.ipv4_layout.ipv4,
|
||||
ntohl(match.mask->src));
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
|
||||
src_ipv4_src_ipv6.ipv4_layout.ipv4,
|
||||
ntohl(match.key->src));
|
||||
|
||||
flow_rule_match_enc_ipv6_addrs(rule, &match);
|
||||
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
|
||||
src_ipv4_src_ipv6.ipv6_layout.ipv6),
|
||||
&match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
|
||||
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
|
||||
src_ipv4_src_ipv6.ipv6_layout.ipv6),
|
||||
&match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
|
||||
dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
|
||||
ntohl(match.mask->dst));
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
|
||||
dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
|
||||
ntohl(match.key->dst));
|
||||
|
||||
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
|
||||
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
|
||||
&match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
|
||||
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
|
||||
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
|
||||
&match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
|
||||
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
|
||||
ethertype);
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
|
||||
ETH_P_IP);
|
||||
} else if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
|
||||
struct flow_match_ipv6_addrs match;
|
||||
|
||||
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
|
||||
flow_rule_match_enc_ipv6_addrs(rule, &match);
|
||||
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
|
||||
src_ipv4_src_ipv6.ipv6_layout.ipv6),
|
||||
&match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
|
||||
ipv6));
|
||||
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
|
||||
src_ipv4_src_ipv6.ipv6_layout.ipv6),
|
||||
&match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout,
|
||||
ipv6));
|
||||
|
||||
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
|
||||
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
|
||||
&match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
|
||||
ipv6));
|
||||
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
|
||||
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
|
||||
&match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout,
|
||||
ipv6));
|
||||
|
||||
MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c,
|
||||
ethertype);
|
||||
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
|
||||
ETH_P_IPV6);
|
||||
}
|
||||
}
|
||||
|
||||
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
|
||||
|
|
|
@ -1568,6 +1568,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
|
|||
{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */
|
||||
{ PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
|
||||
{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
|
||||
{ PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
|
||||
{ 0, }
|
||||
};
|
||||
|
||||
|
|
|
@ -615,7 +615,7 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
|
|||
* that recalculates the CS and forwards to the vport.
|
||||
*/
|
||||
ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport.dmn,
|
||||
dest_action->vport.num,
|
||||
dest_action->vport.caps->num,
|
||||
final_icm_addr);
|
||||
if (ret) {
|
||||
mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
|
||||
|
@ -744,7 +744,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
|
|||
dest_action = action;
|
||||
if (rx_rule) {
|
||||
/* Loopback on WIRE vport is not supported */
|
||||
if (action->vport.num == WIRE_PORT)
|
||||
if (action->vport.caps->num == WIRE_PORT)
|
||||
goto out_invalid_arg;
|
||||
|
||||
attr.final_icm_addr = action->vport.caps->icm_address_rx;
|
||||
|
|
|
@ -230,8 +230,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
|
|||
(dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
|
||||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
|
||||
ret = mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
|
||||
&dmn->info.caps,
|
||||
inner, rx);
|
||||
dmn, inner, rx);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@ -458,13 +457,11 @@ static int dr_matcher_add_to_tbl(struct mlx5dr_matcher *matcher)
|
|||
|
||||
prev_matcher = NULL;
|
||||
if (next_matcher && !first)
|
||||
prev_matcher = list_entry(next_matcher->matcher_list.prev,
|
||||
struct mlx5dr_matcher,
|
||||
matcher_list);
|
||||
prev_matcher = list_prev_entry(next_matcher, matcher_list);
|
||||
else if (!first)
|
||||
prev_matcher = list_entry(tbl->matcher_list.prev,
|
||||
struct mlx5dr_matcher,
|
||||
matcher_list);
|
||||
prev_matcher = list_last_entry(&tbl->matcher_list,
|
||||
struct mlx5dr_matcher,
|
||||
matcher_list);
|
||||
|
||||
if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
|
||||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
|
||||
|
|
|
@ -18,7 +18,7 @@ static int dr_rule_append_to_miss_list(struct mlx5dr_ste *new_last_ste,
|
|||
struct mlx5dr_ste *last_ste;
|
||||
|
||||
/* The new entry will be inserted after the last */
|
||||
last_ste = list_entry(miss_list->prev, struct mlx5dr_ste, miss_list_node);
|
||||
last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
|
||||
WARN_ON(!last_ste);
|
||||
|
||||
ste_info_last = kzalloc(sizeof(*ste_info_last), GFP_KERNEL);
|
||||
|
|
|
@ -429,12 +429,9 @@ static void dr_ste_remove_middle_ste(struct mlx5dr_ste *ste,
|
|||
struct mlx5dr_ste *prev_ste;
|
||||
u64 miss_addr;
|
||||
|
||||
prev_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->prev, struct mlx5dr_ste,
|
||||
miss_list_node);
|
||||
if (!prev_ste) {
|
||||
WARN_ON(true);
|
||||
prev_ste = list_prev_entry(ste, miss_list_node);
|
||||
if (WARN_ON(!prev_ste))
|
||||
return;
|
||||
}
|
||||
|
||||
miss_addr = mlx5dr_ste_get_miss_addr(ste->hw_ste);
|
||||
mlx5dr_ste_set_miss_addr(prev_ste->hw_ste, miss_addr);
|
||||
|
@ -461,8 +458,8 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
|
|||
struct mlx5dr_ste_htbl *stats_tbl;
|
||||
LIST_HEAD(send_ste_list);
|
||||
|
||||
first_ste = list_entry(mlx5dr_ste_get_miss_list(ste)->next,
|
||||
struct mlx5dr_ste, miss_list_node);
|
||||
first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
|
||||
struct mlx5dr_ste, miss_list_node);
|
||||
stats_tbl = first_ste->htbl;
|
||||
|
||||
/* Two options:
|
||||
|
@ -479,8 +476,7 @@ void mlx5dr_ste_free(struct mlx5dr_ste *ste,
|
|||
if (last_ste == first_ste)
|
||||
next_ste = NULL;
|
||||
else
|
||||
next_ste = list_entry(ste->miss_list_node.next,
|
||||
struct mlx5dr_ste, miss_list_node);
|
||||
next_ste = list_next_entry(ste, miss_list_node);
|
||||
|
||||
if (!next_ste) {
|
||||
/* One and only entry in the list */
|
||||
|
@ -841,6 +837,8 @@ static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
|
|||
spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);
|
||||
|
||||
spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
|
||||
spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
|
||||
source_eswitch_owner_vhca_id);
|
||||
|
||||
spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
|
||||
spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
|
||||
|
@ -2254,11 +2252,18 @@ static int dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
|
|||
{
|
||||
struct mlx5dr_match_misc *misc_mask = &value->misc;
|
||||
|
||||
if (misc_mask->source_port != 0xffff)
|
||||
/* Partial misc source_port is not supported */
|
||||
if (misc_mask->source_port && misc_mask->source_port != 0xffff)
|
||||
return -EINVAL;
|
||||
|
||||
/* Partial misc source_eswitch_owner_vhca_id is not supported */
|
||||
if (misc_mask->source_eswitch_owner_vhca_id &&
|
||||
misc_mask->source_eswitch_owner_vhca_id != 0xffff)
|
||||
return -EINVAL;
|
||||
|
||||
DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
|
||||
DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
|
||||
misc_mask->source_eswitch_owner_vhca_id = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -2270,17 +2275,33 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
|
|||
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
|
||||
struct mlx5dr_match_misc *misc = &value->misc;
|
||||
struct mlx5dr_cmd_vport_cap *vport_cap;
|
||||
struct mlx5dr_domain *dmn = sb->dmn;
|
||||
struct mlx5dr_cmd_caps *caps;
|
||||
u8 *tag = hw_ste->tag;
|
||||
|
||||
DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
|
||||
|
||||
vport_cap = mlx5dr_get_vport_cap(sb->caps, misc->source_port);
|
||||
if (sb->vhca_id_valid) {
|
||||
/* Find port GVMI based on the eswitch_owner_vhca_id */
|
||||
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
|
||||
caps = &dmn->info.caps;
|
||||
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
|
||||
dmn->peer_dmn->info.caps.gvmi))
|
||||
caps = &dmn->peer_dmn->info.caps;
|
||||
else
|
||||
return -EINVAL;
|
||||
} else {
|
||||
caps = &dmn->info.caps;
|
||||
}
|
||||
|
||||
vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
|
||||
if (!vport_cap)
|
||||
return -EINVAL;
|
||||
|
||||
if (vport_cap->vport_gvmi)
|
||||
MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
|
||||
|
||||
misc->source_eswitch_owner_vhca_id = 0;
|
||||
misc->source_port = 0;
|
||||
|
||||
return 0;
|
||||
|
@ -2288,17 +2309,20 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
|
|||
|
||||
int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
|
||||
struct mlx5dr_match_param *mask,
|
||||
struct mlx5dr_cmd_caps *caps,
|
||||
struct mlx5dr_domain *dmn,
|
||||
bool inner, bool rx)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
|
||||
sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
|
||||
|
||||
ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
sb->rx = rx;
|
||||
sb->caps = caps;
|
||||
sb->dmn = dmn;
|
||||
sb->inner = inner;
|
||||
sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
|
||||
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
|
||||
|
|
|
@ -180,6 +180,8 @@ void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
|
|||
struct mlx5dr_ste_build {
|
||||
u8 inner:1;
|
||||
u8 rx:1;
|
||||
u8 vhca_id_valid:1;
|
||||
struct mlx5dr_domain *dmn;
|
||||
struct mlx5dr_cmd_caps *caps;
|
||||
u8 lu_type;
|
||||
u16 byte_mask;
|
||||
|
@ -331,7 +333,7 @@ void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
|
|||
bool inner, bool rx);
|
||||
int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
|
||||
struct mlx5dr_match_param *mask,
|
||||
struct mlx5dr_cmd_caps *caps,
|
||||
struct mlx5dr_domain *dmn,
|
||||
bool inner, bool rx);
|
||||
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
|
||||
|
||||
|
@ -453,7 +455,7 @@ struct mlx5dr_match_misc {
|
|||
u32 gre_c_present:1;
|
||||
/* Source port.;0xffff determines wire port */
|
||||
u32 source_port:16;
|
||||
u32 reserved_auto2:16;
|
||||
u32 source_eswitch_owner_vhca_id:16;
|
||||
/* VLAN ID of first VLAN tag the inner header of the incoming packet.
|
||||
* Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
|
||||
*/
|
||||
|
@ -745,7 +747,6 @@ struct mlx5dr_action {
|
|||
struct {
|
||||
struct mlx5dr_domain *dmn;
|
||||
struct mlx5dr_cmd_vport_cap *caps;
|
||||
u32 num;
|
||||
} vport;
|
||||
struct {
|
||||
u32 vlan_hdr; /* tpid_pcp_dei_vid */
|
||||
|
|
|
@ -3771,6 +3771,14 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
|
|||
goto err_port_qdiscs_init;
|
||||
}
|
||||
|
||||
err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
|
||||
false);
|
||||
if (err) {
|
||||
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
|
||||
mlxsw_sp_port->local_port);
|
||||
goto err_port_vlan_clear;
|
||||
}
|
||||
|
||||
err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
|
||||
if (err) {
|
||||
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
|
||||
|
@ -3818,6 +3826,7 @@ err_port_vlan_create:
|
|||
err_port_pvid_set:
|
||||
mlxsw_sp_port_nve_fini(mlxsw_sp_port);
|
||||
err_port_nve_init:
|
||||
err_port_vlan_clear:
|
||||
mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
|
||||
err_port_qdiscs_init:
|
||||
mlxsw_sp_port_fids_fini(mlxsw_sp_port);
|
||||
|
|
|
@ -21,6 +21,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
|
|||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
const struct flow_action_entry *act;
|
||||
int mirror_act_count = 0;
|
||||
int err, i;
|
||||
|
||||
if (!flow_action_has_entries(flow_action))
|
||||
|
@ -105,6 +106,11 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
|
|||
case FLOW_ACTION_MIRRED: {
|
||||
struct net_device *out_dev = act->dev;
|
||||
|
||||
if (mirror_act_count++) {
|
||||
NL_SET_ERR_MSG_MOD(extack, "Multiple mirror actions per rule are not supported");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
err = mlxsw_sp_acl_rulei_act_mirror(mlxsw_sp, rulei,
|
||||
block, out_dev,
|
||||
extack);
|
||||
|
|
|
@ -176,8 +176,10 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
|
|||
u8 mask, val;
|
||||
int err;
|
||||
|
||||
if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack))
|
||||
if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) {
|
||||
err = -EOPNOTSUPP;
|
||||
goto err_delete;
|
||||
}
|
||||
|
||||
tos_off = proto == htons(ETH_P_IP) ? 16 : 20;
|
||||
|
||||
|
@ -198,14 +200,18 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
|
|||
if ((iter->val & cmask) == (val & cmask) &&
|
||||
iter->band != knode->res->classid) {
|
||||
NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter");
|
||||
err = -EOPNOTSUPP;
|
||||
goto err_delete;
|
||||
}
|
||||
}
|
||||
|
||||
if (!match) {
|
||||
match = kzalloc(sizeof(*match), GFP_KERNEL);
|
||||
if (!match)
|
||||
return -ENOMEM;
|
||||
if (!match) {
|
||||
err = -ENOMEM;
|
||||
goto err_delete;
|
||||
}
|
||||
|
||||
list_add(&match->list, &alink->dscp_map);
|
||||
}
|
||||
match->handle = knode->handle;
|
||||
|
@ -221,7 +227,7 @@ nfp_abm_u32_knode_replace(struct nfp_abm_link *alink,
|
|||
|
||||
err_delete:
|
||||
nfp_abm_u32_knode_delete(alink, knode);
|
||||
return -EOPNOTSUPP;
|
||||
return err;
|
||||
}
|
||||
|
||||
static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type,
|
||||
|
|
|
@ -400,6 +400,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
|
|||
repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
|
||||
if (!repr_priv) {
|
||||
err = -ENOMEM;
|
||||
nfp_repr_free(repr);
|
||||
goto err_reprs_clean;
|
||||
}
|
||||
|
||||
|
@ -413,6 +414,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
|
|||
port = nfp_port_alloc(app, port_type, repr);
|
||||
if (IS_ERR(port)) {
|
||||
err = PTR_ERR(port);
|
||||
kfree(repr_priv);
|
||||
nfp_repr_free(repr);
|
||||
goto err_reprs_clean;
|
||||
}
|
||||
|
@ -433,6 +435,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
|
|||
err = nfp_repr_init(app, repr,
|
||||
port_id, port, priv->nn->dp.netdev);
|
||||
if (err) {
|
||||
kfree(repr_priv);
|
||||
nfp_port_free(port);
|
||||
nfp_repr_free(repr);
|
||||
goto err_reprs_clean;
|
||||
|
@ -515,6 +518,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
|
|||
repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
|
||||
if (!repr_priv) {
|
||||
err = -ENOMEM;
|
||||
nfp_repr_free(repr);
|
||||
goto err_reprs_clean;
|
||||
}
|
||||
|
||||
|
@ -525,11 +529,13 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
|
|||
port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
|
||||
if (IS_ERR(port)) {
|
||||
err = PTR_ERR(port);
|
||||
kfree(repr_priv);
|
||||
nfp_repr_free(repr);
|
||||
goto err_reprs_clean;
|
||||
}
|
||||
err = nfp_port_init_phy_port(app->pf, app, port, i);
|
||||
if (err) {
|
||||
kfree(repr_priv);
|
||||
nfp_port_free(port);
|
||||
nfp_repr_free(repr);
|
||||
goto err_reprs_clean;
|
||||
|
@ -542,6 +548,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
|
|||
err = nfp_repr_init(app, repr,
|
||||
cmsg_port_id, port, priv->nn->dp.netdev);
|
||||
if (err) {
|
||||
kfree(repr_priv);
|
||||
nfp_port_free(port);
|
||||
nfp_repr_free(repr);
|
||||
goto err_reprs_clean;
|
||||
|
|
|
@ -1,497 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* drivers/net/ethernet/netx-eth.c
|
||||
*
|
||||
* Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/etherdevice.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/mii.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/netx-regs.h>
|
||||
#include <mach/pfifo.h>
|
||||
#include <mach/xc.h>
|
||||
#include <linux/platform_data/eth-netx.h>
|
||||
|
||||
/* XC Fifo Offsets */
|
||||
#define EMPTY_PTR_FIFO(xcno) (0 + ((xcno) << 3)) /* Index of the empty pointer FIFO */
|
||||
#define IND_FIFO_PORT_HI(xcno) (1 + ((xcno) << 3)) /* Index of the FIFO where received */
|
||||
/* Data packages are indicated by XC */
|
||||
#define IND_FIFO_PORT_LO(xcno) (2 + ((xcno) << 3)) /* Index of the FIFO where received */
|
||||
/* Data packages are indicated by XC */
|
||||
#define REQ_FIFO_PORT_HI(xcno) (3 + ((xcno) << 3)) /* Index of the FIFO where Data packages */
|
||||
/* have to be indicated by ARM which */
|
||||
/* shall be sent */
|
||||
#define REQ_FIFO_PORT_LO(xcno) (4 + ((xcno) << 3)) /* Index of the FIFO where Data packages */
|
||||
/* have to be indicated by ARM which shall */
|
||||
/* be sent */
|
||||
#define CON_FIFO_PORT_HI(xcno) (5 + ((xcno) << 3)) /* Index of the FIFO where sent Data packages */
|
||||
/* are confirmed */
|
||||
#define CON_FIFO_PORT_LO(xcno) (6 + ((xcno) << 3)) /* Index of the FIFO where sent Data */
|
||||
/* packages are confirmed */
|
||||
#define PFIFO_MASK(xcno) (0x7f << (xcno*8))
|
||||
|
||||
#define FIFO_PTR_FRAMELEN_SHIFT 0
|
||||
#define FIFO_PTR_FRAMELEN_MASK (0x7ff << 0)
|
||||
#define FIFO_PTR_FRAMELEN(len) (((len) << 0) & FIFO_PTR_FRAMELEN_MASK)
|
||||
#define FIFO_PTR_TIMETRIG (1<<11)
|
||||
#define FIFO_PTR_MULTI_REQ
|
||||
#define FIFO_PTR_ORIGIN (1<<14)
|
||||
#define FIFO_PTR_VLAN (1<<15)
|
||||
#define FIFO_PTR_FRAMENO_SHIFT 16
|
||||
#define FIFO_PTR_FRAMENO_MASK (0x3f << 16)
|
||||
#define FIFO_PTR_FRAMENO(no) (((no) << 16) & FIFO_PTR_FRAMENO_MASK)
|
||||
#define FIFO_PTR_SEGMENT_SHIFT 22
|
||||
#define FIFO_PTR_SEGMENT_MASK (0xf << 22)
|
||||
#define FIFO_PTR_SEGMENT(seg) (((seg) & 0xf) << 22)
|
||||
#define FIFO_PTR_ERROR_SHIFT 28
|
||||
#define FIFO_PTR_ERROR_MASK (0xf << 28)
|
||||
|
||||
#define ISR_LINK_STATUS_CHANGE (1<<4)
|
||||
#define ISR_IND_LO (1<<3)
|
||||
#define ISR_CON_LO (1<<2)
|
||||
#define ISR_IND_HI (1<<1)
|
||||
#define ISR_CON_HI (1<<0)
|
||||
|
||||
#define ETH_MAC_LOCAL_CONFIG 0x1560
|
||||
#define ETH_MAC_4321 0x1564
|
||||
#define ETH_MAC_65 0x1568
|
||||
|
||||
#define MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT 16
|
||||
#define MAC_TRAFFIC_CLASS_ARRANGEMENT_MASK (0xf<<MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT)
|
||||
#define MAC_TRAFFIC_CLASS_ARRANGEMENT(x) (((x)<<MAC_TRAFFIC_CLASS_ARRANGEMENT_SHIFT) & MAC_TRAFFIC_CLASS_ARRANGEMENT_MASK)
|
||||
#define LOCAL_CONFIG_LINK_STATUS_IRQ_EN (1<<24)
|
||||
#define LOCAL_CONFIG_CON_LO_IRQ_EN (1<<23)
|
||||
#define LOCAL_CONFIG_CON_HI_IRQ_EN (1<<22)
|
||||
#define LOCAL_CONFIG_IND_LO_IRQ_EN (1<<21)
|
||||
#define LOCAL_CONFIG_IND_HI_IRQ_EN (1<<20)
|
||||
|
||||
#define CARDNAME "netx-eth"
|
||||
|
||||
/* LSB must be zero */
|
||||
#define INTERNAL_PHY_ADR 0x1c
|
||||
|
||||
struct netx_eth_priv {
|
||||
void __iomem *sram_base, *xpec_base, *xmac_base;
|
||||
int id;
|
||||
struct mii_if_info mii;
|
||||
u32 msg_enable;
|
||||
struct xc *xc;
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
static void netx_eth_set_multicast_list(struct net_device *ndev)
|
||||
{
|
||||
/* implement me */
|
||||
}
|
||||
|
||||
static int
|
||||
netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
|
||||
{
|
||||
struct netx_eth_priv *priv = netdev_priv(ndev);
|
||||
unsigned char *buf = skb->data;
|
||||
unsigned int len = skb->len;
|
||||
|
||||
spin_lock_irq(&priv->lock);
|
||||
memcpy_toio(priv->sram_base + 1560, (void *)buf, len);
|
||||
if (len < 60) {
|
||||
memset_io(priv->sram_base + 1560 + len, 0, 60 - len);
|
||||
len = 60;
|
||||
}
|
||||
|
||||
pfifo_push(REQ_FIFO_PORT_LO(priv->id),
|
||||
FIFO_PTR_SEGMENT(priv->id) |
|
||||
FIFO_PTR_FRAMENO(1) |
|
||||
FIFO_PTR_FRAMELEN(len));
|
||||
|
||||
ndev->stats.tx_packets++;
|
||||
ndev->stats.tx_bytes += skb->len;
|
||||
|
||||
netif_stop_queue(ndev);
|
||||
spin_unlock_irq(&priv->lock);
|
||||
dev_kfree_skb(skb);
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
static void netx_eth_receive(struct net_device *ndev)
|
||||
{
|
||||
struct netx_eth_priv *priv = netdev_priv(ndev);
|
||||
unsigned int val, frameno, seg, len;
|
||||
unsigned char *data;
|
||||
struct sk_buff *skb;
|
||||
|
||||
val = pfifo_pop(IND_FIFO_PORT_LO(priv->id));
|
||||
|
||||
frameno = (val & FIFO_PTR_FRAMENO_MASK) >> FIFO_PTR_FRAMENO_SHIFT;
|
||||
seg = (val & FIFO_PTR_SEGMENT_MASK) >> FIFO_PTR_SEGMENT_SHIFT;
|
||||
len = (val & FIFO_PTR_FRAMELEN_MASK) >> FIFO_PTR_FRAMELEN_SHIFT;
|
||||
|
||||
skb = netdev_alloc_skb(ndev, len);
|
||||
if (unlikely(skb == NULL)) {
|
||||
ndev->stats.rx_dropped++;
|
||||
return;
|
||||
}
|
||||
|
||||
data = skb_put(skb, len);
|
||||
|
||||
memcpy_fromio(data, priv->sram_base + frameno * 1560, len);
|
||||
|
||||
pfifo_push(EMPTY_PTR_FIFO(priv->id),
|
||||
FIFO_PTR_SEGMENT(seg) | FIFO_PTR_FRAMENO(frameno));
|
||||
|
||||
skb->protocol = eth_type_trans(skb, ndev);
|
||||
netif_rx(skb);
|
||||
ndev->stats.rx_packets++;
|
||||
ndev->stats.rx_bytes += len;
|
||||
}
|
||||
|
||||
static irqreturn_t
|
||||
netx_eth_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
struct net_device *ndev = dev_id;
|
||||
struct netx_eth_priv *priv = netdev_priv(ndev);
|
||||
int status;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
status = readl(NETX_PFIFO_XPEC_ISR(priv->id));
|
||||
while (status) {
|
||||
int fill_level;
|
||||
writel(status, NETX_PFIFO_XPEC_ISR(priv->id));
|
||||
|
||||
if ((status & ISR_CON_HI) || (status & ISR_IND_HI))
|
||||
printk("%s: unexpected status: 0x%08x\n",
|
||||
__func__, status);
|
||||
|
||||
fill_level =
|
||||
readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id)));
|
||||
while (fill_level--)
|
||||
netx_eth_receive(ndev);
|
||||
|
||||
if (status & ISR_CON_LO)
|
||||
netif_wake_queue(ndev);
|
||||
|
||||
if (status & ISR_LINK_STATUS_CHANGE)
|
||||
mii_check_media(&priv->mii, netif_msg_link(priv), 1);
|
||||
|
||||
status = readl(NETX_PFIFO_XPEC_ISR(priv->id));
|
||||
}
|
||||
spin_unlock_irqrestore(&priv->lock, flags);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int netx_eth_open(struct net_device *ndev)
|
||||
{
|
||||
struct netx_eth_priv *priv = netdev_priv(ndev);
|
||||
|
||||
if (request_irq
|
||||
(ndev->irq, netx_eth_interrupt, IRQF_SHARED, ndev->name, ndev))
|
||||
return -EAGAIN;
|
||||
|
||||
writel(ndev->dev_addr[0] |
|
||||
ndev->dev_addr[1]<<8 |
|
||||
ndev->dev_addr[2]<<16 |
|
||||
ndev->dev_addr[3]<<24,
|
||||
priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_4321);
|
||||
writel(ndev->dev_addr[4] |
|
||||
ndev->dev_addr[5]<<8,
|
||||
priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_65);
|
||||
|
||||
writel(LOCAL_CONFIG_LINK_STATUS_IRQ_EN |
|
||||
LOCAL_CONFIG_CON_LO_IRQ_EN |
|
||||
LOCAL_CONFIG_CON_HI_IRQ_EN |
|
||||
LOCAL_CONFIG_IND_LO_IRQ_EN |
|
||||
LOCAL_CONFIG_IND_HI_IRQ_EN,
|
||||
priv->xpec_base + NETX_XPEC_RAM_START_OFS +
|
||||
ETH_MAC_LOCAL_CONFIG);
|
||||
|
||||
mii_check_media(&priv->mii, netif_msg_link(priv), 1);
|
||||
netif_start_queue(ndev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int netx_eth_close(struct net_device *ndev)
|
||||
{
|
||||
struct netx_eth_priv *priv = netdev_priv(ndev);
|
||||
|
||||
netif_stop_queue(ndev);
|
||||
|
||||
writel(0,
|
||||
priv->xpec_base + NETX_XPEC_RAM_START_OFS + ETH_MAC_LOCAL_CONFIG);
|
||||
|
||||
free_irq(ndev->irq, ndev);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void netx_eth_timeout(struct net_device *ndev)
|
||||
{
|
||||
struct netx_eth_priv *priv = netdev_priv(ndev);
|
||||
int i;
|
||||
|
||||
printk(KERN_ERR "%s: transmit timed out, resetting\n", ndev->name);
|
||||
|
||||
spin_lock_irq(&priv->lock);
|
||||
|
||||
xc_reset(priv->xc);
|
||||
xc_start(priv->xc);
|
||||
|
||||
for (i=2; i<=18; i++)
|
||||
pfifo_push(EMPTY_PTR_FIFO(priv->id),
|
||||
FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id));
|
||||
|
||||
spin_unlock_irq(&priv->lock);
|
||||
|
||||
netif_wake_queue(ndev);
|
||||
}
|
||||
|
||||
static int
|
||||
netx_eth_phy_read(struct net_device *ndev, int phy_id, int reg)
|
||||
{
|
||||
unsigned int val;
|
||||
|
||||
val = MIIMU_SNRDY | MIIMU_PREAMBLE | MIIMU_PHYADDR(phy_id) |
|
||||
MIIMU_REGADDR(reg) | MIIMU_PHY_NRES;
|
||||
|
||||
writel(val, NETX_MIIMU);
|
||||
while (readl(NETX_MIIMU) & MIIMU_SNRDY);
|
||||
|
||||
return readl(NETX_MIIMU) >> 16;
|
||||
|
||||
}
|
||||
|
||||
static void
|
||||
netx_eth_phy_write(struct net_device *ndev, int phy_id, int reg, int value)
|
||||
{
|
||||
unsigned int val;
|
||||
|
||||
val = MIIMU_SNRDY | MIIMU_PREAMBLE | MIIMU_PHYADDR(phy_id) |
|
||||
MIIMU_REGADDR(reg) | MIIMU_PHY_NRES | MIIMU_OPMODE_WRITE |
|
||||
MIIMU_DATA(value);
|
||||
|
||||
writel(val, NETX_MIIMU);
|
||||
while (readl(NETX_MIIMU) & MIIMU_SNRDY);
|
||||
}
|
||||
|
||||
static const struct net_device_ops netx_eth_netdev_ops = {
|
||||
.ndo_open = netx_eth_open,
|
||||
.ndo_stop = netx_eth_close,
|
||||
.ndo_start_xmit = netx_eth_hard_start_xmit,
|
||||
.ndo_tx_timeout = netx_eth_timeout,
|
||||
.ndo_set_rx_mode = netx_eth_set_multicast_list,
|
||||
.ndo_validate_addr = eth_validate_addr,
|
||||
.ndo_set_mac_address = eth_mac_addr,
|
||||
};
|
||||
|
||||
static int netx_eth_enable(struct net_device *ndev)
|
||||
{
|
||||
struct netx_eth_priv *priv = netdev_priv(ndev);
|
||||
unsigned int mac4321, mac65;
|
||||
int running, i, ret;
|
||||
bool inv_mac_addr = false;
|
||||
|
||||
ndev->netdev_ops = &netx_eth_netdev_ops;
|
||||
ndev->watchdog_timeo = msecs_to_jiffies(5000);
|
||||
|
||||
priv->msg_enable = NETIF_MSG_LINK;
|
||||
priv->mii.phy_id_mask = 0x1f;
|
||||
priv->mii.reg_num_mask = 0x1f;
|
||||
priv->mii.force_media = 0;
|
||||
priv->mii.full_duplex = 0;
|
||||
priv->mii.dev = ndev;
|
||||
priv->mii.mdio_read = netx_eth_phy_read;
|
||||
priv->mii.mdio_write = netx_eth_phy_write;
|
||||
priv->mii.phy_id = INTERNAL_PHY_ADR + priv->id;
|
||||
|
||||
running = xc_running(priv->xc);
|
||||
xc_stop(priv->xc);
|
||||
|
||||
/* if the xc engine is already running, assume the bootloader has
|
||||
* loaded the firmware for us
|
||||
*/
|
||||
if (running) {
|
||||
/* get Node Address from hardware */
|
||||
mac4321 = readl(priv->xpec_base +
|
||||
NETX_XPEC_RAM_START_OFS + ETH_MAC_4321);
|
||||
mac65 = readl(priv->xpec_base +
|
||||
NETX_XPEC_RAM_START_OFS + ETH_MAC_65);
|
||||
|
||||
ndev->dev_addr[0] = mac4321 & 0xff;
|
||||
ndev->dev_addr[1] = (mac4321 >> 8) & 0xff;
|
||||
ndev->dev_addr[2] = (mac4321 >> 16) & 0xff;
|
||||
ndev->dev_addr[3] = (mac4321 >> 24) & 0xff;
|
||||
ndev->dev_addr[4] = mac65 & 0xff;
|
||||
ndev->dev_addr[5] = (mac65 >> 8) & 0xff;
|
||||
} else {
|
||||
if (xc_request_firmware(priv->xc)) {
|
||||
printk(CARDNAME ": requesting firmware failed\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
}
|
||||
|
||||
xc_reset(priv->xc);
|
||||
xc_start(priv->xc);
|
||||
|
||||
if (!is_valid_ether_addr(ndev->dev_addr))
|
||||
inv_mac_addr = true;
|
||||
|
||||
for (i=2; i<=18; i++)
|
||||
pfifo_push(EMPTY_PTR_FIFO(priv->id),
|
||||
FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id));
|
||||
|
||||
ret = register_netdev(ndev);
|
||||
if (inv_mac_addr)
|
||||
printk("%s: Invalid ethernet MAC address. Please set using ip\n",
|
||||
ndev->name);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int netx_eth_drv_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct netx_eth_priv *priv;
|
||||
struct net_device *ndev;
|
||||
struct netxeth_platform_data *pdata;
|
||||
int ret;
|
||||
|
||||
ndev = alloc_etherdev(sizeof (struct netx_eth_priv));
|
||||
if (!ndev) {
|
||||
ret = -ENOMEM;
|
||||
goto exit;
|
||||
}
|
||||
SET_NETDEV_DEV(ndev, &pdev->dev);
|
||||
|
||||
platform_set_drvdata(pdev, ndev);
|
||||
|
||||
priv = netdev_priv(ndev);
|
||||
|
||||
pdata = dev_get_platdata(&pdev->dev);
|
||||
priv->xc = request_xc(pdata->xcno, &pdev->dev);
|
||||
if (!priv->xc) {
|
||||
dev_err(&pdev->dev, "unable to request xc engine\n");
|
||||
ret = -ENODEV;
|
||||
goto exit_free_netdev;
|
||||
}
|
||||
|
||||
ndev->irq = priv->xc->irq;
|
||||
priv->id = pdev->id;
|
||||
priv->xpec_base = priv->xc->xpec_base;
|
||||
priv->xmac_base = priv->xc->xmac_base;
|
||||
priv->sram_base = priv->xc->sram_base;
|
||||
|
||||
spin_lock_init(&priv->lock);
|
||||
|
||||
ret = pfifo_request(PFIFO_MASK(priv->id));
|
||||
if (ret) {
|
||||
printk("unable to request PFIFO\n");
|
||||
goto exit_free_xc;
|
||||
}
|
||||
|
||||
ret = netx_eth_enable(ndev);
|
||||
if (ret)
|
||||
goto exit_free_pfifo;
|
||||
|
||||
return 0;
|
||||
exit_free_pfifo:
|
||||
pfifo_free(PFIFO_MASK(priv->id));
|
||||
exit_free_xc:
|
||||
free_xc(priv->xc);
|
||||
exit_free_netdev:
|
||||
free_netdev(ndev);
|
||||
exit:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int netx_eth_drv_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct net_device *ndev = platform_get_drvdata(pdev);
|
||||
struct netx_eth_priv *priv = netdev_priv(ndev);
|
||||
|
||||
unregister_netdev(ndev);
|
||||
xc_stop(priv->xc);
|
||||
free_xc(priv->xc);
|
||||
free_netdev(ndev);
|
||||
pfifo_free(PFIFO_MASK(priv->id));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Suspend is not supported; log and report success so PM core proceeds. */
static int netx_eth_drv_suspend(struct platform_device *pdev, pm_message_t state)
{
	dev_err(&pdev->dev, "suspend not implemented\n");
	return 0;
}
static int netx_eth_drv_resume(struct platform_device *pdev)
|
||||
{
|
||||
dev_err(&pdev->dev, "resume not implemented\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver netx_eth_driver = {
|
||||
.probe = netx_eth_drv_probe,
|
||||
.remove = netx_eth_drv_remove,
|
||||
.suspend = netx_eth_drv_suspend,
|
||||
.resume = netx_eth_drv_resume,
|
||||
.driver = {
|
||||
.name = CARDNAME,
|
||||
},
|
||||
};
|
||||
|
||||
/*
 * Module init: configure and reset both internal PHYs via the chip's
 * PHY control register (which sits behind the IOC access-key lock),
 * then register the platform driver.
 */
static int __init netx_eth_init(void)
{
	unsigned int phy_control, val;

	printk("NetX Ethernet driver\n");

	phy_control = PHY_CONTROL_PHY_ADDRESS(INTERNAL_PHY_ADR>>1) |
		      PHY_CONTROL_PHY1_MODE(PHY_MODE_ALL) |
		      PHY_CONTROL_PHY1_AUTOMDIX |
		      PHY_CONTROL_PHY1_EN |
		      PHY_CONTROL_PHY0_MODE(PHY_MODE_ALL) |
		      PHY_CONTROL_PHY0_AUTOMDIX |
		      PHY_CONTROL_PHY0_EN |
		      PHY_CONTROL_CLK_XLATIN;

	/* read-then-write of the access key unlocks the protected register */
	val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
	writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);

	writel(phy_control | PHY_CONTROL_RESET, NETX_SYSTEM_PHY_CONTROL);
	udelay(100);

	/* unlock again: each write to the protected register needs the key */
	val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
	writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);

	writel(phy_control, NETX_SYSTEM_PHY_CONTROL);

	return platform_driver_register(&netx_eth_driver);
}
/* Module exit: unregister the platform driver. */
static void __exit netx_eth_cleanup(void)
{
	platform_driver_unregister(&netx_eth_driver);
}
module_init(netx_eth_init);
|
||||
module_exit(netx_eth_cleanup);
|
||||
|
||||
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS("platform:" CARDNAME);
|
||||
MODULE_FIRMWARE("xc0.bin");
|
||||
MODULE_FIRMWARE("xc1.bin");
|
||||
MODULE_FIRMWARE("xc2.bin");
|
|
@ -1347,7 +1347,7 @@ static int nixge_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
|
||||
if (priv->phy_mode < 0) {
|
||||
if ((int)priv->phy_mode < 0) {
|
||||
netdev_err(ndev, "not find \"phy-mode\" property\n");
|
||||
err = -EINVAL;
|
||||
goto unregister_mdio;
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
config LPC_ENET
|
||||
tristate "NXP ethernet MAC on LPC devices"
|
||||
depends on ARCH_LPC32XX || COMPILE_TEST
|
||||
select PHYLIB
|
||||
help
|
||||
tristate "NXP ethernet MAC on LPC devices"
|
||||
depends on ARCH_LPC32XX || COMPILE_TEST
|
||||
select PHYLIB
|
||||
help
|
||||
Say Y or M here if you want to use the NXP ethernet MAC included on
|
||||
some NXP LPC devices. You can safely enable this option for LPC32xx
|
||||
SoC. Also available as a module.
|
||||
|
|
|
@ -26,7 +26,7 @@ config IONIC
|
|||
found in
|
||||
<file:Documentation/networking/device_drivers/pensando/ionic.rst>.
|
||||
|
||||
To compile this driver as a module, choose M here. The module
|
||||
will be called ionic.
|
||||
To compile this driver as a module, choose M here. The module
|
||||
will be called ionic.
|
||||
|
||||
endif # NET_VENDOR_PENSANDO
|
||||
|
|
|
@ -57,7 +57,7 @@ DEFINE_SHOW_ATTRIBUTE(identity);
|
|||
void ionic_debugfs_add_ident(struct ionic *ionic)
|
||||
{
|
||||
debugfs_create_file("identity", 0400, ionic->dentry,
|
||||
ionic, &identity_fops) ? 0 : -EOPNOTSUPP;
|
||||
ionic, &identity_fops);
|
||||
}
|
||||
|
||||
void ionic_debugfs_add_sizes(struct ionic *ionic)
|
||||
|
|
|
@ -1704,6 +1704,7 @@ static struct ionic_lif *ionic_lif_alloc(struct ionic *ionic, unsigned int index
|
|||
GFP_KERNEL);
|
||||
|
||||
if (!lif->rss_ind_tbl) {
|
||||
err = -ENOMEM;
|
||||
dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
|
||||
goto err_out_free_qcqs;
|
||||
}
|
||||
|
|
|
@ -779,8 +779,7 @@ qede_rx_build_skb(struct qede_dev *edev,
|
|||
return NULL;
|
||||
|
||||
skb_reserve(skb, pad);
|
||||
memcpy(skb_put(skb, len),
|
||||
page_address(bd->data) + offset, len);
|
||||
skb_put_data(skb, page_address(bd->data) + offset, len);
|
||||
qede_reuse_page(rxq, bd);
|
||||
goto out;
|
||||
}
|
||||
|
|
|
@ -2007,7 +2007,7 @@ static int netsec_probe(struct platform_device *pdev)
|
|||
NETIF_MSG_LINK | NETIF_MSG_PROBE;
|
||||
|
||||
priv->phy_interface = device_get_phy_mode(&pdev->dev);
|
||||
if (priv->phy_interface < 0) {
|
||||
if ((int)priv->phy_interface < 0) {
|
||||
dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
|
||||
ret = -ENODEV;
|
||||
goto free_ndev;
|
||||
|
|
|
@ -1566,7 +1566,7 @@ static int ave_probe(struct platform_device *pdev)
|
|||
|
||||
np = dev->of_node;
|
||||
phy_mode = of_get_phy_mode(np);
|
||||
if (phy_mode < 0) {
|
||||
if ((int)phy_mode < 0) {
|
||||
dev_err(dev, "phy-mode not found\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -1662,19 +1662,19 @@ static int ave_probe(struct platform_device *pdev)
|
|||
"socionext,syscon-phy-mode",
|
||||
1, 0, &args);
|
||||
if (ret) {
|
||||
netdev_err(ndev, "can't get syscon-phy-mode property\n");
|
||||
dev_err(dev, "can't get syscon-phy-mode property\n");
|
||||
goto out_free_netdev;
|
||||
}
|
||||
priv->regmap = syscon_node_to_regmap(args.np);
|
||||
of_node_put(args.np);
|
||||
if (IS_ERR(priv->regmap)) {
|
||||
netdev_err(ndev, "can't map syscon-phy-mode\n");
|
||||
dev_err(dev, "can't map syscon-phy-mode\n");
|
||||
ret = PTR_ERR(priv->regmap);
|
||||
goto out_free_netdev;
|
||||
}
|
||||
ret = priv->data->get_pinmode(priv, phy_mode, args.args[0]);
|
||||
if (ret) {
|
||||
netdev_err(ndev, "invalid phy-mode setting\n");
|
||||
dev_err(dev, "invalid phy-mode setting\n");
|
||||
goto out_free_netdev;
|
||||
}
|
||||
|
||||
|
|
|
@ -191,7 +191,7 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
|
|||
struct device *dev = &gmac->pdev->dev;
|
||||
|
||||
gmac->phy_mode = of_get_phy_mode(dev->of_node);
|
||||
if (gmac->phy_mode < 0) {
|
||||
if ((int)gmac->phy_mode < 0) {
|
||||
dev_err(dev, "missing phy mode property\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@ -339,7 +339,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
|
|||
|
||||
dwmac->dev = &pdev->dev;
|
||||
dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
|
||||
if (dwmac->phy_mode < 0) {
|
||||
if ((int)dwmac->phy_mode < 0) {
|
||||
dev_err(&pdev->dev, "missing phy-mode property\n");
|
||||
ret = -EINVAL;
|
||||
goto err_remove_config_dt;
|
||||
|
|
|
@ -523,19 +523,18 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
|
|||
struct stmmac_rss *cfg, u32 num_rxq)
|
||||
{
|
||||
void __iomem *ioaddr = hw->pcsr;
|
||||
u32 *key = (u32 *)cfg->key;
|
||||
int i, ret;
|
||||
u32 value;
|
||||
|
||||
value = readl(ioaddr + XGMAC_RSS_CTRL);
|
||||
if (!cfg->enable) {
|
||||
if (!cfg || !cfg->enable) {
|
||||
value &= ~XGMAC_RSSE;
|
||||
writel(value, ioaddr + XGMAC_RSS_CTRL);
|
||||
return 0;
|
||||
}
|
||||
|
||||
for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) {
|
||||
ret = dwxgmac2_rss_write_reg(ioaddr, true, i, *key++);
|
||||
ret = dwxgmac2_rss_write_reg(ioaddr, true, i, cfg->key[i]);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -1557,13 +1557,15 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
|
|||
for (queue = 0; queue < rx_count; queue++) {
|
||||
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
|
||||
struct page_pool_params pp_params = { 0 };
|
||||
unsigned int num_pages;
|
||||
|
||||
rx_q->queue_index = queue;
|
||||
rx_q->priv_data = priv;
|
||||
|
||||
pp_params.flags = PP_FLAG_DMA_MAP;
|
||||
pp_params.pool_size = DMA_RX_SIZE;
|
||||
pp_params.order = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
|
||||
num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE);
|
||||
pp_params.order = ilog2(num_pages);
|
||||
pp_params.nid = dev_to_node(priv->device);
|
||||
pp_params.dev = priv->device;
|
||||
pp_params.dma_dir = DMA_FROM_DEVICE;
|
||||
|
|
|
@ -670,7 +670,7 @@ static int stmmac_test_flowctrl(struct stmmac_priv *priv)
|
|||
unsigned int pkt_count;
|
||||
int i, ret = 0;
|
||||
|
||||
if (!phydev || !phydev->pause)
|
||||
if (!phydev || (!phydev->pause && !phydev->asym_pause))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
|
||||
|
@ -1233,12 +1233,9 @@ static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
|
|||
return -EOPNOTSUPP;
|
||||
if (!priv->dma_cap.l3l4fnum)
|
||||
return -EOPNOTSUPP;
|
||||
if (priv->rss.enable) {
|
||||
struct stmmac_rss rss = { .enable = false, };
|
||||
|
||||
stmmac_rss_configure(priv, priv->hw, &rss,
|
||||
if (priv->rss.enable)
|
||||
stmmac_rss_configure(priv, priv->hw, NULL,
|
||||
priv->plat->rx_queues_to_use);
|
||||
}
|
||||
|
||||
dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
|
||||
if (!dissector) {
|
||||
|
@ -1357,12 +1354,9 @@ static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
|
|||
return -EOPNOTSUPP;
|
||||
if (!priv->dma_cap.l3l4fnum)
|
||||
return -EOPNOTSUPP;
|
||||
if (priv->rss.enable) {
|
||||
struct stmmac_rss rss = { .enable = false, };
|
||||
|
||||
stmmac_rss_configure(priv, priv->hw, &rss,
|
||||
if (priv->rss.enable)
|
||||
stmmac_rss_configure(priv, priv->hw, NULL,
|
||||
priv->plat->rx_queues_to_use);
|
||||
}
|
||||
|
||||
dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
|
||||
if (!dissector) {
|
||||
|
|
|
@ -1762,7 +1762,7 @@ static int axienet_probe(struct platform_device *pdev)
|
|||
}
|
||||
} else {
|
||||
lp->phy_mode = of_get_phy_mode(pdev->dev.of_node);
|
||||
if (lp->phy_mode < 0) {
|
||||
if ((int)lp->phy_mode < 0) {
|
||||
ret = -EINVAL;
|
||||
goto free_netdev;
|
||||
}
|
||||
|
|
|
@ -1235,6 +1235,7 @@ deliver:
|
|||
macsec_rxsa_put(rx_sa);
|
||||
macsec_rxsc_put(rx_sc);
|
||||
|
||||
skb_orphan(skb);
|
||||
ret = gro_cells_receive(&macsec->gro_cells, skb);
|
||||
if (ret == NET_RX_SUCCESS)
|
||||
count_rx(dev, skb->len);
|
||||
|
|
|
@ -460,9 +460,9 @@ config RENESAS_PHY
|
|||
Supports the Renesas PHYs uPD60620 and uPD60620A.
|
||||
|
||||
config ROCKCHIP_PHY
|
||||
tristate "Driver for Rockchip Ethernet PHYs"
|
||||
---help---
|
||||
Currently supports the integrated Ethernet PHY.
|
||||
tristate "Driver for Rockchip Ethernet PHYs"
|
||||
---help---
|
||||
Currently supports the integrated Ethernet PHY.
|
||||
|
||||
config SMSC_PHY
|
||||
tristate "SMSC PHYs"
|
||||
|
|
|
@ -763,6 +763,8 @@ static int ksz9031_get_features(struct phy_device *phydev)
|
|||
* Whenever the device's Asymmetric Pause capability is set to 1,
|
||||
* link-up may fail after a link-up to link-down transition.
|
||||
*
|
||||
* The Errata Sheet is for ksz9031, but ksz9021 has the same issue
|
||||
*
|
||||
* Workaround:
|
||||
* Do not enable the Asymmetric Pause capability bit.
|
||||
*/
|
||||
|
@ -1076,6 +1078,7 @@ static struct phy_driver ksphy_driver[] = {
|
|||
/* PHY_GBIT_FEATURES */
|
||||
.driver_data = &ksz9021_type,
|
||||
.probe = kszphy_probe,
|
||||
.get_features = ksz9031_get_features,
|
||||
.config_init = ksz9021_config_init,
|
||||
.ack_interrupt = kszphy_ack_interrupt,
|
||||
.config_intr = kszphy_config_intr,
|
||||
|
|
|
@ -105,14 +105,17 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
|
|||
|
||||
static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
|
||||
{
|
||||
u16 lb_dis = BIT(1);
|
||||
|
||||
if (disable)
|
||||
ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1);
|
||||
ns_exp_write(phydev, 0x1c0,
|
||||
ns_exp_read(phydev, 0x1c0) | lb_dis);
|
||||
else
|
||||
ns_exp_write(phydev, 0x1c0,
|
||||
ns_exp_read(phydev, 0x1c0) & 0xfffe);
|
||||
ns_exp_read(phydev, 0x1c0) & ~lb_dis);
|
||||
|
||||
pr_debug("10BASE-T HDX loopback %s\n",
|
||||
(ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
|
||||
(ns_exp_read(phydev, 0x1c0) & lb_dis) ? "off" : "on");
|
||||
}
|
||||
|
||||
static int ns_config_init(struct phy_device *phydev)
|
||||
|
|
|
@ -1415,6 +1415,8 @@ static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
|
|||
netif_wake_queue(ppp->dev);
|
||||
else
|
||||
netif_stop_queue(ppp->dev);
|
||||
} else {
|
||||
kfree_skb(skb);
|
||||
}
|
||||
ppp_xmit_unlock(ppp);
|
||||
}
|
||||
|
|
|
@ -1200,7 +1200,7 @@ err_kfree:
|
|||
kfree_skb(skb);
|
||||
err:
|
||||
rcu_read_lock();
|
||||
tap = rcu_dereference(q->tap);
|
||||
tap = rcu_dereference(q->tap);
|
||||
if (tap && tap->count_tx_dropped)
|
||||
tap->count_tx_dropped(tap);
|
||||
rcu_read_unlock();
|
||||
|
|
|
@ -681,8 +681,12 @@ cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
|
|||
u8 ep;
|
||||
|
||||
for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
|
||||
|
||||
e = intf->cur_altsetting->endpoint + ep;
|
||||
|
||||
/* ignore endpoints which cannot transfer data */
|
||||
if (!usb_endpoint_maxp(&e->desc))
|
||||
continue;
|
||||
|
||||
switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
|
||||
case USB_ENDPOINT_XFER_INT:
|
||||
if (usb_endpoint_dir_in(&e->desc)) {
|
||||
|
|
|
@ -100,6 +100,11 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
|
|||
int intr = 0;
|
||||
|
||||
e = alt->endpoint + ep;
|
||||
|
||||
/* ignore endpoints which cannot transfer data */
|
||||
if (!usb_endpoint_maxp(&e->desc))
|
||||
continue;
|
||||
|
||||
switch (e->desc.bmAttributes) {
|
||||
case USB_ENDPOINT_XFER_INT:
|
||||
if (!usb_endpoint_dir_in(&e->desc))
|
||||
|
@ -339,6 +344,8 @@ void usbnet_update_max_qlen(struct usbnet *dev)
|
|||
{
|
||||
enum usb_device_speed speed = dev->udev->speed;
|
||||
|
||||
if (!dev->rx_urb_size || !dev->hard_mtu)
|
||||
goto insanity;
|
||||
switch (speed) {
|
||||
case USB_SPEED_HIGH:
|
||||
dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
|
||||
|
@ -355,6 +362,7 @@ void usbnet_update_max_qlen(struct usbnet *dev)
|
|||
dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
|
||||
break;
|
||||
default:
|
||||
insanity:
|
||||
dev->rx_qlen = dev->tx_qlen = 4;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1154,7 +1154,8 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
|
|||
struct sk_buff *skb;
|
||||
int err;
|
||||
|
||||
if (family == AF_INET6 && !ipv6_mod_enabled())
|
||||
if ((family == AF_INET6 || family == RTNL_FAMILY_IP6MR) &&
|
||||
!ipv6_mod_enabled())
|
||||
return 0;
|
||||
|
||||
skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
|
||||
|
|
|
@ -34,7 +34,7 @@ config ATH_TRACEPOINTS
|
|||
depends on ATH_DEBUG
|
||||
depends on EVENT_TRACING
|
||||
---help---
|
||||
This option enables tracepoints for atheros wireless drivers.
|
||||
This option enables tracepoints for atheros wireless drivers.
|
||||
Currently, ath9k makes use of this facility.
|
||||
|
||||
config ATH_REG_DYNAMIC_USER_REG_HINTS
|
||||
|
|
|
@ -5,5 +5,5 @@ config AR5523
|
|||
select ATH_COMMON
|
||||
select FW_LOADER
|
||||
---help---
|
||||
This module add support for AR5523 based USB dongles such as D-Link
|
||||
DWL-G132, Netgear WPN111 and many more.
|
||||
This module add support for AR5523 based USB dongles such as D-Link
|
||||
DWL-G132, Netgear WPN111 and many more.
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
config ATH6KL
|
||||
tristate "Atheros mobile chipsets support"
|
||||
depends on CFG80211
|
||||
---help---
|
||||
---help---
|
||||
This module adds core support for wireless adapters based on
|
||||
Atheros AR6003 and AR6004 chipsets. You still need separate
|
||||
bus drivers for USB and SDIO to be able to use real devices.
|
||||
|
|
|
@ -148,7 +148,7 @@ config ATH9K_CHANNEL_CONTEXT
|
|||
depends on ATH9K
|
||||
default n
|
||||
---help---
|
||||
This option enables channel context support in ath9k, which is needed
|
||||
This option enables channel context support in ath9k, which is needed
|
||||
for multi-channel concurrency. Enable this if P2P PowerSave support
|
||||
is required.
|
||||
|
||||
|
|
|
@ -41,9 +41,9 @@ config CARL9170_WPC
|
|||
default y
|
||||
|
||||
config CARL9170_HWRNG
|
||||
bool "Random number generator"
|
||||
depends on CARL9170 && (HW_RANDOM = y || HW_RANDOM = CARL9170)
|
||||
default n
|
||||
bool "Random number generator"
|
||||
depends on CARL9170 && (HW_RANDOM = y || HW_RANDOM = CARL9170)
|
||||
default n
|
||||
help
|
||||
Provides a hardware random number generator to the kernel.
|
||||
|
||||
|
|
|
@ -1012,11 +1012,11 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
|
|||
skb_orphan(skb);
|
||||
|
||||
if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) {
|
||||
wil_dbg_txrx(wil, "Rx drop %d bytes\n", skb->len);
|
||||
dev_kfree_skb(skb);
|
||||
ndev->stats.rx_dropped++;
|
||||
stats->rx_replay++;
|
||||
stats->rx_dropped++;
|
||||
wil_dbg_txrx(wil, "Rx drop %d bytes\n", skb->len);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
|
@ -20,22 +20,22 @@ config ATMEL
|
|||
select FW_LOADER
|
||||
select CRC32
|
||||
---help---
|
||||
A driver 802.11b wireless cards based on the Atmel fast-vnet
|
||||
chips. This driver supports standard Linux wireless extensions.
|
||||
A driver 802.11b wireless cards based on the Atmel fast-vnet
|
||||
chips. This driver supports standard Linux wireless extensions.
|
||||
|
||||
Many cards based on this chipset do not have flash memory
|
||||
and need their firmware loaded at start-up. If yours is
|
||||
one of these, you will need to provide a firmware image
|
||||
to be loaded into the card by the driver. The Atmel
|
||||
firmware package can be downloaded from
|
||||
<http://www.thekelleys.org.uk/atmel>
|
||||
Many cards based on this chipset do not have flash memory
|
||||
and need their firmware loaded at start-up. If yours is
|
||||
one of these, you will need to provide a firmware image
|
||||
to be loaded into the card by the driver. The Atmel
|
||||
firmware package can be downloaded from
|
||||
<http://www.thekelleys.org.uk/atmel>
|
||||
|
||||
config PCI_ATMEL
|
||||
tristate "Atmel at76c506 PCI cards"
|
||||
depends on ATMEL && PCI
|
||||
---help---
|
||||
Enable support for PCI and mini-PCI cards containing the
|
||||
Atmel at76c506 chip.
|
||||
Enable support for PCI and mini-PCI cards containing the
|
||||
Atmel at76c506 chip.
|
||||
|
||||
config PCMCIA_ATMEL
|
||||
tristate "Atmel at76c502/at76c504 PCMCIA cards"
|
||||
|
@ -48,11 +48,11 @@ config PCMCIA_ATMEL
|
|||
Atmel at76c502 and at76c504 chips.
|
||||
|
||||
config AT76C50X_USB
|
||||
tristate "Atmel at76c503/at76c505/at76c505a USB cards"
|
||||
depends on MAC80211 && USB
|
||||
select FW_LOADER
|
||||
---help---
|
||||
Enable support for USB Wireless devices using Atmel at76c503,
|
||||
at76c505 or at76c505a chips.
|
||||
tristate "Atmel at76c503/at76c505/at76c505a USB cards"
|
||||
depends on MAC80211 && USB
|
||||
select FW_LOADER
|
||||
---help---
|
||||
Enable support for USB Wireless devices using Atmel at76c503,
|
||||
at76c505 or at76c505a chips.
|
||||
|
||||
endif # WLAN_VENDOR_ATMEL
|
||||
|
|
|
@ -13,37 +13,37 @@ config IPW2100
|
|||
select LIB80211
|
||||
select LIBIPW
|
||||
---help---
|
||||
A driver for the Intel PRO/Wireless 2100 Network
|
||||
A driver for the Intel PRO/Wireless 2100 Network
|
||||
Connection 802.11b wireless network adapter.
|
||||
|
||||
See <file:Documentation/networking/device_drivers/intel/ipw2100.txt>
|
||||
See <file:Documentation/networking/device_drivers/intel/ipw2100.txt>
|
||||
for information on the capabilities currently enabled in this driver
|
||||
and for tips for debugging issues and problems.
|
||||
|
||||
In order to use this driver, you will need a firmware image for it.
|
||||
You can obtain the firmware from
|
||||
<http://ipw2100.sf.net/>. Once you have the firmware image, you
|
||||
You can obtain the firmware from
|
||||
<http://ipw2100.sf.net/>. Once you have the firmware image, you
|
||||
will need to place it in /lib/firmware.
|
||||
|
||||
You will also very likely need the Wireless Tools in order to
|
||||
configure your card:
|
||||
You will also very likely need the Wireless Tools in order to
|
||||
configure your card:
|
||||
|
||||
<http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
|
||||
<http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
|
||||
|
||||
It is recommended that you compile this driver as a module (M)
|
||||
rather than built-in (Y). This driver requires firmware at device
|
||||
initialization time, and when built-in this typically happens
|
||||
before the filesystem is accessible (hence firmware will be
|
||||
unavailable and initialization will fail). If you do choose to build
|
||||
this driver into your kernel image, you can avoid this problem by
|
||||
including the firmware and a firmware loader in an initramfs.
|
||||
|
||||
It is recommended that you compile this driver as a module (M)
|
||||
rather than built-in (Y). This driver requires firmware at device
|
||||
initialization time, and when built-in this typically happens
|
||||
before the filesystem is accessible (hence firmware will be
|
||||
unavailable and initialization will fail). If you do choose to build
|
||||
this driver into your kernel image, you can avoid this problem by
|
||||
including the firmware and a firmware loader in an initramfs.
|
||||
|
||||
config IPW2100_MONITOR
|
||||
bool "Enable promiscuous mode"
|
||||
depends on IPW2100
|
||||
---help---
|
||||
bool "Enable promiscuous mode"
|
||||
depends on IPW2100
|
||||
---help---
|
||||
Enables promiscuous/monitor mode support for the ipw2100 driver.
|
||||
With this feature compiled into the driver, you can switch to
|
||||
With this feature compiled into the driver, you can switch to
|
||||
promiscuous mode via the Wireless Tool's Monitor mode. While in this
|
||||
mode, no packets can be sent.
|
||||
|
||||
|
@ -51,17 +51,17 @@ config IPW2100_DEBUG
|
|||
bool "Enable full debugging output in IPW2100 module."
|
||||
depends on IPW2100
|
||||
---help---
|
||||
This option will enable debug tracing output for the IPW2100.
|
||||
This option will enable debug tracing output for the IPW2100.
|
||||
|
||||
This will result in the kernel module being ~60k larger. You can
|
||||
control which debug output is sent to the kernel log by setting the
|
||||
value in
|
||||
This will result in the kernel module being ~60k larger. You can
|
||||
control which debug output is sent to the kernel log by setting the
|
||||
value in
|
||||
|
||||
/sys/bus/pci/drivers/ipw2100/debug_level
|
||||
|
||||
This entry will only exist if this option is enabled.
|
||||
|
||||
If you are not trying to debug or develop the IPW2100 driver, you
|
||||
If you are not trying to debug or develop the IPW2100 driver, you
|
||||
most likely want to say N here.
|
||||
|
||||
config IPW2200
|
||||
|
@ -75,37 +75,37 @@ config IPW2200
|
|||
select LIB80211
|
||||
select LIBIPW
|
||||
---help---
|
||||
A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
|
||||
Connection adapters.
|
||||
A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
|
||||
Connection adapters.
|
||||
|
||||
See <file:Documentation/networking/device_drivers/intel/ipw2200.txt>
|
||||
See <file:Documentation/networking/device_drivers/intel/ipw2200.txt>
|
||||
for information on the capabilities currently enabled in this
|
||||
driver and for tips for debugging issues and problems.
|
||||
|
||||
In order to use this driver, you will need a firmware image for it.
|
||||
You can obtain the firmware from
|
||||
<http://ipw2200.sf.net/>. See the above referenced README.ipw2200
|
||||
You can obtain the firmware from
|
||||
<http://ipw2200.sf.net/>. See the above referenced README.ipw2200
|
||||
for information on where to install the firmware images.
|
||||
|
||||
You will also very likely need the Wireless Tools in order to
|
||||
configure your card:
|
||||
You will also very likely need the Wireless Tools in order to
|
||||
configure your card:
|
||||
|
||||
<http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
|
||||
<http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
|
||||
|
||||
It is recommended that you compile this driver as a module (M)
|
||||
rather than built-in (Y). This driver requires firmware at device
|
||||
initialization time, and when built-in this typically happens
|
||||
before the filesystem is accessible (hence firmware will be
|
||||
unavailable and initialization will fail). If you do choose to build
|
||||
this driver into your kernel image, you can avoid this problem by
|
||||
including the firmware and a firmware loader in an initramfs.
|
||||
It is recommended that you compile this driver as a module (M)
|
||||
rather than built-in (Y). This driver requires firmware at device
|
||||
initialization time, and when built-in this typically happens
|
||||
before the filesystem is accessible (hence firmware will be
|
||||
unavailable and initialization will fail). If you do choose to build
|
||||
this driver into your kernel image, you can avoid this problem by
|
||||
including the firmware and a firmware loader in an initramfs.
|
||||
|
||||
config IPW2200_MONITOR
|
||||
bool "Enable promiscuous mode"
|
||||
depends on IPW2200
|
||||
---help---
|
||||
bool "Enable promiscuous mode"
|
||||
depends on IPW2200
|
||||
---help---
|
||||
Enables promiscuous/monitor mode support for the ipw2200 driver.
|
||||
With this feature compiled into the driver, you can switch to
|
||||
With this feature compiled into the driver, you can switch to
|
||||
promiscuous mode via the Wireless Tool's Monitor mode. While in this
|
||||
mode, no packets can be sent.
|
||||
|
||||
|
@ -118,28 +118,28 @@ config IPW2200_PROMISCUOUS
|
|||
depends on IPW2200_MONITOR
|
||||
select IPW2200_RADIOTAP
|
||||
---help---
|
||||
Enables the creation of a second interface prefixed 'rtap'.
|
||||
This second interface will provide every received in radiotap
|
||||
Enables the creation of a second interface prefixed 'rtap'.
|
||||
This second interface will provide every received in radiotap
|
||||
format.
|
||||
|
||||
This is useful for performing wireless network analysis while
|
||||
maintaining an active association.
|
||||
This is useful for performing wireless network analysis while
|
||||
maintaining an active association.
|
||||
|
||||
Example usage:
|
||||
Example usage:
|
||||
|
||||
% modprobe ipw2200 rtap_iface=1
|
||||
% ifconfig rtap0 up
|
||||
% tethereal -i rtap0
|
||||
% modprobe ipw2200 rtap_iface=1
|
||||
% ifconfig rtap0 up
|
||||
% tethereal -i rtap0
|
||||
|
||||
If you do not specify 'rtap_iface=1' as a module parameter then
|
||||
the rtap interface will not be created and you will need to turn
|
||||
it on via sysfs:
|
||||
|
||||
% echo 1 > /sys/bus/pci/drivers/ipw2200/*/rtap_iface
|
||||
If you do not specify 'rtap_iface=1' as a module parameter then
|
||||
the rtap interface will not be created and you will need to turn
|
||||
it on via sysfs:
|
||||
|
||||
% echo 1 > /sys/bus/pci/drivers/ipw2200/*/rtap_iface
|
||||
|
||||
config IPW2200_QOS
|
||||
bool "Enable QoS support"
|
||||
depends on IPW2200
|
||||
bool "Enable QoS support"
|
||||
depends on IPW2200
|
||||
|
||||
config IPW2200_DEBUG
|
||||
bool "Enable full debugging output in IPW2200 module."
|
||||
|
|
|
@ -91,9 +91,9 @@ config IWLEGACY_DEBUG
|
|||
any problems you may encounter.
|
||||
|
||||
config IWLEGACY_DEBUGFS
|
||||
bool "iwlegacy (iwl 3945/4965) debugfs support"
|
||||
depends on IWLEGACY && MAC80211_DEBUGFS
|
||||
---help---
|
||||
bool "iwlegacy (iwl 3945/4965) debugfs support"
|
||||
depends on IWLEGACY && MAC80211_DEBUGFS
|
||||
---help---
|
||||
Enable creation of debugfs files for the iwlegacy drivers. This
|
||||
is a low-impact option that allows getting insight into the
|
||||
driver's state at runtime.
|
||||
|
|
|
@ -119,9 +119,9 @@ config IWLWIFI_DEBUG
|
|||
any problems you may encounter.
|
||||
|
||||
config IWLWIFI_DEBUGFS
|
||||
bool "iwlwifi debugfs support"
|
||||
depends on MAC80211_DEBUGFS
|
||||
---help---
|
||||
bool "iwlwifi debugfs support"
|
||||
depends on MAC80211_DEBUGFS
|
||||
---help---
|
||||
Enable creation of debugfs files for the iwlwifi drivers. This
|
||||
is a low-impact option that allows getting insight into the
|
||||
driver's state at runtime.
|
||||
|
|
|
@ -889,11 +889,13 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
|
|||
* firmware versions. Unfortunately, we don't have a TLV API
|
||||
* flag to rely on, so rely on the major version which is in
|
||||
* the first byte of ucode_ver. This was implemented
|
||||
* initially on version 38 and then backported to 36, 29 and
|
||||
* 17.
|
||||
* initially on version 38 and then backported to29 and 17.
|
||||
* The intention was to have it in 36 as well, but not all
|
||||
* 8000 family got this feature enabled. The 8000 family is
|
||||
* the only one using version 36, so skip this version
|
||||
* entirely.
|
||||
*/
|
||||
return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
|
||||
IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
|
||||
IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
|
||||
IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
|
||||
}
|
||||
|
|
|
@ -555,16 +555,19 @@ static int compare_temps(const void *a, const void *b)
|
|||
return ((s16)le16_to_cpu(*(__le16 *)a) -
|
||||
(s16)le16_to_cpu(*(__le16 *)b));
|
||||
}
|
||||
#endif
|
||||
|
||||
int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
|
||||
{
|
||||
struct temp_report_ths_cmd cmd = {0};
|
||||
int ret, i, j, idx = 0;
|
||||
int ret;
|
||||
#ifdef CONFIG_THERMAL
|
||||
int i, j, idx = 0;
|
||||
|
||||
lockdep_assert_held(&mvm->mutex);
|
||||
|
||||
if (!mvm->tz_device.tzone)
|
||||
return -EINVAL;
|
||||
goto send;
|
||||
|
||||
/* The driver holds array of temperature trips that are unsorted
|
||||
* and uncompressed, the FW should get it compressed and sorted
|
||||
|
@ -597,6 +600,7 @@ int iwl_mvm_send_temp_report_ths_cmd(struct iwl_mvm *mvm)
|
|||
}
|
||||
|
||||
send:
|
||||
#endif
|
||||
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
|
||||
TEMP_REPORTING_THRESHOLDS_CMD),
|
||||
0, sizeof(cmd), &cmd);
|
||||
|
@ -607,6 +611,7 @@ send:
|
|||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_THERMAL
|
||||
static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
|
||||
int *temperature)
|
||||
{
|
||||
|
|
|
@ -333,7 +333,6 @@ static int mt7615_driver_own(struct mt7615_dev *dev)
|
|||
|
||||
static int mt7615_load_patch(struct mt7615_dev *dev)
|
||||
{
|
||||
const char *firmware = MT7615_ROM_PATCH;
|
||||
const struct mt7615_patch_hdr *hdr;
|
||||
const struct firmware *fw = NULL;
|
||||
int len, ret, sem;
|
||||
|
@ -349,7 +348,7 @@ static int mt7615_load_patch(struct mt7615_dev *dev)
|
|||
return -EAGAIN;
|
||||
}
|
||||
|
||||
ret = request_firmware(&fw, firmware, dev->mt76.dev);
|
||||
ret = request_firmware(&fw, MT7615_ROM_PATCH, dev->mt76.dev);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
|
@ -447,13 +446,11 @@ mt7615_mcu_send_ram_firmware(struct mt7615_dev *dev,
|
|||
|
||||
static int mt7615_load_ram(struct mt7615_dev *dev)
|
||||
{
|
||||
const struct firmware *fw;
|
||||
const struct mt7615_fw_trailer *hdr;
|
||||
const char *n9_firmware = MT7615_FIRMWARE_N9;
|
||||
const char *cr4_firmware = MT7615_FIRMWARE_CR4;
|
||||
const struct firmware *fw;
|
||||
int ret;
|
||||
|
||||
ret = request_firmware(&fw, n9_firmware, dev->mt76.dev);
|
||||
ret = request_firmware(&fw, MT7615_FIRMWARE_N9, dev->mt76.dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -482,7 +479,7 @@ static int mt7615_load_ram(struct mt7615_dev *dev)
|
|||
|
||||
release_firmware(fw);
|
||||
|
||||
ret = request_firmware(&fw, cr4_firmware, dev->mt76.dev);
|
||||
ret = request_firmware(&fw, MT7615_FIRMWARE_CR4, dev->mt76.dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
|
|
@ -26,9 +26,9 @@
|
|||
#define MT7615_RX_RING_SIZE 1024
|
||||
#define MT7615_RX_MCU_RING_SIZE 512
|
||||
|
||||
#define MT7615_FIRMWARE_CR4 "mt7615_cr4.bin"
|
||||
#define MT7615_FIRMWARE_N9 "mt7615_n9.bin"
|
||||
#define MT7615_ROM_PATCH "mt7615_rom_patch.bin"
|
||||
#define MT7615_FIRMWARE_CR4 "mediatek/mt7615_cr4.bin"
|
||||
#define MT7615_FIRMWARE_N9 "mediatek/mt7615_n9.bin"
|
||||
#define MT7615_ROM_PATCH "mediatek/mt7615_rom_patch.bin"
|
||||
|
||||
#define MT7615_EEPROM_SIZE 1024
|
||||
#define MT7615_TOKEN_SIZE 4096
|
||||
|
|
|
@ -98,17 +98,17 @@ config RT2800PCI_RT53XX
|
|||
bool "rt2800pci - Include support for rt53xx devices (EXPERIMENTAL)"
|
||||
default y
|
||||
---help---
|
||||
This adds support for rt53xx wireless chipset family to the
|
||||
rt2800pci driver.
|
||||
Supported chips: RT5390
|
||||
This adds support for rt53xx wireless chipset family to the
|
||||
rt2800pci driver.
|
||||
Supported chips: RT5390
|
||||
|
||||
config RT2800PCI_RT3290
|
||||
bool "rt2800pci - Include support for rt3290 devices (EXPERIMENTAL)"
|
||||
default y
|
||||
---help---
|
||||
This adds support for rt3290 wireless chipset family to the
|
||||
rt2800pci driver.
|
||||
Supported chips: RT3290
|
||||
This adds support for rt3290 wireless chipset family to the
|
||||
rt2800pci driver.
|
||||
Supported chips: RT3290
|
||||
endif
|
||||
|
||||
config RT2500USB
|
||||
|
@ -176,16 +176,16 @@ config RT2800USB_RT3573
|
|||
config RT2800USB_RT53XX
|
||||
bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
|
||||
---help---
|
||||
This adds support for rt53xx wireless chipset family to the
|
||||
rt2800usb driver.
|
||||
Supported chips: RT5370
|
||||
This adds support for rt53xx wireless chipset family to the
|
||||
rt2800usb driver.
|
||||
Supported chips: RT5370
|
||||
|
||||
config RT2800USB_RT55XX
|
||||
bool "rt2800usb - Include support for rt55xx devices (EXPERIMENTAL)"
|
||||
---help---
|
||||
This adds support for rt55xx wireless chipset family to the
|
||||
rt2800usb driver.
|
||||
Supported chips: RT5572
|
||||
This adds support for rt55xx wireless chipset family to the
|
||||
rt2800usb driver.
|
||||
Supported chips: RT5572
|
||||
|
||||
config RT2800USB_UNKNOWN
|
||||
bool "rt2800usb - Include support for unknown (USB) devices"
|
||||
|
|
|
@ -707,9 +707,6 @@ int rtw_download_firmware(struct rtw_dev *rtwdev, struct rtw_fw_state *fw)
|
|||
rtwdev->h2c.last_box_num = 0;
|
||||
rtwdev->h2c.seq = 0;
|
||||
|
||||
rtw_fw_send_general_info(rtwdev);
|
||||
rtw_fw_send_phydm_info(rtwdev);
|
||||
|
||||
rtw_flag_set(rtwdev, RTW_FLAG_FW_RUNNING);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -704,6 +704,10 @@ static int rtw_power_on(struct rtw_dev *rtwdev)
|
|||
goto err_off;
|
||||
}
|
||||
|
||||
/* send H2C after HCI has started */
|
||||
rtw_fw_send_general_info(rtwdev);
|
||||
rtw_fw_send_phydm_info(rtwdev);
|
||||
|
||||
wifi_only = !rtwdev->efuse.btcoex;
|
||||
rtw_coex_power_on_setting(rtwdev);
|
||||
rtw_coex_init_hw_config(rtwdev, wifi_only);
|
||||
|
|
|
@ -90,16 +90,13 @@ static inline void *rtw_pci_get_tx_desc(struct rtw_pci_tx_ring *tx_ring, u8 idx)
|
|||
return tx_ring->r.head + offset;
|
||||
}
|
||||
|
||||
static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
|
||||
struct rtw_pci_tx_ring *tx_ring)
|
||||
static void rtw_pci_free_tx_ring_skbs(struct rtw_dev *rtwdev,
|
||||
struct rtw_pci_tx_ring *tx_ring)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
|
||||
struct rtw_pci_tx_data *tx_data;
|
||||
struct sk_buff *skb, *tmp;
|
||||
dma_addr_t dma;
|
||||
u8 *head = tx_ring->r.head;
|
||||
u32 len = tx_ring->r.len;
|
||||
int ring_sz = len * tx_ring->r.desc_size;
|
||||
|
||||
/* free every skb remained in tx list */
|
||||
skb_queue_walk_safe(&tx_ring->queue, skb, tmp) {
|
||||
|
@ -110,21 +107,30 @@ static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
|
|||
pci_unmap_single(pdev, dma, skb->len, PCI_DMA_TODEVICE);
|
||||
dev_kfree_skb_any(skb);
|
||||
}
|
||||
}
|
||||
|
||||
static void rtw_pci_free_tx_ring(struct rtw_dev *rtwdev,
|
||||
struct rtw_pci_tx_ring *tx_ring)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
|
||||
u8 *head = tx_ring->r.head;
|
||||
u32 len = tx_ring->r.len;
|
||||
int ring_sz = len * tx_ring->r.desc_size;
|
||||
|
||||
rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
|
||||
|
||||
/* free the ring itself */
|
||||
pci_free_consistent(pdev, ring_sz, head, tx_ring->r.dma);
|
||||
tx_ring->r.head = NULL;
|
||||
}
|
||||
|
||||
static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
|
||||
struct rtw_pci_rx_ring *rx_ring)
|
||||
static void rtw_pci_free_rx_ring_skbs(struct rtw_dev *rtwdev,
|
||||
struct rtw_pci_rx_ring *rx_ring)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
|
||||
struct sk_buff *skb;
|
||||
dma_addr_t dma;
|
||||
u8 *head = rx_ring->r.head;
|
||||
int buf_sz = RTK_PCI_RX_BUF_SIZE;
|
||||
int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
|
||||
dma_addr_t dma;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < rx_ring->r.len; i++) {
|
||||
|
@ -137,6 +143,16 @@ static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
|
|||
dev_kfree_skb(skb);
|
||||
rx_ring->buf[i] = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static void rtw_pci_free_rx_ring(struct rtw_dev *rtwdev,
|
||||
struct rtw_pci_rx_ring *rx_ring)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(rtwdev->dev);
|
||||
u8 *head = rx_ring->r.head;
|
||||
int ring_sz = rx_ring->r.desc_size * rx_ring->r.len;
|
||||
|
||||
rtw_pci_free_rx_ring_skbs(rtwdev, rx_ring);
|
||||
|
||||
pci_free_consistent(pdev, ring_sz, head, rx_ring->r.dma);
|
||||
}
|
||||
|
@ -484,6 +500,17 @@ static void rtw_pci_dma_reset(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
|
|||
rtwpci->rx_tag = 0;
|
||||
}
|
||||
|
||||
static void rtw_pci_dma_release(struct rtw_dev *rtwdev, struct rtw_pci *rtwpci)
|
||||
{
|
||||
struct rtw_pci_tx_ring *tx_ring;
|
||||
u8 queue;
|
||||
|
||||
for (queue = 0; queue < RTK_MAX_TX_QUEUE_NUM; queue++) {
|
||||
tx_ring = &rtwpci->tx_rings[queue];
|
||||
rtw_pci_free_tx_ring_skbs(rtwdev, tx_ring);
|
||||
}
|
||||
}
|
||||
|
||||
static int rtw_pci_start(struct rtw_dev *rtwdev)
|
||||
{
|
||||
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
|
||||
|
@ -505,6 +532,7 @@ static void rtw_pci_stop(struct rtw_dev *rtwdev)
|
|||
|
||||
spin_lock_irqsave(&rtwpci->irq_lock, flags);
|
||||
rtw_pci_disable_interrupt(rtwdev, rtwpci);
|
||||
rtw_pci_dma_release(rtwdev, rtwpci);
|
||||
spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
|
||||
}
|
||||
|
||||
|
|
|
@ -1633,7 +1633,7 @@ static bool check_read_regs(struct zd_usb *usb, struct usb_req_read_regs *req,
|
|||
*/
|
||||
if (rr->length < struct_size(regs, regs, count)) {
|
||||
dev_dbg_f(zd_usb_dev(usb),
|
||||
"error: actual length %d less than expected %ld\n",
|
||||
"error: actual length %d less than expected %zu\n",
|
||||
rr->length, struct_size(regs, regs, count));
|
||||
return false;
|
||||
}
|
||||
|
|
|
@ -661,7 +661,7 @@ static int st95hf_error_handling(struct st95hf_context *stcontext,
|
|||
result = -ETIMEDOUT;
|
||||
else
|
||||
result = -EIO;
|
||||
return result;
|
||||
return result;
|
||||
}
|
||||
|
||||
/* Check for CRC err only if CRC is present in the tag response */
|
||||
|
|
|
@ -362,7 +362,7 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev,
|
|||
int ret;
|
||||
|
||||
iface = of_get_phy_mode(np);
|
||||
if (iface < 0)
|
||||
if ((int)iface < 0)
|
||||
return NULL;
|
||||
if (of_phy_is_fixed_link(np)) {
|
||||
ret = of_phy_register_fixed_link(np);
|
||||
|
|
|
@ -155,7 +155,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
|
|||
err = -EINVAL;
|
||||
break;
|
||||
} else if (cmd == PTP_EXTTS_REQUEST) {
|
||||
req.extts.flags &= ~PTP_EXTTS_VALID_FLAGS;
|
||||
req.extts.flags &= PTP_EXTTS_V1_VALID_FLAGS;
|
||||
req.extts.rsv[0] = 0;
|
||||
req.extts.rsv[1] = 0;
|
||||
}
|
||||
|
@ -184,7 +184,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
|
|||
err = -EINVAL;
|
||||
break;
|
||||
} else if (cmd == PTP_PEROUT_REQUEST) {
|
||||
req.perout.flags &= ~PTP_PEROUT_VALID_FLAGS;
|
||||
req.perout.flags &= PTP_PEROUT_V1_VALID_FLAGS;
|
||||
req.perout.rsv[0] = 0;
|
||||
req.perout.rsv[1] = 0;
|
||||
req.perout.rsv[2] = 0;
|
||||
|
|
|
@ -282,7 +282,6 @@ enum {
|
|||
MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
|
||||
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
|
||||
MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942,
|
||||
MLX5_CMD_OP_SYNC_STEERING = 0xb00,
|
||||
MLX5_CMD_OP_FPGA_CREATE_QP = 0x960,
|
||||
MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961,
|
||||
MLX5_CMD_OP_FPGA_QUERY_QP = 0x962,
|
||||
|
@ -296,6 +295,7 @@ enum {
|
|||
MLX5_CMD_OP_DESTROY_UCTX = 0xa06,
|
||||
MLX5_CMD_OP_CREATE_UMEM = 0xa08,
|
||||
MLX5_CMD_OP_DESTROY_UMEM = 0xa0a,
|
||||
MLX5_CMD_OP_SYNC_STEERING = 0xb00,
|
||||
MLX5_CMD_OP_MAX
|
||||
};
|
||||
|
||||
|
@ -487,7 +487,7 @@ union mlx5_ifc_gre_key_bits {
|
|||
|
||||
struct mlx5_ifc_fte_match_set_misc_bits {
|
||||
u8 gre_c_present[0x1];
|
||||
u8 reserved_auto1[0x1];
|
||||
u8 reserved_at_1[0x1];
|
||||
u8 gre_k_present[0x1];
|
||||
u8 gre_s_present[0x1];
|
||||
u8 source_vhca_port[0x4];
|
||||
|
@ -5054,50 +5054,50 @@ struct mlx5_ifc_query_hca_cap_in_bits {
|
|||
|
||||
struct mlx5_ifc_other_hca_cap_bits {
|
||||
u8 roce[0x1];
|
||||
u8 reserved_0[0x27f];
|
||||
u8 reserved_at_1[0x27f];
|
||||
};
|
||||
|
||||
struct mlx5_ifc_query_other_hca_cap_out_bits {
|
||||
u8 status[0x8];
|
||||
u8 reserved_0[0x18];
|
||||
u8 reserved_at_8[0x18];
|
||||
|
||||
u8 syndrome[0x20];
|
||||
|
||||
u8 reserved_1[0x40];
|
||||
u8 reserved_at_40[0x40];
|
||||
|
||||
struct mlx5_ifc_other_hca_cap_bits other_capability;
|
||||
};
|
||||
|
||||
struct mlx5_ifc_query_other_hca_cap_in_bits {
|
||||
u8 opcode[0x10];
|
||||
u8 reserved_0[0x10];
|
||||
u8 reserved_at_10[0x10];
|
||||
|
||||
u8 reserved_1[0x10];
|
||||
u8 reserved_at_20[0x10];
|
||||
u8 op_mod[0x10];
|
||||
|
||||
u8 reserved_2[0x10];
|
||||
u8 reserved_at_40[0x10];
|
||||
u8 function_id[0x10];
|
||||
|
||||
u8 reserved_3[0x20];
|
||||
u8 reserved_at_60[0x20];
|
||||
};
|
||||
|
||||
struct mlx5_ifc_modify_other_hca_cap_out_bits {
|
||||
u8 status[0x8];
|
||||
u8 reserved_0[0x18];
|
||||
u8 reserved_at_8[0x18];
|
||||
|
||||
u8 syndrome[0x20];
|
||||
|
||||
u8 reserved_1[0x40];
|
||||
u8 reserved_at_40[0x40];
|
||||
};
|
||||
|
||||
struct mlx5_ifc_modify_other_hca_cap_in_bits {
|
||||
u8 opcode[0x10];
|
||||
u8 reserved_0[0x10];
|
||||
u8 reserved_at_10[0x10];
|
||||
|
||||
u8 reserved_1[0x10];
|
||||
u8 reserved_at_20[0x10];
|
||||
u8 op_mod[0x10];
|
||||
|
||||
u8 reserved_2[0x10];
|
||||
u8 reserved_at_40[0x10];
|
||||
u8 function_id[0x10];
|
||||
u8 field_select[0x20];
|
||||
|
||||
|
|
|
@ -1,13 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
|
||||
*/
|
||||
|
||||
#ifndef __ETH_NETX_H
|
||||
#define __ETH_NETX_H
|
||||
|
||||
struct netxeth_platform_data {
|
||||
unsigned int xcno; /* number of xmac/xpec engine this eth uses */
|
||||
};
|
||||
|
||||
#endif
|
|
@ -4144,8 +4144,17 @@ static inline void *skb_ext_find(const struct sk_buff *skb, enum skb_ext_id id)
|
|||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline void skb_ext_reset(struct sk_buff *skb)
|
||||
{
|
||||
if (unlikely(skb->active_extensions)) {
|
||||
__skb_ext_put(skb->extensions);
|
||||
skb->active_extensions = 0;
|
||||
}
|
||||
}
|
||||
#else
|
||||
static inline void skb_ext_put(struct sk_buff *skb) {}
|
||||
static inline void skb_ext_reset(struct sk_buff *skb) {}
|
||||
static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
|
||||
static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
|
||||
static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
|
||||
|
|
|
@ -71,6 +71,7 @@ struct inet_timewait_sock {
|
|||
tw_pad : 2, /* 2 bits hole */
|
||||
tw_tos : 8;
|
||||
u32 tw_txhash;
|
||||
u32 tw_priority;
|
||||
struct timer_list tw_timer;
|
||||
struct inet_bind_bucket *tw_tb;
|
||||
};
|
||||
|
|
|
@ -981,7 +981,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
|
|||
* upper-layer output functions
|
||||
*/
|
||||
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
|
||||
__u32 mark, struct ipv6_txoptions *opt, int tclass);
|
||||
__u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority);
|
||||
|
||||
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
|
||||
|
||||
|
|
|
@ -889,6 +889,8 @@ enum nft_chain_flags {
|
|||
NFT_CHAIN_HW_OFFLOAD = 0x2,
|
||||
};
|
||||
|
||||
#define NFT_CHAIN_POLICY_UNSET U8_MAX
|
||||
|
||||
/**
|
||||
* struct nft_chain - nf_tables chain
|
||||
*
|
||||
|
@ -1181,6 +1183,10 @@ struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table,
|
|||
const struct nlattr *nla,
|
||||
u8 genmask);
|
||||
|
||||
void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx,
|
||||
struct nft_flowtable *flowtable,
|
||||
enum nft_trans_phase phase);
|
||||
|
||||
void nft_register_flowtable_type(struct nf_flowtable_type *type);
|
||||
void nft_unregister_flowtable_type(struct nf_flowtable_type *type);
|
||||
|
||||
|
|
|
@ -53,10 +53,11 @@ struct rtable {
|
|||
unsigned int rt_flags;
|
||||
__u16 rt_type;
|
||||
__u8 rt_is_input;
|
||||
u8 rt_gw_family;
|
||||
__u8 rt_uses_gateway;
|
||||
|
||||
int rt_iif;
|
||||
|
||||
u8 rt_gw_family;
|
||||
/* Info on neighbour */
|
||||
union {
|
||||
__be32 rt_gw4;
|
||||
|
|
Некоторые файлы не были показаны из-за слишком большого количества измененных файлов Показать больше
Загрузка…
Ссылка в новой задаче