Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 "This pull request is dedicated to the upcoming snowpocalypse parts 2 and 3 in the Pacific Northwest:

  1) Drop profiles are broken because some drivers use dev_kfree_skb* instead of dev_consume_skb*, from Yang Wei.
  2) Fix IWLWIFI kconfig deps, from Luca Coelho.
  3) Fix percpu maps updating in bpftool, from Paolo Abeni.
  4) Missing station release in batman-adv, from Felix Fietkau.
  5) Fix some networking compat ioctl bugs, from Johannes Berg.
  6) ucc_geth must reset the BQL queue state when stopping the device, from Mathias Thore.
  7) Several XDP bug fixes in virtio_net from Toshiaki Makita.
  8) TSO packets must be sent always on queue 0 in stmmac, from Jose Abreu.
  9) Fix socket refcounting bug in RDS, from Eric Dumazet.
 10) Handle sparse cpu allocations in bpf selftests, from Martynas Pumputis.
 11) Make sure mgmt frames have enough tailroom in mac80211, from Felix Fietkau.
 12) Use safe list walking in sctp_sendmsg() asoc list traversal, from Greg Kroah-Hartman.
 13) Make DCCP's ccid_hc_[rt]x_parse_options always check for NULL ccid, from Eric Dumazet.
 14) Need to reload WoL password into bcmsysport device after deep sleeps, from Florian Fainelli.
 15) Remove filter from mask before freeing in cls_flower, from Petr Machata.
 16) Missing release and use after free in error paths of s390 qeth code, from Julian Wiedmann.
 17) Fix lockdep false positive in dsa code, from Marc Zyngier.
 18) Fix counting of ATU violations in mv88e6xxx, from Andrew Lunn.
 19) Fix EQ firmware assert in qed driver, from Manish Chopra.
 20) Don't default Cavium PTP to Y in kconfig, from Bjorn Helgaas"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (116 commits)
  net: dsa: b53: Fix for failure when irq is not defined in dt
  sit: check if IPv6 enabled before calling ip6_err_gen_icmpv6_unreach()
  geneve: should not call rt6_lookup() when ipv6 was disabled
  net: Don't default Cavium PTP driver to 'y'
  net: broadcom: replace dev_kfree_skb_irq by dev_consume_skb_irq for drop profiles
  net: via-velocity: replace dev_kfree_skb_irq by dev_consume_skb_irq for drop profiles
  net: tehuti: replace dev_kfree_skb_irq by dev_consume_skb_irq for drop profiles
  net: sun: replace dev_kfree_skb_irq by dev_consume_skb_irq for drop profiles
  net: fsl_ucc_hdlc: replace dev_kfree_skb_irq by dev_consume_skb_irq for drop profiles
  net: fec_mpc52xx: replace dev_kfree_skb_irq by dev_consume_skb_irq for drop profiles
  net: smsc: epic100: replace dev_kfree_skb_irq by dev_consume_skb_irq for drop profiles
  net: dscc4: replace dev_kfree_skb_irq by dev_consume_skb_irq for drop profiles
  net: tulip: de2104x: replace dev_kfree_skb_irq by dev_consume_skb_irq for drop profiles
  net: defxx: replace dev_kfree_skb_irq by dev_consume_skb_irq for drop profiles
  net/mlx5e: Don't overwrite pedit action when multiple pedit used
  net/mlx5e: Update hw flows when encap source mac changed
  qed*: Advance drivers version to 8.37.0.20
  qed: Change verbosity for coalescing message.
  qede: Fix system crash on configuring channels.
  qed: Consider TX tcs while deriving the max num_queues for PF.
  ...
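Item 1 hinges on the difference between the two skb-freeing helpers: drop monitors and perf drop profiles are driven by kfree_skb(), so a driver that frees successfully transmitted skbs with dev_kfree_skb_irq() makes every TX completion look like a packet drop. A minimal, hypothetical TX-completion handler (not taken from any of the patches below) illustrating the pattern the per-driver conversions follow:

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical TX-completion path, called in hard-IRQ context.
 * dev_consume_skb_irq() frees an skb that was transmitted successfully
 * and is ignored by drop profiles; dev_kfree_skb_irq() marks a genuine
 * drop. Both defer the actual free out of hard-IRQ context.
 */
static void example_tx_complete(struct net_device *dev, struct sk_buff *skb,
				bool sent_ok)
{
	if (sent_ok) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;
		dev_consume_skb_irq(skb);	/* successful TX, not a drop */
	} else {
		dev->stats.tx_errors++;
		dev_kfree_skb_irq(skb);		/* real drop, visible to drop monitors */
	}
}
```

The driver patches in this pull are one-line substitutions of exactly this kind on their TX-completion paths.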
@@ -2848,6 +2848,9 @@ F: include/uapi/linux/if_bonding.h
BPF (Safe dynamic programs and tools)
M: Alexei Starovoitov <ast@kernel.org>
M: Daniel Borkmann <daniel@iogearbox.net>
R: Martin KaFai Lau <kafai@fb.com>
R: Song Liu <songliubraving@fb.com>
R: Yonghong Song <yhs@fb.com>
L: netdev@vger.kernel.org
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
@@ -2873,6 +2876,8 @@ F: samples/bpf/
F: tools/bpf/
F: tools/lib/bpf/
F: tools/testing/selftests/bpf/
K: bpf
N: bpf

BPF JIT for ARM
M: Shubham Bansal <illusionist.neo@gmail.com>
@@ -12868,6 +12873,13 @@ F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt
F: drivers/net/dsa/realtek-smi*
F: drivers/net/dsa/rtl83*

REDPINE WIRELESS DRIVER
M: Amitkumar Karwar <amitkarwar@gmail.com>
M: Siva Rebbagondla <siva8118@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/rsi/

REGISTER MAP ABSTRACTION
M: Mark Brown <broonie@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -13696,6 +13708,15 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/sfc/

SFF/SFP/SFP+ MODULE SUPPORT
M: Russell King <linux@armlinux.org.uk>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/phy/phylink.c
F: drivers/net/phy/sfp*
F: include/linux/phylink.h
F: include/linux/sfp.h

SGI GRU DRIVER
M: Dimitri Sivanich <sivanich@sgi.com>
S: Maintained

@@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t)
spin_lock_irqsave(&timer->dev->lock, flags);
if (timer->id >= 0)
list_move_tail(&timer->list, &timer->dev->expired);
spin_unlock_irqrestore(&timer->dev->lock, flags);
wake_up_interruptible(&timer->dev->wait);
spin_unlock_irqrestore(&timer->dev->lock, flags);
}

static int

@@ -511,9 +511,6 @@ static void b53_srab_prepare_irq(struct platform_device *pdev)
/* Clear all pending interrupts */
writel(0xffffffff, priv->regs + B53_SRAB_INTR);

if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID)
return;

for (i = 0; i < B53_N_PORTS; i++) {
port = &priv->port_intrs[i];

@@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
struct mv88e6xxx_atu_entry entry;
int spid;
int err;
u16 val;

@@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
if (err)
goto out;

spid = entry.state;

if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU age out violation for %pM\n",
@@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)

if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU member violation for %pM portvec %x\n",
entry.mac, entry.portvec);
chip->ports[entry.portvec].atu_member_violation++;
"ATU member violation for %pM portvec %x spid %d\n",
entry.mac, entry.portvec, spid);
chip->ports[spid].atu_member_violation++;
}

if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU miss violation for %pM portvec %x\n",
entry.mac, entry.portvec);
chip->ports[entry.portvec].atu_miss_violation++;
"ATU miss violation for %pM portvec %x spid %d\n",
entry.mac, entry.portvec, spid);
chip->ports[spid].atu_miss_violation++;
}

if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU full violation for %pM portvec %x\n",
entry.mac, entry.portvec);
chip->ports[entry.portvec].atu_full_violation++;
"ATU full violation for %pM portvec %x spid %d\n",
entry.mac, entry.portvec, spid);
chip->ports[spid].atu_full_violation++;
}
mutex_unlock(&chip->reg_lock);

@@ -520,7 +520,6 @@ static void bcm_sysport_get_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
u32 reg;

wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
@@ -528,11 +527,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
if (!(priv->wolopts & WAKE_MAGICSECURE))
return;

/* Return the programmed SecureOn password */
reg = umac_readl(priv, UMAC_PSW_MS);
put_unaligned_be16(reg, &wol->sopass[0]);
reg = umac_readl(priv, UMAC_PSW_LS);
put_unaligned_be32(reg, &wol->sopass[2]);
memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
@@ -548,13 +543,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
if (wol->wolopts & ~supported)
return -EINVAL;

/* Program the SecureOn password */
if (wol->wolopts & WAKE_MAGICSECURE) {
umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
UMAC_PSW_MS);
umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
UMAC_PSW_LS);
}
if (wol->wolopts & WAKE_MAGICSECURE)
memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

/* Flag the device and relevant IRQ as wakeup capable */
if (wol->wolopts) {
@@ -2649,13 +2639,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
unsigned int index, i = 0;
u32 reg;

/* Password has already been programmed */
reg = umac_readl(priv, UMAC_MPD_CTRL);
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
reg |= MPD_EN;
reg &= ~PSW_EN;
if (priv->wolopts & WAKE_MAGICSECURE)
if (priv->wolopts & WAKE_MAGICSECURE) {
/* Program the SecureOn password */
umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
UMAC_PSW_MS);
umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
UMAC_PSW_LS);
reg |= PSW_EN;
}
umac_writel(priv, reg, UMAC_MPD_CTRL);

if (priv->wolopts & WAKE_FILTER) {

@@ -12,6 +12,7 @@
#define __BCM_SYSPORT_H

#include <linux/bitmap.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/net_dim.h>

@@ -778,6 +779,7 @@ struct bcm_sysport_priv {
unsigned int crc_fwd:1;
u16 rev;
u32 wolopts;
u8 sopass[SOPASS_MAX];
unsigned int wol_irq_disabled:1;

/* MIB related fields */

@@ -4973,12 +4973,18 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
u32 map_idx = ring->map_idx;
unsigned int vector;

vector = bp->irq_tbl[map_idx].vector;
disable_irq_nosync(vector);
rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
if (rc)
if (rc) {
enable_irq(vector);
goto err_out;
}
bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
enable_irq(vector);
bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;

if (!i) {

@@ -1288,7 +1288,7 @@ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
* for transmits, we just free buffers.
*/

dev_kfree_skb_irq(sb);
dev_consume_skb_irq(sb);

/*
* .. and advance to the next buffer.

@@ -54,7 +54,6 @@ config CAVIUM_PTP
tristate "Cavium PTP coprocessor as PTP clock"
depends on 64BIT && PCI
imply PTP_1588_CLOCK
default y
---help---
This driver adds support for the Precision Time Protocol Clocks and
Timestamping coprocessor (PTP) found on Cavium processors.

@@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
* csum is correct or is zero.
*/
if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) {
tcp_udp_csum_ok && outer_csum_ok &&
(ipv4_csum_ok || ipv6)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = encap;
}

@@ -585,7 +585,7 @@ static void de_tx (struct de_private *de)
netif_dbg(de, tx_done, de->dev,
"tx done, slot %d\n", tx_tail);
}
dev_kfree_skb_irq(skb);
dev_consume_skb_irq(skb);
}

next:

@@ -369,7 +369,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len,
DMA_TO_DEVICE);

dev_kfree_skb_irq(skb);
dev_consume_skb_irq(skb);
}
spin_unlock(&priv->lock);

@@ -1879,6 +1879,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
u16 i, j;
u8 __iomem *bd;

netdev_reset_queue(ugeth->ndev);

ug_info = ugeth->ug_info;
uf_info = &ug_info->uf_info;

@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
memset(p, 0, regs->len);
memcpy_fromio(p, io, B3_RAM_ADDR);

memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
regs->len - B3_RI_WTO_R1);
if (regs->len > B3_RI_WTO_R1) {
memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
regs->len - B3_RI_WTO_R1);
}
}

/* Wake on Lan only supported on Yukon chips with rev 1 or above */

@@ -256,6 +256,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
e->m_neigh.family = n->ops->family;
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e->out_dev = out_dev;
e->route_dev = route_dev;

/* It's important to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
@@ -369,6 +370,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
e->m_neigh.family = n->ops->family;
memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
e->out_dev = out_dev;
e->route_dev = route_dev;

/* It's importent to add the neigh to the hash table before checking
* the neigh validity state. So if we'll get a notification, in case the
@@ -612,16 +614,18 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
void *headers_v)
void *headers_v, u8 *match_level)
{
int tunnel_type;
int err = 0;

tunnel_type = mlx5e_tc_tun_get_type(filter_dev);
if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
*match_level = MLX5_MATCH_L4;
err = mlx5e_tc_tun_parse_vxlan(priv, spec, f,
headers_c, headers_v);
} else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) {
*match_level = MLX5_MATCH_L3;
err = mlx5e_tc_tun_parse_gretap(priv, spec, f,
headers_c, headers_v);
} else {

@@ -39,6 +39,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
void *headers_c,
void *headers_v);
void *headers_v, u8 *match_level);

#endif //__MLX5_EN_TC_TUNNEL_H__

@@ -596,6 +596,10 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
ether_addr_copy(e->h_dest, ha);
ether_addr_copy(eth->h_dest, ha);
/* Update the encap source mac, in case that we delete
* the flows when encap source mac changed.
*/
ether_addr_copy(eth->h_source, e->route_dev->dev_addr);

mlx5e_tc_encap_flows_add(priv, e);
}

@@ -148,6 +148,7 @@ struct mlx5e_encap_entry {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */

struct net_device *out_dev;
struct net_device *route_dev;
int tunnel_type;
int tunnel_hlen;
int reformat_type;

@@ -128,6 +128,7 @@ struct mlx5e_tc_flow_parse_attr {
struct net_device *filter_dev;
struct mlx5_flow_spec spec;
int num_mod_hdr_actions;
int max_mod_hdr_actions;
void *mod_hdr_actions;
int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};
@@ -1302,7 +1303,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
static int parse_tunnel_attr(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
struct net_device *filter_dev)
struct net_device *filter_dev, u8 *match_level)
{
struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1317,7 +1318,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
int err = 0;

err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
headers_c, headers_v);
headers_c, headers_v, match_level);
if (err) {
NL_SET_ERR_MSG_MOD(extack,
"failed to parse tunnel attributes");
@@ -1426,7 +1427,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f,
struct net_device *filter_dev,
u8 *match_level)
u8 *match_level, u8 *tunnel_match_level)
{
struct netlink_ext_ack *extack = f->common.extack;
void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
@@ -1477,7 +1478,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
switch (key->addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
if (parse_tunnel_attr(priv, spec, f, filter_dev))
if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
return -EOPNOTSUPP;
break;
default:
@@ -1826,11 +1827,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5_core_dev *dev = priv->mdev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
struct mlx5_eswitch_rep *rep;
u8 match_level;
int err;

err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level);
err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);

if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
rep = rpriv->rep;
@@ -1846,10 +1847,12 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
}
}

if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
flow->esw_attr->match_level = match_level;
else
flow->esw_attr->tunnel_match_level = tunnel_match_level;
} else {
flow->nic_attr->match_level = match_level;
}

return err;
}
@@ -1934,9 +1937,9 @@ static struct mlx5_fields fields[] = {
OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
};

/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
* max from the SW pedit action. On success, it says how many HW actions were
* actually parsed.
/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
* max from the SW pedit action. On success, attr->num_mod_hdr_actions
* says how many HW actions were actually parsed.
*/
static int offload_pedit_fields(struct pedit_headers *masks,
struct pedit_headers *vals,
@@ -1960,9 +1963,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
action = parse_attr->mod_hdr_actions;
max_actions = parse_attr->num_mod_hdr_actions;
nactions = 0;
action = parse_attr->mod_hdr_actions +
parse_attr->num_mod_hdr_actions * action_size;

max_actions = parse_attr->max_mod_hdr_actions;
nactions = parse_attr->num_mod_hdr_actions;

for (i = 0; i < ARRAY_SIZE(fields); i++) {
f = &fields[i];
@@ -2073,7 +2078,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
if (!parse_attr->mod_hdr_actions)
return -ENOMEM;

parse_attr->num_mod_hdr_actions = max_actions;
parse_attr->max_mod_hdr_actions = max_actions;
return 0;
}

@@ -2119,9 +2124,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
goto out_err;
}

err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
if (err)
goto out_err;
if (!parse_attr->mod_hdr_actions) {
err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
if (err)
goto out_err;
}

err = offload_pedit_fields(masks, vals, parse_attr, extack);
if (err < 0)
@@ -2179,6 +2186,7 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,

static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
struct tcf_exts *exts,
u32 actions,
struct netlink_ext_ack *extack)
{
const struct tc_action *a;
@@ -2188,7 +2196,11 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
u16 ethertype;
int nkeys, i;

headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
else
headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

/* for non-IP we only re-write MACs, so we're okay */
@@ -2245,7 +2257,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv,

if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
return modify_header_match_supported(&parse_attr->spec, exts,
extack);
actions, extack);

return true;
}

@@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
if (unlikely(contig_wqebbs_room < num_wqebbs)) {
#ifdef CONFIG_MLX5_EN_IPSEC
struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
#endif
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
#ifdef CONFIG_MLX5_EN_IPSEC
wqe->eth = cur_eth;
#endif
}

/* fill wqe */

@@ -312,6 +312,7 @@ struct mlx5_esw_flow_attr {
} dests[MLX5_MAX_FLOW_FWD_VPORTS];
u32 mod_hdr_id;
u8 match_level;
u8 tunnel_match_level;
struct mlx5_fc *counter;
u32 chain;
u16 prio;

@@ -160,14 +160,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
MLX5_SET_TO_ONES(fte_match_set_misc, misc,
source_eswitch_owner_vhca_id);

if (attr->match_level == MLX5_MATCH_NONE)
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
else
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
MLX5_MATCH_MISC_PARAMETERS;

if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
if (attr->tunnel_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
if (attr->match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;
} else if (attr->match_level != MLX5_MATCH_NONE) {
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
}

if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
flow_act.modify_id = attr->mod_hdr_id;

@@ -53,7 +53,7 @@
extern const struct qed_common_ops qed_common_ops_pass;

#define QED_MAJOR_VERSION 8
#define QED_MINOR_VERSION 33
#define QED_MINOR_VERSION 37
#define QED_REVISION_VERSION 0
#define QED_ENGINEERING_VERSION 20

@@ -2216,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
u16 num_queues = 0;

/* Since the feature controls only queue-zones,
* make sure we have the contexts [rx, tx, xdp] to
* make sure we have the contexts [rx, xdp, tcs] to
* match.
*/
for_each_hwfn(cdev, i) {
@@ -2226,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
u16 cids;

cids = hwfn->pf_params.eth_pf_params.num_cons;
num_queues += min_t(u16, l2_queues, cids / 3);
cids /= (2 + info->num_tc);
num_queues += min_t(u16, l2_queues, cids);
}

/* queues might theoretically be >256, but interrupts'
@@ -2870,7 +2871,8 @@ static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle)
p_hwfn = p_cid->p_owner;
rc = qed_get_queue_coalesce(p_hwfn, coal, handle);
if (rc)
DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n");
DP_VERBOSE(cdev, QED_MSG_DEBUG,
"Unable to read queue coalescing\n");

return rc;
}

@@ -382,6 +382,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn);
* @param p_hwfn
*/
void qed_consq_free(struct qed_hwfn *p_hwfn);
int qed_spq_pend_post(struct qed_hwfn *p_hwfn);

/**
* @file

@@ -604,6 +604,9 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn)

p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan);
if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
p_ent->ramrod.pf_update.mf_vlan |=
cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13));

return qed_spq_post(p_hwfn, p_ent, NULL);
}

@@ -397,6 +397,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)

qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

/* Attempt to post pending requests */
spin_lock_bh(&p_hwfn->p_spq->lock);
rc = qed_spq_pend_post(p_hwfn);
spin_unlock_bh(&p_hwfn->p_spq->lock);

return rc;
}

@@ -767,7 +772,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_ent = NULL;
@@ -905,7 +910,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_spq_entry *tmp;
struct qed_spq_entry *found = NULL;
int rc;

if (!p_hwfn)
return -EINVAL;
@@ -963,12 +967,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
*/
qed_spq_return_entry(p_hwfn, found);

/* Attempt to post pending requests */
spin_lock_bh(&p_spq->lock);
rc = qed_spq_pend_post(p_hwfn);
spin_unlock_bh(&p_spq->lock);

return rc;
return 0;
}

int qed_consq_alloc(struct qed_hwfn *p_hwfn)

@@ -56,7 +56,7 @@
#include <net/tc_act/tc_gact.h>

#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 33
#define QEDE_MINOR_VERSION 37
#define QEDE_REVISION_VERSION 0
#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
@@ -494,6 +494,9 @@ struct qede_reload_args {

/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback);
netdev_features_t qede_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);

@@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}

u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev,
select_queue_fallback_t fallback)
{
struct qede_dev *edev = netdev_priv(dev);
int total_txq;

total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;

return QEDE_TSS_COUNT(edev) ?
fallback(dev, skb, NULL) % total_txq : 0;
}

/* 8B udp header + 8B base tunnel header + 32B option length */
#define QEDE_MAX_TUN_HDR_LEN 48

@@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
.ndo_start_xmit = qede_start_xmit,
.ndo_select_queue = qede_select_queue,
.ndo_set_rx_mode = qede_set_rx_mode,
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
.ndo_start_xmit = qede_start_xmit,
.ndo_select_queue = qede_select_queue,
.ndo_set_rx_mode = qede_set_rx_mode,
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
.ndo_start_xmit = qede_start_xmit,
.ndo_select_queue = qede_select_queue,
.ndo_set_rx_mode = qede_set_rx_mode,
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,

@@ -1037,7 +1037,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep)
skb = ep->tx_skbuff[entry];
pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
skb->len, PCI_DMA_TODEVICE);
dev_kfree_skb_irq(skb);
dev_consume_skb_irq(skb);
ep->tx_skbuff[entry] = NULL;
}

@@ -721,8 +721,11 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
{
unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);

if (!clk)
return 0;
if (!clk) {
clk = priv->plat->clk_ref_rate;
if (!clk)
return 0;
}

return (usec * (clk / 1000000)) / 256;
}
@@ -731,8 +734,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
{
unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);

if (!clk)
return 0;
if (!clk) {
clk = priv->plat->clk_ref_rate;
if (!clk)
return 0;
}

return (riwt * 256) / (clk / 1000000);
}

@@ -3023,10 +3023,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)

tx_q = &priv->tx_queue[queue];

if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);

/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
/*
* There is no way to determine the number of TSO
* capable Queues. Let's use always the Queue 0
* because if TSO is supported then at least this
* one will be capable.
*/
skb_set_queue_mapping(skb, 0);

return stmmac_tso_xmit(skb, dev);
}
}

if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3041,9 +3053,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}

if (priv->tx_path_in_lpi_mode)
stmmac_disable_eee_mode(priv);

entry = tx_q->cur_tx;
first_entry = entry;
WARN_ON(tx_q->tx_skbuff[first_entry]);

@@ -1898,7 +1898,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
cp->net_stats[ring].tx_packets++;
cp->net_stats[ring].tx_bytes += skb->len;
spin_unlock(&cp->stat_lock[ring]);
dev_kfree_skb_irq(skb);
dev_consume_skb_irq(skb);
}
cp->tx_old[ring] = entry;

@@ -781,7 +781,7 @@ static void bigmac_tx(struct bigmac *bp)

DTX(("skb(%p) ", skb));
bp->tx_skbs[elem] = NULL;
dev_kfree_skb_irq(skb);
dev_consume_skb_irq(skb);

elem = NEXT_TX(elem);
}

@@ -1962,7 +1962,7 @@ static void happy_meal_tx(struct happy_meal *hp)
this = &txbase[elem];
}

dev_kfree_skb_irq(skb);
dev_consume_skb_irq(skb);
dev->stats.tx_packets++;
}
hp->tx_old = elem;

@@ -1739,7 +1739,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv)
tx_level -= db->rptr->len; /* '-' koz len is negative */

/* now should come skb pointer - free it */
dev_kfree_skb_irq(db->rptr->addr.skb);
dev_consume_skb_irq(db->rptr->addr.skb);
bdx_tx_db_inc_rptr(db);
}

@@ -1740,7 +1740,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr,
dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
le16_to_cpu(pktlen), DMA_TO_DEVICE);
}
dev_kfree_skb_irq(skb);
dev_consume_skb_irq(skb);
tdinfo->skb = NULL;
}

@@ -3512,7 +3512,7 @@ static int dfx_xmt_done(DFX_board_t *bp)
bp->descr_block_virt->xmt_data[comp].long_1,
p_xmt_drv_descr->p_skb->len,
DMA_TO_DEVICE);
dev_kfree_skb_irq(p_xmt_drv_descr->p_skb);
dev_consume_skb_irq(p_xmt_drv_descr->p_skb);

/*
* Move to start of next packet by updating completion index

@@ -1512,9 +1512,13 @@ static void geneve_link_config(struct net_device *dev,
}
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
struct rt6_info *rt = rt6_lookup(geneve->net,
&info->key.u.ipv6.dst, NULL, 0,
NULL, 0);
struct rt6_info *rt;

if (!__in6_dev_get(dev))
break;

rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
NULL, 0);

if (rt && rt->dst.dev)
ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;

@@ -905,9 +905,9 @@ mcr20a_irq_clean_complete(void *context)
}
break;
case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ):
/* rx is starting */
dev_dbg(printdev(lp), "RX is starting\n");
mcr20a_handle_rx(lp);
/* rx is starting */
dev_dbg(printdev(lp), "RX is starting\n");
mcr20a_handle_rx(lp);
break;
case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ):
if (lp->is_tx) {

@@ -100,12 +100,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval,
err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
if (!err) {
mdev->l3mdev_ops = &ipvl_l3mdev_ops;
mdev->priv_flags |= IFF_L3MDEV_MASTER;
mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
} else
goto fail;
} else if (port->mode == IPVLAN_MODE_L3S) {
/* Old mode was L3S */
mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
mdev->l3mdev_ops = NULL;
}
@@ -167,7 +167,7 @@ static void ipvlan_port_destroy(struct net_device *dev)
struct sk_buff *skb;

if (port->mode == IPVLAN_MODE_L3S) {
dev->priv_flags &= ~IFF_L3MDEV_MASTER;
dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
ipvlan_unregister_nf_hook(dev_net(dev));
dev->l3mdev_ops = NULL;
}

@@ -898,14 +898,14 @@ static void decode_txts(struct dp83640_private *dp83640,
struct phy_txts *phy_txts)
{
struct skb_shared_hwtstamps shhwtstamps;
struct dp83640_skb_info *skb_info;
struct sk_buff *skb;
u64 ns;
u8 overflow;
u64 ns;

/* We must already have the skb that triggered this. */

again:
skb = skb_dequeue(&dp83640->tx_queue);

if (!skb) {
pr_debug("have timestamp but tx_queue empty\n");
return;
@@ -920,6 +920,11 @@ static void decode_txts(struct dp83640_private *dp83640,
}
return;
}
skb_info = (struct dp83640_skb_info *)skb->cb;
if (time_after(jiffies, skb_info->tmo)) {
kfree_skb(skb);
goto again;
}

ns = phy2txts(phy_txts);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -1472,6 +1477,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
static void dp83640_txtstamp(struct phy_device *phydev,
struct sk_buff *skb, int type)
{
struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
struct dp83640_private *dp83640 = phydev->priv;

switch (dp83640->hwts_tx_en) {
@@ -1484,6 +1490,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
/* fall through */
case HWTSTAMP_TX_ON:
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
skb_queue_tail(&dp83640->tx_queue, skb);
break;

@@ -847,7 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev)

/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {

/* Select page 18 */
err = marvell_set_page(phydev, 18);
if (err < 0)
@@ -870,21 +869,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;

/* There appears to be a bug in the 88e1512 when used in
* SGMII to copper mode, where the AN advertisement register
* clears the pause bits each time a negotiation occurs.
* This means we can never be truely sure what was advertised,
* so disable Pause support.
*/
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
phydev->supported);
linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
phydev->supported);
linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
phydev->advertising);
linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
phydev->advertising);
}

return m88e1318_config_init(phydev);

@@ -866,8 +866,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
if (rtnl_dereference(tun->xdp_prog))
sock_set_flag(&tfile->sk, SOCK_XDP);

tun_set_real_num_queues(tun);

/* device is allowed to go away first, so no need to hold extra
* refcnt.
*/
@@ -879,6 +877,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
rcu_assign_pointer(tfile->tun, tun);
rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
tun->numqueues++;
tun_set_real_num_queues(tun);
out:
return err;
}

@@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644);
#define VIRTIO_XDP_TX BIT(0)
#define VIRTIO_XDP_REDIR BIT(1)

#define VIRTIO_XDP_FLAG BIT(0)

/* RX packet size EWMA. The average packet size is used to determine the packet
* buffer size when refilling RX rings. As the entire RX ring may be refilled
* at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -252,6 +254,21 @@ struct padded_vnet_hdr {
char padding[4];
};

static bool is_xdp_frame(void *ptr)
{
return (unsigned long)ptr & VIRTIO_XDP_FLAG;
}

static void *xdp_to_ptr(struct xdp_frame *ptr)
{
return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG);
}

static struct xdp_frame *ptr_to_xdp(void *ptr)
{
return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}

/* Converting between virtqueue no. and kernel tx/rx queue no.
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
*/
@@ -462,7 +479,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,

sg_init_one(sq->sg, xdpf->data, xdpf->len);

err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC);
err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
GFP_ATOMIC);
if (unlikely(err))
return -ENOSPC; /* Caller handle free/refcnt */

@@ -482,15 +500,24 @@ static int virtnet_xdp_xmit(struct net_device *dev,
{
struct virtnet_info *vi = netdev_priv(dev);
struct receive_queue *rq = vi->rq;
struct xdp_frame *xdpf_sent;
struct bpf_prog *xdp_prog;
struct send_queue *sq;
unsigned int len;
int packets = 0;
int bytes = 0;
int drops = 0;
int kicks = 0;
int ret, err;
void *ptr;
int i;

/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
* indicate XDP resources have been successfully allocated.
*/
xdp_prog = rcu_dereference(rq->xdp_prog);
if (!xdp_prog)
return -ENXIO;

sq = virtnet_xdp_sq(vi);

if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
@@ -499,19 +526,21 @@ static int virtnet_xdp_xmit(struct net_device *dev,
goto out;
}

/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
* indicate XDP resources have been successfully allocated.
*/
xdp_prog = rcu_dereference(rq->xdp_prog);
if (!xdp_prog) {
ret = -ENXIO;
drops = n;
goto out;
}

/* Free up any pending old buffers before queueing new ones. */
while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
xdp_return_frame(xdpf_sent);
while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
if (likely(is_xdp_frame(ptr))) {
struct xdp_frame *frame = ptr_to_xdp(ptr);

bytes += frame->len;
xdp_return_frame(frame);
} else {
struct sk_buff *skb = ptr;

bytes += skb->len;
napi_consume_skb(skb, false);
}
packets++;
}

for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
@@ -530,6 +559,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
out:
u64_stats_update_begin(&sq->stats.syncp);
sq->stats.bytes += bytes;
sq->stats.packets += packets;
sq->stats.xdp_tx += n;
sq->stats.xdp_tx_drops += drops;
sq->stats.kicks += kicks;
@@ -1332,18 +1363,26 @@ static int virtnet_receive(struct receive_queue *rq, int budget,

static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
struct sk_buff *skb;
unsigned int len;
unsigned int packets = 0;
unsigned int bytes = 0;
void *ptr;

while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
if (likely(!is_xdp_frame(ptr))) {
struct sk_buff *skb = ptr;

bytes += skb->len;
pr_debug("Sent skb %p\n", skb);

bytes += skb->len;
napi_consume_skb(skb, in_napi);
} else {
struct xdp_frame *frame = ptr_to_xdp(ptr);

bytes += frame->len;
xdp_return_frame(frame);
}
packets++;

napi_consume_skb(skb, in_napi);
}

/* Avoid overhead when no packets have been processed
@@ -1358,6 +1397,16 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
u64_stats_update_end(&sq->stats.syncp);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
return false;
else if (q < vi->curr_queue_pairs)
return true;
else
return false;
}

static void virtnet_poll_cleantx(struct receive_queue *rq)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
@@ -1365,7 +1414,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
struct send_queue *sq = &vi->sq[index];
struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);

if (!sq->napi.weight)
if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
return;

if (__netif_tx_trylock(txq)) {
@@ -1442,8 +1491,16 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
{
struct send_queue *sq = container_of(napi, struct send_queue, napi);
struct virtnet_info *vi = sq->vq->vdev->priv;
struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq));
unsigned int index = vq2txq(sq->vq);
struct netdev_queue *txq;

if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
/* We don't need to enable cb for XDP */
napi_complete_done(napi, 0);
return 0;
}

txq = netdev_get_tx_queue(vi->dev, index);
__netif_tx_lock(txq, raw_smp_processor_id());
free_old_xmit_skbs(sq, true);
__netif_tx_unlock(txq);
@@ -2395,6 +2452,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
return -ENOMEM;
}

old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
if (!prog && !old_prog)
return 0;

if (prog) {
prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
if (IS_ERR(prog))
@@ -2402,36 +2463,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}

/* Make sure NAPI is not using any XDP TX queues for RX. */
if (netif_running(dev))
for (i = 0; i < vi->max_queue_pairs; i++)
if (netif_running(dev)) {
for (i = 0; i < vi->max_queue_pairs; i++) {
napi_disable(&vi->rq[i].napi);
virtnet_napi_tx_disable(&vi->sq[i].napi);
}
}

if (!prog) {
for (i = 0; i < vi->max_queue_pairs; i++) {
rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
if (i == 0)
virtnet_restore_guest_offloads(vi);
}
synchronize_net();
}

netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
if (err)
goto err;
netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
vi->xdp_queue_pairs = xdp_qp;

for (i = 0; i < vi->max_queue_pairs; i++) {
old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
if (i == 0) {
if (!old_prog)
if (prog) {
for (i = 0; i < vi->max_queue_pairs; i++) {
rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
if (i == 0 && !old_prog)
virtnet_clear_guest_offloads(vi);
if (!prog)
virtnet_restore_guest_offloads(vi);
}
}

for (i = 0; i < vi->max_queue_pairs; i++) {
if (old_prog)
bpf_prog_put(old_prog);
if (netif_running(dev))
if (netif_running(dev)) {
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
virtnet_napi_tx_enable(vi, vi->sq[i].vq,
&vi->sq[i].napi);
}
}

return 0;

err:
for (i = 0; i < vi->max_queue_pairs; i++)
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
if (!prog) {
virtnet_clear_guest_offloads(vi);
for (i = 0; i < vi->max_queue_pairs; i++)
rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
}

if (netif_running(dev)) {
for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
virtnet_napi_tx_enable(vi, vi->sq[i].vq,
&vi->sq[i].napi);
}
}
if (prog)
bpf_prog_sub(prog, vi->max_queue_pairs - 1);
return err;
@@ -2613,16 +2700,6 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}

static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
return false;
else if (q < vi->curr_queue_pairs)
return true;
else
return false;
}

static void free_unused_bufs(struct virtnet_info *vi)
{
void *buf;
@@ -2631,10 +2708,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
if (!is_xdp_raw_buffer_queue(vi, i))
if (!is_xdp_frame(buf))
dev_kfree_skb(buf);
else
put_page(virt_to_head_page(buf));
xdp_return_frame(ptr_to_xdp(buf));
}
}

@@ -1575,7 +1575,7 @@ try:
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
}
dev_kfree_skb_irq(skb);
dev_consume_skb_irq(skb);
dpriv->tx_skbuff[cur] = NULL;
++dpriv->tx_dirty;
} else {

@@ -482,7 +482,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv)
memset(priv->tx_buffer +
(be32_to_cpu(bd->buf) - priv->dma_tx_addr),
0, skb->len);
dev_kfree_skb_irq(skb);
dev_consume_skb_irq(skb);

priv->tx_skbuff[priv->skb_dirtytx] = NULL;
priv->skb_dirtytx =

@@ -548,7 +548,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
{
.id = WCN3990_HW_1_0_DEV_VERSION,
.dev_id = 0,
.bus = ATH10K_BUS_PCI,
.bus = ATH10K_BUS_SNOC,
.name = "wcn3990 hw1.0",
.continuous_frag_desc = true,
.tx_chain_mask = 0x7,

@@ -1,6 +1,6 @@
config IWLWIFI
tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) "
depends on PCI && HAS_IOMEM
depends on PCI && HAS_IOMEM && CFG80211
select FW_LOADER
---help---
Select to build the driver supporting the:
@@ -47,6 +47,7 @@ if IWLWIFI
config IWLWIFI_LEDS
bool
depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI
depends on IWLMVM || IWLDVM
select LEDS_TRIGGERS
select MAC80211_LEDS
default y

@@ -212,24 +212,24 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev)
mt76x02_add_rate_power_offset(t, delta);
}

void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp)
{
struct mt76x0_chan_map {
u8 chan;
u8 offset;
} chan_map[] = {
{ 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 },
{ 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 },
{ 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 },
{ 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 },
{ 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 },
{ 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 },
{ 167, 17 }, { 171, 18 }, { 173, 19 },
{ 2, 0 }, { 4, 2 }, { 6, 4 }, { 8, 6 },
{ 10, 8 }, { 12, 10 }, { 14, 12 }, { 38, 0 },
{ 44, 2 }, { 48, 4 }, { 54, 6 }, { 60, 8 },
{ 64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 },
{ 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 },
{ 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 },
{ 167, 34 }, { 171, 36 }, { 175, 38 },
};
struct ieee80211_channel *chan = dev->mt76.chandef.chan;
u8 offset, addr;
int i, idx = 0;
u16 data;
int i;

if (mt76x0_tssi_enabled(dev)) {
s8 target_power;
@@ -239,14 +239,14 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
else
data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER);
target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7];
info[0] = target_power + mt76x0_get_delta(dev);
info[1] = 0;
*tp = target_power + mt76x0_get_delta(dev);

return;
}

for (i = 0; i < ARRAY_SIZE(chan_map); i++) {
if (chan_map[i].chan <= chan->hw_value) {
if (chan->hw_value <= chan_map[i].chan) {
idx = (chan->hw_value == chan_map[i].chan);
offset = chan_map[i].offset;
break;
}
@@ -258,13 +258,16 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
addr = MT_EE_TX_POWER_DELTA_BW80 + offset;
} else {
switch (chan->hw_value) {
case 42:
offset = 2;
break;
case 58:
offset = 8;
break;
case 106:
offset = 14;
break;
case 112:
case 122:
offset = 20;
break;
case 155:
@@ -277,14 +280,9 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info)
}

data = mt76x02_eeprom_get(dev, addr);

info[0] = data;
if (!info[0] || info[0] > 0x3f)
info[0] = 5;

info[1] = data >> 8;
if (!info[1] || info[1] > 0x3f)
info[1] = 5;
*tp = data >> (8 * idx);
if (*tp < 0 || *tp > 0x3f)
*tp = 5;
}

static int mt76x0_check_eeprom(struct mt76x02_dev *dev)

@@ -26,7 +26,7 @@ struct mt76x02_dev;
int mt76x0_eeprom_init(struct mt76x02_dev *dev);
void mt76x0_read_rx_gain(struct mt76x02_dev *dev);
void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev);
void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info);
void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp);

static inline s8 s6_to_s8(u32 val)
{

@@ -845,17 +845,17 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev)
void mt76x0_phy_set_txpower(struct mt76x02_dev *dev)
{
struct mt76_rate_power *t = &dev->mt76.rate_power;
u8 info[2];
s8 info;

mt76x0_get_tx_power_per_rate(dev);
mt76x0_get_power_info(dev, info);
mt76x0_get_power_info(dev, &info);

mt76x02_add_rate_power_offset(t, info[0]);
mt76x02_add_rate_power_offset(t, info);
mt76x02_limit_rate_power(t, dev->mt76.txpower_conf);
dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t);
mt76x02_add_rate_power_offset(t, -info[0]);
mt76x02_add_rate_power_offset(t, -info);

mt76x02_phy_set_txpower(dev, info[0], info[1]);
mt76x02_phy_set_txpower(dev, info, info);
}

void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on)

@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
|
|||
}
|
||||
|
||||
sdio_claim_host(func);
|
||||
/*
|
||||
* To guarantee that the SDIO card is power cycled, as required to make
|
||||
* the FW programming to succeed, let's do a brute force HW reset.
|
||||
*/
|
||||
mmc_hw_reset(card->host);
|
||||
|
||||
sdio_enable_func(func);
|
||||
sdio_release_host(func);
|
||||
|
||||
|
@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
|
|||
{
|
||||
struct sdio_func *func = dev_to_sdio_func(glue->dev);
|
||||
struct mmc_card *card = func->card;
|
||||
int error;
|
||||
|
||||
sdio_claim_host(func);
|
||||
sdio_disable_func(func);
|
||||
sdio_release_host(func);
|
||||
|
||||
/* Let runtime PM know the card is powered off */
|
||||
error = pm_runtime_put(&card->dev);
|
||||
if (error < 0 && error != -EBUSY) {
|
||||
dev_err(&card->dev, "%s failed: %i\n", __func__, error);
|
||||
|
||||
return error;
|
||||
}
|
||||
|
||||
pm_runtime_put(&card->dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -22,6 +22,7 @@
#include <linux/hashtable.h>
#include <linux/ip.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>

#include <net/ipv6.h>
#include <net/if_inet6.h>

@@ -789,6 +790,7 @@ struct qeth_card {
struct qeth_seqno seqno;
struct qeth_card_options options;

struct workqueue_struct *event_wq;
wait_queue_head_t wait_q;
spinlock_t mclock;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

@@ -962,7 +964,6 @@ extern const struct attribute_group *qeth_osn_attr_groups[];
extern const struct attribute_group qeth_device_attr_group;
extern const struct attribute_group qeth_device_blkt_group;
extern const struct device_type qeth_generic_devtype;
extern struct workqueue_struct *qeth_wq;

int qeth_card_hw_is_reachable(struct qeth_card *);
const char *qeth_get_cardname_short(struct qeth_card *);

@@ -74,8 +74,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);

struct workqueue_struct *qeth_wq;
EXPORT_SYMBOL_GPL(qeth_wq);
static struct workqueue_struct *qeth_wq;

int qeth_card_hw_is_reachable(struct qeth_card *card)
{

@@ -566,6 +565,7 @@ static int __qeth_issue_next_read(struct qeth_card *card)
QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
rc, CARD_DEVID(card));
atomic_set(&channel->irq_pending, 0);
qeth_release_buffer(channel, iob);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
wake_up(&card->wait_q);

@@ -1127,6 +1127,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
rc = qeth_get_problem(card, cdev, irb);
if (rc) {
card->read_or_write_problem = 1;
if (iob)
qeth_release_buffer(iob->channel, iob);
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
goto out;

@@ -1466,6 +1468,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
CARD_RDEV(card) = gdev->cdev[0];
CARD_WDEV(card) = gdev->cdev[1];
CARD_DDEV(card) = gdev->cdev[2];

card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev));
if (!card->event_wq)
goto out_wq;
if (qeth_setup_channel(&card->read, true))
goto out_ip;
if (qeth_setup_channel(&card->write, true))

@@ -1481,6 +1487,8 @@ out_data:
out_channel:
qeth_clean_channel(&card->read);
out_ip:
destroy_workqueue(card->event_wq);
out_wq:
dev_set_drvdata(&gdev->dev, NULL);
kfree(card);
out:

@@ -1809,6 +1817,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card,
QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
atomic_set(&channel->irq_pending, 0);
qeth_release_buffer(channel, iob);
wake_up(&card->wait_q);
return rc;
}

@@ -1878,6 +1887,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card,
rc);
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
atomic_set(&channel->irq_pending, 0);
qeth_release_buffer(channel, iob);
wake_up(&card->wait_q);
return rc;
}

@@ -2058,6 +2068,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
}
reply = qeth_alloc_reply(card);
if (!reply) {
qeth_release_buffer(channel, iob);
return -ENOMEM;
}
reply->callback = reply_cb;

@@ -2389,11 +2400,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
return 0;
}

static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
if (!q)
return;

qeth_clear_outq_buffers(q, 1);
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
kfree(q);
}

@@ -2467,10 +2479,8 @@ out_freeoutqbufs:
card->qdio.out_qs[i]->bufs[j] = NULL;
}
out_freeoutq:
while (i > 0) {
qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
}
while (i > 0)
qeth_free_output_queue(card->qdio.out_qs[--i]);
kfree(card->qdio.out_qs);
card->qdio.out_qs = NULL;
out_freepool:

@@ -2503,10 +2513,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
qeth_free_buffer_pool(card);
/* free outbound qdio_qs */
if (card->qdio.out_qs) {
for (i = 0; i < card->qdio.no_out_queues; ++i) {
qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
}
for (i = 0; i < card->qdio.no_out_queues; i++)
qeth_free_output_queue(card->qdio.out_qs[i]);
kfree(card->qdio.out_qs);
card->qdio.out_qs = NULL;
}

@@ -5028,6 +5036,7 @@ static void qeth_core_free_card(struct qeth_card *card)
qeth_clean_channel(&card->read);
qeth_clean_channel(&card->write);
qeth_clean_channel(&card->data);
destroy_workqueue(card->event_wq);
qeth_free_qdio_buffers(card);
unregister_service_level(&card->qeth_service_level);
dev_set_drvdata(&card->gdev->dev, NULL);

@@ -369,6 +369,8 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
qeth_clear_cmd_buffers(&card->read);
qeth_clear_cmd_buffers(&card->write);
}

flush_workqueue(card->event_wq);
}

static int qeth_l2_process_inbound_buffer(struct qeth_card *card,

@@ -801,6 +803,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l2_set_offline(cgdev);

cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
}

@@ -1434,7 +1438,7 @@ static void qeth_bridge_state_change(struct qeth_card *card,
data->card = card;
memcpy(&data->qports, qports,
sizeof(struct qeth_sbp_state_change) + extrasize);
queue_work(qeth_wq, &data->worker);
queue_work(card->event_wq, &data->worker);
}

struct qeth_bridge_host_data {

@@ -1506,7 +1510,7 @@ static void qeth_bridge_host_event(struct qeth_card *card,
data->card = card;
memcpy(&data->hostevs, hostevs,
sizeof(struct qeth_ipacmd_addr_change) + extrasize);
queue_work(qeth_wq, &data->worker);
queue_work(card->event_wq, &data->worker);
}

/* SETBRIDGEPORT support; sending commands */

@@ -1433,6 +1433,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
qeth_clear_cmd_buffers(&card->read);
qeth_clear_cmd_buffers(&card->write);
}

flush_workqueue(card->event_wq);
}

/*

@@ -2338,6 +2340,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l3_set_offline(cgdev);

cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
qeth_l3_clear_ip_htable(card, 0);
@@ -591,8 +591,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
return qdisc_skb_cb(skb)->data;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
u8 *cb_data = bpf_skb_cb(skb);
u8 cb_saved[BPF_SKB_CB_LEN];

@@ -611,15 +611,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
return res;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
u32 res;

preempt_disable();
res = __bpf_prog_run_save_cb(prog, skb);
preempt_enable();
return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
struct sk_buff *skb)
{
u8 *cb_data = bpf_skb_cb(skb);
u32 res;

if (unlikely(prog->cb_access))
memset(cb_data, 0, BPF_SKB_CB_LEN);

return BPF_PROG_RUN(prog, skb);
preempt_disable();
res = BPF_PROG_RUN(prog, skb);
preempt_enable();
return res;
}

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,

@@ -1483,6 +1483,7 @@ struct net_device_ops {
* @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook
* @IFF_FAILOVER: device is a failover master device
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
* @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
*/
enum netdev_priv_flags {
IFF_802_1Q_VLAN = 1<<0,

@@ -1514,6 +1515,7 @@ enum netdev_priv_flags {
IFF_NO_RX_HANDLER = 1<<26,
IFF_FAILOVER = 1<<27,
IFF_FAILOVER_SLAVE = 1<<28,
IFF_L3MDEV_RX_HANDLER = 1<<29,
};

#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN

@@ -1544,6 +1546,7 @@ enum netdev_priv_flags {
#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER
#define IFF_FAILOVER IFF_FAILOVER
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER

/**
* struct net_device - The DEVICE structure.

@@ -4549,6 +4552,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev)
return dev->priv_flags & IFF_SUPP_NOFCS;
}

static inline bool netif_has_l3_rx_handler(const struct net_device *dev)
{
return dev->priv_flags & IFF_L3MDEV_RX_HANDLER;
}

static inline bool netif_is_l3_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_L3MDEV_MASTER;

@@ -184,6 +184,7 @@ struct plat_stmmacenet_data {
struct clk *pclk;
struct clk *clk_ptp_ref;
unsigned int clk_ptp_rate;
unsigned int clk_ref_rate;
struct reset_control *stmmac_rst;
struct stmmac_axi *axi;
int has_gmac4;

@@ -153,7 +153,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto)
if (netif_is_l3_slave(skb->dev))
master = netdev_master_upper_dev_get_rcu(skb->dev);
else if (netif_is_l3_master(skb->dev))
else if (netif_is_l3_master(skb->dev) ||
netif_has_l3_rx_handler(skb->dev))
master = skb->dev;

if (master && master->l3mdev_ops->l3mdev_l3_rcv)
@@ -469,9 +469,7 @@ struct nft_set_binding {
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding);
void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding);
void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding);
struct nft_set_binding *binding, bool commit);
void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);

/**

@@ -721,6 +719,13 @@ struct nft_expr_type {
#define NFT_EXPR_STATEFUL 0x1
#define NFT_EXPR_GC 0x2

enum nft_trans_phase {
NFT_TRANS_PREPARE,
NFT_TRANS_ABORT,
NFT_TRANS_COMMIT,
NFT_TRANS_RELEASE
};

/**
* struct nft_expr_ops - nf_tables expression operations
*

@@ -750,7 +755,8 @@ struct nft_expr_ops {
void (*activate)(const struct nft_ctx *ctx,
const struct nft_expr *expr);
void (*deactivate)(const struct nft_ctx *ctx,
const struct nft_expr *expr);
const struct nft_expr *expr,
enum nft_trans_phase phase);
void (*destroy)(const struct nft_ctx *ctx,
const struct nft_expr *expr);
void (*destroy_clone)(const struct nft_ctx *ctx,

@@ -1323,12 +1329,15 @@ struct nft_trans_rule {
struct nft_trans_set {
struct nft_set *set;
u32 set_id;
bool bound;
};

#define nft_trans_set(trans) \
(((struct nft_trans_set *)trans->data)->set)
#define nft_trans_set_id(trans) \
(((struct nft_trans_set *)trans->data)->set_id)
#define nft_trans_set_bound(trans) \
(((struct nft_trans_set *)trans->data)->bound)

struct nft_trans_chain {
bool update;

@@ -1459,7 +1459,8 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
/* "typedef void new_void", "const void"...etc */
if (!btf_type_is_void(next_type) &&
!btf_type_is_fwd(next_type)) {
!btf_type_is_fwd(next_type) &&
!btf_type_is_func_proto(next_type)) {
btf_verifier_log_type(env, v->t, "Invalid type_id");
return -EINVAL;
}

@@ -572,7 +572,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
bpf_compute_and_save_data_end(skb, &saved_data_end);

ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
bpf_prog_run_save_cb);
__bpf_prog_run_save_cb);
bpf_restore_data_end(skb, saved_data_end);
__skb_pull(skb, offset);
skb->sk = save_sk;

@@ -686,7 +686,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
}

if (htab_is_prealloc(htab)) {
pcpu_freelist_push(&htab->freelist, &l->fnode);
__pcpu_freelist_push(&htab->freelist, &l->fnode);
} else {
atomic_dec(&htab->count);
l->htab = htab;

@@ -748,7 +748,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
} else {
struct pcpu_freelist_node *l;

l = pcpu_freelist_pop(&htab->freelist);
l = __pcpu_freelist_pop(&htab->freelist);
if (!l)
return ERR_PTR(-E2BIG);
l_new = container_of(l, struct htab_elem, fnode);
@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
free_percpu(s->freelist);
}

static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
struct pcpu_freelist_node *node)
static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
struct pcpu_freelist_node *node)
{
raw_spin_lock(&head->lock);
node->next = head->first;

@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
raw_spin_unlock(&head->lock);
}

void pcpu_freelist_push(struct pcpu_freelist *s,
void __pcpu_freelist_push(struct pcpu_freelist *s,
struct pcpu_freelist_node *node)
{
struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);

__pcpu_freelist_push(head, node);
___pcpu_freelist_push(head, node);
}

void pcpu_freelist_push(struct pcpu_freelist *s,
struct pcpu_freelist_node *node)
{
unsigned long flags;

local_irq_save(flags);
__pcpu_freelist_push(s, node);
local_irq_restore(flags);
}

void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,

@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
for_each_possible_cpu(cpu) {
again:
head = per_cpu_ptr(s->freelist, cpu);
__pcpu_freelist_push(head, buf);
___pcpu_freelist_push(head, buf);
i++;
buf += elem_size;
if (i == nr_elems)

@@ -74,14 +84,12 @@ again:
local_irq_restore(flags);
}

struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
{
struct pcpu_freelist_head *head;
struct pcpu_freelist_node *node;
unsigned long flags;
int orig_cpu, cpu;

local_irq_save(flags);
orig_cpu = cpu = raw_smp_processor_id();
while (1) {
head = per_cpu_ptr(s->freelist, cpu);

@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
node = head->first;
if (node) {
head->first = node->next;
raw_spin_unlock_irqrestore(&head->lock, flags);
raw_spin_unlock(&head->lock);
return node;
}
raw_spin_unlock(&head->lock);
cpu = cpumask_next(cpu, cpu_possible_mask);
if (cpu >= nr_cpu_ids)
cpu = 0;
if (cpu == orig_cpu) {
local_irq_restore(flags);
if (cpu == orig_cpu)
return NULL;
}
}
}

struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
struct pcpu_freelist_node *ret;
unsigned long flags;

local_irq_save(flags);
ret = __pcpu_freelist_pop(s);
local_irq_restore(flags);
return ret;
}

@@ -22,8 +22,12 @@ struct pcpu_freelist_node {
struct pcpu_freelist_node *next;
};

/* pcpu_freelist_* do spin_lock_irqsave. */
void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
u32 nr_elems);
int pcpu_freelist_init(struct pcpu_freelist *);
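The percpu freelist hunks above split the API into a locking outer layer (pcpu_freelist_push/pop, which save and restore IRQs themselves) and raw __pcpu_freelist_* helpers for callers that already run with IRQs disabled. A rough user-space sketch of the same layering, with a single pthread mutex standing in for the per-CPU spinlock plus IRQ state; all names below are illustrative and not the kernel API:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct node { struct node *next; };

struct freelist {
	pthread_mutex_t lock;   /* stands in for the raw spinlock + saved IRQ state */
	struct node *first;
};

/* Raw variant: caller must already hold fl->lock. */
static void __freelist_push_locked(struct freelist *fl, struct node *n)
{
	n->next = fl->first;
	fl->first = n;
}

/* Public variant: takes and releases the lock itself. */
static void freelist_push(struct freelist *fl, struct node *n)
{
	pthread_mutex_lock(&fl->lock);
	__freelist_push_locked(fl, n);
	pthread_mutex_unlock(&fl->lock);
}

/* Batch populate: grab the lock once instead of once per element. */
static void freelist_populate(struct freelist *fl, struct node *nodes, size_t cnt)
{
	pthread_mutex_lock(&fl->lock);
	for (size_t i = 0; i < cnt; i++)
		__freelist_push_locked(fl, &nodes[i]);
	pthread_mutex_unlock(&fl->lock);
}

int main(void)
{
	struct freelist fl = { .lock = PTHREAD_MUTEX_INITIALIZER, .first = NULL };
	struct node pool[4];
	size_t depth = 0;

	freelist_populate(&fl, pool, 3);   /* batch path, one lock round-trip */
	freelist_push(&fl, &pool[3]);      /* single-element convenience path */

	for (struct node *n = fl.first; n; n = n->next)
		depth++;
	printf("freelist depth: %zu\n", depth);
	return 0;
}

The point of the split is the same as in the diff: callers that already hold the outer protection (or want to batch work under it) use the raw helpers, while everyone else keeps the safe wrapper.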
@@ -713,8 +713,13 @@ static int map_lookup_elem(union bpf_attr *attr)

if (bpf_map_is_dev_bound(map)) {
err = bpf_map_offload_lookup_elem(map, key, value);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
goto done;
}

preempt_disable();
this_cpu_inc(bpf_prog_active);
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
err = bpf_percpu_hash_copy(map, key, value);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
err = bpf_percpu_array_copy(map, key, value);

@@ -744,7 +749,10 @@ static int map_lookup_elem(union bpf_attr *attr)
}
rcu_read_unlock();
}
this_cpu_dec(bpf_prog_active);
preempt_enable();

done:
if (err)
goto free_value;

@@ -1204,22 +1204,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *

int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
int err;

mutex_lock(&bpf_event_mutex);
err = __bpf_probe_register(btp, prog);
mutex_unlock(&bpf_event_mutex);
return err;
return __bpf_probe_register(btp, prog);
}

int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
int err;

mutex_lock(&bpf_event_mutex);
err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
mutex_unlock(&bpf_event_mutex);
return err;
return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}

int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
@@ -541,38 +541,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
int cnt, bool slow)
{
struct rhltable rhlt;
struct rhltable *rhlt;
unsigned int i, ret;
const char *key;
int err = 0;

err = rhltable_init(&rhlt, &test_rht_params_dup);
if (WARN_ON(err))
rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
if (WARN_ON(!rhlt))
return -EINVAL;

err = rhltable_init(rhlt, &test_rht_params_dup);
if (WARN_ON(err)) {
kfree(rhlt);
return err;
}

for (i = 0; i < cnt; i++) {
rhl_test_objects[i].value.tid = i;
key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
key += test_rht_params_dup.key_offset;

if (slow) {
err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
&rhl_test_objects[i].list_node.rhead));
if (err == -EAGAIN)
err = 0;
} else
err = rhltable_insert(&rhlt,
err = rhltable_insert(rhlt,
&rhl_test_objects[i].list_node,
test_rht_params_dup);
if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
goto skip_print;
}

ret = print_ht(&rhlt);
ret = print_ht(rhlt);
WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");

skip_print:
rhltable_destroy(&rhlt);
rhltable_destroy(rhlt);
kfree(rhlt);

return 0;
}
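The test_rhashtable change above moves the struct rhltable off the stack and into a kmalloc() allocation, which is the usual remedy when an object becomes too large for a kernel stack frame. A minimal user-space sketch of the same move, heap-allocating a large object and releasing it on every exit path; the struct and its size are invented for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for a table object too big to live on the stack. */
struct big_table {
	unsigned long buckets[16384];
};

static int table_init(struct big_table *t)
{
	memset(t->buckets, 0, sizeof(t->buckets));
	return 0;
}

static int run_test(void)
{
	struct big_table *t;
	int err;

	t = malloc(sizeof(*t));     /* heap allocation instead of a stack local */
	if (!t)
		return -1;

	err = table_init(t);
	if (err) {
		free(t);            /* every error path releases the allocation */
		return err;
	}

	printf("table of %zu bytes initialised on the heap\n", sizeof(*t));
	free(t);
	return 0;
}

int main(void)
{
	return run_test() ? 1 : 0;
}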
@@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)

ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);

/* free the TID stats immediately */
cfg80211_sinfo_release_content(&sinfo);

dev_put(real_netdev);
if (ret == -ENOENT) {
/* Node is not associated anymore! It would be

@@ -20,7 +20,6 @@
#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/gfp.h>

@@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
parent_dev = __dev_get_by_index((struct net *)parent_net,
dev_get_iflink(net_dev));
/* if we got a NULL parent_dev there is something broken.. */
if (WARN(!parent_dev, "Cannot find parent device"))
if (!parent_dev) {
pr_err("Cannot find parent device\n");
return false;
}

if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
return false;

@@ -221,6 +221,8 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,

netif_trans_update(soft_iface);
vid = batadv_get_vid(skb, 0);

skb_reset_mac_header(skb);
ethhdr = eth_hdr(skb);

switch (ntohs(ethhdr->h_proto)) {
@@ -4112,10 +4112,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
/* Only some socketops are supported */
switch (optname) {
case SO_RCVBUF:
val = min_t(u32, val, sysctl_rmem_max);
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
break;
case SO_SNDBUF:
val = min_t(u32, val, sysctl_wmem_max);
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
break;

@@ -545,8 +545,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc)
struct sk_psock *psock = container_of(gc, struct sk_psock, gc);

/* No sk_callback_lock since already detached. */
if (psock->parser.enabled)
strp_done(&psock->parser.strp);
strp_done(&psock->parser.strp);

cancel_work_sync(&psock->work);
@@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
u8 pkt, u8 opt, u8 *val, u8 len)
{
if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL)
if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options)
return 0;
return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
}

@@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
u8 pkt, u8 opt, u8 *val, u8 len)
{
if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL)
if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options)
return 0;
return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
}
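The DCCP hunks above make ccid_hc_[rt]x_parse_options check the ccid object itself, not just its parse hook, before dereferencing it. The shape is a familiar one: an optional ops table where either the object or the callback may be absent, and a dispatch helper that treats "nothing to do" as success. A small self-contained C sketch of that guard, with names invented for the example:

#include <stdio.h>
#include <stddef.h>

struct parser_ops {
	int (*parse)(const char *buf, size_t len);
};

struct codec {
	const struct parser_ops *ops;   /* may be NULL until negotiated */
};

static int codec_parse(const struct codec *c, const char *buf, size_t len)
{
	/* Guard the object and the hook before dereferencing either. */
	if (!c || !c->ops || !c->ops->parse)
		return 0;               /* nothing to parse is not an error */
	return c->ops->parse(buf, len);
}

static int hex_parse(const char *buf, size_t len)
{
	printf("parsing %zu bytes starting with 0x%02x\n",
	       len, (unsigned char)buf[0]);
	return 0;
}

int main(void)
{
	static const struct parser_ops ops = { .parse = hex_parse };
	struct codec negotiated = { .ops = &ops };
	struct codec not_ready = { .ops = NULL };
	const char pkt[] = { 0x20, 0x01 };

	codec_parse(&negotiated, pkt, sizeof(pkt));
	codec_parse(&not_ready, pkt, sizeof(pkt));  /* safe no-op */
	codec_parse(NULL, pkt, sizeof(pkt));        /* also safe */
	return 0;
}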
@@ -205,6 +205,8 @@ static void dsa_master_reset_mtu(struct net_device *dev)
rtnl_unlock();
}

static struct lock_class_key dsa_master_addr_list_lock_key;

int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
int ret;

@@ -218,6 +220,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
wmb();

dev->dsa_ptr = cpu_dp;
lockdep_set_class(&dev->addr_list_lock,
&dsa_master_addr_list_lock_key);

ret = dsa_master_ethtool_setup(dev);
if (ret)

@@ -140,11 +140,14 @@ static int dsa_slave_close(struct net_device *dev)
static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
struct net_device *master = dsa_slave_to_master(dev);

if (change & IFF_ALLMULTI)
dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
if (change & IFF_PROMISC)
dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
if (dev->flags & IFF_UP) {
if (change & IFF_ALLMULTI)
dev_set_allmulti(master,
dev->flags & IFF_ALLMULTI ? 1 : -1);
if (change & IFF_PROMISC)
dev_set_promiscuity(master,
dev->flags & IFF_PROMISC ? 1 : -1);
}
}

static void dsa_slave_set_rx_mode(struct net_device *dev)

@@ -639,7 +642,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
int ret;

/* Port's PHY and MAC both need to be EEE capable */
if (!dev->phydev && !dp->pl)
if (!dev->phydev || !dp->pl)
return -ENODEV;

if (!ds->ops->set_mac_eee)

@@ -659,7 +662,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
int ret;

/* Port's PHY and MAC both need to be EEE capable */
if (!dev->phydev && !dp->pl)
if (!dev->phydev || !dp->pl)
return -ENODEV;

if (!ds->ops->get_mac_eee)
@@ -1455,12 +1455,17 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip_tunnel *t = netdev_priv(dev);
struct ip_tunnel_parm *p = &t->parms;
__be16 o_flags = p->o_flags;

if ((t->erspan_ver == 1 || t->erspan_ver == 2) &&
!t->collect_md)
o_flags |= TUNNEL_KEY;

if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
nla_put_be16(skb, IFLA_GRE_IFLAGS,
gre_tnl_flags_to_gre_flags(p->i_flags)) ||
nla_put_be16(skb, IFLA_GRE_OFLAGS,
gre_tnl_flags_to_gre_flags(p->o_flags)) ||
gre_tnl_flags_to_gre_flags(o_flags)) ||
nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||

@@ -2098,12 +2098,17 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct __ip6_tnl_parm *p = &t->parms;
__be16 o_flags = p->o_flags;

if ((p->erspan_ver == 1 || p->erspan_ver == 2) &&
!p->collect_md)
o_flags |= TUNNEL_KEY;

if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
nla_put_be16(skb, IFLA_GRE_IFLAGS,
gre_tnl_flags_to_gre_flags(p->i_flags)) ||
nla_put_be16(skb, IFLA_GRE_OFLAGS,
gre_tnl_flags_to_gre_flags(p->o_flags)) ||
gre_tnl_flags_to_gre_flags(o_flags)) ||
nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) ||
@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
struct sock *sk = sk_to_full_sk(skb->sk);
unsigned int hh_len;
struct dst_entry *dst;
int strict = (ipv6_addr_type(&iph->daddr) &
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
struct flowi6 fl6 = {
.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
strict ? skb_dst(skb)->dev->ifindex : 0,
.flowi6_mark = skb->mark,
.flowi6_uid = sock_net_uid(net, sk),
.daddr = iph->daddr,

@@ -146,6 +146,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
} else {
ip6_flow_hdr(hdr, 0, flowlabel);
hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb));

memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
}

hdr->nexthdr = NEXTHDR_ROUTING;

@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
}

err = 0;
if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
if (__in6_dev_get(skb->dev) &&
!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
goto out;

if (t->parms.iph.daddr == 0)
@@ -83,8 +83,7 @@
#define L2TP_SLFLAG_S 0x40000000
#define L2TP_SL_SEQ_MASK 0x00ffffff

#define L2TP_HDR_SIZE_SEQ 10
#define L2TP_HDR_SIZE_NOSEQ 6
#define L2TP_HDR_SIZE_MAX 14

/* Default trace flags */
#define L2TP_DEFAULT_DEBUG_FLAGS 0

@@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
__skb_pull(skb, sizeof(struct udphdr));

/* Short packet? */
if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) {
l2tp_info(tunnel, L2TP_MSG_DATA,
"%s: recv short packet (len=%d)\n",
tunnel->name, skb->len);

@@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb)
goto error;
}

if (tunnel->version == L2TP_HDR_VER_3 &&
l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
goto error;

l2tp_recv_common(session, skb, ptr, optr, hdrflags, length);
l2tp_session_dec_refcount(session);

@@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel)
}
#endif

static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb,
unsigned char **ptr, unsigned char **optr)
{
int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session);

if (opt_len > 0) {
int off = *ptr - *optr;

if (!pskb_may_pull(skb, off + opt_len))
return -1;

if (skb->data != *optr) {
*optr = skb->data;
*ptr = skb->data + off;
}
}

return 0;
}

#define l2tp_printk(ptr, type, func, fmt, ...) \
do { \
if (((ptr)->debug) & (type)) \
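l2tp_v3_ensure_opt_in_linear() above pulls the session cookie and L2-specific bytes into the skb's linear area and then re-derives ptr/optr from skb->data, because pskb_may_pull() may relocate the packet data. The same bookkeeping problem shows up anywhere a buffer can move underneath a saved pointer; below is a user-space analogue using realloc(), where the cursor is re-based from a stored offset after the buffer grows. The pktbuf type and its helpers are invented for this sketch and are not part of any kernel or library API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pktbuf {
	unsigned char *data;
	size_t len;
};

/* Grow the buffer; like pskb_may_pull(), this may move the storage. */
static int pktbuf_ensure(struct pktbuf *b, size_t need)
{
	if (need <= b->len)
		return 0;
	unsigned char *p = realloc(b->data, need);
	if (!p)
		return -1;
	memset(p + b->len, 0, need - b->len);
	b->data = p;
	b->len = need;
	return 0;
}

int main(void)
{
	struct pktbuf b = { .data = calloc(16, 1), .len = 16 };
	if (!b.data)
		return 1;

	unsigned char *cursor = b.data + 6;    /* raw pointer into the buffer */
	size_t off = cursor - b.data;          /* remember it as an offset too */

	if (pktbuf_ensure(&b, 4096) == 0) {
		/* The storage may have moved: re-base the cursor from the
		 * saved offset instead of trusting the stale pointer. */
		cursor = b.data + off;
		printf("cursor now at offset %zu of %zu bytes\n",
		       (size_t)(cursor - b.data), b.len);
	}

	free(b.data);
	return 0;
}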
@@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}

if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
goto discard_sess;

l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
l2tp_session_dec_refcount(session);

@@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length);
}

if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr))
goto discard_sess;

l2tp_recv_common(session, skb, ptr, optr, 0, skb->len);
l2tp_session_dec_refcount(session);
@@ -1938,9 +1938,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
int head_need, bool may_encrypt)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_hdr *hdr;
bool enc_tailroom;
int tail_need = 0;

if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
hdr = (struct ieee80211_hdr *) skb->data;
enc_tailroom = may_encrypt &&
(sdata->crypto_tx_tailroom_needed_cnt ||
ieee80211_is_mgmt(hdr->frame_control));

if (enc_tailroom) {
tail_need = IEEE80211_ENCRYPT_TAILROOM;
tail_need -= skb_tailroom(skb);
tail_need = max_t(int, tail_need, 0);

@@ -1948,8 +1955,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,

if (skb_cloned(skb) &&
(!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
!skb_clone_writable(skb, ETH_HLEN) ||
(may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
!skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
else if (head_need || tail_need)
I802_DEBUG_INC(local->tx_expand_skb_head);
@@ -1007,6 +1007,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
}

if (nf_ct_key_equal(h, tuple, zone, net)) {
/* Tuple is taken already, so caller will need to find
 * a new source port to use.
 *
 * Only exception:
 * If the *original tuples* are identical, then both
 * conntracks refer to the same flow.
 * This is a rare situation, it can occur e.g. when
 * more than one UDP packet is sent from same socket
 * in different threads.
 *
 * Let nf_ct_resolve_clash() deal with this later.
 */
if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
continue;

NF_CT_STAT_INC_ATOMIC(net, found);
rcu_read_unlock();
return 1;
@@ -116,6 +116,23 @@ static void nft_trans_destroy(struct nft_trans *trans)
kfree(trans);
}

static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
{
struct net *net = ctx->net;
struct nft_trans *trans;

if (!nft_set_is_anonymous(set))
return;

list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
if (trans->msg_type == NFT_MSG_NEWSET &&
nft_trans_set(trans) == set) {
nft_trans_set_bound(trans) = true;
break;
}
}
}

static int nf_tables_register_hook(struct net *net,
const struct nft_table *table,
struct nft_chain *chain)

@@ -211,18 +228,6 @@ static int nft_delchain(struct nft_ctx *ctx)
return err;
}

/* either expr ops provide both activate/deactivate, or neither */
static bool nft_expr_check_ops(const struct nft_expr_ops *ops)
{
if (!ops)
return true;

if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate)))
return false;

return true;
}

static void nft_rule_expr_activate(const struct nft_ctx *ctx,
struct nft_rule *rule)
{

@@ -238,14 +243,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx,
}

static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
struct nft_rule *rule)
struct nft_rule *rule,
enum nft_trans_phase phase)
{
struct nft_expr *expr;

expr = nft_expr_first(rule);
while (expr != nft_expr_last(rule) && expr->ops) {
if (expr->ops->deactivate)
expr->ops->deactivate(ctx, expr);
expr->ops->deactivate(ctx, expr, phase);

expr = nft_expr_next(expr);
}

@@ -296,7 +302,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
nft_trans_destroy(trans);
return err;
}
nft_rule_expr_deactivate(ctx, rule);
nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE);

return 0;
}

@@ -1929,9 +1935,6 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk,
*/
int nft_register_expr(struct nft_expr_type *type)
{
if (!nft_expr_check_ops(type->ops))
return -EINVAL;

nfnl_lock(NFNL_SUBSYS_NFTABLES);
if (type->family == NFPROTO_UNSPEC)
list_add_tail_rcu(&type->list, &nf_tables_expressions);

@@ -2079,10 +2082,6 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
err = PTR_ERR(ops);
goto err1;
}
if (!nft_expr_check_ops(ops)) {
err = -EINVAL;
goto err1;
}
} else
ops = type->ops;

@@ -2511,7 +2510,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
static void nf_tables_rule_release(const struct nft_ctx *ctx,
struct nft_rule *rule)
{
nft_rule_expr_deactivate(ctx, rule);
nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE);
nf_tables_rule_destroy(ctx, rule);
}
@@ -3708,39 +3707,30 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
bind:
binding->chain = ctx->chain;
list_add_tail_rcu(&binding->list, &set->bindings);
nft_set_trans_bind(ctx, set);

return 0;
}
EXPORT_SYMBOL_GPL(nf_tables_bind_set);

void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding)
{
if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
nft_is_active(ctx->net, set))
list_add_tail_rcu(&set->list, &ctx->table->sets);

list_add_tail_rcu(&binding->list, &set->bindings);
}
EXPORT_SYMBOL_GPL(nf_tables_rebind_set);

void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding)
struct nft_set_binding *binding, bool event)
{
list_del_rcu(&binding->list);

if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
nft_is_active(ctx->net, set))
if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) {
list_del_rcu(&set->list);
if (event)
nf_tables_set_notify(ctx, set, NFT_MSG_DELSET,
GFP_KERNEL);
}
}
EXPORT_SYMBOL_GPL(nf_tables_unbind_set);

void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set)
{
if (list_empty(&set->bindings) && nft_set_is_anonymous(set) &&
nft_is_active(ctx->net, set)) {
nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
if (list_empty(&set->bindings) && nft_set_is_anonymous(set))
nft_set_destroy(set);
}
}
EXPORT_SYMBOL_GPL(nf_tables_destroy_set);

@@ -6535,6 +6525,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nf_tables_rule_notify(&trans->ctx,
nft_trans_rule(trans),
NFT_MSG_DELRULE);
nft_rule_expr_deactivate(&trans->ctx,
nft_trans_rule(trans),
NFT_TRANS_COMMIT);
break;
case NFT_MSG_NEWSET:
nft_clear(net, nft_trans_set(trans));

@@ -6621,7 +6614,8 @@ static void nf_tables_abort_release(struct nft_trans *trans)
nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
break;
case NFT_MSG_NEWSET:
nft_set_destroy(nft_trans_set(trans));
if (!nft_trans_set_bound(trans))
nft_set_destroy(nft_trans_set(trans));
break;
case NFT_MSG_NEWSETELEM:
nft_set_elem_destroy(nft_trans_elem_set(trans),

@@ -6682,7 +6676,9 @@ static int __nf_tables_abort(struct net *net)
case NFT_MSG_NEWRULE:
trans->ctx.chain->use--;
list_del_rcu(&nft_trans_rule(trans)->list);
nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans));
nft_rule_expr_deactivate(&trans->ctx,
nft_trans_rule(trans),
NFT_TRANS_ABORT);
break;
case NFT_MSG_DELRULE:
trans->ctx.chain->use++;

@@ -6692,7 +6688,8 @@ static int __nf_tables_abort(struct net *net)
break;
case NFT_MSG_NEWSET:
trans->ctx.table->use--;
list_del_rcu(&nft_trans_set(trans)->list);
if (!nft_trans_set_bound(trans))
list_del_rcu(&nft_trans_set(trans)->list);
break;
case NFT_MSG_DELSET:
trans->ctx.table->use++;
@@ -61,6 +61,21 @@ static struct nft_compat_net *nft_compat_pernet(struct net *net)
return net_generic(net, nft_compat_net_id);
}

static void nft_xt_get(struct nft_xt *xt)
{
/* refcount_inc() warns on 0 -> 1 transition, but we can't
 * init the reference count to 1 in .select_ops -- we can't
 * undo such an increase when another expression inside the same
 * rule fails afterwards.
 */
if (xt->listcnt == 0)
refcount_set(&xt->refcnt, 1);
else
refcount_inc(&xt->refcnt);

xt->listcnt++;
}

static bool nft_xt_put(struct nft_xt *xt)
{
if (refcount_dec_and_test(&xt->refcnt)) {

@@ -291,7 +306,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
return -EINVAL;

nft_xt = container_of(expr->ops, struct nft_xt, ops);
refcount_inc(&nft_xt->refcnt);
nft_xt_get(nft_xt);
return 0;
}

@@ -504,7 +519,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
return ret;

nft_xt = container_of(expr->ops, struct nft_xt, ops);
refcount_inc(&nft_xt->refcnt);
nft_xt_get(nft_xt);
return 0;
}

@@ -558,41 +573,16 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
__nft_match_destroy(ctx, expr, nft_expr_priv(expr));
}

static void nft_compat_activate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
struct list_head *h)
{
struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);

if (xt->listcnt == 0)
list_add(&xt->head, h);

xt->listcnt++;
}

static void nft_compat_activate_mt(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_compat_net *cn = nft_compat_pernet(ctx->net);

nft_compat_activate(ctx, expr, &cn->nft_match_list);
}

static void nft_compat_activate_tg(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_compat_net *cn = nft_compat_pernet(ctx->net);

nft_compat_activate(ctx, expr, &cn->nft_target_list);
}

static void nft_compat_deactivate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
const struct nft_expr *expr,
enum nft_trans_phase phase)
{
struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops);

if (--xt->listcnt == 0)
list_del_init(&xt->head);
if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) {
if (--xt->listcnt == 0)
list_del_init(&xt->head);
}
}

static void

@@ -848,7 +838,6 @@ nft_match_select_ops(const struct nft_ctx *ctx,
nft_match->ops.eval = nft_match_eval;
nft_match->ops.init = nft_match_init;
nft_match->ops.destroy = nft_match_destroy;
nft_match->ops.activate = nft_compat_activate_mt;
nft_match->ops.deactivate = nft_compat_deactivate;
nft_match->ops.dump = nft_match_dump;
nft_match->ops.validate = nft_match_validate;

@@ -866,7 +855,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,

nft_match->ops.size = matchsize;

nft_match->listcnt = 1;
nft_match->listcnt = 0;
list_add(&nft_match->head, &cn->nft_match_list);

return &nft_match->ops;

@@ -953,7 +942,6 @@ nft_target_select_ops(const struct nft_ctx *ctx,
nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
nft_target->ops.init = nft_target_init;
nft_target->ops.destroy = nft_target_destroy;
nft_target->ops.activate = nft_compat_activate_tg;
nft_target->ops.deactivate = nft_compat_deactivate;
nft_target->ops.dump = nft_target_dump;
nft_target->ops.validate = nft_target_validate;

@@ -964,7 +952,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
else
nft_target->ops.eval = nft_target_eval_xt;

nft_target->listcnt = 1;
nft_target->listcnt = 0;
list_add(&nft_target->head, &cn->nft_target_list);

return &nft_target->ops;
@@ -235,20 +235,17 @@ err1:
return err;
}

static void nft_dynset_activate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_dynset *priv = nft_expr_priv(expr);

nf_tables_rebind_set(ctx, priv->set, &priv->binding);
}

static void nft_dynset_deactivate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
const struct nft_expr *expr,
enum nft_trans_phase phase)
{
struct nft_dynset *priv = nft_expr_priv(expr);

nf_tables_unbind_set(ctx, priv->set, &priv->binding);
if (phase == NFT_TRANS_PREPARE)
return;

nf_tables_unbind_set(ctx, priv->set, &priv->binding,
phase == NFT_TRANS_COMMIT);
}

static void nft_dynset_destroy(const struct nft_ctx *ctx,

@@ -296,7 +293,6 @@ static const struct nft_expr_ops nft_dynset_ops = {
.eval = nft_dynset_eval,
.init = nft_dynset_init,
.destroy = nft_dynset_destroy,
.activate = nft_dynset_activate,
.deactivate = nft_dynset_deactivate,
.dump = nft_dynset_dump,
};

@@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx,
}

static void nft_immediate_deactivate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
const struct nft_expr *expr,
enum nft_trans_phase phase)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);

if (phase == NFT_TRANS_COMMIT)
return;

return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
}

@@ -121,20 +121,17 @@ static int nft_lookup_init(const struct nft_ctx *ctx,
return 0;
}

static void nft_lookup_activate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_lookup *priv = nft_expr_priv(expr);

nf_tables_rebind_set(ctx, priv->set, &priv->binding);
}

static void nft_lookup_deactivate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
const struct nft_expr *expr,
enum nft_trans_phase phase)
{
struct nft_lookup *priv = nft_expr_priv(expr);

nf_tables_unbind_set(ctx, priv->set, &priv->binding);
if (phase == NFT_TRANS_PREPARE)
return;

nf_tables_unbind_set(ctx, priv->set, &priv->binding,
phase == NFT_TRANS_COMMIT);
}

static void nft_lookup_destroy(const struct nft_ctx *ctx,

@@ -225,7 +222,6 @@ static const struct nft_expr_ops nft_lookup_ops = {
.size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)),
.eval = nft_lookup_eval,
.init = nft_lookup_init,
.activate = nft_lookup_activate,
.deactivate = nft_lookup_deactivate,
.destroy = nft_lookup_destroy,
.dump = nft_lookup_dump,

@@ -155,20 +155,17 @@ nla_put_failure:
return -1;
}

static void nft_objref_map_activate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
{
struct nft_objref_map *priv = nft_expr_priv(expr);

nf_tables_rebind_set(ctx, priv->set, &priv->binding);
}

static void nft_objref_map_deactivate(const struct nft_ctx *ctx,
const struct nft_expr *expr)
const struct nft_expr *expr,
enum nft_trans_phase phase)
{
struct nft_objref_map *priv = nft_expr_priv(expr);

nf_tables_unbind_set(ctx, priv->set, &priv->binding);
if (phase == NFT_TRANS_PREPARE)
return;

nf_tables_unbind_set(ctx, priv->set, &priv->binding,
phase == NFT_TRANS_COMMIT);
}

static void nft_objref_map_destroy(const struct nft_ctx *ctx,

@@ -185,7 +182,6 @@ static const struct nft_expr_ops nft_objref_map_ops = {
.size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)),
.eval = nft_objref_map_eval,
.init = nft_objref_map_init,
.activate = nft_objref_map_activate,
.deactivate = nft_objref_map_deactivate,
.destroy = nft_objref_map_destroy,
.dump = nft_objref_map_dump,
@@ -78,10 +78,10 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
__rds_create_bind_key(key, addr, port, scope_id);
rcu_read_lock();
rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
rds_sock_addref(rs);
else
if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) ||
!refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt)))
rs = NULL;

rcu_read_unlock();

rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
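The RDS hunk above only takes a reference when the socket's refcount is still non-zero (refcount_inc_not_zero), so a lookup under RCU cannot resurrect a socket that is already being torn down. A user-space sketch of that get-unless-zero idiom with C11 atomics; obj and its helpers are illustrative, not the kernel's refcount_t API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_uint refcnt;
};

/* Take a reference only if the object is still live (refcnt > 0). */
static bool obj_get_unless_zero(struct obj *o)
{
	unsigned int old = atomic_load(&o->refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return true;
		/* old was reloaded by the failed CAS; loop and retry */
	}
	return false;
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		printf("last reference dropped, object can be freed\n");
}

int main(void)
{
	struct obj live = { .refcnt = 1 };
	struct obj dying = { .refcnt = 0 };

	printf("live:  got ref? %d\n", obj_get_unless_zero(&live));
	printf("dying: got ref? %d\n", obj_get_unless_zero(&dying));

	obj_put(&live);  /* drop the reference we just took */
	obj_put(&live);  /* drop the initial reference */
	return 0;
}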
@@ -596,6 +596,7 @@ error_requeue_call:
}
error_no_call:
release_sock(&rx->sk);
error_trace:
trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
return ret;

@@ -604,7 +605,7 @@ wait_interrupted:
wait_error:
finish_wait(sk_sleep(&rx->sk), &wait);
call = NULL;
goto error_no_call;
goto error_trace;
}

/**
@@ -1371,7 +1371,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (!tc_skip_hw(fnew->flags)) {
err = fl_hw_replace_filter(tp, fnew, extack);
if (err)
goto errout_mask;
goto errout_mask_ht;
}

if (!tc_in_hw(fnew->flags))

@@ -1401,6 +1401,10 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
kfree(mask);
return 0;

errout_mask_ht:
rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
fnew->mask->filter_ht_params);

errout_mask:
fl_mask_put(head, fnew->mask, false);

@@ -2027,7 +2027,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_transport *transport = NULL;
struct sctp_sndrcvinfo _sinfo, *sinfo;
struct sctp_association *asoc;
struct sctp_association *asoc, *tmp;
struct sctp_cmsgs cmsgs;
union sctp_addr *daddr;
bool new = false;

@@ -2053,7 +2053,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)

/* SCTP_SENDALL process */
if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
list_for_each_entry(asoc, &ep->asocs, asocs) {
list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
msg_len);
if (err == 0)