Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Check klogctl failure correctly, from Colin Ian King.
 2) Prevent OOM when under memory pressure in flowcache, from Steffen Klassert.
 3) Fix info leak in llc and rtnetlink ifmap code, from Kangjie Lu.
 4) Memory barrier and multicast handling fixes in bnxt_en, from Michael Chan.
 5) Endianness bug in mlx5, from Daniel Jurgens.
 6) Fix disconnect handling in VSOCK, from Ian Campbell.
 7) Fix locking of netdev list walking in get_bridge_ifindices(), from Nikolay Aleksandrov.
 8) Bridge multicast MLD parser can look at wrong packet offsets, fix from Linus Lüssing.
 9) Fix chip hang in qede driver, from Sudarsana Reddy Kalluru.
10) Fix missing setting of encapsulation before inner handling completes in udp_offload code, from Jarno Rajahalme.
11) Missing rollbacks during LAG join and flood configuration failures in mlxsw driver, from Ido Schimmel.
12) Fix error code checks in netxen driver, from Dan Carpenter.
13) Fix key size in new macsec driver, from Sabrina Dubroca.
14) Fix mlx5/VXLAN dependencies, from Arnd Bergmann.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (29 commits)
  net/mlx5e: make VXLAN support conditional
  Revert "net/mlx5: Kconfig: Fix MLX5_EN/VXLAN build issue"
  macsec: key identifier is 128 bits, not 64
  Documentation/networking: more accurate LCO explanation
  macvtap: segmented packet is consumed
  tools: bpf_jit_disasm: check for klogctl failure
  qede: uninitialized variable in qede_start_xmit()
  netxen: netxen_rom_fast_read() doesn't return -1
  netxen: reversed condition in netxen_nic_set_link_parameters()
  netxen: fix error handling in netxen_get_flash_block()
  mlxsw: spectrum: Add missing rollback in flood configuration
  mlxsw: spectrum: Fix rollback order in LAG join failure
  udp_offload: Set encapsulation before inner completes.
  udp_tunnel: Remove redundant udp_tunnel_gro_complete().
  qede: prevent chip hang when increasing channels
  net: ipv6: tcp reset, icmp need to consider L3 domain
  bridge: fix igmp / mld query parsing
  net: bridge: fix old ioctl unlocked net device walk
  VSOCK: do not disconnect socket when peer has shutdown SEND only
  net/mlx4_en: Fix endianness bug in IPV6 csum calculation
  ...
commit 26acc792c9
@@ -69,18 +69,18 @@ LCO: Local Checksum Offload
 LCO is a technique for efficiently computing the outer checksum of an
 encapsulated datagram when the inner checksum is due to be offloaded.
 The ones-complement sum of a correctly checksummed TCP or UDP packet is
-equal to the sum of the pseudo header, because everything else gets
-'cancelled out' by the checksum field. This is because the sum was
+equal to the complement of the sum of the pseudo header, because everything
+else gets 'cancelled out' by the checksum field. This is because the sum was
 complemented before being written to the checksum field.
 More generally, this holds in any case where the 'IP-style' ones complement
 checksum is used, and thus any checksum that TX Checksum Offload supports.
 That is, if we have set up TX Checksum Offload with a start/offset pair, we
-know that _after the device has filled in that checksum_, the ones
+know that after the device has filled in that checksum, the ones
 complement sum from csum_start to the end of the packet will be equal to
-_whatever value we put in the checksum field beforehand_. This allows us
-to compute the outer checksum without looking at the payload: we simply
-stop summing when we get to csum_start, then add the 16-bit word at
-(csum_start + csum_offset).
+the complement of whatever value we put in the checksum field beforehand.
+This allows us to compute the outer checksum without looking at the payload:
+we simply stop summing when we get to csum_start, then add the complement of
+the 16-bit word at (csum_start + csum_offset).
 Then, when the true inner checksum is filled in (either by hardware or by
 skb_checksum_help()), the outer checksum will become correct by virtue of
 the arithmetic.

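The corrected LCO identity is easy to check outside the kernel. The following is a minimal user-space sketch, not kernel code: the segment contents, checksum offset, pseudo-header sum and outer-header sum are made-up values for illustration only. It seeds the inner checksum field with a pseudo-header sum (as the stack does before handing a packet to TX checksum offload), lets a pretend device write the real checksum, and then verifies that the ones-complement sum over the finished segment equals the complement of the seed, so an outer checksum can be derived without walking the payload.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Ones-complement sum of big-endian 16-bit words (len in bytes, even),
 * folded to 16 bits, starting from an arbitrary 32-bit seed.
 */
static uint16_t ocsum(const uint8_t *p, size_t len, uint32_t seed)
{
        uint32_t sum = seed;

        for (size_t i = 0; i < len; i += 2)
                sum += (uint32_t)p[i] << 8 | p[i + 1];
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

int main(void)
{
        uint8_t seg[32];                  /* pretend inner segment, starts at csum_start */
        size_t csum_offset = 6;           /* where the inner checksum field lives        */
        uint16_t pseudo = 0x1234;         /* pretend inner pseudo-header sum             */
        uint32_t outer_hdr_sum = 0x0abc;  /* pretend sum of the outer headers            */

        for (size_t i = 0; i < sizeof(seg); i++)
                seg[i] = (uint8_t)(i * 37 + 11);

        /* Step 1: seed the checksum field with the pseudo-header sum. */
        seg[csum_offset] = pseudo >> 8;
        seg[csum_offset + 1] = pseudo & 0xff;

        /* Step 2: "the device" writes the complement of the sum over the
         * whole segment (seed included) into the checksum field.
         */
        uint16_t dev_csum = (uint16_t)~ocsum(seg, sizeof(seg), 0);
        seg[csum_offset] = dev_csum >> 8;
        seg[csum_offset + 1] = dev_csum & 0xff;

        /* LCO: the sum over the finished segment is the complement of
         * whatever value was placed in the checksum field beforehand.
         */
        assert(ocsum(seg, sizeof(seg), 0) == (uint16_t)~pseudo);

        /* So the outer checksum can stop summing at csum_start and just add
         * the complement of the seeded value instead of walking the payload.
         */
        uint16_t outer_full = (uint16_t)~ocsum(seg, sizeof(seg), outer_hdr_sum);
        uint16_t outer_lco  = (uint16_t)~ocsum(NULL, 0, outer_hdr_sum + (uint16_t)~pseudo);

        assert(outer_full == outer_lco);
        printf("LCO shortcut matches the full sum: 0x%04x\n", outer_full);
        return 0;
}
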
@@ -1595,21 +1595,22 @@ static int xgene_enet_probe(struct platform_device *pdev)
 
         ret = xgene_enet_init_hw(pdata);
         if (ret)
-                goto err;
+                goto err_netdev;
 
         mac_ops = pdata->mac_ops;
         if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
                 ret = xgene_enet_mdio_config(pdata);
                 if (ret)
-                        goto err;
+                        goto err_netdev;
         } else {
                 INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
         }
 
         xgene_enet_napi_add(pdata);
         return 0;
-err:
+err_netdev:
         unregister_netdev(ndev);
+err:
         free_netdev(ndev);
         return ret;
 }

@@ -1388,6 +1388,10 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                 if (!TX_CMP_VALID(txcmp, raw_cons))
                         break;
 
+                /* The valid test of the entry must be done first before
+                 * reading any further.
+                 */
+                rmb();
                 if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
                         tx_pkts++;
                         /* return full budget so NAPI will complete. */

@@ -4038,9 +4042,11 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
 }
 
 static int bnxt_cfg_rx_mode(struct bnxt *);
+static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
 
 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 {
+        struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
         int rc = 0;
 
         if (irq_re_init) {

@@ -4096,13 +4102,22 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
                 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
                 goto err_out;
         }
-        bp->vnic_info[0].uc_filter_count = 1;
+        vnic->uc_filter_count = 1;
 
-        bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+        vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
 
         if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
-                bp->vnic_info[0].rx_mask |=
-                                CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+        if (bp->dev->flags & IFF_ALLMULTI) {
+                vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+                vnic->mc_list_count = 0;
+        } else {
+                u32 mask = 0;
+
+                bnxt_mc_list_updated(bp, &mask);
+                vnic->rx_mask |= mask;
+        }
 
         rc = bnxt_cfg_rx_mode(bp);
         if (rc)

@@ -1521,9 +1521,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
         struct fec_enet_private *fep = netdev_priv(ndev);
 
         for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
-                clear_bit(queue_id, &fep->work_rx);
-                pkt_received += fec_enet_rx_queue(ndev,
+                int ret;
+
+                ret = fec_enet_rx_queue(ndev,
                                         budget - pkt_received, queue_id);
+
+                if (ret < budget - pkt_received)
+                        clear_bit(queue_id, &fep->work_rx);
+
+                pkt_received += ret;
         }
         return pkt_received;
 }

@@ -707,7 +707,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 
         if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
                 return -1;
-        hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
+        hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
 
         csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
                                        sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);

@@ -14,7 +14,6 @@ config MLX5_CORE_EN
         bool "Mellanox Technologies ConnectX-4 Ethernet support"
         depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
         select PTP_1588_CLOCK
-        select VXLAN if MLX5_CORE=y
         default n
         ---help---
           Ethernet support in Mellanox Technologies ConnectX-4 NIC.

@@ -32,3 +31,10 @@ config MLX5_CORE_EN_DCB
           This flag is depended on the kernel's DCB support.
 
           If unsure, set to Y
+
+config MLX5_CORE_EN_VXLAN
+        bool "VXLAN offloads Support"
+        default y
+        depends on MLX5_CORE_EN && VXLAN && !(MLX5_CORE=y && VXLAN=m)
+        ---help---
+          Say Y here if you want to use VXLAN offloads in the driver.

@@ -6,6 +6,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
                 en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
-                en_txrx.o en_clock.o vxlan.o en_tc.o
+                en_txrx.o en_clock.o en_tc.o
 
+mlx5_core-$(CONFIG_MLX5_CORE_EN_VXLAN) += vxlan.o
 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o

@@ -564,7 +564,9 @@ struct mlx5e_priv {
         struct mlx5e_flow_tables fts;
         struct mlx5e_eth_addr_db eth_addr;
         struct mlx5e_vlan_db vlan;
+#ifdef CONFIG_MLX5_CORE_EN_VXLAN
         struct mlx5e_vxlan_db vxlan;
+#endif
 
         struct mlx5e_params params;
         struct workqueue_struct *wq;

@@ -2149,6 +2149,7 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
                                             vf_stats);
 }
 
+#if IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN)
 static void mlx5e_add_vxlan_port(struct net_device *netdev,
                                  sa_family_t sa_family, __be16 port)
 {

@@ -2220,6 +2221,7 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
 
         return features;
 }
+#endif
 
 static const struct net_device_ops mlx5e_netdev_ops_basic = {
         .ndo_open = mlx5e_open,

@@ -2251,9 +2253,11 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
         .ndo_set_features = mlx5e_set_features,
         .ndo_change_mtu = mlx5e_change_mtu,
         .ndo_do_ioctl = mlx5e_ioctl,
+#ifdef CONFIG_MLX5_CORE_EN_VXLAN
         .ndo_add_vxlan_port = mlx5e_add_vxlan_port,
         .ndo_del_vxlan_port = mlx5e_del_vxlan_port,
         .ndo_features_check = mlx5e_features_check,
+#endif
         .ndo_set_vf_mac = mlx5e_set_vf_mac,
         .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
         .ndo_get_vf_config = mlx5e_get_vf_config,

@@ -48,14 +48,21 @@ struct mlx5e_vxlan_work {
 
 static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
 {
-        return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
+        return IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN) &&
+                (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
                 mlx5_core_is_pf(mdev));
 }
 
+#ifdef CONFIG_MLX5_CORE_EN_VXLAN
 void mlx5e_vxlan_init(struct mlx5e_priv *priv);
+void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
+#else
+static inline void mlx5e_vxlan_init(struct mlx5e_priv *priv) {}
+static inline void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) {}
+#endif
+
 void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
                             u16 port, int add);
 struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
-void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
 
 #endif /* __MLX5_VXLAN_H__ */

@@ -2541,11 +2541,11 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
         lag->ref_count++;
         return 0;
 
-err_col_port_enable:
-        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
 err_col_port_add:
         if (!lag->ref_count)
                 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+err_col_port_enable:
+        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
         return err;
 }
 

@@ -214,7 +214,15 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
         mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
                             table_type, range, local_port, set);
         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+        if (err)
+                goto err_flood_bm_set;
+        else
+                goto buffer_out;
 
+err_flood_bm_set:
+        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
+                            table_type, range, local_port, !set);
+        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
 buffer_out:
         kfree(sftr_pl);
         return err;

@@ -1015,20 +1015,24 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
 {
         int i, v, addr;
         __le32 *ptr32;
+        int ret;
 
         addr = base;
         ptr32 = buf;
         for (i = 0; i < size / sizeof(u32); i++) {
-                if (netxen_rom_fast_read(adapter, addr, &v) == -1)
-                        return -1;
+                ret = netxen_rom_fast_read(adapter, addr, &v);
+                if (ret)
+                        return ret;
+
                 *ptr32 = cpu_to_le32(v);
                 ptr32++;
                 addr += sizeof(u32);
         }
         if ((char *)buf + size > (char *)ptr32) {
                 __le32 local;
-                if (netxen_rom_fast_read(adapter, addr, &v) == -1)
-                        return -1;
+                ret = netxen_rom_fast_read(adapter, addr, &v);
+                if (ret)
+                        return ret;
+
                 local = cpu_to_le32(v);
                 memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32);
         }

@@ -1940,7 +1944,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
                         if (adapter->phy_read &&
                             adapter->phy_read(adapter,
                                               NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
-                                              &autoneg) != 0)
+                                              &autoneg) == 0)
                                 adapter->link_autoneg = autoneg;
                 } else
                         goto link_down;

@@ -852,7 +852,8 @@ netxen_check_options(struct netxen_adapter *adapter)
         ptr32 = (__le32 *)&serial_num;
         offset = NX_FW_SERIAL_NUM_OFFSET;
         for (i = 0; i < 8; i++) {
-                if (netxen_rom_fast_read(adapter, offset, &val) == -1) {
+                err = netxen_rom_fast_read(adapter, offset, &val);
+                if (err) {
                         dev_err(&pdev->dev, "error reading board info\n");
                         adapter->driver_mismatch = 1;
                         return;

@@ -421,7 +421,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
         u8 xmit_type;
         u16 idx;
         u16 hlen;
-        bool data_split;
+        bool data_split = false;
 
         /* Get tx-queue context and netdev index */
         txq_index = skb_get_queue_mapping(skb);

@@ -1938,8 +1938,6 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
         edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
         edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
 
-        DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");
-
         SET_NETDEV_DEV(ndev, &pdev->dev);
 
         memset(&edev->stats, 0, sizeof(edev->stats));

@@ -2090,9 +2088,9 @@ static void qede_update_pf_params(struct qed_dev *cdev)
 {
         struct qed_pf_params pf_params;
 
-        /* 16 rx + 16 tx */
+        /* 64 rx + 64 tx */
         memset(&pf_params, 0, sizeof(struct qed_pf_params));
-        pf_params.eth_pf_params.num_cons = 32;
+        pf_params.eth_pf_params.num_cons = 128;
         qed_ops->common->update_pf_params(cdev, &pf_params);
 }
 

@@ -504,8 +504,6 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
         int gh_len;
         int err = -ENOSYS;
 
-        udp_tunnel_gro_complete(skb, nhoff);
-
         gh = (struct genevehdr *)(skb->data + nhoff);
         gh_len = geneve_hlen(gh);
         type = gh->proto_type;

@@ -516,6 +514,9 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
         err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
 
         rcu_read_unlock();
+
+        skb_set_inner_mac_header(skb, nhoff + gh_len);
+
         return err;
 }
 

@@ -85,7 +85,7 @@ struct gcm_iv {
  * @tfm: crypto struct, key storage
  */
 struct macsec_key {
-        u64 id;
+        u8 id[MACSEC_KEYID_LEN];
         struct crypto_aead *tfm;
 };
 

@@ -1529,7 +1529,8 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
         [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
         [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
         [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
-        [MACSEC_SA_ATTR_KEYID] = { .type = NLA_U64 },
+        [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
+                                   .len = MACSEC_KEYID_LEN, },
         [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
                                  .len = MACSEC_MAX_KEY_LEN, },
 };

@@ -1576,6 +1577,9 @@ static bool validate_add_rxsa(struct nlattr **attrs)
                         return false;
         }
 
+        if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
+                return false;
+
         return true;
 }
 

@@ -1641,7 +1645,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
         if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
                 rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
 
-        rx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]);
+        nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
         rx_sa->sc = rx_sc;
         rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
 

@@ -1722,6 +1726,9 @@ static bool validate_add_txsa(struct nlattr **attrs)
                         return false;
         }
 
+        if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
+                return false;
+
         return true;
 }
 

@@ -1777,7 +1784,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
                 return -ENOMEM;
         }
 
-        tx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]);
+        nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
 
         spin_lock_bh(&tx_sa->lock);
         tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);

|
|||
|
||||
if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
|
||||
nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
|
||||
nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, tx_sa->key.id) ||
|
||||
nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
|
||||
nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
|
||||
nla_nest_cancel(skb, txsa_nest);
|
||||
nla_nest_cancel(skb, txsa_list);
|
||||
|
@@ -2419,7 +2426,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
 
                 if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
                     nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
-                    nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, rx_sa->key.id) ||
+                    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
                     nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
                         nla_nest_cancel(skb, rxsa_nest);
                         nla_nest_cancel(skb, rxsc_nest);

@@ -373,7 +373,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
                         goto wake_up;
                 }
 
-                kfree_skb(skb);
+                consume_skb(skb);
                 while (segs) {
                         struct sk_buff *nskb = segs->next;
 

@@ -616,8 +616,9 @@ out:
 static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
                               struct udp_offload *uoff)
 {
-        udp_tunnel_gro_complete(skb, nhoff);
-
+        /* Sets 'skb->inner_mac_header' since we are always called with
+         * 'skb->encapsulation' set.
+         */
         return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
 }
 

@@ -2164,6 +2164,9 @@ struct packet_offload {
 
 struct udp_offload;
 
+/* 'skb->encapsulation' is set before gro_complete() is called.  gro_complete()
+ * must set 'skb->inner_mac_header' to the beginning of tunnel payload.
+ */
 struct udp_offload_callbacks {
         struct sk_buff **(*gro_receive)(struct sk_buff **head,
                                         struct sk_buff *skb,

@@ -80,6 +80,7 @@ struct netns_xfrm {
         struct flow_cache flow_cache_global;
         atomic_t flow_cache_genid;
         struct list_head flow_cache_gc_list;
+        atomic_t flow_cache_gc_count;
         spinlock_t flow_cache_gc_lock;
         struct work_struct flow_cache_gc_work;
         struct work_struct flow_cache_flush_work;

@@ -106,15 +106,6 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
         return iptunnel_handle_offloads(skb, type);
 }
 
-static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
-{
-        struct udphdr *uh;
-
-        uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
-        skb_shinfo(skb)->gso_type |= uh->check ?
-                                SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
-}
-
 static inline void udp_tunnel_encap_enable(struct socket *sock)
 {
 #if IS_ENABLED(CONFIG_IPV6)

@@ -19,6 +19,8 @@
 
 #define MACSEC_MAX_KEY_LEN 128
 
+#define MACSEC_KEYID_LEN 16
+
 #define MACSEC_DEFAULT_CIPHER_ID   0x0080020001000001ULL
 #define MACSEC_DEFAULT_CIPHER_ALT  0x0080C20001000001ULL
 

@@ -77,7 +79,7 @@ enum macsec_sa_attrs {
         MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */
         MACSEC_SA_ATTR_PN,     /* config/dump, u32 */
         MACSEC_SA_ATTR_KEY,    /* config, data */
-        MACSEC_SA_ATTR_KEYID,  /* config/dump, u64 */
+        MACSEC_SA_ATTR_KEYID,  /* config/dump, 128-bit */
         MACSEC_SA_ATTR_STATS,  /* dump, nested, macsec_sa_stats_attr */
         __MACSEC_SA_ATTR_END,
         NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END,

@@ -21,18 +21,19 @@
 #include <asm/uaccess.h>
 #include "br_private.h"
 
-/* called with RTNL */
 static int get_bridge_ifindices(struct net *net, int *indices, int num)
 {
         struct net_device *dev;
         int i = 0;
 
-        for_each_netdev(net, dev) {
+        rcu_read_lock();
+        for_each_netdev_rcu(net, dev) {
                 if (i >= num)
                         break;
                 if (dev->priv_flags & IFF_EBRIDGE)
                         indices[i++] = dev->ifindex;
         }
+        rcu_read_unlock();
 
         return i;
 }

@@ -1279,6 +1279,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
         struct br_ip saddr;
         unsigned long max_delay;
         unsigned long now = jiffies;
+        unsigned int offset = skb_transport_offset(skb);
         __be32 group;
         int err = 0;
 

@@ -1289,14 +1290,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 
         group = ih->group;
 
-        if (skb->len == sizeof(*ih)) {
+        if (skb->len == offset + sizeof(*ih)) {
                 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
 
                 if (!max_delay) {
                         max_delay = 10 * HZ;
                         group = 0;
                 }
-        } else if (skb->len >= sizeof(*ih3)) {
+        } else if (skb->len >= offset + sizeof(*ih3)) {
                 ih3 = igmpv3_query_hdr(skb);
                 if (ih3->nsrcs)
                         goto out;

@@ -1357,6 +1358,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
         struct br_ip saddr;
         unsigned long max_delay;
         unsigned long now = jiffies;
+        unsigned int offset = skb_transport_offset(skb);
         const struct in6_addr *group = NULL;
         bool is_general_query;
         int err = 0;

@@ -1366,8 +1368,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
             (port && port->state == BR_STATE_DISABLED))
                 goto out;
 
-        if (skb->len == sizeof(*mld)) {
-                if (!pskb_may_pull(skb, sizeof(*mld))) {
+        if (skb->len == offset + sizeof(*mld)) {
+                if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
                         err = -EINVAL;
                         goto out;
                 }

@@ -1376,7 +1378,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
                 if (max_delay)
                         group = &mld->mld_mca;
         } else {
-                if (!pskb_may_pull(skb, sizeof(*mld2q))) {
+                if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
                         err = -EINVAL;
                         goto out;
                 }

@@ -92,8 +92,11 @@ static void flow_cache_gc_task(struct work_struct *work)
         list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
         spin_unlock_bh(&xfrm->flow_cache_gc_lock);
 
-        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
+        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
                 flow_entry_kill(fce, xfrm);
+                atomic_dec(&xfrm->flow_cache_gc_count);
+                WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
+        }
 }
 
 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,

@@ -101,6 +104,7 @@ static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                      struct netns_xfrm *xfrm)
 {
         if (deleted) {
+                atomic_add(deleted, &xfrm->flow_cache_gc_count);
                 fcp->hash_count -= deleted;
                 spin_lock_bh(&xfrm->flow_cache_gc_lock);
                 list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);

@@ -232,6 +236,13 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                 if (fcp->hash_count > fc->high_watermark)
                         flow_cache_shrink(fc, fcp);
 
+                if (fcp->hash_count > 2 * fc->high_watermark ||
+                    atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
+                        atomic_inc(&net->xfrm.flow_cache_genid);
+                        flo = ERR_PTR(-ENOBUFS);
+                        goto ret_object;
+                }
+
                 fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                 if (fle) {
                         fle->net = net;

@@ -446,6 +457,7 @@ int flow_cache_init(struct net *net)
         INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
         INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
         mutex_init(&net->xfrm.flow_flush_sem);
+        atomic_set(&net->xfrm.flow_cache_gc_count, 0);
 
         fc->hash_shift = 10;
         fc->low_watermark = 2 * flow_cache_hash_size(fc);

@@ -1180,14 +1180,16 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 
 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
 {
-        struct rtnl_link_ifmap map = {
-                .mem_start = dev->mem_start,
-                .mem_end = dev->mem_end,
-                .base_addr = dev->base_addr,
-                .irq = dev->irq,
-                .dma = dev->dma,
-                .port = dev->if_port,
-        };
+        struct rtnl_link_ifmap map;
+
+        memset(&map, 0, sizeof(map));
+        map.mem_start = dev->mem_start;
+        map.mem_end = dev->mem_end;
+        map.base_addr = dev->base_addr;
+        map.irq = dev->irq;
+        map.dma = dev->dma;
+        map.port = dev->if_port;
+
         if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
                 return -EMSGSIZE;
 

@@ -228,8 +228,6 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff,
         int err = -ENOSYS;
         const struct net_offload **offloads;
 
-        udp_tunnel_gro_complete(skb, nhoff);
-
         rcu_read_lock();
         offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
         ops = rcu_dereference(offloads[proto]);

@@ -238,6 +236,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff,
 
         err = ops->callbacks.gro_complete(skb, nhoff);
 
+        skb_set_inner_mac_header(skb, nhoff);
+
 out_unlock:
         rcu_read_unlock();
 

@@ -414,6 +414,8 @@ static int gue_gro_complete(struct sk_buff *skb, int nhoff,
 
         err = ops->callbacks.gro_complete(skb, nhoff + guehlen);
 
+        skb_set_inner_mac_header(skb, nhoff + guehlen);
+
 out_unlock:
         rcu_read_unlock();
         return err;

@@ -156,6 +156,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
         struct dst_entry *dst = skb_dst(skb);
         struct net_device *tdev;        /* Device to other host */
         int err;
+        int mtu;
 
         if (!dst) {
                 dev->stats.tx_carrier_errors++;

@@ -192,6 +193,23 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
                         tunnel->err_count = 0;
         }
 
+        mtu = dst_mtu(dst);
+        if (skb->len > mtu) {
+                skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+                if (skb->protocol == htons(ETH_P_IP)) {
+                        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                                  htonl(mtu));
+                } else {
+                        if (mtu < IPV6_MIN_MTU)
+                                mtu = IPV6_MIN_MTU;
+
+                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+                }
+
+                dst_release(dst);
+                goto tx_error;
+        }
+
         skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
         skb_dst_set(skb, dst);
         skb->dev = skb_dst(skb)->dev;

@@ -399,6 +399,11 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
 
         uh->len = newlen;
 
+        /* Set encapsulation before calling into inner gro_complete() functions
+         * to make them set up the inner offsets.
+         */
+        skb->encapsulation = 1;
+
         rcu_read_lock();
 
         uo_priv = rcu_dereference(udp_offload_base);

@@ -421,9 +426,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
         if (skb->remcsum_offload)
                 skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
 
-        skb->encapsulation = 1;
-        skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr));
-
         return err;
 }
 

@@ -445,6 +445,8 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 
         if (__ipv6_addr_needs_scope_id(addr_type))
                 iif = skb->dev->ifindex;
+        else
+                iif = l3mdev_master_ifindex(skb->dev);
 
         /*
          *      Must not send error if the source does not uniquely

@@ -499,9 +501,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
         else if (!fl6.flowi6_oif)
                 fl6.flowi6_oif = np->ucast_oif;
 
-        if (!fl6.flowi6_oif)
-                fl6.flowi6_oif = l3mdev_master_ifindex(skb->dev);
-
         dst = icmpv6_route_lookup(net, skb, sk, &fl6);
         if (IS_ERR(dst))
                 goto out;

@@ -810,8 +810,13 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
         fl6.flowi6_proto = IPPROTO_TCP;
         if (rt6_need_strict(&fl6.daddr) && !oif)
                 fl6.flowi6_oif = tcp_v6_iif(skb);
-        else
+        else {
+                if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
+                        oif = skb->skb_iif;
+
                 fl6.flowi6_oif = oif;
+        }
+
         fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
         fl6.fl6_dport = t1->dest;
         fl6.fl6_sport = t1->source;

@@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
         if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
                 struct llc_pktinfo info;
 
+                memset(&info, 0, sizeof(info));
                 info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
                 llc_pdu_decode_dsap(skb, &info.lpi_sap);
                 llc_pdu_decode_da(skb, info.lpi_mac);

@@ -1808,27 +1808,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
         else if (sk->sk_shutdown & RCV_SHUTDOWN)
                 err = 0;
 
-        if (copied > 0) {
-                /* We only do these additional bookkeeping/notification steps
-                 * if we actually copied something out of the queue pair
-                 * instead of just peeking ahead.
-                 */
-
-                if (!(flags & MSG_PEEK)) {
-                        /* If the other side has shutdown for sending and there
-                         * is nothing more to read, then modify the socket
-                         * state.
-                         */
-                        if (vsk->peer_shutdown & SEND_SHUTDOWN) {
-                                if (vsock_stream_has_data(vsk) <= 0) {
-                                        sk->sk_state = SS_UNCONNECTED;
-                                        sock_set_flag(sk, SOCK_DONE);
-                                        sk->sk_state_change(sk);
-                                }
-                        }
-                }
+        if (copied > 0)
                 err = copied;
-        }
 
 out:
         release_sock(sk);

@@ -99,6 +99,9 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
 
                 skb_dst_force(skb);
 
+                /* Inner headers are invalid now. */
+                skb->encapsulation = 0;
+
                 err = x->type->output(x, skb);
                 if (err == -EINPROGRESS)
                         goto out;

@@ -98,6 +98,9 @@ static char *get_klog_buff(unsigned int *klen)
         char *buff;
 
         len = klogctl(CMD_ACTION_SIZE_BUFFER, NULL, 0);
+        if (len < 0)
+                return NULL;
+
         buff = malloc(len);
         if (!buff)
                 return NULL;