Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Fix ieee802154 atusb driver use-after-free, from Johan Hovold.

 2) Need to validate TCA_CBQ_WRROPT netlink attributes, from Eric
    Dumazet.

 3) Fix txq NULL dereference in mac80211, from Miaoqing Pan.

 4) ionic driver needs to select NET_DEVLINK, from Arnd Bergmann.

 5) Need to disable bh during nft_connlimit GC, from Pablo Neira Ayuso.

 6) Avoid division by zero in taprio scheduler, from Vladimir Oltean (see
    the first sketch after this list).

 7) Various xgmac fixes in stmmac driver from Jose Abreu.

 8) Avoid 64-bit division in mlx5 leading to link errors on 32-bit, from
    Michal Kubecek.

 9) Fix bad VLAN check in rtl8366 DSA driver, from Linus Walleij.

10) Fix sleep while atomic in sja1105, from Vladimir Oltean.

11) Suspend/resume deadlock in stmmac, from Thierry Reding.

12) Various UDP GSO fixes from Josh Hunt (the segment-count condition is
    sketched after this list).

13) Fix slab-out-of-bounds access in tcp_zerocopy_receive(), from Eric
    Dumazet (the loop-ordering fix is sketched after this list).

14) Fix OOPS in __ipv6_ifa_notify(), from David Ahern.

15) Memory leak in NFC's llcp_sock_bind, from Eric Dumazet.
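
For item 6 above, the sch_cbs.c and sch_taprio.c hunks further down both come
down to the same guard: never divide by a link speed that ethtool reported as
0 or SPEED_UNKNOWN. A minimal userspace C sketch of that pattern, under an
assumed fallback value (picos_per_byte() here and FALLBACK_MBPS are
illustrative, not the qdisc's own names):

#include <stdint.h>
#include <stdio.h>

#define SPEED_UNKNOWN  (-1)
#define USEC_PER_SEC   1000000LL
#define FALLBACK_MBPS  1000          /* hypothetical default, not the kernel's */

/* Picoseconds per byte for a link speed in Mbit/s, guarding against a
 * zero or unknown speed before the division, like the taprio fix does. */
static int64_t picos_per_byte(int reported_mbps)
{
	int64_t speed = FALLBACK_MBPS;

	if (reported_mbps && reported_mbps != SPEED_UNKNOWN)
		speed = reported_mbps;

	return (USEC_PER_SEC * 8) / speed;
}

int main(void)
{
	printf("%lld\n", (long long)picos_per_byte(0));             /* falls back */
	printf("%lld\n", (long long)picos_per_byte(SPEED_UNKNOWN)); /* falls back */
	printf("%lld\n", (long long)picos_per_byte(10000));         /* 10G => 800 */
	return 0;
}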
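
For item 12 above, the udp_send_skb() and udp_v6_send_skb() hunks below now
set the GSO fields only when the payload is larger than one gso_size chunk,
i.e. when segmentation would really produce more than one segment. A hedged,
standalone sketch of that condition (function names are illustrative, not the
kernel's):

#include <stdbool.h>
#include <stdio.h>

/* True only when a UDP payload of datalen bytes would be cut into more
 * than one segment of gso_size bytes -- the "# of segs > 1" condition. */
static bool udp_wants_gso(int datalen, int gso_size)
{
	return gso_size > 0 && datalen > gso_size;
}

/* DIV_ROUND_UP(datalen, gso_size), as used for gso_segs in the fix. */
static int udp_gso_segs(int datalen, int gso_size)
{
	return (datalen + gso_size - 1) / gso_size;
}

int main(void)
{
	printf("%d\n", udp_wants_gso(1200, 1400));             /* 0: one segment */
	if (udp_wants_gso(3000, 1400))
		printf("segs=%d\n", udp_gso_segs(3000, 1400)); /* segs=3 */
	return 0;
}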
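
For item 13 above, the tcp.c hunk below fixes the out-of-bounds read by never
touching a fragment descriptor until the remaining byte count is known to be
non-zero: the skb_frag_size() reads move into the short-circuited loop
condition. A small hedged illustration of that ordering on a plain array
(PAGE_SIZE here and the frag layout are stand-ins, not the kernel structures):

#include <stdio.h>

#define PAGE_SIZE 4096

struct frag { int size; int offset; };

/* Skip fragments that are not full, offset-0 pages. The '&&' chain makes
 * sure frags[i] is only read while remaining is non-zero and i is in
 * range, mirroring the ordering the tcp_zerocopy_receive() fix enforces. */
static int skip_unmappable_frags(const struct frag *frags, int nfrags,
				 int remaining)
{
	int i = 0;

	while (remaining && i < nfrags &&
	       (frags[i].size != PAGE_SIZE || frags[i].offset)) {
		remaining -= frags[i].size;
		i++;
	}
	return remaining;
}

int main(void)
{
	struct frag frags[] = { { 100, 0 }, { 200, 0 }, { PAGE_SIZE, 0 } };

	/* Stops as soon as remaining hits 0; later frags are never read. */
	printf("%d\n", skip_unmappable_frags(frags, 3, 300));  /* 0 */
	/* Stops at the first full, offset-0 page. */
	printf("%d\n", skip_unmappable_frags(frags, 3, 350));  /* 50 */
	return 0;
}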

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (72 commits)
  selftests/net: add nettest to .gitignore
  net: qlogic: Fix memory leak in ql_alloc_large_buffers
  nfc: fix memory leak in llcp_sock_bind()
  sch_dsmark: fix potential NULL deref in dsmark_init()
  net: phy: at803x: use operating parameters from PHY-specific status
  net: phy: extract pause mode
  net: phy: extract link partner advertisement reading
  net: phy: fix write to mii-ctrl1000 register
  ipv6: Handle missing host route in __ipv6_ifa_notify
  net: phy: allow for reset line to be tied to a sleepy GPIO controller
  net: ipv4: avoid mixed n_redirects and rate_tokens usage
  r8152: Set macpassthru in reset_resume callback
  cxgb4:Fix out-of-bounds MSI-X info array access
  Revert "ipv6: Handle race in addrconf_dad_work"
  net: make sock_prot_memory_pressure() return "const char *"
  rxrpc: Fix rxrpc_recvmsg tracepoint
  qmi_wwan: add support for Cinterion CLS8 devices
  tcp: fix slab-out-of-bounds in tcp_zerocopy_receive()
  lib: textsearch: fix escapes in example code
  udp: only do GSO if # of segs > 1
  ...
This commit is contained in:
Linus Torvalds 2019-10-05 08:50:15 -07:00
Parents 6fe137cbe3 ef129d3414
Commit 9819a30c11
99 changed files with 539 additions and 281 deletions


@ -23,6 +23,7 @@ Contents:
intel/ice
google/gve
mellanox/mlx5
netronome/nfp
pensando/ionic
.. only:: subproject and html


@ -272,7 +272,7 @@ supported flags are:
* MSG_DONTWAIT, i.e. non-blocking operation.
recvmsg(2)
^^^^^^^^^
^^^^^^^^^^
In most cases recvmsg(2) is needed if you want to extract more information than
recvfrom(2) can provide. For example package priority and timestamp. The


@ -705,7 +705,7 @@ qca8k_setup(struct dsa_switch *ds)
BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
/* Setup connection between CPU port & user ports */
for (i = 0; i < DSA_MAX_PORTS; i++) {
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
/* CPU port gets connected to all user ports of the switch */
if (dsa_is_cpu_port(ds, i)) {
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
@ -1077,7 +1077,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (id != QCA8K_ID_QCA8337)
return -ENODEV;
priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
if (!priv->ds)
return -ENOMEM;


@ -339,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan)
{
struct realtek_smi *smi = ds->priv;
u16 vid;
int ret;
if (!smi->ops->is_vlan_valid(smi, port))
return -EINVAL;
for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
if (!smi->ops->is_vlan_valid(smi, vid))
return -EINVAL;
dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
vlan->vid_begin, vlan->vid_end);
@ -370,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
u16 vid;
int ret;
if (!smi->ops->is_vlan_valid(smi, port))
return;
for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
if (!smi->ops->is_vlan_valid(smi, vid))
return;
dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
port,


@ -507,7 +507,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
irq = of_irq_get(intc, 0);
if (irq <= 0) {
dev_err(smi->dev, "failed to get parent IRQ\n");
return irq ? irq : -EINVAL;
ret = irq ? irq : -EINVAL;
goto out_put_node;
}
/* This clears the IRQ status register */
@ -515,7 +516,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
&val);
if (ret) {
dev_err(smi->dev, "can't read interrupt status\n");
return ret;
goto out_put_node;
}
/* Fetch IRQ edge information from the descriptor */
@ -537,7 +538,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
val);
if (ret) {
dev_err(smi->dev, "could not configure IRQ polarity\n");
return ret;
goto out_put_node;
}
ret = devm_request_threaded_irq(smi->dev, irq, NULL,
@ -545,7 +546,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
"RTL8366RB", smi);
if (ret) {
dev_err(smi->dev, "unable to request irq: %d\n", ret);
return ret;
goto out_put_node;
}
smi->irqdomain = irq_domain_add_linear(intc,
RTL8366RB_NUM_INTERRUPT,
@ -553,12 +554,15 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
smi);
if (!smi->irqdomain) {
dev_err(smi->dev, "failed to create IRQ domain\n");
return -EINVAL;
ret = -EINVAL;
goto out_put_node;
}
for (i = 0; i < smi->num_ports; i++)
irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
return 0;
out_put_node:
of_node_put(intc);
return ret;
}
static int rtl8366rb_set_addr(struct realtek_smi *smi)


@ -1897,7 +1897,9 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
return sja1105_static_config_reload(priv);
}
/* Caller must hold priv->tagger_data.meta_lock */
/* Must be called only with priv->tagger_data.state bit
* SJA1105_HWTS_RX_EN cleared
*/
static int sja1105_change_rxtstamping(struct sja1105_private *priv,
bool on)
{
@ -1954,16 +1956,17 @@ static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
break;
}
if (rx_on != priv->tagger_data.hwts_rx_en) {
spin_lock(&priv->tagger_data.meta_lock);
if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
rc = sja1105_change_rxtstamping(priv, rx_on);
spin_unlock(&priv->tagger_data.meta_lock);
if (rc < 0) {
dev_err(ds->dev,
"Failed to change RX timestamping: %d\n", rc);
return -EFAULT;
return rc;
}
priv->tagger_data.hwts_rx_en = rx_on;
if (rx_on)
set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
}
if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
@ -1982,7 +1985,7 @@ static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
config.tx_type = HWTSTAMP_TX_ON;
else
config.tx_type = HWTSTAMP_TX_OFF;
if (priv->tagger_data.hwts_rx_en)
if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
else
config.rx_filter = HWTSTAMP_FILTER_NONE;
@ -2005,12 +2008,12 @@ static void sja1105_rxtstamp_work(struct work_struct *work)
mutex_lock(&priv->ptp_lock);
now = priv->tstamp_cc.read(&priv->tstamp_cc);
while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
u64 ts;
now = priv->tstamp_cc.read(&priv->tstamp_cc);
*shwt = (struct skb_shared_hwtstamps) {0};
ts = SJA1105_SKB_CB(skb)->meta_tstamp;
@ -2031,7 +2034,7 @@ static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
struct sja1105_private *priv = ds->priv;
struct sja1105_tagger_data *data = &priv->tagger_data;
if (!data->hwts_rx_en)
if (!test_bit(SJA1105_HWTS_RX_EN, &data->state))
return false;
/* We need to read the full PTP clock to reconstruct the Rx
@ -2201,6 +2204,7 @@ static int sja1105_probe(struct spi_device *spi)
tagger_data = &priv->tagger_data;
skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
spin_lock_init(&tagger_data->meta_lock);
/* Connections between dsa_port and sja1105_port */
for (i = 0; i < SJA1105_NUM_PORTS; i++) {


@ -409,7 +409,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
if (rc < 0) {
dev_err(dev, "Invalid config, cannot upload\n");
return -EINVAL;
rc = -EINVAL;
goto out;
}
/* Prevent PHY jabbering during switch reset by inhibiting
* Tx on all ports and waiting for current packet to drain.
@ -418,7 +419,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
rc = sja1105_inhibit_tx(priv, port_bitmap, true);
if (rc < 0) {
dev_err(dev, "Failed to inhibit Tx on ports\n");
return -ENXIO;
rc = -ENXIO;
goto out;
}
/* Wait for an eventual egress packet to finish transmission
* (reach IFG). It is guaranteed that a second one will not


@ -526,7 +526,7 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
struct device *dev = &ag->pdev->dev;
struct net_device *ndev = ag->ndev;
static struct mii_bus *mii_bus;
struct device_node *np;
struct device_node *np, *mnp;
int err;
np = dev->of_node;
@ -571,7 +571,9 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
msleep(200);
}
err = of_mdiobus_register(mii_bus, np);
mnp = of_get_child_by_name(np, "mdio");
err = of_mdiobus_register(mii_bus, mnp);
of_node_put(mnp);
if (err)
goto mdio_err_put_clk;


@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
static int alloc_uld_rxqs(struct adapter *adap,
struct sge_uld_rxq_info *rxq_info, bool lro)
{
struct sge *s = &adap->sge;
unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
struct sge_ofld_rxq *q = rxq_info->uldrxq;
unsigned short *ids = rxq_info->rspq_id;
unsigned int bmap_idx = 0;
struct sge *s = &adap->sge;
unsigned int per_chan;
int i, err, msi_idx, que_idx = 0;
per_chan = rxq_info->nrxq / adap->params.nports;
@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,
if (msi_idx >= 0) {
bmap_idx = get_msix_idx_from_bmap(adap);
if (bmap_idx < 0) {
err = -ENOSPC;
goto freeout;
}
msi_idx = adap->msix_info_ulds[bmap_idx].idx;
}
err = t4_sge_alloc_rxq(adap, &q->rspq, false,


@ -148,11 +148,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
{
u32 time_cnt;
u32 reg_value;
int ret;
regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
if (ret)
return ret;
reg_value &= st_msk;
if ((!!check_st) == (!!reg_value))
break;


@ -137,7 +137,8 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
icm_mr->icm_start_addr = icm_mr->dm.addr;
align_diff = icm_mr->icm_start_addr % align_base;
/* align_base is always a power of 2 */
align_diff = icm_mr->icm_start_addr & (align_base - 1);
if (align_diff)
icm_mr->used_length = align_base - align_diff;


@ -388,13 +388,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
continue;
phy = of_phy_find_device(phy_node);
of_node_put(phy_node);
if (!phy)
continue;
err = ocelot_probe_port(ocelot, port, regs, phy);
if (err) {
of_node_put(portnp);
return err;
goto out_put_ports;
}
phy_mode = of_get_phy_mode(portnp);
@ -422,7 +423,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
"invalid phy mode for port%d, (Q)SGMII only\n",
port);
of_node_put(portnp);
return -EINVAL;
err = -EINVAL;
goto out_put_ports;
}
serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
@ -435,7 +437,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
"missing SerDes phys for port%d\n",
port);
goto err_probe_ports;
of_node_put(portnp);
goto out_put_ports;
}
ocelot->ports[port]->serdes = serdes;
@ -447,9 +450,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Ocelot switch probed\n");
return 0;
err_probe_ports:
out_put_ports:
of_node_put(ports);
return err;
}


@ -20,6 +20,7 @@ if NET_VENDOR_PENSANDO
config IONIC
tristate "Pensando Ethernet IONIC Support"
depends on 64BIT && PCI
select NET_DEVLINK
help
This enables the support for the Pensando family of Ethernet
adapters. More specific information on this driver can be


@ -2787,6 +2787,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
netdev_err(qdev->ndev,
"PCI mapping failed with error: %d\n",
err);
dev_kfree_skb_irq(skb);
ql_free_large_buffers(qdev);
return -ENOMEM;
}


@ -282,7 +282,6 @@ struct netsec_desc_ring {
void *vaddr;
u16 head, tail;
u16 xdp_xmit; /* netsec_xdp_xmit packets */
bool is_xdp;
struct page_pool *page_pool;
struct xdp_rxq_info xdp_rxq;
spinlock_t lock; /* XDP tx queue locking */
@ -634,8 +633,7 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
unsigned int bytes;
int cnt = 0;
if (dring->is_xdp)
spin_lock(&dring->lock);
spin_lock(&dring->lock);
bytes = 0;
entry = dring->vaddr + DESC_SZ * tail;
@ -682,8 +680,8 @@ next:
entry = dring->vaddr + DESC_SZ * tail;
cnt++;
}
if (dring->is_xdp)
spin_unlock(&dring->lock);
spin_unlock(&dring->lock);
if (!cnt)
return false;
@ -799,9 +797,6 @@ static void netsec_set_tx_de(struct netsec_priv *priv,
de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
de->attr = attr;
/* under spin_lock if using XDP */
if (!dring->is_xdp)
dma_wmb();
dring->desc[idx] = *desc;
if (desc->buf_type == TYPE_NETSEC_SKB)
@ -1123,12 +1118,10 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
u16 tso_seg_len = 0;
int filled;
if (dring->is_xdp)
spin_lock_bh(&dring->lock);
spin_lock_bh(&dring->lock);
filled = netsec_desc_used(dring);
if (netsec_check_stop_tx(priv, filled)) {
if (dring->is_xdp)
spin_unlock_bh(&dring->lock);
spin_unlock_bh(&dring->lock);
net_warn_ratelimited("%s %s Tx queue full\n",
dev_name(priv->dev), ndev->name);
return NETDEV_TX_BUSY;
@ -1161,8 +1154,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
if (dring->is_xdp)
spin_unlock_bh(&dring->lock);
spin_unlock_bh(&dring->lock);
netif_err(priv, drv, priv->ndev,
"%s: DMA mapping failed\n", __func__);
ndev->stats.tx_dropped++;
@ -1177,8 +1169,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
netdev_sent_queue(priv->ndev, skb->len);
netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
if (dring->is_xdp)
spin_unlock_bh(&dring->lock);
spin_unlock_bh(&dring->lock);
netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
return NETDEV_TX_OK;
@ -1262,7 +1253,6 @@ err:
static void netsec_setup_tx_dring(struct netsec_priv *priv)
{
struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
int i;
for (i = 0; i < DESC_NUM; i++) {
@ -1275,12 +1265,6 @@ static void netsec_setup_tx_dring(struct netsec_priv *priv)
*/
de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
}
if (xdp_prog)
dring->is_xdp = true;
else
dring->is_xdp = false;
}
static int netsec_setup_rx_dring(struct netsec_priv *priv)


@ -401,8 +401,11 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
int numhashregs = (hw->multicast_filter_bins >> 5);
int mcbitslog2 = hw->mcast_bits_log2;
unsigned int value;
u32 mc_filter[8];
int i;
memset(mc_filter, 0, sizeof(mc_filter));
value = readl(ioaddr + GMAC_PACKET_FILTER);
value &= ~GMAC_PACKET_FILTER_HMC;
value &= ~GMAC_PACKET_FILTER_HPF;
@ -416,16 +419,13 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
/* Pass all multi */
value |= GMAC_PACKET_FILTER_PM;
/* Set all the bits of the HASH tab */
for (i = 0; i < numhashregs; i++)
writel(0xffffffff, ioaddr + GMAC_HASH_TAB(i));
memset(mc_filter, 0xff, sizeof(mc_filter));
} else if (!netdev_mc_empty(dev)) {
struct netdev_hw_addr *ha;
u32 mc_filter[8];
/* Hash filter for multicast */
value |= GMAC_PACKET_FILTER_HMC;
memset(mc_filter, 0, sizeof(mc_filter));
netdev_for_each_mc_addr(ha, dev) {
/* The upper n bits of the calculated CRC are used to
* index the contents of the hash table. The number of
@ -440,10 +440,11 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
*/
mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
}
for (i = 0; i < numhashregs; i++)
writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
}
for (i = 0; i < numhashregs; i++)
writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
value |= GMAC_PACKET_FILTER_HPF;
/* Handle multiple unicast addresses */


@ -84,7 +84,7 @@
#define XGMAC_TSIE BIT(12)
#define XGMAC_LPIIE BIT(5)
#define XGMAC_PMTIE BIT(4)
#define XGMAC_INT_DEFAULT_EN (XGMAC_LPIIE | XGMAC_PMTIE | XGMAC_TSIE)
#define XGMAC_INT_DEFAULT_EN (XGMAC_LPIIE | XGMAC_PMTIE)
#define XGMAC_Qx_TX_FLOW_CTRL(x) (0x00000070 + (x) * 4)
#define XGMAC_PT GENMASK(31, 16)
#define XGMAC_PT_SHIFT 16
@ -122,6 +122,7 @@
#define XGMAC_HWFEAT_GMIISEL BIT(1)
#define XGMAC_HW_FEATURE1 0x00000120
#define XGMAC_HWFEAT_L3L4FNUM GENMASK(30, 27)
#define XGMAC_HWFEAT_HASHTBLSZ GENMASK(25, 24)
#define XGMAC_HWFEAT_RSSEN BIT(20)
#define XGMAC_HWFEAT_TSOEN BIT(18)
#define XGMAC_HWFEAT_SPHEN BIT(17)


@ -472,7 +472,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
/* Handle multiple unicast addresses */
if (netdev_uc_count(dev) > XGMAC_ADDR_MAX) {
if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
value |= XGMAC_FILTER_PR;
} else {
struct netdev_hw_addr *ha;
@ -523,8 +523,8 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
struct stmmac_rss *cfg, u32 num_rxq)
{
void __iomem *ioaddr = hw->pcsr;
u32 value, *key;
int i, ret;
u32 value;
value = readl(ioaddr + XGMAC_RSS_CTRL);
if (!cfg || !cfg->enable) {
@ -533,8 +533,9 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
return 0;
}
for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) {
ret = dwxgmac2_rss_write_reg(ioaddr, true, i, cfg->key[i]);
key = (u32 *)cfg->key;
for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
if (ret)
return ret;
}


@ -380,6 +380,7 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
/* MAC HW feature 1 */
hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24;
dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;


@ -629,6 +629,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
ts_event_en = PTP_TCR_TSEVNTENA;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
ptp_over_ethernet = PTP_TCR_TSIPENA;
@ -4715,11 +4716,9 @@ int stmmac_suspend(struct device *dev)
if (!ndev || !netif_running(ndev))
return 0;
mutex_lock(&priv->lock);
phylink_mac_change(priv->phylink, false);
rtnl_lock();
phylink_stop(priv->phylink);
rtnl_unlock();
mutex_lock(&priv->lock);
netif_device_detach(ndev);
stmmac_stop_all_queues(priv);
@ -4734,6 +4733,12 @@ int stmmac_suspend(struct device *dev)
stmmac_pmt(priv, priv->hw, priv->wolopts);
priv->irq_wake = 1;
} else {
mutex_unlock(&priv->lock);
rtnl_lock();
phylink_stop(priv->phylink);
rtnl_unlock();
mutex_lock(&priv->lock);
stmmac_mac_set(priv, priv->ioaddr, false);
pinctrl_pm_select_sleep_state(priv->device);
/* Disable clock in case of PWM is off */
@ -4824,12 +4829,16 @@ int stmmac_resume(struct device *dev)
stmmac_start_all_queues(priv);
rtnl_lock();
phylink_start(priv->phylink);
rtnl_unlock();
mutex_unlock(&priv->lock);
if (!device_may_wakeup(priv->device)) {
rtnl_lock();
phylink_start(priv->phylink);
rtnl_unlock();
}
phylink_mac_change(priv->phylink, true);
return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);


@ -1564,10 +1564,6 @@ static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
struct stmmac_packet_attrs attr = { };
int size = priv->dma_buf_sz;
/* Only XGMAC has SW support for multiple RX descs in same packet */
if (priv->plat->has_xgmac)
size = priv->dev->max_mtu;
attr.dst = priv->dev->dev_addr;
attr.max_size = size - ETH_FCS_LEN;
attr.queue_mapping = queue;


@ -1137,10 +1137,11 @@ static void atusb_disconnect(struct usb_interface *interface)
ieee802154_unregister_hw(atusb->hw);
usb_put_dev(atusb->usb_dev);
ieee802154_free_hw(atusb->hw);
usb_set_intfdata(interface, NULL);
usb_put_dev(atusb->usb_dev);
pr_debug("%s done\n", __func__);
}


@ -3145,12 +3145,12 @@ static int ca8210_probe(struct spi_device *spi_device)
goto error;
}
priv->spi->dev.platform_data = pdata;
ret = ca8210_get_platform_data(priv->spi, pdata);
if (ret) {
dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n");
goto error;
}
priv->spi->dev.platform_data = pdata;
ret = ca8210_dev_com_init(priv);
if (ret) {


@ -800,7 +800,7 @@ mcr20a_handle_rx_read_buf_complete(void *context)
if (!skb)
return;
memcpy(skb_put(skb, len), lp->rx_buf, len);
__skb_put_data(skb, lp->rx_buf, len);
ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]);
print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1,


@ -15,6 +15,15 @@
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#define AT803X_SPECIFIC_STATUS 0x11
#define AT803X_SS_SPEED_MASK (3 << 14)
#define AT803X_SS_SPEED_1000 (2 << 14)
#define AT803X_SS_SPEED_100 (1 << 14)
#define AT803X_SS_SPEED_10 (0 << 14)
#define AT803X_SS_DUPLEX BIT(13)
#define AT803X_SS_SPEED_DUPLEX_RESOLVED BIT(11)
#define AT803X_SS_MDIX BIT(6)
#define AT803X_INTR_ENABLE 0x12
#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15)
#define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14)
@ -357,6 +366,64 @@ static int at803x_aneg_done(struct phy_device *phydev)
return aneg_done;
}
static int at803x_read_status(struct phy_device *phydev)
{
int ss, err, old_link = phydev->link;
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
return err;
/* why bother the PHY if nothing can have changed */
if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
return 0;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
err = genphy_read_lpa(phydev);
if (err < 0)
return err;
/* Read the AT8035 PHY-Specific Status register, which indicates the
* speed and duplex that the PHY is actually using, irrespective of
* whether we are in autoneg mode or not.
*/
ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
if (ss < 0)
return ss;
if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
switch (ss & AT803X_SS_SPEED_MASK) {
case AT803X_SS_SPEED_10:
phydev->speed = SPEED_10;
break;
case AT803X_SS_SPEED_100:
phydev->speed = SPEED_100;
break;
case AT803X_SS_SPEED_1000:
phydev->speed = SPEED_1000;
break;
}
if (ss & AT803X_SS_DUPLEX)
phydev->duplex = DUPLEX_FULL;
else
phydev->duplex = DUPLEX_HALF;
if (ss & AT803X_SS_MDIX)
phydev->mdix = ETH_TP_MDI_X;
else
phydev->mdix = ETH_TP_MDI;
}
if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
phy_resolve_aneg_pause(phydev);
return 0;
}
static struct phy_driver at803x_driver[] = {
{
/* ATHEROS 8035 */
@ -370,6 +437,7 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
/* PHY_GBIT_FEATURES */
.read_status = at803x_read_status,
.ack_interrupt = at803x_ack_interrupt,
.config_intr = at803x_config_intr,
}, {
@ -399,6 +467,7 @@ static struct phy_driver at803x_driver[] = {
.suspend = at803x_suspend,
.resume = at803x_resume,
/* PHY_GBIT_FEATURES */
.read_status = at803x_read_status,
.aneg_done = at803x_aneg_done,
.ack_interrupt = &at803x_ack_interrupt,
.config_intr = &at803x_config_intr,


@ -121,7 +121,7 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value)
return;
if (mdiodev->reset_gpio)
gpiod_set_value(mdiodev->reset_gpio, value);
gpiod_set_value_cansleep(mdiodev->reset_gpio, value);
if (mdiodev->reset_ctrl) {
if (value)


@ -283,6 +283,18 @@ void of_set_phy_eee_broken(struct phy_device *phydev)
phydev->eee_broken_modes = broken;
}
void phy_resolve_aneg_pause(struct phy_device *phydev)
{
if (phydev->duplex == DUPLEX_FULL) {
phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
phydev->lp_advertising);
phydev->asym_pause = linkmode_test_bit(
ETHTOOL_LINK_MODE_Asym_Pause_BIT,
phydev->lp_advertising);
}
}
EXPORT_SYMBOL_GPL(phy_resolve_aneg_pause);
/**
* phy_resolve_aneg_linkmode - resolve the advertisements into phy settings
* @phydev: The phy_device struct
@ -305,13 +317,7 @@ void phy_resolve_aneg_linkmode(struct phy_device *phydev)
break;
}
if (phydev->duplex == DUPLEX_FULL) {
phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
phydev->lp_advertising);
phydev->asym_pause = linkmode_test_bit(
ETHTOOL_LINK_MODE_Asym_Pause_BIT,
phydev->lp_advertising);
}
phy_resolve_aneg_pause(phydev);
}
EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);


@ -457,6 +457,11 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
val);
change_autoneg = true;
break;
case MII_CTRL1000:
mii_ctrl1000_mod_linkmode_adv_t(phydev->advertising,
val);
change_autoneg = true;
break;
default:
/* do nothing */
break;


@ -1783,32 +1783,9 @@ done:
}
EXPORT_SYMBOL(genphy_update_link);
/**
* genphy_read_status - check the link status and update current link state
* @phydev: target phy_device struct
*
* Description: Check the link, then figure out the current state
* by comparing what we advertise with what the link partner
* advertises. Start by checking the gigabit possibilities,
* then move on to 10/100.
*/
int genphy_read_status(struct phy_device *phydev)
int genphy_read_lpa(struct phy_device *phydev)
{
int lpa, lpagb, err, old_link = phydev->link;
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
return err;
/* why bother the PHY if nothing can have changed */
if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
return 0;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
int lpa, lpagb;
if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
if (phydev->is_gigabit_capable) {
@ -1838,6 +1815,44 @@ int genphy_read_status(struct phy_device *phydev)
return lpa;
mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
}
return 0;
}
EXPORT_SYMBOL(genphy_read_lpa);
/**
* genphy_read_status - check the link status and update current link state
* @phydev: target phy_device struct
*
* Description: Check the link, then figure out the current state
* by comparing what we advertise with what the link partner
* advertises. Start by checking the gigabit possibilities,
* then move on to 10/100.
*/
int genphy_read_status(struct phy_device *phydev)
{
int err, old_link = phydev->link;
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
if (err)
return err;
/* why bother the PHY if nothing can have changed */
if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
return 0;
phydev->speed = SPEED_UNKNOWN;
phydev->duplex = DUPLEX_UNKNOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
err = genphy_read_lpa(phydev);
if (err < 0)
return err;
if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
phy_resolve_aneg_linkmode(phydev);
} else if (phydev->autoneg == AUTONEG_DISABLE) {
int bmcr = phy_read(phydev, MII_BMCR);


@ -238,7 +238,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
nf_reset(skb);
nf_reset_ct(skb);
skb->ip_summed = CHECKSUM_NONE;
ip_select_ident(net, skb, NULL);
@ -358,7 +358,7 @@ static int pptp_rcv(struct sk_buff *skb)
po = lookup_chan(htons(header->call_id), iph->saddr);
if (po) {
skb_dst_drop(skb);
nf_reset(skb);
nf_reset_ct(skb);
return sk_receive_skb(sk_pppox(po), skb, 0);
}
drop:


@ -1104,7 +1104,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
*/
skb_orphan(skb);
nf_reset(skb);
nf_reset_ct(skb);
if (ptr_ring_produce(&tfile->tx_ring, skb))
goto drop;


@ -2620,14 +2620,18 @@ static struct hso_device *hso_create_bulk_serial_device(
*/
if (serial->tiocmget) {
tiocmget = serial->tiocmget;
tiocmget->endp = hso_get_ep(interface,
USB_ENDPOINT_XFER_INT,
USB_DIR_IN);
if (!tiocmget->endp) {
dev_err(&interface->dev, "Failed to find INT IN ep\n");
goto exit;
}
tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
if (tiocmget->urb) {
mutex_init(&tiocmget->mutex);
init_waitqueue_head(&tiocmget->waitq);
tiocmget->endp = hso_get_ep(
interface,
USB_ENDPOINT_XFER_INT,
USB_DIR_IN);
} else
hso_free_tiomget(serial);
}


@ -1350,6 +1350,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */


@ -4799,10 +4799,9 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
struct r8152 *tp = usb_get_intfdata(intf);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
mutex_lock(&tp->control);
tp->rtl_ops.init(tp);
queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
mutex_unlock(&tp->control);
set_ethernet_addr(tp);
return rtl8152_resume(intf);
}


@ -1585,7 +1585,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Don't wait up for transmitted skbs to be freed. */
if (!use_napi) {
skb_orphan(skb);
nf_reset(skb);
nf_reset_ct(skb);
}
/* If running out of space, stop queue to avoid getting packets that we


@ -366,7 +366,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
struct neighbour *neigh;
int ret;
nf_reset(skb);
nf_reset_ct(skb);
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
@ -459,7 +459,7 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
/* reset skb device */
if (likely(err == 1))
nf_reset(skb);
nf_reset_ct(skb);
else
skb = NULL;
@ -560,7 +560,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
bool is_v6gw = false;
int ret = -EINVAL;
nf_reset(skb);
nf_reset_ct(skb);
/* Be paranoid, rather than too clever. */
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
@ -670,7 +670,7 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
/* reset skb device */
if (likely(err == 1))
nf_reset(skb);
nf_reset_ct(skb);
else
skb = NULL;


@ -1261,8 +1261,8 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
skb_orphan(skb);
skb_dst_drop(skb);
skb->mark = 0;
secpath_reset(skb);
nf_reset(skb);
skb_ext_reset(skb);
nf_reset_ct(skb);
/*
* Get absolute mactime here so all HWs RX at the "same time", and


@ -887,9 +887,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
return 0;
}
static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
struct sk_buff *skb,
struct sk_buff_head *list)
static int xennet_fill_frags(struct netfront_queue *queue,
struct sk_buff *skb,
struct sk_buff_head *list)
{
RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *nskb;
@ -908,7 +908,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
queue->rx.rsp_cons = ++cons + skb_queue_len(list);
kfree_skb(nskb);
return ~0U;
return -ENOENT;
}
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
@ -919,7 +919,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
kfree_skb(nskb);
}
return cons;
queue->rx.rsp_cons = cons;
return 0;
}
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
@ -1045,8 +1047,7 @@ err:
skb->data_len = rx->status;
skb->len += rx->status;
i = xennet_fill_frags(queue, skb, &tmpq);
if (unlikely(i == ~0U))
if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
goto err;
if (rx->flags & XEN_NETRXF_csum_blank)
@ -1056,7 +1057,7 @@ err:
__skb_queue_tail(&rxq, skb);
queue->rx.rsp_cons = ++i;
i = ++queue->rx.rsp_cons;
work_done++;
}


@ -507,6 +507,8 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET;
}
spin_lock_init(&ptp_qoriq->lock);
ktime_get_real_ts64(&now);
ptp_qoriq_settime(&ptp_qoriq->caps, &now);
@ -514,7 +516,6 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
(ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
(ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT;
spin_lock_init(&ptp_qoriq->lock);
spin_lock_irqsave(&ptp_qoriq->lock, flags);
regs = &ptp_qoriq->regs;


@ -349,10 +349,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
*/
dst_release(skb_dst(skb));
skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
secpath_reset(skb);
#endif
nf_reset(skb);
skb_ext_reset(skb);
nf_reset_ct(skb);
#ifdef CONFIG_NET_SCHED
skb->tc_index = 0;


@ -31,6 +31,8 @@
#define SJA1105_META_SMAC 0x222222222222ull
#define SJA1105_META_DMAC 0x0180C200000Eull
#define SJA1105_HWTS_RX_EN 0
/* Global tagger data: each struct sja1105_port has a reference to
* the structure defined in struct sja1105_private.
*/
@ -42,7 +44,7 @@ struct sja1105_tagger_data {
* from taggers running on multiple ports on SMP systems
*/
spinlock_t meta_lock;
bool hwts_rx_en;
unsigned long state;
};
struct sja1105_skb_cb {


@ -455,6 +455,15 @@ static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising,
lp_advertising, lpa & LPA_LPACK);
}
static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising,
u32 ctrl1000)
{
linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertising,
ctrl1000 & ADVERTISE_1000HALF);
linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising,
ctrl1000 & ADVERTISE_1000FULL);
}
/**
* linkmode_adv_to_lcl_adv_t
* @advertising:pointer to linkmode advertising


@ -678,6 +678,7 @@ static inline bool phy_is_started(struct phy_device *phydev)
return phydev->state >= PHY_UP;
}
void phy_resolve_aneg_pause(struct phy_device *phydev);
void phy_resolve_aneg_linkmode(struct phy_device *phydev);
/**
@ -1076,6 +1077,7 @@ int genphy_config_eee_advert(struct phy_device *phydev);
int __genphy_config_aneg(struct phy_device *phydev, bool changed);
int genphy_aneg_done(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
int genphy_read_lpa(struct phy_device *phydev);
int genphy_read_status(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);


@ -4160,15 +4160,12 @@ static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
#endif /* CONFIG_SKB_EXTENSIONS */
static inline void nf_reset(struct sk_buff *skb)
static inline void nf_reset_ct(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
nf_conntrack_put(skb_nfct(skb));
skb->_nfct = 0;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
#endif
}
static inline void nf_reset_trace(struct sk_buff *skb)


@ -1068,7 +1068,7 @@ TRACE_EVENT(rxrpc_recvmsg,
),
TP_fast_assign(
__entry->call = call->debug_id;
__entry->call = call ? call->debug_id : 0;
__entry->why = why;
__entry->seq = seq;
__entry->offset = offset;


@ -89,9 +89,9 @@
* goto errout;
* }
*
* pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
* pos = textsearch_find_continuous(conf, &state, example, strlen(example));
* if (pos != UINT_MAX)
* panic("Oh my god, dancing chickens at \%d\n", pos);
* panic("Oh my god, dancing chickens at %d\n", pos);
*
* textsearch_destroy(conf);
*/


@ -436,7 +436,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
/* clean the netfilter state now that the batman-adv header has been
* removed
*/
nf_reset(skb);
nf_reset_ct(skb);
if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
goto dropped;


@ -3172,7 +3172,7 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
if (err) {
if (err && err != -EOPNOTSUPP) {
mutex_unlock(&devlink->lock);
goto out;
}
@ -3432,7 +3432,7 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
NLM_F_MULTI);
if (err) {
if (err && err != -EOPNOTSUPP) {
mutex_unlock(&devlink->lock);
goto out;
}
@ -4088,7 +4088,7 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
cb->extack);
mutex_unlock(&devlink->lock);
if (err)
if (err && err != -EOPNOTSUPP)
break;
idx++;
}


@ -5120,7 +5120,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->ignore_df = 0;
skb_dst_drop(skb);
skb_ext_reset(skb);
nf_reset(skb);
nf_reset_ct(skb);
nf_reset_trace(skb);
#ifdef CONFIG_NET_SWITCHDEV


@ -1700,8 +1700,6 @@ static void __sk_destruct(struct rcu_head *head)
sk_filter_uncharge(sk, filter);
RCU_INIT_POINTER(sk->sk_filter, NULL);
}
if (rcu_access_pointer(sk->sk_reuseport_cb))
reuseport_detach_sock(sk);
sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
@ -1728,7 +1726,14 @@ static void __sk_destruct(struct rcu_head *head)
void sk_destruct(struct sock *sk)
{
if (sock_flag(sk, SOCK_RCU_FREE))
bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
if (rcu_access_pointer(sk->sk_reuseport_cb)) {
reuseport_detach_sock(sk);
use_call_rcu = true;
}
if (use_call_rcu)
call_rcu(&sk->sk_rcu, __sk_destruct);
else
__sk_destruct(&sk->sk_rcu);
@ -3492,7 +3497,7 @@ static long sock_prot_memory_allocated(struct proto *proto)
return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
}
static char *sock_prot_memory_pressure(struct proto *proto)
static const char *sock_prot_memory_pressure(struct proto *proto)
{
return proto->memory_pressure != NULL ?
proto_memory_pressure(proto) ? "yes" : "no" : "NI";


@ -871,7 +871,7 @@ lookup:
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
nf_reset(skb);
nf_reset_ct(skb);
return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted);


@ -156,7 +156,11 @@ static struct sk_buff
/* Step 1: A timestampable frame was received.
* Buffer it until we get its meta frame.
*/
if (is_link_local && sp->data->hwts_rx_en) {
if (is_link_local) {
if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
/* Do normal processing. */
return skb;
spin_lock(&sp->data->meta_lock);
/* Was this a link-local frame instead of the meta
* that we were expecting?
@ -187,6 +191,12 @@ static struct sk_buff
} else if (is_meta) {
struct sk_buff *stampable_skb;
/* Drop the meta frame if we're not in the right state
* to process it.
*/
if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
return NULL;
spin_lock(&sp->data->meta_lock);
stampable_skb = sp->data->stampable_skb;


@ -1446,6 +1446,7 @@ static void erspan_setup(struct net_device *dev)
struct ip_tunnel *t = netdev_priv(dev);
ether_setup(dev);
dev->max_mtu = 0;
dev->netdev_ops = &erspan_netdev_ops;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;


@ -199,7 +199,7 @@ resubmit:
kfree_skb(skb);
return;
}
nf_reset(skb);
nf_reset_ct(skb);
}
ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
skb);


@ -1794,7 +1794,7 @@ static void ip_encap(struct net *net, struct sk_buff *skb,
ip_send_check(iph);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
nf_reset(skb);
nf_reset_ct(skb);
}
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
@ -2140,7 +2140,7 @@ int ip_mr_input(struct sk_buff *skb)
mroute_sk = rcu_dereference(mrt->mroute_sk);
if (mroute_sk) {
nf_reset(skb);
nf_reset_ct(skb);
raw_rcv(mroute_sk, skb);
return 0;
}


@ -65,7 +65,7 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* Avoid counting cloned packets towards the original connection. */
nf_reset(skb);
nf_reset_ct(skb);
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
#endif
/*


@ -332,7 +332,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
kfree_skb(skb);
return NET_RX_DROP;
}
nf_reset(skb);
nf_reset_ct(skb);
skb_push(skb, skb->data - skb_network_header(skb));


@ -916,16 +916,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
if (peer->rate_tokens == 0 ||
time_after(jiffies,
(peer->rate_last +
(ip_rt_redirect_load << peer->rate_tokens)))) {
(ip_rt_redirect_load << peer->n_redirects)))) {
__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
peer->rate_last = jiffies;
++peer->rate_tokens;
++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (log_martians &&
peer->rate_tokens == ip_rt_redirect_number)
peer->n_redirects == ip_rt_redirect_number)
net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
&ip_hdr(skb)->saddr, inet_iif(skb),
&ip_hdr(skb)->daddr, &gw);


@ -1798,13 +1798,11 @@ static int tcp_zerocopy_receive(struct sock *sk,
}
if (skb_frag_size(frags) != PAGE_SIZE || skb_frag_off(frags)) {
int remaining = zc->recv_skip_hint;
int size = skb_frag_size(frags);
while (remaining && (size != PAGE_SIZE ||
while (remaining && (skb_frag_size(frags) != PAGE_SIZE ||
skb_frag_off(frags))) {
remaining -= size;
remaining -= skb_frag_size(frags);
frags++;
size = skb_frag_size(frags);
}
zc->recv_skip_hint -= remaining;
break;


@ -1916,7 +1916,7 @@ process:
if (tcp_v4_inbound_md5_hash(sk, skb))
goto discard_and_relse;
nf_reset(skb);
nf_reset_ct(skb);
if (tcp_filter(sk, skb))
goto discard_and_relse;


@ -198,8 +198,13 @@ static bool retransmits_timed_out(struct sock *sk,
return false;
start_ts = tcp_sk(sk)->retrans_stamp;
if (likely(timeout == 0))
timeout = tcp_model_timeout(sk, boundary, TCP_RTO_MIN);
if (likely(timeout == 0)) {
unsigned int rto_base = TCP_RTO_MIN;
if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
rto_base = tcp_timeout_init(sk);
timeout = tcp_model_timeout(sk, boundary, rto_base);
}
return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
}


@ -821,6 +821,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
int is_udplite = IS_UDPLITE(sk);
int offset = skb_transport_offset(skb);
int len = skb->len - offset;
int datalen = len - sizeof(*uh);
__wsum csum = 0;
/*
@ -854,10 +855,12 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
return -EIO;
}
skb_shinfo(skb)->gso_size = cork->gso_size;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
cork->gso_size);
if (datalen > cork->gso_size) {
skb_shinfo(skb)->gso_size = cork->gso_size;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
cork->gso_size);
}
goto csum_partial;
}
@ -1969,7 +1972,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
*/
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
nf_reset(skb);
nf_reset_ct(skb);
if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
@ -2298,7 +2301,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
nf_reset(skb);
nf_reset_ct(skb);
/* No socket. Drop packet silently, if checksum is wrong */
if (udp_lib_checksum_complete(skb))


@ -5964,13 +5964,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
switch (event) {
case RTM_NEWADDR:
/*
* If the address was optimistic
* we inserted the route at the start of
* our DAD process, so we don't need
* to do it again
* If the address was optimistic we inserted the route at the
* start of our DAD process, so we don't need to do it again.
* If the device was taken down in the middle of the DAD
* cycle there is a race where we could get here without a
* host route, so nothing to insert. That will be fixed when
* the device is brought up.
*/
if (!rcu_access_pointer(ifp->rt->fib6_node))
if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
ip6_ins_rt(net, ifp->rt);
} else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
&ifp->addr, ifp->idev->dev->name);
}
if (ifp->idev->cnf.forwarding)
addrconf_join_anycast(ifp);
if (!ipv6_addr_any(&ifp->peer_addr))


@ -223,6 +223,16 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
if (ipv6_addr_is_multicast(&hdr->saddr))
goto err;
/* While RFC4291 is not explicit about v4mapped addresses
* in IPv6 headers, it seems clear linux dual-stack
* model can not deal properly with these.
* Security models could be fooled by ::ffff:127.0.0.1 for example.
*
* https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
*/
if (ipv6_addr_v4mapped(&hdr->saddr))
goto err;
skb->transport_header = skb->network_header + sizeof(*hdr);
IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
@ -371,7 +381,7 @@ resubmit_final:
/* Free reference early: we don't need it any more,
and it may hold ip_conntrack module loaded
indefinitely. */
nf_reset(skb);
nf_reset_ct(skb);
skb_postpull_rcsum(skb, skb_network_header(skb),
skb_network_header_len(skb));


@ -54,7 +54,7 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
return;
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
nf_reset(skb);
nf_reset_ct(skb);
nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
#endif
if (hooknum == NF_INET_PRE_ROUTING ||


@ -215,7 +215,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
/* Not releasing hash table! */
if (clone) {
nf_reset(clone);
nf_reset_ct(clone);
rawv6_rcv(sk, clone);
}
}


@ -1109,6 +1109,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
__wsum csum = 0;
int offset = skb_transport_offset(skb);
int len = skb->len - offset;
int datalen = len - sizeof(*uh);
/*
* Create a UDP header
@ -1141,8 +1142,12 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
return -EIO;
}
skb_shinfo(skb)->gso_size = cork->gso_size;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
if (datalen > cork->gso_size) {
skb_shinfo(skb)->gso_size = cork->gso_size;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
cork->gso_size);
}
goto csum_partial;
}


@ -1078,7 +1078,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
IPSKB_REROUTED);
nf_reset(skb);
nf_reset_ct(skb);
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {


@ -151,7 +151,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
skb->ip_summed = CHECKSUM_NONE;
skb_dst_drop(skb);
nf_reset(skb);
nf_reset_ct(skb);
rcu_read_lock();
dev = rcu_dereference(spriv->dev);


@ -193,7 +193,7 @@ pass_up:
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
nf_reset(skb);
nf_reset_ct(skb);
return sk_receive_skb(sk, skb, 1);


@ -206,7 +206,7 @@ pass_up:
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_put;
nf_reset(skb);
nf_reset_ct(skb);
return sk_receive_skb(sk, skb, 1);


@ -487,9 +487,14 @@ static ssize_t ieee80211_if_fmt_aqm(
const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
{
struct ieee80211_local *local = sdata->local;
struct txq_info *txqi = to_txq_info(sdata->vif.txq);
struct txq_info *txqi;
int len;
if (!sdata->vif.txq)
return 0;
txqi = to_txq_info(sdata->vif.txq);
spin_lock_bh(&local->fq.lock);
rcu_read_lock();
@ -658,7 +663,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
DEBUGFS_ADD(hw_queues);
if (sdata->local->ops->wake_tx_queue)
if (sdata->local->ops->wake_tx_queue &&
sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
sdata->vif.type != NL80211_IFTYPE_NAN)
DEBUGFS_ADD(aqm);
}


@ -247,7 +247,8 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
struct sta_info *sta;
int i;
spin_lock_bh(&fq->lock);
local_bh_disable();
spin_lock(&fq->lock);
if (sdata->vif.type == NL80211_IFTYPE_AP)
ps = &sdata->bss->ps;
@ -273,9 +274,9 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
&txqi->flags))
continue;
spin_unlock_bh(&fq->lock);
spin_unlock(&fq->lock);
drv_wake_tx_queue(local, txqi);
spin_lock_bh(&fq->lock);
spin_lock(&fq->lock);
}
}
@ -288,12 +289,14 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
(ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
goto out;
spin_unlock_bh(&fq->lock);
spin_unlock(&fq->lock);
drv_wake_tx_queue(local, txqi);
local_bh_enable();
return;
out:
spin_unlock_bh(&fq->lock);
spin_unlock(&fq->lock);
local_bh_enable();
}
static void


@ -613,7 +613,7 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
if (unlikely(cp->flags & IP_VS_CONN_F_NFCT))
ret = ip_vs_confirm_conntrack(skb);
if (ret == NF_ACCEPT) {
nf_reset(skb);
nf_reset_ct(skb);
skb_forward_csum(skb);
}
return ret;


@ -218,8 +218,13 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx,
static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
{
struct nft_connlimit *priv = nft_expr_priv(expr);
bool ret;
return nf_conncount_gc_list(net, &priv->list);
local_bh_disable();
ret = nf_conncount_gc_list(net, &priv->list);
local_bh_enable();
return ret;
}
static struct nft_expr_type nft_connlimit_type;


@ -107,9 +107,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
llcp_sock->service_name = kmemdup(llcp_addr.service_name,
llcp_sock->service_name_len,
GFP_KERNEL);
if (!llcp_sock->service_name) {
ret = -ENOMEM;
goto put_dev;
}
llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
if (llcp_sock->ssap == LLCP_SAP_MAX) {
kfree(llcp_sock->service_name);
llcp_sock->service_name = NULL;
ret = -EADDRINUSE;
goto put_dev;
}


@ -237,7 +237,7 @@ static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
}
skb_dst_drop(skb);
nf_reset(skb);
nf_reset_ct(skb);
secpath_reset(skb);
skb->pkt_type = PACKET_HOST;


@ -1821,7 +1821,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
skb_dst_drop(skb);
/* drop conntrack reference */
nf_reset(skb);
nf_reset_ct(skb);
spkt = &PACKET_SKB_CB(skb)->sa.pkt;
@ -2121,7 +2121,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
skb_dst_drop(skb);
/* drop conntrack reference */
nf_reset(skb);
nf_reset_ct(skb);
spin_lock(&sk->sk_receive_queue.lock);
po->stats.stats1.tp_packets++;


@ -143,6 +143,9 @@ static void rds_ib_add_one(struct ib_device *device)
refcount_set(&rds_ibdev->refcount, 1);
INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
INIT_LIST_HEAD(&rds_ibdev->conn_list);
rds_ibdev->max_wrs = device->attrs.max_qp_wr;
rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
@ -203,9 +206,6 @@ static void rds_ib_add_one(struct ib_device *device)
device->name,
rds_ibdev->use_fastreg ? "FRMR" : "FMR");
INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
INIT_LIST_HEAD(&rds_ibdev->conn_list);
down_write(&rds_ib_devices_lock);
list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
up_write(&rds_ib_devices_lock);

Просмотреть файл

@ -1127,6 +1127,33 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
[TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
};
static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
struct nlattr *opt,
struct netlink_ext_ack *extack)
{
int err;
if (!opt) {
NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
return -EINVAL;
}
err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
cbq_policy, extack);
if (err < 0)
return err;
if (tb[TCA_CBQ_WRROPT]) {
const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
if (wrr->priority > TC_CBQ_MAXPRIO) {
NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
err = -EINVAL;
}
}
return err;
}
static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
struct netlink_ext_ack *extack)
{
@ -1139,13 +1166,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
q->delay_timer.function = cbq_undelay;
if (!opt) {
NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
return -EINVAL;
}
err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
extack);
err = cbq_opt_parse(tb, opt, extack);
if (err < 0)
return err;
@ -1464,13 +1485,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
struct cbq_class *parent;
struct qdisc_rate_table *rtab = NULL;
if (!opt) {
NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
return -EINVAL;
}
err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
extack);
err = cbq_opt_parse(tb, opt, extack);
if (err < 0)
return err;


@ -306,7 +306,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
if (err < 0)
goto skip;
if (ecmd.base.speed != SPEED_UNKNOWN)
if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
speed = ecmd.base.speed;
skip:


@ -361,6 +361,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
goto errout;
err = -EINVAL;
if (!tb[TCA_DSMARK_INDICES])
goto errout;
indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
if (hweight32(indices) != 1)


@ -1044,12 +1044,11 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
if (err < 0)
goto skip;
if (ecmd.base.speed != SPEED_UNKNOWN)
if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
speed = ecmd.base.speed;
skip:
picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
speed * 1000 * 1000);
picos_per_byte = (USEC_PER_SEC * 8) / speed;
atomic64_set(&q->picos_per_byte, picos_per_byte);
netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",


@ -201,7 +201,7 @@ int sctp_rcv(struct sk_buff *skb)
if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
goto discard_release;
nf_reset(skb);
nf_reset_ct(skb);
if (sk_filter(sk, skb))
goto discard_release;


@ -160,6 +160,7 @@ struct tipc_link {
struct {
u16 len;
u16 limit;
struct sk_buff *target_bskb;
} backlog[5];
u16 snd_nxt;
u16 window;
@ -880,6 +881,7 @@ static void link_prepare_wakeup(struct tipc_link *l)
void tipc_link_reset(struct tipc_link *l)
{
struct sk_buff_head list;
u32 imp;
__skb_queue_head_init(&list);
@ -901,11 +903,10 @@ void tipc_link_reset(struct tipc_link *l)
__skb_queue_purge(&l->deferdq);
__skb_queue_purge(&l->backlogq);
__skb_queue_purge(&l->failover_deferdq);
l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
l->backlog[imp].len = 0;
l->backlog[imp].target_bskb = NULL;
}
kfree_skb(l->reasm_buf);
kfree_skb(l->reasm_tnlmsg);
kfree_skb(l->failover_reasm_skb);
@ -947,7 +948,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
struct sk_buff_head *transmq = &l->transmq;
struct sk_buff_head *backlogq = &l->backlogq;
struct sk_buff *skb, *_skb, *bskb;
struct sk_buff *skb, *_skb, **tskb;
int pkt_cnt = skb_queue_len(list);
int rc = 0;
@ -999,19 +1000,21 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
seqno++;
continue;
}
if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
tskb = &l->backlog[imp].target_bskb;
if (tipc_msg_bundle(*tskb, hdr, mtu)) {
kfree_skb(__skb_dequeue(list));
l->stats.sent_bundled++;
continue;
}
if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
kfree_skb(__skb_dequeue(list));
__skb_queue_tail(backlogq, bskb);
l->backlog[msg_importance(buf_msg(bskb))].len++;
__skb_queue_tail(backlogq, *tskb);
l->backlog[imp].len++;
l->stats.sent_bundled++;
l->stats.sent_bundles++;
continue;
}
l->backlog[imp].target_bskb = NULL;
l->backlog[imp].len += skb_queue_len(list);
skb_queue_splice_tail_init(list, backlogq);
}
@ -1027,6 +1030,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
u16 seqno = l->snd_nxt;
u16 ack = l->rcv_nxt - 1;
u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
u32 imp;
while (skb_queue_len(&l->transmq) < l->window) {
skb = skb_peek(&l->backlogq);
@ -1037,7 +1041,10 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
break;
__skb_dequeue(&l->backlogq);
hdr = buf_msg(skb);
l->backlog[msg_importance(hdr)].len--;
imp = msg_importance(hdr);
l->backlog[imp].len--;
if (unlikely(skb == l->backlog[imp].target_bskb))
l->backlog[imp].target_bskb = NULL;
__skb_queue_tail(&l->transmq, skb);
/* next retransmit attempt */
if (link_is_bc_sndlink(l))
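
The net effect of the link.c changes is that each importance level owns at most one open bundle buffer (target_bskb), and that pointer has to be forgotten whenever the buffer leaves the backlog or the link is reset. A hypothetical helper capturing the rule applied in tipc_link_advance_backlog() above - a sketch, not code from the patch:

static void backlog_forget_bundle(struct tipc_link *l, struct sk_buff *skb)
{
	u32 imp = msg_importance(buf_msg(skb));

	/* If this buffer was the open bundle target for its importance
	 * level, drop the reference so later messages are not appended
	 * to a buffer that is no longer queued on the backlog. */
	if (skb == l->backlog[imp].target_bskb)
		l->backlog[imp].target_bskb = NULL;
}

The msg.c hunk below complements this by letting a new bundle header inherit the importance of the message it wraps, so the bundle is accounted against the matching backlog[imp] slot.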


@ -543,10 +543,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
bmsg = buf_msg(_skb);
tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
INT_H_SIZE, dnode);
if (msg_isdata(msg))
msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
else
msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
msg_set_importance(bmsg, msg_importance(msg));
msg_set_seqno(bmsg, msg_seqno(msg));
msg_set_ack(bmsg, msg_ack(msg));
msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));


@ -638,7 +638,7 @@ struct sock *__vsock_create(struct net *net,
}
EXPORT_SYMBOL_GPL(__vsock_create);
static void __vsock_release(struct sock *sk)
static void __vsock_release(struct sock *sk, int level)
{
if (sk) {
struct sk_buff *skb;
@ -648,9 +648,17 @@ static void __vsock_release(struct sock *sk)
vsk = vsock_sk(sk);
pending = NULL; /* Compiler warning. */
/* The release call is supposed to use lock_sock_nested()
* rather than lock_sock(), if a sock lock should be acquired.
*/
transport->release(vsk);
lock_sock(sk);
/* When "level" is SINGLE_DEPTH_NESTING, use the nested
* version to avoid the warning "possible recursive locking
* detected". When "level" is 0, lock_sock_nested(sk, level)
* is the same as lock_sock(sk).
*/
lock_sock_nested(sk, level);
sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
@ -659,7 +667,7 @@ static void __vsock_release(struct sock *sk)
/* Clean up any sockets that never were accepted. */
while ((pending = vsock_dequeue_accept(sk)) != NULL) {
__vsock_release(pending);
__vsock_release(pending, SINGLE_DEPTH_NESTING);
sock_put(pending);
}
@ -708,7 +716,7 @@ EXPORT_SYMBOL_GPL(vsock_stream_has_space);
static int vsock_release(struct socket *sock)
{
__vsock_release(sock->sk);
__vsock_release(sock->sk, 0);
sock->sk = NULL;
sock->state = SS_FREE;
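
The pattern here generalizes: when a release path re-enters itself for pending child sockets of the same lock class, the inner lock must be taken with lock_sock_nested() and SINGLE_DEPTH_NESTING so lockdep does not report false recursion. A minimal sketch of that shape, with hypothetical helper names rather than the vsock code itself:

static void release_with_children(struct sock *sk, int level)
{
	struct sock *child;

	/* level is 0 for the outermost socket (equivalent to lock_sock())
	 * and SINGLE_DEPTH_NESTING when re-entered for a child below. */
	lock_sock_nested(sk, level);
	sock_orphan(sk);

	/* dequeue_pending_child() stands in for the accept-queue walk
	 * (vsock_dequeue_accept() in the hunk above). */
	while ((child = dequeue_pending_child(sk)) != NULL) {
		release_with_children(child, SINGLE_DEPTH_NESTING);
		sock_put(child);
	}
	release_sock(sk);
}

The hyperv and virtio transport hunks that follow switch to lock_sock_nested() for the same reason, since their release callbacks can run while a socket of the same lock class is already held.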


@ -559,7 +559,7 @@ static void hvs_release(struct vsock_sock *vsk)
struct sock *sk = sk_vsock(vsk);
bool remove_sock;
lock_sock(sk);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
remove_sock = hvs_close_lock_held(vsk);
release_sock(sk);
if (remove_sock)


@ -820,7 +820,7 @@ void virtio_transport_release(struct vsock_sock *vsk)
struct sock *sk = &vsk->sk;
bool remove_sock = true;
lock_sock(sk);
lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_type == SOCK_STREAM)
remove_sock = virtio_transport_close(vsk);


@ -201,6 +201,38 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
return __cfg80211_rdev_from_attrs(netns, info->attrs);
}
static int validate_beacon_head(const struct nlattr *attr,
struct netlink_ext_ack *extack)
{
const u8 *data = nla_data(attr);
unsigned int len = nla_len(attr);
const struct element *elem;
const struct ieee80211_mgmt *mgmt = (void *)data;
unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
u.beacon.variable);
if (len < fixedlen)
goto err;
if (ieee80211_hdrlen(mgmt->frame_control) !=
offsetof(struct ieee80211_mgmt, u.beacon))
goto err;
data += fixedlen;
len -= fixedlen;
for_each_element(elem, data, len) {
/* nothing */
}
if (for_each_element_completed(elem, data, len))
return 0;
err:
NL_SET_ERR_MSG_ATTR(extack, attr, "malformed beacon head");
return -EINVAL;
}
static int validate_ie_attr(const struct nlattr *attr,
struct netlink_ext_ack *extack)
{
@ -338,8 +370,9 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
[NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 },
[NL80211_ATTR_BEACON_HEAD] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
[NL80211_ATTR_BEACON_HEAD] =
NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_beacon_head,
IEEE80211_MAX_DATA_LEN),
[NL80211_ATTR_BEACON_TAIL] =
NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
IEEE80211_MAX_DATA_LEN),
@ -2636,6 +2669,8 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
control_freq = nla_get_u32(attrs[NL80211_ATTR_WIPHY_FREQ]);
memset(chandef, 0, sizeof(*chandef));
chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
chandef->center_freq1 = control_freq;
@ -3176,7 +3211,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
if (rdev->ops->get_channel) {
int ret;
struct cfg80211_chan_def chandef;
struct cfg80211_chan_def chandef = {};
ret = rdev_get_channel(rdev, wdev, &chandef);
if (ret == 0) {
@ -6270,6 +6305,9 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->del_mpath)
return -EOPNOTSUPP;
if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
return -EOPNOTSUPP;
return rdev_del_mpath(rdev, dev, dst);
}
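
A note on the fixed-length bound used by validate_beacon_head() above, assuming the standard 802.11 frame layout: the management header contributes 2 (frame control) + 2 (duration) + 18 (three addresses) + 2 (sequence control) = 24 bytes, and the beacon fixed part adds 8 (timestamp) + 2 (beacon interval) + 2 (capability) = 12 bytes, so offsetof(struct ieee80211_mgmt, u.beacon.variable) works out to 36. Anything shorter cannot hold a beacon head, and everything past that offset has to parse as a clean run of information elements, which is what the for_each_element()/for_each_element_completed() walk confirms before the NLA_POLICY_VALIDATE_FN() policy entry accepts the attribute.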


@ -2108,7 +2108,7 @@ static void reg_call_notifier(struct wiphy *wiphy,
static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
{
struct cfg80211_chan_def chandef;
struct cfg80211_chan_def chandef = {};
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
enum nl80211_iftype iftype;


@ -1723,7 +1723,12 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
return;
new_ie_len -= trans_ssid[1];
mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen);
if (!mbssid)
/*
* It's not valid to have the MBSSID element before SSID
* ignore if that happens - the code below assumes it is
* after (while copying things inbetween).
*/
if (!mbssid || mbssid < trans_ssid)
return;
new_ie_len -= mbssid[1];
rcu_read_lock();
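
The new pointer comparison works because both lookups return pointers into the same information-element buffer, so pointer order is element order. A hedged sketch of the idea, not the patch's exact code:

	const u8 *ssid   = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
	const u8 *mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen);

	/* Bail out unless the MBSSID element follows the SSID element;
	 * the splicing below assumes that ordering. */
	if (!ssid || !mbssid || mbssid < ssid)
		return;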


@ -798,7 +798,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct cfg80211_chan_def chandef;
struct cfg80211_chan_def chandef = {};
int ret;
switch (wdev->iftype) {
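
The three chandef hunks (nl80211_send_iface(), reg_wdev_chan_valid() and cfg80211_wext_giwfreq()) share one rationale: rdev_get_channel() can fail without writing anything into the on-stack structure, yet later code may still inspect it, so the empty-brace initializer guarantees a well-defined all-zero "no channel" value instead of uninitialized stack memory.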


@ -706,7 +706,7 @@ resume:
if (err)
goto drop;
nf_reset(skb);
nf_reset_ct(skb);
if (decaps) {
sp = skb_sec_path(skb);


@ -185,7 +185,7 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
skb->skb_iif = 0;
skb->ignore_df = 0;
skb_dst_drop(skb);
nf_reset(skb);
nf_reset_ct(skb);
nf_reset_trace(skb);
if (!xnet)


@ -502,7 +502,7 @@ int xfrm_output_resume(struct sk_buff *skb, int err)
struct net *net = xs_net(skb_dst(skb)->xfrm);
while (likely((err = xfrm_output_one(skb, err)) == 0)) {
nf_reset(skb);
nf_reset_ct(skb);
err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
if (unlikely(err != 1))


@ -2808,7 +2808,7 @@ static void xfrm_policy_queue_process(struct timer_list *t)
continue;
}
nf_reset(skb);
nf_reset_ct(skb);
skb_dst_drop(skb);
skb_dst_set(skb, dst);
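
The nf_reset() to nf_reset_ct() substitutions in this and the preceding xfrm hunks, and in the sctp hunk further up, are part of a tree-wide rename whose point is to name what is actually being reset: the conntrack state attached to the skb.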

tools/testing/selftests/net/.gitignore

@ -21,3 +21,4 @@ ipv6_flowlabel
ipv6_flowlabel_mgr
so_txtime
tcp_fastopen_backup_key
nettest


@ -89,12 +89,9 @@ struct testcase testcases_v4[] = {
.tfail = true,
},
{
/* send a single MSS: will fail with GSO, because the segment
* logic in udp4_ufo_fragment demands a gso skb to be > MTU
*/
/* send a single MSS: will fall back to no GSO */
.tlen = CONST_MSS_V4,
.gso_len = CONST_MSS_V4,
.tfail = true,
.r_num_mss = 1,
},
{
@ -139,10 +136,9 @@ struct testcase testcases_v4[] = {
.tfail = true,
},
{
/* send a single 1B MSS: will fail, see single MSS above */
/* send a single 1B MSS: will fall back to no GSO */
.tlen = 1,
.gso_len = 1,
.tfail = true,
.r_num_mss = 1,
},
{
@ -196,12 +192,9 @@ struct testcase testcases_v6[] = {
.tfail = true,
},
{
/* send a single MSS: will fail with GSO, because the segment
* logic in udp4_ufo_fragment demands a gso skb to be > MTU
*/
/* send a single MSS: will fall back to no GSO */
.tlen = CONST_MSS_V6,
.gso_len = CONST_MSS_V6,
.tfail = true,
.r_num_mss = 1,
},
{
@ -246,10 +239,9 @@ struct testcase testcases_v6[] = {
.tfail = true,
},
{
/* send a single 1B MSS: will fail, see single MSS above */
/* send a single 1B MSS: will fall back to no GSO */
.tlen = 1,
.gso_len = 1,
.tfail = true,
.r_num_mss = 1,
},
{
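
The updated comments describe the behaviour change from userspace's side: a UDP_SEGMENT send whose payload fits within a single MSS is no longer rejected and simply goes out as one ordinary datagram. A minimal standalone sketch of the socket option involved (userspace C, not part of the selftest):

#include <netinet/in.h>
#include <netinet/udp.h>
#include <sys/socket.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103		/* from include/uapi/linux/udp.h */
#endif
#ifndef SOL_UDP
#define SOL_UDP IPPROTO_UDP	/* both are 17 */
#endif

/* Ask the kernel to split each payload written to fd into gso_size-sized
 * datagrams; with this series, a payload of at most gso_size bytes is
 * sent as a single segment instead of failing. */
static int enable_udp_gso(int fd, int gso_size)
{
	return setsockopt(fd, SOL_UDP, UDP_SEGMENT,
			  &gso_size, sizeof(gso_size));
}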