Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Load correct firmware in rtl8192ce wireless driver, from Jurij Smakov.

 2) Fix leak of tx_ring and tx_cq due to overwriting in mlx4 driver, from Martin KaFai Lau.

 3) Need to reference count PHY driver module when it is attached, from Mao Wenan.

 4) Don't do zero length vzalloc() in ethtool register dump, from Stanislaw Gruszka.

 5) Defer net_disable_timestamp() to a workqueue to get out of locking issues, from Eric Dumazet.

 6) We cannot drop the SKB dst when IP options refer to it, fix also from Eric Dumazet.

 7) Incorrect packet header offset calculations in ip6_gre, again from Eric Dumazet.

 8) Missing tcp_v6_restore_cb() causes use-after-free, from Eric too.

 9) tcp_splice_read() can get into an infinite loop with URG, and hey it's from Eric once more.

10) vnet_hdr_sz can change asynchronously, so read it once during decision making in macvtap and tun, from Willem de Bruijn.

11) Can't use kernel stack for DMA transfers in USB networking drivers, from Ben Hutchings.

12) Handle csum errors properly in UDP by calling the proper destructor, from Eric Dumazet.

13) Fix non-deterministic softirq run when scheduling NAPI from a workqueue in mlx4, from Benjamin Poirier.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (28 commits)
  sctp: check af before verify address in sctp_addr_id2transport
  sctp: avoid BUG_ON on sctp_wait_for_sndbuf
  mlx4: Invoke softirqs after napi_reschedule
  udp: properly cope with csum errors
  catc: Use heap buffer for memory size test
  catc: Combine failure cleanup code in catc_probe()
  rtl8150: Use heap buffers for all register access
  pegasus: Use heap buffers for all register access
  macvtap: read vnet_hdr_size once
  tun: read vnet_hdr_sz once
  tcp: avoid infinite loop in tcp_splice_read()
  hns: avoid stack overflow with CONFIG_KASAN
  ipv6: Fix IPv6 packet loss in scenarios involving roaming + snooping switches
  ipv6: tcp: add a missing tcp_v6_restore_cb()
  nl80211: Fix mesh HT operation check
  mac80211: Fix adding of mesh vendor IEs
  mac80211: Allocate a sync skcipher explicitly for FILS AEAD
  mac80211: Fix FILS AEAD protection in Association Request frame
  ip6_gre: fix ip6gre_err() invalid reads
  netlabel: out of bound access in cipso_v4_validate()
  ...
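Item 10 boils down to one pattern: a field that user space can change through an ioctl (vnet_hdr_sz in tun/macvtap) must be read exactly once, and the snapshot reused for every later check, so a concurrent writer cannot make the length validation and the arithmetic that follows disagree. A minimal sketch of that READ_ONCE()/WRITE_ONCE() pairing, using a made-up demo_dev structure rather than the real tun/macvtap code (the actual hunks are further down):

#include <linux/compiler.h>	/* READ_ONCE() / WRITE_ONCE() */
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical private struct; the real ones are tun_struct / macvtap_queue. */
struct demo_dev {
	int vnet_hdr_sz;	/* may be changed by an ioctl at any time */
};

static int demo_check_len(struct demo_dev *d, size_t *len)
{
	/* Snapshot once; every later use works on the same value. */
	int hdr_sz = READ_ONCE(d->vnet_hdr_sz);

	if (*len < hdr_sz)
		return -EINVAL;
	*len -= hdr_sz;		/* never re-read d->vnet_hdr_sz here */
	return 0;
}

/* The ioctl (writer) side pairs with it: */
static void demo_set_hdr_sz(struct demo_dev *d, int new_sz)
{
	WRITE_ONCE(d->vnet_hdr_sz, new_sz);
}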
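Item 11 (catc, pegasus, rtl8150) is the same class of bug three times over: buffers handed to usb_control_msg() may be DMA-mapped, so they must come from the heap, never from the caller's stack. A hedged sketch of the bounce-buffer wrapper those drivers now use; DEMO_REQ_GET_REGS, DEMO_REQT_READ and demo_read_regs() are placeholders for illustration, not a real driver API:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>

/* Placeholder vendor request values for the sketch only. */
#define DEMO_REQ_GET_REGS	0x10
#define DEMO_REQT_READ		(USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)

static int demo_read_regs(struct usb_device *udev, u16 indx, u16 size, void *data)
{
	u8 *buf;
	int ret;

	/* kmalloc()'d memory is safe to DMA-map; stack memory is not. */
	buf = kmalloc(size, GFP_NOIO);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      DEMO_REQ_GET_REGS, DEMO_REQT_READ,
			      0, indx, buf, size, 1000);
	if (ret > 0 && ret <= size)
		memcpy(data, buf, ret);	/* copy back only what the device returned */

	kfree(buf);
	return ret;
}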
@@ -1014,9 +1014,7 @@
 static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 {
-	u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-	writel(value, reg_addr + reg);
+	writel(value, base + reg);
 }
 
 #define dsaf_write_dev(a, reg, value) \
@@ -1024,9 +1022,7 @@ static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
 static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
 {
-	u8 __iomem *reg_addr = ACCESS_ONCE(base);
-
-	return readl(reg_addr + reg);
+	return readl(base + reg);
 }
 
 static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
@@ -1099,7 +1099,7 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
 	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
 	new_prof.tx_ring_size = tx_size;
 	new_prof.rx_ring_size = rx_size;
-	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
 	if (err)
 		goto out;
 
@@ -1774,7 +1774,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
 	new_prof.tx_ring_num[TX_XDP] = xdp_count;
 	new_prof.rx_ring_num = channel->rx_count;
 
-	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
 	if (err)
 		goto out;
 
@@ -2042,6 +2042,8 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 			if (priv->tx_cq[t] && priv->tx_cq[t][i])
 				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
 		}
+		kfree(priv->tx_ring[t]);
+		kfree(priv->tx_cq[t]);
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
@@ -2184,9 +2186,11 @@ static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
 
 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
 				struct mlx4_en_priv *tmp,
-				struct mlx4_en_port_profile *prof)
+				struct mlx4_en_port_profile *prof,
+				bool carry_xdp_prog)
 {
-	int t;
+	struct bpf_prog *xdp_prog;
+	int i, t;
 
 	mlx4_en_copy_priv(tmp, priv, prof);
 
@@ -2200,6 +2204,23 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
 		}
 		return -ENOMEM;
 	}
 
+	/* All rx_rings has the same xdp_prog.  Pick the first one. */
+	xdp_prog = rcu_dereference_protected(
+		priv->rx_ring[0]->xdp_prog,
+		lockdep_is_held(&priv->mdev->state_lock));
+
+	if (xdp_prog && carry_xdp_prog) {
+		xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
+		if (IS_ERR(xdp_prog)) {
+			mlx4_en_free_resources(tmp);
+			return PTR_ERR(xdp_prog);
+		}
+		for (i = 0; i < tmp->rx_ring_num; i++)
+			rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
+					   xdp_prog);
+	}
+
 	return 0;
 }
 
@@ -2214,7 +2235,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
-	int t;
 
 	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
@@ -2248,11 +2268,6 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	mlx4_en_free_resources(priv);
 	mutex_unlock(&mdev->state_lock);
 
-	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
-		kfree(priv->tx_ring[t]);
-		kfree(priv->tx_cq[t]);
-	}
-
 	free_netdev(dev);
 }
 
@@ -2755,7 +2770,7 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 		en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
 	}
 
-	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
 	if (err) {
 		if (prog)
 			bpf_prog_sub(prog, priv->rx_ring_num - 1);
@@ -3499,7 +3514,7 @@ int mlx4_en_reset_config(struct net_device *dev,
 	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
 	memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
 
-	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
 	if (err)
 		goto out;
 
@@ -514,8 +514,11 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
 		return;
 
 	for (ring = 0; ring < priv->rx_ring_num; ring++) {
-		if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
+			local_bh_disable();
 			napi_reschedule(&priv->rx_cq[ring]->napi);
+			local_bh_enable();
+		}
 	}
 }
 
@@ -679,7 +679,8 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
 
 int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
 				struct mlx4_en_priv *tmp,
-				struct mlx4_en_port_profile *prof);
+				struct mlx4_en_port_profile *prof,
+				bool carry_xdp_prog);
 void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
 				    struct mlx4_en_priv *tmp);
 
@@ -681,7 +681,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 	size_t linear;
 
 	if (q->flags & IFF_VNET_HDR) {
-		vnet_hdr_len = q->vnet_hdr_sz;
+		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
 
 		err = -EINVAL;
 		if (len < vnet_hdr_len)
@@ -820,7 +820,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 
 	if (q->flags & IFF_VNET_HDR) {
 		struct virtio_net_hdr vnet_hdr;
-		vnet_hdr_len = q->vnet_hdr_sz;
+		vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
 		if (iov_iter_count(iter) < vnet_hdr_len)
 			return -EINVAL;
 
@@ -920,6 +920,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 		return -EIO;
 	}
 
+	if (!try_module_get(d->driver->owner)) {
+		dev_err(&dev->dev, "failed to get the device driver module\n");
+		return -EIO;
+	}
+
 	get_device(d);
 
 	/* Assume that if there is no driver, that it doesn't
@@ -977,6 +982,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
 error:
 	phy_detach(phydev);
 	put_device(d);
+	module_put(d->driver->owner);
 	if (ndev_owner != bus->owner)
 		module_put(bus->owner);
 	return err;
@@ -1059,6 +1065,7 @@ void phy_detach(struct phy_device *phydev)
 	bus = phydev->mdio.bus;
 
 	put_device(&phydev->mdio.dev);
+	module_put(phydev->mdio.dev.driver->owner);
 	if (ndev_owner != bus->owner)
 		module_put(bus->owner);
 }
@@ -1170,9 +1170,11 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	}
 
 	if (tun->flags & IFF_VNET_HDR) {
-		if (len < tun->vnet_hdr_sz)
+		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+
+		if (len < vnet_hdr_sz)
 			return -EINVAL;
-		len -= tun->vnet_hdr_sz;
+		len -= vnet_hdr_sz;
 
 		if (!copy_from_iter_full(&gso, sizeof(gso), from))
 			return -EFAULT;
@@ -1183,7 +1185,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
 		if (tun16_to_cpu(tun, gso.hdr_len) > len)
 			return -EINVAL;
-		iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
+		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
 	}
 
 	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
@@ -1335,7 +1337,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
 		vlan_hlen = VLAN_HLEN;
 
 	if (tun->flags & IFF_VNET_HDR)
-		vnet_hdr_sz = tun->vnet_hdr_sz;
+		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
 
 	total = skb->len + vlan_hlen + vnet_hdr_sz;
 
@@ -776,7 +776,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
 	struct net_device *netdev;
 	struct catc *catc;
 	u8 broadcast[ETH_ALEN];
-	int i, pktsz;
+	int pktsz, ret;
 
 	if (usb_set_interface(usbdev,
 			intf->altsetting->desc.bInterfaceNumber, 1)) {
@@ -811,12 +811,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
 	if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
 	    (!catc->rx_urb) || (!catc->irq_urb)) {
 		dev_err(&intf->dev, "No free urbs available.\n");
-		usb_free_urb(catc->ctrl_urb);
-		usb_free_urb(catc->tx_urb);
-		usb_free_urb(catc->rx_urb);
-		usb_free_urb(catc->irq_urb);
-		free_netdev(netdev);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto fail_free;
 	}
 
 	/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
@@ -844,15 +840,24 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
 			  catc->irq_buf, 2, catc_irq_done, catc, 1);
 
 	if (!catc->is_f5u011) {
+		u32 *buf;
+		int i;
+
 		dev_dbg(dev, "Checking memory size\n");
 
-		i = 0x12345678;
-		catc_write_mem(catc, 0x7a80, &i, 4);
-		i = 0x87654321;
-		catc_write_mem(catc, 0xfa80, &i, 4);
-		catc_read_mem(catc, 0x7a80, &i, 4);
+		buf = kmalloc(4, GFP_KERNEL);
+		if (!buf) {
+			ret = -ENOMEM;
+			goto fail_free;
+		}
 
-		switch (i) {
+		*buf = 0x12345678;
+		catc_write_mem(catc, 0x7a80, buf, 4);
+		*buf = 0x87654321;
+		catc_write_mem(catc, 0xfa80, buf, 4);
+		catc_read_mem(catc, 0x7a80, buf, 4);
+
+		switch (*buf) {
 		case 0x12345678:
 			catc_set_reg(catc, TxBufCount, 8);
 			catc_set_reg(catc, RxBufCount, 32);
@@ -868,6 +873,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
 			break;
 		}
 
+		kfree(buf);
+
 		dev_dbg(dev, "Getting MAC from SEEROM.\n");
 
 		catc_get_mac(catc, netdev->dev_addr);
@@ -913,16 +920,21 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
 	usb_set_intfdata(intf, catc);
 
 	SET_NETDEV_DEV(netdev, &intf->dev);
-	if (register_netdev(netdev) != 0) {
+	ret = register_netdev(netdev);
+	if (ret)
+		goto fail_clear_intfdata;
+
+	return 0;
+
+fail_clear_intfdata:
 	usb_set_intfdata(intf, NULL);
+fail_free:
 	usb_free_urb(catc->ctrl_urb);
 	usb_free_urb(catc->tx_urb);
 	usb_free_urb(catc->rx_urb);
 	usb_free_urb(catc->irq_urb);
 	free_netdev(netdev);
-	return -EIO;
-	}
-	return 0;
+	return ret;
 }
 
 static void catc_disconnect(struct usb_interface *intf)
@@ -126,40 +126,61 @@ static void async_ctrl_callback(struct urb *urb)
 
 static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
 {
+	u8 *buf;
 	int ret;
 
+	buf = kmalloc(size, GFP_NOIO);
+	if (!buf)
+		return -ENOMEM;
+
 	ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
 			      PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
-			      indx, data, size, 1000);
+			      indx, buf, size, 1000);
 	if (ret < 0)
 		netif_dbg(pegasus, drv, pegasus->net,
 			  "%s returned %d\n", __func__, ret);
+	else if (ret <= size)
+		memcpy(data, buf, ret);
+	kfree(buf);
 	return ret;
 }
 
-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
+static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
+			 const void *data)
 {
+	u8 *buf;
 	int ret;
 
+	buf = kmemdup(data, size, GFP_NOIO);
+	if (!buf)
+		return -ENOMEM;
+
 	ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
 			      PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
-			      indx, data, size, 100);
+			      indx, buf, size, 100);
 	if (ret < 0)
 		netif_dbg(pegasus, drv, pegasus->net,
 			  "%s returned %d\n", __func__, ret);
+	kfree(buf);
 	return ret;
 }
 
 static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
 {
+	u8 *buf;
 	int ret;
 
+	buf = kmemdup(&data, 1, GFP_NOIO);
+	if (!buf)
+		return -ENOMEM;
+
 	ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
 			      PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
-			      indx, &data, 1, 1000);
+			      indx, buf, 1, 1000);
 	if (ret < 0)
 		netif_dbg(pegasus, drv, pegasus->net,
 			  "%s returned %d\n", __func__, ret);
+	kfree(buf);
 	return ret;
 }
 
@@ -155,16 +155,36 @@ static const char driver_name [] = "rtl8150";
 */
 static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
 {
-	return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+	void *buf;
+	int ret;
+
+	buf = kmalloc(size, GFP_NOIO);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
 			       RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
-			       indx, 0, data, size, 500);
+			       indx, 0, buf, size, 500);
+	if (ret > 0 && ret <= size)
+		memcpy(data, buf, ret);
+	kfree(buf);
+	return ret;
 }
 
-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
+static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
 {
-	return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+	void *buf;
+	int ret;
+
+	buf = kmemdup(data, size, GFP_NOIO);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
 			       RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
			       indx, 0, data, size, 500);
+			       indx, 0, buf, size, 500);
+	kfree(buf);
+	return ret;
 }
 
 static void async_set_reg_cb(struct urb *urb)
@@ -92,7 +92,7 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
 	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-	char *fw_name = "rtlwifi/rtl8192cfwU.bin";
+	char *fw_name;
 
 	rtl8192ce_bt_reg_init(hw);
 
@@ -164,8 +164,13 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
 	}
 
 	/* request fw */
-	if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
+	if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+	    !IS_92C_SERIAL(rtlhal->version))
+		fw_name = "rtlwifi/rtl8192cfwU.bin";
+	else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
 		fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+	else
+		fw_name = "rtlwifi/rtl8192cfw.bin";
 
 	rtlpriv->max_fw_size = 0x4000;
 	pr_info("Using firmware %s\n", fw_name);
@@ -309,6 +309,10 @@ static inline int cipso_v4_validate(const struct sk_buff *skb,
 	}
 
 	for (opt_iter = 6; opt_iter < opt_len;) {
+		if (opt_iter + 1 == opt_len) {
+			err_offset = opt_iter;
+			goto out;
+		}
 		tag_len = opt[opt_iter + 1];
 		if ((tag_len == 0) || (tag_len > (opt_len - opt_iter))) {
 			err_offset = opt_iter + 1;
@@ -2006,7 +2006,9 @@ void sk_reset_timer(struct sock *sk, struct timer_list *timer,
 void sk_stop_timer(struct sock *sk, struct timer_list *timer);
 
 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
-			unsigned int flags);
+			unsigned int flags,
+			void (*destructor)(struct sock *sk,
+					   struct sk_buff *skb));
 int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
 
@@ -23,14 +23,12 @@ struct ipv6_sr_hdr {
 	__u8	type;
 	__u8	segments_left;
 	__u8	first_segment;
-	__u8	flag_1;
-	__u8	flag_2;
-	__u8	reserved;
+	__u8	flags;
+	__u16	reserved;
 
 	struct in6_addr segments[0];
 };
 
-#define SR6_FLAG1_CLEANUP	(1 << 7)
 #define SR6_FLAG1_PROTECTED	(1 << 6)
 #define SR6_FLAG1_OAM		(1 << 5)
 #define SR6_FLAG1_ALERT		(1 << 4)
@@ -42,8 +40,7 @@ struct ipv6_sr_hdr {
 #define SR6_TLV_PADDING		4
 #define SR6_TLV_HMAC		5
 
-#define sr_has_cleanup(srh) ((srh)->flag_1 & SR6_FLAG1_CLEANUP)
-#define sr_has_hmac(srh) ((srh)->flag_1 & SR6_FLAG1_HMAC)
+#define sr_has_hmac(srh) ((srh)->flags & SR6_FLAG1_HMAC)
 
 struct sr6_tlv {
 	__u8 type;
@@ -332,7 +332,9 @@ void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
 EXPORT_SYMBOL(__skb_free_datagram_locked);
 
 int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
-			unsigned int flags)
+			unsigned int flags,
+			void (*destructor)(struct sock *sk,
+					   struct sk_buff *skb))
 {
 	int err = 0;
 
@@ -342,6 +344,8 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
 	if (skb == skb_peek(&sk->sk_receive_queue)) {
 		__skb_unlink(skb, &sk->sk_receive_queue);
 		atomic_dec(&skb->users);
+		if (destructor)
+			destructor(sk, skb);
 		err = 0;
 	}
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -375,7 +379,7 @@ EXPORT_SYMBOL(__sk_queue_drop_skb);
 
 int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
 {
-	int err = __sk_queue_drop_skb(sk, skb, flags);
+	int err = __sk_queue_drop_skb(sk, skb, flags, NULL);
 
 	kfree_skb(skb);
 	sk_mem_reclaim_partial(sk);
@@ -1695,24 +1695,19 @@ EXPORT_SYMBOL_GPL(net_dec_egress_queue);
 
 static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call static_key_slow_dec() from irq context
- * If net_disable_timestamp() is called from irq context, defer the
- * static_key_slow_dec() calls.
- */
 static atomic_t netstamp_needed_deferred;
+static void netstamp_clear(struct work_struct *work)
+{
+	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
+
+	while (deferred--)
+		static_key_slow_dec(&netstamp_needed);
+}
+static DECLARE_WORK(netstamp_work, netstamp_clear);
 #endif
 
 void net_enable_timestamp(void)
 {
-#ifdef HAVE_JUMP_LABEL
-	int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
-
-	if (deferred) {
-		while (--deferred)
-			static_key_slow_dec(&netstamp_needed);
-		return;
-	}
-#endif
 	static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
@@ -1720,12 +1715,12 @@ EXPORT_SYMBOL(net_enable_timestamp);
 void net_disable_timestamp(void)
 {
 #ifdef HAVE_JUMP_LABEL
-	if (in_interrupt()) {
+	/* net_disable_timestamp() can be called from non process context */
 	atomic_inc(&netstamp_needed_deferred);
-		return;
-	}
-#endif
+	schedule_work(&netstamp_work);
+#else
 	static_key_slow_dec(&netstamp_needed);
+#endif
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
@@ -1405,9 +1405,12 @@ static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
 	if (regs.len > reglen)
 		regs.len = reglen;
 
-	regbuf = vzalloc(reglen);
-	if (reglen && !regbuf)
-		return -ENOMEM;
+	regbuf = NULL;
+	if (reglen) {
+		regbuf = vzalloc(reglen);
+		if (!regbuf)
+			return -ENOMEM;
+	}
 
 	ops->get_regs(dev, &regs, regbuf);
 
@@ -1587,6 +1587,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
 			goto validate_return_locked;
 		}
 
+		if (opt_iter + 1 == opt_len) {
+			err_offset = opt_iter;
+			goto validate_return_locked;
+		}
 		tag_len = tag[1];
 		if (tag_len > (opt_len - opt_iter)) {
 			err_offset = opt_iter + 1;
@@ -1238,6 +1238,13 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
 		pktinfo->ipi_ifindex = 0;
 		pktinfo->ipi_spec_dst.s_addr = 0;
 	}
+	/* We need to keep the dst for __ip_options_echo()
+	 * We could restrict the test to opt.ts_needtime || opt.srr,
+	 * but the following is good enough as IP options are not often used.
+	 */
+	if (unlikely(IPCB(skb)->opt.optlen))
+		skb_dst_force(skb);
+	else
 		skb_dst_drop(skb);
 }
 
@@ -770,6 +770,12 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 				ret = -EAGAIN;
 				break;
 			}
+			/* if __tcp_splice_read() got nothing while we have
+			 * an skb in receive queue, we do not want to loop.
+			 * This might happen with URG data.
+			 */
+			if (!skb_queue_empty(&sk->sk_receive_queue))
+				break;
 			sk_wait_data(sk, &timeo, NULL);
 			if (signal_pending(current)) {
 				ret = sock_intr_errno(timeo);
@@ -1501,7 +1501,7 @@ try_again:
 	return err;
 
 csum_copy_err:
-	if (!__sk_queue_drop_skb(sk, skb, flags)) {
+	if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
 		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 	}
@@ -3386,9 +3386,15 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 			}
 
 			if (idev) {
-				if (idev->if_flags & IF_READY)
-					/* device is already configured. */
+				if (idev->if_flags & IF_READY) {
+					/* device is already configured -
+					 * but resend MLD reports, we might
+					 * have roamed and need to update
+					 * multicast snooping switches
+					 */
+					ipv6_mc_up(idev);
 					break;
+				}
 				idev->if_flags |= IF_READY;
 			}
 
@@ -327,7 +327,6 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
 	struct ipv6_sr_hdr *hdr;
 	struct inet6_dev *idev;
 	struct in6_addr *addr;
-	bool cleanup = false;
 	int accept_seg6;
 
 	hdr = (struct ipv6_sr_hdr *)skb_transport_header(skb);
@@ -351,11 +350,7 @@ static int ipv6_srh_rcv(struct sk_buff *skb)
 #endif
 
 looped_back:
-	if (hdr->segments_left > 0) {
-		if (hdr->nexthdr != NEXTHDR_IPV6 && hdr->segments_left == 1 &&
-		    sr_has_cleanup(hdr))
-			cleanup = true;
-	} else {
+	if (hdr->segments_left == 0) {
 		if (hdr->nexthdr == NEXTHDR_IPV6) {
 			int offset = (hdr->hdrlen + 1) << 3;
 
@@ -418,21 +413,6 @@ looped_back:
 
 	ipv6_hdr(skb)->daddr = *addr;
 
-	if (cleanup) {
-		int srhlen = (hdr->hdrlen + 1) << 3;
-		int nh = hdr->nexthdr;
-
-		skb_pull_rcsum(skb, sizeof(struct ipv6hdr) + srhlen);
-		memmove(skb_network_header(skb) + srhlen,
-			skb_network_header(skb),
-			(unsigned char *)hdr - skb_network_header(skb));
-		skb->network_header += srhlen;
-		ipv6_hdr(skb)->nexthdr = nh;
-		ipv6_hdr(skb)->payload_len = htons(skb->len -
-						   sizeof(struct ipv6hdr));
-		skb_push_rcsum(skb, sizeof(struct ipv6hdr));
-	}
-
 	skb_dst_drop(skb);
 
 	ip6_route_input(skb);
@@ -453,14 +433,9 @@ looped_back:
 		}
 		ipv6_hdr(skb)->hop_limit--;
 
-		/* be sure that srh is still present before reinjecting */
-		if (!cleanup) {
 		skb_pull(skb, sizeof(struct ipv6hdr));
 		goto looped_back;
-		}
-		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
-		IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
 	}
 
 	dst_input(skb);
 
@@ -369,33 +369,35 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
 static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		       u8 type, u8 code, int offset, __be32 info)
 {
-	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
-	__be16 *p = (__be16 *)(skb->data + offset);
-	int grehlen = offset + 4;
+	const struct gre_base_hdr *greh;
+	const struct ipv6hdr *ipv6h;
+	int grehlen = sizeof(*greh);
 	struct ip6_tnl *t;
+	int key_off = 0;
 	__be16 flags;
+	__be32 key;
 
-	flags = p[0];
-	if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
+	if (!pskb_may_pull(skb, offset + grehlen))
+		return;
+	greh = (const struct gre_base_hdr *)(skb->data + offset);
+	flags = greh->flags;
+	if (flags & (GRE_VERSION | GRE_ROUTING))
+		return;
-	if (flags&GRE_KEY) {
-		grehlen += 4;
-		if (flags & GRE_CSUM)
-			grehlen += 4;
-	}
+	if (flags & GRE_KEY) {
+		key_off = grehlen + offset;
+		grehlen += 4;
+	}
 
-	/* If only 8 bytes returned, keyed message will be dropped here */
-	if (!pskb_may_pull(skb, grehlen))
+	if (!pskb_may_pull(skb, offset + grehlen))
 		return;
 	ipv6h = (const struct ipv6hdr *)skb->data;
-	p = (__be16 *)(skb->data + offset);
+	greh = (const struct gre_base_hdr *)(skb->data + offset);
+	key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
 
 	t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
-				 flags & GRE_KEY ?
-				 *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
-				 p[1]);
+				 key, greh->protocol);
 	if (!t)
 		return;
 
@@ -174,7 +174,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
 	 * hash function (RadioGatun) with up to 1216 bits
 	 */
 
-	/* saddr(16) + first_seg(1) + cleanup(1) + keyid(4) + seglist(16n) */
+	/* saddr(16) + first_seg(1) + flags(1) + keyid(4) + seglist(16n) */
 	plen = 16 + 1 + 1 + 4 + (hdr->first_segment + 1) * 16;
 
 	/* this limit allows for 14 segments */
@@ -186,7 +186,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
 	 *
 	 * 1. Source IPv6 address (128 bits)
 	 * 2. first_segment value (8 bits)
-	 * 3. cleanup flag (8 bits: highest bit is cleanup value, others are 0)
+	 * 3. Flags (8 bits)
 	 * 4. HMAC Key ID (32 bits)
 	 * 5. All segments in the segments list (n * 128 bits)
 	 */
@@ -202,8 +202,8 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
 	/* first_segment value */
 	*off++ = hdr->first_segment;
 
-	/* cleanup flag */
-	*off++ = !!(sr_has_cleanup(hdr)) << 7;
+	/* flags */
+	*off++ = hdr->flags;
 
 	/* HMAC Key ID */
 	memcpy(off, &hmackeyid, 4);
@@ -991,6 +991,16 @@ drop:
 	return 0; /* don't send reset */
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+	/* We need to move header back to the beginning if xfrm6_policy_check()
+	 * and tcp_v6_fill_cb() are going to be called again.
+	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+	 */
+	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+		sizeof(struct inet6_skb_parm));
+}
+
 static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 					 struct request_sock *req,
 					 struct dst_entry *dst,
@@ -1182,10 +1192,12 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 							      sk_gfp_mask(sk, GFP_ATOMIC));
 			consume_skb(ireq->pktopts);
 			ireq->pktopts = NULL;
-			if (newnp->pktoptions)
+			if (newnp->pktoptions) {
+				tcp_v6_restore_cb(newnp->pktoptions);
 				skb_set_owner_r(newnp->pktoptions, newsk);
+			}
 		}
 	}
 
 	return newsk;
@@ -1198,16 +1210,6 @@ out:
 	return NULL;
 }
 
-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
-	/* We need to move header back to the beginning if xfrm6_policy_check()
-	 * and tcp_v6_fill_cb() are going to be called again.
-	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
-	 */
-	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-		sizeof(struct inet6_skb_parm));
-}
-
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
@@ -441,7 +441,7 @@ try_again:
 	return err;
 
 csum_copy_err:
-	if (!__sk_queue_drop_skb(sk, skb, flags)) {
+	if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
 		if (is_udp4) {
 			UDP_INC_STATS(sock_net(sk),
 				      UDP_MIB_CSUMERRORS, is_udplite);
@@ -124,7 +124,7 @@ static int aes_siv_encrypt(const u8 *key, size_t key_len,
 
 	/* CTR */
 
-	tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0);
+	tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(tfm2)) {
 		kfree(tmp);
 		return PTR_ERR(tfm2);
@@ -183,7 +183,7 @@ static int aes_siv_decrypt(const u8 *key, size_t key_len,
 
 	/* CTR */
 
-	tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, 0);
+	tfm2 = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(tfm2))
 		return PTR_ERR(tfm2);
 	/* K2 for CTR */
@@ -272,7 +272,7 @@ int fils_encrypt_assoc_req(struct sk_buff *skb,
 	crypt_len = skb->data + skb->len - encr;
 	skb_put(skb, AES_BLOCK_SIZE);
 	return aes_siv_encrypt(assoc_data->fils_kek, assoc_data->fils_kek_len,
-			       encr, crypt_len, 1, addr, len, encr);
+			       encr, crypt_len, 5, addr, len, encr);
 }
 
 int fils_decrypt_assoc_resp(struct ieee80211_sub_if_data *sdata,
@@ -339,7 +339,7 @@ int mesh_add_vendor_ies(struct ieee80211_sub_if_data *sdata,
 	/* fast-forward to vendor IEs */
 	offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
 
-	if (offset) {
+	if (offset < ifmsh->ie_len) {
 		len = ifmsh->ie_len - offset;
 		data = ifmsh->ie + offset;
 		if (skb_tailroom(skb) < len)
@@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
 	union sctp_addr *laddr = (union sctp_addr *)addr;
 	struct sctp_transport *transport;
 
-	if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
+	if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
 		return NULL;
 
 	addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
@@ -7426,7 +7426,8 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
 		 */
 		release_sock(sk);
 		current_timeo = schedule_timeout(current_timeo);
-		BUG_ON(sk != asoc->base.sk);
+		if (sk != asoc->base.sk)
+			goto do_error;
 		lock_sock(sk);
 
 		*timeo_p = current_timeo;
@@ -5916,6 +5916,7 @@ do { \
 			break;
 		}
 		cfg->ht_opmode = ht_opmode;
+		mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
 	}
 	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
 				  1, 65535, mask,