Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (24 commits)
    bridge: Partially disable netpoll support
    tcp: fix crash in tcp_xmit_retransmit_queue
    IPv6: fix CoA check in RH2 input handler (mip6_rthdr_input())
    ibmveth: lost IRQ while closing/opening device leads to service loss
    rt2x00: Fix lockdep warning in rt2x00lib_probe_dev()
    vhost: avoid pr_err on condition guest can trigger
    ipmr: Don't leak memory if fib lookup fails.
    vhost-net: avoid flush under lock
    net: fix problem in reading sock TX queue
    net/core: neighbour update Oops
    net: skb_tx_hash() fix relative to skb_orphan_try()
    rfs: call sock_rps_record_flow() in tcp_splice_read()
    xfrm: do not assume that template resolving always returns xfrms
    hostap_pci: set dev->base_addr during probe
    axnet_cs: use spin_lock_irqsave in ax_interrupt
    dsa: Fix Kconfig dependencies.
    act_nat: not all of the ICMP packets need an IP header payload
    r8169: incorrect identifier for a 8168dp
    Phonet: fix skb leak in pipe endpoint accept()
    Bluetooth: Update sec_level/auth_type for already existing connections
    ...
Commit 516bd66415
@@ -677,7 +677,7 @@ static int ibmveth_close(struct net_device *netdev)
     if (!adapter->pool_config)
         netif_stop_queue(netdev);
 
-    free_irq(netdev->irq, netdev);
+    h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
 
     do {
         lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
@@ -689,6 +689,8 @@ static int ibmveth_close(struct net_device *netdev)
                  lpar_rc);
     }
 
+    free_irq(netdev->irq, netdev);
+
     adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
 
     ibmveth_cleanup(adapter);

@@ -1168,6 +1168,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
     int interrupts, nr_serviced = 0, i;
     struct ei_device *ei_local;
     int handled = 0;
+    unsigned long flags;
 
     e8390_base = dev->base_addr;
     ei_local = netdev_priv(dev);
@@ -1176,7 +1177,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
      *  Protect the irq test too.
      */
 
-    spin_lock(&ei_local->page_lock);
+    spin_lock_irqsave(&ei_local->page_lock, flags);
 
     if (ei_local->irqlock)
     {
@@ -1188,7 +1189,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
            dev->name, inb_p(e8390_base + EN0_ISR),
            inb_p(e8390_base + EN0_IMR));
 #endif
-        spin_unlock(&ei_local->page_lock);
+        spin_unlock_irqrestore(&ei_local->page_lock, flags);
         return IRQ_NONE;
     }
 
@@ -1261,7 +1262,7 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id)
     ei_local->irqlock = 0;
     outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 
-    spin_unlock(&ei_local->page_lock);
+    spin_unlock_irqrestore(&ei_local->page_lock, flags);
     return IRQ_RETVAL(handled);
 }
 
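The ax_interrupt() change above swaps spin_lock()/spin_unlock() for the irqsave/irqrestore variants so the handler saves the caller's interrupt state and puts it back on exit. The kernel spinlock API cannot be exercised outside the kernel, but the same save-then-restore discipline can be sketched in ordinary C, with a POSIX signal mask standing in for the CPU interrupt flag (this is an analogy, not the driver's code):

/* Sketch: save/restore discipline analogous to spin_lock_irqsave().
 * "Disabling interrupts" is approximated by blocking SIGINT for the
 * calling thread and remembering the previous mask in `oldmask`, the
 * way the kernel remembers the IRQ state in `flags`.
 */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_counter;

static void critical_section(void)
{
    sigset_t block, oldmask;

    sigemptyset(&block);
    sigaddset(&block, SIGINT);

    /* "irqsave": block the signal and remember what was blocked before */
    pthread_sigmask(SIG_BLOCK, &block, &oldmask);
    pthread_mutex_lock(&lock);

    shared_counter++;               /* work done with the lock held */

    pthread_mutex_unlock(&lock);
    /* "irqrestore": put the previous mask back, whatever it was */
    pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
}

int main(void)
{
    critical_section();
    printf("counter = %d\n", shared_counter);
    return 0;
}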
@@ -1316,7 +1316,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
         { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
 
         /* 8168C family. */
-        { 0x7cf00000, 0x3ca00000, RTL_GIGA_MAC_VER_24 },
+        { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
         { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
         { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
         { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },

@@ -730,13 +730,17 @@ static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
 
     /* RX */
     if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
-        goto err;
+        goto err_rx;
 
     /* Register Read */
     if (ath9k_hif_usb_alloc_reg_in_urb(hif_dev) < 0)
-        goto err;
+        goto err_reg;
 
     return 0;
+err_reg:
+    ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
+err_rx:
+    ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
 err:
     return -ENOMEM;
 }
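The ath9k_hif_usb_alloc_urbs() hunk replaces a single catch-all err: label with staged labels (err_rx, err_reg) so each failure point unwinds exactly what was already allocated, in reverse order. A minimal standalone sketch of that goto-based unwinding pattern, using hypothetical ctx_init()/malloc stages rather than the driver's API:

/* Sketch of goto-based staged cleanup: each failure point jumps to a
 * label that frees exactly what was successfully allocated before it.
 * The allocations are placeholders, not real kernel or driver calls.
 */
#include <stdio.h>
#include <stdlib.h>

struct ctx {
    void *a;
    void *b;
};

static int ctx_init(struct ctx *c)
{
    c->a = malloc(64);
    if (!c->a)
        goto err;

    c->b = malloc(64);
    if (!c->b)
        goto err_a;     /* only 'a' exists at this point */

    return 0;

err_a:
    free(c->a);
err:
    return -1;          /* stands in for -ENOMEM */
}

int main(void)
{
    struct ctx c;

    if (ctx_init(&c) == 0) {
        puts("initialized");
        free(c.b);
        free(c.a);
    }
    return 0;
}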
@@ -330,6 +330,7 @@ static int prism2_pci_probe(struct pci_dev *pdev,
 
     dev->irq = pdev->irq;
     hw_priv->mem_start = mem;
+    dev->base_addr = (unsigned long) mem;
 
     prism2_pci_cor_sreset(local);
 
@@ -97,6 +97,17 @@ static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
     spin_lock_irqsave(&priv->sta_lock, flags);
     memset(priv->stations, 0, sizeof(priv->stations));
     priv->num_stations = 0;
+
+    /*
+     * Remove all key information that is not stored as part of station
+     * information since mac80211 may not have had a
+     * chance to remove all the keys. When device is reconfigured by
+     * mac80211 after an error all keys will be reconfigured.
+     */
+    priv->ucode_key_table = 0;
+    priv->key_mapping_key = 0;
+    memset(priv->wep_keys, 0, sizeof(priv->wep_keys));
+
     spin_unlock_irqrestore(&priv->sta_lock, flags);
 }
 
@@ -853,6 +853,11 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
         BIT(NL80211_IFTYPE_MESH_POINT) |
         BIT(NL80211_IFTYPE_WDS);
 
+    /*
+     * Initialize configuration work.
+     */
+    INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
+
     /*
      * Let the driver probe the device to detect the capabilities.
      */
@@ -862,11 +867,6 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
         goto exit;
     }
 
-    /*
-     * Initialize configuration work.
-     */
-    INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
-
     /*
      * Allocate queue array.
      */

@@ -177,8 +177,8 @@ static void handle_tx(struct vhost_net *net)
             break;
         }
         if (err != len)
-            pr_err("Truncated TX packet: "
-                   " len %d != %zd\n", err, len);
+            pr_debug("Truncated TX packet: "
+                     " len %d != %zd\n", err, len);
         vhost_add_used_and_signal(&net->dev, vq, head, 0);
         total_len += len;
         if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
@@ -275,8 +275,8 @@ static void handle_rx(struct vhost_net *net)
         }
         /* TODO: Should check and handle checksum. */
         if (err > len) {
-            pr_err("Discarded truncated rx packet: "
-                   " len %d > %zd\n", err, len);
+            pr_debug("Discarded truncated rx packet: "
+                     " len %d > %zd\n", err, len);
             vhost_discard_vq_desc(vq);
             continue;
         }
@@ -534,11 +534,16 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
         rcu_assign_pointer(vq->private_data, sock);
         vhost_net_enable_vq(n, vq);
 done:
+    mutex_unlock(&vq->mutex);
 
     if (oldsock) {
         vhost_net_flush_vq(n, index);
         fput(oldsock->file);
     }
+
+    mutex_unlock(&n->dev.mutex);
+    return 0;
+
 err_vq:
     mutex_unlock(&vq->mutex);
 err:
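The vhost_net_set_backend() hunk makes the success path drop the vq mutex before flushing and releasing the old socket, so the potentially slow flush no longer runs under the lock. The same restructuring in generic form, sketched with pthreads (set_backend()/slow_flush() are illustrative names, not vhost functions):

/* Sketch: swap a pointer under the lock, but do the slow teardown of
 * the old object only after the lock has been released.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *current_backend;

static void slow_flush(char *old)
{
    usleep(1000);               /* stands in for a lengthy flush */
    free(old);
}

static void set_backend(char *new_backend)
{
    char *old;

    pthread_mutex_lock(&lock);
    old = current_backend;      /* quick: just swap the pointer */
    current_backend = new_backend;
    pthread_mutex_unlock(&lock);

    if (old)
        slow_flush(old);        /* slow: done with the lock dropped */
}

int main(void)
{
    set_backend(strdup("sock-a"));
    set_backend(strdup("sock-b"));
    printf("backend = %s\n", current_backend);
    free(current_backend);
    return 0;
}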
@@ -1224,12 +1224,7 @@ static inline void sk_tx_queue_clear(struct sock *sk)
 
 static inline int sk_tx_queue_get(const struct sock *sk)
 {
-    return sk->sk_tx_queue_mapping;
-}
-
-static inline bool sk_tx_queue_recorded(const struct sock *sk)
-{
-    return (sk && sk->sk_tx_queue_mapping >= 0);
+    return sk ? sk->sk_tx_queue_mapping : -1;
 }
 
 static inline void sk_set_socket(struct sock *sk, struct socket *sock)
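The sock.h hunk folds the old sk_tx_queue_recorded()/sk_tx_queue_get() pair into a single getter that returns -1 when there is no socket or nothing recorded, which is what the dev_pick_tx() hunk further down relies on. A tiny self-contained illustration of that interface shape (struct conn is a stand-in, not struct sock):

/* Sketch: a getter that returns -1 for "nothing recorded", so callers
 * need a single test instead of a separate recorded() + get() pair.
 */
#include <stdio.h>

struct conn {
    int tx_queue;   /* -1 means "not recorded", like sk_tx_queue_mapping */
};

static int conn_tx_queue_get(const struct conn *c)
{
    return c ? c->tx_queue : -1;
}

static int pick_tx_queue(const struct conn *c, int num_queues)
{
    int idx = conn_tx_queue_get(c);

    if (idx < 0)                /* nothing usable: fall back */
        idx = 0;
    return idx % num_queues;
}

int main(void)
{
    struct conn c = { .tx_queue = 3 };

    printf("with conn:    queue %d\n", pick_tx_queue(&c, 8));
    printf("without conn: queue %d\n", pick_tx_queue(NULL, 8));
    return 0;
}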
@@ -358,6 +358,11 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8
         acl->sec_level = sec_level;
         acl->auth_type = auth_type;
         hci_acl_connect(acl);
+    } else {
+        if (acl->sec_level < sec_level)
+            acl->sec_level = sec_level;
+        if (acl->auth_type < auth_type)
+            acl->auth_type = auth_type;
     }
 
     if (type == ACL_LINK)

@@ -1049,6 +1049,8 @@ static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *s
     if (conn) {
         if (!ev->status)
             conn->link_mode |= HCI_LM_AUTH;
+        else
+            conn->sec_level = BT_SECURITY_LOW;
 
         clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
 
@@ -401,6 +401,11 @@ static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
     l2cap_send_sframe(pi, control);
 }
 
+static inline int __l2cap_no_conn_pending(struct sock *sk)
+{
+    return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
+}
+
 static void l2cap_do_start(struct sock *sk)
 {
     struct l2cap_conn *conn = l2cap_pi(sk)->conn;
@@ -409,12 +414,13 @@ static void l2cap_do_start(struct sock *sk)
         if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
             return;
 
-        if (l2cap_check_security(sk)) {
+        if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
             struct l2cap_conn_req req;
             req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
             req.psm = l2cap_pi(sk)->psm;
 
             l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+            l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
             l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                     L2CAP_CONN_REQ, sizeof(req), &req);
@@ -464,12 +470,14 @@ static void l2cap_conn_start(struct l2cap_conn *conn)
         }
 
         if (sk->sk_state == BT_CONNECT) {
-            if (l2cap_check_security(sk)) {
+            if (l2cap_check_security(sk) &&
+                    __l2cap_no_conn_pending(sk)) {
                 struct l2cap_conn_req req;
                 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
                 req.psm = l2cap_pi(sk)->psm;
 
                 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+                l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
                 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                         L2CAP_CONN_REQ, sizeof(req), &req);
@@ -2912,7 +2920,6 @@ static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hd
         l2cap_pi(sk)->ident = 0;
         l2cap_pi(sk)->dcid = dcid;
         l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
-
         l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
 
         l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
@@ -4404,6 +4411,7 @@ static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
             req.psm = l2cap_pi(sk)->psm;
 
             l2cap_pi(sk)->ident = l2cap_get_ident(conn);
+            l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
 
             l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
                     L2CAP_CONN_REQ, sizeof(req), &req);
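The l2cap hunks gate the connect request on the new __l2cap_no_conn_pending() check and set L2CAP_CONF_CONNECT_PEND when a request goes out, so a second trigger cannot send a duplicate L2CAP_CONN_REQ before a response clears the flag. The general send-once-while-pending idea, sketched standalone with illustrative names:

/* Sketch: set a "pending" flag when a request is sent, refuse to send
 * another until the response handler clears it again.
 */
#include <stdbool.h>
#include <stdio.h>

struct channel {
    bool conn_pending;
    int  requests_sent;
};

static void try_send_connect(struct channel *ch)
{
    if (ch->conn_pending)       /* a request is already in flight */
        return;

    ch->conn_pending = true;
    ch->requests_sent++;        /* stands in for sending CONN_REQ */
}

static void handle_connect_rsp(struct channel *ch)
{
    ch->conn_pending = false;   /* response arrived: allow a new request */
}

int main(void)
{
    struct channel ch = { 0 };

    try_send_connect(&ch);
    try_send_connect(&ch);      /* suppressed: still pending */
    handle_connect_rsp(&ch);
    try_send_connect(&ch);      /* allowed again */

    printf("requests actually sent: %d\n", ch.requests_sent);  /* 2 */
    return 0;
}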
@@ -217,14 +217,6 @@ static bool br_devices_support_netpoll(struct net_bridge *br)
     return count != 0 && ret;
 }
 
-static void br_poll_controller(struct net_device *br_dev)
-{
-    struct netpoll *np = br_dev->npinfo->netpoll;
-
-    if (np->real_dev != br_dev)
-        netpoll_poll_dev(np->real_dev);
-}
-
 void br_netpoll_cleanup(struct net_device *dev)
 {
     struct net_bridge *br = netdev_priv(dev);
@@ -295,7 +287,6 @@ static const struct net_device_ops br_netdev_ops = {
     .ndo_do_ioctl = br_dev_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
     .ndo_netpoll_cleanup = br_netpoll_cleanup,
-    .ndo_poll_controller = br_poll_controller,
 #endif
 };
 
@@ -50,14 +50,7 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
         kfree_skb(skb);
     else {
         skb_push(skb, ETH_HLEN);
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-        if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
-            netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
-            skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
-        } else
-#endif
-            dev_queue_xmit(skb);
+        dev_queue_xmit(skb);
     }
 }
 
@@ -73,23 +66,9 @@ int br_forward_finish(struct sk_buff *skb)
 
 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
-#ifdef CONFIG_NET_POLL_CONTROLLER
-    struct net_bridge *br = to->br;
-    if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
-        struct netpoll *np;
-        to->dev->npinfo = skb->dev->npinfo;
-        np = skb->dev->npinfo->netpoll;
-        np->real_dev = np->dev = to->dev;
-        to->dev->priv_flags |= IFF_IN_NETPOLL;
-    }
-#endif
     skb->dev = to->dev;
     NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
         br_forward_finish);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-    if (skb->dev->npinfo)
-        skb->dev->npinfo->netpoll->dev = br->dev;
-#endif
 }
 
 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)

@@ -1911,8 +1911,16 @@ static int dev_gso_segment(struct sk_buff *skb)
  */
 static inline void skb_orphan_try(struct sk_buff *skb)
 {
-    if (!skb_tx(skb)->flags)
+    struct sock *sk = skb->sk;
+
+    if (sk && !skb_tx(skb)->flags) {
+        /* skb_tx_hash() wont be able to get sk.
+         * We copy sk_hash into skb->rxhash
+         */
+        if (!skb->rxhash)
+            skb->rxhash = sk->sk_hash;
         skb_orphan(skb);
+    }
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -1998,8 +2006,7 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
     if (skb->sk && skb->sk->sk_hash)
         hash = skb->sk->sk_hash;
     else
-        hash = (__force u16) skb->protocol;
-
+        hash = (__force u16) skb->protocol ^ skb->rxhash;
     hash = jhash_1word(hash, hashrnd);
 
     return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
@@ -2022,12 +2029,11 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
                     struct sk_buff *skb)
 {
-    u16 queue_index;
+    int queue_index;
     struct sock *sk = skb->sk;
 
-    if (sk_tx_queue_recorded(sk)) {
-        queue_index = sk_tx_queue_get(sk);
-    } else {
+    queue_index = sk_tx_queue_get(sk);
+    if (queue_index < 0) {
         const struct net_device_ops *ops = dev->netdev_ops;
 
         if (ops->ndo_select_queue) {
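The three dev.c hunks work together: skb_orphan_try() now stashes sk->sk_hash in skb->rxhash before dropping the socket, skb_tx_hash() mixes skb->rxhash into its fallback hash, and dev_pick_tx() uses the merged sk_tx_queue_get(). The core idea, keeping a flow hash usable after the owner is detached, can be sketched in miniature (struct pkt/struct flow are stand-ins for skb/sock):

/* Sketch: before detaching a packet from its flow, stash the flow hash
 * on the packet itself so later queue selection still sees it.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct flow { uint32_t hash; };

struct pkt {
    struct flow *owner;     /* may be dropped ("orphaned") early */
    uint32_t     rxhash;    /* 0 means "no hash stashed yet" */
    uint16_t     protocol;
};

static void pkt_orphan_try(struct pkt *p)
{
    if (p->owner) {
        if (!p->rxhash)
            p->rxhash = p->owner->hash;     /* keep the flow hash */
        p->owner = NULL;                    /* detach from the flow */
    }
}

static uint32_t pkt_tx_hash(const struct pkt *p, unsigned int num_queues)
{
    uint32_t hash;

    if (p->owner && p->owner->hash)
        hash = p->owner->hash;
    else
        hash = (uint32_t)p->protocol ^ p->rxhash;   /* fallback path */

    return hash % num_queues;
}

int main(void)
{
    struct flow f = { .hash = 0xabcdef12 };
    struct pkt p = { .owner = &f, .rxhash = 0, .protocol = 0x0800 };

    pkt_orphan_try(&p);
    printf("queue after orphan: %u\n", pkt_tx_hash(&p, 16));
    return 0;
}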
@@ -949,7 +949,10 @@ static void neigh_update_hhs(struct neighbour *neigh)
 {
     struct hh_cache *hh;
     void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
-        = neigh->dev->header_ops->cache_update;
+        = NULL;
+
+    if (neigh->dev->header_ops)
+        update = neigh->dev->header_ops->cache_update;
 
     if (update) {
         for (hh = neigh->hh; hh; hh = hh->hh_next) {
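The neigh_update_hhs() fix initializes the cache_update pointer to NULL and only reads it through header_ops when header_ops exists, since devices without header_ops used to oops here. The same defensive lookup in isolation, with illustrative types rather than the kernel's:

/* Sketch: fetch a callback through an optional ops structure, treating
 * both "no ops" and "no callback" as "do nothing".
 */
#include <stddef.h>
#include <stdio.h>

struct ops {
    void (*cache_update)(int *cache, int value);
};

struct device {
    struct ops *header_ops;     /* may legitimately be NULL */
    int cache;
};

static void do_update(int *cache, int value)
{
    *cache = value;
}

static void device_update_cache(struct device *dev, int value)
{
    void (*update)(int *, int) = NULL;

    if (dev->header_ops)
        update = dev->header_ops->cache_update;

    if (update)
        update(&dev->cache, value);
}

int main(void)
{
    struct ops o = { .cache_update = do_update };
    struct device with = { .header_ops = &o };
    struct device without = { .header_ops = NULL };

    device_update_cache(&with, 42);     /* updates the cache */
    device_update_cache(&without, 42);  /* safely skipped */
    printf("%d %d\n", with.cache, without.cache);
    return 0;
}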
@@ -1,7 +1,7 @@
 menuconfig NET_DSA
     bool "Distributed Switch Architecture support"
     default n
-    depends on EXPERIMENTAL && !S390
+    depends on EXPERIMENTAL && NET_ETHERNET && !S390
     select PHYLIB
     ---help---
       This allows you to use hardware switch chips that use

@@ -442,8 +442,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
     int err;
 
     err = ipmr_fib_lookup(net, &fl, &mrt);
-    if (err < 0)
+    if (err < 0) {
+        kfree_skb(skb);
         return err;
+    }
 
     read_lock(&mrt_lock);
     dev->stats.tx_bytes += skb->len;
@@ -1728,8 +1730,10 @@ int ip_mr_input(struct sk_buff *skb)
         goto dont_forward;
 
     err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
-    if (err < 0)
+    if (err < 0) {
+        kfree_skb(skb);
         return err;
+    }
 
     if (!local) {
         if (IPCB(skb)->opt.router_alert) {
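Both ipmr hunks plug the same leak: when ipmr_fib_lookup() fails, the function still returns early, but it now frees the skb it owns. The ownership rule in standalone form, with plain malloc()/free() and hypothetical names instead of skbs:

/* Sketch: a function that takes ownership of a buffer must release it
 * on every exit path, including the early-error return.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int lookup_route(const char *dst)
{
    return strcmp(dst, "known") == 0 ? 0 : -1;  /* -1 ~ lookup failure */
}

/* Takes ownership of buf: it is freed here on both success and failure. */
static int transmit(char *buf, const char *dst)
{
    int err = lookup_route(dst);

    if (err < 0) {
        free(buf);      /* the fix: don't leak on the error path */
        return err;
    }

    printf("sending '%s' to %s\n", buf, dst);
    free(buf);
    return 0;
}

int main(void)
{
    transmit(strdup("payload"), "known");
    transmit(strdup("payload"), "unknown");     /* freed, not leaked */
    return 0;
}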
@@ -608,6 +608,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
     ssize_t spliced;
     int ret;
 
+    sock_rps_record_flow(sk);
     /*
      * We can't seek on a socket input
      */

@@ -2208,6 +2208,9 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
     int mib_idx;
     int fwd_rexmitting = 0;
 
+    if (!tp->packets_out)
+        return;
+
     if (!tp->lost_out)
         tp->retransmit_high = tp->snd_una;
 
@@ -347,11 +347,12 @@ static const struct xfrm_type mip6_destopt_type =
 
 static int mip6_rthdr_input(struct xfrm_state *x, struct sk_buff *skb)
 {
+    struct ipv6hdr *iph = ipv6_hdr(skb);
     struct rt2_hdr *rt2 = (struct rt2_hdr *)skb->data;
     int err = rt2->rt_hdr.nexthdr;
 
     spin_lock(&x->lock);
-    if (!ipv6_addr_equal(&rt2->addr, (struct in6_addr *)x->coaddr) &&
+    if (!ipv6_addr_equal(&iph->daddr, (struct in6_addr *)x->coaddr) &&
         !ipv6_addr_any((struct in6_addr *)x->coaddr))
         err = -ENOENT;
     spin_unlock(&x->lock);

@@ -698,6 +698,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp)
         newsk = NULL;
         goto out;
     }
+    kfree_skb(oskb);
 
     sock_hold(sk);
     pep_sk(newsk)->listener = sk;

@@ -205,7 +205,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
     {
         struct icmphdr *icmph;
 
-        if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+        if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))
             goto drop;
 
         icmph = (void *)(skb_network_header(skb) + ihl);
@@ -215,6 +215,9 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
             (icmph->type != ICMP_PARAMETERPROB))
             break;
 
+        if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
+            goto drop;
+
         iph = (void *)(icmph + 1);
         if (egress)
             addr = iph->daddr;
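The act_nat hunks split one large pskb_may_pull() into two: the ICMP header is validated before it is read, and the embedded IP header is only required for the ICMP types that actually carry one. A simplified standalone version of that staged validation over a byte buffer (the header structs are illustrative, not the kernel's):

/* Sketch: validate only the bytes needed for the next read, and require
 * the deeper header only for message types that actually contain it.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct icmp_hdr { uint8_t type; uint8_t code; uint16_t csum; };
struct ip_hdr   { uint8_t ver_ihl; uint8_t tos; uint16_t tot_len; };

#define ICMP_DEST_UNREACH 3     /* one of the types that embed an IP header */

static int parse(const uint8_t *buf, size_t len)
{
    struct icmp_hdr icmph;
    struct ip_hdr iph;

    if (len < sizeof(icmph))                /* first check: ICMP header only */
        return -1;
    memcpy(&icmph, buf, sizeof(icmph));

    if (icmph.type != ICMP_DEST_UNREACH)
        return 0;                           /* no embedded IP header needed */

    if (len < sizeof(icmph) + sizeof(iph))  /* second check: only now */
        return -1;
    memcpy(&iph, buf + sizeof(icmph), sizeof(iph));

    printf("embedded IP header, tos=%u\n", iph.tos);
    return 0;
}

int main(void)
{
    uint8_t small[4] = { 8, 0, 0, 0 };              /* echo request */
    uint8_t big[8]   = { 3, 0, 0, 0, 0x45, 0, 0, 0 };

    printf("%d %d\n", parse(small, sizeof(small)), parse(big, sizeof(big)));
    return 0;
}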
@@ -1594,8 +1594,8 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
     /* Try to instantiate a bundle */
     err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
-    if (err < 0) {
-        if (err != -EAGAIN)
+    if (err <= 0) {
+        if (err != 0 && err != -EAGAIN)
             XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
         return ERR_PTR(err);
     }
@@ -1678,6 +1678,13 @@ xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
             goto make_dummy_bundle;
         dst_hold(&xdst->u.dst);
         return oldflo;
+    } else if (new_xdst == NULL) {
+        num_xfrms = 0;
+        if (oldflo == NULL)
+            goto make_dummy_bundle;
+        xdst->num_xfrms = 0;
+        dst_hold(&xdst->u.dst);
+        return oldflo;
     }
 
     /* Kill the previous bundle */
@@ -1760,6 +1767,10 @@ restart:
         xfrm_pols_put(pols, num_pols);
         err = PTR_ERR(xdst);
         goto dropdst;
+    } else if (xdst == NULL) {
+        num_xfrms = 0;
+        drop_pols = num_pols;
+        goto no_transform;
     }
 
     spin_lock_bh(&xfrm_policy_sk_bundle_lock);