macvlan: handle fragmented multicast frames

Fragmented multicast frames are delivered to a single macvlan port,
because the IP defrag logic considers the other copies redundant.

Implement a defrag step before trying to send the multicast frame.

Reported-by: Ben Greear <greearb@candelatech.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: f7ba35da58
Commit: bc416d9768
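
For orientation, here is a minimal sketch (not part of the patch) of how a receive handler is expected to call the ip_check_defrag() helper introduced below: the caller passes the skb plus a defrag-user identity and either gets back the (possibly reassembled) skb or NULL when the fragment has been queued. example_handle_frame() and its surrounding setup are hypothetical; only ip_check_defrag(), IP_DEFRAG_MACVLAN and the rx_handler return codes come from the patch itself.

    /*
     * Illustrative only -- a hypothetical rx_handler showing the calling
     * convention of ip_check_defrag(): a NULL return means the fragment was
     * swallowed by the defrag queue and processing must stop; otherwise the
     * returned skb (possibly a freshly reassembled datagram) replaces the
     * original one.
     */
    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <net/ip.h>

    static rx_handler_result_t example_handle_frame(struct sk_buff **pskb)
    {
        struct sk_buff *skb = *pskb;
        const struct ethhdr *eth = eth_hdr(skb);

        if (is_multicast_ether_addr(eth->h_dest)) {
            /* Reassemble fragmented IPv4 multicast before fan-out. */
            skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
            if (!skb)
                return RX_HANDLER_CONSUMED; /* fragment queued */
            *pskb = skb;
        }
        return RX_HANDLER_PASS;
    }
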
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -169,6 +169,9 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
 	port = macvlan_port_get_rcu(skb->dev);
 	if (is_multicast_ether_addr(eth->h_dest)) {
+		skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
+		if (!skb)
+			return RX_HANDLER_CONSUMED;
 		src = macvlan_hash_lookup(port, eth->h_source);
 		if (!src)
 			/* frame comes from an external address */
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -406,9 +406,18 @@ enum ip_defrag_users {
 	IP_DEFRAG_VS_OUT,
 	IP_DEFRAG_VS_FWD,
 	IP_DEFRAG_AF_PACKET,
+	IP_DEFRAG_MACVLAN,
 };
 
 int ip_defrag(struct sk_buff *skb, u32 user);
+#ifdef CONFIG_INET
+struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user);
+#else
+static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+{
+	return skb;
+}
+#endif
 int ip_frag_mem(struct net *net);
 int ip_frag_nqueues(struct net *net);
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -682,6 +682,42 @@ int ip_defrag(struct sk_buff *skb, u32 user)
 }
 EXPORT_SYMBOL(ip_defrag);
 
+struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
+{
+	const struct iphdr *iph;
+	u32 len;
+
+	if (skb->protocol != htons(ETH_P_IP))
+		return skb;
+
+	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+		return skb;
+
+	iph = ip_hdr(skb);
+	if (iph->ihl < 5 || iph->version != 4)
+		return skb;
+	if (!pskb_may_pull(skb, iph->ihl*4))
+		return skb;
+	iph = ip_hdr(skb);
+	len = ntohs(iph->tot_len);
+	if (skb->len < len || len < (iph->ihl * 4))
+		return skb;
+
+	if (ip_is_fragment(ip_hdr(skb))) {
+		skb = skb_share_check(skb, GFP_ATOMIC);
+		if (skb) {
+			if (pskb_trim_rcsum(skb, len))
+				return skb;
+			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
+			if (ip_defrag(skb, user))
+				return NULL;
+			skb->rxhash = 0;
+		}
+	}
+	return skb;
+}
+EXPORT_SYMBOL(ip_check_defrag);
+
 #ifdef CONFIG_SYSCTL
 static int zero;
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1213,43 +1213,6 @@ static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *sk
 	return f->arr[cpu % num];
 }
 
-static struct sk_buff *fanout_check_defrag(struct sk_buff *skb)
-{
-#ifdef CONFIG_INET
-	const struct iphdr *iph;
-	u32 len;
-
-	if (skb->protocol != htons(ETH_P_IP))
-		return skb;
-
-	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
-		return skb;
-
-	iph = ip_hdr(skb);
-	if (iph->ihl < 5 || iph->version != 4)
-		return skb;
-	if (!pskb_may_pull(skb, iph->ihl*4))
-		return skb;
-	iph = ip_hdr(skb);
-	len = ntohs(iph->tot_len);
-	if (skb->len < len || len < (iph->ihl * 4))
-		return skb;
-
-	if (ip_is_fragment(ip_hdr(skb))) {
-		skb = skb_share_check(skb, GFP_ATOMIC);
-		if (skb) {
-			if (pskb_trim_rcsum(skb, len))
-				return skb;
-			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
-			if (ip_defrag(skb, IP_DEFRAG_AF_PACKET))
-				return NULL;
-			skb->rxhash = 0;
-		}
-	}
-#endif
-	return skb;
-}
-
 static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
 			     struct packet_type *pt, struct net_device *orig_dev)
 {
@@ -1268,7 +1231,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
 	case PACKET_FANOUT_HASH:
 	default:
 		if (f->defrag) {
-			skb = fanout_check_defrag(skb);
+			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
 			if (!skb)
 				return 0;
 		}