2019-05-27 09:55:01 +03:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* Linux NET3: Internet Group Management Protocol [IGMP]
|
|
|
|
*
|
|
|
|
* Authors:
|
2008-10-14 06:01:08 +04:00
|
|
|
* Alan Cox <alan@lxorguk.ukuu.org.uk>
|
2005-04-17 02:20:36 +04:00
|
|
|
*
|
|
|
|
* Extended to talk the BSD extended IGMP protocol of mrouted 3.6
|
|
|
|
*/
|
|
|
|
#ifndef _LINUX_IGMP_H
|
|
|
|
#define _LINUX_IGMP_H
|
|
|
|
|
|
|
|
#include <linux/skbuff.h>
|
2006-12-04 07:15:30 +03:00
|
|
|
#include <linux/timer.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <linux/in.h>
|
2019-01-21 09:26:25 +03:00
|
|
|
#include <linux/ip.h>
|
2017-06-30 13:08:02 +03:00
|
|
|
#include <linux/refcount.h>
|
2012-10-13 13:46:48 +04:00
|
|
|
#include <uapi/linux/igmp.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2008-03-24 08:05:44 +03:00
|
|
|
/* Return the IGMP header, assuming skb's transport header points at it. */
static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
{
	unsigned char *th = skb_transport_header(skb);

	return (struct igmphdr *)th;
}
|
|
|
|
|
|
|
|
/* Return the IGMPv3 membership report header at the transport offset. */
static inline struct igmpv3_report *
igmpv3_report_hdr(const struct sk_buff *skb)
{
	unsigned char *th = skb_transport_header(skb);

	return (struct igmpv3_report *)th;
}
|
|
|
|
|
|
|
|
/* Return the IGMPv3 membership query header at the transport offset. */
static inline struct igmpv3_query *
igmpv3_query_hdr(const struct sk_buff *skb)
{
	unsigned char *th = skb_transport_header(skb);

	return (struct igmpv3_query *)th;
}
|
|
|
|
|
2009-11-04 20:50:58 +03:00
|
|
|
/*
 * Per-socket list of multicast source addresses, used for source
 * filtering (MCAST_{INCLUDE,EXCLUDE} on a membership).
 */
struct ip_sf_socklist {
	unsigned int		sl_max;		/* allocated slots in sl_addr[]; presumably grown
						 * IP_SFBLOCK entries at a time — TODO confirm */
	unsigned int		sl_count;	/* number of addresses currently in use */
	struct rcu_head		rcu;		/* deferred freeing under RCU */
	__be32			sl_addr[];	/* flexible array of source addresses,
						 * network byte order */
};
|
|
|
|
|
|
|
|
#define IP_SFBLOCK 10 /* allocate this many at once */
|
|
|
|
|
|
|
|
/* ip_mc_socklist is a real list now.  Speed is not an argument;
   this list is never used in fast-path code.
 */
|
|
|
|
|
2009-11-04 20:50:58 +03:00
|
|
|
/*
 * One multicast group membership held by a socket; memberships of a
 * socket are chained through next_rcu.
 */
struct ip_mc_socklist {
	struct ip_mc_socklist __rcu *next_rcu;	/* next membership on this socket (RCU) */
	struct ip_mreqn		multi;		/* group address and interface joined */
	unsigned int		sfmode;		/* MCAST_{INCLUDE,EXCLUDE} */
	struct ip_sf_socklist __rcu *sflist;	/* source filter for this membership (RCU) */
	struct rcu_head		rcu;		/* deferred freeing under RCU */
};
|
|
|
|
|
2009-11-04 20:50:58 +03:00
|
|
|
/*
 * One source-address entry of an interface-level multicast group
 * (IGMPv3 source filtering state).
 */
struct ip_sf_list {
	struct ip_sf_list	*sf_next;	/* next source on this group */
	unsigned long		sf_count[2];	/* include/exclude counts */
	__be32			sf_inaddr;	/* source address, network byte order */
	unsigned char		sf_gsresp;	/* include in g & s response? */
	unsigned char		sf_oldin;	/* change state */
	unsigned char		sf_crcount;	/* retrans. left to send */
};
|
|
|
|
|
2009-11-04 20:50:58 +03:00
|
|
|
/*
 * Interface-level state for one IPv4 multicast group membership (the
 * device's view, as opposed to the per-socket ip_mc_socklist).
 */
struct ip_mc_list {
	struct in_device	*interface;	/* device this membership lives on */
	__be32			multiaddr;	/* group address, network byte order */
	unsigned int		sfmode;		/* MCAST_{INCLUDE,EXCLUDE} filter mode */
	struct ip_sf_list	*sources;	/* active source filter entries */
	struct ip_sf_list	*tomb;		/* removed sources; presumably kept until
						 * reported — TODO confirm */
	unsigned long		sfcount[2];	/* per-filter-mode counts (include/exclude) */
	union {					/* same link, plain vs RCU-annotated access */
		struct ip_mc_list *next;
		struct ip_mc_list __rcu *next_rcu;
	};
	struct ip_mc_list __rcu *next_hash;	/* hash-chain link (RCU) */
	struct timer_list	timer;		/* report (re)transmission timer */
	int			users;		/* number of joins on this interface */
	refcount_t		refcnt;		/* object lifetime reference count */
	spinlock_t		lock;		/* protects the mutable state above —
						 * exact coverage TODO confirm */
	char			tm_running;	/* timer currently pending? */
	char			reporter;	/* NOTE(review): looks like "we sent the
						 * last report" — confirm */
	char			unsolicit_count; /* unsolicited reports left to send */
	char			loaded;		/* NOTE(review): semantics not visible
						 * here — confirm against igmp.c */
	unsigned char		gsquery;	/* check source marks? */
	unsigned char		crcount;	/* change-report retransmissions left */
	struct rcu_head		rcu;		/* deferred freeing under RCU */
};
|
|
|
|
|
|
|
|
/* V3 exponential field decoding */
/*
 * RFC 3376 4.1.1: an 8-bit Max Resp Code / QQIC value below the
 * threshold (high bit clear) is taken literally; otherwise the low
 * nbmant bits are a mantissa and the next nbexp bits an exponent,
 * decoding to (mant | 1<<nbmant) << (exp + nbexp).
 */
#define IGMPV3_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value))
#define IGMPV3_EXP(thresh, nbmant, nbexp, value) \
	((value) < (thresh) ? (value) : \
	 ((IGMPV3_MASK(value, nbmant) | (1<<(nbmant))) << \
	  (IGMPV3_MASK((value) >> (nbmant), nbexp) + (nbexp))))

/* Querier's Query Interval Code and Max Resp Code use the same encoding. */
#define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value)
#define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value)
|
|
|
|
|
2019-01-21 09:26:25 +03:00
|
|
|
/*
 * Make @len bytes from the transport header onward available in the
 * linear area of @skb.  Returns 0 when the IP payload is too short to
 * ever contain @len bytes, otherwise the result of pskb_may_pull().
 */
static inline int ip_mc_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (skb_transport_offset(skb) + ip_transport_len(skb) >= len)
		return pskb_may_pull(skb, len);

	return 0;
}
|
|
|
|
|
2015-09-28 21:10:31 +03:00
|
|
|
/* Test whether (mc_addr, src_addr, proto) should be accepted on @dev;
 * the _rcu suffix suggests it must run in an RCU read-side section —
 * TODO confirm against the definition. */
extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
/* Input handler for received IGMP packets. */
extern int igmp_rcv(struct sk_buff *);
/* Join a multicast group on behalf of @sk (any-source join). */
extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
|
ipv4/igmp: init group mode as INCLUDE when join source group
Based on RFC3376 5.1
If no interface
state existed for that multicast address before the change (i.e., the
change consisted of creating a new per-interface record), or if no
state exists after the change (i.e., the change consisted of deleting
a per-interface record), then the "non-existent" state is considered
to have a filter mode of INCLUDE and an empty source list.
Which means a new multicast group should start with state IN().
Function ip_mc_join_group() works correctly for IGMP ASM(Any-Source Multicast)
mode. It adds a group with state EX() and inits crcount to mc_qrv,
so the kernel will send a TO_EX() report message after adding group.
But for IGMPv3 SSM(Source-specific multicast) JOIN_SOURCE_GROUP mode, we
split the group joining into two steps. First we join the group like ASM,
i.e. via ip_mc_join_group(). So the state changes from IN() to EX().
Then we add the source-specific address with INCLUDE mode. So the state
changes from EX() to IN(A).
Before the first step sends a group change record, we finished the second
step. So we will only send the second change record. i.e. TO_IN(A).
According to what the RFC stipulates, we should actually send an ALLOW(A) message for
SSM JOIN_SOURCE_GROUP as the state should mimic the 'IN() to IN(A)'
transition.
The issue was exposed by commit a052517a8ff65 ("net/multicast: should not
send source list records when have filter mode change"). Before this change,
we used to send both ALLOW(A) and TO_IN(A). After this change we only send
TO_IN(A).
Fix it by adding a new parameter to init group mode. Also add new wrapper
functions so we don't need to change too much code.
v1 -> v2:
In my first version I only cleared the group change record. But this is not
enough. Because when a new group join, it will init as EXCLUDE and trigger
an filter mode change in ip/ip6_mc_add_src(), which will clear all source
addresses' sf_crcount. This will prevent early joined address sending state
change records if multiple source addresses joined at the same time.
In v2 patch, I fixed it by directly initializing the mode to INCLUDE for SSM
JOIN_SOURCE_GROUP. I also split the original patch into two separated patches
for IPv4 and IPv6.
Fixes: a052517a8ff65 ("net/multicast: should not send source list records when have filter mode change")
Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-10 17:41:26 +03:00
|
|
|
/* Like ip_mc_join_group() but lets the caller choose the initial filter
 * mode (MCAST_INCLUDE for source-specific joins, per RFC 3376 5.1). */
extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
				unsigned int mode);
extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
/* Drop all multicast memberships still held by @sk. */
extern void ip_mc_drop_socket(struct sock *sk);
/* Add (@add != 0) or delete one source filter entry; @omode is the
 * filter mode the change applies under. */
extern int ip_mc_source(int add, int omode, struct sock *sk,
			struct ip_mreq_source *mreqs, int ifindex);
/* Replace a membership's whole source filter. */
extern int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex);
/* Fetch a membership's source filter back to user space. */
extern int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
			struct ip_msfilter __user *optval, int __user *optlen);
extern int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
			struct sockaddr_storage __user *p);
/* Source-filter delivery check for an incoming packet on @sk. */
extern int ip_mc_sf_allow(struct sock *sk, __be32 local, __be32 rmt,
			  int dif, int sdif);
/* in_device lifetime / link-state hooks. */
extern void ip_mc_init_dev(struct in_device *);
extern void ip_mc_destroy_dev(struct in_device *);
extern void ip_mc_up(struct in_device *);
extern void ip_mc_down(struct in_device *);
extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);
/* Drop one device-level reference on group @addr; @gfp presumably bounds
 * allocations made while leaving — TODO confirm. */
extern void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp);
|
|
|
|
/*
 * Drop one device-level reference on group @addr, allowing
 * __ip_mc_dec_group() to allocate with GFP_KERNEL.
 */
static inline void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
{
	/* Was "return __ip_mc_dec_group(...)": returning an expression
	 * from a void function is a C constraint violation (GNU
	 * extension only; checkpatch warns about it too). */
	__ip_mc_dec_group(in_dev, addr, GFP_KERNEL);
}
|
|
|
|
/* Take one device-level reference on group @addr, allocating with @gfp. */
extern void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr,
			      gfp_t gfp);
/* GFP_KERNEL convenience wrapper around __ip_mc_inc_group() —
 * TODO confirm against the definition. */
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
/* Validate the IGMP packet carried in @skb. */
int ip_mc_check_igmp(struct sk_buff *skb);
|
bonding: Improve IGMP join processing
In active-backup mode, the current bonding code duplicates IGMP
traffic to all slaves, so that switches are up to date in case of a
failover from an active to a backup interface. If bonding then fails
back to the original active interface, it is likely that the "active
slave" switch's IGMP forwarding for the port will be out of date until
some event occurs to refresh the switch (e.g., a membership query).
This patch alters the behavior of bonding to no longer flood
IGMP to all ports, and to issue IGMP JOINs to the newly active port at
the time of a failover. This ensures that switches are kept up to date
for all cases.
"GOELLESCH Niels" <niels.goellesch@eurocontrol.int> originally
reported this problem, and included a patch. His original patch was
modified by Jay Vosburgh to additionally remove the existing IGMP flood
behavior, use RCU, streamline code paths, fix trailing white space, and
adjust for style.
Signed-off-by: Jay Vosburgh <fubar@us.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
2007-03-01 04:03:37 +03:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
#endif
|