// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Multicast support for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/ipv4/igmp.c and linux/ipv4/ip_sockglue.c
 */

/* Changes:
 *
 *	yoshfuji	: fix format of router-alert option
 *	YOSHIFUJI Hideaki @USAGI:
 *		Fixed source address for MLD message based on
 *		<draft-ietf-magma-mld-source-05.txt>.
 *	YOSHIFUJI Hideaki @USAGI:
 *		- Ignore Queries for invalid addresses.
 *		- MLD for link-local addresses.
 *	David L Stevens <dlstevens@us.ibm.com>:
 *		- MLDv2 support
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/times.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/route.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#include <net/mld.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/inet_common.h>

#include <net/ip6_checksum.h>

/* Ensure that we have struct in6_addr aligned on 32bit word. This is an
 * otherwise-unused array of compile-time assertions: BUILD_BUG_ON_ZERO()
 * evaluates to 0, but breaks the build if one of these offsets ever loses
 * its 4-byte alignment.
 */
static int __mld2_query_bugs[] __attribute__((__unused__)) = {
	BUILD_BUG_ON_ZERO(offsetof(struct mld2_query, mld2q_srcs) % 4),
	BUILD_BUG_ON_ZERO(offsetof(struct mld2_report, mld2r_grec) % 4),
	BUILD_BUG_ON_ZERO(offsetof(struct mld2_grec, grec_mca) % 4)
};

static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;

static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
static void igmp6_timer_handler(struct timer_list *t);

static void mld_gq_timer_expire(struct timer_list *t);
static void mld_ifc_timer_expire(struct timer_list *t);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_clear_delrec(struct inet6_dev *idev);
static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
static void sf_markstate(struct ifmcaddr6 *pmc);
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
			  int sfmode, int sfcount, const struct in6_addr *psfsrc,
			  int delta);
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
			    struct inet6_dev *idev);
static int __ipv6_dev_mc_inc(struct net_device *dev,
			     const struct in6_addr *addr, unsigned int mode);

#define MLD_QRV_DEFAULT		2
/* RFC3810, 9.2. Query Interval */
#define MLD_QI_DEFAULT		(125 * HZ)
/* RFC3810, 9.3. Query Response Interval */
#define MLD_QRI_DEFAULT		(10 * HZ)
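/* RFC3810, 9.12. Older Version Querier Present Timeout: the switch back
 * to MLDv2 mode after hearing an MLDv1 query is [Robustness Variable] *
 * [Query Interval] + [Query Response Interval], i.e. 2 * 125s + 10s =
 * 260s with the defaults above.
 */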

/* RFC3810, 8.1 Query Version Distinctions */
#define MLD_V1_QUERY_LEN	24
#define MLD_V2_QUERY_LEN_MIN	28

#define IPV6_MLD_MAX_MSF	64

int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
int sysctl_mld_qrv __read_mostly = MLD_QRV_DEFAULT;

/*
 *	socket join on multicast group
 */

#define for_each_pmc_rcu(np, pmc)				\
	for (pmc = rcu_dereference(np->ipv6_mc_list);		\
	     pmc != NULL;					\
	     pmc = rcu_dereference(pmc->next))
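
/* for_each_pmc_rcu() uses rcu_dereference(), so every walk of the
 * per-socket list must happen under rcu_read_lock()/rcu_read_unlock();
 * see the lookup loops in __ipv6_sock_mc_join() and ip6_mc_source().
 */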

static int unsolicited_report_interval(struct inet6_dev *idev)
{
	int iv;

	if (mld_in_v1_mode(idev))
		iv = idev->cnf.mldv1_unsolicited_report_interval;
	else
		iv = idev->cnf.mldv2_unsolicited_report_interval;

	return iv > 0 ? iv : 1;
}
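
/* A non-positive interval is clamped to a single jiffy by the fallback
 * above; the per-device defaults are 10s for MLDv1 (RFC2710, 7.10) and
 * 1s for MLDv2 (RFC3810, 9.11).
 */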

static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
			       const struct in6_addr *addr, unsigned int mode)
{
	struct net_device *dev = NULL;
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	int err;

	ASSERT_RTNL();

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	rcu_read_lock();
	for_each_pmc_rcu(np, mc_lst) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {
			rcu_read_unlock();
			return -EADDRINUSE;
		}
	}
	rcu_read_unlock();

	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);

	if (!mc_lst)
		return -ENOMEM;

	mc_lst->next = NULL;
	mc_lst->addr = *addr;

	if (ifindex == 0) {
		struct rt6_info *rt;

		rt = rt6_lookup(net, addr, NULL, 0, NULL, 0);
		if (rt) {
			dev = rt->dst.dev;
			ip6_rt_put(rt);
		}
	} else
		dev = __dev_get_by_index(net, ifindex);

	if (!dev) {
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return -ENODEV;
	}

	mc_lst->ifindex = dev->ifindex;
	mc_lst->sfmode = mode;
	rwlock_init(&mc_lst->sflock);
	mc_lst->sflist = NULL;

	/*
	 *	now add/increase the group membership on the device
	 */

	err = __ipv6_dev_mc_inc(dev, addr, mode);

	if (err) {
		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
		return err;
	}

	mc_lst->next = np->ipv6_mc_list;
	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);

	return 0;
}

int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ipv6_sock_mc_join);
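
/* RFC3810, 6.1: a brand-new per-interface record is considered to be in
 * INCLUDE mode with an empty source list.  Plain joins therefore default
 * to MCAST_EXCLUDE above, while SSM joins pass MCAST_INCLUDE through the
 * wrapper below so that the initial report is ALLOW() rather than TO_IN().
 */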
int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
			  const struct in6_addr *addr, unsigned int mode)
{
	return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
}
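
/* For reference, userspace reaches this SSM join path through the
 * RFC 3678 socket API: MCAST_JOIN_SOURCE_GROUP calls
 * ipv6_sock_mc_join_ssm() with MCAST_INCLUDE and then ip6_mc_source()
 * for the source filter.  A minimal sketch (fd, ifindex and the two
 * sockaddr_in6 values grp/src are placeholders):
 *
 *	struct group_source_req gsr = { .gsr_interface = ifindex };
 *
 *	memcpy(&gsr.gsr_group, &grp, sizeof(grp));
 *	memcpy(&gsr.gsr_source, &src, sizeof(src));
 *	setsockopt(fd, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
 *		   &gsr, sizeof(gsr));
 */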

/*
 *	socket leave on multicast group
 */
int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc_lst;
	struct ipv6_mc_socklist __rcu **lnk;
	struct net *net = sock_net(sk);

	ASSERT_RTNL();

	if (!ipv6_addr_is_multicast(addr))
		return -EINVAL;

	for (lnk = &np->ipv6_mc_list;
	     (mc_lst = rtnl_dereference(*lnk)) != NULL;
	      lnk = &mc_lst->next) {
		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
		    ipv6_addr_equal(&mc_lst->addr, addr)) {
			struct net_device *dev;

			*lnk = mc_lst->next;

			dev = __dev_get_by_index(net, mc_lst->ifindex);
			if (dev) {
				struct inet6_dev *idev = __in6_dev_get(dev);

				(void) ip6_mc_leave_src(sk, mc_lst, idev);
				if (idev)
					__ipv6_dev_mc_dec(idev, &mc_lst->addr);
			} else
				(void) ip6_mc_leave_src(sk, mc_lst, NULL);

			atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
			kfree_rcu(mc_lst, rcu);
			return 0;
		}
	}

	return -EADDRNOTAVAIL;
}
EXPORT_SYMBOL(ipv6_sock_mc_drop);

/* Called with rcu_read_lock(); on success this returns with idev->lock
 * read-locked as well, and the caller is responsible for dropping it
 * with read_unlock_bh() when done.
 */
static struct inet6_dev *ip6_mc_find_dev_rcu(struct net *net,
					     const struct in6_addr *group,
					     int ifindex)
{
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;

	if (ifindex == 0) {
		struct rt6_info *rt = rt6_lookup(net, group, NULL, 0, NULL, 0);

		if (rt) {
			dev = rt->dst.dev;
			ip6_rt_put(rt);
		}
	} else
		dev = dev_get_by_index_rcu(net, ifindex);

	if (!dev)
		return NULL;
	idev = __in6_dev_get(dev);
	if (!idev)
		return NULL;
	read_lock_bh(&idev->lock);
	if (idev->dead) {
		read_unlock_bh(&idev->lock);
		return NULL;
	}
	return idev;
}
2016-10-20 09:35:12 +03:00
|
|
|
void __ipv6_sock_mc_close(struct sock *sk)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct ipv6_pinfo *np = inet6_sk(sk);
|
|
|
|
struct ipv6_mc_socklist *mc_lst;
|
2008-03-25 20:26:21 +03:00
|
|
|
struct net *net = sock_net(sk);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2016-10-20 09:35:12 +03:00
|
|
|
ASSERT_RTNL();
|
2012-12-05 13:18:10 +04:00
|
|
|
|
2014-09-12 02:35:13 +04:00
|
|
|
while ((mc_lst = rtnl_dereference(np->ipv6_mc_list)) != NULL) {
|
2005-04-17 02:20:36 +04:00
|
|
|
struct net_device *dev;
|
|
|
|
|
|
|
|
np->ipv6_mc_list = mc_lst->next;
|
|
|
|
|
2014-09-12 02:35:14 +04:00
|
|
|
dev = __dev_get_by_index(net, mc_lst->ifindex);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (dev) {
|
2010-06-08 01:05:02 +04:00
|
|
|
struct inet6_dev *idev = __in6_dev_get(dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-08-18 03:27:39 +04:00
|
|
|
(void) ip6_mc_leave_src(sk, mc_lst, idev);
|
2010-06-08 01:05:02 +04:00
|
|
|
if (idev)
|
2005-04-17 02:20:36 +04:00
|
|
|
__ipv6_dev_mc_dec(idev, &mc_lst->addr);
|
2006-08-18 03:27:39 +04:00
|
|
|
} else
|
|
|
|
(void) ip6_mc_leave_src(sk, mc_lst, NULL);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-11-23 16:12:15 +03:00
|
|
|
atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
|
2011-03-18 07:00:50 +03:00
|
|
|
kfree_rcu(mc_lst, rcu);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2016-10-20 09:35:12 +03:00
|
|
|
}
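
/* RTNL-taking wrapper around __ipv6_sock_mc_close().  The
 * rcu_access_pointer() check lets sockets that never joined a group
 * avoid the RTNL round trip entirely on close.
 */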
void ipv6_sock_mc_close(struct sock *sk)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	if (!rcu_access_pointer(np->ipv6_mc_list))
		return;
	rtnl_lock();
	__ipv6_sock_mc_close(sk);
	rtnl_unlock();
}
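
/* Add or remove a single source address in the per-socket filter
 * for a group.  This is the backend for the source-specific
 * multicast socket options (MCAST_JOIN/LEAVE_SOURCE_GROUP and
 * MCAST_BLOCK/UNBLOCK_SOURCE): 'add' selects insertion vs. removal,
 * 'omode' is the filter mode the option implies.
 *
 * A minimal userspace sketch (illustrative only; 'fd' and 'ifidx'
 * are assumed to be an IPv6 UDP socket and an interface index):
 *
 *	struct group_source_req gsr = { .gsr_interface = ifidx };
 *	struct sockaddr_in6 *g = (struct sockaddr_in6 *)&gsr.gsr_group;
 *	struct sockaddr_in6 *s = (struct sockaddr_in6 *)&gsr.gsr_source;
 *
 *	g->sin6_family = s->sin6_family = AF_INET6;
 *	inet_pton(AF_INET6, "ff3e::1234", &g->sin6_addr);
 *	inet_pton(AF_INET6, "2001:db8::1", &s->sin6_addr);
 *	setsockopt(fd, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
 *		   &gsr, sizeof(gsr));
 */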
int ip6_mc_source(int add, int omode, struct sock *sk,
	struct group_source_req *pgsr)
{
	struct in6_addr *source, *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int i, j, rv;
	int leavegroup = 0;
	int pmclocked = 0;
	int err;

	source = &((struct sockaddr_in6 *)&pgsr->gsr_source)->sin6_addr;
	group = &((struct sockaddr_in6 *)&pgsr->gsr_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	rcu_read_lock();
	idev = ip6_mc_find_dev_rcu(net, group, pgsr->gsr_interface);
	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = -EADDRNOTAVAIL;

	for_each_pmc_rcu(inet6, pmc) {
		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip6_mc_add_src(idev, group, omode, 0, NULL, 0);
		ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
		pmc->sfmode = omode;
	}

	write_lock(&pmc->sflock);
	pmclocked = 1;

	psl = pmc->sflist;
	if (!add) {
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i = 0; i < psl->sl_count; i++) {
			rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
			if (rv == 0)
				break;
		}
		if (rv)	/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip6_mc_del_src(idev, group, omode, 1, source, 1);

		for (j = i+1; j < psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= sysctl_mld_max_msf) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		struct ip6_sf_socklist *newpsl;
		int count = IP6_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(count), GFP_ATOMIC);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP6_SFBLOCK;
		if (psl) {
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
		}
		pmc->sflist = psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i = 0; i < psl->sl_count; i++) {
		rv = !ipv6_addr_equal(&psl->sl_addr[i], source);
		if (rv == 0) /* There is an error in the address. */
			goto done;
	}
	for (j = psl->sl_count-1; j >= i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = *source;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip6_mc_add_src(idev, group, omode, 1, source, 1);
done:
	if (pmclocked)
		write_unlock(&pmc->sflock);
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();
	if (leavegroup)
		err = ipv6_sock_mc_drop(sk, pgsr->gsr_interface, group);
	return err;
}
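
/* Replace the whole per-socket source filter in one call.  This is
 * the set side of the MCAST_MSFILTER socket option: 'gsf' carries
 * the new filter mode and source count, 'list' the source array
 * already copied in from userspace.
 */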
int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf,
		struct sockaddr_storage *list)
{
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, err;

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;
	if (gsf->gf_fmode != MCAST_INCLUDE &&
	    gsf->gf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	rcu_read_lock();
	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);

	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = 0;

	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
		leavegroup = 1;
		goto done;
	}

	for_each_pmc_rcu(inet6, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(&pmc->addr, group))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (gsf->gf_numsrc) {
		newpsl = sock_kmalloc(sk, IP6_SFLSIZE(gsf->gf_numsrc),
				      GFP_ATOMIC);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = gsf->gf_numsrc;
		for (i = 0; i < newpsl->sl_count; ++i, ++list) {
			struct sockaddr_in6 *psin6;

			psin6 = (struct sockaddr_in6 *)list;
			newpsl->sl_addr[i] = psin6->sin6_addr;
		}
		err = ip6_mc_add_src(idev, group, gsf->gf_fmode,
				     newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl, IP6_SFLSIZE(newpsl->sl_max));
			goto done;
		}
	} else {
		newpsl = NULL;
		(void) ip6_mc_add_src(idev, group, gsf->gf_fmode, 0, NULL, 0);
	}

	write_lock(&pmc->sflock);
	psl = pmc->sflist;
	if (psl) {
		(void) ip6_mc_del_src(idev, group, pmc->sfmode,
				      psl->sl_count, psl->sl_addr, 0);
		sock_kfree_s(sk, psl, IP6_SFLSIZE(psl->sl_max));
	} else
		(void) ip6_mc_del_src(idev, group, pmc->sfmode, 0, NULL, 0);
	pmc->sflist = newpsl;
	pmc->sfmode = gsf->gf_fmode;
	write_unlock(&pmc->sflock);
	err = 0;
done:
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();
	if (leavegroup)
		err = ipv6_sock_mc_drop(sk, gsf->gf_interface, group);
	return err;
}
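
/* Get side of MCAST_MSFILTER: report the current filter mode and
 * copy out at most the gf_numsrc sources the caller asked for,
 * while returning the full source count in gf_numsrc so userspace
 * can size a retry.
 */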
int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf,
	struct sockaddr_storage *p)
{
	int err, i, count, copycount;
	const struct in6_addr *group;
	struct ipv6_mc_socklist *pmc;
	struct inet6_dev *idev;
	struct ipv6_pinfo *inet6 = inet6_sk(sk);
	struct ip6_sf_socklist *psl;
	struct net *net = sock_net(sk);

	group = &((struct sockaddr_in6 *)&gsf->gf_group)->sin6_addr;

	if (!ipv6_addr_is_multicast(group))
		return -EINVAL;

	rcu_read_lock();
	idev = ip6_mc_find_dev_rcu(net, group, gsf->gf_interface);

	if (!idev) {
		rcu_read_unlock();
		return -ENODEV;
	}

	err = -EADDRNOTAVAIL;
	/* changes to the ipv6_mc_list require the socket lock and
	 * rtnl lock. We have the socket lock and rcu read lock,
	 * so reading the list is safe.
	 */

	for_each_pmc_rcu(inet6, pmc) {
		if (pmc->ifindex != gsf->gf_interface)
			continue;
		if (ipv6_addr_equal(group, &pmc->addr))
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	gsf->gf_fmode = pmc->sfmode;
	psl = pmc->sflist;
	count = psl ? psl->sl_count : 0;
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();

	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	/* changes to psl require the socket lock, and a write lock
	 * on pmc->sflock. We have the socket lock so reading here is safe.
	 */
	for (i = 0; i < copycount; i++, p++) {
		struct sockaddr_in6 *psin6;
		struct sockaddr_storage ss;

		psin6 = (struct sockaddr_in6 *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin6->sin6_family = AF_INET6;
		psin6->sin6_addr = psl->sl_addr[i];
		if (copy_to_user(p, &ss, sizeof(ss)))
			return -EFAULT;
	}
	return 0;
done:
	read_unlock_bh(&idev->lock);
	rcu_read_unlock();
	return err;
}
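
/* Delivery check used on the input path: should a packet sent from
 * src_addr to group mc_addr be delivered to this socket?  With no
 * source filter the answer depends only on the filter mode; with a
 * filter, INCLUDE requires the source to be listed and EXCLUDE
 * requires it not to be.
 */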
bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
		    const struct in6_addr *src_addr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_mc_socklist *mc;
	struct ip6_sf_socklist *psl;
	bool rv = true;

	rcu_read_lock();
	for_each_pmc_rcu(np, mc) {
		if (ipv6_addr_equal(&mc->addr, mc_addr))
			break;
	}
	if (!mc) {
		rcu_read_unlock();
		return np->mc_all;
	}
	read_lock(&mc->sflock);
	psl = mc->sflist;
	if (!psl) {
		rv = mc->sfmode == MCAST_EXCLUDE;
	} else {
		int i;

		for (i = 0; i < psl->sl_count; i++) {
			if (ipv6_addr_equal(&psl->sl_addr[i], src_addr))
				break;
		}
		if (mc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
			rv = false;
		if (mc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
			rv = false;
	}
	read_unlock(&mc->sflock);
	rcu_read_unlock();

	return rv;
}
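
/* Install a group on the device and announce it.  The MAF_LOADED
 * flag ensures the link-layer address is pushed to the device's
 * multicast list only once; reports then go out via the MLDv1 join
 * path or, in MLDv2 mode, via a change-record event.
 */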
static void igmp6_group_added(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	spin_lock_bh(&mc->mca_lock);
	if (!(mc->mca_flags&MAF_LOADED)) {
		mc->mca_flags |= MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_add(dev, buf);
	}
	spin_unlock_bh(&mc->mca_lock);

	if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
		return;

	if (mld_in_v1_mode(mc->idev)) {
		igmp6_join_group(mc);
		return;
	}
	/* else v2 */

	/* Based on RFC3810 6.1, for newly added INCLUDE SSM, we
	 * should not send filter-mode change record as the mode
	 * should be from IN() to IN(A).
	 */
	if (mc->mca_sfmode == MCAST_EXCLUDE)
		mc->mca_crcount = mc->idev->mc_qrv;

	mld_ifc_event(mc->idev);
}
static void igmp6_group_dropped(struct ifmcaddr6 *mc)
{
	struct net_device *dev = mc->idev->dev;
	char buf[MAX_ADDR_LEN];

	if (IPV6_ADDR_MC_SCOPE(&mc->mca_addr) <
	    IPV6_ADDR_SCOPE_LINKLOCAL)
		return;

	spin_lock_bh(&mc->mca_lock);
	if (mc->mca_flags&MAF_LOADED) {
		mc->mca_flags &= ~MAF_LOADED;
		if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
			dev_mc_del(dev, buf);
	}

	spin_unlock_bh(&mc->mca_lock);
	if (mc->mca_flags & MAF_NOREPORT)
		return;

	if (!mc->idev->dead)
		igmp6_leave_group(mc);

	spin_lock_bh(&mc->mca_lock);
	if (del_timer(&mc->mca_timer))
		refcount_dec(&mc->mca_refcnt);
	spin_unlock_bh(&mc->mca_lock);
}

/*
 * deleted ifmcaddr6 manipulation
 */
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ifmcaddr6 *pmc;

	/* this is an "ifmcaddr6" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), GFP_ATOMIC);
	if (!pmc)
		return;

	spin_lock_bh(&im->mca_lock);
	spin_lock_init(&pmc->mca_lock);
	pmc->idev = im->idev;
	in6_dev_hold(idev);
	pmc->mca_addr = im->mca_addr;
	pmc->mca_crcount = idev->mc_qrv;
	pmc->mca_sfmode = im->mca_sfmode;
	if (pmc->mca_sfmode == MCAST_INCLUDE) {
		struct ip6_sf_list *psf;

		pmc->mca_tomb = im->mca_tomb;
		pmc->mca_sources = im->mca_sources;
		im->mca_tomb = im->mca_sources = NULL;
		for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
			psf->sf_crcount = pmc->mca_crcount;
	}
	spin_unlock_bh(&im->mca_lock);

	spin_lock_bh(&idev->mc_lock);
	pmc->next = idev->mc_tomb;
	idev->mc_tomb = pmc;
	spin_unlock_bh(&idev->mc_lock);
}
static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
	struct ifmcaddr6 *pmc, *pmc_prev;
	struct ip6_sf_list *psf;
	struct in6_addr *pmca = &im->mca_addr;

	spin_lock_bh(&idev->mc_lock);
	pmc_prev = NULL;
	for (pmc = idev->mc_tomb; pmc; pmc = pmc->next) {
		if (ipv6_addr_equal(&pmc->mca_addr, pmca))
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			idev->mc_tomb = pmc->next;
	}
	spin_unlock_bh(&idev->mc_lock);

	spin_lock_bh(&im->mca_lock);
	if (pmc) {
		im->idev = pmc->idev;
		if (im->mca_sfmode == MCAST_INCLUDE) {
			swap(im->mca_tomb, pmc->mca_tomb);
			swap(im->mca_sources, pmc->mca_sources);
			for (psf = im->mca_sources; psf; psf = psf->sf_next)
				psf->sf_crcount = idev->mc_qrv;
		} else {
			im->mca_crcount = idev->mc_qrv;
		}
		in6_dev_put(pmc->idev);
		ip6_mc_clear_src(pmc);
		kfree(pmc);
	}
	spin_unlock_bh(&im->mca_lock);
}
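
/* Throw away the whole tomb list and any tombstoned sources still
 * hanging off live groups, without sending further reports; used
 * when the device's MLD state is being torn down.
 */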
static void mld_clear_delrec(struct inet6_dev *idev)
{
	struct ifmcaddr6 *pmc, *nextpmc;

	spin_lock_bh(&idev->mc_lock);
	pmc = idev->mc_tomb;
	idev->mc_tomb = NULL;
	spin_unlock_bh(&idev->mc_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip6_mc_clear_src(pmc);
		in6_dev_put(pmc->idev);
		kfree(pmc);
	}

	/* clear dead sources, too */
	read_lock_bh(&idev->lock);
	for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
		struct ip6_sf_list *psf, *psf_next;

		spin_lock_bh(&pmc->mca_lock);
		psf = pmc->mca_tomb;
		pmc->mca_tomb = NULL;
		spin_unlock_bh(&pmc->mca_lock);
		for (; psf; psf = psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
	}
	read_unlock_bh(&idev->lock);
}
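
/* Reference counting for ifmcaddr6 entries: mca_get()/ma_put()
 * pair around mca_refcnt, and the final ma_put() also releases the
 * idev reference taken when the entry was allocated.
 */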
static void mca_get(struct ifmcaddr6 *mc)
{
	refcount_inc(&mc->mca_refcnt);
}

static void ma_put(struct ifmcaddr6 *mc)
{
	if (refcount_dec_and_test(&mc->mca_refcnt)) {
		in6_dev_put(mc->idev);
		kfree(mc);
	}
}

static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
				   const struct in6_addr *addr,
				   unsigned int mode)
{
	struct ifmcaddr6 *mc;

	mc = kzalloc(sizeof(*mc), GFP_ATOMIC);
	if (!mc)
		return NULL;
	timer_setup(&mc->mca_timer, igmp6_timer_handler, 0);

	mc->mca_addr = *addr;
	mc->idev = idev; /* reference taken by caller */
	mc->mca_users = 1;
	/* mca_stamp should be updated upon changes */
	mc->mca_cstamp = mc->mca_tstamp = jiffies;
	refcount_set(&mc->mca_refcnt, 1);
	spin_lock_init(&mc->mca_lock);

	mc->mca_sfmode = mode;
	mc->mca_sfcount[mode] = 1;

	if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
	    IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
		mc->mca_flags |= MAF_NOREPORT;

	return mc;
}

/*
 *	device multicast group inc (add if not found)
 */
static int __ipv6_dev_mc_inc(struct net_device *dev,
			     const struct in6_addr *addr, unsigned int mode)
{
	struct ifmcaddr6 *mc;
	struct inet6_dev *idev;

	ASSERT_RTNL();

	/* we need to take a reference on idev */
	idev = in6_dev_get(dev);

	if (!idev)
		return -EINVAL;

	write_lock_bh(&idev->lock);
	if (idev->dead) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENODEV;
	}

	for (mc = idev->mc_list; mc; mc = mc->next) {
		if (ipv6_addr_equal(&mc->mca_addr, addr)) {
			mc->mca_users++;
			write_unlock_bh(&idev->lock);
			ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
			in6_dev_put(idev);
			return 0;
		}
	}

	mc = mca_alloc(idev, addr, mode);
	if (!mc) {
		write_unlock_bh(&idev->lock);
		in6_dev_put(idev);
		return -ENOMEM;
	}

	mc->next = idev->mc_list;
	idev->mc_list = mc;

	/* Hold this for the code below before we unlock,
	 * it is already exposed via idev->mc_list.
	 */
	mca_get(mc);
	write_unlock_bh(&idev->lock);

	mld_del_delrec(idev, mc);
	igmp6_group_added(mc);
	ma_put(mc);
	return 0;
}
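
/* Any-source join: an EXCLUDE filter with an empty source list
 * accepts traffic from every sender, which is the classic
 * "join group" semantic expected by ipv6_dev_mc_inc() callers.
 */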
int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
{
	return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
}
EXPORT_SYMBOL(ipv6_dev_mc_inc);
2018-07-10 17:41:27 +03:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* device multicast group del
|
|
|
|
*/
|
[IPV6]: Make address arguments const.
- net/ipv6/addrconf.c:
ipv6_get_ifaddr(), ipv6_dev_get_saddr()
- net/ipv6/mcast.c:
ipv6_sock_mc_join(), ipv6_sock_mc_drop(),
inet6_mc_check(),
ipv6_dev_mc_inc(), __ipv6_dev_mc_dec(), ipv6_dev_mc_dec(),
ipv6_chk_mcast_addr()
- net/ipv6/route.c:
rt6_lookup(), icmp6_dst_alloc()
- net/ipv6/ip6_output.c:
ip6_nd_hdr()
- net/ipv6/ndisc.c:
ndisc_send_ns(), ndisc_send_rs(), ndisc_send_redirect(),
ndisc_get_neigh(), __ndisc_send()
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
2008-04-10 10:42:10 +04:00
|
|
|
int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct ifmcaddr6 *ma, **map;
|
|
|
|
|
2014-09-02 12:29:29 +04:00
|
|
|
ASSERT_RTNL();
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
write_lock_bh(&idev->lock);
|
2014-08-25 00:53:10 +04:00
|
|
|
for (map = &idev->mc_list; (ma = *map) != NULL; map = &ma->next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (ipv6_addr_equal(&ma->mca_addr, addr)) {
|
|
|
|
if (--ma->mca_users == 0) {
|
|
|
|
*map = ma->next;
|
|
|
|
write_unlock_bh(&idev->lock);
|
|
|
|
|
|
|
|
igmp6_group_dropped(ma);
|
2017-01-12 16:19:37 +03:00
|
|
|
ip6_mc_clear_src(ma);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
ma_put(ma);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
write_unlock_bh(&idev->lock);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
write_unlock_bh(&idev->lock);
|
|
|
|
|
|
|
|
return -ENOENT;
|
|
|
|
}
|
|
|
|
|
2008-04-10 10:42:10 +04:00
|
|
|
int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2010-06-08 01:05:02 +04:00
|
|
|
struct inet6_dev *idev;
|
2005-04-17 02:20:36 +04:00
|
|
|
int err;
|
|
|
|
|
2014-09-12 02:35:14 +04:00
|
|
|
ASSERT_RTNL();
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-06-08 01:05:02 +04:00
|
|
|
idev = __in6_dev_get(dev);
|
|
|
|
if (!idev)
|
|
|
|
err = -ENODEV;
|
|
|
|
else
|
|
|
|
err = __ipv6_dev_mc_dec(idev, addr);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
2019-01-21 09:26:27 +03:00
|
|
|
EXPORT_SYMBOL(ipv6_dev_mc_dec);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* check if the interface/address pair is valid
|
|
|
|
*/
|
2012-05-18 22:57:34 +04:00
|
|
|
bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
|
|
|
|
const struct in6_addr *src_addr)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct inet6_dev *idev;
|
|
|
|
struct ifmcaddr6 *mc;
|
2012-05-18 22:57:34 +04:00
|
|
|
bool rv = false;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-06-08 01:05:02 +04:00
|
|
|
rcu_read_lock();
|
|
|
|
idev = __in6_dev_get(dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (idev) {
|
|
|
|
read_lock_bh(&idev->lock);
|
2014-08-25 00:53:10 +04:00
|
|
|
for (mc = idev->mc_list; mc; mc = mc->next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (ipv6_addr_equal(&mc->mca_addr, group))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (mc) {
|
|
|
|
if (src_addr && !ipv6_addr_any(src_addr)) {
|
|
|
|
struct ip6_sf_list *psf;
|
|
|
|
|
|
|
|
spin_lock_bh(&mc->mca_lock);
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = mc->mca_sources; psf; psf = psf->sf_next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (ipv6_addr_equal(&psf->sf_addr, src_addr))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (psf)
|
|
|
|
rv = psf->sf_count[MCAST_INCLUDE] ||
|
|
|
|
psf->sf_count[MCAST_EXCLUDE] !=
|
|
|
|
mc->mca_sfcount[MCAST_EXCLUDE];
|
|
|
|
else
|
2014-08-25 00:53:10 +04:00
|
|
|
rv = mc->mca_sfcount[MCAST_EXCLUDE] != 0;
|
2005-04-17 02:20:36 +04:00
|
|
|
spin_unlock_bh(&mc->mca_lock);
|
|
|
|
} else
|
2012-05-18 22:57:34 +04:00
|
|
|
rv = true; /* don't filter unspecified source */
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
}
|
2010-06-08 01:05:02 +04:00
|
|
|
rcu_read_unlock();
|
2005-04-17 02:20:36 +04:00
|
|
|
return rv;
|
|
|
|
}
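The per-source acceptance test above can be read as a standalone predicate:
a packet from src_addr passes if some listener explicitly INCLUDEs it, or if
not every EXCLUDE-mode listener excludes it. A small sketch of just that
decision, with plain counters standing in for sf_count[] and
mca_sfcount[MCAST_EXCLUDE] (the names below are illustrative, not kernel
fields):

/* Sketch of the delivery predicate applied in ipv6_chk_mcast_addr(). */
#include <stdbool.h>
#include <stdio.h>

static bool source_passes(unsigned int include_refs,
			  unsigned int exclude_refs,
			  unsigned int group_exclude_refs)
{
	/* Pass if explicitly included, or if at least one EXCLUDE-mode
	 * listener does not list this source (counts differ).
	 */
	return include_refs || exclude_refs != group_exclude_refs;
}

int main(void)
{
	printf("%d\n", source_passes(0, 2, 2)); /* all excluders block it: 0 */
	printf("%d\n", source_passes(0, 1, 2)); /* one excluder accepts it: 1 */
	printf("%d\n", source_passes(1, 2, 2)); /* an includer wants it:    1 */
	return 0;
}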
|
|
|
|
|
|
|
|
static void mld_gq_start_timer(struct inet6_dev *idev)
|
|
|
|
{
|
2014-01-11 16:15:59 +04:00
|
|
|
unsigned long tv = prandom_u32() % idev->mc_maxdelay;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
idev->mc_gq_running = 1;
|
|
|
|
if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
|
|
|
|
in6_dev_hold(idev);
|
|
|
|
}
|
|
|
|
|
2013-09-04 02:19:43 +04:00
|
|
|
static void mld_gq_stop_timer(struct inet6_dev *idev)
|
|
|
|
{
|
|
|
|
idev->mc_gq_running = 0;
|
|
|
|
if (del_timer(&idev->mc_gq_timer))
|
|
|
|
__in6_dev_put(idev);
|
|
|
|
}
|
|
|
|
|
2013-08-20 14:22:01 +04:00
|
|
|
static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2014-01-11 16:15:59 +04:00
|
|
|
unsigned long tv = prandom_u32() % delay;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
|
|
|
|
in6_dev_hold(idev);
|
|
|
|
}
|
|
|
|
|
2013-09-04 02:19:43 +04:00
|
|
|
static void mld_ifc_stop_timer(struct inet6_dev *idev)
|
|
|
|
{
|
|
|
|
idev->mc_ifc_count = 0;
|
|
|
|
if (del_timer(&idev->mc_ifc_timer))
|
|
|
|
__in6_dev_put(idev);
|
|
|
|
}
|
|
|
|
|
2013-08-20 14:22:01 +04:00
|
|
|
static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
|
2013-06-27 02:07:01 +04:00
|
|
|
{
|
2014-01-11 16:15:59 +04:00
|
|
|
unsigned long tv = prandom_u32() % delay;
|
2013-06-27 02:07:01 +04:00
|
|
|
|
|
|
|
if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
|
|
|
|
in6_dev_hold(idev);
|
|
|
|
}
|
|
|
|
|
2013-09-04 02:19:43 +04:00
|
|
|
static void mld_dad_stop_timer(struct inet6_dev *idev)
|
|
|
|
{
|
|
|
|
if (del_timer(&idev->mc_dad_timer))
|
|
|
|
__in6_dev_put(idev);
|
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* IGMP handling (alias multicast ICMPv6 messages)
|
|
|
|
*/
|
|
|
|
|
|
|
|
static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
|
|
|
|
{
|
|
|
|
unsigned long delay = resptime;
|
|
|
|
|
|
|
|
/* Do not start timer for these addresses */
|
|
|
|
if (ipv6_addr_is_ll_all_nodes(&ma->mca_addr) ||
|
|
|
|
IPV6_ADDR_MC_SCOPE(&ma->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (del_timer(&ma->mca_timer)) {
|
2017-07-04 09:34:57 +03:00
|
|
|
refcount_dec(&ma->mca_refcnt);
|
2005-04-17 02:20:36 +04:00
|
|
|
delay = ma->mca_timer.expires - jiffies;
|
|
|
|
}
|
|
|
|
|
2013-09-04 02:19:41 +04:00
|
|
|
if (delay >= resptime)
|
2014-01-11 16:15:59 +04:00
|
|
|
delay = prandom_u32() % resptime;
|
2013-09-04 02:19:41 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
ma->mca_timer.expires = jiffies + delay;
|
|
|
|
if (!mod_timer(&ma->mca_timer, jiffies + delay))
|
2017-07-04 09:34:57 +03:00
|
|
|
refcount_inc(&ma->mca_refcnt);
|
2005-04-17 02:20:36 +04:00
|
|
|
ma->mca_flags |= MAF_TIMER_RUNNING;
|
|
|
|
}
|
|
|
|
|
2005-12-28 01:03:00 +03:00
|
|
|
/* mark EXCLUDE-mode sources */
|
2012-05-18 22:57:34 +04:00
|
|
|
static bool mld_xmarksources(struct ifmcaddr6 *pmc, int nsrcs,
|
|
|
|
const struct in6_addr *srcs)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct ip6_sf_list *psf;
|
|
|
|
int i, scount;
|
|
|
|
|
|
|
|
scount = 0;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (scount == nsrcs)
|
|
|
|
break;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (i = 0; i < nsrcs; i++) {
|
2005-12-28 01:03:00 +03:00
|
|
|
/* skip inactive filters */
|
2011-08-24 02:54:37 +04:00
|
|
|
if (psf->sf_count[MCAST_INCLUDE] ||
|
2005-12-28 01:03:00 +03:00
|
|
|
pmc->mca_sfcount[MCAST_EXCLUDE] !=
|
|
|
|
psf->sf_count[MCAST_EXCLUDE])
|
2012-04-05 13:36:29 +04:00
|
|
|
break;
|
2005-12-28 01:03:00 +03:00
|
|
|
if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
|
|
|
|
scount++;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
pmc->mca_flags &= ~MAF_GSQUERY;
|
|
|
|
if (scount == nsrcs) /* all sources excluded */
|
2012-05-18 22:57:34 +04:00
|
|
|
return false;
|
|
|
|
return true;
|
2005-12-28 01:03:00 +03:00
|
|
|
}
|
|
|
|
|
2012-05-18 22:57:34 +04:00
|
|
|
static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
|
|
|
|
const struct in6_addr *srcs)
|
2005-12-28 01:03:00 +03:00
|
|
|
{
|
|
|
|
struct ip6_sf_list *psf;
|
|
|
|
int i, scount;
|
|
|
|
|
|
|
|
if (pmc->mca_sfmode == MCAST_EXCLUDE)
|
|
|
|
return mld_xmarksources(pmc, nsrcs, srcs);
|
|
|
|
|
|
|
|
/* mark INCLUDE-mode sources */
|
|
|
|
|
|
|
|
scount = 0;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
|
2005-12-28 01:03:00 +03:00
|
|
|
if (scount == nsrcs)
|
|
|
|
break;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (i = 0; i < nsrcs; i++) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (ipv6_addr_equal(&srcs[i], &psf->sf_addr)) {
|
|
|
|
psf->sf_gsresp = 1;
|
|
|
|
scount++;
|
|
|
|
break;
|
|
|
|
}
|
2005-12-28 01:03:00 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!scount) {
|
|
|
|
pmc->mca_flags &= ~MAF_GSQUERY;
|
2012-05-18 22:57:34 +04:00
|
|
|
return false;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2005-12-28 01:03:00 +03:00
|
|
|
pmc->mca_flags |= MAF_GSQUERY;
|
2012-05-18 22:57:34 +04:00
|
|
|
return true;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2013-09-04 02:19:40 +04:00
|
|
|
static int mld_force_mld_version(const struct inet6_dev *idev)
|
|
|
|
{
|
|
|
|
/* Normally, both are 0 here. If enforcement to a particular version
|
|
|
|
* is being used, the individual device setting takes lower precedence
|
|
|
|
* than the 'all' device setting (.../conf/all/force_mld_version).
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
|
|
|
|
return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
|
|
|
|
else
|
|
|
|
return idev->cnf.force_mld_version;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
|
|
|
|
{
|
|
|
|
return mld_force_mld_version(idev) == 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
|
|
|
|
{
|
|
|
|
return mld_force_mld_version(idev) == 1;
|
|
|
|
}
|
|
|
|
|
2013-09-04 02:19:38 +04:00
|
|
|
static bool mld_in_v1_mode(const struct inet6_dev *idev)
|
|
|
|
{
|
2013-09-04 02:19:40 +04:00
|
|
|
if (mld_in_v2_mode_only(idev))
|
|
|
|
return false;
|
|
|
|
if (mld_in_v1_mode_only(idev))
|
2013-09-04 02:19:38 +04:00
|
|
|
return true;
|
|
|
|
if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
|
|
|
|
return true;
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
net: ipv6: mld: fix v1/v2 switchback timeout to rfc3810, 9.12.
i) RFC3810, 9.2. Query Interval [QI] says:
The Query Interval variable denotes the interval between General
Queries sent by the Querier. Default value: 125 seconds. [...]
ii) RFC3810, 9.3. Query Response Interval [QRI] says:
The Maximum Response Delay used to calculate the Maximum Response
Code inserted into the periodic General Queries. Default value:
10000 (10 seconds) [...] The number of seconds represented by the
[Query Response Interval] must be less than the [Query Interval].
iii) RFC3810, 9.12. Older Version Querier Present Timeout [OVQPT] says:
The Older Version Querier Present Timeout is the time-out for
transitioning a host back to MLDv2 Host Compatibility Mode. When an
MLDv1 query is received, MLDv2 hosts set their Older Version Querier
Present Timer to [Older Version Querier Present Timeout].
This value MUST be ([Robustness Variable] times (the [Query Interval]
in the last Query received)) plus ([Query Response Interval]).
Hence, on *default* the timeout results in:
[RV] = 2, [QI] = 125sec, [QRI] = 10sec
[OVQPT] = [RV] * [QI] + [QRI] = 260sec
Having that said, we currently calculate [OVQPT] (here given as 'switchback'
variable) as ...
switchback = (idev->mc_qrv + 1) * max_delay
RFC3810, 9.12. says "the [Query Interval] in the last Query received". In
section "9.14. Configuring timers", it is said:
This section is meant to provide advice to network administrators on
how to tune these settings to their network. Ambitious router
implementations might tune these settings dynamically based upon
changing characteristics of the network. [...]
iv) RFC3810, 9.14.2. Query Interval:
The overall level of periodic MLD traffic is inversely proportional
to the Query Interval. A longer Query Interval results in a lower
overall level of MLD traffic. The value of the Query Interval MUST
be equal to or greater than the Maximum Response Delay used to
calculate the Maximum Response Code inserted in General Query
messages.
I assume that is why switchback was calculated as it is (3 * max_delay),
although this setting seems to be meant only for routers, to configure their
[QI] interval for non-default intervals. So using it here like this is clearly
wrong.
Concluding, the current behaviour in IPv6's multicast code does not conform
to the RFC, as the switchback is calculated wrongly. That is, it has too small
a value, so MLDv2 hosts switch back to MLDv2 way too early, i.e. after ~30secs
instead of ~260secs on default.
Hence, introduce the necessary helper functions and fix this up properly.
Introduced in 06da92283 ("[IPV6]: Add MLDv2 support."). Credits to Hannes
Frederic Sowa, who also had a hand in this. Also thanks to Hangbin Liu,
who did the initial testing.
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Cc: David Stevens <dlstevens@us.ibm.com>
Cc: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-09-04 02:19:37 +04:00
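As a quick check of the arithmetic in the message above, a minimal sketch
using the RFC default values (illustrative only, not kernel code):

/* RFC3810, 9.12: [OVQPT] = [RV] * [QI] + [QRI], with the defaults
 * quoted above; the old formula is shown for comparison.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rv  = 2;	/* [RV]  Robustness Variable	 */
	unsigned int qi  = 125;	/* [QI]  Query Interval, seconds */
	unsigned int qri = 10;	/* [QRI] Query Response Interval */

	printf("OVQPT = %u s\n", rv * qi + qri);	/* 260 s */

	/* Old, buggy switchback: (qrv + 1) * max_delay, where max_delay
	 * defaults to [QRI]; this yields ~30 s instead of ~260 s.
	 */
	printf("old switchback = %u s\n", (rv + 1) * qri);
	return 0;
}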
|
|
|
static void mld_set_v1_mode(struct inet6_dev *idev)
|
|
|
|
{
|
|
|
|
/* RFC3810, relevant sections:
|
|
|
|
* - 9.1. Robustness Variable
|
|
|
|
* - 9.2. Query Interval
|
|
|
|
* - 9.3. Query Response Interval
|
|
|
|
* - 9.12. Older Version Querier Present Timeout
|
|
|
|
*/
|
|
|
|
unsigned long switchback;
|
|
|
|
|
|
|
|
switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
|
|
|
|
|
|
|
|
idev->mc_v1_seen = jiffies + switchback;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mld_update_qrv(struct inet6_dev *idev,
|
|
|
|
const struct mld2_query *mlh2)
|
|
|
|
{
|
|
|
|
/* RFC3810, relevant sections:
|
|
|
|
* - 5.1.8. QRV (Querier's Robustness Variable)
|
|
|
|
* - 9.1. Robustness Variable
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* The value of the Robustness Variable MUST NOT be zero,
|
|
|
|
* and SHOULD NOT be one. Catch this here if we ever run
|
|
|
|
* into such a case in the future.
|
|
|
|
*/
|
2014-09-02 17:49:25 +04:00
|
|
|
const int min_qrv = min(MLD_QRV_DEFAULT, sysctl_mld_qrv);
|
2013-09-04 02:19:37 +04:00
|
|
|
WARN_ON(idev->mc_qrv == 0);
|
|
|
|
|
|
|
|
if (mlh2->mld2q_qrv > 0)
|
|
|
|
idev->mc_qrv = mlh2->mld2q_qrv;
|
|
|
|
|
2014-09-02 17:49:25 +04:00
|
|
|
if (unlikely(idev->mc_qrv < min_qrv)) {
|
2013-09-04 02:19:37 +04:00
|
|
|
net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
|
2014-09-02 17:49:25 +04:00
|
|
|
idev->mc_qrv, min_qrv);
|
|
|
|
idev->mc_qrv = min_qrv;
|
2013-09-04 02:19:37 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mld_update_qi(struct inet6_dev *idev,
|
|
|
|
const struct mld2_query *mlh2)
|
|
|
|
{
|
|
|
|
/* RFC3810, relevant sections:
|
|
|
|
* - 5.1.9. QQIC (Querier's Query Interval Code)
|
|
|
|
* - 9.2. Query Interval
|
|
|
|
* - 9.12. Older Version Querier Present Timeout
|
|
|
|
* (the [Query Interval] in the last Query received)
|
|
|
|
*/
|
|
|
|
unsigned long mc_qqi;
|
|
|
|
|
|
|
|
if (mlh2->mld2q_qqic < 128) {
|
|
|
|
mc_qqi = mlh2->mld2q_qqic;
|
|
|
|
} else {
|
|
|
|
unsigned long mc_man, mc_exp;
|
|
|
|
|
|
|
|
mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
|
|
|
|
mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
|
|
|
|
|
|
|
|
mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
|
|
|
|
}
|
|
|
|
|
|
|
|
idev->mc_qi = mc_qqi * HZ;
|
|
|
|
}
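The QQIC decoding above follows RFC3810, 5.1.9: values below 128 are taken
literally (seconds); otherwise the octet encodes 1 | exp(3 bits) | mant(4
bits) and QQI = (mant | 0x10) << (exp + 3). A small standalone sketch of the
same arithmetic (not the kernel's MLDV2_QQIC_* macros themselves):

/* Sketch: Querier's Query Interval Code -> seconds, RFC3810 5.1.9. */
#include <stdio.h>

static unsigned long qqic_to_qqi(unsigned char qqic)
{
	if (qqic < 128)
		return qqic;	/* literal range */
	return (unsigned long)((qqic & 0x0f) | 0x10) <<
	       (((qqic >> 4) & 0x7) + 3);
}

int main(void)
{
	printf("QQI(0x7d) = %lu s\n", qqic_to_qqi(0x7d)); /* 125 s, literal */
	printf("QQI(0x9e) = %lu s\n", qqic_to_qqi(0x9e)); /* 480 s, encoded */
	return 0;
}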
|
|
|
|
|
|
|
|
static void mld_update_qri(struct inet6_dev *idev,
|
|
|
|
const struct mld2_query *mlh2)
|
|
|
|
{
|
|
|
|
/* RFC3810, relevant sections:
|
|
|
|
* - 5.1.3. Maximum Response Code
|
|
|
|
* - 9.3. Query Response Interval
|
|
|
|
*/
|
2013-09-04 02:19:39 +04:00
|
|
|
idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
|
2013-09-04 02:19:37 +04:00
|
|
|
}
|
|
|
|
|
2013-09-04 02:19:42 +04:00
|
|
|
static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
|
ipv6: mld: answer mldv2 queries with mldv1 reports in mldv1 fallback
RFC2710 (MLDv1), section 3.7. says:
The length of a received MLD message is computed by taking the
IPv6 Payload Length value and subtracting the length of any IPv6
extension headers present between the IPv6 header and the MLD
message. If that length is greater than 24 octets, that indicates
that there are other fields present *beyond* the fields described
above, perhaps belonging to a *future backwards-compatible* version
of MLD. An implementation of the version of MLD specified in this
document *MUST NOT* send an MLD message longer than 24 octets and
MUST ignore anything past the first 24 octets of a received MLD
message.
RFC3810 (MLDv2), section 8.2.1. states for *listeners* regarding
presence of MLDv1 routers:
In order to be compatible with MLDv1 routers, MLDv2 hosts MUST
operate in version 1 compatibility mode. [...] When Host
Compatibility Mode is MLDv2, a host acts using the MLDv2 protocol
on that interface. When Host Compatibility Mode is MLDv1, a host
acts in MLDv1 compatibility mode, using *only* the MLDv1 protocol,
on that interface. [...]
While section 8.3.1. specifies *router* behaviour regarding presence
of MLDv1 routers:
MLDv2 routers may be placed on a network where there is at least
one MLDv1 router. The following requirements apply:
If an MLDv1 router is present on the link, the Querier MUST use
the *lowest* version of MLD present on the network. This must be
administratively assured. Routers that desire to be compatible
with MLDv1 MUST have a configuration option to act in MLDv1 mode;
if an MLDv1 router is present on the link, the system administrator
must explicitly configure all MLDv2 routers to act in MLDv1 mode.
When in MLDv1 mode, the Querier MUST send periodic General Queries
truncated at the Multicast Address field (i.e., 24 bytes long),
and SHOULD also warn about receiving an MLDv2 Query (such warnings
must be rate-limited). The Querier MUST also fill in the Maximum
Response Delay in the Maximum Response Code field, i.e., the
exponential algorithm described in section 5.1.3. is not used. [...]
That means that we should not get queries from different versions of
MLD. When there's an MLDv1 router present, MLDv2 enforces truncation
and MRC == MRD (both fields are overlapping within the 24 octet range).
Section 8.3.2. specifies behaviour in the presence of MLDv1 multicast
address *listeners*:
MLDv2 routers may be placed on a network where there are hosts
that have not yet been upgraded to MLDv2. In order to be compatible
with MLDv1 hosts, MLDv2 routers MUST operate in version 1 compatibility
mode. MLDv2 routers keep a compatibility mode per multicast address
record. The compatibility mode of a multicast address is determined
from the Multicast Address Compatibility Mode variable, which can be
in one of the two following states: MLDv1 or MLDv2.
The Multicast Address Compatibility Mode of a multicast address
record is set to MLDv1 whenever an MLDv1 Multicast Listener Report is
*received* for that multicast address. At the same time, the Older
Version Host Present timer for the multicast address is set to Older
Version Host Present Timeout seconds. The timer is re-set whenever a
new MLDv1 Report is received for that multicast address. If the Older
Version Host Present timer expires, the router switches back to
Multicast Address Compatibility Mode of MLDv2 for that multicast
address. [...]
That means the following scenario can happen: hosts act in MLDv1
compatibility mode because they previously received an MLDv1 query (or
simply operate in MLDv1-only mode), and at the same time an MLDv2 router
starts up and transmits MLDv2 startup query messages while unaware of the
current operational mode.
Given RFC2710, section 3.7, we would need to answer that with an MLDv1
listener report, so that the router, according to RFC3810, section 8.3.2.,
receives it and internally switches to MLDv1 compatibility as well.
Right now, and I believe since the initial implementation of MLDv2, Linux
hosts just silently drop such MLDv2 queries instead of replying with an
MLDv1 listener report, which would prevent an MLDv2 router from going
into fallback mode (until it receives other MLDv1 queries).
Since the mapping of MRC to MRD in exactly such cases can make use of
the exponential algorithm from 5.1.3, we cannot, strictly speaking, be
aware of the MRC encoding from within MLDv1; it also does not seem to be
mentioned by the RFC. Since the encodings are the same up to 32767, assume
that value as a hard upper limit to clamp to in such a situation. We asked
one of the RFC authors in that regard, and he mentioned that there do not
seem to be any implementations that make use of the exponential algorithm
in startup messages. In any case, this patch fixes this MLD
interoperability issue.
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2014-09-20 16:03:55 +04:00
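The clamp the message describes can be sketched as follows: MLDv2 decodes the
Maximum Response Code per RFC3810, 5.1.3, while an MLDv1 host must treat the
field linearly and clamp at 32767, the top of the shared range. Illustrative
only, not the kernel's mldv2_mrc() verbatim:

/* RFC3810, 5.1.3: Maximum Response Code -> Maximum Response Delay (ms). */
#include <stdint.h>
#include <stdio.h>

static uint32_t mrc_to_mrd(uint16_t mrc)
{
	if (mrc < 32768)
		return mrc;	/* linear range, shared with MLDv1 */
	/* 1 | exp(3 bits) | mant(12 bits) */
	return (uint32_t)((mrc & 0x0fff) | 0x1000) <<
	       (((mrc >> 12) & 0x7) + 3);
}

int main(void)
{
	uint16_t mrc = 40000;	/* example exponential-range value */

	printf("MLDv2 MRD: %u ms\n", mrc_to_mrd(mrc));
	printf("MLDv1 clamped MRD: %u ms\n", mrc < 32768 ? mrc : 32767);
	return 0;
}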
|
|
|
unsigned long *max_delay, bool v1_query)
|
2013-09-04 02:19:42 +04:00
|
|
|
{
|
|
|
|
unsigned long mldv1_md;
|
|
|
|
|
|
|
|
/* Ignore v1 queries */
|
|
|
|
if (mld_in_v2_mode_only(idev))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
mldv1_md = ntohs(mld->mld_maxdelay);
|
2014-09-20 16:03:55 +04:00
|
|
|
|
|
|
|
/* When in MLDv1 fallback with an MLDv2 router starting up
|
|
|
|
* unaware of current MLDv1 operation, the MRC == MRD mapping
|
|
|
|
* only works when the exponential algorithm is not being
|
|
|
|
* used (as MLDv1 is unaware of such things).
|
|
|
|
*
|
|
|
|
* According to the RFC author, the MLDv2 implementations
|
|
|
|
* he's aware of all use an MRC < 32768 on start-up queries.
|
|
|
|
*
|
|
|
|
* Thus, should we *ever* encounter something larger
|
|
|
|
* than that, just assume the maximum possible within our
|
|
|
|
* reach.
|
|
|
|
*/
|
|
|
|
if (!v1_query)
|
|
|
|
mldv1_md = min(mldv1_md, MLDV1_MRD_MAX_COMPAT);
|
|
|
|
|
2013-09-04 02:19:42 +04:00
|
|
|
*max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
|
|
|
|
|
2014-09-20 16:03:55 +04:00
|
|
|
/* MLDv1 router present: we need to go into v1 mode *only*
|
|
|
|
* when an MLDv1 query is received as per section 9.12. of
|
|
|
|
* RFC3810! And we know from RFC2710 section 3.7 that MLDv1
|
|
|
|
* queries MUST be of exactly 24 octets.
|
|
|
|
*/
|
|
|
|
if (v1_query)
|
|
|
|
mld_set_v1_mode(idev);
|
2013-09-04 02:19:42 +04:00
|
|
|
|
|
|
|
/* cancel MLDv2 report timer */
|
2013-09-04 02:19:43 +04:00
|
|
|
mld_gq_stop_timer(idev);
|
2013-09-04 02:19:42 +04:00
|
|
|
/* cancel the interface change timer */
|
2013-09-04 02:19:43 +04:00
|
|
|
mld_ifc_stop_timer(idev);
|
2013-09-04 02:19:42 +04:00
|
|
|
/* clear deleted report items */
|
|
|
|
mld_clear_delrec(idev);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
|
|
|
|
unsigned long *max_delay)
|
|
|
|
{
|
|
|
|
*max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
|
|
|
|
|
|
|
|
mld_update_qrv(idev, mld);
|
|
|
|
mld_update_qi(idev, mld);
|
|
|
|
mld_update_qri(idev, mld);
|
|
|
|
|
|
|
|
idev->mc_maxdelay = *max_delay;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-06-08 01:05:02 +04:00
|
|
|
/* called with rcu_read_lock() */
|
2005-04-17 02:20:36 +04:00
|
|
|
int igmp6_event_query(struct sk_buff *skb)
|
|
|
|
{
|
2005-10-31 15:09:45 +03:00
|
|
|
struct mld2_query *mlh2 = NULL;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct ifmcaddr6 *ma;
|
2011-04-22 08:53:02 +04:00
|
|
|
const struct in6_addr *group;
|
2005-04-17 02:20:36 +04:00
|
|
|
unsigned long max_delay;
|
|
|
|
struct inet6_dev *idev;
|
2010-04-18 07:42:05 +04:00
|
|
|
struct mld_msg *mld;
|
2005-04-17 02:20:36 +04:00
|
|
|
int group_type;
|
|
|
|
int mark = 0;
|
2013-09-04 02:19:42 +04:00
|
|
|
int len, err;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* compute payload length excluding extension headers */
|
2007-04-26 04:54:47 +04:00
|
|
|
len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
|
2007-03-16 23:26:39 +03:00
|
|
|
len -= skb_network_header_len(skb);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2014-06-27 05:57:53 +04:00
|
|
|
/* RFC3810 6.2
|
|
|
|
* Upon reception of an MLD message that contains a Query, the node
|
|
|
|
* checks if the source address of the message is a valid link-local
|
|
|
|
* address, if the Hop Limit is set to 1, and if the Router Alert
|
|
|
|
* option is present in the Hop-By-Hop Options header of the IPv6
|
|
|
|
* packet. If any of these checks fails, the packet is dropped.
|
|
|
|
*/
|
|
|
|
if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
|
|
|
|
ipv6_hdr(skb)->hop_limit != 1 ||
|
|
|
|
!(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
|
|
|
|
IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
|
2005-04-17 02:20:36 +04:00
|
|
|
return -EINVAL;
|
|
|
|
|
2010-06-08 01:05:02 +04:00
|
|
|
idev = __in6_dev_get(skb->dev);
|
2015-03-29 16:00:04 +03:00
|
|
|
if (!idev)
|
2005-04-17 02:20:36 +04:00
|
|
|
return 0;
|
|
|
|
|
2010-04-18 07:42:05 +04:00
|
|
|
mld = (struct mld_msg *)icmp6_hdr(skb);
|
|
|
|
group = &mld->mld_mca;
|
2005-04-17 02:20:36 +04:00
|
|
|
group_type = ipv6_addr_type(group);
|
|
|
|
|
|
|
|
if (group_type != IPV6_ADDR_ANY &&
|
2010-06-08 01:05:02 +04:00
|
|
|
!(group_type & IPV6_ADDR_MULTICAST))
|
2005-04-17 02:20:36 +04:00
|
|
|
return -EINVAL;
|
|
|
|
|
2014-09-20 16:03:55 +04:00
|
|
|
if (len < MLD_V1_QUERY_LEN) {
|
|
|
|
return -EINVAL;
|
|
|
|
} else if (len == MLD_V1_QUERY_LEN || mld_in_v1_mode(idev)) {
|
|
|
|
err = mld_process_v1(idev, mld, &max_delay,
|
|
|
|
len == MLD_V1_QUERY_LEN);
|
2013-09-04 02:19:42 +04:00
|
|
|
if (err < 0)
|
|
|
|
return err;
|
2013-08-20 14:22:02 +04:00
|
|
|
} else if (len >= MLD_V2_QUERY_LEN_MIN) {
|
2007-02-09 17:24:49 +03:00
|
|
|
int srcs_offset = sizeof(struct mld2_query) -
|
2005-10-31 15:09:45 +03:00
|
|
|
sizeof(struct icmp6hdr);
|
net: ipv6: mld: fix v1/v2 switchback timeout to rfc3810, 9.12.
i) RFC3810, 9.2. Query Interval [QI] says:
The Query Interval variable denotes the interval between General
Queries sent by the Querier. Default value: 125 seconds. [...]
ii) RFC3810, 9.3. Query Response Interval [QRI] says:
The Maximum Response Delay used to calculate the Maximum Response
Code inserted into the periodic General Queries. Default value:
10000 (10 seconds) [...] The number of seconds represented by the
[Query Response Interval] must be less than the [Query Interval].
iii) RFC3810, 9.12. Older Version Querier Present Timeout [OVQPT] says:
The Older Version Querier Present Timeout is the time-out for
transitioning a host back to MLDv2 Host Compatibility Mode. When an
MLDv1 query is received, MLDv2 hosts set their Older Version Querier
Present Timer to [Older Version Querier Present Timeout].
This value MUST be ([Robustness Variable] times (the [Query Interval]
in the last Query received)) plus ([Query Response Interval]).
Hence, on *default* the timeout results in:
[RV] = 2, [QI] = 125sec, [QRI] = 10sec
[OVQPT] = [RV] * [QI] + [QRI] = 260sec
Having that said, we currently calculate [OVQPT] (here given as 'switchback'
variable) as ...
switchback = (idev->mc_qrv + 1) * max_delay
RFC3810, 9.12. says "the [Query Interval] in the last Query received". In
section "9.14. Configuring timers", it is said:
This section is meant to provide advice to network administrators on
how to tune these settings to their network. Ambitious router
implementations might tune these settings dynamically based upon
changing characteristics of the network. [...]
iv) RFC3810, 9.14.2. Query Interval:
The overall level of periodic MLD traffic is inversely proportional
to the Query Interval. A longer Query Interval results in a lower
overall level of MLD traffic. The value of the Query Interval MUST
be equal to or greater than the Maximum Response Delay used to
calculate the Maximum Response Code inserted in General Query
messages.
I assume that is why switchback is calculated as it is (3 * max_delay),
although this setting seems to be meant only for routers, to configure
their [QI] interval for non-default values. So using it like this here is
clearly wrong.
Concluding, the current behaviour in IPv6's multicast code does not conform
to the RFC, as the switchback is calculated wrongly. That is, the value is
too small, so MLDv2 hosts switch back to MLDv2 way too early, i.e. after
~30secs instead of ~260secs by default.
Hence, introduce necessary helper functions and fix this up properly as it
should be.
Introduced in 06da92283 ("[IPV6]: Add MLDv2 support."). Credits to Hannes
Frederic Sowa who also had a hand in this as well. Also thanks to Hangbin Liu
who did initial testing.
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Cc: David Stevens <dlstevens@us.ibm.com>
Cc: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-09-04 02:19:37 +04:00
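As a sanity check of the arithmetic above, a minimal user-space sketch
comparing the RFC3810, 9.12. formula with the old kernel calculation
(variable names are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int rv  = 2;   /* [RV]  Robustness Variable, default    */
	unsigned int qi  = 125; /* [QI]  Query Interval in seconds       */
	unsigned int qri = 10;  /* [QRI] Query Response Interval in
	                         * seconds; the default max_delay        */

	/* RFC3810, 9.12.: [OVQPT] = [RV] * [QI] + [QRI] */
	unsigned int ovqpt = rv * qi + qri;                /* 260s */

	/* old kernel formula: (idev->mc_qrv + 1) * max_delay */
	unsigned int old_switchback = (rv + 1) * qri;      /* 30s  */

	printf("OVQPT: %us, old switchback: %us\n", ovqpt, old_switchback);
	return 0;
}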
|
|
|
|
2010-06-08 01:05:02 +04:00
|
|
|
if (!pskb_may_pull(skb, srcs_offset))
|
2005-10-31 15:09:45 +03:00
|
|
|
return -EINVAL;
|
2010-06-08 01:05:02 +04:00
|
|
|
|
2007-04-26 05:04:18 +04:00
|
|
|
mlh2 = (struct mld2_query *)skb_transport_header(skb);
|
2013-08-20 14:22:00 +04:00
|
|
|
|
2013-09-04 02:19:42 +04:00
|
|
|
err = mld_process_v2(idev, mlh2, &max_delay);
|
|
|
|
if (err < 0)
|
|
|
|
return err;
|
net: ipv6: mld: fix v1/v2 switchback timeout to rfc3810, 9.12.
2013-09-04 02:19:37 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
if (group_type == IPV6_ADDR_ANY) { /* general query */
|
2010-06-08 01:05:02 +04:00
|
|
|
if (mlh2->mld2q_nsrcs)
|
2005-04-17 02:20:36 +04:00
|
|
|
return -EINVAL; /* no sources allowed */
|
2010-06-08 01:05:02 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
mld_gq_start_timer(idev);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
/* mark sources to include, if group & source-specific */
|
2010-04-18 07:42:05 +04:00
|
|
|
if (mlh2->mld2q_nsrcs != 0) {
|
2007-02-09 17:24:49 +03:00
|
|
|
if (!pskb_may_pull(skb, srcs_offset +
|
2010-06-08 01:05:02 +04:00
|
|
|
ntohs(mlh2->mld2q_nsrcs) * sizeof(struct in6_addr)))
|
2005-10-31 15:09:45 +03:00
|
|
|
return -EINVAL;
|
2010-06-08 01:05:02 +04:00
|
|
|
|
2007-04-26 05:04:18 +04:00
|
|
|
mlh2 = (struct mld2_query *)skb_transport_header(skb);
|
2005-10-31 15:09:45 +03:00
|
|
|
mark = 1;
|
|
|
|
}
|
ipv6: mld: answer mldv2 queries with mldv1 reports in mldv1 fallback
2014-09-20 16:03:55 +04:00
|
|
|
} else {
|
2005-04-17 02:20:36 +04:00
|
|
|
return -EINVAL;
|
ipv6: mld: answer mldv2 queries with mldv1 reports in mldv1 fallback
2014-09-20 16:03:55 +04:00
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
read_lock_bh(&idev->lock);
|
|
|
|
if (group_type == IPV6_ADDR_ANY) {
|
2014-08-25 00:53:10 +04:00
|
|
|
for (ma = idev->mc_list; ma; ma = ma->next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
spin_lock_bh(&ma->mca_lock);
|
|
|
|
igmp6_group_queried(ma, max_delay);
|
|
|
|
spin_unlock_bh(&ma->mca_lock);
|
|
|
|
}
|
|
|
|
} else {
|
2014-08-25 00:53:10 +04:00
|
|
|
for (ma = idev->mc_list; ma; ma = ma->next) {
|
2006-01-25 00:06:39 +03:00
|
|
|
if (!ipv6_addr_equal(group, &ma->mca_addr))
|
2005-04-17 02:20:36 +04:00
|
|
|
continue;
|
|
|
|
spin_lock_bh(&ma->mca_lock);
|
|
|
|
if (ma->mca_flags & MAF_TIMER_RUNNING) {
|
|
|
|
/* gsquery <- gsquery && mark */
|
|
|
|
if (!mark)
|
|
|
|
ma->mca_flags &= ~MAF_GSQUERY;
|
|
|
|
} else {
|
|
|
|
/* gsquery <- mark */
|
|
|
|
if (mark)
|
|
|
|
ma->mca_flags |= MAF_GSQUERY;
|
|
|
|
else
|
|
|
|
ma->mca_flags &= ~MAF_GSQUERY;
|
|
|
|
}
|
2005-12-28 01:03:00 +03:00
|
|
|
if (!(ma->mca_flags & MAF_GSQUERY) ||
|
2010-04-18 07:42:05 +04:00
|
|
|
mld_marksources(ma, ntohs(mlh2->mld2q_nsrcs), mlh2->mld2q_srcs))
|
2005-12-28 01:03:00 +03:00
|
|
|
igmp6_group_queried(ma, max_delay);
|
2005-04-17 02:20:36 +04:00
|
|
|
spin_unlock_bh(&ma->mca_lock);
|
2006-01-25 00:06:39 +03:00
|
|
|
break;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2010-06-08 01:05:02 +04:00
|
|
|
/* called with rcu_read_lock() */
|
2005-04-17 02:20:36 +04:00
|
|
|
int igmp6_event_report(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct ifmcaddr6 *ma;
|
|
|
|
struct inet6_dev *idev;
|
2010-04-18 07:42:05 +04:00
|
|
|
struct mld_msg *mld;
|
2005-04-17 02:20:36 +04:00
|
|
|
int addr_type;
|
|
|
|
|
|
|
|
/* Our own report looped back. Ignore it. */
|
|
|
|
if (skb->pkt_type == PACKET_LOOPBACK)
|
|
|
|
return 0;
|
|
|
|
|
2005-12-03 07:32:59 +03:00
|
|
|
/* send our report if the MC router may not have heard this report */
|
|
|
|
if (skb->pkt_type != PACKET_MULTICAST &&
|
|
|
|
skb->pkt_type != PACKET_BROADCAST)
|
|
|
|
return 0;
|
|
|
|
|
2010-04-18 07:42:05 +04:00
|
|
|
if (!pskb_may_pull(skb, sizeof(*mld) - sizeof(struct icmp6hdr)))
|
2005-04-17 02:20:36 +04:00
|
|
|
return -EINVAL;
|
|
|
|
|
2010-04-18 07:42:05 +04:00
|
|
|
mld = (struct mld_msg *)icmp6_hdr(skb);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/* Drop reports with a non-link-local source */
|
2007-04-26 04:54:47 +04:00
|
|
|
addr_type = ipv6_addr_type(&ipv6_hdr(skb)->saddr);
|
2007-02-09 17:24:49 +03:00
|
|
|
if (addr_type != IPV6_ADDR_ANY &&
|
2005-04-17 02:20:36 +04:00
|
|
|
!(addr_type&IPV6_ADDR_LINKLOCAL))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2010-06-08 01:05:02 +04:00
|
|
|
idev = __in6_dev_get(skb->dev);
|
2015-03-29 16:00:04 +03:00
|
|
|
if (!idev)
|
2005-04-17 02:20:36 +04:00
|
|
|
return -ENODEV;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Cancel the timer for this group
|
|
|
|
*/
|
|
|
|
|
|
|
|
read_lock_bh(&idev->lock);
|
2014-08-25 00:53:10 +04:00
|
|
|
for (ma = idev->mc_list; ma; ma = ma->next) {
|
2010-04-18 07:42:05 +04:00
|
|
|
if (ipv6_addr_equal(&ma->mca_addr, &mld->mld_mca)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
spin_lock(&ma->mca_lock);
|
|
|
|
if (del_timer(&ma->mca_timer))
|
2017-07-04 09:34:57 +03:00
|
|
|
refcount_dec(&ma->mca_refcnt);
|
2005-04-17 02:20:36 +04:00
|
|
|
ma->mca_flags &= ~(MAF_LAST_REPORTER|MAF_TIMER_RUNNING);
|
|
|
|
spin_unlock(&ma->mca_lock);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-05-18 22:57:34 +04:00
|
|
|
static bool is_in(struct ifmcaddr6 *pmc, struct ip6_sf_list *psf, int type,
|
|
|
|
int gdeleted, int sdeleted)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
switch (type) {
|
|
|
|
case MLD2_MODE_IS_INCLUDE:
|
|
|
|
case MLD2_MODE_IS_EXCLUDE:
|
|
|
|
if (gdeleted || sdeleted)
|
2012-05-18 22:57:34 +04:00
|
|
|
return false;
|
2005-12-28 01:03:00 +03:00
|
|
|
if (!((pmc->mca_flags & MAF_GSQUERY) && !psf->sf_gsresp)) {
|
|
|
|
if (pmc->mca_sfmode == MCAST_INCLUDE)
|
2012-05-18 22:57:34 +04:00
|
|
|
return true;
|
2005-12-28 01:03:00 +03:00
|
|
|
/* don't include if this source is excluded
|
|
|
|
* in all filters
|
|
|
|
*/
|
|
|
|
if (psf->sf_count[MCAST_INCLUDE])
|
2006-01-25 00:06:39 +03:00
|
|
|
return type == MLD2_MODE_IS_INCLUDE;
|
2005-12-28 01:03:00 +03:00
|
|
|
return pmc->mca_sfcount[MCAST_EXCLUDE] ==
|
|
|
|
psf->sf_count[MCAST_EXCLUDE];
|
|
|
|
}
|
2012-05-18 22:57:34 +04:00
|
|
|
return false;
|
2005-04-17 02:20:36 +04:00
|
|
|
case MLD2_CHANGE_TO_INCLUDE:
|
|
|
|
if (gdeleted || sdeleted)
|
2012-05-18 22:57:34 +04:00
|
|
|
return false;
|
2005-04-17 02:20:36 +04:00
|
|
|
return psf->sf_count[MCAST_INCLUDE] != 0;
|
|
|
|
case MLD2_CHANGE_TO_EXCLUDE:
|
|
|
|
if (gdeleted || sdeleted)
|
2012-05-18 22:57:34 +04:00
|
|
|
return false;
|
2005-04-17 02:20:36 +04:00
|
|
|
if (pmc->mca_sfcount[MCAST_EXCLUDE] == 0 ||
|
|
|
|
psf->sf_count[MCAST_INCLUDE])
|
2012-05-18 22:57:34 +04:00
|
|
|
return false;
|
2005-04-17 02:20:36 +04:00
|
|
|
return pmc->mca_sfcount[MCAST_EXCLUDE] ==
|
|
|
|
psf->sf_count[MCAST_EXCLUDE];
|
|
|
|
case MLD2_ALLOW_NEW_SOURCES:
|
|
|
|
if (gdeleted || !psf->sf_crcount)
|
2012-05-18 22:57:34 +04:00
|
|
|
return false;
|
2005-04-17 02:20:36 +04:00
|
|
|
return (pmc->mca_sfmode == MCAST_INCLUDE) ^ sdeleted;
|
|
|
|
case MLD2_BLOCK_OLD_SOURCES:
|
|
|
|
if (pmc->mca_sfmode == MCAST_INCLUDE)
|
|
|
|
return gdeleted || (psf->sf_crcount && sdeleted);
|
|
|
|
return psf->sf_crcount && !gdeleted && !sdeleted;
|
|
|
|
}
|
2012-05-18 22:57:34 +04:00
|
|
|
return false;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted)
|
|
|
|
{
|
|
|
|
struct ip6_sf_list *psf;
|
|
|
|
int scount = 0;
|
|
|
|
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!is_in(pmc, psf, type, gdeleted, sdeleted))
|
|
|
|
continue;
|
|
|
|
scount++;
|
|
|
|
}
|
|
|
|
return scount;
|
|
|
|
}
|
|
|
|
|
2013-01-21 10:48:19 +04:00
|
|
|
static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
|
|
|
|
struct net_device *dev,
|
|
|
|
const struct in6_addr *saddr,
|
|
|
|
const struct in6_addr *daddr,
|
|
|
|
int proto, int len)
|
|
|
|
{
|
|
|
|
struct ipv6hdr *hdr;
|
|
|
|
|
|
|
|
skb->protocol = htons(ETH_P_IPV6);
|
|
|
|
skb->dev = dev;
|
|
|
|
|
|
|
|
skb_reset_network_header(skb);
|
|
|
|
skb_put(skb, sizeof(struct ipv6hdr));
|
|
|
|
hdr = ipv6_hdr(skb);
|
|
|
|
|
|
|
|
ip6_flow_hdr(hdr, 0, 0);
|
|
|
|
|
|
|
|
hdr->payload_len = htons(len);
|
|
|
|
hdr->nexthdr = proto;
|
|
|
|
hdr->hop_limit = inet6_sk(sk)->hop_limit;
|
|
|
|
|
|
|
|
hdr->saddr = *saddr;
|
|
|
|
hdr->daddr = *daddr;
|
|
|
|
}
|
|
|
|
|
2014-11-05 22:27:38 +03:00
|
|
|
static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2013-06-29 17:30:49 +04:00
|
|
|
struct net_device *dev = idev->dev;
|
2008-03-25 15:47:49 +03:00
|
|
|
struct net *net = dev_net(dev);
|
2008-03-07 22:16:55 +03:00
|
|
|
struct sock *sk = net->ipv6.igmp_sk;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct sk_buff *skb;
|
|
|
|
struct mld2_report *pmr;
|
|
|
|
struct in6_addr addr_buf;
|
2008-04-10 10:42:11 +04:00
|
|
|
const struct in6_addr *saddr;
|
2011-11-18 06:20:04 +04:00
|
|
|
int hlen = LL_RESERVED_SPACE(dev);
|
|
|
|
int tlen = dev->needed_tailroom;
|
2014-11-05 22:27:38 +03:00
|
|
|
unsigned int size = mtu + hlen + tlen;
|
2005-04-17 02:20:36 +04:00
|
|
|
int err;
|
|
|
|
u8 ra[8] = { IPPROTO_ICMPV6, 0,
|
|
|
|
IPV6_TLV_ROUTERALERT, 2, 0, 0,
|
|
|
|
IPV6_TLV_PADN, 0 };
|
|
|
|
|
|
|
|
/* we assume size > sizeof(ra) here */
|
2010-06-05 14:03:30 +04:00
|
|
|
/* limit our allocations to order-0 page */
|
|
|
|
size = min_t(int, size, SKB_MAX_ORDER(0, 0));
|
|
|
|
skb = sock_alloc_send_skb(sk, size, 1, &err);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-10-09 12:59:42 +04:00
|
|
|
if (!skb)
|
2005-04-17 02:20:36 +04:00
|
|
|
return NULL;
|
|
|
|
|
2013-07-26 19:05:16 +04:00
|
|
|
skb->priority = TC_PRIO_CONTROL;
|
2011-11-18 06:20:04 +04:00
|
|
|
skb_reserve(skb, hlen);
|
mld, igmp: Fix reserved tailroom calculation
The current reserved_tailroom calculation fails to take hlen and tlen into
account.
skb:
[__hlen__|__data____________|__tlen___|__extra__]
(head marks the left edge; skb_end_offset marks the right edge)
In this representation, hlen + data + tlen is the size passed to alloc_skb.
"extra" is the extra space made available in __alloc_skb because of
rounding up by kmalloc. We can reorder the representation like so:
[__hlen__|__data____________|__extra__|__tlen___]
(head marks the left edge; skb_end_offset marks the right edge)
The maximum space available for ip headers and payload without
fragmentation is min(mtu, data + extra). Therefore,
reserved_tailroom
= data + extra + tlen - min(mtu, data + extra)
= skb_end_offset - hlen - min(mtu, skb_end_offset - hlen - tlen)
= skb_tailroom - min(mtu, skb_tailroom - tlen) ; after skb_reserve(hlen)
Compare the second line to the current expression:
reserved_tailroom = skb_end_offset - min(mtu, skb_end_offset)
and we can see that hlen and tlen are not taken into account.
The min() in the third line can be expanded into:
if mtu < skb_tailroom - tlen:
reserved_tailroom = skb_tailroom - mtu
else:
reserved_tailroom = tlen
Depending on hlen, tlen, mtu and the number of multicast address records,
the current code may output skbs that have less tailroom than
dev->needed_tailroom or it may output more skbs than needed because not all
space available is used.
Fixes: 4c672e4b ("ipv6: mld: fix add_grhead skb_over_panic for devs with large MTUs")
Signed-off-by: Benjamin Poirier <bpoirier@suse.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
2016-03-01 02:03:33 +03:00
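The min() expansion above maps directly onto the helper called below; the
following sketch mirrors that expansion and should be read as an
approximation, not a verbatim copy of the in-tree helper:

#include <linux/skbuff.h>

/* Reserve tailroom so that at most 'mtu' bytes are used for headers and
 * payload, while 'tlen' bytes always remain available for the driver.
 */
static void tailroom_reserve_sketch(struct sk_buff *skb, unsigned int mtu,
				    unsigned int tlen)
{
	if (mtu < skb_tailroom(skb) - tlen)
		/* use at most mtu */
		skb->reserved_tailroom = skb_tailroom(skb) - mtu;
	else
		/* use all available space beyond the reserved tlen */
		skb->reserved_tailroom = tlen;
}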
|
|
|
skb_tailroom_reserve(skb, mtu, tlen);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-06-29 17:30:49 +04:00
|
|
|
if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
/* <draft-ietf-magma-mld-source-05.txt>:
|
2007-02-09 17:24:49 +03:00
|
|
|
* use unspecified address as the source address
|
2005-04-17 02:20:36 +04:00
|
|
|
* when a valid link-local address is not available.
|
|
|
|
*/
|
2008-04-10 10:42:11 +04:00
|
|
|
saddr = &in6addr_any;
|
|
|
|
} else
|
|
|
|
saddr = &addr_buf;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-21 10:48:19 +04:00
|
|
|
ip6_mc_hdr(sk, skb, dev, saddr, &mld2_all_mcr, NEXTHDR_HOP, 0);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
networking: introduce and use skb_put_data()
A common pattern with skb_put() is to just want to memcpy()
some data into the new space, introduce skb_put_data() for
this.
An spatch similar to the one for skb_put_zero() converts many
of the places using it:
@@
identifier p, p2;
expression len, skb, data;
type t, t2;
@@
(
-p = skb_put(skb, len);
+p = skb_put_data(skb, data, len);
|
-p = (t)skb_put(skb, len);
+p = skb_put_data(skb, data, len);
)
(
p2 = (t2)p;
-memcpy(p2, data, len);
|
-memcpy(p, data, len);
)
@@
type t, t2;
identifier p, p2;
expression skb, data;
@@
t *p;
...
(
-p = skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
|
-p = (t *)skb_put(skb, sizeof(t));
+p = skb_put_data(skb, data, sizeof(t));
)
(
p2 = (t2)p;
-memcpy(p2, data, sizeof(*p));
|
-memcpy(p, data, sizeof(*p));
)
@@
expression skb, len, data;
@@
-memcpy(skb_put(skb, len), data, len);
+skb_put_data(skb, data, len);
(again, manually post-processed to retain some comments)
Reviewed-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:20 +03:00
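In plain C terms, the conversion this spatch performs amounts to the
following; the snippet reuses the router-alert buffer from this file
purely for illustration:

/* before: open-coded put + copy */
u8 *p = skb_put(skb, sizeof(ra));
memcpy(p, ra, sizeof(ra));

/* after: one helper does both */
skb_put_data(skb, ra, sizeof(ra));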
|
|
|
skb_put_data(skb, ra, sizeof(ra));
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-04-20 07:29:13 +04:00
|
|
|
skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data);
|
2007-03-15 03:05:37 +03:00
|
|
|
skb_put(skb, sizeof(*pmr));
|
|
|
|
pmr = (struct mld2_report *)skb_transport_header(skb);
|
2010-04-18 07:42:05 +04:00
|
|
|
pmr->mld2r_type = ICMPV6_MLD2_REPORT;
|
|
|
|
pmr->mld2r_resv1 = 0;
|
|
|
|
pmr->mld2r_cksum = 0;
|
|
|
|
pmr->mld2r_resv2 = 0;
|
|
|
|
pmr->mld2r_ngrec = 0;
|
2005-04-17 02:20:36 +04:00
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mld_sendpack(struct sk_buff *skb)
|
|
|
|
{
|
2007-04-26 04:54:47 +04:00
|
|
|
struct ipv6hdr *pip6 = ipv6_hdr(skb);
|
2007-04-26 05:04:18 +04:00
|
|
|
struct mld2_report *pmr =
|
|
|
|
(struct mld2_report *)skb_transport_header(skb);
|
2005-04-17 02:20:36 +04:00
|
|
|
int payload_len, mldlen;
|
2010-06-08 01:05:02 +04:00
|
|
|
struct inet6_dev *idev;
|
2008-03-25 15:47:49 +03:00
|
|
|
struct net *net = dev_net(skb->dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
int err;
|
2011-03-13 00:22:43 +03:00
|
|
|
struct flowi6 fl6;
|
2009-06-02 09:19:30 +04:00
|
|
|
struct dst_entry *dst;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-06-08 01:05:02 +04:00
|
|
|
rcu_read_lock();
|
|
|
|
idev = __in6_dev_get(skb->dev);
|
2009-04-27 13:45:02 +04:00
|
|
|
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
|
|
|
|
|
2013-05-29 00:34:26 +04:00
|
|
|
payload_len = (skb_tail_pointer(skb) - skb_network_header(skb)) -
|
|
|
|
sizeof(*pip6);
|
|
|
|
mldlen = skb_tail_pointer(skb) - skb_transport_header(skb);
|
2005-04-17 02:20:36 +04:00
|
|
|
pip6->payload_len = htons(payload_len);
|
|
|
|
|
2010-04-18 07:42:05 +04:00
|
|
|
pmr->mld2r_cksum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
|
|
|
|
IPPROTO_ICMPV6,
|
|
|
|
csum_partial(skb_transport_header(skb),
|
|
|
|
mldlen, 0));
|
2007-12-07 04:40:56 +03:00
|
|
|
|
2011-03-13 00:22:43 +03:00
|
|
|
icmpv6_flow_init(net->ipv6.igmp_sk, &fl6, ICMPV6_MLD2_REPORT,
|
2007-12-07 04:40:56 +03:00
|
|
|
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
|
|
|
|
skb->dev->ifindex);
|
2013-01-18 06:00:24 +04:00
|
|
|
dst = icmp6_dst_alloc(skb->dev, &fl6);
|
2007-12-07 04:40:56 +03:00
|
|
|
|
2011-03-03 00:27:41 +03:00
|
|
|
err = 0;
|
|
|
|
if (IS_ERR(dst)) {
|
|
|
|
err = PTR_ERR(dst);
|
|
|
|
dst = NULL;
|
|
|
|
}
|
2009-06-02 09:19:30 +04:00
|
|
|
skb_dst_set(skb, dst);
|
2007-12-07 04:40:56 +03:00
|
|
|
if (err)
|
|
|
|
goto err_out;
|
|
|
|
|
2015-04-06 05:19:04 +03:00
|
|
|
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
|
2015-09-16 04:04:16 +03:00
|
|
|
net, net->ipv6.igmp_sk, skb, NULL, skb->dev,
|
2015-10-08 00:48:35 +03:00
|
|
|
dst_output);
|
2007-12-07 04:40:56 +03:00
|
|
|
out:
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!err) {
|
2014-03-31 22:14:10 +04:00
|
|
|
ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
|
|
|
|
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
|
|
|
|
} else {
|
|
|
|
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-06-08 01:05:02 +04:00
|
|
|
rcu_read_unlock();
|
2007-12-07 04:40:56 +03:00
|
|
|
return;
|
|
|
|
|
|
|
|
err_out:
|
|
|
|
kfree_skb(skb);
|
|
|
|
goto out;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel)
|
|
|
|
{
|
2005-10-05 23:08:13 +04:00
|
|
|
return sizeof(struct mld2_grec) + 16 * mld_scount(pmc, type, gdel, sdel);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
|
2017-12-11 18:03:38 +03:00
|
|
|
int type, struct mld2_grec **ppgr, unsigned int mtu)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct mld2_report *pmr;
|
|
|
|
struct mld2_grec *pgr;
|
|
|
|
|
2017-12-11 18:03:38 +03:00
|
|
|
if (!skb) {
|
|
|
|
skb = mld_newpack(pmc->idev, mtu);
|
|
|
|
if (!skb)
|
|
|
|
return NULL;
|
|
|
|
}
|
networking: make skb_put & friends return void pointers
It seems like a historic accident that these return unsigned char *,
and in many places that means casts are required, more often than not.
Make these functions (skb_put, __skb_put and pskb_put) return void *
and remove all the casts across the tree, adding a (u8 *) cast only
where the unsigned char pointer was used directly, all done with the
following spatch:
@@
expression SKB, LEN;
typedef u8;
identifier fn = { skb_put, __skb_put };
@@
- *(fn(SKB, LEN))
+ *(u8 *)fn(SKB, LEN)
@@
expression E, SKB, LEN;
identifier fn = { skb_put, __skb_put };
type T;
@@
- E = ((T *)(fn(SKB, LEN)))
+ E = fn(SKB, LEN)
which actually doesn't cover pskb_put since there are only three
users overall.
A handful of stragglers were converted manually, notably a macro in
drivers/isdn/i4l/isdn_bsdcomp.c and, oddly enough, one of the many
instances in net/bluetooth/hci_sock.c. In the former file, I also
had to fix one whitespace problem spatch introduced.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:21 +03:00
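Applied to this file, the change removes casts such as the one below
(before/after shown for illustration):

/* before: cast required, skb_put() returned unsigned char * */
pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec));

/* after: skb_put() returns void *, so the assignment needs no cast */
pgr = skb_put(skb, sizeof(struct mld2_grec));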
|
|
|
pgr = skb_put(skb, sizeof(struct mld2_grec));
|
2005-04-17 02:20:36 +04:00
|
|
|
pgr->grec_type = type;
|
|
|
|
pgr->grec_auxwords = 0;
|
|
|
|
pgr->grec_nsrcs = 0;
|
|
|
|
pgr->grec_mca = pmc->mca_addr; /* structure copy */
|
2007-04-26 05:04:18 +04:00
|
|
|
pmr = (struct mld2_report *)skb_transport_header(skb);
|
2010-04-18 07:42:05 +04:00
|
|
|
pmr->mld2r_ngrec = htons(ntohs(pmr->mld2r_ngrec)+1);
|
2005-04-17 02:20:36 +04:00
|
|
|
*ppgr = pgr;
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
|
2014-11-05 22:27:38 +03:00
|
|
|
#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
|
2014-01-17 01:27:59 +04:00
|
|
|
int type, int gdeleted, int sdeleted, int crsend)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2013-06-29 17:30:49 +04:00
|
|
|
struct inet6_dev *idev = pmc->idev;
|
|
|
|
struct net_device *dev = idev->dev;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct mld2_report *pmr;
|
|
|
|
struct mld2_grec *pgr = NULL;
|
|
|
|
struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list;
|
2005-12-28 01:03:00 +03:00
|
|
|
int scount, stotal, first, isquery, truncate;
|
2017-12-11 18:03:38 +03:00
|
|
|
unsigned int mtu;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
if (pmc->mca_flags & MAF_NOREPORT)
|
|
|
|
return skb;
|
|
|
|
|
2017-12-11 18:03:38 +03:00
|
|
|
mtu = READ_ONCE(dev->mtu);
|
|
|
|
if (mtu < IPV6_MIN_MTU)
|
|
|
|
return skb;
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
isquery = type == MLD2_MODE_IS_INCLUDE ||
|
|
|
|
type == MLD2_MODE_IS_EXCLUDE;
|
|
|
|
truncate = type == MLD2_MODE_IS_EXCLUDE ||
|
|
|
|
type == MLD2_CHANGE_TO_EXCLUDE;
|
|
|
|
|
2005-12-28 01:03:00 +03:00
|
|
|
stotal = scount = 0;
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
psf_list = sdeleted ? &pmc->mca_tomb : &pmc->mca_sources;
|
|
|
|
|
2005-12-28 01:03:00 +03:00
|
|
|
if (!*psf_list)
|
|
|
|
goto empty_source;
|
|
|
|
|
2007-04-26 05:04:18 +04:00
|
|
|
pmr = skb ? (struct mld2_report *)skb_transport_header(skb) : NULL;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/* EX and TO_EX get a fresh packet, if needed */
|
|
|
|
if (truncate) {
|
2010-04-18 07:42:05 +04:00
|
|
|
if (pmr && pmr->mld2r_ngrec &&
|
2005-04-17 02:20:36 +04:00
|
|
|
AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
|
|
|
|
if (skb)
|
|
|
|
mld_sendpack(skb);
|
2017-12-11 18:03:38 +03:00
|
|
|
skb = mld_newpack(idev, mtu);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
first = 1;
|
|
|
|
psf_prev = NULL;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = *psf_list; psf; psf = psf_next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
struct in6_addr *psrc;
|
|
|
|
|
|
|
|
psf_next = psf->sf_next;
|
|
|
|
|
ipv6/mcast: init as INCLUDE when join SSM INCLUDE group
This is the IPv6 version of patch "ipv4/igmp: init group mode as INCLUDE when
join source group". From RFC3810, part 6.1:
If no per-interface state existed for that
multicast address before the change (i.e., the change consisted of
creating a new per-interface record), or if no state exists after the
change (i.e., the change consisted of deleting a per-interface
record), then the "non-existent" state is considered to have an
INCLUDE filter mode and an empty source list.
Which means a new multicast group should start with state IN(). Currently,
for MLDv2 SSM JOIN_SOURCE_GROUP mode, we first call ipv6_sock_mc_join(),
then ip6_mc_source(), which will trigger a TO_IN() message instead of
ALLOW().
The issue was exposed by commit a052517a8ff65 ("net/multicast: should not
send source list records when have filter mode change"). Before this change,
we sent both ALLOW(A) and TO_IN(A). Now, we only send TO_IN(A).
Fix it by adding a new parameter to init group mode. Also add some wrapper
functions to avoid changing too much code.
v1 -> v2:
In the first version I only cleared the group change record. But this is
not enough, because when a new group joins, it is initialized as EXCLUDE
and triggers a filter mode change in ip/ip6_mc_add_src(), which clears the
sf_crcount of all source addresses. This would prevent early-joined
addresses from sending state change records if multiple source addresses
joined at the same time.
In the v2 patch, I fixed it by directly initializing the mode to INCLUDE
for SSM JOIN_SOURCE_GROUP. I also split the original patch into two
separate patches for IPv4 and IPv6.
There is also a difference between the v4 and v6 versions. For IPv6, when
the interface goes down and up, we send a correct state change record with
the unspecified IPv6 address (::) via ipv6_mc_up(). But after DAD is
completed, we resend the change record as TO_IN() in mld_send_initial_cr().
Fix it by sending ALLOW() for INCLUDE mode in mld_send_initial_cr().
Fixes: a052517a8ff65 ("net/multicast: should not send source list records when have filter mode change")
Reviewed-by: Stefano Brivio <sbrivio@redhat.com>
Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-07-10 17:41:27 +03:00
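For reference, a minimal user-space sketch of the SSM join this commit is
about; MCAST_JOIN_SOURCE_GROUP is the real socket option, while the
addresses and the helper name are placeholders:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Join group ff3e::1234 with source 2001:db8::1 on interface 'ifindex'.
 * With the fix above, this initial SSM join starts the group in INCLUDE
 * mode, so the host reports ALLOW(source) instead of TO_IN(source).
 */
static int join_ssm(int fd, unsigned int ifindex)
{
	struct group_source_req gsr;
	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;

	memset(&gsr, 0, sizeof(gsr));
	gsr.gsr_interface = ifindex;
	grp->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "ff3e::1234", &grp->sin6_addr);
	src->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);

	return setsockopt(fd, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
			  &gsr, sizeof(gsr));
}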
|
|
|
if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
|
2005-04-17 02:20:36 +04:00
|
|
|
psf_prev = psf;
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2016-08-02 13:02:57 +03:00
|
|
|
/* Based on RFC3810 6.1. Should not send source-list change
|
|
|
|
* records when there is a filter mode change.
|
|
|
|
*/
|
|
|
|
if (((gdeleted && pmc->mca_sfmode == MCAST_EXCLUDE) ||
|
|
|
|
(!gdeleted && pmc->mca_crcount)) &&
|
|
|
|
(type == MLD2_ALLOW_NEW_SOURCES ||
|
|
|
|
type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount)
|
|
|
|
goto decrease_sf_crcount;
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/* clear marks on query responses */
|
|
|
|
if (isquery)
|
|
|
|
psf->sf_gsresp = 0;
|
|
|
|
|
|
|
|
if (AVAILABLE(skb) < sizeof(*psrc) +
|
|
|
|
first*sizeof(struct mld2_grec)) {
|
|
|
|
if (truncate && !first)
|
|
|
|
break; /* truncate these */
|
|
|
|
if (pgr)
|
|
|
|
pgr->grec_nsrcs = htons(scount);
|
|
|
|
if (skb)
|
|
|
|
mld_sendpack(skb);
|
2017-12-11 18:03:38 +03:00
|
|
|
skb = mld_newpack(idev, mtu);
|
2005-04-17 02:20:36 +04:00
|
|
|
first = 1;
|
|
|
|
scount = 0;
|
|
|
|
}
|
|
|
|
if (first) {
|
2017-12-11 18:03:38 +03:00
|
|
|
skb = add_grhead(skb, pmc, type, &pgr, mtu);
|
2005-04-17 02:20:36 +04:00
|
|
|
first = 0;
|
|
|
|
}
|
2007-02-07 01:35:25 +03:00
|
|
|
if (!skb)
|
|
|
|
return NULL;
|
networking: make skb_put & friends return void pointers
2017-06-16 15:29:21 +03:00
|
|
|
psrc = skb_put(skb, sizeof(*psrc));
|
2005-04-17 02:20:36 +04:00
|
|
|
*psrc = psf->sf_addr;
|
2005-12-28 01:03:00 +03:00
|
|
|
scount++; stotal++;
|
2005-04-17 02:20:36 +04:00
|
|
|
if ((type == MLD2_ALLOW_NEW_SOURCES ||
|
|
|
|
type == MLD2_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
|
2016-08-02 13:02:57 +03:00
|
|
|
decrease_sf_crcount:
|
2005-04-17 02:20:36 +04:00
|
|
|
psf->sf_crcount--;
|
|
|
|
if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
|
|
|
|
if (psf_prev)
|
|
|
|
psf_prev->sf_next = psf->sf_next;
|
|
|
|
else
|
|
|
|
*psf_list = psf->sf_next;
|
|
|
|
kfree(psf);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
psf_prev = psf;
|
|
|
|
}
|
2005-12-28 01:03:00 +03:00
|
|
|
|
|
|
|
empty_source:
|
|
|
|
if (!stotal) {
|
|
|
|
if (type == MLD2_ALLOW_NEW_SOURCES ||
|
|
|
|
type == MLD2_BLOCK_OLD_SOURCES)
|
|
|
|
return skb;
|
2014-01-17 01:27:59 +04:00
|
|
|
if (pmc->mca_crcount || isquery || crsend) {
|
2005-12-28 01:03:00 +03:00
|
|
|
/* make sure we have room for group header */
|
|
|
|
if (skb && AVAILABLE(skb) < sizeof(struct mld2_grec)) {
|
|
|
|
mld_sendpack(skb);
|
|
|
|
skb = NULL; /* add_grhead will get a new one */
|
|
|
|
}
|
2017-12-11 18:03:38 +03:00
|
|
|
skb = add_grhead(skb, pmc, type, &pgr, mtu);
|
2005-12-28 01:03:00 +03:00
|
|
|
}
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
if (pgr)
|
|
|
|
pgr->grec_nsrcs = htons(scount);
|
|
|
|
|
|
|
|
if (isquery)
|
|
|
|
pmc->mca_flags &= ~MAF_GSQUERY; /* clear query state */
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc)
|
|
|
|
{
|
|
|
|
struct sk_buff *skb = NULL;
|
|
|
|
int type;
|
|
|
|
|
2013-06-29 17:30:49 +04:00
|
|
|
read_lock_bh(&idev->lock);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!pmc) {
|
2014-08-25 00:53:10 +04:00
|
|
|
for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (pmc->mca_flags & MAF_NOREPORT)
|
|
|
|
continue;
|
|
|
|
spin_lock_bh(&pmc->mca_lock);
|
|
|
|
if (pmc->mca_sfcount[MCAST_EXCLUDE])
|
|
|
|
type = MLD2_MODE_IS_EXCLUDE;
|
|
|
|
else
|
|
|
|
type = MLD2_MODE_IS_INCLUDE;
|
2014-01-17 01:27:59 +04:00
|
|
|
skb = add_grec(skb, pmc, type, 0, 0, 0);
|
2005-04-17 02:20:36 +04:00
|
|
|
spin_unlock_bh(&pmc->mca_lock);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
spin_lock_bh(&pmc->mca_lock);
|
|
|
|
if (pmc->mca_sfcount[MCAST_EXCLUDE])
|
|
|
|
type = MLD2_MODE_IS_EXCLUDE;
|
|
|
|
else
|
|
|
|
type = MLD2_MODE_IS_INCLUDE;
|
2014-01-17 01:27:59 +04:00
|
|
|
skb = add_grec(skb, pmc, type, 0, 0, 0);
|
2005-04-17 02:20:36 +04:00
|
|
|
spin_unlock_bh(&pmc->mca_lock);
|
|
|
|
}
|
2013-06-29 17:30:49 +04:00
|
|
|
read_unlock_bh(&idev->lock);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (skb)
|
|
|
|
mld_sendpack(skb);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* remove zero-count source records from a source filter list
|
|
|
|
*/
|
|
|
|
static void mld_clear_zeros(struct ip6_sf_list **ppsf)
|
|
|
|
{
|
|
|
|
struct ip6_sf_list *psf_prev, *psf_next, *psf;
|
|
|
|
|
|
|
|
psf_prev = NULL;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = *ppsf; psf; psf = psf_next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
psf_next = psf->sf_next;
|
|
|
|
if (psf->sf_crcount == 0) {
|
|
|
|
if (psf_prev)
|
|
|
|
psf_prev->sf_next = psf->sf_next;
|
|
|
|
else
|
|
|
|
*ppsf = psf->sf_next;
|
|
|
|
kfree(psf);
|
|
|
|
} else
|
|
|
|
psf_prev = psf;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void mld_send_cr(struct inet6_dev *idev)
|
|
|
|
{
|
|
|
|
struct ifmcaddr6 *pmc, *pmc_prev, *pmc_next;
|
|
|
|
struct sk_buff *skb = NULL;
|
|
|
|
int type, dtype;
|
|
|
|
|
|
|
|
read_lock_bh(&idev->lock);
|
2010-02-18 05:48:44 +03:00
|
|
|
spin_lock(&idev->mc_lock);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/* deleted MCAs */
|
|
|
|
pmc_prev = NULL;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (pmc = idev->mc_tomb; pmc; pmc = pmc_next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
pmc_next = pmc->next;
|
|
|
|
if (pmc->mca_sfmode == MCAST_INCLUDE) {
|
|
|
|
type = MLD2_BLOCK_OLD_SOURCES;
|
|
|
|
dtype = MLD2_BLOCK_OLD_SOURCES;
|
2014-01-17 01:27:59 +04:00
|
|
|
skb = add_grec(skb, pmc, type, 1, 0, 0);
|
|
|
|
skb = add_grec(skb, pmc, dtype, 1, 1, 0);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
if (pmc->mca_crcount) {
|
|
|
|
if (pmc->mca_sfmode == MCAST_EXCLUDE) {
|
|
|
|
type = MLD2_CHANGE_TO_INCLUDE;
|
2014-01-17 01:27:59 +04:00
|
|
|
skb = add_grec(skb, pmc, type, 1, 0, 0);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2005-12-28 01:03:00 +03:00
|
|
|
pmc->mca_crcount--;
|
2005-04-17 02:20:36 +04:00
|
|
|
if (pmc->mca_crcount == 0) {
|
|
|
|
mld_clear_zeros(&pmc->mca_tomb);
|
|
|
|
mld_clear_zeros(&pmc->mca_sources);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (pmc->mca_crcount == 0 && !pmc->mca_tomb &&
|
|
|
|
!pmc->mca_sources) {
|
|
|
|
if (pmc_prev)
|
|
|
|
pmc_prev->next = pmc_next;
|
|
|
|
else
|
|
|
|
idev->mc_tomb = pmc_next;
|
|
|
|
in6_dev_put(pmc->idev);
|
|
|
|
kfree(pmc);
|
|
|
|
} else
|
|
|
|
pmc_prev = pmc;
|
|
|
|
}
|
2010-02-18 05:48:44 +03:00
|
|
|
spin_unlock(&idev->mc_lock);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/* change recs */
|
2014-08-25 00:53:10 +04:00
|
|
|
for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
spin_lock_bh(&pmc->mca_lock);
|
|
|
|
if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
|
|
|
|
type = MLD2_BLOCK_OLD_SOURCES;
|
|
|
|
dtype = MLD2_ALLOW_NEW_SOURCES;
|
|
|
|
} else {
|
|
|
|
type = MLD2_ALLOW_NEW_SOURCES;
|
|
|
|
dtype = MLD2_BLOCK_OLD_SOURCES;
|
|
|
|
}
|
2014-01-17 01:27:59 +04:00
|
|
|
skb = add_grec(skb, pmc, type, 0, 0, 0);
|
|
|
|
skb = add_grec(skb, pmc, dtype, 0, 1, 0); /* deleted sources */
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/* filter mode changes */
|
|
|
|
if (pmc->mca_crcount) {
|
|
|
|
if (pmc->mca_sfmode == MCAST_EXCLUDE)
|
|
|
|
type = MLD2_CHANGE_TO_EXCLUDE;
|
|
|
|
else
|
|
|
|
type = MLD2_CHANGE_TO_INCLUDE;
|
2014-01-17 01:27:59 +04:00
|
|
|
skb = add_grec(skb, pmc, type, 0, 0, 0);
|
2005-12-28 01:03:00 +03:00
|
|
|
pmc->mca_crcount--;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
spin_unlock_bh(&pmc->mca_lock);
|
|
|
|
}
|
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
if (!skb)
|
|
|
|
return;
|
|
|
|
(void) mld_sendpack(skb);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
|
|
|
|
{
|
2008-03-25 15:47:49 +03:00
|
|
|
struct net *net = dev_net(dev);
|
2008-03-07 22:16:55 +03:00
|
|
|
struct sock *sk = net->ipv6.igmp_sk;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct inet6_dev *idev;
|
2007-02-09 17:24:49 +03:00
|
|
|
struct sk_buff *skb;
|
2010-04-18 07:42:05 +04:00
|
|
|
struct mld_msg *hdr;
|
2008-04-10 10:42:11 +04:00
|
|
|
const struct in6_addr *snd_addr, *saddr;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct in6_addr addr_buf;
|
2011-11-18 06:20:04 +04:00
|
|
|
int hlen = LL_RESERVED_SPACE(dev);
|
|
|
|
int tlen = dev->needed_tailroom;
|
2005-04-17 02:20:36 +04:00
|
|
|
int err, len, payload_len, full_len;
|
|
|
|
u8 ra[8] = { IPPROTO_ICMPV6, 0,
|
|
|
|
IPV6_TLV_ROUTERALERT, 2, 0, 0,
|
|
|
|
IPV6_TLV_PADN, 0 };
|
2011-03-13 00:22:43 +03:00
|
|
|
struct flowi6 fl6;
|
2009-06-02 09:19:30 +04:00
|
|
|
struct dst_entry *dst;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2008-04-10 10:42:11 +04:00
|
|
|
if (type == ICMPV6_MGM_REDUCTION)
|
|
|
|
snd_addr = &in6addr_linklocal_allrouters;
|
|
|
|
else
|
|
|
|
snd_addr = addr;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
len = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
|
|
|
|
payload_len = len + sizeof(ra);
|
|
|
|
full_len = sizeof(struct ipv6hdr) + payload_len;
|
|
|
|
|
2009-04-27 13:45:02 +04:00
|
|
|
rcu_read_lock();
|
|
|
|
IP6_UPD_PO_STATS(net, __in6_dev_get(dev),
|
|
|
|
IPSTATS_MIB_OUT, full_len);
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
2011-11-18 06:20:04 +04:00
|
|
|
skb = sock_alloc_send_skb(sk, hlen + tlen + full_len, 1, &err);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2015-03-29 16:00:04 +03:00
|
|
|
if (!skb) {
|
2006-11-04 14:11:37 +03:00
|
|
|
rcu_read_lock();
|
2008-10-08 21:54:51 +04:00
|
|
|
IP6_INC_STATS(net, __in6_dev_get(dev),
|
2006-11-04 14:11:37 +03:00
|
|
|
IPSTATS_MIB_OUTDISCARDS);
|
|
|
|
rcu_read_unlock();
|
2005-04-17 02:20:36 +04:00
|
|
|
return;
|
|
|
|
}
|
2013-07-26 19:05:16 +04:00
|
|
|
skb->priority = TC_PRIO_CONTROL;
|
2011-11-18 06:20:04 +04:00
|
|
|
skb_reserve(skb, hlen);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-04-26 04:08:10 +04:00
|
|
|
if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
/* <draft-ietf-magma-mld-source-05.txt>:
|
2007-02-09 17:24:49 +03:00
|
|
|
* use unspecified address as the source address
|
2005-04-17 02:20:36 +04:00
|
|
|
* when a valid link-local address is not available.
|
|
|
|
*/
|
2008-04-10 10:42:11 +04:00
|
|
|
saddr = &in6addr_any;
|
|
|
|
} else
|
|
|
|
saddr = &addr_buf;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-21 10:48:19 +04:00
|
|
|
ip6_mc_hdr(sk, skb, dev, saddr, snd_addr, NEXTHDR_HOP, payload_len);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
networking: introduce and use skb_put_data()
2017-06-16 15:29:20 +03:00
|
|
|
skb_put_data(skb, ra, sizeof(ra));
|
2005-04-17 02:20:36 +04:00
|
|
|
|
networking: convert many more places to skb_put_zero()
There were many places that my previous spatch didn't find,
as pointed out by yuan linyu in various patches.
The following spatch found many more and also removes the
now unnecessary casts:
@@
identifier p, p2;
expression len;
expression skb;
type t, t2;
@@
(
-p = skb_put(skb, len);
+p = skb_put_zero(skb, len);
|
-p = (t)skb_put(skb, len);
+p = skb_put_zero(skb, len);
)
... when != p
(
p2 = (t2)p;
-memset(p2, 0, len);
|
-memset(p, 0, len);
)
@@
type t, t2;
identifier p, p2;
expression skb;
@@
t *p;
...
(
-p = skb_put(skb, sizeof(t));
+p = skb_put_zero(skb, sizeof(t));
|
-p = (t *)skb_put(skb, sizeof(t));
+p = skb_put_zero(skb, sizeof(t));
)
... when != p
(
p2 = (t2)p;
-memset(p2, 0, sizeof(*p));
|
-memset(p, 0, sizeof(*p));
)
@@
expression skb, len;
@@
-memset(skb_put(skb, len), 0, len);
+skb_put_zero(skb, len);
Apply it to the tree (with one manual fixup to keep the
comment in vxlan.c, which spatch removed.)
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 15:29:19 +03:00
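Applied to the MLD code below, the conversion looks like this
(before/after, for illustration):

/* before: put, then zero by hand */
hdr = skb_put(skb, sizeof(struct mld_msg));
memset(hdr, 0, sizeof(struct mld_msg));

/* after: one helper puts and zeroes */
hdr = skb_put_zero(skb, sizeof(struct mld_msg));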
|
|
|
hdr = skb_put_zero(skb, sizeof(struct mld_msg));
|
2010-04-18 07:42:05 +04:00
|
|
|
hdr->mld_type = type;
|
2011-11-21 07:39:03 +04:00
|
|
|
hdr->mld_mca = *addr;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-04-18 07:42:05 +04:00
|
|
|
hdr->mld_cksum = csum_ipv6_magic(saddr, snd_addr, len,
|
|
|
|
IPPROTO_ICMPV6,
|
|
|
|
csum_partial(hdr, len, 0));
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-06-08 01:05:02 +04:00
|
|
|
rcu_read_lock();
|
|
|
|
idev = __in6_dev_get(skb->dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2011-03-13 00:22:43 +03:00
|
|
|
icmpv6_flow_init(sk, &fl6, type,
|
2007-12-07 04:40:56 +03:00
|
|
|
&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr,
|
|
|
|
skb->dev->ifindex);
|
2013-01-18 06:00:24 +04:00
|
|
|
dst = icmp6_dst_alloc(skb->dev, &fl6);
|
2011-03-03 00:27:41 +03:00
|
|
|
if (IS_ERR(dst)) {
|
|
|
|
err = PTR_ERR(dst);
|
2007-12-07 04:40:56 +03:00
|
|
|
goto err_out;
|
2011-03-03 00:27:41 +03:00
|
|
|
}
|
2007-12-07 04:40:56 +03:00
|
|
|
|
2009-06-02 09:19:30 +04:00
|
|
|
skb_dst_set(skb, dst);
|
2015-09-16 04:04:16 +03:00
|
|
|
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
|
|
|
|
net, sk, skb, NULL, skb->dev,
|
2015-10-08 00:48:35 +03:00
|
|
|
dst_output);
|
2007-12-07 04:40:56 +03:00
|
|
|
out:
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!err) {
|
2008-10-08 21:33:50 +04:00
|
|
|
ICMP6MSGOUT_INC_STATS(net, idev, type);
|
2008-10-08 21:33:06 +04:00
|
|
|
ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
|
2005-04-17 02:20:36 +04:00
|
|
|
} else
|
2008-10-08 21:54:51 +04:00
|
|
|
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-06-08 01:05:02 +04:00
|
|
|
rcu_read_unlock();
|
2005-04-17 02:20:36 +04:00
|
|
|
return;
|
2007-12-07 04:40:56 +03:00
|
|
|
|
|
|
|
err_out:
|
|
|
|
kfree_skb(skb);
|
|
|
|
goto out;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2014-01-17 01:27:59 +04:00
|
|
|
static void mld_send_initial_cr(struct inet6_dev *idev)
|
2013-06-27 02:07:01 +04:00
|
|
|
{
|
2014-01-17 01:27:59 +04:00
|
|
|
struct sk_buff *skb;
|
|
|
|
struct ifmcaddr6 *pmc;
|
|
|
|
int type;
|
|
|
|
|
|
|
|
if (mld_in_v1_mode(idev))
|
|
|
|
return;
|
|
|
|
|
|
|
|
skb = NULL;
|
|
|
|
read_lock_bh(&idev->lock);
|
2014-08-25 00:53:10 +04:00
|
|
|
for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
|
2014-01-17 01:27:59 +04:00
|
|
|
spin_lock_bh(&pmc->mca_lock);
|
|
|
|
if (pmc->mca_sfcount[MCAST_EXCLUDE])
|
|
|
|
type = MLD2_CHANGE_TO_EXCLUDE;
|
|
|
|
else
|
ipv6/mcast: init as INCLUDE when join SSM INCLUDE group
2018-07-10 17:41:27 +03:00
|
|
|
type = MLD2_ALLOW_NEW_SOURCES;
|
2014-01-17 01:27:59 +04:00
|
|
|
skb = add_grec(skb, pmc, type, 0, 0, 1);
|
|
|
|
spin_unlock_bh(&pmc->mca_lock);
|
2013-06-27 02:07:01 +04:00
|
|
|
}
|
2014-01-17 01:27:59 +04:00
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
if (skb)
|
|
|
|
mld_sendpack(skb);
|
2013-06-27 02:07:01 +04:00
|
|
|
}
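/* Minimal userspace sketch (an illustration, not part of this file) of
 * the SSM join path discussed in the commit message above: joining with
 * MCAST_JOIN_SOURCE_GROUP (the RFC 3678 API from <netinet/in.h>) is the
 * sequence that now initializes the group in INCLUDE mode, so the kernel
 * emits ALLOW(S) rather than TO_IN(S). The function name and the group
 * and source addresses below are example values only.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int join_ssm_channel(int sock, unsigned int ifindex)
{
	struct group_source_req gsr;
	struct sockaddr_in6 *grp = (struct sockaddr_in6 *)&gsr.gsr_group;
	struct sockaddr_in6 *src = (struct sockaddr_in6 *)&gsr.gsr_source;

	memset(&gsr, 0, sizeof(gsr));
	gsr.gsr_interface = ifindex;
	grp->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "ff3e::1234", &grp->sin6_addr);	/* example SSM group */
	src->sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &src->sin6_addr);	/* example source */

	/* On an AF_INET6 datagram socket this invokes ipv6_sock_mc_join()
	 * followed by ip6_mc_source(), the sequence the fix above targets.
	 */
	return setsockopt(sock, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
			  &gsr, sizeof(gsr));
}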
|
|
|
|
|
|
|
|
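/* Called when DAD finishes on the link-local address: send the initial
 * state-change report and, for robustness against loss, re-arm the DAD
 * timer so the report is transmitted mc_qrv times in total.
 */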
void ipv6_mc_dad_complete(struct inet6_dev *idev)
|
|
|
|
{
|
|
|
|
idev->mc_dad_count = idev->mc_qrv;
|
|
|
|
if (idev->mc_dad_count) {
|
2014-01-17 01:27:59 +04:00
|
|
|
mld_send_initial_cr(idev);
|
2013-06-27 02:07:01 +04:00
|
|
|
idev->mc_dad_count--;
|
|
|
|
if (idev->mc_dad_count)
|
2018-06-21 14:49:36 +03:00
|
|
|
mld_dad_start_timer(idev,
|
|
|
|
unsolicited_report_interval(idev));
|
2013-06-27 02:07:01 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-17 00:43:17 +03:00
|
|
|
static void mld_dad_timer_expire(struct timer_list *t)
|
2013-06-27 02:07:01 +04:00
|
|
|
{
|
2017-10-17 00:43:17 +03:00
|
|
|
struct inet6_dev *idev = from_timer(idev, t, mc_dad_timer);
|
2013-06-27 02:07:01 +04:00
|
|
|
|
2014-01-17 01:27:59 +04:00
|
|
|
mld_send_initial_cr(idev);
|
2013-06-27 02:07:01 +04:00
|
|
|
if (idev->mc_dad_count) {
|
|
|
|
idev->mc_dad_count--;
|
|
|
|
if (idev->mc_dad_count)
|
2018-06-21 14:49:36 +03:00
|
|
|
mld_dad_start_timer(idev,
|
|
|
|
unsolicited_report_interval(idev));
|
2013-06-27 02:07:01 +04:00
|
|
|
}
|
2013-09-30 00:41:34 +04:00
|
|
|
in6_dev_put(idev);
|
2013-06-27 02:07:01 +04:00
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
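/* Drop one reference on a single source filter for the given mode.
 * Once neither INCLUDE nor EXCLUDE references remain, the filter is
 * unlinked; in MLDv2 mode a previously active source is parked on the
 * tomb list with sf_crcount set so state-change reports can still name
 * it. Returns 1 when such a tomb record was kept, 0 otherwise, and
 * -ESRCH if the source was never filtered.
 */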
static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
|
2011-04-22 08:53:02 +04:00
|
|
|
const struct in6_addr *psfsrc)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct ip6_sf_list *psf, *psf_prev;
|
|
|
|
int rv = 0;
|
|
|
|
|
|
|
|
psf_prev = NULL;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
|
|
|
|
break;
|
|
|
|
psf_prev = psf;
|
|
|
|
}
|
|
|
|
if (!psf || psf->sf_count[sfmode] == 0) {
|
|
|
|
/* source filter not found, or count wrong => bug */
|
|
|
|
return -ESRCH;
|
|
|
|
}
|
|
|
|
psf->sf_count[sfmode]--;
|
|
|
|
if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
|
|
|
|
struct inet6_dev *idev = pmc->idev;
|
|
|
|
|
|
|
|
/* no more filters for this source */
|
|
|
|
if (psf_prev)
|
|
|
|
psf_prev->sf_next = psf->sf_next;
|
|
|
|
else
|
|
|
|
pmc->mca_sources = psf->sf_next;
|
|
|
|
if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
|
2013-09-04 02:19:38 +04:00
|
|
|
!mld_in_v1_mode(idev)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
psf->sf_crcount = idev->mc_qrv;
|
|
|
|
psf->sf_next = pmc->mca_tomb;
|
|
|
|
pmc->mca_tomb = psf;
|
|
|
|
rv = 1;
|
|
|
|
} else
|
|
|
|
kfree(psf);
|
|
|
|
}
|
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
|
2011-04-22 08:53:02 +04:00
|
|
|
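/* Remove a list of source filter references from a group. If this
 * leaves the group with INCLUDE references but no EXCLUDE ones, the
 * filter mode flips to INCLUDE and a full set of change reports is
 * scheduled; otherwise per-source change reports are sent as needed.
 */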
static int ip6_mc_del_src(struct inet6_dev *idev, const struct in6_addr *pmca,
|
|
|
|
int sfmode, int sfcount, const struct in6_addr *psfsrc,
|
2005-04-17 02:20:36 +04:00
|
|
|
int delta)
|
|
|
|
{
|
|
|
|
struct ifmcaddr6 *pmc;
|
|
|
|
int changerec = 0;
|
|
|
|
int i, err;
|
|
|
|
|
|
|
|
if (!idev)
|
|
|
|
return -ENODEV;
|
|
|
|
read_lock_bh(&idev->lock);
|
2014-08-25 00:53:10 +04:00
|
|
|
for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (ipv6_addr_equal(pmca, &pmc->mca_addr))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (!pmc) {
|
|
|
|
/* MCA not found?? bug */
|
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
return -ESRCH;
|
|
|
|
}
|
|
|
|
spin_lock_bh(&pmc->mca_lock);
|
|
|
|
sf_markstate(pmc);
|
|
|
|
if (!delta) {
|
|
|
|
if (!pmc->mca_sfcount[sfmode]) {
|
|
|
|
spin_unlock_bh(&pmc->mca_lock);
|
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
pmc->mca_sfcount[sfmode]--;
|
|
|
|
}
|
|
|
|
err = 0;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (i = 0; i < sfcount; i++) {
|
2005-04-17 02:20:36 +04:00
|
|
|
int rv = ip6_mc_del1_src(pmc, sfmode, &psfsrc[i]);
|
|
|
|
|
|
|
|
changerec |= rv > 0;
|
|
|
|
if (!err && rv < 0)
|
|
|
|
err = rv;
|
|
|
|
}
|
|
|
|
if (pmc->mca_sfmode == MCAST_EXCLUDE &&
|
|
|
|
pmc->mca_sfcount[MCAST_EXCLUDE] == 0 &&
|
|
|
|
pmc->mca_sfcount[MCAST_INCLUDE]) {
|
|
|
|
struct ip6_sf_list *psf;
|
|
|
|
|
|
|
|
/* filter mode change */
|
|
|
|
pmc->mca_sfmode = MCAST_INCLUDE;
|
|
|
|
pmc->mca_crcount = idev->mc_qrv;
|
|
|
|
idev->mc_ifc_count = pmc->mca_crcount;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
|
2005-04-17 02:20:36 +04:00
|
|
|
psf->sf_crcount = 0;
|
|
|
|
mld_ifc_event(pmc->idev);
|
|
|
|
} else if (sf_setstate(pmc) || changerec)
|
|
|
|
mld_ifc_event(pmc->idev);
|
|
|
|
spin_unlock_bh(&pmc->mca_lock);
|
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add multicast single-source filter to the interface list
|
|
|
|
*/
|
|
|
|
static int ip6_mc_add1_src(struct ifmcaddr6 *pmc, int sfmode,
|
2011-11-30 10:21:05 +04:00
|
|
|
const struct in6_addr *psfsrc)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct ip6_sf_list *psf, *psf_prev;
|
|
|
|
|
|
|
|
psf_prev = NULL;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (ipv6_addr_equal(&psf->sf_addr, psfsrc))
|
|
|
|
break;
|
|
|
|
psf_prev = psf;
|
|
|
|
}
|
|
|
|
if (!psf) {
|
2006-03-21 10:01:32 +03:00
|
|
|
psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!psf)
|
|
|
|
return -ENOBUFS;
|
2006-03-21 10:01:32 +03:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
psf->sf_addr = *psfsrc;
|
|
|
|
if (psf_prev) {
|
|
|
|
psf_prev->sf_next = psf;
|
|
|
|
} else
|
|
|
|
pmc->mca_sources = psf;
|
|
|
|
}
|
|
|
|
psf->sf_count[sfmode]++;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
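/* Snapshot each source's current inclusion state (sf_oldin) before a
 * filter update: under EXCLUDE, a source is "in" only when every
 * EXCLUDE reference lists it and no INCLUDE reference does; under
 * INCLUDE, it is "in" when any INCLUDE reference lists it.
 */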
static void sf_markstate(struct ifmcaddr6 *pmc)
|
|
|
|
{
|
|
|
|
struct ip6_sf_list *psf;
|
|
|
|
int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
|
|
|
|
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
|
2005-04-17 02:20:36 +04:00
|
|
|
if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
|
|
|
|
psf->sf_oldin = mca_xcount ==
|
|
|
|
psf->sf_count[MCAST_EXCLUDE] &&
|
|
|
|
!psf->sf_count[MCAST_INCLUDE];
|
|
|
|
} else
|
|
|
|
psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
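/* Recompute each source's inclusion state after an update and compare
 * it with the sf_markstate() snapshot: newly included sources get
 * sf_crcount set (and any stale tomb entry dropped), while sources
 * that just dropped out are parked on the tomb list. Returns the
 * number of sources that need state-change reports.
 */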
static int sf_setstate(struct ifmcaddr6 *pmc)
|
|
|
|
{
|
2006-01-25 00:06:39 +03:00
|
|
|
struct ip6_sf_list *psf, *dpsf;
|
2005-04-17 02:20:36 +04:00
|
|
|
int mca_xcount = pmc->mca_sfcount[MCAST_EXCLUDE];
|
|
|
|
int qrv = pmc->idev->mc_qrv;
|
|
|
|
int new_in, rv;
|
|
|
|
|
|
|
|
rv = 0;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = pmc->mca_sources; psf; psf = psf->sf_next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (pmc->mca_sfcount[MCAST_EXCLUDE]) {
|
|
|
|
new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
|
|
|
|
!psf->sf_count[MCAST_INCLUDE];
|
|
|
|
} else
|
|
|
|
new_in = psf->sf_count[MCAST_INCLUDE] != 0;
|
2006-01-25 00:06:39 +03:00
|
|
|
if (new_in) {
|
|
|
|
if (!psf->sf_oldin) {
|
2006-02-04 04:10:03 +03:00
|
|
|
struct ip6_sf_list *prev = NULL;
|
2006-01-25 00:06:39 +03:00
|
|
|
|
2014-08-25 00:53:10 +04:00
|
|
|
for (dpsf = pmc->mca_tomb; dpsf;
|
|
|
|
dpsf = dpsf->sf_next) {
|
2006-01-25 00:06:39 +03:00
|
|
|
if (ipv6_addr_equal(&dpsf->sf_addr,
|
|
|
|
&psf->sf_addr))
|
|
|
|
break;
|
|
|
|
prev = dpsf;
|
|
|
|
}
|
|
|
|
if (dpsf) {
|
|
|
|
if (prev)
|
|
|
|
prev->sf_next = dpsf->sf_next;
|
|
|
|
else
|
|
|
|
pmc->mca_tomb = dpsf->sf_next;
|
|
|
|
kfree(dpsf);
|
|
|
|
}
|
|
|
|
psf->sf_crcount = qrv;
|
|
|
|
rv++;
|
|
|
|
}
|
|
|
|
} else if (psf->sf_oldin) {
|
|
|
|
psf->sf_crcount = 0;
|
|
|
|
/*
|
|
|
|
* add or update "delete" records if an active filter
|
|
|
|
* is now inactive
|
|
|
|
*/
|
2014-08-25 00:53:10 +04:00
|
|
|
for (dpsf = pmc->mca_tomb; dpsf; dpsf = dpsf->sf_next)
|
2006-01-25 00:06:39 +03:00
|
|
|
if (ipv6_addr_equal(&dpsf->sf_addr,
|
|
|
|
&psf->sf_addr))
|
|
|
|
break;
|
|
|
|
if (!dpsf) {
|
2010-05-31 21:23:22 +04:00
|
|
|
dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
|
2006-01-25 00:06:39 +03:00
|
|
|
if (!dpsf)
|
|
|
|
continue;
|
|
|
|
*dpsf = *psf;
|
|
|
|
/* pmc->mca_lock held by callers */
|
|
|
|
dpsf->sf_next = pmc->mca_tomb;
|
|
|
|
pmc->mca_tomb = dpsf;
|
|
|
|
}
|
|
|
|
dpsf->sf_crcount = qrv;
|
2005-04-17 02:20:36 +04:00
|
|
|
rv++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return rv;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Add multicast source filter list to the interface list
|
|
|
|
*/
|
2011-04-22 08:53:02 +04:00
|
|
|
static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
|
|
|
|
int sfmode, int sfcount, const struct in6_addr *psfsrc,
|
2005-04-17 02:20:36 +04:00
|
|
|
int delta)
|
|
|
|
{
|
|
|
|
struct ifmcaddr6 *pmc;
|
|
|
|
int isexclude;
|
|
|
|
int i, err;
|
|
|
|
|
|
|
|
if (!idev)
|
|
|
|
return -ENODEV;
|
|
|
|
read_lock_bh(&idev->lock);
|
2014-08-25 00:53:10 +04:00
|
|
|
for (pmc = idev->mc_list; pmc; pmc = pmc->next) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (ipv6_addr_equal(pmca, &pmc->mca_addr))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (!pmc) {
|
|
|
|
/* MCA not found?? bug */
|
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
return -ESRCH;
|
|
|
|
}
|
|
|
|
spin_lock_bh(&pmc->mca_lock);
|
|
|
|
|
|
|
|
sf_markstate(pmc);
|
|
|
|
isexclude = pmc->mca_sfmode == MCAST_EXCLUDE;
|
|
|
|
if (!delta)
|
|
|
|
pmc->mca_sfcount[sfmode]++;
|
|
|
|
err = 0;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (i = 0; i < sfcount; i++) {
|
2011-11-30 10:21:05 +04:00
|
|
|
err = ip6_mc_add1_src(pmc, sfmode, &psfsrc[i]);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (err)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (err) {
|
|
|
|
int j;
|
|
|
|
|
|
|
|
if (!delta)
|
|
|
|
pmc->mca_sfcount[sfmode]--;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (j = 0; j < i; j++)
|
2012-04-04 20:47:04 +04:00
|
|
|
ip6_mc_del1_src(pmc, sfmode, &psfsrc[j]);
|
2005-04-17 02:20:36 +04:00
|
|
|
} else if (isexclude != (pmc->mca_sfcount[MCAST_EXCLUDE] != 0)) {
|
|
|
|
struct ip6_sf_list *psf;
|
|
|
|
|
|
|
|
/* filter mode change */
|
|
|
|
if (pmc->mca_sfcount[MCAST_EXCLUDE])
|
|
|
|
pmc->mca_sfmode = MCAST_EXCLUDE;
|
|
|
|
else if (pmc->mca_sfcount[MCAST_INCLUDE])
|
|
|
|
pmc->mca_sfmode = MCAST_INCLUDE;
|
|
|
|
/* else no filters; keep old mode for reports */
|
|
|
|
|
|
|
|
pmc->mca_crcount = idev->mc_qrv;
|
|
|
|
idev->mc_ifc_count = pmc->mca_crcount;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = pmc->mca_sources; psf; psf = psf->sf_next)
|
2005-04-17 02:20:36 +04:00
|
|
|
psf->sf_crcount = 0;
|
|
|
|
mld_ifc_event(idev);
|
|
|
|
} else if (sf_setstate(pmc))
|
|
|
|
mld_ifc_event(idev);
|
|
|
|
spin_unlock_bh(&pmc->mca_lock);
|
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
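/* Free all tomb and active source filters and reset the group to
 * EXCLUDE mode with an empty source list, i.e. a plain any-source
 * membership.
 */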
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc)
|
|
|
|
{
|
|
|
|
struct ip6_sf_list *psf, *nextpsf;
|
|
|
|
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = pmc->mca_tomb; psf; psf = nextpsf) {
|
2005-04-17 02:20:36 +04:00
|
|
|
nextpsf = psf->sf_next;
|
|
|
|
kfree(psf);
|
|
|
|
}
|
|
|
|
pmc->mca_tomb = NULL;
|
2014-08-25 00:53:10 +04:00
|
|
|
for (psf = pmc->mca_sources; psf; psf = nextpsf) {
|
2005-04-17 02:20:36 +04:00
|
|
|
nextpsf = psf->sf_next;
|
|
|
|
kfree(psf);
|
|
|
|
}
|
|
|
|
pmc->mca_sources = NULL;
|
|
|
|
pmc->mca_sfmode = MCAST_EXCLUDE;
|
2005-09-15 07:53:42 +04:00
|
|
|
pmc->mca_sfcount[MCAST_INCLUDE] = 0;
|
2005-04-17 02:20:36 +04:00
|
|
|
pmc->mca_sfcount[MCAST_EXCLUDE] = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
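/* MLDv1-style join: send an unsolicited report right away (unless the
 * group is never reported, e.g. link-local all-nodes) and schedule one
 * retransmission after a random delay within the unsolicited report
 * interval, marking ourselves as the last reporter.
 */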
static void igmp6_join_group(struct ifmcaddr6 *ma)
|
|
|
|
{
|
|
|
|
unsigned long delay;
|
|
|
|
|
|
|
|
if (ma->mca_flags & MAF_NOREPORT)
|
|
|
|
return;
|
|
|
|
|
|
|
|
igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
|
|
|
|
|
2014-01-11 16:15:59 +04:00
|
|
|
delay = prandom_u32() % unsolicited_report_interval(ma->idev);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
spin_lock_bh(&ma->mca_lock);
|
|
|
|
if (del_timer(&ma->mca_timer)) {
|
2017-07-04 09:34:57 +03:00
|
|
|
refcount_dec(&ma->mca_refcnt);
|
2005-04-17 02:20:36 +04:00
|
|
|
delay = ma->mca_timer.expires - jiffies;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!mod_timer(&ma->mca_timer, jiffies + delay))
|
2017-07-04 09:34:57 +03:00
|
|
|
refcount_inc(&ma->mca_refcnt);
|
2005-04-17 02:20:36 +04:00
|
|
|
ma->mca_flags |= MAF_TIMER_RUNNING | MAF_LAST_REPORTER;
|
|
|
|
spin_unlock_bh(&ma->mca_lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
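/* Release all of this socket's source filter references for one group:
 * the any-source (empty EXCLUDE) case when no filter list exists,
 * otherwise every address in the socket's filter list, which is then
 * freed. Runs under the socket's filter lock.
 */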
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
|
|
|
|
struct inet6_dev *idev)
|
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
2018-10-13 04:58:53 +03:00
|
|
|
write_lock_bh(&iml->sflock);
|
2007-10-09 12:59:42 +04:00
|
|
|
if (!iml->sflist) {
|
2005-04-17 02:20:36 +04:00
|
|
|
/* any-source empty exclude case */
|
2018-10-13 04:58:53 +03:00
|
|
|
err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0);
|
|
|
|
} else {
|
|
|
|
err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode,
|
|
|
|
iml->sflist->sl_count, iml->sflist->sl_addr, 0);
|
|
|
|
sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max));
|
|
|
|
iml->sflist = NULL;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2018-10-13 04:58:53 +03:00
|
|
|
write_unlock_bh(&iml->sflock);
|
2005-04-17 02:20:36 +04:00
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
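/* MLDv1: send a Done (ICMPV6_MGM_REDUCTION) only if this host was the
 * last to report the group. MLDv2: tombstone the group's state and
 * trigger an interface change report instead.
 */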
static void igmp6_leave_group(struct ifmcaddr6 *ma)
|
|
|
|
{
|
2013-09-04 02:19:38 +04:00
|
|
|
if (mld_in_v1_mode(ma->idev)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
if (ma->mca_flags & MAF_LAST_REPORTER)
|
|
|
|
igmp6_send(&ma->mca_addr, ma->idev->dev,
|
|
|
|
ICMPV6_MGM_REDUCTION);
|
|
|
|
} else {
|
|
|
|
mld_add_delrec(ma->idev, ma);
|
|
|
|
mld_ifc_event(ma->idev);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-17 00:43:17 +03:00
|
|
|
static void mld_gq_timer_expire(struct timer_list *t)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2017-10-17 00:43:17 +03:00
	struct inet6_dev *idev = from_timer(idev, t, mc_gq_timer);

	idev->mc_gq_running = 0;

	mld_send_report(idev, NULL);
	in6_dev_put(idev);
}
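To make the conversion described above concrete, here is a small,
self-contained user-space sketch of what from_timer() boils down to.
The macros mirror the kernel's pattern (container_of() over the
embedded timer field), but struct demo_dev and its members are
illustrative stand-ins, not kernel definitions; it compiles with gcc or
clang (typeof is a GNU extension).

#include <stddef.h>
#include <stdio.h>

/* Stand-in for the kernel type; only its address matters here. */
struct timer_list { int dummy; };

/* from_timer() is container_of() over the embedded timer field. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define from_timer(var, callback_timer, timer_fieldname) \
	container_of(callback_timer, typeof(*var), timer_fieldname)

struct demo_dev {
	int mc_gq_running;
	struct timer_list mc_gq_timer;
};

static void demo_expire(struct timer_list *t)
{
	/* Recover the owning object from the embedded timer. */
	struct demo_dev *idev = from_timer(idev, t, mc_gq_timer);

	printf("mc_gq_running = %d\n", idev->mc_gq_running);
}

int main(void)
{
	struct demo_dev dev = { .mc_gq_running = 1 };

	/* The kernel would arm the timer; here we invoke the handler. */
	demo_expire(&dev.mc_gq_timer);
	return 0;
}

The key point is that the callback receives only the struct timer_list
pointer and derives its owning object by pointer arithmetic, so no
unsigned long data cookie is needed.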
static void mld_ifc_timer_expire(struct timer_list *t)
{
	struct inet6_dev *idev = from_timer(idev, t, mc_ifc_timer);

	mld_send_cr(idev);

	if (idev->mc_ifc_count) {
		idev->mc_ifc_count--;
		if (idev->mc_ifc_count)
			mld_ifc_start_timer(idev,
					    unsolicited_report_interval(idev));
	}
	in6_dev_put(idev);
}
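/*
 * Reader's note (inferred from the code around this point): mc_ifc_count
 * is seeded from mc_qrv, the MLD robustness variable, so the change
 * report sent by mld_ifc_timer_expire() above is retransmitted until the
 * count runs out, matching RFC 3810's retransmission behaviour.
 */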
static void mld_ifc_event(struct inet6_dev *idev)
{
	if (mld_in_v1_mode(idev))
		return;

	idev->mc_ifc_count = idev->mc_qrv;
	mld_ifc_start_timer(idev, 1);
}
static void igmp6_timer_handler(struct timer_list *t)
{
	struct ifmcaddr6 *ma = from_timer(ma, t, mca_timer);

	if (mld_in_v1_mode(ma->idev))
		igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
	else
		mld_send_report(ma->idev, ma);

	spin_lock(&ma->mca_lock);
	ma->mca_flags |= MAF_LAST_REPORTER;
	ma->mca_flags &= ~MAF_TIMER_RUNNING;
	spin_unlock(&ma->mca_lock);
	ma_put(ma);
}
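/*
 * For context, a sketch (not a verbatim quote of this file) of how the
 * per-group timer is armed after the conversion described above: the
 * data cookie is gone and the callback recovers its ifmcaddr6 via
 * from_timer(), as seen in igmp6_timer_handler():
 *
 *	timer_setup(&mc->mca_timer, igmp6_timer_handler, 0);
 */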
/* Device changing type */

void ipv6_mc_unmap(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Install multicast list, except for all-nodes (already installed) */

	read_lock_bh(&idev->lock);
	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_dropped(i);
	read_unlock_bh(&idev->lock);
}

void ipv6_mc_remap(struct inet6_dev *idev)
{
	ipv6_mc_up(idev);
}
/* Device going down */

void ipv6_mc_down(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Withdraw multicast list */

	read_lock_bh(&idev->lock);

	for (i = idev->mc_list; i; i = i->next)
		igmp6_group_dropped(i);

	/* Stop the timers only after the group drop, or we will
	 * start them again in mld_ifc_event().
	 */
	mld_ifc_stop_timer(idev);
	mld_gq_stop_timer(idev);
	mld_dad_stop_timer(idev);
	read_unlock_bh(&idev->lock);
}
static void ipv6_mc_reset(struct inet6_dev *idev)
{
	idev->mc_qrv = sysctl_mld_qrv;
	idev->mc_qi = MLD_QI_DEFAULT;
	idev->mc_qri = MLD_QRI_DEFAULT;
	idev->mc_v1_seen = 0;
	idev->mc_maxdelay = unsolicited_report_interval(idev);
}
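/*
 * Assuming the usual MLD_QI_DEFAULT/MLD_QRI_DEFAULT definitions near
 * the top of this file, the reset above restores RFC 3810's defaults:
 * a 125 s Query Interval and a 10 s Query Response Interval.
 */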
/* Device going up */

void ipv6_mc_up(struct inet6_dev *idev)
{
	struct ifmcaddr6 *i;

	/* Install multicast list, except for all-nodes (already installed) */

	read_lock_bh(&idev->lock);
	ipv6_mc_reset(idev);
	for (i = idev->mc_list; i; i = i->next) {
		mld_del_delrec(idev, i);
		igmp6_group_added(i);
	}
	read_unlock_bh(&idev->lock);
}
/* IPv6 device initialization. */

void ipv6_mc_init_dev(struct inet6_dev *idev)
{
	write_lock_bh(&idev->lock);
	spin_lock_init(&idev->mc_lock);
	idev->mc_gq_running = 0;
treewide: setup_timer() -> timer_setup()
This converts all remaining cases of the old setup_timer() API into using
timer_setup(), where the callback argument is the structure already
holding the struct timer_list. These should have no behavioral changes,
since they just change which pointer is passed into the callback with
the same available pointers after conversion. It handles the following
examples, in addition to some other variations.
Casting from unsigned long:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
setup_timer(&ptr->my_timer, my_callback, ptr);
and forced object casts:
void my_callback(struct something *ptr)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, (unsigned long)ptr);
become:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
Direct function assignments:
void my_callback(unsigned long data)
{
struct something *ptr = (struct something *)data;
...
}
...
ptr->my_timer.function = my_callback;
have a temporary cast added, along with converting the args:
void my_callback(struct timer_list *t)
{
struct something *ptr = from_timer(ptr, t, my_timer);
...
}
...
ptr->my_timer.function = (TIMER_FUNC_TYPE)my_callback;
And finally, callbacks without a data assignment:
void my_callback(unsigned long data)
{
...
}
...
setup_timer(&ptr->my_timer, my_callback, 0);
have their argument renamed to verify they're unused during conversion:
void my_callback(struct timer_list *unused)
{
...
}
...
timer_setup(&ptr->my_timer, my_callback, 0);
The conversion is done with the following Coccinelle script:
spatch --very-quiet --all-includes --include-headers \
-I ./arch/x86/include -I ./arch/x86/include/generated \
-I ./include -I ./arch/x86/include/uapi \
-I ./arch/x86/include/generated/uapi -I ./include/uapi \
-I ./include/generated/uapi --include ./include/linux/kconfig.h \
--dir . \
--cocci-file ~/src/data/timer_setup.cocci
@fix_address_of@
expression e;
@@
setup_timer(
-&(e)
+&e
, ...)
// Update any raw setup_timer() usages that have a NULL callback, but
// would otherwise match change_timer_function_usage, since the latter
// will update all function assignments done in the face of a NULL
// function initialization in setup_timer().
@change_timer_function_usage_NULL@
expression _E;
identifier _timer;
type _cast_data;
@@
(
-setup_timer(&_E->_timer, NULL, _E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E->_timer, NULL, (_cast_data)_E);
+timer_setup(&_E->_timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, &_E);
+timer_setup(&_E._timer, NULL, 0);
|
-setup_timer(&_E._timer, NULL, (_cast_data)&_E);
+timer_setup(&_E._timer, NULL, 0);
)
@change_timer_function_usage@
expression _E;
identifier _timer;
struct timer_list _stl;
identifier _callback;
type _cast_func, _cast_data;
@@
(
-setup_timer(&_E->_timer, _callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, &_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, _E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, &_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)_E);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, (_cast_func)&_callback, (_cast_data)&_E);
+timer_setup(&_E._timer, _callback, 0);
|
_E->_timer@_stl.function = _callback;
|
_E->_timer@_stl.function = &_callback;
|
_E->_timer@_stl.function = (_cast_func)_callback;
|
_E->_timer@_stl.function = (_cast_func)&_callback;
|
_E._timer@_stl.function = _callback;
|
_E._timer@_stl.function = &_callback;
|
_E._timer@_stl.function = (_cast_func)_callback;
|
_E._timer@_stl.function = (_cast_func)&_callback;
)
// callback(unsigned long arg)
@change_callback_handle_cast
depends on change_timer_function_usage@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
identifier _handle;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
(
... when != _origarg
_handletype *_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(_handletype *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
|
... when != _origarg
_handletype *_handle;
... when != _handle
_handle =
-(void *)_origarg;
+from_timer(_handle, t, _timer);
... when != _origarg
)
}
// callback(unsigned long arg) without existing variable
@change_callback_handle_cast_no_arg
depends on change_timer_function_usage &&
!change_callback_handle_cast@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _origtype;
identifier _origarg;
type _handletype;
@@
void _callback(
-_origtype _origarg
+struct timer_list *t
)
{
+ _handletype *_origarg = from_timer(_origarg, t, _timer);
+
... when != _origarg
- (_handletype *)_origarg
+ _origarg
... when != _origarg
}
// Avoid already converted callbacks.
@match_callback_converted
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier t;
@@
void _callback(struct timer_list *t)
{ ... }
// callback(struct something *handle)
@change_callback_handle_arg
depends on change_timer_function_usage &&
!match_callback_converted &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
@@
void _callback(
-_handletype *_handle
+struct timer_list *t
)
{
+ _handletype *_handle = from_timer(_handle, t, _timer);
...
}
// If change_callback_handle_arg ran on an empty function, remove
// the added handler.
@unchange_callback_handle_arg
depends on change_timer_function_usage &&
change_callback_handle_arg@
identifier change_timer_function_usage._callback;
identifier change_timer_function_usage._timer;
type _handletype;
identifier _handle;
identifier t;
@@
void _callback(struct timer_list *t)
{
- _handletype *_handle = from_timer(_handle, t, _timer);
}
// We only want to refactor the setup_timer() data argument if we've found
// the matching callback. This undoes changes in change_timer_function_usage.
@unchange_timer_function_usage
depends on change_timer_function_usage &&
!change_callback_handle_cast &&
!change_callback_handle_cast_no_arg &&
!change_callback_handle_arg@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type change_timer_function_usage._cast_data;
@@
(
-timer_setup(&_E->_timer, _callback, 0);
+setup_timer(&_E->_timer, _callback, (_cast_data)_E);
|
-timer_setup(&_E._timer, _callback, 0);
+setup_timer(&_E._timer, _callback, (_cast_data)&_E);
)
// If we fixed a callback from a .function assignment, fix the
// assignment cast now.
@change_timer_function_assignment
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression change_timer_function_usage._E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_func;
typedef TIMER_FUNC_TYPE;
@@
(
_E->_timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E->_timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-&_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)_callback
+(TIMER_FUNC_TYPE)_callback
;
|
_E._timer.function =
-(_cast_func)&_callback
+(TIMER_FUNC_TYPE)_callback
;
)
// Sometimes timer functions are called directly. Replace matched args.
@change_timer_function_calls
depends on change_timer_function_usage &&
(change_callback_handle_cast ||
change_callback_handle_cast_no_arg ||
change_callback_handle_arg)@
expression _E;
identifier change_timer_function_usage._timer;
identifier change_timer_function_usage._callback;
type _cast_data;
@@
_callback(
(
-(_cast_data)_E
+&_E->_timer
|
-(_cast_data)&_E
+&_E._timer
|
-_E
+&_E->_timer
)
)
// If a timer has been configured without a data argument, it can be
// converted without regard to the callback argument, since it is unused.
@match_timer_function_unused_data@
expression _E;
identifier _timer;
identifier _callback;
@@
(
-setup_timer(&_E->_timer, _callback, 0);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0L);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E->_timer, _callback, 0UL);
+timer_setup(&_E->_timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0L);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_E._timer, _callback, 0UL);
+timer_setup(&_E._timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0L);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(&_timer, _callback, 0UL);
+timer_setup(&_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0L);
+timer_setup(_timer, _callback, 0);
|
-setup_timer(_timer, _callback, 0UL);
+timer_setup(_timer, _callback, 0);
)
@change_callback_unused_data
depends on match_timer_function_unused_data@
identifier match_timer_function_unused_data._callback;
type _origtype;
identifier _origarg;
@@
void _callback(
-_origtype _origarg
+struct timer_list *unused
)
{
... when != _origarg
}
Signed-off-by: Kees Cook <keescook@chromium.org>
2017-10-17 00:43:17 +03:00
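As a concrete illustration of the conversion performed by the script above, here is a minimal before/after sketch; struct foo, its members, and the callback names are hypothetical, not code from this file:

#include <linux/timer.h>

struct foo {
	struct timer_list timer;
	int pending;
};

/* Old API: the timer carried an opaque unsigned long that the callback
 * cast back to the owning object.
 */
static void foo_timeout_old(unsigned long data)
{
	struct foo *f = (struct foo *)data;

	f->pending = 0;
}
/* setup paired with the old callback:
 *	setup_timer(&f->timer, foo_timeout_old, (unsigned long)f);
 */

/* New API: the callback receives the timer itself and recovers the
 * containing object with from_timer() (a container_of() wrapper), so the
 * data argument and the cast disappear.
 */
static void foo_timeout_new(struct timer_list *t)
{
	struct foo *f = from_timer(f, t, timer);

	f->pending = 0;
}
/* setup paired with the new callback:
 *	timer_setup(&f->timer, foo_timeout_new, 0);
 */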
|
|
|
timer_setup(&idev->mc_gq_timer, mld_gq_timer_expire, 0);
|
2005-04-17 02:20:36 +04:00
|
|
|
idev->mc_tomb = NULL;
|
|
|
|
idev->mc_ifc_count = 0;
|
|
|
|
timer_setup(&idev->mc_ifc_timer, mld_ifc_timer_expire, 0);
|
|
|
|
timer_setup(&idev->mc_dad_timer, mld_dad_timer_expire, 0);
|
2014-09-02 17:49:25 +04:00
|
|
|
ipv6_mc_reset(idev);
|
2005-04-17 02:20:36 +04:00
|
|
|
write_unlock_bh(&idev->lock);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Device is about to be destroyed: clean up.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void ipv6_mc_destroy_dev(struct inet6_dev *idev)
|
|
|
|
{
|
|
|
|
struct ifmcaddr6 *i;
|
|
|
|
|
|
|
|
/* Deactivate timers */
|
|
|
|
ipv6_mc_down(idev);
|
2017-01-12 16:19:37 +03:00
|
|
|
mld_clear_delrec(idev);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/* Delete all-nodes address. */
|
|
|
|
/* We cannot call ipv6_dev_mc_dec() directly, our caller in
|
|
|
|
* addrconf.c has NULL'd out dev->ip6_ptr so in6_dev_get() will
|
|
|
|
* fail.
|
|
|
|
*/
|
2008-04-10 10:42:11 +04:00
|
|
|
__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allnodes);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2008-04-10 10:42:11 +04:00
|
|
|
if (idev->cnf.forwarding)
|
|
|
|
__ipv6_dev_mc_dec(idev, &in6addr_linklocal_allrouters);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
write_lock_bh(&idev->lock);
|
|
|
|
while ((i = idev->mc_list) != NULL) {
|
|
|
|
idev->mc_list = i->next;
|
|
|
|
|
2017-01-12 16:19:37 +03:00
|
|
|
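/* Drop idev->lock while the entry is torn down and freed; retake it
 * before examining the next list node.
 */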
write_unlock_bh(&idev->lock);
|
2020-06-11 10:57:50 +03:00
|
|
|
ip6_mc_clear_src(i);
|
2005-04-17 02:20:36 +04:00
|
|
|
ma_put(i);
|
|
|
|
write_lock_bh(&idev->lock);
|
|
|
|
}
|
|
|
|
write_unlock_bh(&idev->lock);
|
|
|
|
}
|
|
|
|
|
2017-03-28 21:49:16 +03:00
|
|
|
static void ipv6_mc_rejoin_groups(struct inet6_dev *idev)
|
|
|
|
{
|
|
|
|
struct ifmcaddr6 *pmc;
|
|
|
|
|
|
|
|
ASSERT_RTNL();
|
|
|
|
|
|
|
|
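/* In MLDv1 mode each group must be rejoined individually; in MLDv2 a
 * single report covering every group is sent (mld_send_report() with a
 * NULL group argument).
 */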
if (mld_in_v1_mode(idev)) {
|
|
|
|
read_lock_bh(&idev->lock);
|
|
|
|
for (pmc = idev->mc_list; pmc; pmc = pmc->next)
|
|
|
|
igmp6_join_group(pmc);
|
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
} else
|
|
|
|
mld_send_report(idev, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int ipv6_mc_netdev_event(struct notifier_block *this,
|
|
|
|
unsigned long event,
|
|
|
|
void *ptr)
|
|
|
|
{
|
|
|
|
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
|
|
|
|
struct inet6_dev *idev = __in6_dev_get(dev);
|
|
|
|
|
|
|
|
switch (event) {
|
|
|
|
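/* NETDEV_RESEND_IGMP is raised, e.g. by bonding on failover, so that
 * multicast memberships get re-announced on the newly active device.
 */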
case NETDEV_RESEND_IGMP:
|
|
|
|
if (idev)
|
|
|
|
ipv6_mc_rejoin_groups(idev);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
return NOTIFY_DONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct notifier_block igmp6_netdev_notifier = {
|
|
|
|
.notifier_call = ipv6_mc_netdev_event,
|
|
|
|
};
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
#ifdef CONFIG_PROC_FS
|
|
|
|
struct igmp6_mc_iter_state {
|
2008-03-07 22:16:55 +03:00
|
|
|
struct seq_net_private p;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct net_device *dev;
|
|
|
|
struct inet6_dev *idev;
|
|
|
|
};
|
|
|
|
|
|
|
|
#define igmp6_mc_seq_private(seq) ((struct igmp6_mc_iter_state *)(seq)->private)
|
|
|
|
|
|
|
|
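/* Walk the netdevs under RCU; on a hit, return with the matching
 * idev->lock read-held.  The lock is handed along from element to
 * element by igmp6_mc_get_next() and finally released in
 * igmp6_mc_seq_stop().
 */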
static inline struct ifmcaddr6 *igmp6_mc_get_first(struct seq_file *seq)
|
|
|
|
{
|
|
|
|
struct ifmcaddr6 *im = NULL;
|
|
|
|
struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
|
2008-03-25 20:36:06 +03:00
|
|
|
struct net *net = seq_file_net(seq);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-05-04 02:13:45 +04:00
|
|
|
state->idev = NULL;
|
2009-11-11 20:34:30 +03:00
|
|
|
for_each_netdev_rcu(net, state->dev) {
|
2005-04-17 02:20:36 +04:00
|
|
|
struct inet6_dev *idev;
|
2009-11-11 20:34:30 +03:00
|
|
|
idev = __in6_dev_get(state->dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!idev)
|
|
|
|
continue;
|
|
|
|
read_lock_bh(&idev->lock);
|
|
|
|
im = idev->mc_list;
|
|
|
|
if (im) {
|
|
|
|
state->idev = idev;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
}
|
|
|
|
return im;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ifmcaddr6 *igmp6_mc_get_next(struct seq_file *seq, struct ifmcaddr6 *im)
|
|
|
|
{
|
|
|
|
struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
|
|
|
|
|
|
|
|
im = im->next;
|
|
|
|
while (!im) {
|
2015-03-29 16:00:05 +03:00
|
|
|
if (likely(state->idev))
|
2005-04-17 02:20:36 +04:00
|
|
|
read_unlock_bh(&state->idev->lock);
|
2009-11-11 20:34:30 +03:00
|
|
|
|
|
|
|
state->dev = next_net_device_rcu(state->dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!state->dev) {
|
|
|
|
state->idev = NULL;
|
|
|
|
break;
|
|
|
|
}
|
2009-11-11 20:34:30 +03:00
|
|
|
state->idev = __in6_dev_get(state->dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!state->idev)
|
|
|
|
continue;
|
|
|
|
read_lock_bh(&state->idev->lock);
|
|
|
|
im = state->idev->mc_list;
|
|
|
|
}
|
|
|
|
return im;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ifmcaddr6 *igmp6_mc_get_idx(struct seq_file *seq, loff_t pos)
|
|
|
|
{
|
|
|
|
struct ifmcaddr6 *im = igmp6_mc_get_first(seq);
|
|
|
|
if (im)
|
|
|
|
while (pos && (im = igmp6_mc_get_next(seq, im)) != NULL)
|
|
|
|
--pos;
|
|
|
|
return pos ? NULL : im;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *igmp6_mc_seq_start(struct seq_file *seq, loff_t *pos)
|
2009-11-11 20:34:30 +03:00
|
|
|
__acquires(RCU)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2009-11-11 20:34:30 +03:00
|
|
|
rcu_read_lock();
|
2005-04-17 02:20:36 +04:00
|
|
|
return igmp6_mc_get_idx(seq, *pos);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *igmp6_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
|
|
|
{
|
2009-11-11 20:34:30 +03:00
|
|
|
struct ifmcaddr6 *im = igmp6_mc_get_next(seq, v);
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
++*pos;
|
|
|
|
return im;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void igmp6_mc_seq_stop(struct seq_file *seq, void *v)
|
2009-11-11 20:34:30 +03:00
|
|
|
__releases(RCU)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
|
2009-11-11 20:34:30 +03:00
|
|
|
|
2015-03-29 16:00:05 +03:00
|
|
|
if (likely(state->idev)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
read_unlock_bh(&state->idev->lock);
|
|
|
|
state->idev = NULL;
|
|
|
|
}
|
|
|
|
state->dev = NULL;
|
2009-11-11 20:34:30 +03:00
|
|
|
rcu_read_unlock();
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
|
|
|
|
{
|
|
|
|
struct ifmcaddr6 *im = (struct ifmcaddr6 *)v;
|
|
|
|
struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
|
|
|
|
|
|
|
|
seq_printf(seq,
|
2008-10-29 22:50:24 +03:00
|
|
|
"%-4d %-15s %pi6 %5d %08X %ld\n",
|
2005-04-17 02:20:36 +04:00
|
|
|
state->dev->ifindex, state->dev->name,
|
2008-10-29 02:05:40 +03:00
|
|
|
&im->mca_addr,
|
2005-04-17 02:20:36 +04:00
|
|
|
im->mca_users, im->mca_flags,
|
|
|
|
(im->mca_flags&MAF_TIMER_RUNNING) ?
|
|
|
|
jiffies_to_clock_t(im->mca_timer.expires-jiffies) : 0);
|
|
|
|
return 0;
|
|
|
|
}
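For reference, a single /proc/net/igmp6 line rendered by the format string above might look like the following (values are illustrative; %pi6 prints the group address as raw hex without colons):

/*
 * Idx  Device          Group address                    Users Flags    Timer
 * 1    lo              ff020000000000000000000000000001     1 0000000C 0
 */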
|
|
|
|
|
2007-07-11 10:07:31 +04:00
|
|
|
static const struct seq_operations igmp6_mc_seq_ops = {
|
2005-04-17 02:20:36 +04:00
|
|
|
.start = igmp6_mc_seq_start,
|
|
|
|
.next = igmp6_mc_seq_next,
|
|
|
|
.stop = igmp6_mc_seq_stop,
|
|
|
|
.show = igmp6_mc_seq_show,
|
|
|
|
};
|
|
|
|
|
|
|
|
struct igmp6_mcf_iter_state {
|
2008-03-07 22:16:55 +03:00
|
|
|
struct seq_net_private p;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct net_device *dev;
|
|
|
|
struct inet6_dev *idev;
|
|
|
|
struct ifmcaddr6 *im;
|
|
|
|
};
|
|
|
|
|
|
|
|
#define igmp6_mcf_seq_private(seq) ((struct igmp6_mcf_iter_state *)(seq)->private)
|
|
|
|
|
|
|
|
static inline struct ip6_sf_list *igmp6_mcf_get_first(struct seq_file *seq)
|
|
|
|
{
|
|
|
|
struct ip6_sf_list *psf = NULL;
|
|
|
|
struct ifmcaddr6 *im = NULL;
|
|
|
|
struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
|
2008-03-25 20:36:06 +03:00
|
|
|
struct net *net = seq_file_net(seq);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-05-04 02:13:45 +04:00
|
|
|
state->idev = NULL;
|
|
|
|
state->im = NULL;
|
2009-11-11 20:34:30 +03:00
|
|
|
for_each_netdev_rcu(net, state->dev) {
|
2005-04-17 02:20:36 +04:00
|
|
|
struct inet6_dev *idev;
|
2009-11-11 20:34:30 +03:00
|
|
|
idev = __in6_dev_get(state->dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (unlikely(idev == NULL))
|
|
|
|
continue;
|
|
|
|
read_lock_bh(&idev->lock);
|
|
|
|
im = idev->mc_list;
|
2015-03-29 16:00:05 +03:00
|
|
|
if (likely(im)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
spin_lock_bh(&im->mca_lock);
|
|
|
|
psf = im->mca_sources;
|
2015-03-29 16:00:05 +03:00
|
|
|
if (likely(psf)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
state->im = im;
|
|
|
|
state->idev = idev;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
spin_unlock_bh(&im->mca_lock);
|
|
|
|
}
|
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
}
|
|
|
|
return psf;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ip6_sf_list *igmp6_mcf_get_next(struct seq_file *seq, struct ip6_sf_list *psf)
|
|
|
|
{
|
|
|
|
struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
|
|
|
|
|
|
|
|
psf = psf->sf_next;
|
|
|
|
while (!psf) {
|
|
|
|
spin_unlock_bh(&state->im->mca_lock);
|
|
|
|
state->im = state->im->next;
|
|
|
|
while (!state->im) {
|
2015-03-29 16:00:05 +03:00
|
|
|
if (likely(state->idev))
|
2005-04-17 02:20:36 +04:00
|
|
|
read_unlock_bh(&state->idev->lock);
|
2009-11-11 20:34:30 +03:00
|
|
|
|
|
|
|
state->dev = next_net_device_rcu(state->dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!state->dev) {
|
|
|
|
state->idev = NULL;
|
|
|
|
goto out;
|
|
|
|
}
|
2009-11-11 20:34:30 +03:00
|
|
|
state->idev = __in6_dev_get(state->dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!state->idev)
|
|
|
|
continue;
|
|
|
|
read_lock_bh(&state->idev->lock);
|
|
|
|
state->im = state->idev->mc_list;
|
|
|
|
}
|
|
|
|
if (!state->im)
|
|
|
|
break;
|
|
|
|
spin_lock_bh(&state->im->mca_lock);
|
|
|
|
psf = state->im->mca_sources;
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
return psf;
|
|
|
|
}
|
|
|
|
|
|
|
|
static struct ip6_sf_list *igmp6_mcf_get_idx(struct seq_file *seq, loff_t pos)
|
|
|
|
{
|
|
|
|
struct ip6_sf_list *psf = igmp6_mcf_get_first(seq);
|
|
|
|
if (psf)
|
|
|
|
while (pos && (psf = igmp6_mcf_get_next(seq, psf)) != NULL)
|
|
|
|
--pos;
|
|
|
|
return pos ? NULL : psf;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *igmp6_mcf_seq_start(struct seq_file *seq, loff_t *pos)
|
2009-11-11 20:34:30 +03:00
|
|
|
__acquires(RCU)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2009-11-11 20:34:30 +03:00
|
|
|
rcu_read_lock();
|
2005-04-17 02:20:36 +04:00
|
|
|
return *pos ? igmp6_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void *igmp6_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
|
|
|
|
{
|
|
|
|
struct ip6_sf_list *psf;
|
|
|
|
if (v == SEQ_START_TOKEN)
|
|
|
|
psf = igmp6_mcf_get_first(seq);
|
|
|
|
else
|
|
|
|
psf = igmp6_mcf_get_next(seq, v);
|
|
|
|
++*pos;
|
|
|
|
return psf;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void igmp6_mcf_seq_stop(struct seq_file *seq, void *v)
|
2009-11-11 20:34:30 +03:00
|
|
|
__releases(RCU)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
|
2015-03-29 16:00:05 +03:00
|
|
|
if (likely(state->im)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
spin_unlock_bh(&state->im->mca_lock);
|
|
|
|
state->im = NULL;
|
|
|
|
}
|
2015-03-29 16:00:05 +03:00
|
|
|
if (likely(state->idev)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
read_unlock_bh(&state->idev->lock);
|
|
|
|
state->idev = NULL;
|
|
|
|
}
|
|
|
|
state->dev = NULL;
|
2009-11-11 20:34:30 +03:00
|
|
|
rcu_read_unlock();
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
|
|
|
|
{
|
|
|
|
struct ip6_sf_list *psf = (struct ip6_sf_list *)v;
|
|
|
|
struct igmp6_mcf_iter_state *state = igmp6_mcf_seq_private(seq);
|
|
|
|
|
|
|
|
if (v == SEQ_START_TOKEN) {
|
2014-11-05 02:37:03 +03:00
|
|
|
seq_puts(seq, "Idx Device Multicast Address Source Address INC EXC\n");
|
2005-04-17 02:20:36 +04:00
|
|
|
} else {
|
|
|
|
seq_printf(seq,
|
2008-10-29 22:50:24 +03:00
|
|
|
"%3d %6.6s %pi6 %pi6 %6lu %6lu\n",
|
2005-04-17 02:20:36 +04:00
|
|
|
state->dev->ifindex, state->dev->name,
|
2008-10-29 02:05:40 +03:00
|
|
|
&state->im->mca_addr,
|
|
|
|
&psf->sf_addr,
|
2005-04-17 02:20:36 +04:00
|
|
|
psf->sf_count[MCAST_INCLUDE],
|
|
|
|
psf->sf_count[MCAST_EXCLUDE]);
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
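A /proc/net/mcfilter6 line rendered by this format might look like the following (illustrative values; both addresses are printed as raw hex by %pi6):

/*
 *   2   eth0 ff0200000000000000000001ff000001 20010db8000000000000000000000001      1      0
 */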
|
|
|
|
|
2007-07-11 10:07:31 +04:00
|
|
|
static const struct seq_operations igmp6_mcf_seq_ops = {
|
2005-04-17 02:20:36 +04:00
|
|
|
.start = igmp6_mcf_seq_start,
|
|
|
|
.next = igmp6_mcf_seq_next,
|
|
|
|
.stop = igmp6_mcf_seq_stop,
|
|
|
|
.show = igmp6_mcf_seq_show,
|
|
|
|
};
|
|
|
|
|
2010-01-17 06:35:32 +03:00
|
|
|
static int __net_init igmp6_proc_init(struct net *net)
|
2008-03-21 14:10:53 +03:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
|
|
|
err = -ENOMEM;
|
2018-04-10 20:42:55 +03:00
|
|
|
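/* Create /proc/net/igmp6 and /proc/net/mcfilter6; if the second entry
 * fails, the first is removed again via the unwind label below.
 */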
if (!proc_create_net("igmp6", 0444, net->proc_net, &igmp6_mc_seq_ops,
|
|
|
|
sizeof(struct igmp6_mc_iter_state)))
|
2008-03-21 14:10:53 +03:00
|
|
|
goto out;
|
2018-04-10 20:42:55 +03:00
|
|
|
if (!proc_create_net("mcfilter6", 0444, net->proc_net,
|
|
|
|
&igmp6_mcf_seq_ops,
|
|
|
|
sizeof(struct igmp6_mcf_iter_state)))
|
2008-03-21 14:10:53 +03:00
|
|
|
goto out_proc_net_igmp6;
|
|
|
|
|
|
|
|
err = 0;
|
|
|
|
out:
|
|
|
|
return err;
|
|
|
|
|
|
|
|
out_proc_net_igmp6:
|
2013-02-18 05:34:56 +04:00
|
|
|
remove_proc_entry("igmp6", net->proc_net);
|
2008-03-21 14:10:53 +03:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2010-01-17 06:35:32 +03:00
|
|
|
static void __net_exit igmp6_proc_exit(struct net *net)
|
2008-03-21 14:10:53 +03:00
|
|
|
{
|
2013-02-18 05:34:56 +04:00
|
|
|
remove_proc_entry("mcfilter6", net->proc_net);
|
|
|
|
remove_proc_entry("igmp6", net->proc_net);
|
2008-03-21 14:10:53 +03:00
|
|
|
}
|
|
|
|
#else
|
2010-01-17 06:35:32 +03:00
|
|
|
static inline int igmp6_proc_init(struct net *net)
|
2008-03-21 14:10:53 +03:00
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
2010-01-17 06:35:32 +03:00
|
|
|
static inline void igmp6_proc_exit(struct net *net)
|
2008-03-21 14:10:53 +03:00
|
|
|
{
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
#endif
|
|
|
|
|
2010-01-17 06:35:32 +03:00
|
|
|
static int __net_init igmp6_net_init(struct net *net)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
int err;
|
|
|
|
|
2008-04-04 01:31:03 +04:00
|
|
|
err = inet_ctl_sock_create(&net->ipv6.igmp_sk, PF_INET6,
|
|
|
|
SOCK_RAW, IPPROTO_ICMPV6, net);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (err < 0) {
|
2012-05-15 18:11:53 +04:00
|
|
|
pr_err("Failed to initialize the IGMP6 control socket (err %d)\n",
|
2005-04-17 02:20:36 +04:00
|
|
|
err);
|
2008-03-07 22:16:55 +03:00
|
|
|
goto out;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2008-04-04 01:31:03 +04:00
|
|
|
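/* MLD messages are always sent with an IPv6 hop limit of 1
 * (RFC 2710, RFC 3810).
 */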
inet6_sk(net->ipv6.igmp_sk)->hop_limit = 1;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2015-02-25 20:58:35 +03:00
|
|
|
err = inet_ctl_sock_create(&net->ipv6.mc_autojoin_sk, PF_INET6,
|
|
|
|
SOCK_RAW, IPPROTO_ICMPV6, net);
|
|
|
|
if (err < 0) {
|
|
|
|
pr_err("Failed to initialize the IGMP6 autojoin socket (err %d)\n",
|
|
|
|
err);
|
|
|
|
goto out_sock_create;
|
|
|
|
}
|
|
|
|
|
2008-03-21 14:10:53 +03:00
|
|
|
err = igmp6_proc_init(net);
|
|
|
|
if (err)
|
2015-02-25 20:58:35 +03:00
|
|
|
goto out_sock_create_autojoin;
|
|
|
|
|
|
|
|
return 0;
|
2008-03-07 22:16:55 +03:00
|
|
|
|
2015-02-25 20:58:35 +03:00
|
|
|
out_sock_create_autojoin:
|
|
|
|
inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
|
2008-03-07 22:16:55 +03:00
|
|
|
out_sock_create:
|
2008-04-04 01:31:03 +04:00
|
|
|
inet_ctl_sock_destroy(net->ipv6.igmp_sk);
|
2015-02-25 20:58:35 +03:00
|
|
|
out:
|
|
|
|
return err;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2010-01-17 06:35:32 +03:00
|
|
|
static void __net_exit igmp6_net_exit(struct net *net)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2008-04-04 01:31:03 +04:00
|
|
|
inet_ctl_sock_destroy(net->ipv6.igmp_sk);
|
2015-02-25 20:58:35 +03:00
|
|
|
inet_ctl_sock_destroy(net->ipv6.mc_autojoin_sk);
|
2008-03-21 14:10:53 +03:00
|
|
|
igmp6_proc_exit(net);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2008-03-07 22:16:55 +03:00
|
|
|
|
|
|
|
static struct pernet_operations igmp6_net_ops = {
|
|
|
|
.init = igmp6_net_init,
|
|
|
|
.exit = igmp6_net_exit,
|
|
|
|
};
|
|
|
|
|
|
|
|
int __init igmp6_init(void)
|
|
|
|
{
|
|
|
|
return register_pernet_subsys(&igmp6_net_ops);
|
|
|
|
}
|
|
|
|
|
2017-03-28 21:49:16 +03:00
|
|
|
int __init igmp6_late_init(void)
|
|
|
|
{
|
|
|
|
return register_netdevice_notifier(&igmp6_netdev_notifier);
|
|
|
|
}
|
|
|
|
|
2008-03-07 22:16:55 +03:00
|
|
|
void igmp6_cleanup(void)
|
|
|
|
{
|
|
|
|
unregister_pernet_subsys(&igmp6_net_ops);
|
|
|
|
}
|
2017-03-28 21:49:16 +03:00
|
|
|
|
|
|
|
void igmp6_late_cleanup(void)
|
|
|
|
{
|
|
|
|
unregister_netdevice_notifier(&igmp6_netdev_notifier);
|
|
|
|
}
|