WSL2-Linux-Kernel/net/ipv4/ipmr.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IP multicast routing support for mrouted 3.6/3.8
 *
 * (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *     Linux Consultancy and Custom Driver Development
 *
 * Fixes:
 * Michael Chastain         : Incorrect size of copying.
 * Alan Cox                 : Added the cache manager code
 * Alan Cox                 : Fixed the clone/copy bug and device race.
 * Mike McLagan             : Routing by source
 * Malcolm Beattie          : Buffer handling fixes.
 * Alexey Kuznetsov         : Double buffer free and other fixes.
 * SVR Anand                : Fixed several multicast bugs and problems.
 * Alexey Kuznetsov         : Status, optimisations and more.
 * Brad Parker              : Better behaviour on mrouted upcall
 *                            overflow.
 * Carlos Picoto            : PIMv1 Support
 * Pavlin Ivanov Radoslavov : PIMv2 Registers must checksum only PIM header.
 *                            Relax this requirement to work with older peers.
 */
#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <linux/rhashtable.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/rtnh.h>
#include <linux/nospec.h>
struct ipmr_rule {
        struct fib_rule common;
};

struct ipmr_result {
        struct mr_table *mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_SPINLOCK(mrt_lock);

static struct net_device *vif_dev_read(const struct vif_device *vif)
{
        return rcu_dereference(vif->dev);
}

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */
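/* For example, the forwarding fast path can look up a resolved entry
 * with only rcu_read_lock() held, roughly (a sketch; ipmr_cache_find()
 * is the resolved-cache lookup helper defined later in this file):
 *
 *        rcu_read_lock();
 *        cache = ipmr_cache_find(mrt, iph->saddr, iph->daddr);
 *        if (cache)
 *                ...forward without taking mrt_lock...
 *        rcu_read_unlock();
 */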
static struct kmem_cache *mrt_cachep __ro_after_init;

static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
                          struct net_device *dev, struct sk_buff *skb,
                          struct mfc_cache *cache, int local);
static int ipmr_cache_report(const struct mr_table *mrt,
                             struct sk_buff *pkt, vifi_t vifi, int assert);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
                                 int cmd);
static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, int flags);
static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
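/* Iterate over all multicast routing tables in a netns. The extra
 * lockdep condition lets this run either under rcu_read_lock() or with
 * RTNL held (and trivially on an empty list) without a false warning.
 */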
#define ipmr_for_each_table(mrt, net)                                  \
        list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list,       \
                                lockdep_rtnl_is_held() ||              \
                                list_empty(&net->ipv4.mr_tables))

static struct mr_table *ipmr_mr_table_iter(struct net *net,
                                           struct mr_table *mrt)
{
        struct mr_table *ret;

        if (!mrt)
                ret = list_entry_rcu(net->ipv4.mr_tables.next,
                                     struct mr_table, list);
        else
                ret = list_entry_rcu(mrt->list.next,
                                     struct mr_table, list);
        if (&ret->list == &net->ipv4.mr_tables)
                return NULL;
        return ret;
}

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        struct mr_table *mrt;

        ipmr_for_each_table(mrt, net) {
                if (mrt->id == id)
                        return mrt;
        }
        return NULL;
}
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
        int err;
        struct ipmr_result res;
        struct fib_lookup_arg arg = {
                .result = &res,
                .flags = FIB_LOOKUP_NOREF,
        };

        /* update flow if oif or iif point to device enslaved to l3mdev */
        l3mdev_update_flow(net, flowi4_to_flowi(flp4));

        err = fib_rules_lookup(net->ipv4.mr_rules_ops,
                               flowi4_to_flowi(flp4), 0, &arg);
        if (err < 0)
                return err;
        *mrt = res.mrt;
        return 0;
}
static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
                            int flags, struct fib_lookup_arg *arg)
{
        struct ipmr_result *res = arg->result;
        struct mr_table *mrt;

        switch (rule->action) {
        case FR_ACT_TO_TBL:
                break;
        case FR_ACT_UNREACHABLE:
                return -ENETUNREACH;
        case FR_ACT_PROHIBIT:
                return -EACCES;
        case FR_ACT_BLACKHOLE:
        default:
                return -EINVAL;
        }

        arg->table = fib_rule_get_table(rule, arg);

        mrt = ipmr_get_table(rule->fr_net, arg->table);
        if (!mrt)
                return -EAGAIN;
        res->mrt = mrt;
        return 0;
}

static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
        return 1;
}

static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
                               struct fib_rule_hdr *frh, struct nlattr **tb,
                               struct netlink_ext_ack *extack)
{
        return 0;
}

static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
                             struct nlattr **tb)
{
        return 1;
}

static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
                          struct fib_rule_hdr *frh)
{
        frh->dst_len = 0;
        frh->src_len = 0;
        frh->tos = 0;
        return 0;
}
static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
        .family         = RTNL_FAMILY_IPMR,
        .rule_size      = sizeof(struct ipmr_rule),
        .addr_size      = sizeof(u32),
        .action         = ipmr_rule_action,
        .match          = ipmr_rule_match,
        .configure      = ipmr_rule_configure,
        .compare        = ipmr_rule_compare,
        .fill           = ipmr_rule_fill,
        .nlgroup        = RTNLGRP_IPV4_RULE,
        .owner          = THIS_MODULE,
};
static int __net_init ipmr_rules_init(struct net *net)
{
        struct fib_rules_ops *ops;
        struct mr_table *mrt;
        int err;

        ops = fib_rules_register(&ipmr_rules_ops_template, net);
        if (IS_ERR(ops))
                return PTR_ERR(ops);

        INIT_LIST_HEAD(&net->ipv4.mr_tables);

        mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        if (IS_ERR(mrt)) {
                err = PTR_ERR(mrt);
                goto err1;
        }

        err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
        if (err < 0)
                goto err2;

        net->ipv4.mr_rules_ops = ops;
        return 0;

err2:
        rtnl_lock();
        ipmr_free_table(mrt);
        rtnl_unlock();
err1:
        fib_rules_unregister(ops);
        return err;
}
static void __net_exit ipmr_rules_exit(struct net *net)
{
        struct mr_table *mrt, *next;

        ASSERT_RTNL();
        list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
                list_del(&mrt->list);
                ipmr_free_table(mrt);
        }
        fib_rules_unregister(net->ipv4.mr_rules_ops);
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
                           struct netlink_ext_ack *extack)
{
        return fib_rules_dump(net, nb, RTNL_FAMILY_IPMR, extack);
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
        return fib_rules_seq_read(net, RTNL_FAMILY_IPMR);
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
        return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT;
}
EXPORT_SYMBOL(ipmr_rule_default);
#else
#define ipmr_for_each_table(mrt, net) \
        for (mrt = net->ipv4.mrt; mrt; mrt = NULL)

static struct mr_table *ipmr_mr_table_iter(struct net *net,
                                           struct mr_table *mrt)
{
        if (!mrt)
                return net->ipv4.mrt;
        return NULL;
}

static struct mr_table *ipmr_get_table(struct net *net, u32 id)
{
        return net->ipv4.mrt;
}

static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
                           struct mr_table **mrt)
{
        *mrt = net->ipv4.mrt;
        return 0;
}

static int __net_init ipmr_rules_init(struct net *net)
{
        struct mr_table *mrt;

        mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
        if (IS_ERR(mrt))
                return PTR_ERR(mrt);
        net->ipv4.mrt = mrt;
        return 0;
}

static void __net_exit ipmr_rules_exit(struct net *net)
{
        ASSERT_RTNL();
        ipmr_free_table(net->ipv4.mrt);
        net->ipv4.mrt = NULL;
}

static int ipmr_rules_dump(struct net *net, struct notifier_block *nb,
                           struct netlink_ext_ack *extack)
{
        return 0;
}

static unsigned int ipmr_rules_seq_read(struct net *net)
{
        return 0;
}

bool ipmr_rule_default(const struct fib_rule *rule)
{
        return true;
}
EXPORT_SYMBOL(ipmr_rule_default);
#endif
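/* rhashtable compare callback: must return 0 on a match, so an entry
 * matches the lookup key only when both the multicast group and the
 * origin address are equal.
 */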
static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
                                const void *ptr)
{
        const struct mfc_cache_cmp_arg *cmparg = arg->key;
        const struct mfc_cache *c = ptr;

        return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
               cmparg->mfc_origin != c->mfc_origin;
}

static const struct rhashtable_params ipmr_rht_params = {
        .head_offset            = offsetof(struct mr_mfc, mnode),
        .key_offset             = offsetof(struct mfc_cache, cmparg),
        .key_len                = sizeof(struct mfc_cache_cmp_arg),
        .nelem_hint             = 3,
        .obj_cmpfn              = ipmr_hash_cmp,
        .automatic_shrinking    = true,
};

static void ipmr_new_table_set(struct mr_table *mrt,
                               struct net *net)
{
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
        list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
#endif
}

static struct mfc_cache_cmp_arg ipmr_mr_table_ops_cmparg_any = {
        .mfc_mcastgrp = htonl(INADDR_ANY),
        .mfc_origin = htonl(INADDR_ANY),
};

static struct mr_table_ops ipmr_mr_table_ops = {
        .rht_params = &ipmr_rht_params,
        .cmparg_any = &ipmr_mr_table_ops_cmparg_any,
};
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
        struct mr_table *mrt;

        /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
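        /* ("pimreg" is 6 characters and an id below 1000000000 prints
         * in at most 9 digits, so 6 + 9 + NUL fits 16 bytes exactly.)
         */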
        if (id != RT_TABLE_DEFAULT && id >= 1000000000)
                return ERR_PTR(-EINVAL);

        mrt = ipmr_get_table(net, id);
        if (mrt)
                return mrt;

        return mr_table_alloc(net, id, &ipmr_mr_table_ops,
                              ipmr_expire_process, ipmr_new_table_set);
}
static void ipmr_free_table(struct mr_table *mrt)
{
        timer_shutdown_sync(&mrt->ipmr_expire_timer);
        mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC |
                                 MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC);
        rhltable_destroy(&mrt->mfc_hash);
        kfree(mrt);
}
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

/* Initialize ipmr pimreg/tunnel in_device */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
        struct in_device *in_dev;

        ASSERT_RTNL();

        in_dev = __in_dev_get_rtnl(dev);
        if (!in_dev)
                return false;
        ipv4_devconf_setall(in_dev);
        neigh_parms_data_state_setall(in_dev->arp_parms);
        IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

        return true;
}
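/* Create a DVMRP tunnel vif: ask the "tunl0" fallback device to add an
 * IPIP tunnel named "dvmrp<vifi>" between the vif's local and remote
 * addresses, then bring the new device up in allmulti mode.
 */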
static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v)
{
        struct net_device *tunnel_dev, *new_dev;
        struct ip_tunnel_parm p = { };
        int err;

        tunnel_dev = __dev_get_by_name(net, "tunl0");
        if (!tunnel_dev)
                goto out;

        p.iph.daddr = v->vifc_rmt_addr.s_addr;
        p.iph.saddr = v->vifc_lcl_addr.s_addr;
        p.iph.version = 4;
        p.iph.ihl = 5;
        p.iph.protocol = IPPROTO_IPIP;
        sprintf(p.name, "dvmrp%d", v->vifc_vifi);

        if (!tunnel_dev->netdev_ops->ndo_tunnel_ctl)
                goto out;
        err = tunnel_dev->netdev_ops->ndo_tunnel_ctl(tunnel_dev, &p,
                                                     SIOCADDTUNNEL);
        if (err)
                goto out;

        new_dev = __dev_get_by_name(net, p.name);
        if (!new_dev)
                goto out;

        new_dev->flags |= IFF_MULTICAST;
        if (!ipmr_init_vif_indev(new_dev))
                goto out_unregister;
        if (dev_open(new_dev, NULL))
                goto out_unregister;
        dev_hold(new_dev);
        err = dev_set_allmulti(new_dev, 1);
        if (err) {
                dev_close(new_dev);
                tunnel_dev->netdev_ops->ndo_tunnel_ctl(tunnel_dev, &p,
                                                       SIOCDELTUNNEL);
                dev_put(new_dev);
                new_dev = ERR_PTR(err);
        }
        return new_dev;

out_unregister:
        unregister_netdevice(new_dev);
out:
        return ERR_PTR(-ENOBUFS);
}
#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
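/* The pimreg device never transmits on the wire: each packet queued on
 * it is handed to the userspace PIM daemon as an IGMPMSG_WHOLEPKT
 * upcall (so the daemon can build REGISTER messages) and then freed.
 */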
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct net *net = dev_net(dev);
        struct mr_table *mrt;
        struct flowi4 fl4 = {
                .flowi4_oif     = dev->ifindex,
                .flowi4_iif     = skb->skb_iif ? : LOOPBACK_IFINDEX,
                .flowi4_mark    = skb->mark,
        };
        int err;

        err = ipmr_fib_lookup(net, &fl4, &mrt);
        if (err < 0) {
                kfree_skb(skb);
                return err;
        }

        DEV_STATS_ADD(dev, tx_bytes, skb->len);
        DEV_STATS_INC(dev, tx_packets);
        rcu_read_lock();

        /* Pairs with WRITE_ONCE() in vif_add() and vif_delete() */
        ipmr_cache_report(mrt, skb, READ_ONCE(mrt->mroute_reg_vif_num),
                          IGMPMSG_WHOLEPKT);

        rcu_read_unlock();
        kfree_skb(skb);
        return NETDEV_TX_OK;
}

static int reg_vif_get_iflink(const struct net_device *dev)
{
        return 0;
}

static const struct net_device_ops reg_vif_netdev_ops = {
        .ndo_start_xmit = reg_vif_xmit,
        .ndo_get_iflink = reg_vif_get_iflink,
};
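/* Note on the MTU below: the extra 8 bytes on top of the outer IP
 * header presumably account for the PIM v2 REGISTER encapsulation
 * (4-byte PIM header plus 4 bytes of register flags).
 */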
static void reg_vif_setup(struct net_device *dev)
{
        dev->type               = ARPHRD_PIMREG;
        dev->mtu                = ETH_DATA_LEN - sizeof(struct iphdr) - 8;
        dev->flags              = IFF_NOARP;
        dev->netdev_ops         = &reg_vif_netdev_ops;
        dev->needs_free_netdev  = true;
        dev->features           |= NETIF_F_NETNS_LOCAL;
}
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
        struct net_device *dev;
        char name[IFNAMSIZ];

        if (mrt->id == RT_TABLE_DEFAULT)
                sprintf(name, "pimreg");
        else
                sprintf(name, "pimreg%u", mrt->id);

        dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
        if (!dev)
                return NULL;

        dev_net_set(dev, net);

        if (register_netdevice(dev)) {
                free_netdev(dev);
                return NULL;
        }

        if (!ipmr_init_vif_indev(dev))
                goto failure;
        if (dev_open(dev, NULL))
                goto failure;
        dev_hold(dev);
        return dev;

failure:
        unregister_netdevice(dev);
        return NULL;
}
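/* A PIM REGISTER packet carries the original multicast packet after
 * the PIM header:
 *
 *        | outer IP | PIM header (pimlen) | inner IP (encap) | data |
 *
 * __pim_rcv() below validates the inner packet and re-injects it on
 * the pimreg device as if it had arrived there.
 */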
/* called with rcu_read_lock() */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
                     unsigned int pimlen)
{
        struct net_device *reg_dev = NULL;
        struct iphdr *encap;
        int vif_num;

        encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
        /* Check that:
         * a. packet is really sent to a multicast group
         * b. packet is not a NULL-REGISTER
         * c. packet is not truncated
         */
        if (!ipv4_is_multicast(encap->daddr) ||
            encap->tot_len == 0 ||
            ntohs(encap->tot_len) + pimlen > skb->len)
                return 1;

        /* Pairs with WRITE_ONCE() in vif_add()/vif_delete() */
        vif_num = READ_ONCE(mrt->mroute_reg_vif_num);
        if (vif_num >= 0)
                reg_dev = vif_dev_read(&mrt->vif_table[vif_num]);
        if (!reg_dev)
                return 1;

        skb->mac_header = skb->network_header;
        skb_pull(skb, (u8 *)encap - skb->data);
        skb_reset_network_header(skb);
        skb->protocol = htons(ETH_P_IP);
        skb->ip_summed = CHECKSUM_NONE;

        skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

        netif_rx(skb);

        return NET_RX_SUCCESS;
}
#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
{
        return NULL;
}
#endif
static int call_ipmr_vif_entry_notifiers(struct net *net,
                                         enum fib_event_type event_type,
                                         struct vif_device *vif,
                                         struct net_device *vif_dev,
                                         vifi_t vif_index, u32 tb_id)
{
        return mr_call_vif_notifiers(net, RTNL_FAMILY_IPMR, event_type,
                                     vif, vif_dev, vif_index, tb_id,
                                     &net->ipv4.ipmr_seq);
}

static int call_ipmr_mfc_entry_notifiers(struct net *net,
                                         enum fib_event_type event_type,
                                         struct mfc_cache *mfc, u32 tb_id)
{
        return mr_call_mfc_notifiers(net, RTNL_FAMILY_IPMR, event_type,
                                     &mfc->_c, tb_id, &net->ipv4.ipmr_seq);
}
/**
 * vif_delete - Delete a VIF entry
 * @mrt: Table to delete from
 * @vifi: VIF identifier to delete
 * @notify: Set to 1, if the caller is a notifier_call
 * @head: if unregistering the VIF, place it on this queue
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
                      struct list_head *head)
{
        struct net *net = read_pnet(&mrt->net);
        struct vif_device *v;
        struct net_device *dev;
        struct in_device *in_dev;

        if (vifi < 0 || vifi >= mrt->maxvif)
                return -EADDRNOTAVAIL;

        v = &mrt->vif_table[vifi];
        dev = rtnl_dereference(v->dev);
        if (!dev)
                return -EADDRNOTAVAIL;

        spin_lock(&mrt_lock);
        call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_DEL, v, dev,
                                      vifi, mrt->id);
        RCU_INIT_POINTER(v->dev, NULL);

        if (vifi == mrt->mroute_reg_vif_num) {
                /* Pairs with READ_ONCE() in ipmr_cache_report() and reg_vif_xmit() */
                WRITE_ONCE(mrt->mroute_reg_vif_num, -1);
        }
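        /* If the highest-numbered vif was just removed, shrink maxvif
         * back down to one past the highest slot still in use.
         */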
        if (vifi + 1 == mrt->maxvif) {
                int tmp;

                for (tmp = vifi - 1; tmp >= 0; tmp--) {
                        if (VIF_EXISTS(mrt, tmp))
                                break;
                }
                WRITE_ONCE(mrt->maxvif, tmp + 1);
        }

        spin_unlock(&mrt_lock);

        dev_set_allmulti(dev, -1);

        in_dev = __in_dev_get_rtnl(dev);
        if (in_dev) {
                IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
                inet_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
                                            NETCONFA_MC_FORWARDING,
                                            dev->ifindex, &in_dev->cnf);
                ip_rt_multicast_event(in_dev);
        }

        if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
                unregister_netdevice_queue(dev, head);

        netdev_put(dev, &v->dev_tracker);
        return 0;
}
static void ipmr_cache_free_rcu(struct rcu_head *head)
{
        struct mr_mfc *c = container_of(head, struct mr_mfc, rcu);

        kmem_cache_free(mrt_cachep, (struct mfc_cache *)c);
}

static void ipmr_cache_free(struct mfc_cache *c)
{
        call_rcu(&c->_c.rcu, ipmr_cache_free_rcu);
}
/* Destroy an unresolved cache entry, killing queued skbs
 * and reporting error to netlink readers.
 */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
{
        struct net *net = read_pnet(&mrt->net);
        struct sk_buff *skb;
        struct nlmsgerr *e;

        atomic_dec(&mrt->cache_resolve_queue_len);
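        /* skbs with an IP version of 0 are parked netlink queries
         * (queued via ipmr_get_route()); answer those with -ETIMEDOUT,
         * and simply drop real queued data packets.
         */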
        while ((skb = skb_dequeue(&c->_c.mfc_un.unres.unresolved))) {
                if (ip_hdr(skb)->version == 0) {
                        struct nlmsghdr *nlh = skb_pull(skb,
                                                        sizeof(struct iphdr));
                        nlh->nlmsg_type = NLMSG_ERROR;
                        nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
                        skb_trim(skb, nlh->nlmsg_len);
                        e = nlmsg_data(nlh);
                        e->error = -ETIMEDOUT;
                        memset(&e->msg, 0, sizeof(e->msg));

                        rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
                } else {
                        kfree_skb(skb);
                }
        }

        ipmr_cache_free(c);
}
/* Timer process for the unresolved queue. */
2017-10-17 00:43:17 +03:00
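/* Timer handler for the unresolved queue: expire stale cache entries
 * and re-arm while unresolved entries remain.
 */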
static void ipmr_expire_process(struct timer_list *t)
{
struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
struct mr_mfc *c, *next;
unsigned long expires;
unsigned long now;
if (!spin_trylock(&mfc_unres_lock)) {
mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
return;
}
if (list_empty(&mrt->mfc_unres_queue))
goto out;
now = jiffies;
expires = 10*HZ;
list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
if (time_after(c->mfc_un.unres.expires, now)) {
unsigned long interval = c->mfc_un.unres.expires - now;
if (interval < expires)
expires = interval;
continue;
}
list_del(&c->list);
mroute_netlink_event(mrt, (struct mfc_cache *)c, RTM_DELROUTE);
ipmr_destroy_unres(mrt, (struct mfc_cache *)c);
}
if (!list_empty(&mrt->mfc_unres_queue))
mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
out:
spin_unlock(&mfc_unres_lock);
}
/* Fill the list of outgoing interfaces (oifs). Called with mrt_lock held. */
static void ipmr_update_thresholds(struct mr_table *mrt, struct mr_mfc *cache,
unsigned char *ttls)
{
int vifi;
cache->mfc_un.res.minvif = MAXVIFS;
cache->mfc_un.res.maxvif = 0;
memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
for (vifi = 0; vifi < mrt->maxvif; vifi++) {
if (VIF_EXISTS(mrt, vifi) &&
ttls[vifi] && ttls[vifi] < 255) {
cache->mfc_un.res.ttls[vifi] = ttls[vifi];
if (cache->mfc_un.res.minvif > vifi)
cache->mfc_un.res.minvif = vifi;
if (cache->mfc_un.res.maxvif <= vifi)
cache->mfc_un.res.maxvif = vifi + 1;
}
}
cache->mfc_un.res.lastuse = jiffies;
}
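/* Add a virtual interface (VIF) to the table.
 * Called under RTNL from the setsockopt path.
 */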
static int vif_add(struct net *net, struct mr_table *mrt,
struct vifctl *vifc, int mrtsock)
{
struct netdev_phys_item_id ppid = { };
int vifi = vifc->vifc_vifi;
struct vif_device *v = &mrt->vif_table[vifi];
struct net_device *dev;
struct in_device *in_dev;
int err;
/* Is the VIF busy? */
if (VIF_EXISTS(mrt, vifi))
return -EADDRINUSE;
switch (vifc->vifc_flags) {
case VIFF_REGISTER:
if (!ipmr_pimsm_enabled())
return -EINVAL;
/* Special-purpose VIF in PIM:
 * all packets will be sent to the daemon.
 */
if (mrt->mroute_reg_vif_num >= 0)
return -EADDRINUSE;
dev = ipmr_reg_vif(net, mrt);
if (!dev)
return -ENOBUFS;
err = dev_set_allmulti(dev, 1);
if (err) {
unregister_netdevice(dev);
dev_put(dev);
return err;
}
break;
case VIFF_TUNNEL:
dev = ipmr_new_tunnel(net, vifc);
if (IS_ERR(dev))
return PTR_ERR(dev);
break;
case VIFF_USE_IFINDEX:
case 0:
if (vifc->vifc_flags == VIFF_USE_IFINDEX) {
dev = dev_get_by_index(net, vifc->vifc_lcl_ifindex);
if (dev && !__in_dev_get_rtnl(dev)) {
dev_put(dev);
return -EADDRNOTAVAIL;
}
} else {
dev = ip_dev_find(net, vifc->vifc_lcl_addr.s_addr);
}
if (!dev)
return -EADDRNOTAVAIL;
err = dev_set_allmulti(dev, 1);
if (err) {
dev_put(dev);
return err;
}
break;
default:
return -EINVAL;
}
in_dev = __in_dev_get_rtnl(dev);
if (!in_dev) {
dev_put(dev);
return -EADDRNOTAVAIL;
}
IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
inet_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_MC_FORWARDING,
dev->ifindex, &in_dev->cnf);
ip_rt_multicast_event(in_dev);
/* Fill in the VIF structures */
vif_device_init(v, dev, vifc->vifc_rate_limit,
vifc->vifc_threshold,
vifc->vifc_flags | (!mrtsock ? VIFF_STATIC : 0),
(VIFF_TUNNEL | VIFF_REGISTER));
err = dev_get_port_parent_id(dev, &ppid, true);
if (err == 0) {
memcpy(v->dev_parent_id.id, ppid.id, ppid.id_len);
v->dev_parent_id.id_len = ppid.id_len;
} else {
v->dev_parent_id.id_len = 0;
}
v->local = vifc->vifc_lcl_addr.s_addr;
v->remote = vifc->vifc_rmt_addr.s_addr;
/* And finish update writing critical data */
spin_lock(&mrt_lock);
rcu_assign_pointer(v->dev, dev);
netdev_tracker_alloc(dev, &v->dev_tracker, GFP_ATOMIC);
if (v->flags & VIFF_REGISTER) {
/* Pairs with READ_ONCE() in ipmr_cache_report() and reg_vif_xmit() */
WRITE_ONCE(mrt->mroute_reg_vif_num, vifi);
}
if (vifi+1 > mrt->maxvif)
WRITE_ONCE(mrt->maxvif, vifi + 1);
spin_unlock(&mrt_lock);
call_ipmr_vif_entry_notifiers(net, FIB_EVENT_VIF_ADD, v, dev,
vifi, mrt->id);
return 0;
}
/* called with rcu_read_lock() */
static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
__be32 origin,
__be32 mcastgrp)
{
struct mfc_cache_cmp_arg arg = {
.mfc_mcastgrp = mcastgrp,
.mfc_origin = origin
};
return mr_mfc_find(mrt, &arg);
}
/* Look for a (*,G) entry */
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
__be32 mcastgrp, int vifi)
{
struct mfc_cache_cmp_arg arg = {
.mfc_mcastgrp = mcastgrp,
.mfc_origin = htonl(INADDR_ANY)
};
if (mcastgrp == htonl(INADDR_ANY))
return mr_mfc_find_any_parent(mrt, vifi);
return mr_mfc_find_any(mrt, vifi, &arg);
}
/* Look for a (S,G,iif) entry if parent != -1 */
static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
__be32 origin, __be32 mcastgrp,
int parent)
{
struct mfc_cache_cmp_arg arg = {
.mfc_mcastgrp = mcastgrp,
.mfc_origin = origin,
};
return mr_mfc_find_parent(mrt, &arg, parent);
}
/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
if (c) {
c->_c.mfc_un.res.last_assert = jiffies - MFC_ASSERT_THRESH - 1;
c->_c.mfc_un.res.minvif = MAXVIFS;
c->_c.free = ipmr_cache_free_rcu;
refcount_set(&c->_c.mfc_un.res.refcount, 1);
}
return c;
}
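/* Allocate an entry for the unresolved queue; GFP_ATOMIC, since this
 * runs from the packet path.
 */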
static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
if (c) {
skb_queue_head_init(&c->_c.mfc_un.unres.unresolved);
c->_c.mfc_un.unres.expires = jiffies + 10 * HZ;
}
return c;
}
/* A cache entry has gone into a resolved state from queued */
static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
struct mfc_cache *uc, struct mfc_cache *c)
{
struct sk_buff *skb;
struct nlmsgerr *e;
/* Play the pending entries through our router */
while ((skb = __skb_dequeue(&uc->_c.mfc_un.unres.unresolved))) {
if (ip_hdr(skb)->version == 0) {
struct nlmsghdr *nlh = skb_pull(skb,
sizeof(struct iphdr));
if (mr_fill_mroute(mrt, skb, &c->_c,
nlmsg_data(nlh)) > 0) {
nlh->nlmsg_len = skb_tail_pointer(skb) -
(u8 *)nlh;
} else {
nlh->nlmsg_type = NLMSG_ERROR;
nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr));
skb_trim(skb, nlh->nlmsg_len);
e = nlmsg_data(nlh);
e->error = -EMSGSIZE;
memset(&e->msg, 0, sizeof(e->msg));
}
rtnl_unicast(skb, net, NETLINK_CB(skb).portid);
} else {
rcu_read_lock();
ip_mr_forward(net, mrt, skb->dev, skb, c, 0);
rcu_read_unlock();
}
}
}
/* Bounce a cache query up to mrouted and netlink.
*
* Called under rcu_read_lock().
*/
static int ipmr_cache_report(const struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert)
{
const int ihl = ip_hdrlen(pkt);
struct sock *mroute_sk;
struct igmphdr *igmp;
struct igmpmsg *msg;
struct sk_buff *skb;
int ret;
if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE)
skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
else
skb = alloc_skb(128, GFP_ATOMIC);
if (!skb)
return -ENOBUFS;
if (assert == IGMPMSG_WHOLEPKT || assert == IGMPMSG_WRVIFWHOLE) {
/* Ugly, but we have no choice with this interface.
* Duplicate old header, fix ihl, length etc.
* And all this only to mangle msg->im_msgtype and
* to set msg->im_mbz to "mbz" :-)
*/
skb_push(skb, sizeof(struct iphdr));
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
msg = (struct igmpmsg *)skb_network_header(skb);
memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
msg->im_msgtype = assert;
msg->im_mbz = 0;
if (assert == IGMPMSG_WRVIFWHOLE) {
msg->im_vif = vifi;
msg->im_vif_hi = vifi >> 8;
} else {
/* Pairs with WRITE_ONCE() in vif_add() and vif_delete() */
int vif_num = READ_ONCE(mrt->mroute_reg_vif_num);
msg->im_vif = vif_num;
msg->im_vif_hi = vif_num >> 8;
}
ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
sizeof(struct iphdr));
} else {
/* Copy the IP header */
skb_set_network_header(skb, skb->len);
skb_put(skb, ihl);
skb_copy_to_linear_data(skb, pkt->data, ihl);
/* Flag to the kernel this is a route add */
ip_hdr(skb)->protocol = 0;
msg = (struct igmpmsg *)skb_network_header(skb);
msg->im_vif = vifi;
msg->im_vif_hi = vifi >> 8;
skb_dst_set(skb, dst_clone(skb_dst(pkt)));
/* Add our header */
igmp = skb_put(skb, sizeof(struct igmphdr));
igmp->type = assert;
msg->im_msgtype = assert;
igmp->code = 0;
ip_hdr(skb)->tot_len = htons(skb->len); /* Fix the length */
skb->transport_header = skb->network_header;
}
mroute_sk = rcu_dereference(mrt->mroute_sk);
if (!mroute_sk) {
kfree_skb(skb);
return -EINVAL;
}
igmpmsg_netlink_event(mrt, skb);
/* Deliver to mrouted */
ret = sock_queue_rcv_skb(mroute_sk, skb);
if (ret < 0) {
net_warn_ratelimited("mroute: pending queue full, dropping entries\n");
kfree_skb(skb);
}
return ret;
}
/* Queue a packet for resolution. The (possibly new) unresolved cache
 * entry is manipulated under mfc_unres_lock.
 */
/* Called under rcu_read_lock() */
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
struct sk_buff *skb, struct net_device *dev)
{
const struct iphdr *iph = ip_hdr(skb);
struct mfc_cache *c;
bool found = false;
int err;
spin_lock_bh(&mfc_unres_lock);
list_for_each_entry(c, &mrt->mfc_unres_queue, _c.list) {
if (c->mfc_mcastgrp == iph->daddr &&
c->mfc_origin == iph->saddr) {
found = true;
break;
}
}
if (!found) {
/* Create a new entry if allowable */
c = ipmr_cache_alloc_unres();
if (!c) {
spin_unlock_bh(&mfc_unres_lock);
kfree_skb(skb);
return -ENOBUFS;
}
/* Fill in the new cache entry */
c->_c.mfc_parent = -1;
c->mfc_origin = iph->saddr;
c->mfc_mcastgrp = iph->daddr;
/* Reflect first query at mrouted. */
err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
if (err < 0) {
/* If the report failed, throw the cache entry
 * out - Brad Parker
 */
spin_unlock_bh(&mfc_unres_lock);
ipmr_cache_free(c);
kfree_skb(skb);
return err;
}
atomic_inc(&mrt->cache_resolve_queue_len);
list_add(&c->_c.list, &mrt->mfc_unres_queue);
mroute_netlink_event(mrt, c, RTM_NEWROUTE);
if (atomic_read(&mrt->cache_resolve_queue_len) == 1)
mod_timer(&mrt->ipmr_expire_timer,
c->_c.mfc_un.unres.expires);
}
/* See if we can append the packet; at most four packets are queued per unresolved entry */
if (c->_c.mfc_un.unres.unresolved.qlen > 3) {
kfree_skb(skb);
err = -ENOBUFS;
} else {
if (dev) {
skb->dev = dev;
skb->skb_iif = dev->ifindex;
}
skb_queue_tail(&c->_c.mfc_un.unres.unresolved, skb);
err = 0;
}
spin_unlock_bh(&mfc_unres_lock);
return err;
}
/* MFC cache manipulation by user space mroute daemon */
static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
struct net *net = read_pnet(&mrt->net);
struct mfc_cache *c;
/* The entries are added/deleted only under RTNL */
rcu_read_lock();
c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
mfc->mfcc_mcastgrp.s_addr, parent);
rcu_read_unlock();
if (!c)
return -ENOENT;
rhltable_remove(&mrt->mfc_hash, &c->_c.mnode, ipmr_rht_params);
list_del_rcu(&c->_c.list);
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, c, mrt->id);
mroute_netlink_event(mrt, c, RTM_DELROUTE);
mr_cache_put(&c->_c);
return 0;
}
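/* Add or replace an MFC entry, then resolve and transmit any packets
 * that were queued while the route was unresolved.
 */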
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
struct mfcctl *mfc, int mrtsock, int parent)
{
struct mfc_cache *uc, *c;
struct mr_mfc *_uc;
bool found;
int ret;
if (mfc->mfcc_parent >= MAXVIFS)
return -ENFILE;
/* The entries are added/deleted only under RTNL */
rcu_read_lock();
c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
mfc->mfcc_mcastgrp.s_addr, parent);
rcu_read_unlock();
if (c) {
spin_lock(&mrt_lock);
c->_c.mfc_parent = mfc->mfcc_parent;
ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
if (!mrtsock)
c->_c.mfc_flags |= MFC_STATIC;
spin_unlock(&mrt_lock);
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, c,
mrt->id);
mroute_netlink_event(mrt, c, RTM_NEWROUTE);
return 0;
}
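/* A new entry's group must be a real multicast address, or
 * INADDR_ANY for a (*,*) proxy entry.
 */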
if (mfc->mfcc_mcastgrp.s_addr != htonl(INADDR_ANY) &&
!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
return -EINVAL;
c = ipmr_cache_alloc();
if (!c)
return -ENOMEM;
c->mfc_origin = mfc->mfcc_origin.s_addr;
c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
c->_c.mfc_parent = mfc->mfcc_parent;
ipmr_update_thresholds(mrt, &c->_c, mfc->mfcc_ttls);
if (!mrtsock)
c->_c.mfc_flags |= MFC_STATIC;
ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->_c.mnode,
ipmr_rht_params);
if (ret) {
pr_err("ipmr: rhtable insert error %d\n", ret);
ipmr_cache_free(c);
return ret;
}
list_add_tail_rcu(&c->_c.list, &mrt->mfc_cache_list);
/* Check whether this entry resolves a queued (unresolved) entry.
 * If so, send on the pending frames and tidy up.
 */
found = false;
spin_lock_bh(&mfc_unres_lock);
list_for_each_entry(_uc, &mrt->mfc_unres_queue, list) {
uc = (struct mfc_cache *)_uc;
if (uc->mfc_origin == c->mfc_origin &&
uc->mfc_mcastgrp == c->mfc_mcastgrp) {
list_del(&_uc->list);
atomic_dec(&mrt->cache_resolve_queue_len);
found = true;
break;
}
}
if (list_empty(&mrt->mfc_unres_queue))
del_timer(&mrt->ipmr_expire_timer);
spin_unlock_bh(&mfc_unres_lock);
if (found) {
ipmr_cache_resolve(net, mrt, uc, c);
ipmr_cache_free(uc);
}
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_ADD, c, mrt->id);
mroute_netlink_event(mrt, c, RTM_NEWROUTE);
return 0;
}
/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, int flags)
{
struct net *net = read_pnet(&mrt->net);
struct mr_mfc *c, *tmp;
struct mfc_cache *cache;
LIST_HEAD(list);
int i;
/* Shut down all active vif entries */
if (flags & (MRT_FLUSH_VIFS | MRT_FLUSH_VIFS_STATIC)) {
for (i = 0; i < mrt->maxvif; i++) {
if (((mrt->vif_table[i].flags & VIFF_STATIC) &&
!(flags & MRT_FLUSH_VIFS_STATIC)) ||
(!(mrt->vif_table[i].flags & VIFF_STATIC) && !(flags & MRT_FLUSH_VIFS)))
continue;
vif_delete(mrt, i, 0, &list);
}
unregister_netdevice_many(&list);
}
/* Wipe the cache */
if (flags & (MRT_FLUSH_MFC | MRT_FLUSH_MFC_STATIC)) {
list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
if (((c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC_STATIC)) ||
(!(c->mfc_flags & MFC_STATIC) && !(flags & MRT_FLUSH_MFC)))
continue;
rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
list_del_rcu(&c->list);
cache = (struct mfc_cache *)c;
call_ipmr_mfc_entry_notifiers(net, FIB_EVENT_ENTRY_DEL, cache,
mrt->id);
mroute_netlink_event(mrt, cache, RTM_DELROUTE);
mr_cache_put(c);
}
}
if (flags & MRT_FLUSH_MFC) {
if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
spin_lock_bh(&mfc_unres_lock);
list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
list_del(&c->list);
cache = (struct mfc_cache *)c;
mroute_netlink_event(mrt, cache, RTM_DELROUTE);
ipmr_destroy_unres(mrt, cache);
}
spin_unlock_bh(&mfc_unres_lock);
}
}
}
/* Called from ip_ra_control() before an RCU grace period,
 * so we don't need to call synchronize_rcu() here.
 */
static void mrtsock_destruct(struct sock *sk)
{
struct net *net = sock_net(sk);
struct mr_table *mrt;
rtnl_lock();
ipmr_for_each_table(mrt, net) {
if (sk == rtnl_dereference(mrt->mroute_sk)) {
IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
NETCONFA_MC_FORWARDING,
NETCONFA_IFINDEX_ALL,
net->ipv4.devconf_all);
RCU_INIT_POINTER(mrt->mroute_sk, NULL);
mroute_clean_tables(mrt, MRT_FLUSH_VIFS | MRT_FLUSH_MFC);
}
}
rtnl_unlock();
}
/* Socket options and virtual interface manipulation. The whole
* virtual interface system is a complete heap, but unfortunately
* that's how BSD mrouted happens to think. Maybe one day with a proper
* MOSPF/PIM router set up we can clean this up.
*/
int ip_mroute_setsockopt(struct sock *sk, int optname, sockptr_t optval,
unsigned int optlen)
{
struct net *net = sock_net(sk);
int val, ret = 0, parent = 0;
struct mr_table *mrt;
struct vifctl vif;
struct mfcctl mfc;
bool do_wrvifwhole;
u32 uval;
/* There's one exception to the lock: MRT_DONE, which needs to unlock */
rtnl_lock();
if (sk->sk_type != SOCK_RAW ||
inet_sk(sk)->inet_num != IPPROTO_IGMP) {
ret = -EOPNOTSUPP;
goto out_unlock;
}
mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
if (!mrt) {
ret = -ENOENT;
goto out_unlock;
}
if (optname != MRT_INIT) {
if (sk != rcu_access_pointer(mrt->mroute_sk) &&
!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
ret = -EACCES;
goto out_unlock;
}
}
switch (optname) {
case MRT_INIT:
if (optlen != sizeof(int)) {
ret = -EINVAL;
break;
}
if (rtnl_dereference(mrt->mroute_sk)) {
ret = -EADDRINUSE;
break;
}
ret = ip_ra_control(sk, 1, mrtsock_destruct);
if (ret == 0) {
rcu_assign_pointer(mrt->mroute_sk, sk);
IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
inet_netconf_notify_devconf(net, RTM_NEWNETCONF,
NETCONFA_MC_FORWARDING,
NETCONFA_IFINDEX_ALL,
net->ipv4.devconf_all);
}
break;
case MRT_DONE:
if (sk != rcu_access_pointer(mrt->mroute_sk)) {
ret = -EACCES;
} else {
/* We need to unlock here because mrtsock_destruct takes
* care of rtnl itself and we can't change that due to
* the IP_ROUTER_ALERT setsockopt which runs without it.
*/
rtnl_unlock();
ret = ip_ra_control(sk, 0, NULL);
goto out;
}
break;
case MRT_ADD_VIF:
case MRT_DEL_VIF:
if (optlen != sizeof(vif)) {
ret = -EINVAL;
break;
}
if (copy_from_sockptr(&vif, optval, sizeof(vif))) {
ret = -EFAULT;
break;
}
if (vif.vifc_vifi >= MAXVIFS) {
ret = -ENFILE;
break;
}
if (optname == MRT_ADD_VIF) {
ret = vif_add(net, mrt, &vif,
sk == rtnl_dereference(mrt->mroute_sk));
} else {
ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
}
break;
/* Manipulate the forwarding caches. These live
* in a sort of kernel/user symbiosis.
*/
case MRT_ADD_MFC:
case MRT_DEL_MFC:
parent = -1;
fallthrough;
case MRT_ADD_MFC_PROXY:
case MRT_DEL_MFC_PROXY:
if (optlen != sizeof(mfc)) {
ret = -EINVAL;
break;
}
if (copy_from_sockptr(&mfc, optval, sizeof(mfc))) {
ret = -EFAULT;
break;
}
if (parent == 0)
parent = mfc.mfcc_parent;
if (optname == MRT_DEL_MFC || optname == MRT_DEL_MFC_PROXY)
ret = ipmr_mfc_delete(mrt, &mfc, parent);
else
ret = ipmr_mfc_add(net, mrt, &mfc,
sk == rtnl_dereference(mrt->mroute_sk),
parent);
break;
case MRT_FLUSH:
if (optlen != sizeof(val)) {
ret = -EINVAL;
break;
}
if (copy_from_sockptr(&val, optval, sizeof(val))) {
ret = -EFAULT;
break;
}
mroute_clean_tables(mrt, val);
break;
/* Control PIM assert. */
case MRT_ASSERT:
if (optlen != sizeof(val)) {
ret = -EINVAL;
break;
}
if (copy_from_sockptr(&val, optval, sizeof(val))) {
ret = -EFAULT;
break;
}
mrt->mroute_do_assert = val;
break;
case MRT_PIM:
if (!ipmr_pimsm_enabled()) {
ret = -ENOPROTOOPT;
break;
}
if (optlen != sizeof(val)) {
ret = -EINVAL;
break;
}
if (copy_from_sockptr(&val, optval, sizeof(val))) {
ret = -EFAULT;
break;
}
do_wrvifwhole = (val == IGMPMSG_WRVIFWHOLE);
val = !!val;
if (val != mrt->mroute_do_pim) {
mrt->mroute_do_pim = val;
mrt->mroute_do_assert = val;
mrt->mroute_do_wrvifwhole = do_wrvifwhole;
}
break;
case MRT_TABLE:
if (!IS_BUILTIN(CONFIG_IP_MROUTE_MULTIPLE_TABLES)) {
ret = -ENOPROTOOPT;
break;
}
if (optlen != sizeof(uval)) {
ret = -EINVAL;
break;
}
if (copy_from_sockptr(&uval, optval, sizeof(uval))) {
ret = -EFAULT;
break;
}
if (sk == rtnl_dereference(mrt->mroute_sk)) {
ret = -EBUSY;
} else {
mrt = ipmr_new_table(net, uval);
if (IS_ERR(mrt))
ret = PTR_ERR(mrt);
else
raw_sk(sk)->ipmr_table = uval;
}
break;
/* Spurious command, or MRT_VERSION which you cannot set. */
default:
ret = -ENOPROTOOPT;
}
out_unlock:
rtnl_unlock();
out:
return ret;
}
/* Getsock opt support for the multicast routing system. */
int ip_mroute_getsockopt(struct sock *sk, int optname, sockptr_t optval,
sockptr_t optlen)
{
int olr;
int val;
struct net *net = sock_net(sk);
struct mr_table *mrt;
if (sk->sk_type != SOCK_RAW ||
inet_sk(sk)->inet_num != IPPROTO_IGMP)
return -EOPNOTSUPP;
mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
if (!mrt)
return -ENOENT;
switch (optname) {
case MRT_VERSION:
val = 0x0305;
break;
case MRT_PIM:
if (!ipmr_pimsm_enabled())
return -ENOPROTOOPT;
val = mrt->mroute_do_pim;
break;
case MRT_ASSERT:
val = mrt->mroute_do_assert;
break;
default:
return -ENOPROTOOPT;
}
if (copy_from_sockptr(&olr, optlen, sizeof(int)))
return -EFAULT;
	/* Reject negative lengths before min_t(), which compares as
	 * unsigned and would otherwise silently accept them.
	 */
	if (olr < 0)
		return -EINVAL;
	olr = min_t(unsigned int, olr, sizeof(int));
if (copy_to_sockptr(optlen, &olr, sizeof(int)))
return -EFAULT;
if (copy_to_sockptr(optval, &val, olr))
return -EFAULT;
return 0;
}
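/* Illustrative only: probing the mroute API version reported above from
 * userspace (fd is assumed to be the raw IGMP socket):
 *
 *	int ver = 0;
 *	socklen_t len = sizeof(ver);
 *
 *	getsockopt(fd, IPPROTO_IP, MRT_VERSION, &ver, &len);
 *
 * On success, ver reads back as 0x0305, per the MRT_VERSION case above.
 */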
/* The IP multicast ioctl support routines. */
int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
struct sioc_sg_req sr;
struct sioc_vif_req vr;
struct vif_device *vif;
struct mfc_cache *c;
struct net *net = sock_net(sk);
struct mr_table *mrt;
mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
if (!mrt)
return -ENOENT;
switch (cmd) {
case SIOCGETVIFCNT:
if (copy_from_user(&vr, arg, sizeof(vr)))
return -EFAULT;
if (vr.vifi >= mrt->maxvif)
return -EINVAL;
vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
rcu_read_lock();
vif = &mrt->vif_table[vr.vifi];
if (VIF_EXISTS(mrt, vr.vifi)) {
vr.icount = READ_ONCE(vif->pkt_in);
vr.ocount = READ_ONCE(vif->pkt_out);
vr.ibytes = READ_ONCE(vif->bytes_in);
vr.obytes = READ_ONCE(vif->bytes_out);
rcu_read_unlock();
if (copy_to_user(arg, &vr, sizeof(vr)))
return -EFAULT;
return 0;
}
rcu_read_unlock();
return -EADDRNOTAVAIL;
case SIOCGETSGCNT:
if (copy_from_user(&sr, arg, sizeof(sr)))
return -EFAULT;
rcu_read_lock();
c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
if (c) {
sr.pktcnt = c->_c.mfc_un.res.pkt;
sr.bytecnt = c->_c.mfc_un.res.bytes;
sr.wrong_if = c->_c.mfc_un.res.wrong_if;
rcu_read_unlock();
if (copy_to_user(arg, &sr, sizeof(sr)))
return -EFAULT;
return 0;
}
rcu_read_unlock();
return -EADDRNOTAVAIL;
default:
return -ENOIOCTLCMD;
}
}
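/* Illustrative only: reading the per-VIF counters exposed above (assumes
 * vif 0 exists in the socket's table):
 *
 *	struct sioc_vif_req vr = { .vifi = 0 };
 *
 *	if (ioctl(fd, SIOCGETVIFCNT, &vr) == 0)
 *		printf("vif0: %lu/%lu pkts, %lu/%lu bytes\n",
 *		       vr.icount, vr.ocount, vr.ibytes, vr.obytes);
 */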
#ifdef CONFIG_COMPAT
struct compat_sioc_sg_req {
struct in_addr src;
struct in_addr grp;
compat_ulong_t pktcnt;
compat_ulong_t bytecnt;
compat_ulong_t wrong_if;
};
struct compat_sioc_vif_req {
vifi_t vifi; /* Which iface */
compat_ulong_t icount;
compat_ulong_t ocount;
compat_ulong_t ibytes;
compat_ulong_t obytes;
};
int ipmr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
{
struct compat_sioc_sg_req sr;
struct compat_sioc_vif_req vr;
struct vif_device *vif;
struct mfc_cache *c;
struct net *net = sock_net(sk);
struct mr_table *mrt;
mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
if (!mrt)
return -ENOENT;
switch (cmd) {
case SIOCGETVIFCNT:
if (copy_from_user(&vr, arg, sizeof(vr)))
return -EFAULT;
if (vr.vifi >= mrt->maxvif)
return -EINVAL;
vr.vifi = array_index_nospec(vr.vifi, mrt->maxvif);
rcu_read_lock();
vif = &mrt->vif_table[vr.vifi];
if (VIF_EXISTS(mrt, vr.vifi)) {
vr.icount = READ_ONCE(vif->pkt_in);
vr.ocount = READ_ONCE(vif->pkt_out);
vr.ibytes = READ_ONCE(vif->bytes_in);
vr.obytes = READ_ONCE(vif->bytes_out);
rcu_read_unlock();
if (copy_to_user(arg, &vr, sizeof(vr)))
return -EFAULT;
return 0;
}
rcu_read_unlock();
return -EADDRNOTAVAIL;
case SIOCGETSGCNT:
if (copy_from_user(&sr, arg, sizeof(sr)))
return -EFAULT;
rcu_read_lock();
c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
if (c) {
sr.pktcnt = c->_c.mfc_un.res.pkt;
sr.bytecnt = c->_c.mfc_un.res.bytes;
sr.wrong_if = c->_c.mfc_un.res.wrong_if;
rcu_read_unlock();
if (copy_to_user(arg, &sr, sizeof(sr)))
return -EFAULT;
return 0;
}
rcu_read_unlock();
return -EADDRNOTAVAIL;
default:
return -ENOIOCTLCMD;
}
}
#endif
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
struct mr_table *mrt;
struct vif_device *v;
int ct;
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
ipmr_for_each_table(mrt, net) {
v = &mrt->vif_table[0];
for (ct = 0; ct < mrt->maxvif; ct++, v++) {
if (rcu_access_pointer(v->dev) == dev)
vif_delete(mrt, ct, 1, NULL);
}
}
return NOTIFY_DONE;
}
static struct notifier_block ip_mr_notifier = {
.notifier_call = ipmr_device_event,
};
/* Encapsulate a packet by attaching a valid IPIP header to it.
* This avoids tunnel drivers and other mess and gives us the speed so
* important for multicast video.
*/
static void ip_encap(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr)
{
struct iphdr *iph;
const struct iphdr *old_iph = ip_hdr(skb);
skb_push(skb, sizeof(struct iphdr));
skb->transport_header = skb->network_header;
skb_reset_network_header(skb);
iph = ip_hdr(skb);
iph->version = 4;
iph->tos = old_iph->tos;
iph->ttl = old_iph->ttl;
iph->frag_off = 0;
iph->daddr = daddr;
iph->saddr = saddr;
iph->protocol = IPPROTO_IPIP;
iph->ihl = 5;
iph->tot_len = htons(skb->len);
ip_select_ident(net, skb, NULL);
ip_send_check(iph);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
nf_reset_ct(skb);
}
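/* Resulting frame layout (a sketch; saddr/daddr are supplied by the
 * caller, e.g. vif->local/vif->remote in ipmr_queue_xmit()):
 *
 *	[ outer iph: IPPROTO_IPIP, tos/ttl copied from the inner header ]
 *	[ inner iph: the original multicast header ]
 *	[ original payload ]
 */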
static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
struct ip_options *opt = &(IPCB(skb)->opt);
IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
if (unlikely(opt->optlen))
ip_forward_options(skb);
return dst_output(net, sk, skb);
}
#ifdef CONFIG_NET_SWITCHDEV
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
int in_vifi, int out_vifi)
{
struct vif_device *out_vif = &mrt->vif_table[out_vifi];
struct vif_device *in_vif = &mrt->vif_table[in_vifi];
if (!skb->offload_l3_fwd_mark)
return false;
if (!out_vif->dev_parent_id.id_len || !in_vif->dev_parent_id.id_len)
return false;
return netdev_phys_item_id_same(&out_vif->dev_parent_id,
&in_vif->dev_parent_id);
}
#else
static bool ipmr_forward_offloaded(struct sk_buff *skb, struct mr_table *mrt,
int in_vifi, int out_vifi)
{
return false;
}
#endif
/* Processing handlers for ipmr_forward, under rcu_read_lock() */
static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
int in_vifi, struct sk_buff *skb, int vifi)
{
const struct iphdr *iph = ip_hdr(skb);
struct vif_device *vif = &mrt->vif_table[vifi];
struct net_device *vif_dev;
struct net_device *dev;
struct rtable *rt;
struct flowi4 fl4;
int encap = 0;
vif_dev = vif_dev_read(vif);
if (!vif_dev)
goto out_free;
if (vif->flags & VIFF_REGISTER) {
WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
DEV_STATS_INC(vif_dev, tx_packets);
ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
goto out_free;
}
if (ipmr_forward_offloaded(skb, mrt, in_vifi, vifi))
goto out_free;
if (vif->flags & VIFF_TUNNEL) {
rt = ip_route_output_ports(net, &fl4, NULL,
vif->remote, vif->local,
0, 0,
IPPROTO_IPIP,
RT_TOS(iph->tos), vif->link);
if (IS_ERR(rt))
goto out_free;
encap = sizeof(struct iphdr);
} else {
rt = ip_route_output_ports(net, &fl4, NULL, iph->daddr, 0,
0, 0,
IPPROTO_IPIP,
RT_TOS(iph->tos), vif->link);
if (IS_ERR(rt))
goto out_free;
}
dev = rt->dst.dev;
if (skb->len+encap > dst_mtu(&rt->dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not allow
		 * sending ICMP here, so oversized packets will simply
		 * disappear into a blackhole.
		 */
IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
ip_rt_put(rt);
goto out_free;
}
encap += LL_RESERVED_SPACE(dev) + rt->dst.header_len;
if (skb_cow(skb, encap)) {
ip_rt_put(rt);
goto out_free;
}
WRITE_ONCE(vif->pkt_out, vif->pkt_out + 1);
WRITE_ONCE(vif->bytes_out, vif->bytes_out + skb->len);
skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
ip_decrease_ttl(ip_hdr(skb));
/* FIXME: forward and output firewalls used to be called here.
* What do we do with netfilter? -- RR
*/
if (vif->flags & VIFF_TUNNEL) {
ip_encap(net, skb, vif->local, vif->remote);
/* FIXME: extra output firewall step used to be here. --RR */
DEV_STATS_INC(vif_dev, tx_packets);
DEV_STATS_ADD(vif_dev, tx_bytes, skb->len);
}
IPCB(skb)->flags |= IPSKB_FORWARDED;
	/* RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicast
	 * program, that program should receive packets no matter which
	 * interface it joined on. Otherwise the program would have to join
	 * on all interfaces. On the other hand, a multihomed host (or a
	 * router, but not an mrouter) cannot join on more than one
	 * interface - it would result in receiving duplicate packets.
	 */
NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
net, NULL, skb, skb->dev, dev,
ipmr_forward_finish);
return;
out_free:
kfree_skb(skb);
}
/* Called with mrt_lock or rcu_read_lock() */
static int ipmr_find_vif(const struct mr_table *mrt, struct net_device *dev)
{
int ct;
/* Pairs with WRITE_ONCE() in vif_delete()/vif_add() */
for (ct = READ_ONCE(mrt->maxvif) - 1; ct >= 0; ct--) {
if (rcu_access_pointer(mrt->vif_table[ct].dev) == dev)
break;
}
return ct;
}
/* "local" means that we should preserve one skb (for local delivery) */
/* Called under rcu_read_lock() */
static void ip_mr_forward(struct net *net, struct mr_table *mrt,
struct net_device *dev, struct sk_buff *skb,
struct mfc_cache *c, int local)
{
int true_vifi = ipmr_find_vif(mrt, dev);
int psend = -1;
int vif, ct;
vif = c->_c.mfc_parent;
c->_c.mfc_un.res.pkt++;
c->_c.mfc_un.res.bytes += skb->len;
c->_c.mfc_un.res.lastuse = jiffies;
if (c->mfc_origin == htonl(INADDR_ANY) && true_vifi >= 0) {
struct mfc_cache *cache_proxy;
/* For an (*,G) entry, we only check that the incoming
* interface is part of the static tree.
*/
cache_proxy = mr_mfc_find_any_parent(mrt, vif);
if (cache_proxy &&
cache_proxy->_c.mfc_un.res.ttls[true_vifi] < 255)
goto forward;
}
/* Wrong interface: drop packet and (maybe) send PIM assert. */
if (rcu_access_pointer(mrt->vif_table[vif].dev) != dev) {
if (rt_is_output_route(skb_rtable(skb))) {
			/* It is our own packet, looped back.
			 * Very complicated situation...
			 *
			 * The best workaround, until routing daemons are
			 * fixed, is not to redistribute a packet if it was
			 * sent through the wrong interface. This means that
			 * multicast applications WILL NOT work for an
			 * (S,G) whose default multicast route points to the
			 * wrong oif. In any case, it is not a good idea to
			 * run multicast applications on a router.
			 */
goto dont_forward;
}
c->_c.mfc_un.res.wrong_if++;
if (true_vifi >= 0 && mrt->mroute_do_assert &&
		    /* PIM-SM uses asserts when switching from RPT to SPT,
		     * so we cannot check that the packet arrived on an oif.
		     * That is bad, but otherwise we would have to move a
		     * pretty large chunk of pimd into the kernel. Ough... --ANK
		     */
(mrt->mroute_do_pim ||
c->_c.mfc_un.res.ttls[true_vifi] < 255) &&
time_after(jiffies,
c->_c.mfc_un.res.last_assert +
MFC_ASSERT_THRESH)) {
c->_c.mfc_un.res.last_assert = jiffies;
ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
if (mrt->mroute_do_wrvifwhole)
ipmr_cache_report(mrt, skb, true_vifi,
IGMPMSG_WRVIFWHOLE);
}
goto dont_forward;
}
forward:
WRITE_ONCE(mrt->vif_table[vif].pkt_in,
mrt->vif_table[vif].pkt_in + 1);
WRITE_ONCE(mrt->vif_table[vif].bytes_in,
mrt->vif_table[vif].bytes_in + skb->len);
/* Forward the frame */
if (c->mfc_origin == htonl(INADDR_ANY) &&
c->mfc_mcastgrp == htonl(INADDR_ANY)) {
if (true_vifi >= 0 &&
true_vifi != c->_c.mfc_parent &&
ip_hdr(skb)->ttl >
c->_c.mfc_un.res.ttls[c->_c.mfc_parent]) {
/* It's an (*,*) entry and the packet is not coming from
* the upstream: forward the packet to the upstream
* only.
*/
psend = c->_c.mfc_parent;
goto last_forward;
}
goto dont_forward;
}
for (ct = c->_c.mfc_un.res.maxvif - 1;
ct >= c->_c.mfc_un.res.minvif; ct--) {
/* For (*,G) entry, don't forward to the incoming interface */
if ((c->mfc_origin != htonl(INADDR_ANY) ||
ct != true_vifi) &&
ip_hdr(skb)->ttl > c->_c.mfc_un.res.ttls[ct]) {
if (psend != -1) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
ipmr_queue_xmit(net, mrt, true_vifi,
skb2, psend);
}
psend = ct;
}
}
last_forward:
if (psend != -1) {
if (local) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
ipmr_queue_xmit(net, mrt, true_vifi, skb2,
psend);
} else {
ipmr_queue_xmit(net, mrt, true_vifi, skb, psend);
return;
}
}
dont_forward:
if (!local)
kfree_skb(skb);
}
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
{
struct rtable *rt = skb_rtable(skb);
struct iphdr *iph = ip_hdr(skb);
struct flowi4 fl4 = {
.daddr = iph->daddr,
.saddr = iph->saddr,
.flowi4_tos = RT_TOS(iph->tos),
.flowi4_oif = (rt_is_output_route(rt) ?
skb->dev->ifindex : 0),
.flowi4_iif = (rt_is_output_route(rt) ?
LOOPBACK_IFINDEX :
skb->dev->ifindex),
.flowi4_mark = skb->mark,
};
struct mr_table *mrt;
int err;
err = ipmr_fib_lookup(net, &fl4, &mrt);
if (err)
return ERR_PTR(err);
return mrt;
}
/* Multicast packets for forwarding arrive here
* Called with rcu_read_lock();
*/
int ip_mr_input(struct sk_buff *skb)
{
struct mfc_cache *cache;
struct net *net = dev_net(skb->dev);
int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
struct mr_table *mrt;
struct net_device *dev;
/* skb->dev passed in is the loX master dev for vrfs.
* As there are no vifs associated with loopback devices,
* get the proper interface that does have a vif associated with it.
*/
dev = skb->dev;
if (netif_is_l3_master(skb->dev)) {
dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
if (!dev) {
kfree_skb(skb);
return -ENODEV;
}
}
	/* The packet is looped back after forwarding; it must not be
	 * forwarded a second time, but it can still be delivered locally.
	 */
if (IPCB(skb)->flags & IPSKB_FORWARDED)
goto dont_forward;
mrt = ipmr_rt_fib_lookup(net, skb);
if (IS_ERR(mrt)) {
kfree_skb(skb);
return PTR_ERR(mrt);
}
if (!local) {
if (IPCB(skb)->opt.router_alert) {
if (ip_call_ra_chain(skb))
return 0;
} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP) {
			/* IGMPv1 (and broken IGMPv2 implementations such as
			 * Cisco IOS <= 11.2(8)) do not put the router alert
			 * option into IGMP packets destined to routable
			 * groups. This is very bad, because it means that
			 * we can forward NO IGMP messages.
			 */
struct sock *mroute_sk;
mroute_sk = rcu_dereference(mrt->mroute_sk);
if (mroute_sk) {
nf_reset_ct(skb);
raw_rcv(mroute_sk, skb);
return 0;
}
}
}
/* already under rcu_read_lock() */
cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
if (!cache) {
int vif = ipmr_find_vif(mrt, dev);
if (vif >= 0)
cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
vif);
}
/* No usable cache entry */
if (!cache) {
int vif;
if (local) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
ip_local_deliver(skb);
if (!skb2)
return -ENOBUFS;
skb = skb2;
}
vif = ipmr_find_vif(mrt, dev);
if (vif >= 0)
return ipmr_cache_unresolved(mrt, vif, skb, dev);
kfree_skb(skb);
return -ENODEV;
}
ip_mr_forward(net, mrt, dev, skb, cache, local);
if (local)
return ip_local_deliver(skb);
return 0;
dont_forward:
if (local)
return ip_local_deliver(skb);
kfree_skb(skb);
return 0;
}
#ifdef CONFIG_IP_PIMSM_V1
/* Handle IGMP messages of PIMv1 */
int pim_rcv_v1(struct sk_buff *skb)
{
struct igmphdr *pim;
struct net *net = dev_net(skb->dev);
struct mr_table *mrt;
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
goto drop;
pim = igmp_hdr(skb);
mrt = ipmr_rt_fib_lookup(net, skb);
if (IS_ERR(mrt))
goto drop;
if (!mrt->mroute_do_pim ||
pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
goto drop;
if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
kfree_skb(skb);
}
return 0;
}
#endif
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
struct pimreghdr *pim;
struct net *net = dev_net(skb->dev);
struct mr_table *mrt;
if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
goto drop;
pim = (struct pimreghdr *)skb_transport_header(skb);
if (pim->type != ((PIM_VERSION << 4) | (PIM_TYPE_REGISTER)) ||
(pim->flags & PIM_NULL_REGISTER) ||
(ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
csum_fold(skb_checksum(skb, 0, skb->len, 0))))
goto drop;
mrt = ipmr_rt_fib_lookup(net, skb);
if (IS_ERR(mrt))
goto drop;
if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
kfree_skb(skb);
}
return 0;
}
#endif
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
struct rtmsg *rtm, u32 portid)
{
struct mfc_cache *cache;
struct mr_table *mrt;
int err;
mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
if (!mrt)
return -ENOENT;
rcu_read_lock();
cache = ipmr_cache_find(mrt, saddr, daddr);
if (!cache && skb->dev) {
int vif = ipmr_find_vif(mrt, skb->dev);
if (vif >= 0)
cache = ipmr_cache_find_any(mrt, daddr, vif);
}
if (!cache) {
struct sk_buff *skb2;
struct iphdr *iph;
struct net_device *dev;
int vif = -1;
dev = skb->dev;
if (dev)
vif = ipmr_find_vif(mrt, dev);
if (vif < 0) {
rcu_read_unlock();
return -ENODEV;
}
skb2 = skb_realloc_headroom(skb, sizeof(struct iphdr));
if (!skb2) {
rcu_read_unlock();
return -ENOMEM;
}
NETLINK_CB(skb2).portid = portid;
skb_push(skb2, sizeof(struct iphdr));
skb_reset_network_header(skb2);
iph = ip_hdr(skb2);
iph->ihl = sizeof(struct iphdr) >> 2;
iph->saddr = saddr;
iph->daddr = daddr;
iph->version = 0;
err = ipmr_cache_unresolved(mrt, vif, skb2, dev);
rcu_read_unlock();
return err;
}
err = mr_fill_mroute(mrt, skb, &cache->_c, rtm);
rcu_read_unlock();
return err;
}
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
u32 portid, u32 seq, struct mfc_cache *c, int cmd,
int flags)
{
struct nlmsghdr *nlh;
struct rtmsg *rtm;
int err;
nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
if (!nlh)
return -EMSGSIZE;
rtm = nlmsg_data(nlh);
rtm->rtm_family = RTNL_FAMILY_IPMR;
rtm->rtm_dst_len = 32;
rtm->rtm_src_len = 32;
rtm->rtm_tos = 0;
rtm->rtm_table = mrt->id;
if (nla_put_u32(skb, RTA_TABLE, mrt->id))
goto nla_put_failure;
rtm->rtm_type = RTN_MULTICAST;
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
if (c->_c.mfc_flags & MFC_STATIC)
rtm->rtm_protocol = RTPROT_STATIC;
else
rtm->rtm_protocol = RTPROT_MROUTED;
rtm->rtm_flags = 0;
if (nla_put_in_addr(skb, RTA_SRC, c->mfc_origin) ||
nla_put_in_addr(skb, RTA_DST, c->mfc_mcastgrp))
goto nla_put_failure;
err = mr_fill_mroute(mrt, skb, &c->_c, rtm);
/* do not break the dump if cache is unresolved */
if (err < 0 && err != -ENOENT)
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int _ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
u32 portid, u32 seq, struct mr_mfc *c, int cmd,
int flags)
{
return ipmr_fill_mroute(mrt, skb, portid, seq, (struct mfc_cache *)c,
cmd, flags);
}
static size_t mroute_msgsize(bool unresolved, int maxvif)
{
size_t len =
NLMSG_ALIGN(sizeof(struct rtmsg))
+ nla_total_size(4) /* RTA_TABLE */
+ nla_total_size(4) /* RTA_SRC */
+ nla_total_size(4) /* RTA_DST */
;
if (!unresolved)
len = len
+ nla_total_size(4) /* RTA_IIF */
+ nla_total_size(0) /* RTA_MULTIPATH */
+ maxvif * NLA_ALIGN(sizeof(struct rtnexthop))
/* RTA_MFC_STATS */
+ nla_total_size_64bit(sizeof(struct rta_mfc_stats))
;
return len;
}
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
int cmd)
{
struct net *net = read_pnet(&mrt->net);
struct sk_buff *skb;
int err = -ENOBUFS;
skb = nlmsg_new(mroute_msgsize(mfc->_c.mfc_parent >= MAXVIFS,
mrt->maxvif),
GFP_ATOMIC);
if (!skb)
goto errout;
err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
if (err < 0)
goto errout;
rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE, NULL, GFP_ATOMIC);
return;
errout:
kfree_skb(skb);
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE, err);
}
static size_t igmpmsg_netlink_msgsize(size_t payloadlen)
{
size_t len =
NLMSG_ALIGN(sizeof(struct rtgenmsg))
+ nla_total_size(1) /* IPMRA_CREPORT_MSGTYPE */
+ nla_total_size(4) /* IPMRA_CREPORT_VIF_ID */
+ nla_total_size(4) /* IPMRA_CREPORT_SRC_ADDR */
+ nla_total_size(4) /* IPMRA_CREPORT_DST_ADDR */
+ nla_total_size(4) /* IPMRA_CREPORT_TABLE */
/* IPMRA_CREPORT_PKT */
+ nla_total_size(payloadlen)
;
return len;
}
static void igmpmsg_netlink_event(const struct mr_table *mrt, struct sk_buff *pkt)
{
struct net *net = read_pnet(&mrt->net);
struct nlmsghdr *nlh;
struct rtgenmsg *rtgenm;
struct igmpmsg *msg;
struct sk_buff *skb;
struct nlattr *nla;
int payloadlen;
payloadlen = pkt->len - sizeof(struct igmpmsg);
msg = (struct igmpmsg *)skb_network_header(pkt);
skb = nlmsg_new(igmpmsg_netlink_msgsize(payloadlen), GFP_ATOMIC);
if (!skb)
goto errout;
nlh = nlmsg_put(skb, 0, 0, RTM_NEWCACHEREPORT,
sizeof(struct rtgenmsg), 0);
if (!nlh)
goto errout;
rtgenm = nlmsg_data(nlh);
rtgenm->rtgen_family = RTNL_FAMILY_IPMR;
if (nla_put_u8(skb, IPMRA_CREPORT_MSGTYPE, msg->im_msgtype) ||
nla_put_u32(skb, IPMRA_CREPORT_VIF_ID, msg->im_vif | (msg->im_vif_hi << 8)) ||
nla_put_in_addr(skb, IPMRA_CREPORT_SRC_ADDR,
msg->im_src.s_addr) ||
nla_put_in_addr(skb, IPMRA_CREPORT_DST_ADDR,
msg->im_dst.s_addr) ||
nla_put_u32(skb, IPMRA_CREPORT_TABLE, mrt->id))
goto nla_put_failure;
nla = nla_reserve(skb, IPMRA_CREPORT_PKT, payloadlen);
if (!nla || skb_copy_bits(pkt, sizeof(struct igmpmsg),
nla_data(nla), payloadlen))
goto nla_put_failure;
nlmsg_end(skb, nlh);
rtnl_notify(skb, net, 0, RTNLGRP_IPV4_MROUTE_R, NULL, GFP_ATOMIC);
return;
nla_put_failure:
nlmsg_cancel(skb, nlh);
errout:
kfree_skb(skb);
rtnl_set_sk_err(net, RTNLGRP_IPV4_MROUTE_R, -ENOBUFS);
}
static int ipmr_rtm_valid_getroute_req(struct sk_buff *skb,
const struct nlmsghdr *nlh,
struct nlattr **tb,
struct netlink_ext_ack *extack)
{
struct rtmsg *rtm;
int i, err;
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
NL_SET_ERR_MSG(extack, "ipv4: Invalid header for multicast route get request");
return -EINVAL;
}
if (!netlink_strict_get_check(skb))
return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
rtm_ipv4_policy, extack);
rtm = nlmsg_data(nlh);
if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
(rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
rtm->rtm_tos || rtm->rtm_table || rtm->rtm_protocol ||
rtm->rtm_scope || rtm->rtm_type || rtm->rtm_flags) {
NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for multicast route get request");
return -EINVAL;
}
err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
rtm_ipv4_policy, extack);
if (err)
return err;
if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
(tb[RTA_DST] && !rtm->rtm_dst_len)) {
NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
return -EINVAL;
}
for (i = 0; i <= RTA_MAX; i++) {
if (!tb[i])
continue;
switch (i) {
case RTA_SRC:
case RTA_DST:
case RTA_TABLE:
break;
default:
NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in multicast route get request");
return -EINVAL;
}
}
return 0;
}
static int ipmr_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[RTA_MAX + 1];
struct sk_buff *skb = NULL;
struct mfc_cache *cache;
struct mr_table *mrt;
__be32 src, grp;
u32 tableid;
int err;
err = ipmr_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
if (err < 0)
goto errout;
src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
grp = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0;
mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT);
if (!mrt) {
err = -ENOENT;
goto errout_free;
}
/* entries are added/deleted only under RTNL */
rcu_read_lock();
cache = ipmr_cache_find(mrt, src, grp);
rcu_read_unlock();
if (!cache) {
err = -ENOENT;
goto errout_free;
}
skb = nlmsg_new(mroute_msgsize(false, mrt->maxvif), GFP_KERNEL);
if (!skb) {
err = -ENOBUFS;
goto errout_free;
}
err = ipmr_fill_mroute(mrt, skb, NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, cache,
RTM_NEWROUTE, 0);
if (err < 0)
goto errout_free;
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
return err;
errout_free:
kfree_skb(skb);
goto errout;
}
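
/* Handle an RTM_GETROUTE dump, optionally restricted to a single table
 * when strict checking supplies an RTA_TABLE filter.
 */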
static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
{
struct fib_dump_filter filter = {};
int err;
if (cb->strict_check) {
err = ip_valid_fib_dump_req(sock_net(skb->sk), cb->nlh,
&filter, cb);
if (err < 0)
return err;
}
if (filter.table_id) {
struct mr_table *mrt;
mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id);
if (!mrt) {
if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR)
return skb->len;
NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
return -ENOENT;
}
err = mr_table_dump(mrt, skb, cb, _ipmr_fill_mroute,
&mfc_unres_lock, &filter);
return skb->len ? : err;
}
return mr_rtm_dumproute(skb, cb, ipmr_mr_table_iter,
_ipmr_fill_mroute, &mfc_unres_lock, &filter);
}
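
/* Attribute policy for RTM_NEWROUTE/RTM_DELROUTE requests */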
static const struct nla_policy rtm_ipmr_policy[RTA_MAX + 1] = {
[RTA_SRC] = { .type = NLA_U32 },
[RTA_DST] = { .type = NLA_U32 },
[RTA_IIF] = { .type = NLA_U32 },
[RTA_TABLE] = { .type = NLA_U32 },
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
};
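
/* Only static and mrouted-originated entries may be managed over rtnetlink */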
static bool ipmr_rtm_validate_proto(unsigned char rtm_protocol)
{
switch (rtm_protocol) {
case RTPROT_STATIC:
case RTPROT_MROUTED:
return true;
}
return false;
}
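
/* Copy the per-VIF TTL thresholds out of an RTA_MULTIPATH attribute.
 * Returns the number of VIFs seen, or -EINVAL if nexthops were left over.
 */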
static int ipmr_nla_get_ttls(const struct nlattr *nla, struct mfcctl *mfcc)
{
struct rtnexthop *rtnh = nla_data(nla);
int remaining = nla_len(nla), vifi = 0;
while (rtnh_ok(rtnh, remaining)) {
mfcc->mfcc_ttls[vifi] = rtnh->rtnh_hops;
if (++vifi == MAXVIFS)
break;
rtnh = rtnh_next(rtnh, &remaining);
}
return remaining > 0 ? -EINVAL : vifi;
}
/* returns < 0 on error, 0 for ADD_MFC and 1 for ADD_MFC_PROXY */
static int rtm_to_ipmr_mfcc(struct net *net, struct nlmsghdr *nlh,
struct mfcctl *mfcc, int *mrtsock,
struct mr_table **mrtret,
struct netlink_ext_ack *extack)
{
struct net_device *dev = NULL;
u32 tblid = RT_TABLE_DEFAULT;
struct mr_table *mrt;
struct nlattr *attr;
struct rtmsg *rtm;
int ret, rem;
ret = nlmsg_validate_deprecated(nlh, sizeof(*rtm), RTA_MAX,
rtm_ipmr_policy, extack);
if (ret < 0)
goto out;
rtm = nlmsg_data(nlh);
ret = -EINVAL;
if (rtm->rtm_family != RTNL_FAMILY_IPMR || rtm->rtm_dst_len != 32 ||
rtm->rtm_type != RTN_MULTICAST ||
rtm->rtm_scope != RT_SCOPE_UNIVERSE ||
!ipmr_rtm_validate_proto(rtm->rtm_protocol))
goto out;
memset(mfcc, 0, sizeof(*mfcc));
mfcc->mfcc_parent = -1;
ret = 0;
nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), rem) {
switch (nla_type(attr)) {
case RTA_SRC:
mfcc->mfcc_origin.s_addr = nla_get_be32(attr);
break;
case RTA_DST:
mfcc->mfcc_mcastgrp.s_addr = nla_get_be32(attr);
break;
case RTA_IIF:
dev = __dev_get_by_index(net, nla_get_u32(attr));
if (!dev) {
ret = -ENODEV;
goto out;
}
break;
case RTA_MULTIPATH:
if (ipmr_nla_get_ttls(attr, mfcc) < 0) {
ret = -EINVAL;
goto out;
}
break;
case RTA_PREFSRC:
ret = 1;
break;
case RTA_TABLE:
tblid = nla_get_u32(attr);
break;
}
}
mrt = ipmr_get_table(net, tblid);
if (!mrt) {
ret = -ENOENT;
goto out;
}
*mrtret = mrt;
*mrtsock = rtm->rtm_protocol == RTPROT_MROUTED ? 1 : 0;
if (dev)
mfcc->mfcc_parent = ipmr_find_vif(mrt, dev);
out:
return ret;
}
/* takes care of both newroute and delroute */
static int ipmr_rtm_route(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct net *net = sock_net(skb->sk);
int ret, mrtsock, parent;
struct mr_table *tbl;
struct mfcctl mfcc;
mrtsock = 0;
tbl = NULL;
ret = rtm_to_ipmr_mfcc(net, nlh, &mfcc, &mrtsock, &tbl, extack);
if (ret < 0)
return ret;
parent = ret ? mfcc.mfcc_parent : -1;
if (nlh->nlmsg_type == RTM_NEWROUTE)
return ipmr_mfc_add(net, tbl, &mfcc, mrtsock, parent);
else
return ipmr_mfc_delete(tbl, &mfcc, parent);
}
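
/* Put the IPMRA_TABLE_* attributes for one table; false means no skb room */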
static bool ipmr_fill_table(struct mr_table *mrt, struct sk_buff *skb)
{
u32 queue_len = atomic_read(&mrt->cache_resolve_queue_len);
if (nla_put_u32(skb, IPMRA_TABLE_ID, mrt->id) ||
nla_put_u32(skb, IPMRA_TABLE_CACHE_RES_QUEUE_LEN, queue_len) ||
nla_put_s32(skb, IPMRA_TABLE_MROUTE_REG_VIF_NUM,
mrt->mroute_reg_vif_num) ||
nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_ASSERT,
mrt->mroute_do_assert) ||
nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_PIM, mrt->mroute_do_pim) ||
nla_put_u8(skb, IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
mrt->mroute_do_wrvifwhole))
return false;
return true;
}
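
/* Put an IPMRA_VIF nest carrying the state and counters of one VIF */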
static bool ipmr_fill_vif(struct mr_table *mrt, u32 vifid, struct sk_buff *skb)
{
struct net_device *vif_dev;
struct nlattr *vif_nest;
struct vif_device *vif;
vif = &mrt->vif_table[vifid];
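/* The link dump runs under RTNL, so vif->dev cannot change here */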
vif_dev = rtnl_dereference(vif->dev);
/* If the VIF doesn't exist, just continue */
if (!vif_dev)
return true;
vif_nest = nla_nest_start_noflag(skb, IPMRA_VIF);
if (!vif_nest)
return false;
if (nla_put_u32(skb, IPMRA_VIFA_IFINDEX, vif_dev->ifindex) ||
nla_put_u32(skb, IPMRA_VIFA_VIF_ID, vifid) ||
nla_put_u16(skb, IPMRA_VIFA_FLAGS, vif->flags) ||
nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_IN, vif->bytes_in,
IPMRA_VIFA_PAD) ||
nla_put_u64_64bit(skb, IPMRA_VIFA_BYTES_OUT, vif->bytes_out,
IPMRA_VIFA_PAD) ||
nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_IN, vif->pkt_in,
IPMRA_VIFA_PAD) ||
nla_put_u64_64bit(skb, IPMRA_VIFA_PACKETS_OUT, vif->pkt_out,
IPMRA_VIFA_PAD) ||
nla_put_be32(skb, IPMRA_VIFA_LOCAL_ADDR, vif->local) ||
nla_put_be32(skb, IPMRA_VIFA_REMOTE_ADDR, vif->remote)) {
nla_nest_cancel(skb, vif_nest);
return false;
}
nla_nest_end(skb, vif_nest);
return true;
}
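
/* Strict-mode validation of the header of an RTM_GETLINK dump request */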
static int ipmr_valid_dumplink(const struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
struct ifinfomsg *ifm;
if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
NL_SET_ERR_MSG(extack, "ipv4: Invalid header for ipmr link dump");
return -EINVAL;
}
if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
NL_SET_ERR_MSG(extack, "Invalid data after header in ipmr link dump");
return -EINVAL;
}
ifm = nlmsg_data(nlh);
if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
ifm->ifi_change || ifm->ifi_index) {
NL_SET_ERR_MSG(extack, "Invalid values in header for ipmr link dump request");
return -EINVAL;
}
return 0;
}
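
/* Dump every MR table with its VIFs as RTM_NEWLINK messages, resuming
 * from the table/entry positions saved in cb->args[].
 */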
static int ipmr_rtm_dumplink(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct nlmsghdr *nlh = NULL;
unsigned int t = 0, s_t;
unsigned int e = 0, s_e;
struct mr_table *mrt;
if (cb->strict_check) {
int err = ipmr_valid_dumplink(cb->nlh, cb->extack);
if (err < 0)
return err;
}
s_t = cb->args[0];
s_e = cb->args[1];
ipmr_for_each_table(mrt, net) {
struct nlattr *vifs, *af;
struct ifinfomsg *hdr;
u32 i;
if (t < s_t)
goto skip_table;
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, RTM_NEWLINK,
sizeof(*hdr), NLM_F_MULTI);
if (!nlh)
break;
hdr = nlmsg_data(nlh);
memset(hdr, 0, sizeof(*hdr));
hdr->ifi_family = RTNL_FAMILY_IPMR;
af = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
if (!af) {
nlmsg_cancel(skb, nlh);
goto out;
}
if (!ipmr_fill_table(mrt, skb)) {
nlmsg_cancel(skb, nlh);
goto out;
}
vifs = nla_nest_start_noflag(skb, IPMRA_TABLE_VIFS);
if (!vifs) {
nla_nest_end(skb, af);
nlmsg_end(skb, nlh);
goto out;
}
for (i = 0; i < mrt->maxvif; i++) {
if (e < s_e)
goto skip_entry;
if (!ipmr_fill_vif(mrt, i, skb)) {
nla_nest_end(skb, vifs);
nla_nest_end(skb, af);
nlmsg_end(skb, nlh);
goto out;
}
skip_entry:
e++;
}
s_e = 0;
e = 0;
nla_nest_end(skb, vifs);
nla_nest_end(skb, af);
nlmsg_end(skb, nlh);
skip_table:
t++;
}
out:
cb->args[1] = e;
cb->args[0] = t;
return skb->len;
}
#ifdef CONFIG_PROC_FS
/* The /proc interfaces to multicast routing:
* /proc/net/ip_mr_cache & /proc/net/ip_mr_vif
*/
static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
struct mr_vif_iter *iter = seq->private;
struct net *net = seq_file_net(seq);
struct mr_table *mrt;
mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
if (!mrt)
return ERR_PTR(-ENOENT);
iter->mrt = mrt;
rcu_read_lock();
return mr_vif_seq_start(seq, pos);
}
static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
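
/* One row per VIF; a sample line (hypothetical values):
 *  0 eth0           1500      10      3000      20 00000 0100A8C0 00000000
 */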
static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
struct mr_vif_iter *iter = seq->private;
struct mr_table *mrt = iter->mrt;
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
"Interface BytesIn PktsIn BytesOut PktsOut Flags Local Remote\n");
} else {
const struct vif_device *vif = v;
const struct net_device *vif_dev;
const char *name;
vif_dev = vif_dev_read(vif);
name = vif_dev ? vif_dev->name : "none";
seq_printf(seq,
"%2td %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
vif - mrt->vif_table,
name, vif->bytes_in, vif->pkt_in,
vif->bytes_out, vif->pkt_out,
vif->flags, vif->local, vif->remote);
}
return 0;
}
static const struct seq_operations ipmr_vif_seq_ops = {
.start = ipmr_vif_seq_start,
.next = mr_vif_seq_next,
.stop = ipmr_vif_seq_stop,
.show = ipmr_vif_seq_show,
};
static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
struct net *net = seq_file_net(seq);
struct mr_table *mrt;
mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
if (!mrt)
return ERR_PTR(-ENOENT);
return mr_mfc_seq_start(seq, pos, mrt, &mfc_unres_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
int n;
if (v == SEQ_START_TOKEN) {
seq_puts(seq,
"Group Origin Iif Pkts Bytes Wrong Oifs\n");
} else {
const struct mfc_cache *mfc = v;
const struct mr_mfc_iter *it = seq->private;
const struct mr_table *mrt = it->mrt;
seq_printf(seq, "%08X %08X %-3hd",
(__force u32) mfc->mfc_mcastgrp,
(__force u32) mfc->mfc_origin,
mfc->_c.mfc_parent);
if (it->cache != &mrt->mfc_unres_queue) {
seq_printf(seq, " %8lu %8lu %8lu",
mfc->_c.mfc_un.res.pkt,
mfc->_c.mfc_un.res.bytes,
mfc->_c.mfc_un.res.wrong_if);
for (n = mfc->_c.mfc_un.res.minvif;
n < mfc->_c.mfc_un.res.maxvif; n++) {
if (VIF_EXISTS(mrt, n) &&
mfc->_c.mfc_un.res.ttls[n] < 255)
seq_printf(seq,
" %2d:%-3d",
n, mfc->_c.mfc_un.res.ttls[n]);
}
} else {
/* unresolved mfc_caches don't contain
* pkt, bytes and wrong_if values
*/
seq_printf(seq, " %8lu %8lu %8lu", 0ul, 0ul, 0ul);
}
seq_putc(seq, '\n');
}
return 0;
}
static const struct seq_operations ipmr_mfc_seq_ops = {
.start = ipmr_mfc_seq_start,
.next = mr_mfc_seq_next,
.stop = mr_mfc_seq_stop,
.show = ipmr_mfc_seq_show,
};
#endif
#ifdef CONFIG_IP_PIMSM_V2
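/* Receive handler for IPPROTO_PIM, registered in ip_mr_init() */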
static const struct net_protocol pim_protocol = {
.handler = pim_rcv,
};
#endif
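
/* Combined sequence counter of table and rule changes for FIB notifiers */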
static unsigned int ipmr_seq_read(struct net *net)
{
ASSERT_RTNL();
return net->ipv4.ipmr_seq + ipmr_rules_seq_read(net);
}
static int ipmr_dump(struct net *net, struct notifier_block *nb,
struct netlink_ext_ack *extack)
{
return mr_dump(net, nb, RTNL_FAMILY_IPMR, ipmr_rules_dump,
ipmr_mr_table_iter, extack);
}
static const struct fib_notifier_ops ipmr_notifier_ops_template = {
.family = RTNL_FAMILY_IPMR,
.fib_seq_read = ipmr_seq_read,
.fib_dump = ipmr_dump,
.owner = THIS_MODULE,
};
static int __net_init ipmr_notifier_init(struct net *net)
{
struct fib_notifier_ops *ops;
net->ipv4.ipmr_seq = 0;
ops = fib_notifier_ops_register(&ipmr_notifier_ops_template, net);
if (IS_ERR(ops))
return PTR_ERR(ops);
net->ipv4.ipmr_notifier_ops = ops;
return 0;
}
static void __net_exit ipmr_notifier_exit(struct net *net)
{
fib_notifier_ops_unregister(net->ipv4.ipmr_notifier_ops);
net->ipv4.ipmr_notifier_ops = NULL;
}
/* Setup for IP multicast routing */
static int __net_init ipmr_net_init(struct net *net)
{
int err;
err = ipmr_notifier_init(net);
if (err)
goto ipmr_notifier_fail;
err = ipmr_rules_init(net);
if (err < 0)
goto ipmr_rules_fail;
#ifdef CONFIG_PROC_FS
err = -ENOMEM;
if (!proc_create_net("ip_mr_vif", 0, net->proc_net, &ipmr_vif_seq_ops,
sizeof(struct mr_vif_iter)))
goto proc_vif_fail;
if (!proc_create_net("ip_mr_cache", 0, net->proc_net, &ipmr_mfc_seq_ops,
sizeof(struct mr_mfc_iter)))
goto proc_cache_fail;
#endif
return 0;
#ifdef CONFIG_PROC_FS
proc_cache_fail:
remove_proc_entry("ip_mr_vif", net->proc_net);
proc_vif_fail:
rtnl_lock();
ipmr_rules_exit(net);
rtnl_unlock();
#endif
ipmr_rules_fail:
ipmr_notifier_exit(net);
ipmr_notifier_fail:
return err;
}
static void __net_exit ipmr_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
remove_proc_entry("ip_mr_cache", net->proc_net);
remove_proc_entry("ip_mr_vif", net->proc_net);
#endif
ipmr_notifier_exit(net);
}
static void __net_exit ipmr_net_exit_batch(struct list_head *net_list)
{
struct net *net;
rtnl_lock();
list_for_each_entry(net, net_list, exit_list)
ipmr_rules_exit(net);
rtnl_unlock();
}
static struct pernet_operations ipmr_net_ops = {
.init = ipmr_net_init,
.exit = ipmr_net_exit,
.exit_batch = ipmr_net_exit_batch,
};
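
/* Boot-time setup: MFC cache, pernet ops, notifier, PIM and rtnetlink hooks */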
int __init ip_mr_init(void)
{
int err;
mrt_cachep = kmem_cache_create("ip_mrt_cache",
sizeof(struct mfc_cache),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
NULL);
err = register_pernet_subsys(&ipmr_net_ops);
if (err)
goto reg_pernet_fail;
err = register_netdevice_notifier(&ip_mr_notifier);
if (err)
goto reg_notif_fail;
#ifdef CONFIG_IP_PIMSM_V2
if (inet_add_protocol(&pim_protocol, IPPROTO_PIM) < 0) {
pr_err("%s: can't add PIM protocol\n", __func__);
err = -EAGAIN;
goto add_proto_fail;
}
#endif
rtnl_register(RTNL_FAMILY_IPMR, RTM_GETROUTE,
ipmr_rtm_getroute, ipmr_rtm_dumproute, 0);
rtnl_register(RTNL_FAMILY_IPMR, RTM_NEWROUTE,
ipmr_rtm_route, NULL, 0);
rtnl_register(RTNL_FAMILY_IPMR, RTM_DELROUTE,
ipmr_rtm_route, NULL, 0);
rtnl_register(RTNL_FAMILY_IPMR, RTM_GETLINK,
NULL, ipmr_rtm_dumplink, 0);
return 0;
#ifdef CONFIG_IP_PIMSM_V2
add_proto_fail:
unregister_netdevice_notifier(&ip_mr_notifier);
#endif
reg_notif_fail:
unregister_pernet_subsys(&ipmr_net_ops);
reg_pernet_fail:
kmem_cache_destroy(mrt_cachep);
return err;
}