WSL2-Linux-Kernel/net/l2tp/l2tp_eth.c

/*
 * L2TPv3 ethernet pseudowire driver
 *
 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/hash.h>
#include <linux/l2tp.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/spinlock.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/tcp_states.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "l2tp_core.h"

/* Default device name. May be overridden by name specified by user */
#define L2TP_ETH_DEV_NAME "l2tpeth%d"

/* via netdev_priv() */
struct l2tp_eth {
	struct net_device *dev;
	struct sock *tunnel_sock;
	struct l2tp_session *session;
	struct list_head list;
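
	/* The device is LLTX, so ndo_start_xmit() is not serialized by the
	 * netdev xmit lock; per-device statistics are therefore kept in
	 * lockless atomic counters.
	 */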
	atomic_long_t tx_bytes;
	atomic_long_t tx_packets;
	atomic_long_t tx_dropped;
	atomic_long_t rx_bytes;
	atomic_long_t rx_packets;
	atomic_long_t rx_errors;
};

/* via l2tp_session_priv() */
struct l2tp_eth_sess {
	struct net_device *dev;
};

/* per-net private data for this module */
static unsigned int l2tp_eth_net_id;
struct l2tp_eth_net {
	struct list_head l2tp_eth_dev_list;
	spinlock_t l2tp_eth_lock;
};

static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
{
	return net_generic(net, l2tp_eth_net_id);
}
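
/* Give the qdisc busylock of l2tpeth devices its own lockdep class.
 * Without it, lockdep reports a possible circular dependency between
 * sch->busylock and the locks taken further down the l2tp transmit
 * path when the tunnel is stacked over another device. Defaulting
 * tx_queue_len to 0 (no qdisc) would also work, but a user may attach
 * a qdisc anyway.
 */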
static struct lock_class_key l2tp_eth_tx_busylock;

static int l2tp_eth_dev_init(struct net_device *dev)
{
	struct l2tp_eth *priv = netdev_priv(dev);

	priv->dev = dev;
	eth_hw_addr_random(dev);
	memset(&dev->broadcast[0], 0xff, 6);
	dev->qdisc_tx_busylock = &l2tp_eth_tx_busylock;
	return 0;
}

static void l2tp_eth_dev_uninit(struct net_device *dev)
{
	struct l2tp_eth *priv = netdev_priv(dev);
	struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));

	spin_lock(&pn->l2tp_eth_lock);
	list_del_init(&priv->list);
	spin_unlock(&pn->l2tp_eth_lock);
	dev_put(dev);
}
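
/* LLTX transmit handler, called without the netdev xmit lock held.
 * skb->len is sampled before l2tp_xmit_skb() because the skb is no
 * longer ours after that call; the outcome is recorded in the atomic
 * counters and NETDEV_TX_OK is returned unconditionally.
 */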
static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct l2tp_eth *priv = netdev_priv(dev);
	struct l2tp_session *session = priv->session;
	unsigned int len = skb->len;
	int ret = l2tp_xmit_skb(session, skb, session->hdr_len);

	if (likely(ret == NET_XMIT_SUCCESS)) {
		atomic_long_add(len, &priv->tx_bytes);
		atomic_long_inc(&priv->tx_packets);
	} else {
		atomic_long_inc(&priv->tx_dropped);
	}
	return NETDEV_TX_OK;
}
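
/* Assemble 64-bit device stats from the atomic counters updated in the
 * TX and RX paths.
 */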
static struct rtnl_link_stats64 *l2tp_eth_get_stats64(struct net_device *dev,
						       struct rtnl_link_stats64 *stats)
{
	struct l2tp_eth *priv = netdev_priv(dev);

	stats->tx_bytes = atomic_long_read(&priv->tx_bytes);
	stats->tx_packets = atomic_long_read(&priv->tx_packets);
	stats->tx_dropped = atomic_long_read(&priv->tx_dropped);
	stats->rx_bytes = atomic_long_read(&priv->rx_bytes);
	stats->rx_packets = atomic_long_read(&priv->rx_packets);
	stats->rx_errors = atomic_long_read(&priv->rx_errors);
	return stats;
}

static struct net_device_ops l2tp_eth_netdev_ops = {
	.ndo_init = l2tp_eth_dev_init,
	.ndo_uninit = l2tp_eth_dev_uninit,
	.ndo_start_xmit = l2tp_eth_dev_xmit,
	.ndo_get_stats64 = l2tp_eth_get_stats64,
	.ndo_set_mac_address = eth_mac_addr,
};

static void l2tp_eth_dev_setup(struct net_device *dev)
{
	ether_setup(dev);
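
	/* Run as an LLTX device: l2tp_xmit_skb() takes the tunnel socket
	 * lock, and taking it while holding the netdev xmit lock produces
	 * lockdep splats, so the xmit lock is bypassed entirely.
	 */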
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->features |= NETIF_F_LLTX;
	dev->netdev_ops = &l2tp_eth_netdev_ops;
	dev->destructor = free_netdev;
}

static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
{
	struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
	struct net_device *dev = spriv->dev;
	struct l2tp_eth *priv = netdev_priv(dev);

	if (session->debug & L2TP_MSG_DATA) {
		unsigned int length;

		length = min(32u, skb->len);
		if (!pskb_may_pull(skb, length))
			goto error;

		pr_debug("%s: eth recv\n", session->name);
		print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
	}

	if (!pskb_may_pull(skb, ETH_HLEN))
		goto error;

	secpath_reset(skb);

	/* checksums verified by L2TP */
	skb->ip_summed = CHECKSUM_NONE;

	skb_dst_drop(skb);
	nf_reset(skb);

	if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
		atomic_long_inc(&priv->rx_packets);
		atomic_long_add(data_len, &priv->rx_bytes);
	} else {
		atomic_long_inc(&priv->rx_errors);
	}
	return;

error:
	atomic_long_inc(&priv->rx_errors);
	kfree_skb(skb);
}
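
/* Session close hook: unregister the pseudowire's net_device (if one is
 * still attached) and drop the module reference taken when the device was
 * created.
 */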
static void l2tp_eth_delete(struct l2tp_session *session)
{
	struct l2tp_eth_sess *spriv;
	struct net_device *dev;

	if (session) {
		spriv = l2tp_session_priv(session);
		dev = spriv->dev;
		if (dev) {
			unregister_netdev(dev);
			spriv->dev = NULL;
			module_put(THIS_MODULE);
		}
	}
}
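
/* session->show hook, used by the L2TP debugfs interface when
 * L2TP_DEBUGFS is enabled: reports which interface backs this session.
 */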
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
static void l2tp_eth_show(struct seq_file *m, void *arg)
{
	struct l2tp_session *session = arg;
	struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
	struct net_device *dev = spriv->dev;

	seq_printf(m, " interface %s\n", dev->name);
}
#endif
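
/* Netlink session_create handler for L2TP_PWTYPE_ETH: look up the parent
 * tunnel, create the L2TP session, then allocate and register a net_device
 * bound to it and add the device to the per-namespace list.
 */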
static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id,
			   u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
	struct net_device *dev;
	char name[IFNAMSIZ];
	struct l2tp_tunnel *tunnel;
	struct l2tp_session *session;
	struct l2tp_eth *priv;
	struct l2tp_eth_sess *spriv;
	int rc;
	struct l2tp_eth_net *pn;

	tunnel = l2tp_tunnel_find(net, tunnel_id);
	if (!tunnel) {
		rc = -ENODEV;
		goto out;
	}

	session = l2tp_session_find(net, tunnel, session_id);
	if (session) {
		rc = -EEXIST;
		goto out;
	}

	if (cfg->ifname) {
		dev = dev_get_by_name(net, cfg->ifname);
		if (dev) {
			dev_put(dev);
			rc = -EEXIST;
			goto out;
		}
		strlcpy(name, cfg->ifname, IFNAMSIZ);
	} else
		strcpy(name, L2TP_ETH_DEV_NAME);

	session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
				      peer_session_id, cfg);
	if (!session) {
		rc = -ENOMEM;
		goto out;
	}

	dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
			   l2tp_eth_dev_setup);
	if (!dev) {
		rc = -ENOMEM;
		goto out_del_session;
	}

	dev_net_set(dev, net);
	if (session->mtu == 0)
		session->mtu = dev->mtu - session->hdr_len;
	dev->mtu = session->mtu;
	dev->needed_headroom += session->hdr_len;

	priv = netdev_priv(dev);
	priv->dev = dev;
	priv->session = session;
	INIT_LIST_HEAD(&priv->list);

	priv->tunnel_sock = tunnel->sock;
	session->recv_skb = l2tp_eth_dev_recv;
	session->session_close = l2tp_eth_delete;
#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
	session->show = l2tp_eth_show;
#endif

	spriv = l2tp_session_priv(session);
	spriv->dev = dev;

	rc = register_netdev(dev);
	if (rc < 0)
		goto out_del_dev;

	__module_get(THIS_MODULE);
	/* Must be done after register_netdev() */
	strlcpy(session->ifname, dev->name, IFNAMSIZ);

	dev_hold(dev);
	pn = l2tp_eth_pernet(dev_net(dev));
	spin_lock(&pn->l2tp_eth_lock);
	list_add(&priv->list, &pn->l2tp_eth_dev_list);
	spin_unlock(&pn->l2tp_eth_lock);

	return 0;

out_del_dev:
	free_netdev(dev);
	spriv->dev = NULL;
out_del_session:
	l2tp_session_delete(session);
out:
	return rc;
}
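
/* Per-network-namespace init: set up the list of l2tpeth devices in this
 * namespace and the spinlock that protects it.
 */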
static __net_init int l2tp_eth_init_net(struct net *net)
{
	struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id);

	INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
	spin_lock_init(&pn->l2tp_eth_lock);

	return 0;
}

static struct pernet_operations l2tp_eth_net_ops = {
	.init = l2tp_eth_init_net,
	.id   = &l2tp_eth_net_id,
	.size = sizeof(struct l2tp_eth_net),
};

static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
	.session_create = l2tp_eth_create,
	.session_delete = l2tp_session_delete,
};
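
/* Module init: register the ethernet pseudowire ops with the L2TP netlink
 * layer, then register the per-net subsystem.
 */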
static int __init l2tp_eth_init(void)
{
	int err = 0;

	err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
	if (err)
		goto out;

	err = register_pernet_device(&l2tp_eth_net_ops);
	if (err)
		goto out_unreg;

	pr_info("L2TP ethernet pseudowire support (L2TPv3)\n");

	return 0;

out_unreg:
	l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
out:
	return err;
}
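
/* Module exit: unregister in the reverse order of init. */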
static void __exit l2tp_eth_exit(void)
{
	unregister_pernet_device(&l2tp_eth_net_ops);
	l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
}
module_init(l2tp_eth_init);
module_exit(l2tp_eth_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("L2TP ethernet pseudowire driver");
MODULE_VERSION("1.0");