2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* Common framework for low-level network console, dump, and debugger code
|
|
|
|
*
|
|
|
|
* Sep 8 2003 Matt Mackall <mpm@selenic.com>
|
|
|
|
*
|
|
|
|
* based on the netconsole code from:
|
|
|
|
*
|
|
|
|
* Copyright (C) 2001 Ingo Molnar <mingo@redhat.com>
|
|
|
|
* Copyright (C) 2002 Red Hat, Inc.
|
|
|
|
*/
|
|
|
|
|
2012-01-29 19:50:43 +04:00
|
|
|
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
|
|
|
|
2009-07-08 22:10:56 +04:00
|
|
|
#include <linux/moduleparam.h>
|
2013-06-04 20:46:26 +04:00
|
|
|
#include <linux/kernel.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <linux/string.h>
|
2005-12-27 07:43:12 +03:00
|
|
|
#include <linux/if_arp.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <linux/inetdevice.h>
|
|
|
|
#include <linux/inet.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/netpoll.h>
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/rcupdate.h>
|
|
|
|
#include <linux/workqueue.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the following.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 11:04:11 +03:00
|
|
|
#include <linux/slab.h>
|
2011-07-15 19:47:34 +04:00
|
|
|
#include <linux/export.h>
|
2012-08-10 05:24:49 +04:00
|
|
|
#include <linux/if_vlan.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <net/tcp.h>
|
|
|
|
#include <net/udp.h>
|
2013-01-08 00:52:41 +04:00
|
|
|
#include <net/addrconf.h>
|
|
|
|
#include <net/ndisc.h>
|
|
|
|
#include <net/ip6_checksum.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <asm/unaligned.h>
|
2009-06-15 14:02:23 +04:00
|
|
|
#include <trace/events/napi.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We maintain a small pool of fully-sized skbs, to make sure the
|
|
|
|
* message gets out even in extreme OOM situations.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define MAX_UDP_CHUNK 1460
|
|
|
|
#define MAX_SKBS 32
|
|
|
|
|
2006-11-14 21:43:58 +03:00
|
|
|
static struct sk_buff_head skb_pool;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
static atomic_t trapped;
|
|
|
|
|
2013-03-15 10:50:52 +04:00
|
|
|
DEFINE_STATIC_SRCU(netpoll_srcu);
|
netpoll: protect napi_poll and poll_controller during dev_[open|close]
Ivan Vercera was recently backporting commit
9c13cb8bb477a83b9a3c9e5a5478a4e21294a760 to a RHEL kernel, and I noticed that,
while this patch protects the tg3 driver from having its ndo_poll_controller
routine called during device initialization, it does nothing for the driver
during shutdown. I.e. it would be entirely possible to have the
ndo_poll_controller method (or subsequently the ndo_poll) routine called for a
driver in the netpoll path on CPU A while in parallel on CPU B, the ndo_close or
ndo_open routine could be called. Given that the two latter routines tend to
initialize and free many data structures that the former two rely on, the result
can easily be data corruption or various other crashes. Furthermore, it seems
that this is potentially a problem with all net drivers that support netpoll,
and so this should ideally be fixed in a common path.
As Ben H pointed out to me, we can't perform dev_open/dev_close in atomic
context, so I've come up with this solution. We can use a mutex to sleep in
open/close paths and just do a mutex_trylock in the napi poll path and abandon
the poll attempt if we're locked, as we'll just retry the poll on the next send
anyway.
I've tested this here by flooding netconsole with messages on a system whose NIC
driver I modified to periodically return NETDEV_TX_BUSY, so that the netpoll tx
workqueue would be forced to send frames and poll the device. While this was
going on I rapidly ifdown/up'ed the interface and watched for any problems.
I've not found any.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Ivan Vecera <ivecera@redhat.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Ben Hutchings <bhutchings@solarflare.com>
CC: Francois Romieu <romieu@fr.zoreil.com>
CC: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-02-05 12:05:43 +04:00
|
|
|
|
2006-10-27 02:46:54 +04:00
|
|
|
#define USEC_PER_POLL 50
|
2008-03-04 23:28:49 +03:00
|
|
|
#define NETPOLL_RX_ENABLED 1
|
|
|
|
#define NETPOLL_RX_DROP 2
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2012-01-29 19:50:44 +04:00
|
|
|
#define MAX_SKB_SIZE \
|
|
|
|
(sizeof(struct ethhdr) + \
|
|
|
|
sizeof(struct iphdr) + \
|
|
|
|
sizeof(struct udphdr) + \
|
|
|
|
MAX_UDP_CHUNK)
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-08-03 11:24:04 +04:00
|
|
|
static void zap_completion_queue(void);
|
2013-01-08 00:52:39 +04:00
|
|
|
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
|
2013-02-11 14:25:30 +04:00
|
|
|
static void netpoll_async_cleanup(struct work_struct *work);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-07-08 22:10:56 +04:00
|
|
|
static unsigned int carrier_timeout = 4;
|
|
|
|
module_param(carrier_timeout, uint, 0644);
|
|
|
|
|
2012-01-29 19:50:43 +04:00
|
|
|
#define np_info(np, fmt, ...) \
|
|
|
|
pr_info("%s: " fmt, np->name, ##__VA_ARGS__)
|
|
|
|
#define np_err(np, fmt, ...) \
|
|
|
|
pr_err("%s: " fmt, np->name, ##__VA_ARGS__)
|
|
|
|
#define np_notice(np, fmt, ...) \
|
|
|
|
pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
|
|
|
|
|
2006-11-22 17:57:56 +03:00
|
|
|
/*
 * Drain the netpoll tx backlog queue from workqueue context.
 *
 * Frames that could not be sent directly from the netpoll fast path are
 * queued on npinfo->txq; this work item retries them against the real
 * driver.  If the driver still cannot accept a frame, the frame is put
 * back at the *head* of the queue (to preserve ordering) and the work is
 * rescheduled to try again later.
 */
static void queue_process(struct work_struct *work)
{
	struct netpoll_info *npinfo =
		container_of(work, struct netpoll_info, tx_work.work);
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = skb_dequeue(&npinfo->txq))) {
		struct net_device *dev = skb->dev;
		const struct net_device_ops *ops = dev->netdev_ops;
		struct netdev_queue *txq;

		/* Device went away or was brought down: drop the frame. */
		if (!netif_device_present(dev) || !netif_running(dev)) {
			__kfree_skb(skb);
			continue;
		}

		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

		/* Mirror the hard_start_xmit calling convention: interrupts
		 * off and the per-queue tx lock held around ndo_start_xmit().
		 */
		local_irq_save(flags);
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_xmit_frozen_or_stopped(txq) ||
		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
			/* Driver is busy: requeue at the head so frame order
			 * is preserved, then back off and retry later.
			 */
			skb_queue_head(&npinfo->txq, skb);
			__netif_tx_unlock(txq);
			local_irq_restore(flags);

			schedule_delayed_work(&npinfo->tx_work, HZ/10);
			return;
		}
		__netif_tx_unlock(txq);
		local_irq_restore(flags);
	}
}
|
|
|
|
|
2006-11-15 08:40:42 +03:00
|
|
|
/*
 * Verify the UDP checksum of an incoming netpoll frame.
 *
 * Returns 0 when the checksum is known good (or absent/already verified),
 * non-zero when full software verification fails.
 */
static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
			    unsigned short ulen, __be32 saddr, __be32 daddr)
{
	__wsum pseudo;

	/* A zero UDP checksum means "not computed"; the skb may also have
	 * been validated by hardware already.  Either way, accept it.
	 */
	if (uh->check == 0 || skb_csum_unnecessary(skb))
		return 0;

	pseudo = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);

	/* With a full hardware checksum available, adding the pseudo-header
	 * sum and folding must yield zero for a valid packet.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		if (!csum_fold(csum_add(pseudo, skb->csum)))
			return 0;
	}

	/* Fall back to a full software walk over the payload. */
	skb->csum = pseudo;

	return __skb_checksum_complete(skb);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check whether delayed processing was scheduled for our NIC. If so,
|
|
|
|
* we attempt to grab the poll lock and use ->poll() to pump the card.
|
|
|
|
* If this fails, either we've recursed in ->poll() or it's already
|
|
|
|
* running on another CPU.
|
|
|
|
*
|
|
|
|
* Note: we don't mask interrupts with this lock because we're using
|
|
|
|
* trylock here and interrupts are already disabled in the softirq
|
|
|
|
* case. Further, we test the poll_owner to avoid recursion on UP
|
|
|
|
* systems where the lock doesn't exist.
|
|
|
|
*
|
|
|
|
* In cases where there is bi-directional communications, reading only
|
|
|
|
* one message at a time can lead to packets being dropped by the
|
|
|
|
* network adapter, forcing superfluous retries and possibly timeouts.
|
|
|
|
* Thus, we set our budget to greater than 1.
|
|
|
|
*/
|
2007-10-30 07:28:47 +03:00
|
|
|
/*
 * Run one NAPI instance's ->poll() from the netpoll path.
 *
 * Called with napi->poll_lock held (see poll_napi()).  Returns the budget
 * remaining after this poll; the unmodified budget is returned when the
 * instance was not scheduled.
 */
static int poll_one_napi(struct netpoll_info *npinfo,
			 struct napi_struct *napi, int budget)
{
	int work;

	/* net_rx_action's ->poll() invocations and our's are
	 * synchronized by this test which is only made while
	 * holding the napi->poll_lock.
	 */
	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
		return budget;

	/* Mark rx as trapped/dropping *before* polling so received frames
	 * are diverted through the netpoll rx path for the duration.
	 */
	npinfo->rx_flags |= NETPOLL_RX_DROP;
	atomic_inc(&trapped);
	/* NPSVC tells netif_rx_complete() not to unlink the napi_struct
	 * from the per-cpu poll_list, which belongs to net_rx_action.
	 */
	set_bit(NAPI_STATE_NPSVC, &napi->state);

	work = napi->poll(napi, budget);
	trace_napi_poll(napi);

	/* Undo the markers in reverse order of setup. */
	clear_bit(NAPI_STATE_NPSVC, &napi->state);
	atomic_dec(&trapped);
	npinfo->rx_flags &= ~NETPOLL_RX_DROP;

	return budget - work;
}
|
|
|
|
|
2007-11-20 06:18:11 +03:00
|
|
|
static void poll_napi(struct net_device *dev)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 03:41:36 +04:00
|
|
|
struct napi_struct *napi;
|
2005-04-17 02:20:36 +04:00
|
|
|
int budget = 16;
|
|
|
|
|
2010-10-19 11:04:26 +04:00
|
|
|
list_for_each_entry(napi, &dev->napi_list, dev_list) {
|
2007-10-30 07:28:47 +03:00
|
|
|
if (napi->poll_owner != smp_processor_id() &&
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 03:41:36 +04:00
|
|
|
spin_trylock(&napi->poll_lock)) {
|
2012-08-10 05:24:42 +04:00
|
|
|
budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
|
|
|
|
napi, budget);
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 03:41:36 +04:00
|
|
|
spin_unlock(&napi->poll_lock);
|
2007-10-30 07:28:47 +03:00
|
|
|
|
2012-08-25 01:41:11 +04:00
|
|
|
if (!budget)
|
2007-10-30 07:28:47 +03:00
|
|
|
break;
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 03:41:36 +04:00
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
static void service_neigh_queue(struct netpoll_info *npi)
|
2006-06-26 11:04:27 +04:00
|
|
|
{
|
2007-11-20 06:18:11 +03:00
|
|
|
if (npi) {
|
|
|
|
struct sk_buff *skb;
|
2006-06-26 11:04:27 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
while ((skb = skb_dequeue(&npi->neigh_tx)))
|
|
|
|
netpoll_neigh_reply(skb, npi);
|
2006-06-26 11:04:27 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2011-06-30 19:08:57 +04:00
|
|
|
static void netpoll_poll_dev(struct net_device *dev)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2009-05-11 04:36:35 +04:00
|
|
|
const struct net_device_ops *ops;
|
2012-08-10 05:24:42 +04:00
|
|
|
struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
|
2007-11-20 06:18:11 +03:00
|
|
|
|
netpoll: protect napi_poll and poll_controller during dev_[open|close]
Ivan Vercera was recently backporting commit
9c13cb8bb477a83b9a3c9e5a5478a4e21294a760 to a RHEL kernel, and I noticed that,
while this patch protects the tg3 driver from having its ndo_poll_controller
routine called during device initalization, it does nothing for the driver
during shutdown. I.e. it would be entirely possible to have the
ndo_poll_controller method (or subsequently the ndo_poll) routine called for a
driver in the netpoll path on CPU A while in parallel on CPU B, the ndo_close or
ndo_open routine could be called. Given that the two latter routines tend to
initizlize and free many data structures that the former two rely on, the result
can easily be data corruption or various other crashes. Furthermore, it seems
that this is potentially a problem with all net drivers that support netpoll,
and so this should ideally be fixed in a common path.
As Ben H Pointed out to me, we can't preform dev_open/dev_close in atomic
context, so I've come up with this solution. We can use a mutex to sleep in
open/close paths and just do a mutex_trylock in the napi poll path and abandon
the poll attempt if we're locked, as we'll just retry the poll on the next send
anyway.
I've tested this here by flooding netconsole with messages on a system whos nic
driver I modfied to periodically return NETDEV_TX_BUSY, so that the netpoll tx
workqueue would be forced to send frames and poll the device. While this was
going on I rapidly ifdown/up'ed the interface and watched for any problems.
I've not found any.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Ivan Vecera <ivecera@redhat.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Ben Hutchings <bhutchings@solarflare.com>
CC: Francois Romieu <romieu@fr.zoreil.com>
CC: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-02-05 12:05:43 +04:00
|
|
|
/* Don't do any rx activity if the dev_lock mutex is held
|
|
|
|
* the dev_open/close paths use this to block netpoll activity
|
|
|
|
* while changing device state
|
|
|
|
*/
|
2013-05-06 06:15:13 +04:00
|
|
|
if (down_trylock(&ni->dev_lock))
|
netpoll: protect napi_poll and poll_controller during dev_[open|close]
Ivan Vercera was recently backporting commit
9c13cb8bb477a83b9a3c9e5a5478a4e21294a760 to a RHEL kernel, and I noticed that,
while this patch protects the tg3 driver from having its ndo_poll_controller
routine called during device initalization, it does nothing for the driver
during shutdown. I.e. it would be entirely possible to have the
ndo_poll_controller method (or subsequently the ndo_poll) routine called for a
driver in the netpoll path on CPU A while in parallel on CPU B, the ndo_close or
ndo_open routine could be called. Given that the two latter routines tend to
initizlize and free many data structures that the former two rely on, the result
can easily be data corruption or various other crashes. Furthermore, it seems
that this is potentially a problem with all net drivers that support netpoll,
and so this should ideally be fixed in a common path.
As Ben H Pointed out to me, we can't preform dev_open/dev_close in atomic
context, so I've come up with this solution. We can use a mutex to sleep in
open/close paths and just do a mutex_trylock in the napi poll path and abandon
the poll attempt if we're locked, as we'll just retry the poll on the next send
anyway.
I've tested this here by flooding netconsole with messages on a system whos nic
driver I modfied to periodically return NETDEV_TX_BUSY, so that the netpoll tx
workqueue would be forced to send frames and poll the device. While this was
going on I rapidly ifdown/up'ed the interface and watched for any problems.
I've not found any.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Ivan Vecera <ivecera@redhat.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Ben Hutchings <bhutchings@solarflare.com>
CC: Francois Romieu <romieu@fr.zoreil.com>
CC: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-02-05 12:05:43 +04:00
|
|
|
return;
|
|
|
|
|
2013-02-13 20:32:42 +04:00
|
|
|
if (!netif_running(dev)) {
|
2013-04-30 09:35:05 +04:00
|
|
|
up(&ni->dev_lock);
|
2009-05-11 04:36:35 +04:00
|
|
|
return;
|
2013-02-13 20:32:42 +04:00
|
|
|
}
|
2009-05-11 04:36:35 +04:00
|
|
|
|
|
|
|
ops = dev->netdev_ops;
|
2013-02-13 20:32:42 +04:00
|
|
|
if (!ops->ndo_poll_controller) {
|
2013-04-30 09:35:05 +04:00
|
|
|
up(&ni->dev_lock);
|
2005-04-17 02:20:36 +04:00
|
|
|
return;
|
2013-02-13 20:32:42 +04:00
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/* Process pending work on NIC */
|
2008-11-20 08:32:24 +03:00
|
|
|
ops->ndo_poll_controller(dev);
|
2007-11-20 06:18:11 +03:00
|
|
|
|
|
|
|
poll_napi(dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-04-30 09:35:05 +04:00
|
|
|
up(&ni->dev_lock);
|
netpoll: protect napi_poll and poll_controller during dev_[open|close]
Ivan Vercera was recently backporting commit
9c13cb8bb477a83b9a3c9e5a5478a4e21294a760 to a RHEL kernel, and I noticed that,
while this patch protects the tg3 driver from having its ndo_poll_controller
routine called during device initalization, it does nothing for the driver
during shutdown. I.e. it would be entirely possible to have the
ndo_poll_controller method (or subsequently the ndo_poll) routine called for a
driver in the netpoll path on CPU A while in parallel on CPU B, the ndo_close or
ndo_open routine could be called. Given that the two latter routines tend to
initizlize and free many data structures that the former two rely on, the result
can easily be data corruption or various other crashes. Furthermore, it seems
that this is potentially a problem with all net drivers that support netpoll,
and so this should ideally be fixed in a common path.
As Ben H Pointed out to me, we can't preform dev_open/dev_close in atomic
context, so I've come up with this solution. We can use a mutex to sleep in
open/close paths and just do a mutex_trylock in the napi poll path and abandon
the poll attempt if we're locked, as we'll just retry the poll on the next send
anyway.
I've tested this here by flooding netconsole with messages on a system whos nic
driver I modfied to periodically return NETDEV_TX_BUSY, so that the netpoll tx
workqueue would be forced to send frames and poll the device. While this was
going on I rapidly ifdown/up'ed the interface and watched for any problems.
I've not found any.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Ivan Vecera <ivecera@redhat.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Ben Hutchings <bhutchings@solarflare.com>
CC: Francois Romieu <romieu@fr.zoreil.com>
CC: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-02-05 12:05:43 +04:00
|
|
|
|
2012-02-14 14:11:59 +04:00
|
|
|
if (dev->flags & IFF_SLAVE) {
|
2012-08-10 05:24:42 +04:00
|
|
|
if (ni) {
|
2013-01-04 02:48:55 +04:00
|
|
|
struct net_device *bond_dev;
|
2011-02-18 02:43:34 +03:00
|
|
|
struct sk_buff *skb;
|
2013-01-04 02:48:55 +04:00
|
|
|
struct netpoll_info *bond_ni;
|
|
|
|
|
|
|
|
bond_dev = netdev_master_upper_dev_get_rcu(dev);
|
|
|
|
bond_ni = rcu_dereference_bh(bond_dev->npinfo);
|
2013-01-08 00:52:39 +04:00
|
|
|
while ((skb = skb_dequeue(&ni->neigh_tx))) {
|
2011-02-18 02:43:34 +03:00
|
|
|
skb->dev = bond_dev;
|
2013-01-08 00:52:39 +04:00
|
|
|
skb_queue_tail(&bond_ni->neigh_tx, skb);
|
2011-02-18 02:43:34 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
service_neigh_queue(ni);
|
2006-06-26 11:04:27 +04:00
|
|
|
|
2010-08-03 11:24:04 +04:00
|
|
|
zap_completion_queue();
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2013-05-27 23:53:31 +04:00
|
|
|
void netpoll_rx_disable(struct net_device *dev)
|
netpoll: protect napi_poll and poll_controller during dev_[open|close]
Ivan Vecera was recently backporting commit
9c13cb8bb477a83b9a3c9e5a5478a4e21294a760 to a RHEL kernel, and I noticed that,
while this patch protects the tg3 driver from having its ndo_poll_controller
routine called during device initialization, it does nothing for the driver
during shutdown. I.e. it would be entirely possible to have the
ndo_poll_controller method (or subsequently the ndo_poll) routine called for a
driver in the netpoll path on CPU A while in parallel on CPU B, the ndo_close or
ndo_open routine could be called. Given that the two latter routines tend to
initialize and free many data structures that the former two rely on, the result
can easily be data corruption or various other crashes. Furthermore, it seems
that this is potentially a problem with all net drivers that support netpoll,
and so this should ideally be fixed in a common path.
As Ben H pointed out to me, we can't perform dev_open/dev_close in atomic
context, so I've come up with this solution. We can use a mutex to sleep in
open/close paths and just do a mutex_trylock in the napi poll path and abandon
the poll attempt if we're locked, as we'll just retry the poll on the next send
anyway.
I've tested this here by flooding netconsole with messages on a system whose NIC
driver I modified to periodically return NETDEV_TX_BUSY, so that the netpoll tx
workqueue would be forced to send frames and poll the device. While this was
going on I rapidly ifdown/up'ed the interface and watched for any problems.
I've not found any.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Ivan Vecera <ivecera@redhat.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Ben Hutchings <bhutchings@solarflare.com>
CC: Francois Romieu <romieu@fr.zoreil.com>
CC: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-02-05 12:05:43 +04:00
|
|
|
{
|
|
|
|
struct netpoll_info *ni;
|
|
|
|
int idx;
|
|
|
|
might_sleep();
|
|
|
|
idx = srcu_read_lock(&netpoll_srcu);
|
|
|
|
ni = srcu_dereference(dev->npinfo, &netpoll_srcu);
|
|
|
|
if (ni)
|
2013-04-30 09:35:05 +04:00
|
|
|
down(&ni->dev_lock);
|
netpoll: protect napi_poll and poll_controller during dev_[open|close]
Ivan Vecera was recently backporting commit
9c13cb8bb477a83b9a3c9e5a5478a4e21294a760 to a RHEL kernel, and I noticed that,
while this patch protects the tg3 driver from having its ndo_poll_controller
routine called during device initialization, it does nothing for the driver
during shutdown. I.e. it would be entirely possible to have the
ndo_poll_controller method (or subsequently the ndo_poll) routine called for a
driver in the netpoll path on CPU A while in parallel on CPU B, the ndo_close or
ndo_open routine could be called. Given that the two latter routines tend to
initialize and free many data structures that the former two rely on, the result
can easily be data corruption or various other crashes. Furthermore, it seems
that this is potentially a problem with all net drivers that support netpoll,
and so this should ideally be fixed in a common path.
As Ben H pointed out to me, we can't perform dev_open/dev_close in atomic
context, so I've come up with this solution. We can use a mutex to sleep in
open/close paths and just do a mutex_trylock in the napi poll path and abandon
the poll attempt if we're locked, as we'll just retry the poll on the next send
anyway.
I've tested this here by flooding netconsole with messages on a system whose NIC
driver I modified to periodically return NETDEV_TX_BUSY, so that the netpoll tx
workqueue would be forced to send frames and poll the device. While this was
going on I rapidly ifdown/up'ed the interface and watched for any problems.
I've not found any.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Ivan Vecera <ivecera@redhat.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Ben Hutchings <bhutchings@solarflare.com>
CC: Francois Romieu <romieu@fr.zoreil.com>
CC: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-02-05 12:05:43 +04:00
|
|
|
srcu_read_unlock(&netpoll_srcu, idx);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(netpoll_rx_disable);
|
|
|
|
|
|
|
|
void netpoll_rx_enable(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct netpoll_info *ni;
|
|
|
|
rcu_read_lock();
|
|
|
|
ni = rcu_dereference(dev->npinfo);
|
|
|
|
if (ni)
|
2013-04-30 09:35:05 +04:00
|
|
|
up(&ni->dev_lock);
|
netpoll: protect napi_poll and poll_controller during dev_[open|close]
Ivan Vecera was recently backporting commit
9c13cb8bb477a83b9a3c9e5a5478a4e21294a760 to a RHEL kernel, and I noticed that,
while this patch protects the tg3 driver from having its ndo_poll_controller
routine called during device initialization, it does nothing for the driver
during shutdown. I.e. it would be entirely possible to have the
ndo_poll_controller method (or subsequently the ndo_poll) routine called for a
driver in the netpoll path on CPU A while in parallel on CPU B, the ndo_close or
ndo_open routine could be called. Given that the two latter routines tend to
initialize and free many data structures that the former two rely on, the result
can easily be data corruption or various other crashes. Furthermore, it seems
that this is potentially a problem with all net drivers that support netpoll,
and so this should ideally be fixed in a common path.
As Ben H pointed out to me, we can't perform dev_open/dev_close in atomic
context, so I've come up with this solution. We can use a mutex to sleep in
open/close paths and just do a mutex_trylock in the napi poll path and abandon
the poll attempt if we're locked, as we'll just retry the poll on the next send
anyway.
I've tested this here by flooding netconsole with messages on a system whose NIC
driver I modified to periodically return NETDEV_TX_BUSY, so that the netpoll tx
workqueue would be forced to send frames and poll the device. While this was
going on I rapidly ifdown/up'ed the interface and watched for any problems.
I've not found any.
Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
CC: Ivan Vecera <ivecera@redhat.com>
CC: "David S. Miller" <davem@davemloft.net>
CC: Ben Hutchings <bhutchings@solarflare.com>
CC: Francois Romieu <romieu@fr.zoreil.com>
CC: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2013-02-05 12:05:43 +04:00
|
|
|
rcu_read_unlock();
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(netpoll_rx_enable);
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
static void refill_skbs(void)
|
|
|
|
{
|
|
|
|
struct sk_buff *skb;
|
|
|
|
unsigned long flags;
|
|
|
|
|
2006-11-14 21:43:58 +03:00
|
|
|
spin_lock_irqsave(&skb_pool.lock, flags);
|
|
|
|
while (skb_pool.qlen < MAX_SKBS) {
|
2005-04-17 02:20:36 +04:00
|
|
|
skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC);
|
|
|
|
if (!skb)
|
|
|
|
break;
|
|
|
|
|
2006-11-14 21:43:58 +03:00
|
|
|
__skb_queue_tail(&skb_pool, skb);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2006-11-14 21:43:58 +03:00
|
|
|
spin_unlock_irqrestore(&skb_pool.lock, flags);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2010-08-03 11:24:04 +04:00
|
|
|
static void zap_completion_queue(void)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
struct softnet_data *sd = &get_cpu_var(softnet_data);
|
|
|
|
|
|
|
|
if (sd->completion_queue) {
|
|
|
|
struct sk_buff *clist;
|
|
|
|
|
|
|
|
local_irq_save(flags);
|
|
|
|
clist = sd->completion_queue;
|
|
|
|
sd->completion_queue = NULL;
|
|
|
|
local_irq_restore(flags);
|
|
|
|
|
|
|
|
while (clist != NULL) {
|
|
|
|
struct sk_buff *skb = clist;
|
|
|
|
clist = clist->next;
|
|
|
|
if (skb->destructor) {
|
|
|
|
atomic_inc(&skb->users);
|
|
|
|
dev_kfree_skb_any(skb); /* put this one back */
|
|
|
|
} else {
|
|
|
|
__kfree_skb(skb);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
put_cpu_var(softnet_data);
|
|
|
|
}
|
|
|
|
|
2006-11-14 21:43:58 +03:00
|
|
|
static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2006-11-14 21:43:58 +03:00
|
|
|
int count = 0;
|
|
|
|
struct sk_buff *skb;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-08-03 11:24:04 +04:00
|
|
|
zap_completion_queue();
|
2006-11-14 21:43:58 +03:00
|
|
|
refill_skbs();
|
2005-04-17 02:20:36 +04:00
|
|
|
repeat:
|
|
|
|
|
|
|
|
skb = alloc_skb(len, GFP_ATOMIC);
|
2006-11-14 21:43:58 +03:00
|
|
|
if (!skb)
|
|
|
|
skb = skb_dequeue(&skb_pool);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
if (!skb) {
|
2006-11-14 21:43:58 +03:00
|
|
|
if (++count < 10) {
|
2011-06-30 19:08:58 +04:00
|
|
|
netpoll_poll_dev(np->dev);
|
2006-11-14 21:43:58 +03:00
|
|
|
goto repeat;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2006-11-14 21:43:58 +03:00
|
|
|
return NULL;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
atomic_set(&skb->users, 1);
|
|
|
|
skb_reserve(skb, reserve);
|
|
|
|
return skb;
|
|
|
|
}
|
|
|
|
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 03:41:36 +04:00
|
|
|
static int netpoll_owner_active(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct napi_struct *napi;
|
|
|
|
|
|
|
|
list_for_each_entry(napi, &dev->napi_list, dev_list) {
|
|
|
|
if (napi->poll_owner == smp_processor_id())
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-08-10 05:24:42 +04:00
|
|
|
/* call with IRQ disabled */
|
2010-10-13 20:01:49 +04:00
|
|
|
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
|
|
|
|
struct net_device *dev)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2006-10-27 02:46:54 +04:00
|
|
|
int status = NETDEV_TX_BUSY;
|
|
|
|
unsigned long tries;
|
2008-11-21 07:14:53 +03:00
|
|
|
const struct net_device_ops *ops = dev->netdev_ops;
|
2010-06-10 20:12:44 +04:00
|
|
|
/* It is up to the caller to keep npinfo alive. */
|
2012-08-10 05:24:42 +04:00
|
|
|
struct netpoll_info *npinfo;
|
2006-10-27 02:46:54 +04:00
|
|
|
|
2012-08-10 05:24:42 +04:00
|
|
|
WARN_ON_ONCE(!irqs_disabled());
|
|
|
|
|
|
|
|
npinfo = rcu_dereference_bh(np->dev->npinfo);
|
2007-02-09 17:24:36 +03:00
|
|
|
if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
|
|
|
|
__kfree_skb(skb);
|
|
|
|
return;
|
|
|
|
}
|
2006-10-27 02:46:54 +04:00
|
|
|
|
|
|
|
/* don't get messages out of order, and no recursion */
|
[NET]: Make NAPI polling independent of struct net_device objects.
Several devices have multiple independant RX queues per net
device, and some have a single interrupt doorbell for several
queues.
In either case, it's easier to support layouts like that if the
structure representing the poll is independant from the net
device itself.
The signature of the ->poll() call back goes from:
int foo_poll(struct net_device *dev, int *budget)
to
int foo_poll(struct napi_struct *napi, int budget)
The caller is returned the number of RX packets processed (or
the number of "NAPI credits" consumed if you want to get
abstract). The callee no longer messes around bumping
dev->quota, *budget, etc. because that is all handled in the
caller upon return.
The napi_struct is to be embedded in the device driver private data
structures.
Furthermore, it is the driver's responsibility to disable all NAPI
instances in it's ->stop() device close handler. Since the
napi_struct is privatized into the driver's private data structures,
only the driver knows how to get at all of the napi_struct instances
it may have per-device.
With lots of help and suggestions from Rusty Russell, Roland Dreier,
Michael Chan, Jeff Garzik, and Jamal Hadi Salim.
Bug fixes from Thomas Graf, Roland Dreier, Peter Zijlstra,
Joseph Fannin, Scott Wood, Hans J. Koch, and Michael Chan.
[ Ported to current tree and all drivers converted. Integrated
Stephen's follow-on kerneldoc additions, and restored poll_list
handling to the old style to fix mutual exclusion issues. -DaveM ]
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-10-04 03:41:36 +04:00
|
|
|
if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
|
2008-07-17 12:56:23 +04:00
|
|
|
struct netdev_queue *txq;
|
2006-12-12 04:24:46 +03:00
|
|
|
|
2014-01-10 12:18:26 +04:00
|
|
|
txq = netdev_pick_tx(dev, skb, NULL);
|
2008-07-17 12:56:23 +04:00
|
|
|
|
2007-06-27 11:39:42 +04:00
|
|
|
/* try until next clock tick */
|
|
|
|
for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
|
|
|
|
tries > 0; --tries) {
|
2008-07-17 12:56:23 +04:00
|
|
|
if (__netif_tx_trylock(txq)) {
|
2011-11-28 20:32:44 +04:00
|
|
|
if (!netif_xmit_stopped(txq)) {
|
2012-08-10 05:24:49 +04:00
|
|
|
if (vlan_tx_tag_present(skb) &&
|
2013-04-19 06:04:30 +04:00
|
|
|
!vlan_hw_offload_capable(netif_skb_features(skb),
|
|
|
|
skb->vlan_proto)) {
|
|
|
|
skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
|
2014-01-03 04:50:52 +04:00
|
|
|
if (unlikely(!skb)) {
|
|
|
|
/* This is actually a packet drop, but we
|
|
|
|
* don't want the code at the end of this
|
|
|
|
* function to try and re-queue a NULL skb.
|
|
|
|
*/
|
|
|
|
status = NETDEV_TX_OK;
|
|
|
|
goto unlock_txq;
|
|
|
|
}
|
2012-08-10 05:24:49 +04:00
|
|
|
skb->vlan_tci = 0;
|
|
|
|
}
|
|
|
|
|
2008-11-21 07:14:53 +03:00
|
|
|
status = ops->ndo_start_xmit(skb, dev);
|
2009-05-26 09:58:01 +04:00
|
|
|
if (status == NETDEV_TX_OK)
|
|
|
|
txq_trans_update(txq);
|
|
|
|
}
|
2014-01-03 04:50:52 +04:00
|
|
|
unlock_txq:
|
2008-07-17 12:56:23 +04:00
|
|
|
__netif_tx_unlock(txq);
|
2006-12-10 01:01:49 +03:00
|
|
|
|
|
|
|
if (status == NETDEV_TX_OK)
|
|
|
|
break;
|
|
|
|
|
|
|
|
}
|
2007-06-27 11:39:42 +04:00
|
|
|
|
|
|
|
/* tickle device maybe there is some cleanup */
|
2011-06-30 19:08:58 +04:00
|
|
|
netpoll_poll_dev(np->dev);
|
2007-06-27 11:39:42 +04:00
|
|
|
|
|
|
|
udelay(USEC_PER_POLL);
|
2005-08-12 06:25:54 +04:00
|
|
|
}
|
2009-08-21 07:33:36 +04:00
|
|
|
|
|
|
|
WARN_ONCE(!irqs_disabled(),
|
2012-08-10 05:24:42 +04:00
|
|
|
"netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
|
2009-08-21 07:33:36 +04:00
|
|
|
dev->name, ops->ndo_start_xmit);
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2006-10-27 02:46:54 +04:00
|
|
|
if (status != NETDEV_TX_OK) {
|
2006-10-27 02:46:55 +04:00
|
|
|
skb_queue_tail(&npinfo->txq, skb);
|
2006-12-05 17:37:56 +03:00
|
|
|
schedule_delayed_work(&npinfo->tx_work,0);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
}
|
2010-10-13 20:01:49 +04:00
|
|
|
EXPORT_SYMBOL(netpoll_send_skb_on_dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
|
|
|
|
{
|
2012-06-12 23:30:21 +04:00
|
|
|
int total_len, ip_len, udp_len;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct sk_buff *skb;
|
|
|
|
struct udphdr *udph;
|
|
|
|
struct iphdr *iph;
|
|
|
|
struct ethhdr *eth;
|
2012-08-24 05:47:26 +04:00
|
|
|
static atomic_t ip_ident;
|
2013-01-08 00:52:41 +04:00
|
|
|
struct ipv6hdr *ip6h;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
udp_len = len + sizeof(*udph);
|
2013-01-08 00:52:41 +04:00
|
|
|
if (np->ipv6)
|
|
|
|
ip_len = udp_len + sizeof(*ip6h);
|
|
|
|
else
|
2013-01-08 00:52:39 +04:00
|
|
|
ip_len = udp_len + sizeof(*iph);
|
|
|
|
|
2012-06-12 23:30:21 +04:00
|
|
|
total_len = ip_len + LL_RESERVED_SPACE(np->dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2012-06-12 23:30:21 +04:00
|
|
|
skb = find_skb(np, total_len + np->dev->needed_tailroom,
|
|
|
|
total_len - len);
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!skb)
|
|
|
|
return;
|
|
|
|
|
2007-03-31 18:55:19 +04:00
|
|
|
skb_copy_to_linear_data(skb, msg, len);
|
2012-06-12 23:30:21 +04:00
|
|
|
skb_put(skb, len);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-03-13 20:28:48 +03:00
|
|
|
skb_push(skb, sizeof(*udph));
|
|
|
|
skb_reset_transport_header(skb);
|
|
|
|
udph = udp_hdr(skb);
|
2005-04-17 02:20:36 +04:00
|
|
|
udph->source = htons(np->local_port);
|
|
|
|
udph->dest = htons(np->remote_port);
|
|
|
|
udph->len = htons(udp_len);
|
2013-01-08 00:52:39 +04:00
|
|
|
|
2013-01-08 00:52:41 +04:00
|
|
|
if (np->ipv6) {
|
|
|
|
udph->check = 0;
|
|
|
|
udph->check = csum_ipv6_magic(&np->local_ip.in6,
|
|
|
|
&np->remote_ip.in6,
|
|
|
|
udp_len, IPPROTO_UDP,
|
|
|
|
csum_partial(udph, udp_len, 0));
|
|
|
|
if (udph->check == 0)
|
|
|
|
udph->check = CSUM_MANGLED_0;
|
|
|
|
|
|
|
|
skb_push(skb, sizeof(*ip6h));
|
|
|
|
skb_reset_network_header(skb);
|
|
|
|
ip6h = ipv6_hdr(skb);
|
|
|
|
|
|
|
|
/* ip6h->version = 6; ip6h->priority = 0; */
|
|
|
|
put_unaligned(0x60, (unsigned char *)ip6h);
|
|
|
|
ip6h->flow_lbl[0] = 0;
|
|
|
|
ip6h->flow_lbl[1] = 0;
|
|
|
|
ip6h->flow_lbl[2] = 0;
|
|
|
|
|
|
|
|
ip6h->payload_len = htons(sizeof(struct udphdr) + len);
|
|
|
|
ip6h->nexthdr = IPPROTO_UDP;
|
|
|
|
ip6h->hop_limit = 32;
|
|
|
|
ip6h->saddr = np->local_ip.in6;
|
|
|
|
ip6h->daddr = np->remote_ip.in6;
|
|
|
|
|
|
|
|
eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
|
|
|
|
skb_reset_mac_header(skb);
|
|
|
|
skb->protocol = eth->h_proto = htons(ETH_P_IPV6);
|
|
|
|
} else {
|
2013-01-08 00:52:39 +04:00
|
|
|
udph->check = 0;
|
|
|
|
udph->check = csum_tcpudp_magic(np->local_ip.ip,
|
|
|
|
np->remote_ip.ip,
|
|
|
|
udp_len, IPPROTO_UDP,
|
|
|
|
csum_partial(udph, udp_len, 0));
|
|
|
|
if (udph->check == 0)
|
|
|
|
udph->check = CSUM_MANGLED_0;
|
|
|
|
|
|
|
|
skb_push(skb, sizeof(*iph));
|
|
|
|
skb_reset_network_header(skb);
|
|
|
|
iph = ip_hdr(skb);
|
|
|
|
|
|
|
|
/* iph->version = 4; iph->ihl = 5; */
|
|
|
|
put_unaligned(0x45, (unsigned char *)iph);
|
|
|
|
iph->tos = 0;
|
|
|
|
put_unaligned(htons(ip_len), &(iph->tot_len));
|
|
|
|
iph->id = htons(atomic_inc_return(&ip_ident));
|
|
|
|
iph->frag_off = 0;
|
|
|
|
iph->ttl = 64;
|
|
|
|
iph->protocol = IPPROTO_UDP;
|
|
|
|
iph->check = 0;
|
|
|
|
put_unaligned(np->local_ip.ip, &(iph->saddr));
|
|
|
|
put_unaligned(np->remote_ip.ip, &(iph->daddr));
|
|
|
|
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
|
|
|
|
|
|
|
|
eth = (struct ethhdr *) skb_push(skb, ETH_HLEN);
|
|
|
|
skb_reset_mac_header(skb);
|
|
|
|
skb->protocol = eth->h_proto = htons(ETH_P_IP);
|
|
|
|
}
|
|
|
|
|
2014-01-20 21:52:18 +04:00
|
|
|
ether_addr_copy(eth->h_source, np->dev->dev_addr);
|
|
|
|
ether_addr_copy(eth->h_dest, np->remote_mac);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
skb->dev = np->dev;
|
|
|
|
|
|
|
|
netpoll_send_skb(np, skb);
|
|
|
|
}
|
2010-07-10 01:22:04 +04:00
|
|
|
EXPORT_SYMBOL(netpoll_send_udp);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2013-01-08 00:52:41 +04:00
|
|
|
int size, type = ARPOP_REPLY;
|
2006-11-15 07:48:11 +03:00
|
|
|
__be32 sip, tip;
|
2006-12-08 11:05:55 +03:00
|
|
|
unsigned char *sha;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct sk_buff *send_skb;
|
2010-01-12 17:27:30 +03:00
|
|
|
struct netpoll *np, *tmp;
|
|
|
|
unsigned long flags;
|
2011-11-18 06:20:04 +04:00
|
|
|
int hlen, tlen;
|
2013-01-08 00:52:39 +04:00
|
|
|
int hits = 0, proto;
|
2010-01-12 17:27:30 +03:00
|
|
|
|
|
|
|
if (list_empty(&npinfo->rx_np))
|
|
|
|
return;
|
|
|
|
|
|
|
|
/* Before checking the packet, we do some early
|
|
|
|
inspection whether this is interesting at all */
|
|
|
|
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
|
|
|
list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
|
|
|
|
if (np->dev == skb->dev)
|
|
|
|
hits++;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2010-01-12 17:27:30 +03:00
|
|
|
/* No netpoll struct is using this dev */
|
|
|
|
if (!hits)
|
2005-06-23 09:05:31 +04:00
|
|
|
return;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
proto = ntohs(eth_hdr(skb)->h_proto);
|
2013-09-11 07:31:53 +04:00
|
|
|
if (proto == ETH_P_ARP) {
|
2013-01-08 00:52:41 +04:00
|
|
|
struct arphdr *arp;
|
|
|
|
unsigned char *arp_ptr;
|
2013-01-08 00:52:39 +04:00
|
|
|
/* No arp on this interface */
|
|
|
|
if (skb->dev->flags & IFF_NOARP)
|
|
|
|
return;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
if (!pskb_may_pull(skb, arp_hdr_len(skb->dev)))
|
|
|
|
return;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
skb_reset_network_header(skb);
|
|
|
|
skb_reset_transport_header(skb);
|
|
|
|
arp = arp_hdr(skb);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
|
|
|
|
arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
|
|
|
|
arp->ar_pro != htons(ETH_P_IP) ||
|
|
|
|
arp->ar_op != htons(ARPOP_REQUEST))
|
|
|
|
return;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
arp_ptr = (unsigned char *)(arp+1);
|
|
|
|
/* save the location of the src hw addr */
|
|
|
|
sha = arp_ptr;
|
|
|
|
arp_ptr += skb->dev->addr_len;
|
|
|
|
memcpy(&sip, arp_ptr, 4);
|
|
|
|
arp_ptr += 4;
|
|
|
|
/* If we actually cared about dst hw addr,
|
|
|
|
it would get copied here */
|
|
|
|
arp_ptr += skb->dev->addr_len;
|
|
|
|
memcpy(&tip, arp_ptr, 4);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
/* Should we ignore arp? */
|
|
|
|
if (ipv4_is_loopback(tip) || ipv4_is_multicast(tip))
|
|
|
|
return;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
size = arp_hdr_len(skb->dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
|
|
|
list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
|
|
|
|
if (tip != np->local_ip.ip)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
hlen = LL_RESERVED_SPACE(np->dev);
|
|
|
|
tlen = np->dev->needed_tailroom;
|
|
|
|
send_skb = find_skb(np, size + hlen + tlen, hlen);
|
|
|
|
if (!send_skb)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
skb_reset_network_header(send_skb);
|
|
|
|
arp = (struct arphdr *) skb_put(send_skb, size);
|
|
|
|
send_skb->dev = skb->dev;
|
|
|
|
send_skb->protocol = htons(ETH_P_ARP);
|
|
|
|
|
|
|
|
/* Fill the device header for the ARP frame */
|
2013-01-08 00:52:41 +04:00
|
|
|
if (dev_hard_header(send_skb, skb->dev, ETH_P_ARP,
|
2013-01-08 00:52:39 +04:00
|
|
|
sha, np->dev->dev_addr,
|
|
|
|
send_skb->len) < 0) {
|
|
|
|
kfree_skb(send_skb);
|
|
|
|
continue;
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
/*
|
|
|
|
* Fill out the arp protocol part.
|
|
|
|
*
|
|
|
|
* we only support ethernet device type,
|
|
|
|
* which (according to RFC 1390) should
|
|
|
|
* always equal 1 (Ethernet).
|
|
|
|
*/
|
|
|
|
|
|
|
|
arp->ar_hrd = htons(np->dev->type);
|
|
|
|
arp->ar_pro = htons(ETH_P_IP);
|
|
|
|
arp->ar_hln = np->dev->addr_len;
|
|
|
|
arp->ar_pln = 4;
|
|
|
|
arp->ar_op = htons(type);
|
|
|
|
|
|
|
|
arp_ptr = (unsigned char *)(arp + 1);
|
|
|
|
memcpy(arp_ptr, np->dev->dev_addr, np->dev->addr_len);
|
|
|
|
arp_ptr += np->dev->addr_len;
|
|
|
|
memcpy(arp_ptr, &tip, 4);
|
|
|
|
arp_ptr += 4;
|
|
|
|
memcpy(arp_ptr, sha, np->dev->addr_len);
|
|
|
|
arp_ptr += np->dev->addr_len;
|
|
|
|
memcpy(arp_ptr, &sip, 4);
|
|
|
|
|
|
|
|
netpoll_send_skb(np, send_skb);
|
|
|
|
|
2013-10-24 01:36:30 +04:00
|
|
|
/* If there are several rx_skb_hooks for the same
|
|
|
|
* address we're fine by sending a single reply
|
|
|
|
*/
|
2013-01-08 00:52:39 +04:00
|
|
|
break;
|
2010-01-12 17:27:30 +03:00
|
|
|
}
|
2013-01-08 00:52:39 +04:00
|
|
|
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
2013-01-08 00:52:41 +04:00
|
|
|
} else if( proto == ETH_P_IPV6) {
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
struct nd_msg *msg;
|
|
|
|
u8 *lladdr = NULL;
|
|
|
|
struct ipv6hdr *hdr;
|
|
|
|
struct icmp6hdr *icmp6h;
|
|
|
|
const struct in6_addr *saddr;
|
|
|
|
const struct in6_addr *daddr;
|
|
|
|
struct inet6_dev *in6_dev = NULL;
|
|
|
|
struct in6_addr *target;
|
|
|
|
|
|
|
|
in6_dev = in6_dev_get(skb->dev);
|
|
|
|
if (!in6_dev || !in6_dev->cnf.accept_ra)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (!pskb_may_pull(skb, skb->len))
|
|
|
|
return;
|
|
|
|
|
|
|
|
msg = (struct nd_msg *)skb_transport_header(skb);
|
|
|
|
|
|
|
|
__skb_push(skb, skb->data - skb_transport_header(skb));
|
|
|
|
|
|
|
|
if (ipv6_hdr(skb)->hop_limit != 255)
|
|
|
|
return;
|
|
|
|
if (msg->icmph.icmp6_code != 0)
|
|
|
|
return;
|
|
|
|
if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
|
|
|
|
return;
|
|
|
|
|
|
|
|
saddr = &ipv6_hdr(skb)->saddr;
|
|
|
|
daddr = &ipv6_hdr(skb)->daddr;
|
|
|
|
|
|
|
|
size = sizeof(struct icmp6hdr) + sizeof(struct in6_addr);
|
|
|
|
|
|
|
|
spin_lock_irqsave(&npinfo->rx_lock, flags);
|
|
|
|
list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
|
2013-01-27 19:55:20 +04:00
|
|
|
if (!ipv6_addr_equal(daddr, &np->local_ip.in6))
|
2013-01-08 00:52:41 +04:00
|
|
|
continue;
|
|
|
|
|
|
|
|
hlen = LL_RESERVED_SPACE(np->dev);
|
|
|
|
tlen = np->dev->needed_tailroom;
|
|
|
|
send_skb = find_skb(np, size + hlen + tlen, hlen);
|
|
|
|
if (!send_skb)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
send_skb->protocol = htons(ETH_P_IPV6);
|
|
|
|
send_skb->dev = skb->dev;
|
|
|
|
|
|
|
|
skb_reset_network_header(send_skb);
|
2013-06-03 20:31:36 +04:00
|
|
|
hdr = (struct ipv6hdr *) skb_put(send_skb, sizeof(struct ipv6hdr));
|
2013-01-08 00:52:41 +04:00
|
|
|
*(__be32*)hdr = htonl(0x60000000);
|
|
|
|
hdr->payload_len = htons(size);
|
|
|
|
hdr->nexthdr = IPPROTO_ICMPV6;
|
|
|
|
hdr->hop_limit = 255;
|
|
|
|
hdr->saddr = *saddr;
|
|
|
|
hdr->daddr = *daddr;
|
|
|
|
|
2013-06-03 20:31:36 +04:00
|
|
|
icmp6h = (struct icmp6hdr *) skb_put(send_skb, sizeof(struct icmp6hdr));
|
2013-01-08 00:52:41 +04:00
|
|
|
icmp6h->icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
|
|
|
|
icmp6h->icmp6_router = 0;
|
|
|
|
icmp6h->icmp6_solicited = 1;
|
2013-06-03 20:31:36 +04:00
|
|
|
|
|
|
|
target = (struct in6_addr *) skb_put(send_skb, sizeof(struct in6_addr));
|
2013-01-08 00:52:41 +04:00
|
|
|
*target = msg->target;
|
|
|
|
icmp6h->icmp6_cksum = csum_ipv6_magic(saddr, daddr, size,
|
|
|
|
IPPROTO_ICMPV6,
|
|
|
|
csum_partial(icmp6h,
|
|
|
|
size, 0));
|
|
|
|
|
|
|
|
if (dev_hard_header(send_skb, skb->dev, ETH_P_IPV6,
|
|
|
|
lladdr, np->dev->dev_addr,
|
|
|
|
send_skb->len) < 0) {
|
|
|
|
kfree_skb(send_skb);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
netpoll_send_skb(np, send_skb);
|
|
|
|
|
2013-10-24 01:36:30 +04:00
|
|
|
/* If there are several rx_skb_hooks for the same
|
|
|
|
* address, we're fine by sending a single reply
|
|
|
|
*/
|
2013-01-08 00:52:41 +04:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
spin_unlock_irqrestore(&npinfo->rx_lock, flags);
|
|
|
|
#endif
|
2010-01-12 17:27:30 +03:00
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2013-01-08 00:52:41 +04:00
|
|
|
static bool pkt_is_ns(struct sk_buff *skb)
|
|
|
|
{
|
|
|
|
struct nd_msg *msg;
|
|
|
|
struct ipv6hdr *hdr;
|
|
|
|
|
|
|
|
if (skb->protocol != htons(ETH_P_ARP))
|
|
|
|
return false;
|
|
|
|
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
msg = (struct nd_msg *)skb_transport_header(skb);
|
|
|
|
__skb_push(skb, skb->data - skb_transport_header(skb));
|
|
|
|
hdr = ipv6_hdr(skb);
|
|
|
|
|
|
|
|
if (hdr->nexthdr != IPPROTO_ICMPV6)
|
|
|
|
return false;
|
|
|
|
if (hdr->hop_limit != 255)
|
|
|
|
return false;
|
|
|
|
if (msg->icmph.icmp6_code != 0)
|
|
|
|
return false;
|
|
|
|
if (msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2012-08-10 05:24:40 +04:00
|
|
|
int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2013-10-24 01:36:30 +04:00
|
|
|
int proto, len, ulen, data_len;
|
|
|
|
int hits = 0, offset;
|
2011-04-22 08:53:02 +04:00
|
|
|
const struct iphdr *iph;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct udphdr *uh;
|
2010-01-12 17:27:30 +03:00
|
|
|
struct netpoll *np, *tmp;
|
2013-10-24 01:36:30 +04:00
|
|
|
uint16_t source;
|
2006-06-26 11:04:27 +04:00
|
|
|
|
2010-01-12 17:27:30 +03:00
|
|
|
if (list_empty(&npinfo->rx_np))
|
2005-04-17 02:20:36 +04:00
|
|
|
goto out;
|
2010-01-12 17:27:30 +03:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
if (skb->dev->type != ARPHRD_ETHER)
|
|
|
|
goto out;
|
|
|
|
|
2008-03-04 23:28:49 +03:00
|
|
|
/* check if netpoll clients need ARP */
|
2013-01-08 00:52:41 +04:00
|
|
|
if (skb->protocol == htons(ETH_P_ARP) && atomic_read(&trapped)) {
|
|
|
|
skb_queue_tail(&npinfo->neigh_tx, skb);
|
|
|
|
return 1;
|
|
|
|
} else if (pkt_is_ns(skb) && atomic_read(&trapped)) {
|
2013-01-08 00:52:39 +04:00
|
|
|
skb_queue_tail(&npinfo->neigh_tx, skb);
|
2005-04-17 02:20:36 +04:00
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2012-08-10 05:24:49 +04:00
|
|
|
if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
|
|
|
|
skb = vlan_untag(skb);
|
|
|
|
if (unlikely(!skb))
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
proto = ntohs(eth_hdr(skb)->h_proto);
|
2013-01-08 00:52:39 +04:00
|
|
|
if (proto != ETH_P_IP && proto != ETH_P_IPV6)
|
2005-04-17 02:20:36 +04:00
|
|
|
goto out;
|
|
|
|
if (skb->pkt_type == PACKET_OTHERHOST)
|
|
|
|
goto out;
|
|
|
|
if (skb_shared(skb))
|
|
|
|
goto out;
|
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
if (proto == ETH_P_IP) {
|
|
|
|
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
|
|
|
|
goto out;
|
|
|
|
iph = (struct iphdr *)skb->data;
|
|
|
|
if (iph->ihl < 5 || iph->version != 4)
|
|
|
|
goto out;
|
|
|
|
if (!pskb_may_pull(skb, iph->ihl*4))
|
|
|
|
goto out;
|
|
|
|
iph = (struct iphdr *)skb->data;
|
|
|
|
if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
|
|
|
|
goto out;
|
2007-04-17 23:40:20 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
len = ntohs(iph->tot_len);
|
|
|
|
if (skb->len < len || len < iph->ihl*4)
|
|
|
|
goto out;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
/*
|
|
|
|
* Our transport medium may have padded the buffer out.
|
|
|
|
* Now We trim to the true length of the frame.
|
|
|
|
*/
|
|
|
|
if (pskb_trim_rcsum(skb, len))
|
|
|
|
goto out;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
iph = (struct iphdr *)skb->data;
|
|
|
|
if (iph->protocol != IPPROTO_UDP)
|
|
|
|
goto out;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
len -= iph->ihl*4;
|
|
|
|
uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
|
2013-10-24 01:36:30 +04:00
|
|
|
offset = (unsigned char *)(uh + 1) - skb->data;
|
2013-01-08 00:52:39 +04:00
|
|
|
ulen = ntohs(uh->len);
|
2013-10-24 01:36:30 +04:00
|
|
|
data_len = skb->len - offset;
|
|
|
|
source = ntohs(uh->source);
|
2010-01-12 17:27:30 +03:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
if (ulen != len)
|
|
|
|
goto out;
|
|
|
|
if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
|
|
|
|
goto out;
|
|
|
|
list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
|
|
|
|
if (np->local_ip.ip && np->local_ip.ip != iph->daddr)
|
|
|
|
continue;
|
|
|
|
if (np->remote_ip.ip && np->remote_ip.ip != iph->saddr)
|
|
|
|
continue;
|
|
|
|
if (np->local_port && np->local_port != ntohs(uh->dest))
|
|
|
|
continue;
|
|
|
|
|
2013-10-24 01:36:30 +04:00
|
|
|
np->rx_skb_hook(np, source, skb, offset, data_len);
|
2013-01-08 00:52:39 +04:00
|
|
|
hits++;
|
|
|
|
}
|
2013-01-08 00:52:41 +04:00
|
|
|
} else {
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
const struct ipv6hdr *ip6h;
|
|
|
|
|
|
|
|
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
|
|
|
|
goto out;
|
|
|
|
ip6h = (struct ipv6hdr *)skb->data;
|
|
|
|
if (ip6h->version != 6)
|
|
|
|
goto out;
|
|
|
|
len = ntohs(ip6h->payload_len);
|
|
|
|
if (!len)
|
|
|
|
goto out;
|
|
|
|
if (len + sizeof(struct ipv6hdr) > skb->len)
|
|
|
|
goto out;
|
|
|
|
if (pskb_trim_rcsum(skb, len + sizeof(struct ipv6hdr)))
|
|
|
|
goto out;
|
|
|
|
ip6h = ipv6_hdr(skb);
|
|
|
|
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
|
|
|
|
goto out;
|
|
|
|
uh = udp_hdr(skb);
|
2013-10-24 01:36:30 +04:00
|
|
|
offset = (unsigned char *)(uh + 1) - skb->data;
|
2013-01-08 00:52:41 +04:00
|
|
|
ulen = ntohs(uh->len);
|
2013-10-24 01:36:30 +04:00
|
|
|
data_len = skb->len - offset;
|
|
|
|
source = ntohs(uh->source);
|
2013-01-08 00:52:41 +04:00
|
|
|
if (ulen != skb->len)
|
|
|
|
goto out;
|
|
|
|
if (udp6_csum_init(skb, uh, IPPROTO_UDP))
|
|
|
|
goto out;
|
|
|
|
list_for_each_entry_safe(np, tmp, &npinfo->rx_np, rx) {
|
2013-01-27 19:55:20 +04:00
|
|
|
if (!ipv6_addr_equal(&np->local_ip.in6, &ip6h->daddr))
|
2013-01-08 00:52:41 +04:00
|
|
|
continue;
|
2013-01-27 19:55:20 +04:00
|
|
|
if (!ipv6_addr_equal(&np->remote_ip.in6, &ip6h->saddr))
|
2013-01-08 00:52:41 +04:00
|
|
|
continue;
|
|
|
|
if (np->local_port && np->local_port != ntohs(uh->dest))
|
|
|
|
continue;
|
|
|
|
|
2013-10-24 01:36:30 +04:00
|
|
|
np->rx_skb_hook(np, source, skb, offset, data_len);
|
2013-01-08 00:52:41 +04:00
|
|
|
hits++;
|
|
|
|
}
|
|
|
|
#endif
|
2010-01-12 17:27:30 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!hits)
|
|
|
|
goto out;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
kfree_skb(skb);
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
out:
|
|
|
|
if (atomic_read(&trapped)) {
|
|
|
|
kfree_skb(skb);
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
[NET] netconsole: Support dynamic reconfiguration using configfs
Based upon initial work by Keiichi Kii <k-keiichi@bx.jp.nec.com>.
This patch introduces support for dynamic reconfiguration (adding, removing
and/or modifying parameters of netconsole targets at runtime) using a
userspace interface exported via configfs. Documentation is also updated
accordingly.
Issues and brief design overview:
(1) Kernel-initiated creation / destruction of kernel objects is not
possible with configfs -- the lifetimes of the "config items" is managed
exclusively from userspace. But netconsole must support boot/module
params too, and these are parsed in kernel and hence netpolls must be
setup from the kernel. Joel Becker suggested to separately manage the
lifetimes of the two kinds of netconsole_target objects -- those created
via configfs mkdir(2) from userspace and those specified from the
boot/module option string. This adds complexity and some redundancy here
and also means that boot/module param-created targets are not exposed
through the configfs namespace (and hence cannot be updated / destroyed
dynamically). However, this saves us from locking / refcounting
complexities that would need to be introduced in configfs to support
kernel-initiated item creation / destroy there.
(2) In configfs, item creation takes place in the call chain of the
mkdir(2) syscall in the driver subsystem. If we used an ioctl(2) to
create / destroy objects from userspace, the special userspace program is
able to fill out the structure to be passed into the ioctl and hence
specify attributes such as local interface that are required at the time
we set up the netpoll. For configfs, this information is not available at
the time of mkdir(2). So, we keep all newly-created targets (via
configfs) disabled by default. The user is expected to set various
attributes appropriately (including the local network interface if
required) and then write(2) "1" to the "enabled" attribute. Thus,
netpoll_setup() is then called on the set parameters in the context of
_this_ write(2) on the "enabled" attribute itself. This design enables
the user to reconfigure existing netconsole targets at runtime to be
attached to newly-come-up interfaces that may not have existed when
netconsole was loaded or when the targets were actually created. All this
effectively enables us to get rid of custom ioctls.
(3) Ultra-paranoid configfs attribute show() and store() operations, with
sanity and input range checking, using only safe string primitives, and
compliant with the recommendations in Documentation/filesystems/sysfs.txt.
(4) A new function netpoll_print_options() is created in the netpoll API,
that just prints out the configured parameters for a netpoll structure.
netpoll_parse_options() is modified to use that and it is also exported to
be used from netconsole.
Signed-off-by: Satyam Sharma <satyam@infradead.org>
Acked-by: Keiichi Kii <k-keiichi@bx.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-08-11 02:35:05 +04:00
|
|
|
/**
 * netpoll_print_options - log the configured parameters of a netpoll
 * @np: netpoll instance whose settings should be printed
 *
 * Emits every field of the "port@addr/dev,port@addr/MAC" configuration
 * through the np_info() wrapper.  The IPv4 vs IPv6 printf format for the
 * local and remote addresses is selected by np->ipv6.
 */
void netpoll_print_options(struct netpoll *np)
{
	np_info(np, "local port %d\n", np->local_port);
	if (np->ipv6)
		np_info(np, "local IPv6 address %pI6c\n", &np->local_ip.in6);
	else
		np_info(np, "local IPv4 address %pI4\n", &np->local_ip.ip);
	np_info(np, "interface '%s'\n", np->dev_name);
	np_info(np, "remote port %d\n", np->remote_port);
	if (np->ipv6)
		np_info(np, "remote IPv6 address %pI6c\n", &np->remote_ip.in6);
	else
		np_info(np, "remote IPv4 address %pI4\n", &np->remote_ip.ip);
	np_info(np, "remote ethernet address %pM\n", np->remote_mac);
}
EXPORT_SYMBOL(netpoll_print_options);
|
[NET] netconsole: Support dynamic reconfiguration using configfs
Based upon initial work by Keiichi Kii <k-keiichi@bx.jp.nec.com>.
This patch introduces support for dynamic reconfiguration (adding, removing
and/or modifying parameters of netconsole targets at runtime) using a
userspace interface exported via configfs. Documentation is also updated
accordingly.
Issues and brief design overview:
(1) Kernel-initiated creation / destruction of kernel objects is not
possible with configfs -- the lifetimes of the "config items" is managed
exclusively from userspace. But netconsole must support boot/module
params too, and these are parsed in kernel and hence netpolls must be
setup from the kernel. Joel Becker suggested to separately manage the
lifetimes of the two kinds of netconsole_target objects -- those created
via configfs mkdir(2) from userspace and those specified from the
boot/module option string. This adds complexity and some redundancy here
and also means that boot/module param-created targets are not exposed
through the configfs namespace (and hence cannot be updated / destroyed
dynamically). However, this saves us from locking / refcounting
complexities that would need to be introduced in configfs to support
kernel-initiated item creation / destroy there.
(2) In configfs, item creation takes place in the call chain of the
mkdir(2) syscall in the driver subsystem. If we used an ioctl(2) to
create / destroy objects from userspace, the special userspace program is
able to fill out the structure to be passed into the ioctl and hence
specify attributes such as local interface that are required at the time
we set up the netpoll. For configfs, this information is not available at
the time of mkdir(2). So, we keep all newly-created targets (via
configfs) disabled by default. The user is expected to set various
attributes appropriately (including the local network interface if
required) and then write(2) "1" to the "enabled" attribute. Thus,
netpoll_setup() is then called on the set parameters in the context of
_this_ write(2) on the "enabled" attribute itself. This design enables
the user to reconfigure existing netconsole targets at runtime to be
attached to newly-come-up interfaces that may not have existed when
netconsole was loaded or when the targets were actually created. All this
effectively enables us to get rid of custom ioctls.
(3) Ultra-paranoid configfs attribute show() and store() operations, with
sanity and input range checking, using only safe string primitives, and
compliant with the recommendations in Documentation/filesystems/sysfs.txt.
(4) A new function netpoll_print_options() is created in the netpoll API,
that just prints out the configured parameters for a netpoll structure.
netpoll_parse_options() is modified to use that and it is also exported to
be used from netconsole.
Signed-off-by: Satyam Sharma <satyam@infradead.org>
Acked-by: Keiichi Kii <k-keiichi@bx.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
2007-08-11 02:35:05 +04:00
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
/*
 * Parse @str as an IPv4 or IPv6 address into @addr.
 *
 * Return: 0 if @str was a complete IPv4 address, 1 if it was a complete
 * IPv6 address (only reachable when CONFIG_IPV6 is enabled), -1 on any
 * parse failure or trailing garbage after the address.
 */
static int netpoll_parse_ip_addr(const char *str, union inet_addr *addr)
{
	const char *end;

	/* ':' never occurs in an IPv4 literal, so only try in4_pton then. */
	if (!strchr(str, ':') &&
	    in4_pton(str, -1, (void *)addr, -1, &end) > 0) {
		/* Accept only if the whole string was consumed. */
		if (!*end)
			return 0;
	}
	if (in6_pton(str, -1, addr->in6.s6_addr, -1, &end) > 0) {
#if IS_ENABLED(CONFIG_IPV6)
		if (!*end)
			return 1;
#else
		/* Parsed as IPv6, but this kernel has no IPv6 support. */
		return -1;
#endif
	}
	return -1;
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/**
 * netpoll_parse_options - parse a netpoll configuration string
 * @np: netpoll structure to fill in
 * @opt: option string, modified in place (delimiters are NUL-terminated)
 *
 * Expected format: [src-port]@[src-ip]/[dev],[dst-port]@dst-ip/[dst-MAC]
 * Every field before the destination IP may be omitted (the delimiter
 * alone keeps the slot); the destination IP is mandatory and the MAC is
 * optional.  Both addresses must agree on IP version when both are given.
 *
 * Return: 0 on success, -1 on a parse error (logged with the position).
 */
int netpoll_parse_options(struct netpoll *np, char *opt)
{
	char *cur=opt, *delim;
	int ipv6;
	bool ipversion_set = false;

	/* Optional local port, terminated by '@'. */
	if (*cur != '@') {
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (kstrtou16(cur, 10, &np->local_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* Optional local IP, terminated by '/'. */
	if (*cur != '/') {
		ipversion_set = true;
		if ((delim = strchr(cur, '/')) == NULL)
			goto parse_failed;
		*delim = 0;
		ipv6 = netpoll_parse_ip_addr(cur, &np->local_ip);
		if (ipv6 < 0)
			goto parse_failed;
		else
			np->ipv6 = (bool)ipv6;
		cur = delim;
	}
	cur++;

	if (*cur != ',') {
		/* parse out dev name */
		if ((delim = strchr(cur, ',')) == NULL)
			goto parse_failed;
		*delim = 0;
		strlcpy(np->dev_name, cur, sizeof(np->dev_name));
		cur = delim;
	}
	cur++;

	if (*cur != '@') {
		/* dst port */
		if ((delim = strchr(cur, '@')) == NULL)
			goto parse_failed;
		*delim = 0;
		if (*cur == ' ' || *cur == '\t')
			np_info(np, "warning: whitespace is not allowed\n");
		if (kstrtou16(cur, 10, &np->remote_port))
			goto parse_failed;
		cur = delim;
	}
	cur++;

	/* dst ip */
	if ((delim = strchr(cur, '/')) == NULL)
		goto parse_failed;
	*delim = 0;
	ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
	if (ipv6 < 0)
		goto parse_failed;
	/* Reject a mixed IPv4/IPv6 local+remote pair. */
	else if (ipversion_set && np->ipv6 != (bool)ipv6)
		goto parse_failed;
	else
		np->ipv6 = (bool)ipv6;
	cur = delim + 1;

	if (*cur != 0) {
		/* MAC address */
		if (!mac_pton(cur, np->remote_mac))
			goto parse_failed;
	}

	netpoll_print_options(np);

	return 0;

 parse_failed:
	np_info(np, "couldn't parse config at '%s'!\n", cur);
	return -1;
}
EXPORT_SYMBOL(netpoll_parse_options);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2012-08-10 05:24:37 +04:00
|
|
|
/**
 * __netpoll_setup - attach a netpoll to an already-resolved net device
 * @np: the netpoll to attach
 * @ndev: target device (caller holds a reference; RTNL assumed held,
 *        see rtnl_dereference below)
 * @gfp: allocation flags for the per-device netpoll_info
 *
 * Allocates and initializes the device's shared netpoll_info on first
 * use (or takes an extra reference on an existing one), optionally
 * registers the rx hook, and finally publishes npinfo to the device
 * with rcu_assign_pointer() so readers see fully-initialized state.
 *
 * Return: 0 on success, -ENOTSUPP if the driver cannot be polled,
 * -ENOMEM on allocation failure, or the driver's ndo_netpoll_setup error.
 */
int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
{
	struct netpoll_info *npinfo;
	const struct net_device_ops *ops;
	unsigned long flags;
	int err;

	np->dev = ndev;
	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);

	/* Polling requires driver support and must not be disabled. */
	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
	    !ndev->netdev_ops->ndo_poll_controller) {
		np_err(np, "%s doesn't support polling, aborting\n",
		       np->dev_name);
		err = -ENOTSUPP;
		goto out;
	}

	if (!ndev->npinfo) {
		/* First netpoll on this device: build the shared state. */
		npinfo = kmalloc(sizeof(*npinfo), gfp);
		if (!npinfo) {
			err = -ENOMEM;
			goto out;
		}

		npinfo->rx_flags = 0;
		INIT_LIST_HEAD(&npinfo->rx_np);

		spin_lock_init(&npinfo->rx_lock);
		sema_init(&npinfo->dev_lock, 1);
		skb_queue_head_init(&npinfo->neigh_tx);
		skb_queue_head_init(&npinfo->txq);
		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);

		atomic_set(&npinfo->refcnt, 1);

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_setup) {
			err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
			if (err)
				goto free_npinfo;
		}
	} else {
		/* Device already polled by someone: share the npinfo. */
		npinfo = rtnl_dereference(ndev->npinfo);
		atomic_inc(&npinfo->refcnt);
	}

	npinfo->netpoll = np;

	/* Register for rx delivery only when a hook was supplied. */
	if (np->rx_skb_hook) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
		list_add_tail(&np->rx, &npinfo->rx_np);
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* last thing to do is link it to the net device structure */
	rcu_assign_pointer(ndev->npinfo, npinfo);

	return 0;

free_npinfo:
	kfree(npinfo);
out:
	return err;
}
EXPORT_SYMBOL_GPL(__netpoll_setup);
|
|
|
|
|
|
|
|
int netpoll_setup(struct netpoll *np)
|
|
|
|
{
|
|
|
|
struct net_device *ndev = NULL;
|
|
|
|
struct in_device *in_dev;
|
|
|
|
int err;
|
|
|
|
|
2013-01-15 03:34:06 +04:00
|
|
|
rtnl_lock();
|
2013-01-27 19:55:21 +04:00
|
|
|
if (np->dev_name) {
|
|
|
|
struct net *net = current->nsproxy->net_ns;
|
|
|
|
ndev = __dev_get_by_name(net, np->dev_name);
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!ndev) {
|
2012-01-29 19:50:43 +04:00
|
|
|
np_err(np, "%s doesn't exist, aborting\n", np->dev_name);
|
2013-01-15 03:34:06 +04:00
|
|
|
err = -ENODEV;
|
|
|
|
goto unlock;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2013-01-17 08:21:08 +04:00
|
|
|
dev_hold(ndev);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2013-01-04 02:48:55 +04:00
|
|
|
if (netdev_master_upper_dev_get(ndev)) {
|
2012-01-29 19:50:43 +04:00
|
|
|
np_err(np, "%s is a slave device, aborting\n", np->dev_name);
|
2011-06-12 05:55:22 +04:00
|
|
|
err = -EBUSY;
|
|
|
|
goto put;
|
2011-06-09 11:28:13 +04:00
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
if (!netif_running(ndev)) {
|
|
|
|
unsigned long atmost, atleast;
|
|
|
|
|
2012-01-29 19:50:43 +04:00
|
|
|
np_info(np, "device %s not up yet, forcing it\n", np->dev_name);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-10-27 02:46:52 +04:00
|
|
|
err = dev_open(ndev);
|
|
|
|
|
|
|
|
if (err) {
|
2012-01-29 19:50:43 +04:00
|
|
|
np_err(np, "failed to open %s\n", ndev->name);
|
2010-06-10 20:12:46 +04:00
|
|
|
goto put;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2013-01-15 03:34:06 +04:00
|
|
|
rtnl_unlock();
|
2005-04-17 02:20:36 +04:00
|
|
|
atleast = jiffies + HZ/10;
|
2009-07-08 22:10:56 +04:00
|
|
|
atmost = jiffies + carrier_timeout * HZ;
|
2005-04-17 02:20:36 +04:00
|
|
|
while (!netif_carrier_ok(ndev)) {
|
|
|
|
if (time_after(jiffies, atmost)) {
|
2012-01-29 19:50:43 +04:00
|
|
|
np_notice(np, "timeout waiting for carrier\n");
|
2005-04-17 02:20:36 +04:00
|
|
|
break;
|
|
|
|
}
|
2009-07-09 07:09:44 +04:00
|
|
|
msleep(1);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* If carrier appears to come up instantly, we don't
|
|
|
|
* trust it and pause so that we don't pump all our
|
|
|
|
* queued console messages into the bitbucket.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (time_before(jiffies, atleast)) {
|
2012-01-29 19:50:43 +04:00
|
|
|
np_notice(np, "carrier detect appears untrustworthy, waiting 4 seconds\n");
|
2005-04-17 02:20:36 +04:00
|
|
|
msleep(4000);
|
|
|
|
}
|
2013-01-15 03:34:06 +04:00
|
|
|
rtnl_lock();
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2013-01-08 00:52:39 +04:00
|
|
|
if (!np->local_ip.ip) {
|
|
|
|
if (!np->ipv6) {
|
2013-01-15 03:34:06 +04:00
|
|
|
in_dev = __in_dev_get_rtnl(ndev);
|
2013-01-08 00:52:39 +04:00
|
|
|
|
|
|
|
if (!in_dev || !in_dev->ifa_list) {
|
|
|
|
np_err(np, "no IP address for %s, aborting\n",
|
|
|
|
np->dev_name);
|
|
|
|
err = -EDESTADDRREQ;
|
|
|
|
goto put;
|
|
|
|
}
|
|
|
|
|
|
|
|
np->local_ip.ip = in_dev->ifa_list->ifa_local;
|
|
|
|
np_info(np, "local IP %pI4\n", &np->local_ip.ip);
|
2013-01-08 00:52:41 +04:00
|
|
|
} else {
|
|
|
|
#if IS_ENABLED(CONFIG_IPV6)
|
|
|
|
struct inet6_dev *idev;
|
|
|
|
|
|
|
|
err = -EDESTADDRREQ;
|
|
|
|
idev = __in6_dev_get(ndev);
|
|
|
|
if (idev) {
|
|
|
|
struct inet6_ifaddr *ifp;
|
|
|
|
|
|
|
|
read_lock_bh(&idev->lock);
|
|
|
|
list_for_each_entry(ifp, &idev->addr_list, if_list) {
|
|
|
|
if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
|
|
|
|
continue;
|
|
|
|
np->local_ip.in6 = ifp->addr;
|
|
|
|
err = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
read_unlock_bh(&idev->lock);
|
|
|
|
}
|
|
|
|
if (err) {
|
|
|
|
np_err(np, "no IPv6 address for %s, aborting\n",
|
|
|
|
np->dev_name);
|
|
|
|
goto put;
|
|
|
|
} else
|
|
|
|
np_info(np, "local IPv6 %pI6c\n", &np->local_ip.in6);
|
|
|
|
#else
|
|
|
|
np_err(np, "IPv6 is not supported %s, aborting\n",
|
|
|
|
np->dev_name);
|
2013-01-22 21:39:11 +04:00
|
|
|
err = -EINVAL;
|
2013-01-08 00:52:41 +04:00
|
|
|
goto put;
|
|
|
|
#endif
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2010-06-10 20:12:46 +04:00
|
|
|
/* fill up the skb queue */
|
|
|
|
refill_skbs();
|
|
|
|
|
2012-08-10 05:24:37 +04:00
|
|
|
err = __netpoll_setup(np, ndev, GFP_KERNEL);
|
2010-06-10 20:12:48 +04:00
|
|
|
if (err)
|
|
|
|
goto put;
|
|
|
|
|
2013-01-15 03:34:06 +04:00
|
|
|
rtnl_unlock();
|
2005-04-17 02:20:36 +04:00
|
|
|
return 0;
|
|
|
|
|
2010-03-16 08:29:54 +03:00
|
|
|
put:
|
2005-04-17 02:20:36 +04:00
|
|
|
dev_put(ndev);
|
2013-01-15 03:34:06 +04:00
|
|
|
unlock:
|
|
|
|
rtnl_unlock();
|
2006-10-27 02:46:52 +04:00
|
|
|
return err;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2010-07-10 01:22:04 +04:00
|
|
|
EXPORT_SYMBOL(netpoll_setup);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-11-15 07:40:49 +03:00
|
|
|
/* Module init: initialize the shared pool of preallocated transmit skbs. */
static int __init netpoll_init(void)
{
	skb_queue_head_init(&skb_pool);
	return 0;
}
core_initcall(netpoll_init);
|
|
|
|
|
2012-08-10 05:24:38 +04:00
|
|
|
/*
 * RCU callback that releases a netpoll_info once the grace period after
 * the final reference drop in __netpoll_cleanup() has elapsed.  Runs in
 * softirq context, so it must not sleep (hence no *_sync cancellation).
 */
static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
{
	struct netpoll_info *npinfo =
			container_of(rcu_head, struct netpoll_info, rcu);

	skb_queue_purge(&npinfo->neigh_tx);
	skb_queue_purge(&npinfo->txq);

	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
	cancel_delayed_work(&npinfo->tx_work);

	/* clean after last, unfinished work */
	__skb_queue_purge(&npinfo->txq);
	/* now cancel it again */
	cancel_delayed_work(&npinfo->tx_work);
	kfree(npinfo);
}
|
|
|
|
|
2010-06-10 20:12:48 +04:00
|
|
|
/**
 * __netpoll_cleanup - detach @np from its device
 * @np: the netpoll to tear down
 *
 * Removes @np from the device's rx list, waits out any in-flight
 * netpoll_srcu readers, and on the last reference runs the driver's
 * ndo_netpoll_cleanup, unlinks npinfo from the device, and frees it
 * after an RCU-bh grace period via rcu_cleanup_netpoll_info().
 */
void __netpoll_cleanup(struct netpoll *np)
{
	struct netpoll_info *npinfo;
	unsigned long flags;

	/* rtnl_dereference would be preferable here but
	 * rcu_cleanup_netpoll path can put us in here safely without
	 * holding the rtnl, so plain rcu_dereference it is
	 */
	npinfo = rtnl_dereference(np->dev->npinfo);
	if (!npinfo)
		return;

	/* Unhook rx delivery; clear the flag when the list empties. */
	if (!list_empty(&npinfo->rx_np)) {
		spin_lock_irqsave(&npinfo->rx_lock, flags);
		list_del(&np->rx);
		if (list_empty(&npinfo->rx_np))
			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
	}

	/* Wait for concurrent netpoll_srcu read sections to finish
	 * before the driver state is dismantled below.
	 */
	synchronize_srcu(&netpoll_srcu);

	if (atomic_dec_and_test(&npinfo->refcnt)) {
		const struct net_device_ops *ops;

		ops = np->dev->netdev_ops;
		if (ops->ndo_netpoll_cleanup)
			ops->ndo_netpoll_cleanup(np->dev);

		rcu_assign_pointer(np->dev->npinfo, NULL);
		call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
	}
}
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
|
2010-06-10 20:12:44 +04:00
|
|
|
|
2013-02-11 14:25:30 +04:00
|
|
|
static void netpoll_async_cleanup(struct work_struct *work)
|
2012-08-10 05:24:38 +04:00
|
|
|
{
|
2013-02-11 14:25:30 +04:00
|
|
|
struct netpoll *np = container_of(work, struct netpoll, cleanup_work);
|
2006-10-27 02:46:50 +04:00
|
|
|
|
2013-02-11 14:25:30 +04:00
|
|
|
rtnl_lock();
|
2012-08-10 05:24:38 +04:00
|
|
|
__netpoll_cleanup(np);
|
2013-02-11 14:25:30 +04:00
|
|
|
rtnl_unlock();
|
2012-08-10 05:24:38 +04:00
|
|
|
kfree(np);
|
|
|
|
}
|
2006-10-27 02:46:50 +04:00
|
|
|
|
2013-02-11 14:25:30 +04:00
|
|
|
/*
 * Schedule asynchronous destruction of @np.  Usable from contexts that
 * cannot take the RTNL lock; the actual __netpoll_cleanup() and kfree()
 * happen later in netpoll_async_cleanup() on the system workqueue.
 */
void __netpoll_free_async(struct netpoll *np)
{
	schedule_work(&np->cleanup_work);
}
EXPORT_SYMBOL_GPL(__netpoll_free_async);
|
2005-06-23 09:05:59 +04:00
|
|
|
|
2010-06-10 20:12:48 +04:00
|
|
|
/**
 * netpoll_cleanup - synchronous public teardown of a netpoll
 * @np: the netpoll to destroy
 *
 * Takes RTNL, runs __netpoll_cleanup(), drops the device reference
 * taken in netpoll_setup(), and clears np->dev.  A netpoll that was
 * never set up (np->dev == NULL) is a no-op.
 */
void netpoll_cleanup(struct netpoll *np)
{
	rtnl_lock();
	if (!np->dev)
		goto out;
	__netpoll_cleanup(np);
	dev_put(np->dev);
	np->dev = NULL;
out:
	rtnl_unlock();
}
EXPORT_SYMBOL(netpoll_cleanup);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
int netpoll_trap(void)
|
|
|
|
{
|
|
|
|
return atomic_read(&trapped);
|
|
|
|
}
|
2010-07-10 01:22:04 +04:00
|
|
|
EXPORT_SYMBOL(netpoll_trap);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
void netpoll_set_trap(int trap)
|
|
|
|
{
|
|
|
|
if (trap)
|
|
|
|
atomic_inc(&trapped);
|
|
|
|
else
|
|
|
|
atomic_dec(&trapped);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(netpoll_set_trap);
|