#ifndef __LINUX_NETFILTER_H
#define __LINUX_NETFILTER_H

#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/net.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/static_key.h>
#include <linux/netfilter_defs.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
{
	return -(verdict >> NF_VERDICT_QBITS);
}

static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
				   const union nf_inet_addr *a2)
{
	return a1->all[0] == a2->all[0] &&
	       a1->all[1] == a2->all[1] &&
	       a1->all[2] == a2->all[2] &&
	       a1->all[3] == a2->all[3];
}

static inline void nf_inet_addr_mask(const union nf_inet_addr *a1,
				     union nf_inet_addr *result,
				     const union nf_inet_addr *mask)
{
	result->all[0] = a1->all[0] & mask->all[0];
	result->all[1] = a1->all[1] & mask->all[1];
	result->all[2] = a1->all[2] & mask->all[2];
	result->all[3] = a1->all[3] & mask->all[3];
}
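
/*
 * Illustrative sketch (not part of this header; the helper name is
 * hypothetical): a masked address match, i.e. "does addr fall within
 * net/mask?", can be composed from the two helpers above:
 *
 *	static bool nf_inet_addr_match(const union nf_inet_addr *net,
 *				       const union nf_inet_addr *addr,
 *				       const union nf_inet_addr *mask)
 *	{
 *		union nf_inet_addr masked;
 *
 *		nf_inet_addr_mask(addr, &masked, mask);
 *		return nf_inet_addr_cmp(&masked, net);
 *	}
 */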

int netfilter_init(void);

struct sk_buff;

struct nf_hook_ops;

struct sock;

struct nf_hook_state {
	unsigned int hook;
	int thresh;
	u_int8_t pf;
	struct net_device *in;
	struct net_device *out;
	struct sock *sk;
	struct net *net;
	struct list_head *hook_list;
	int (*okfn)(struct net *, struct sock *, struct sk_buff *);
};

static inline void nf_hook_state_init(struct nf_hook_state *p,
				      struct list_head *hook_list,
				      unsigned int hook,
				      int thresh, u_int8_t pf,
				      struct net_device *indev,
				      struct net_device *outdev,
				      struct sock *sk,
				      struct net *net,
				      int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	p->hook = hook;
	p->thresh = thresh;
	p->pf = pf;
	p->in = indev;
	p->out = outdev;
	p->sk = sk;
	p->net = net;
	p->hook_list = hook_list;
	p->okfn = okfn;
}

typedef unsigned int nf_hookfn(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state);

struct nf_hook_ops {
	struct list_head list;

	/* User fills in from here down. */
	nf_hookfn *hook;
	struct net_device *dev;
	void *priv;
	u_int8_t pf;
	unsigned int hooknum;
	/* Hooks are ordered in ascending priority. */
	int priority;
};
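
/*
 * Illustrative sketch (hypothetical names): a module defines a hook
 * function matching the nf_hookfn typedef and fills in the user-visible
 * fields of nf_hook_ops; returning NF_ACCEPT lets the packet continue:
 *
 *	static unsigned int my_hookfn(void *priv, struct sk_buff *skb,
 *				      const struct nf_hook_state *state)
 *	{
 *		return NF_ACCEPT;
 *	}
 *
 *	static struct nf_hook_ops my_ops = {
 *		.hook     = my_hookfn,
 *		.pf       = NFPROTO_IPV4,
 *		.hooknum  = NF_INET_PRE_ROUTING,
 *		.priority = NF_IP_PRI_FIRST,
 *	};
 */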

struct nf_sockopt_ops {
	struct list_head list;

	u_int8_t pf;

	/* Non-inclusive ranges: use 0/0/NULL to never get called. */
	int set_optmin;
	int set_optmax;
	int (*set)(struct sock *sk, int optval, void __user *user, unsigned int len);
#ifdef CONFIG_COMPAT
	int (*compat_set)(struct sock *sk, int optval,
			  void __user *user, unsigned int len);
#endif
	int get_optmin;
	int get_optmax;
	int (*get)(struct sock *sk, int optval, void __user *user, int *len);
#ifdef CONFIG_COMPAT
	int (*compat_get)(struct sock *sk, int optval,
			  void __user *user, int *len);
#endif
	/* Use the module struct to lock set/get code in place */
	struct module *owner;
};

/* Function to register/unregister hook points. */
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *ops);
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *ops);
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int n);

int nf_register_hook(struct nf_hook_ops *reg);
void nf_unregister_hook(struct nf_hook_ops *reg);
int nf_register_hooks(struct nf_hook_ops *reg, unsigned int n);
void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n);
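
/*
 * Illustrative sketch (hypothetical names): module init/exit code for a
 * hook that should only see packets in the initial network namespace can
 * use the per-net variants above with init_net:
 *
 *	static int __init my_init(void)
 *	{
 *		return nf_register_net_hook(&init_net, &my_ops);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		nf_unregister_net_hook(&init_net, &my_ops);
 *	}
 */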

/* Functions to register get/setsockopt ranges (non-inclusive).  You
   need to check permissions yourself! */
int nf_register_sockopt(struct nf_sockopt_ops *reg);
void nf_unregister_sockopt(struct nf_sockopt_ops *reg);
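
/*
 * Illustrative sketch (hypothetical names and option numbers): a module
 * claiming the non-inclusive socket-option range [MY_BASE, MY_MAX) might
 * fill in and register its ops roughly like this; setting .owner keeps
 * the module pinned while its set/get handlers run:
 *
 *	static struct nf_sockopt_ops my_sockopts = {
 *		.pf         = PF_INET,
 *		.set_optmin = MY_BASE,
 *		.set_optmax = MY_MAX,
 *		.set        = my_set,
 *		.get_optmin = MY_BASE,
 *		.get_optmax = MY_MAX,
 *		.get        = my_get,
 *		.owner      = THIS_MODULE,
 *	};
 *
 *	err = nf_register_sockopt(&my_sockopts);
 */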

#ifdef HAVE_JUMP_LABEL
extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
#endif

int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state);

/**
 * nf_hook_thresh - call a netfilter hook
 *
 * Returns 1 if the hook has allowed the packet to pass.  The function
 * okfn must be invoked by the caller in this case.  Any other return
 * value indicates the packet has been consumed by the hook.
 */
static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook,
				 struct net *net,
				 struct sock *sk,
				 struct sk_buff *skb,
				 struct net_device *indev,
				 struct net_device *outdev,
				 int (*okfn)(struct net *, struct sock *, struct sk_buff *),
				 int thresh)
{
	struct list_head *hook_list;

#ifdef HAVE_JUMP_LABEL
	if (__builtin_constant_p(pf) &&
	    __builtin_constant_p(hook) &&
	    !static_key_false(&nf_hooks_needed[pf][hook]))
		return 1;
#endif

	hook_list = &net->nf.hooks[pf][hook];

	if (!list_empty(hook_list)) {
		struct nf_hook_state state;
		int ret;

		/* We may already have this, but read-locks nest anyway */
		rcu_read_lock();
		nf_hook_state_init(&state, hook_list, hook, thresh,
				   pf, indev, outdev, sk, net, okfn);

		ret = nf_hook_slow(skb, &state);
		rcu_read_unlock();
		return ret;
	}
	return 1;
}

static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return nf_hook_thresh(pf, hook, net, sk, skb, indev, outdev, okfn, INT_MIN);
}

/* Activate hook; either okfn or kfree_skb called, unless a hook
   returns NF_STOLEN (in which case, it's up to the hook to deal with
   the consequences).

   Returns -ERRNO if packet dropped.  Zero means queued, stolen or
   accepted.
*/

/* RR:
   > I don't want nf_hook to return anything because people might forget
   > about async and trust the return value to mean "packet was ok".

   AK:
   Just document it clearly, then you can expect some sense from kernel
   coders :)
*/

static inline int
NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	       struct sk_buff *skb, struct net_device *in,
	       struct net_device *out,
	       int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	       int thresh)
{
	int ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, thresh);
	if (ret == 1)
		ret = okfn(net, sk, skb);
	return ret;
}

static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	     bool cond)
{
	int ret;

	if (!cond ||
	    ((ret = nf_hook_thresh(pf, hook, net, sk, skb, in, out, okfn, INT_MIN)) == 1))
		ret = okfn(net, sk, skb);
	return ret;
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct sk_buff *skb,
	struct net_device *in, struct net_device *out,
	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return NF_HOOK_THRESH(pf, hook, net, sk, skb, in, out, okfn, INT_MIN);
}
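
/*
 * Illustrative sketch: NF_HOOK() is how protocol entry points run a
 * packet through a hook chain and, if every hook accepts, continue via
 * okfn.  IPv4 receive does essentially this (ip_rcv_finish being the
 * continuation function in net/ipv4/ip_input.c):
 *
 *	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
 *		       net, NULL, skb, dev, NULL,
 *		       ip_rcv_finish);
 */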

/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
		  unsigned int len);
int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
		  int *len);
#ifdef CONFIG_COMPAT
int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
			 char __user *opt, unsigned int len);
int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
			 char __user *opt, int *len);
#endif

/* Call this before modifying an existing packet: ensures it is
   modifiable and linear to the point you care about (writable_len).
   Returns true or false. */
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len);

struct flowi;
struct nf_queue_entry;

struct nf_afinfo {
	unsigned short family;
	__sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
			    unsigned int dataoff, u_int8_t protocol);
	__sum16 (*checksum_partial)(struct sk_buff *skb,
				    unsigned int hook,
				    unsigned int dataoff,
				    unsigned int len,
				    u_int8_t protocol);
	int (*route)(struct net *net, struct dst_entry **dst,
		     struct flowi *fl, bool strict);
	void (*saveroute)(const struct sk_buff *skb,
			  struct nf_queue_entry *entry);
	int (*reroute)(struct net *net, struct sk_buff *skb,
		       const struct nf_queue_entry *entry);
	int route_key_size;
};

extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO];
static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family)
{
	return rcu_dereference(nf_afinfo[family]);
}

static inline __sum16
nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff,
	    u_int8_t protocol, unsigned short family)
{
	const struct nf_afinfo *afinfo;
	__sum16 csum = 0;

	rcu_read_lock();
	afinfo = nf_get_afinfo(family);
	if (afinfo)
		csum = afinfo->checksum(skb, hook, dataoff, protocol);
	rcu_read_unlock();
	return csum;
}

static inline __sum16
nf_checksum_partial(struct sk_buff *skb, unsigned int hook,
		    unsigned int dataoff, unsigned int len,
		    u_int8_t protocol, unsigned short family)
{
	const struct nf_afinfo *afinfo;
	__sum16 csum = 0;

	rcu_read_lock();
	afinfo = nf_get_afinfo(family);
	if (afinfo)
		csum = afinfo->checksum_partial(skb, hook, dataoff, len,
						protocol);
	rcu_read_unlock();
	return csum;
}

int nf_register_afinfo(const struct nf_afinfo *afinfo);
void nf_unregister_afinfo(const struct nf_afinfo *afinfo);

#include <net/flow.h>
extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);

static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
#ifdef CONFIG_NF_NAT_NEEDED
	void (*decodefn)(struct sk_buff *, struct flowi *);

	rcu_read_lock();
	decodefn = rcu_dereference(nf_nat_decode_session_hook);
	if (decodefn)
		decodefn(skb, fl);
	rcu_read_unlock();
#endif
}

#else /* !CONFIG_NETFILTER */
static inline int
NF_HOOK_COND(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	     struct sk_buff *skb, struct net_device *in, struct net_device *out,
	     int (*okfn)(struct net *, struct sock *, struct sk_buff *),
	     bool cond)
{
	return okfn(net, sk, skb);
}

static inline int
NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
	struct sk_buff *skb, struct net_device *in, struct net_device *out,
	int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return okfn(net, sk, skb);
}

static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
			  struct sock *sk, struct sk_buff *skb,
			  struct net_device *indev, struct net_device *outdev,
			  int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
	return 1;
}

struct flowi;
static inline void
nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
{
}
#endif /*CONFIG_NETFILTER*/

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <linux/netfilter/nf_conntrack_zones_common.h>

extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif

struct nf_conn;
enum ip_conntrack_info;
struct nlattr;

struct nfnl_ct_hook {
	struct nf_conn *(*get_ct)(const struct sk_buff *skb,
				  enum ip_conntrack_info *ctinfo);
	size_t (*build_size)(const struct nf_conn *ct);
	int (*build)(struct sk_buff *skb, struct nf_conn *ct,
		     enum ip_conntrack_info ctinfo,
		     u_int16_t ct_attr, u_int16_t ct_info_attr);
	int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
	int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
			     u32 portid, u32 report);
	void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
			   enum ip_conntrack_info ctinfo, s32 off);
};
extern struct nfnl_ct_hook __rcu *nfnl_ct_hook;

/**
 * nf_skb_duplicated - TEE target has sent a packet
 *
 * When a xtables target sends a packet, the OUTPUT and POSTROUTING
 * hooks are traversed again, i.e. nft and xtables are invoked recursively.
 *
 * This is used by xtables TEE target to prevent the duplicated skb from
 * being duplicated again.
 */
DECLARE_PER_CPU(bool, nf_skb_duplicated);
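
/*
 * Illustrative sketch: code that duplicates an skb (as the TEE/dup
 * targets do) can guard against re-entrant duplication with this
 * per-cpu flag, roughly:
 *
 *	if (this_cpu_read(nf_skb_duplicated))
 *		return;
 *	this_cpu_write(nf_skb_duplicated, true);
 *	... transmit the duplicate ...
 *	this_cpu_write(nf_skb_duplicated, false);
 */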

#endif /*__LINUX_NETFILTER_H*/