Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Pablo Neira Ayuso says:

====================
Netfilter updates for net-next

The following patchset contains Netfilter updates for your net-next tree, they are:

1) Stash the ctinfo 3-bit field into the pointer to the nf_conntrack object in sk_buff, so we only access one single cacheline in the conntrack hotpath. Patchset from Florian Westphal.

2) Don't leak pointers to internal structures when exporting the x_tables ruleset back to userspace, from Willem de Bruijn. This includes new helper functions to copy data to userspace, such as xt_data_to_user(), as well as conversions of our ip_tables, ip6_tables and arp_tables clients to use them. Not surprisingly, ebtables requires an ad-hoc update. There is also a new field in x_tables extensions to indicate the number of bytes that we copy to userspace.

3) Add the nf_log_all_netns sysctl: this new knob allows you to enable logging via the nf_log infrastructure for all existing network namespaces. Given that the effort to provide per-netns syslog has been discontinued, this provides a way to restore logging using netfilter kernel logging facilities in trusted environments. Patch from Michal Kubecek.

4) Validate the SCTP checksum from the conntrack helper, from Davide Caratti.

5) Merge the UDPlite conntrack and NAT helpers into UDP; this was mostly a copy&paste of the original helpers, from Florian Westphal.

6) Reset netfilter state when duplicating packets, also from Florian.

7) Remove an unnecessary check for broadcast in IPv6 from the pkttype match and nft_meta, from Liping Zhang.

8) Add missing code to deal with loopback packets in nft_meta when used by the netdev family, also from Liping.

9) Several cleanups on nf_tables: one to remove an unnecessary check from the netlink control plane path when adding tables, sets and stateful objects, plus code consolidation when unregistering chain hooks, from Gao Feng.

10) Fix a harmless reference counter underflow in IPVS that, however, results in problems with the introduction of the new refcount_t type, from David Windsor.

11) Enable LIBCRC32C from nf_ct_sctp instead of nf_nat_sctp, from Davide Caratti.

12) Add missing documentation to the nf_tables uapi header, from Liping Zhang.

13) Use the rb_entry() helper in xt_connlimit, from Geliang Tang.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
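A minimal userspace sketch of the idea behind item (1): because a struct nf_conn is at least 8-byte aligned, the low three bits of its address are always zero, so the 3-bit ctinfo value can share one word with the pointer (as skb->_nfct does after this series). The names below are made up for illustration only; this is not kernel code.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define INFO_MASK 7UL             /* low 3 bits carry the ctinfo value */
#define PTR_MASK  (~INFO_MASK)    /* remaining bits carry the pointer  */

struct conn { int dummy; };

static uintptr_t pack(struct conn *ct, unsigned int info)
{
	/* requires >= 8-byte alignment so the info bits are free */
	assert(((uintptr_t)ct & INFO_MASK) == 0);
	return (uintptr_t)ct | info;
}

int main(void)
{
	static struct conn c __attribute__((aligned(8)));
	uintptr_t word = pack(&c, 2);	/* e.g. "related" state */

	struct conn *ct = (struct conn *)(word & PTR_MASK);
	unsigned int info = word & INFO_MASK;

	printf("ct=%p info=%u\n", (void *)ct, info);
	return (ct == &c && info == 2) ? 0 : 1;
}
```

Reading the packed word back therefore touches a single field (and a single cacheline) instead of two separate skb members.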
Commit 52e01b84a2
@@ -0,0 +1,10 @@
+/proc/sys/net/netfilter/* Variables:
+
+nf_log_all_netns - BOOLEAN
+	0 - disabled (default)
+	not 0 - enabled
+
+By default, only init_net namespace can log packets into kernel log
+with LOG target; this aims to prevent containers from flooding host
+kernel log. If enabled, this target also works in other network
+namespaces. This variable is only accessible from init_net.
@@ -167,6 +167,7 @@ struct xt_match {
 
 	const char *table;
 	unsigned int matchsize;
+	unsigned int usersize;
 #ifdef CONFIG_COMPAT
 	unsigned int compatsize;
 #endif

@@ -207,6 +208,7 @@ struct xt_target {
 
 	const char *table;
 	unsigned int targetsize;
+	unsigned int usersize;
 #ifdef CONFIG_COMPAT
 	unsigned int compatsize;
 #endif
@@ -287,6 +289,13 @@ int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
 int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
		    bool inv_proto);
 
+int xt_match_to_user(const struct xt_entry_match *m,
+		     struct xt_entry_match __user *u);
+int xt_target_to_user(const struct xt_entry_target *t,
+		      struct xt_entry_target __user *u);
+int xt_data_to_user(void __user *dst, const void *src,
+		    int usersize, int size);
+
 void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
				 struct xt_counters_info *info, bool compat);
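A hypothetical x_tables extension (not part of this patchset) showing how the new usersize field is meant to be used: everything past the first kernel-private member is excluded, so xt_match_to_user()/xt_data_to_user() copy back only the user-visible prefix of the match data. The struct and names below are invented for the example, and the registration is trimmed to the size-related fields.

```c
#include <linux/netfilter/x_tables.h>

struct example_mt_info {
	__u32 threshold;	/* configured from userspace */
	unsigned long hits;	/* kernel-internal state, must not leak */
};

static struct xt_match example_mt_reg __read_mostly = {
	.name      = "example",
	.revision  = 0,
	.family    = NFPROTO_UNSPEC,
	.matchsize = sizeof(struct example_mt_info),
	/* only bytes before 'hits' are copied back to userspace */
	.usersize  = offsetof(struct example_mt_info, hits),
	.me        = THIS_MODULE,
};
```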
@@ -585,7 +585,6 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
 * @cloned: Head may be cloned (check refcnt to be sure)
 * @ip_summed: Driver fed us an IP checksum
 * @nohdr: Payload reference only, must not modify header
- * @nfctinfo: Relationship of this skb to the connection
 * @pkt_type: Packet class
 * @fclone: skbuff clone status
 * @ipvs_property: skbuff is owned by ipvs

@@ -598,7 +597,7 @@ static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
 * @nf_trace: netfilter packet trace flag
 * @protocol: Packet protocol from driver
 * @destructor: Destruct function
- * @nfct: Associated connection, if any
+ * @_nfct: Associated connection, if any (with nfctinfo bits)
 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 * @skb_iif: ifindex of device we arrived on
 * @tc_index: Traffic control index

@@ -671,7 +670,7 @@ struct sk_buff {
	struct sec_path *sp;
 #endif
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-	struct nf_conntrack *nfct;
+	unsigned long _nfct;
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	struct nf_bridge_info *nf_bridge;

@@ -724,7 +723,6 @@ struct sk_buff {
	__u8 pkt_type:3;
	__u8 pfmemalloc:1;
	__u8 ignore_df:1;
-	__u8 nfctinfo:3;
 
	__u8 nf_trace:1;
	__u8 ip_summed:2;

@@ -841,6 +839,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb)
 #define SKB_DST_NOREF	1UL
 #define SKB_DST_PTRMASK	~(SKB_DST_NOREF)
 
+#define SKB_NFCT_PTRMASK	~(7UL)
 /**
 * skb_dst - returns skb dst_entry
 * @skb: buffer

@@ -3558,6 +3557,15 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
	skb->csum = csum_add(skb->csum, delta);
 }
 
+static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+	return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
+#else
+	return NULL;
+#endif
+}
+
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 void nf_conntrack_destroy(struct nf_conntrack *nfct);
 static inline void nf_conntrack_put(struct nf_conntrack *nfct)

@@ -3586,8 +3594,8 @@ static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
 static inline void nf_reset(struct sk_buff *skb)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-	nf_conntrack_put(skb->nfct);
-	skb->nfct = NULL;
+	nf_conntrack_put(skb_nfct(skb));
+	skb->_nfct = 0;
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);

@@ -3607,10 +3615,8 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
			     bool copy)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-	dst->nfct = src->nfct;
-	nf_conntrack_get(src->nfct);
-	if (copy)
-		dst->nfctinfo = src->nfctinfo;
+	dst->_nfct = src->_nfct;
+	nf_conntrack_get(skb_nfct(src));
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	dst->nf_bridge = src->nf_bridge;

@@ -3625,7 +3631,7 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src,
 static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-	nf_conntrack_put(dst->nfct);
+	nf_conntrack_put(skb_nfct(dst));
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(dst->nf_bridge);

@@ -3657,9 +3663,7 @@ static inline bool skb_irq_freeable(const struct sk_buff *skb)
 #if IS_ENABLED(CONFIG_XFRM)
	       !skb->sp &&
 #endif
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	       !skb->nfct &&
-#endif
+	       !skb_nfct(skb) &&
	       !skb->_skb_refdst &&
	       !skb_has_frag_list(skb);
 }
@@ -1421,7 +1421,7 @@ static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
 
 static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
 {
-	if (atomic_dec_return(&dest->refcnt) < 0)
+	if (atomic_dec_and_test(&dest->refcnt))
		kfree(dest);
 }
 

@@ -1554,10 +1554,12 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
	if (!ct || !nf_ct_is_untracked(ct)) {
-		nf_conntrack_put(skb->nfct);
-		skb->nfct = &nf_ct_untracked_get()->ct_general;
-		skb->nfctinfo = IP_CT_NEW;
-		nf_conntrack_get(skb->nfct);
+		struct nf_conn *untracked;
+
+		nf_conntrack_put(&ct->ct_general);
+		untracked = nf_ct_untracked_get();
+		nf_conntrack_get(&untracked->ct_general);
+		nf_ct_set(skb, untracked, IP_CT_NEW);
	}
 #endif
 }
@@ -14,6 +14,7 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
 
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
 #ifdef CONFIG_NF_CT_PROTO_DCCP
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;

@@ -5,6 +5,7 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
 
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
+extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
 #ifdef CONFIG_NF_CT_PROTO_DCCP
 extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
@@ -34,6 +34,7 @@ union nf_conntrack_proto {
	struct ip_ct_sctp sctp;
	struct ip_ct_tcp tcp;
	struct nf_ct_gre gre;
+	unsigned int tmpl_padto;
 };
 
 union nf_conntrack_expect_proto {

@@ -75,7 +76,7 @@ struct nf_conn {
	/* Usage count in here is 1 for hash table, 1 per skb,
	 * plus 1 for any connection(s) we are `master' for
	 *
-	 * Hint, SKB address this struct and refcnt via skb->nfct and
+	 * Hint, SKB address this struct and refcnt via skb->_nfct and
	 * helpers nf_conntrack_get() and nf_conntrack_put().
	 * Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt,
	 * beware nf_ct_get() is different and don't inc refcnt.

@@ -162,12 +163,16 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
 int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
			     const struct nf_conn *ignored_conntrack);
 
+#define NFCT_INFOMASK	7UL
+#define NFCT_PTRMASK	~(NFCT_INFOMASK)
+
 /* Return conntrack_info and tuple hash for given skb. */
 static inline struct nf_conn *
 nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
 {
-	*ctinfo = skb->nfctinfo;
-	return (struct nf_conn *)skb->nfct;
+	*ctinfo = skb->_nfct & NFCT_INFOMASK;
+
+	return (struct nf_conn *)(skb->_nfct & NFCT_PTRMASK);
 }
 
 /* decrement reference count on a conntrack */

@@ -341,6 +346,12 @@ struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
			      gfp_t flags);
 void nf_ct_tmpl_free(struct nf_conn *tmpl);
 
+static inline void
+nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
+{
+	skb->_nfct = (unsigned long)ct | info;
+}
+
 #define NF_CT_STAT_INC(net, count)	  __this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
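The header changes above redefine nf_ct_get() and introduce nf_ct_set() on top of the single skb->_nfct word. A minimal, hypothetical hook (not part of this patchset) sketching how callers now read both the conntrack entry and ctinfo from that one field:

```c
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>

/* Example hook: the names and behaviour here are illustrative only. */
static unsigned int example_hook(void *priv, struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);	/* one skb field */

	if (ct && ctinfo == IP_CT_NEW)
		pr_debug("first packet of a new connection\n");

	return NF_ACCEPT;
}
```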
@@ -62,7 +62,7 @@ int __nf_conntrack_confirm(struct sk_buff *skb);
 /* Confirm a connection: returns NF_DROP if packet must be dropped. */
 static inline int nf_conntrack_confirm(struct sk_buff *skb)
 {
-	struct nf_conn *ct = (struct nf_conn *)skb->nfct;
+	struct nf_conn *ct = (struct nf_conn *)skb_nfct(skb);
	int ret = NF_ACCEPT;
 
	if (ct && !nf_ct_is_untracked(ct)) {

@@ -55,7 +55,7 @@ struct nf_conntrack_l4proto {
	void (*destroy)(struct nf_conn *ct);
 
	int (*error)(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
-		     unsigned int dataoff, enum ip_conntrack_info *ctinfo,
+		     unsigned int dataoff,
		     u_int8_t pf, unsigned int hooknum);
 
	/* Print out the per-protocol part of the tuple. Return like seq_* */
@@ -51,6 +51,9 @@ struct nf_logger {
	struct module *me;
 };
 
+/* sysctl_nf_log_all_netns - allow LOG target in all network namespaces */
+extern int sysctl_nf_log_all_netns;
+
 /* Function to register/unregister log function. */
 int nf_log_register(u_int8_t pf, struct nf_logger *logger);
 void nf_log_unregister(struct nf_logger *logger);
@@ -69,19 +69,6 @@ struct nf_sctp_net {
 };
 #endif
 
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-enum udplite_conntrack {
-	UDPLITE_CT_UNREPLIED,
-	UDPLITE_CT_REPLIED,
-	UDPLITE_CT_MAX
-};
-
-struct nf_udplite_net {
-	struct nf_proto_net pn;
-	unsigned int timeouts[UDPLITE_CT_MAX];
-};
-#endif
-
 struct nf_ip_net {
	struct nf_generic_net generic;
	struct nf_tcp_net tcp;

@@ -94,9 +81,6 @@ struct nf_ip_net {
 #ifdef CONFIG_NF_CT_PROTO_SCTP
	struct nf_sctp_net sctp;
 #endif
-#ifdef CONFIG_NF_CT_PROTO_UDPLITE
-	struct nf_udplite_net udplite;
-#endif
 };
 
 struct ct_pcpu {
@@ -860,6 +860,10 @@ enum nft_rt_attributes {
 * @NFT_CT_PROTOCOL: conntrack layer 4 protocol
 * @NFT_CT_PROTO_SRC: conntrack layer 4 protocol source
 * @NFT_CT_PROTO_DST: conntrack layer 4 protocol destination
+ * @NFT_CT_LABELS: conntrack labels
+ * @NFT_CT_PKTS: conntrack packets
+ * @NFT_CT_BYTES: conntrack bytes
+ * @NFT_CT_AVGPKT: conntrack average bytes per packet
 */
 enum nft_ct_keys {
	NFT_CT_STATE,

@@ -878,6 +882,7 @@ enum nft_ct_keys {
	NFT_CT_LABELS,
	NFT_CT_PKTS,
	NFT_CT_BYTES,
+	NFT_CT_AVGPKT,
 };
 
 /**
@@ -105,6 +105,7 @@ static struct xt_match ebt_limit_mt_reg __read_mostly = {
	.match		= ebt_limit_mt,
	.checkentry	= ebt_limit_mt_check,
	.matchsize	= sizeof(struct ebt_limit_info),
+	.usersize	= offsetof(struct ebt_limit_info, prev),
 #ifdef CONFIG_COMPAT
	.compatsize	= sizeof(struct ebt_compat_limit_info),
 #endif
@@ -78,7 +78,7 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
	unsigned int bitmask;
 
	/* FIXME: Disabled from containers until syslog ns is supported */
-	if (!net_eq(net, &init_net))
+	if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
		return;
 
	spin_lock_bh(&ebt_log_lock);
@@ -1346,56 +1346,72 @@ static int update_counters(struct net *net, const void __user *user,
			   hlp.num_counters, user, len);
 }
 
-static inline int ebt_make_matchname(const struct ebt_entry_match *m,
-				     const char *base, char __user *ubase)
+static inline int ebt_obj_to_user(char __user *um, const char *_name,
+				  const char *data, int entrysize,
+				  int usersize, int datasize)
 {
-	char __user *hlp = ubase + ((char *)m - base);
-	char name[EBT_FUNCTION_MAXNAMELEN] = {};
+	char name[EBT_FUNCTION_MAXNAMELEN] = {0};
 
	/* ebtables expects 32 bytes long names but xt_match names are 29 bytes
	 * long. Copy 29 bytes and fill remaining bytes with zeroes.
	 */
-	strlcpy(name, m->u.match->name, sizeof(name));
-	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
+	strlcpy(name, _name, sizeof(name));
+	if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) ||
+	    put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) ||
+	    xt_data_to_user(um + entrysize, data, usersize, datasize))
		return -EFAULT;
+
	return 0;
 }
 
-static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
-				       const char *base, char __user *ubase)
+static inline int ebt_match_to_user(const struct ebt_entry_match *m,
+				    const char *base, char __user *ubase)
 {
-	char __user *hlp = ubase + ((char *)w - base);
-	char name[EBT_FUNCTION_MAXNAMELEN] = {};
-
-	strlcpy(name, w->u.watcher->name, sizeof(name));
-	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
-		return -EFAULT;
-	return 0;
+	return ebt_obj_to_user(ubase + ((char *)m - base),
+			       m->u.match->name, m->data, sizeof(*m),
+			       m->u.match->usersize, m->match_size);
+}
+
+static inline int ebt_watcher_to_user(const struct ebt_entry_watcher *w,
+				      const char *base, char __user *ubase)
+{
+	return ebt_obj_to_user(ubase + ((char *)w - base),
+			       w->u.watcher->name, w->data, sizeof(*w),
+			       w->u.watcher->usersize, w->watcher_size);
 }
 
-static inline int ebt_make_names(struct ebt_entry *e, const char *base,
-				 char __user *ubase)
+static inline int ebt_entry_to_user(struct ebt_entry *e, const char *base,
+				    char __user *ubase)
 {
	int ret;
	char __user *hlp;
	const struct ebt_entry_target *t;
-	char name[EBT_FUNCTION_MAXNAMELEN] = {};
 
-	if (e->bitmask == 0)
+	if (e->bitmask == 0) {
+		/* special case !EBT_ENTRY_OR_ENTRIES */
+		if (copy_to_user(ubase + ((char *)e - base), e,
+				 sizeof(struct ebt_entries)))
+			return -EFAULT;
		return 0;
+	}
+
+	if (copy_to_user(ubase + ((char *)e - base), e, sizeof(*e)))
+		return -EFAULT;
 
	hlp = ubase + (((char *)e + e->target_offset) - base);
	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
 
-	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
+	ret = EBT_MATCH_ITERATE(e, ebt_match_to_user, base, ubase);
	if (ret != 0)
		return ret;
-	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
+	ret = EBT_WATCHER_ITERATE(e, ebt_watcher_to_user, base, ubase);
	if (ret != 0)
		return ret;
-	strlcpy(name, t->u.target->name, sizeof(name));
-	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
-		return -EFAULT;
+	ret = ebt_obj_to_user(hlp, t->u.target->name, t->data, sizeof(*t),
+			      t->u.target->usersize, t->target_size);
+	if (ret != 0)
+		return ret;
+
	return 0;
 }
 
@@ -1475,13 +1491,9 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
	if (ret)
		return ret;
 
-	if (copy_to_user(tmp.entries, entries, entries_size)) {
-		BUGPRINT("Couldn't copy entries to userspace\n");
-		return -EFAULT;
-	}
	/* set the match/watcher/target names right */
	return EBT_ENTRY_ITERATE(entries, entries_size,
-			ebt_make_names, entries, tmp.entries);
+			ebt_entry_to_user, entries, tmp.entries);
 }
 
 static int do_ebt_set_ctl(struct sock *sk,

@@ -1630,8 +1642,10 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
	if (match->compat_to_user) {
		if (match->compat_to_user(cm->data, m->data))
			return -EFAULT;
-	} else if (copy_to_user(cm->data, m->data, msize))
+	} else {
+		if (xt_data_to_user(cm->data, m->data, match->usersize, msize))
			return -EFAULT;
+	}
 
	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;

@@ -1657,8 +1671,10 @@ static int compat_target_to_user(struct ebt_entry_target *t,
	if (target->compat_to_user) {
		if (target->compat_to_user(cm->data, t->data))
			return -EFAULT;
-	} else if (copy_to_user(cm->data, t->data, tsize))
-		return -EFAULT;
+	} else {
+		if (xt_data_to_user(cm->data, t->data, target->usersize, tsize))
+			return -EFAULT;
+	}
 
	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
@@ -654,7 +654,7 @@ static void skb_release_head_state(struct sk_buff *skb)
		skb->destructor(skb);
	}
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	nf_conntrack_put(skb->nfct);
+	nf_conntrack_put(skb_nfct(skb));
 #endif
 #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
@@ -677,11 +677,6 @@ static int copy_entries_to_user(unsigned int total_size,
		return PTR_ERR(counters);
 
	loc_cpu_entry = private->entries;
-	/* ... then copy entire thing ... */
-	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
-		ret = -EFAULT;
-		goto free_counters;
-	}
 
	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */

@@ -689,6 +684,10 @@ static int copy_entries_to_user(unsigned int total_size,
		const struct xt_entry_target *t;
 
		e = (struct arpt_entry *)(loc_cpu_entry + off);
+		if (copy_to_user(userptr + off, e, sizeof(*e))) {
+			ret = -EFAULT;
+			goto free_counters;
+		}
		if (copy_to_user(userptr + off
				 + offsetof(struct arpt_entry, counters),
				 &counters[num],

@@ -698,11 +697,7 @@ static int copy_entries_to_user(unsigned int total_size,
		}
 
		t = arpt_get_target_c(e);
-		if (copy_to_user(userptr + off + e->target_offset
-				 + offsetof(struct xt_entry_target,
-					    u.user.name),
-				 t->u.kernel.target->name,
-				 strlen(t->u.kernel.target->name)+1) != 0) {
+		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
			ret = -EFAULT;
			goto free_counters;
		}
@@ -826,10 +826,6 @@ copy_entries_to_user(unsigned int total_size,
		return PTR_ERR(counters);
 
	loc_cpu_entry = private->entries;
-	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
-		ret = -EFAULT;
-		goto free_counters;
-	}
 
	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */

@@ -839,6 +835,10 @@ copy_entries_to_user(unsigned int total_size,
		const struct xt_entry_target *t;
 
		e = (struct ipt_entry *)(loc_cpu_entry + off);
+		if (copy_to_user(userptr + off, e, sizeof(*e))) {
+			ret = -EFAULT;
+			goto free_counters;
+		}
		if (copy_to_user(userptr + off
				 + offsetof(struct ipt_entry, counters),
				 &counters[num],

@@ -852,23 +852,14 @@ copy_entries_to_user(unsigned int total_size,
		     i += m->u.match_size) {
			m = (void *)e + i;
 
-			if (copy_to_user(userptr + off + i
-					 + offsetof(struct xt_entry_match,
-						    u.user.name),
-					 m->u.kernel.match->name,
-					 strlen(m->u.kernel.match->name)+1)
-			    != 0) {
+			if (xt_match_to_user(m, userptr + off + i)) {
				ret = -EFAULT;
				goto free_counters;
			}
		}
 
		t = ipt_get_target_c(e);
-		if (copy_to_user(userptr + off + e->target_offset
-				 + offsetof(struct xt_entry_target,
-					    u.user.name),
-				 t->u.kernel.target->name,
-				 strlen(t->u.kernel.target->name)+1) != 0) {
+		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
			ret = -EFAULT;
			goto free_counters;
		}
@@ -485,6 +485,7 @@ static struct xt_target clusterip_tg_reg __read_mostly = {
	.checkentry	= clusterip_tg_check,
	.destroy	= clusterip_tg_destroy,
	.targetsize	= sizeof(struct ipt_clusterip_tgt_info),
+	.usersize	= offsetof(struct ipt_clusterip_tgt_info, config),
 #ifdef CONFIG_COMPAT
	.compatsize	= sizeof(struct compat_ipt_clusterip_tgt_info),
 #endif /* CONFIG_COMPAT */
@@ -57,8 +57,7 @@ synproxy_send_tcp(struct net *net,
		goto free_nskb;
 
	if (nfct) {
-		nskb->nfct = nfct;
-		nskb->nfctinfo = ctinfo;
+		nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo);
		nf_conntrack_get(nfct);
	}
 

@@ -107,8 +106,8 @@ synproxy_send_client_synack(struct net *net,
 
	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
-			  niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
+			  IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
 }
 
 static void

@@ -230,8 +229,8 @@ synproxy_send_client_ack(struct net *net,
 
	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
-			  niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
+			  IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
 }
 
 static bool
@@ -128,16 +128,16 @@ static bool icmp_new(struct nf_conn *ct, const struct sk_buff *skb,
 /* Returns conntrack if it dealt with ICMP, and filled in skb fields */
 static int
 icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
-		   enum ip_conntrack_info *ctinfo,
		   unsigned int hooknum)
 {
	struct nf_conntrack_tuple innertuple, origtuple;
	const struct nf_conntrack_l4proto *innerproto;
	const struct nf_conntrack_tuple_hash *h;
	const struct nf_conntrack_zone *zone;
+	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_zone tmp;
 
-	NF_CT_ASSERT(skb->nfct == NULL);
+	NF_CT_ASSERT(!skb_nfct(skb));
	zone = nf_ct_zone_tmpl(tmpl, skb, &tmp);
 
	/* Are they talking about one of our connections? */

@@ -160,7 +160,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
		return -NF_ACCEPT;
	}
 
-	*ctinfo = IP_CT_RELATED;
+	ctinfo = IP_CT_RELATED;
 
	h = nf_conntrack_find_get(net, zone, &innertuple);
	if (!h) {

@@ -169,11 +169,10 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
	}
 
	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
-		*ctinfo += IP_CT_IS_REPLY;
+		ctinfo += IP_CT_IS_REPLY;
 
	/* Update skb to refer to this connection */
-	skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
-	skb->nfctinfo = *ctinfo;
+	nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo);
	return NF_ACCEPT;
 }
 

@@ -181,7 +180,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
 static int
 icmp_error(struct net *net, struct nf_conn *tmpl,
	   struct sk_buff *skb, unsigned int dataoff,
-	   enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
+	   u8 pf, unsigned int hooknum)
 {
	const struct icmphdr *icmph;
	struct icmphdr _ih;

@@ -225,7 +224,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
	    icmph->type != ICMP_REDIRECT)
		return NF_ACCEPT;
 
-	return icmp_error_message(net, tmpl, skb, ctinfo, hooknum);
+	return icmp_error_message(net, tmpl, skb, hooknum);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -45,7 +45,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
 {
	u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	if (skb->nfct) {
+	if (skb_nfct(skb)) {
		enum ip_conntrack_info ctinfo;
		const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 

@@ -75,7 +75,7 @@ static unsigned int ipv4_conntrack_defrag(void *priv,
 #if !IS_ENABLED(CONFIG_NF_NAT)
	/* Previously seen (loopback)? Ignore. Do this before
	   fragment check. */
-	if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
+	if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb)))
		return NF_ACCEPT;
 #endif
 #endif
@@ -68,10 +68,9 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
	/* Avoid counting cloned packets towards the original connection. */
-	nf_conntrack_put(skb->nfct);
-	skb->nfct = &nf_ct_untracked_get()->ct_general;
-	skb->nfctinfo = IP_CT_NEW;
-	nf_conntrack_get(skb->nfct);
+	nf_reset(skb);
+	nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
+	nf_conntrack_get(skb_nfct(skb));
 #endif
	/*
	 * If we are in PREROUTING/INPUT, decrease the TTL to mitigate potential
@@ -87,7 +87,7 @@ static void nf_log_arp_packet(struct net *net, u_int8_t pf,
	struct nf_log_buf *m;
 
	/* FIXME: Disabled from containers until syslog ns is supported */
-	if (!net_eq(net, &init_net))
+	if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
		return;
 
	m = nf_log_buf_open();

@@ -319,7 +319,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
	struct nf_log_buf *m;
 
	/* FIXME: Disabled from containers until syslog ns is supported */
-	if (!net_eq(net, &init_net))
+	if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
		return;
 
	m = nf_log_buf_open();
@@ -855,10 +855,6 @@ copy_entries_to_user(unsigned int total_size,
		return PTR_ERR(counters);
 
	loc_cpu_entry = private->entries;
-	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
-		ret = -EFAULT;
-		goto free_counters;
-	}
 
	/* FIXME: use iterator macros --RR */
	/* ... then go back and fix counters and names */

@@ -868,6 +864,10 @@ copy_entries_to_user(unsigned int total_size,
		const struct xt_entry_target *t;
 
		e = (struct ip6t_entry *)(loc_cpu_entry + off);
+		if (copy_to_user(userptr + off, e, sizeof(*e))) {
+			ret = -EFAULT;
+			goto free_counters;
+		}
		if (copy_to_user(userptr + off
				 + offsetof(struct ip6t_entry, counters),
				 &counters[num],

@@ -881,23 +881,14 @@ copy_entries_to_user(unsigned int total_size,
		     i += m->u.match_size) {
			m = (void *)e + i;
 
-			if (copy_to_user(userptr + off + i
-					 + offsetof(struct xt_entry_match,
-						    u.user.name),
-					 m->u.kernel.match->name,
-					 strlen(m->u.kernel.match->name)+1)
-			    != 0) {
+			if (xt_match_to_user(m, userptr + off + i)) {
				ret = -EFAULT;
				goto free_counters;
			}
		}
 
		t = ip6t_get_target_c(e);
-		if (copy_to_user(userptr + off + e->target_offset
-				 + offsetof(struct xt_entry_target,
-					    u.user.name),
-				 t->u.kernel.target->name,
-				 strlen(t->u.kernel.target->name)+1) != 0) {
+		if (xt_target_to_user(t, userptr + off + e->target_offset)) {
			ret = -EFAULT;
			goto free_counters;
		}
@@ -112,6 +112,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
		.table		= "mangle",
		.target		= ip6t_snpt_tg,
		.targetsize	= sizeof(struct ip6t_npt_tginfo),
+		.usersize	= offsetof(struct ip6t_npt_tginfo, adjustment),
		.checkentry	= ip6t_npt_checkentry,
		.family		= NFPROTO_IPV6,
		.hooks		= (1 << NF_INET_LOCAL_IN) |

@@ -123,6 +124,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {
		.table		= "mangle",
		.target		= ip6t_dnpt_tg,
		.targetsize	= sizeof(struct ip6t_npt_tginfo),
+		.usersize	= offsetof(struct ip6t_npt_tginfo, adjustment),
		.checkentry	= ip6t_npt_checkentry,
		.family		= NFPROTO_IPV6,
		.hooks		= (1 << NF_INET_PRE_ROUTING) |
@@ -71,8 +71,7 @@ synproxy_send_tcp(struct net *net,
	skb_dst_set(nskb, dst);
 
	if (nfct) {
-		nskb->nfct = nfct;
-		nskb->nfctinfo = ctinfo;
+		nf_ct_set(nskb, (struct nf_conn *)nfct, ctinfo);
		nf_conntrack_get(nfct);
	}
 

@@ -121,8 +120,8 @@ synproxy_send_client_synack(struct net *net,
 
	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
-			  niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
+			  IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
 }
 
 static void

@@ -244,8 +243,8 @@ synproxy_send_client_ack(struct net *net,
 
	synproxy_build_options(nth, opts);
 
-	synproxy_send_tcp(net, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
-			  niph, nth, tcp_hdr_size);
+	synproxy_send_tcp(net, skb, nskb, skb_nfct(skb),
+			  IP_CT_ESTABLISHED_REPLY, niph, nth, tcp_hdr_size);
 }
 
 static bool
@@ -145,15 +145,15 @@ static int
 icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
		     struct sk_buff *skb,
		     unsigned int icmp6off,
-		     enum ip_conntrack_info *ctinfo,
		     unsigned int hooknum)
 {
	struct nf_conntrack_tuple intuple, origtuple;
	const struct nf_conntrack_tuple_hash *h;
	const struct nf_conntrack_l4proto *inproto;
+	enum ip_conntrack_info ctinfo;
	struct nf_conntrack_zone tmp;
 
-	NF_CT_ASSERT(skb->nfct == NULL);
+	NF_CT_ASSERT(!skb_nfct(skb));
 
	/* Are they talking about one of our connections? */
	if (!nf_ct_get_tuplepr(skb,

@@ -176,7 +176,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
		return -NF_ACCEPT;
	}
 
-	*ctinfo = IP_CT_RELATED;
+	ctinfo = IP_CT_RELATED;
 
	h = nf_conntrack_find_get(net, nf_ct_zone_tmpl(tmpl, skb, &tmp),
				  &intuple);

@@ -185,19 +185,18 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
		return -NF_ACCEPT;
	} else {
		if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
-			*ctinfo += IP_CT_IS_REPLY;
+			ctinfo += IP_CT_IS_REPLY;
	}
 
	/* Update skb to refer to this connection */
-	skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
-	skb->nfctinfo = *ctinfo;
+	nf_ct_set(skb, nf_ct_tuplehash_to_ctrack(h), ctinfo);
	return NF_ACCEPT;
 }
 
 static int
 icmpv6_error(struct net *net, struct nf_conn *tmpl,
	     struct sk_buff *skb, unsigned int dataoff,
-	     enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum)
+	     u8 pf, unsigned int hooknum)
 {
	const struct icmp6hdr *icmp6h;
	struct icmp6hdr _ih;

@@ -222,9 +221,8 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
		type = icmp6h->icmp6_type - 130;
		if (type >= 0 && type < sizeof(noct_valid_new) &&
		    noct_valid_new[type]) {
-			skb->nfct = &nf_ct_untracked_get()->ct_general;
-			skb->nfctinfo = IP_CT_NEW;
-			nf_conntrack_get(skb->nfct);
+			nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
+			nf_conntrack_get(skb_nfct(skb));
			return NF_ACCEPT;
		}
 

@@ -232,7 +230,7 @@ icmpv6_error(struct net *net, struct nf_conn *tmpl,
	if (icmp6h->icmp6_type >= 128)
		return NF_ACCEPT;
 
-	return icmpv6_error_message(net, tmpl, skb, dataoff, ctinfo, hooknum);
+	return icmpv6_error_message(net, tmpl, skb, dataoff, hooknum);
 }
 
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -37,7 +37,7 @@ static enum ip6_defrag_users nf_ct6_defrag_user(unsigned int hooknum,
 {
	u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	if (skb->nfct) {
+	if (skb_nfct(skb)) {
		enum ip_conntrack_info ctinfo;
		const struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 

@@ -61,7 +61,7 @@ static unsigned int ipv6_defrag(void *priv,
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
	/* Previously seen (loopback)? */
-	if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
+	if (skb_nfct(skb) && !nf_ct_is_template((struct nf_conn *)skb_nfct(skb)))
		return NF_ACCEPT;
 #endif
 
@@ -57,10 +57,9 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
		return;
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	nf_conntrack_put(skb->nfct);
-	skb->nfct = &nf_ct_untracked_get()->ct_general;
-	skb->nfctinfo = IP_CT_NEW;
-	nf_conntrack_get(skb->nfct);
+	nf_reset(skb);
+	nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
+	nf_conntrack_get(skb_nfct(skb));
 #endif
	if (hooknum == NF_INET_PRE_ROUTING ||
	    hooknum == NF_INET_LOCAL_IN) {
@@ -351,7 +351,7 @@ static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
	struct nf_log_buf *m;
 
	/* FIXME: Disabled from containers until syslog ns is supported */
-	if (!net_eq(net, &init_net))
+	if (!net_eq(net, &init_net) && !sysctl_nf_log_all_netns)
		return;
 
	m = nf_log_buf_open();
@@ -162,6 +162,7 @@ config NF_CT_PROTO_SCTP
	bool 'SCTP protocol connection tracking support'
	depends on NETFILTER_ADVANCED
	default y
+	select LIBCRC32C
	help
	  With this option enabled, the layer 3 independent connection
	  tracking code will be able to do state tracking on SCTP connections.

@@ -397,7 +398,6 @@ config NF_NAT_PROTO_SCTP
	bool
	default NF_NAT && NF_CT_PROTO_SCTP
	depends on NF_NAT && NF_CT_PROTO_SCTP
-	select LIBCRC32C
 
 config NF_NAT_AMANDA
	tristate
@@ -7,7 +7,6 @@ nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
 nf_conntrack-$(CONFIG_NF_CONNTRACK_LABELS) += nf_conntrack_labels.o
 nf_conntrack-$(CONFIG_NF_CT_PROTO_DCCP) += nf_conntrack_proto_dccp.o
 nf_conntrack-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
-nf_conntrack-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
 
 obj-$(CONFIG_NETFILTER) = netfilter.o
 

@@ -47,7 +46,6 @@ nf_nat-y := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \
 # NAT protocols (nf_nat)
 nf_nat-$(CONFIG_NF_NAT_PROTO_DCCP) += nf_nat_proto_dccp.o
 nf_nat-$(CONFIG_NF_NAT_PROTO_SCTP) += nf_nat_proto_sctp.o
-nf_nat-$(CONFIG_NF_NAT_PROTO_UDPLITE) += nf_nat_proto_udplite.o
 
 # generic transport layer logging
 obj-$(CONFIG_NF_LOG_COMMON) += nf_log_common.o
@@ -375,7 +375,7 @@ void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
 {
	void (*attach)(struct sk_buff *, const struct sk_buff *);
 
-	if (skb->nfct) {
+	if (skb->_nfct) {
		rcu_read_lock();
		attach = rcu_dereference(ip_ct_attach);
		if (attach)
@@ -710,7 +710,6 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, int dest_af,
		      dest->vport == svc->port))) {
			/* HIT */
			list_del(&dest->t_list);
-			ip_vs_dest_hold(dest);
			goto out;
		}
	}

@@ -740,7 +739,7 @@ static void ip_vs_dest_free(struct ip_vs_dest *dest)
 * When the ip_vs_control_clearup is activated by ipvs module exit,
 * the service tables must have been flushed and all the connections
 * are expired, and the refcnt of each destination in the trash must
- * be 0, so we simply release them here.
+ * be 1, so we simply release them here.
 */
 static void ip_vs_trash_cleanup(struct netns_ipvs *ipvs)
 {

@@ -1079,11 +1078,10 @@ static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest,
		if (list_empty(&ipvs->dest_trash) && !cleanup)
			mod_timer(&ipvs->dest_trash_timer,
				  jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
-		/* dest lives in trash without reference */
+		/* dest lives in trash with reference */
		list_add(&dest->t_list, &ipvs->dest_trash);
		dest->idle_start = 0;
		spin_unlock_bh(&ipvs->dest_trash_lock);
-		ip_vs_dest_put(dest);
	}
 
 

@@ -1159,7 +1157,7 @@ static void ip_vs_dest_trash_expire(unsigned long data)
 
	spin_lock(&ipvs->dest_trash_lock);
	list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
-		if (atomic_read(&dest->refcnt) > 0)
+		if (atomic_read(&dest->refcnt) > 1)
			continue;
		if (dest->idle_start) {
			if (time_before(now, dest->idle_start +
@@ -350,16 +350,31 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
 	spin_unlock(&pcpu->lock);
 }
 
+#define NFCT_ALIGN(len)	(((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)
+
 /* Released via destroy_conntrack() */
 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
 				 const struct nf_conntrack_zone *zone,
 				 gfp_t flags)
 {
-	struct nf_conn *tmpl;
+	struct nf_conn *tmpl, *p;
 
-	tmpl = kzalloc(sizeof(*tmpl), flags);
-	if (tmpl == NULL)
-		return NULL;
+	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
+		tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
+		if (!tmpl)
+			return NULL;
+
+		p = tmpl;
+		tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
+		if (tmpl != p) {
+			tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
+			tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
+		}
+	} else {
+		tmpl = kzalloc(sizeof(*tmpl), flags);
+		if (!tmpl)
+			return NULL;
+	}
 
 	tmpl->status = IPS_TEMPLATE;
 	write_pnet(&tmpl->ct_net, net);
@@ -374,7 +389,11 @@ void nf_ct_tmpl_free(struct nf_conn *tmpl)
 {
 	nf_ct_ext_destroy(tmpl);
 	nf_ct_ext_free(tmpl);
-	kfree(tmpl);
+
+	if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
+		kfree((char *)tmpl - tmpl->proto.tmpl_padto);
+	else
+		kfree(tmpl);
 }
 EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
 
@@ -686,12 +705,12 @@ static int nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
 	    !nfct_nat(ct) &&
 	    !nf_ct_is_dying(ct) &&
 	    atomic_inc_not_zero(&ct->ct_general.use)) {
-		nf_ct_acct_merge(ct, ctinfo, (struct nf_conn *)skb->nfct);
-		nf_conntrack_put(skb->nfct);
-		/* Assign conntrack already in hashes to this skbuff. Don't
-		 * modify skb->nfctinfo to ensure consistent stateful filtering.
-		 */
-		skb->nfct = &ct->ct_general;
+		enum ip_conntrack_info oldinfo;
+		struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
+
+		nf_ct_acct_merge(ct, ctinfo, loser_ct);
+		nf_conntrack_put(&loser_ct->ct_general);
+		nf_ct_set(skb, ct, oldinfo);
 		return NF_ACCEPT;
 	}
 	NF_CT_STAT_INC(net, drop);
@@ -1218,7 +1237,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 		return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
 }
 
-/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
+/* On success, returns conntrack ptr, sets skb->_nfct | ctinfo */
 static inline struct nf_conn *
 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
 		  struct sk_buff *skb,
@@ -1277,8 +1296,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
 		}
 		*set_reply = 0;
 	}
-	skb->nfct = &ct->ct_general;
-	skb->nfctinfo = *ctinfo;
+	nf_ct_set(skb, ct, *ctinfo);
 	return ct;
 }
 
@@ -1286,7 +1304,7 @@ unsigned int
 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
 		struct sk_buff *skb)
 {
-	struct nf_conn *ct, *tmpl = NULL;
+	struct nf_conn *ct, *tmpl;
 	enum ip_conntrack_info ctinfo;
 	struct nf_conntrack_l3proto *l3proto;
 	struct nf_conntrack_l4proto *l4proto;
@@ -1296,14 +1314,14 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
 	int set_reply = 0;
 	int ret;
 
-	if (skb->nfct) {
+	tmpl = nf_ct_get(skb, &ctinfo);
+	if (tmpl) {
 		/* Previously seen (loopback or untracked)? Ignore. */
-		tmpl = (struct nf_conn *)skb->nfct;
 		if (!nf_ct_is_template(tmpl)) {
 			NF_CT_STAT_INC_ATOMIC(net, ignore);
 			return NF_ACCEPT;
 		}
-		skb->nfct = NULL;
+		skb->_nfct = 0;
 	}
 
 	/* rcu_read_lock()ed by nf_hook_thresh */
@@ -1324,8 +1342,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
 	 * inverse of the return code tells to the netfilter
 	 * core what to do with the packet. */
 	if (l4proto->error != NULL) {
-		ret = l4proto->error(net, tmpl, skb, dataoff, &ctinfo,
-				     pf, hooknum);
+		ret = l4proto->error(net, tmpl, skb, dataoff, pf, hooknum);
 		if (ret <= 0) {
 			NF_CT_STAT_INC_ATOMIC(net, error);
 			NF_CT_STAT_INC_ATOMIC(net, invalid);
@@ -1333,7 +1350,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
 			goto out;
 		}
 		/* ICMP[v6] protocol trackers may assign one conntrack. */
-		if (skb->nfct)
+		if (skb->_nfct)
 			goto out;
 	}
 repeat:
@@ -1353,7 +1370,7 @@ repeat:
 		goto out;
 	}
 
-	NF_CT_ASSERT(skb->nfct);
+	NF_CT_ASSERT(skb_nfct(skb));
 
 	/* Decide what timeout policy we want to apply to this flow. */
 	timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
@@ -1363,8 +1380,8 @@ repeat:
 		/* Invalid: inverse of the return code tells
 		 * the netfilter core what to do */
 		pr_debug("nf_conntrack_in: Can't track with proto module\n");
-		nf_conntrack_put(skb->nfct);
-		skb->nfct = NULL;
+		nf_conntrack_put(&ct->ct_general);
+		skb->_nfct = 0;
 		NF_CT_STAT_INC_ATOMIC(net, invalid);
 		if (ret == -NF_DROP)
 			NF_CT_STAT_INC_ATOMIC(net, drop);
@@ -1522,9 +1539,8 @@ static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
 		ctinfo = IP_CT_RELATED;
 
 	/* Attach to new skbuff, and increment count */
-	nskb->nfct = &ct->ct_general;
-	nskb->nfctinfo = ctinfo;
-	nf_conntrack_get(nskb->nfct);
+	nf_ct_set(nskb, ct, ctinfo);
+	nf_conntrack_get(skb_nfct(nskb));
 }
 
 /* Bring out ya dead! */
@@ -1860,7 +1876,8 @@ int nf_conntrack_init_start(void)
 	nf_conntrack_max = max_factor * nf_conntrack_htable_size;
 
 	nf_conntrack_cachep = kmem_cache_create("nf_conntrack",
-						sizeof(struct nf_conn), 0,
+						sizeof(struct nf_conn),
+						NFCT_INFOMASK + 1,
 						SLAB_DESTROY_BY_RCU | SLAB_HWCACHE_ALIGN, NULL);
 	if (!nf_conntrack_cachep)
 		goto err_cachep;
@@ -561,7 +561,6 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
 
 static int dccp_error(struct net *net, struct nf_conn *tmpl,
 		      struct sk_buff *skb, unsigned int dataoff,
-		      enum ip_conntrack_info *ctinfo,
 		      u_int8_t pf, unsigned int hooknum)
 {
 	struct dccp_hdr _dh, *dh;
@@ -22,7 +22,9 @@
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <net/sctp/checksum.h>
 
+#include <net/netfilter/nf_log.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_ecache.h>
@@ -505,6 +507,34 @@ static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
 	return true;
 }
 
+static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
+		      unsigned int dataoff,
+		      u8 pf, unsigned int hooknum)
+{
+	const struct sctphdr *sh;
+	struct sctphdr _sctph;
+	const char *logmsg;
+
+	sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
+	if (!sh) {
+		logmsg = "nf_ct_sctp: short packet ";
+		goto out_invalid;
+	}
+	if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
+	    skb->ip_summed == CHECKSUM_NONE) {
+		if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
+			logmsg = "nf_ct_sctp: bad CRC ";
+			goto out_invalid;
+		}
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	}
+	return NF_ACCEPT;
+out_invalid:
+	if (LOG_INVALID(net, IPPROTO_SCTP))
+		nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", logmsg);
+	return -NF_ACCEPT;
+}
+
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
@@ -752,6 +782,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
 	.packet			= sctp_packet,
 	.get_timeouts		= sctp_get_timeouts,
 	.new			= sctp_new,
+	.error			= sctp_error,
 	.me			= THIS_MODULE,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.to_nlattr		= sctp_to_nlattr,
@@ -786,6 +817,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
 	.packet			= sctp_packet,
 	.get_timeouts		= sctp_get_timeouts,
 	.new			= sctp_new,
+	.error			= sctp_error,
 	.me			= THIS_MODULE,
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 	.to_nlattr		= sctp_to_nlattr,
@@ -750,7 +750,6 @@ static const u8 tcp_valid_flags[(TCPHDR_FIN|TCPHDR_SYN|TCPHDR_RST|TCPHDR_ACK|
 static int tcp_error(struct net *net, struct nf_conn *tmpl,
 		     struct sk_buff *skb,
 		     unsigned int dataoff,
-		     enum ip_conntrack_info *ctinfo,
 		     u_int8_t pf,
 		     unsigned int hooknum)
 {
@@ -108,8 +108,60 @@ static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
 	return true;
 }
 
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+static int udplite_error(struct net *net, struct nf_conn *tmpl,
+			 struct sk_buff *skb,
+			 unsigned int dataoff,
+			 u8 pf, unsigned int hooknum)
+{
+	unsigned int udplen = skb->len - dataoff;
+	const struct udphdr *hdr;
+	struct udphdr _hdr;
+	unsigned int cscov;
+
+	/* Header is too small? */
+	hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+	if (!hdr) {
+		if (LOG_INVALID(net, IPPROTO_UDPLITE))
+			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
+				      "nf_ct_udplite: short packet ");
+		return -NF_ACCEPT;
+	}
+
+	cscov = ntohs(hdr->len);
+	if (cscov == 0) {
+		cscov = udplen;
+	} else if (cscov < sizeof(*hdr) || cscov > udplen) {
+		if (LOG_INVALID(net, IPPROTO_UDPLITE))
+			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
+				      "nf_ct_udplite: invalid checksum coverage ");
+		return -NF_ACCEPT;
+	}
+
+	/* UDPLITE mandates checksums */
+	if (!hdr->check) {
+		if (LOG_INVALID(net, IPPROTO_UDPLITE))
+			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
+				      "nf_ct_udplite: checksum missing ");
+		return -NF_ACCEPT;
+	}
+
+	/* Checksum invalid? Ignore. */
+	if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
+	    nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP,
+				pf)) {
+		if (LOG_INVALID(net, IPPROTO_UDPLITE))
+			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
+				      "nf_ct_udplite: bad UDPLite checksum ");
+		return -NF_ACCEPT;
+	}
+
+	return NF_ACCEPT;
+}
+#endif
+
 static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
-	       unsigned int dataoff, enum ip_conntrack_info *ctinfo,
+	       unsigned int dataoff,
 	       u_int8_t pf,
 	       unsigned int hooknum)
 {
@@ -290,6 +342,41 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4);
 
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
+{
+	.l3proto		= PF_INET,
+	.l4proto		= IPPROTO_UDPLITE,
+	.name			= "udplite",
+	.allow_clash		= true,
+	.pkt_to_tuple		= udp_pkt_to_tuple,
+	.invert_tuple		= udp_invert_tuple,
+	.print_tuple		= udp_print_tuple,
+	.packet			= udp_packet,
+	.get_timeouts		= udp_get_timeouts,
+	.new			= udp_new,
+	.error			= udplite_error,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
+	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
+	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
+	.nla_policy		= nf_ct_port_nla_policy,
+#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+	.ctnl_timeout		= {
+		.nlattr_to_obj	= udp_timeout_nlattr_to_obj,
+		.obj_to_nlattr	= udp_timeout_obj_to_nlattr,
+		.nlattr_max	= CTA_TIMEOUT_UDP_MAX,
+		.obj_size	= sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
+		.nla_policy	= udp_timeout_nla_policy,
+	},
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+	.init_net		= udp_init_net,
+	.get_net_proto		= udp_get_net_proto,
+};
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4);
+#endif
+
 struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
 {
 	.l3proto		= PF_INET6,
@@ -322,3 +409,38 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
 	.get_net_proto		= udp_get_net_proto,
 };
 EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
+
+#ifdef CONFIG_NF_CT_PROTO_UDPLITE
+struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
+{
+	.l3proto		= PF_INET6,
+	.l4proto		= IPPROTO_UDPLITE,
+	.name			= "udplite",
+	.allow_clash		= true,
+	.pkt_to_tuple		= udp_pkt_to_tuple,
+	.invert_tuple		= udp_invert_tuple,
+	.print_tuple		= udp_print_tuple,
+	.packet			= udp_packet,
+	.get_timeouts		= udp_get_timeouts,
+	.new			= udp_new,
+	.error			= udplite_error,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
+	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
+	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
+	.nla_policy		= nf_ct_port_nla_policy,
+#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+	.ctnl_timeout		= {
+		.nlattr_to_obj	= udp_timeout_nlattr_to_obj,
+		.obj_to_nlattr	= udp_timeout_obj_to_nlattr,
+		.nlattr_max	= CTA_TIMEOUT_UDP_MAX,
+		.obj_size	= sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
+		.nla_policy	= udp_timeout_nla_policy,
+	},
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+	.init_net		= udp_init_net,
+	.get_net_proto		= udp_get_net_proto,
+};
+EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
+#endif
@@ -1,324 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2007 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/timer.h>
-#include <linux/udp.h>
-#include <linux/seq_file.h>
-#include <linux/skbuff.h>
-#include <linux/ipv6.h>
-#include <net/ip6_checksum.h>
-#include <net/checksum.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv6.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
-#include <net/netfilter/nf_conntrack_ecache.h>
-#include <net/netfilter/nf_log.h>
-
-static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = {
-	[UDPLITE_CT_UNREPLIED]	= 30*HZ,
-	[UDPLITE_CT_REPLIED]	= 180*HZ,
-};
-
-static inline struct nf_udplite_net *udplite_pernet(struct net *net)
-{
-	return &net->ct.nf_ct_proto.udplite;
-}
-
-static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
-				 unsigned int dataoff,
-				 struct net *net,
-				 struct nf_conntrack_tuple *tuple)
-{
-	const struct udphdr *hp;
-	struct udphdr _hdr;
-
-	/* Actually only need first 4 bytes to get ports. */
-	hp = skb_header_pointer(skb, dataoff, 4, &_hdr);
-	if (hp == NULL)
-		return false;
-
-	tuple->src.u.udp.port = hp->source;
-	tuple->dst.u.udp.port = hp->dest;
-	return true;
-}
-
-static bool udplite_invert_tuple(struct nf_conntrack_tuple *tuple,
-				 const struct nf_conntrack_tuple *orig)
-{
-	tuple->src.u.udp.port = orig->dst.u.udp.port;
-	tuple->dst.u.udp.port = orig->src.u.udp.port;
-	return true;
-}
-
-/* Print out the per-protocol part of the tuple. */
-static void udplite_print_tuple(struct seq_file *s,
-				const struct nf_conntrack_tuple *tuple)
-{
-	seq_printf(s, "sport=%hu dport=%hu ",
-		   ntohs(tuple->src.u.udp.port),
-		   ntohs(tuple->dst.u.udp.port));
-}
-
-static unsigned int *udplite_get_timeouts(struct net *net)
-{
-	return udplite_pernet(net)->timeouts;
-}
-
-/* Returns verdict for packet, and may modify conntracktype */
-static int udplite_packet(struct nf_conn *ct,
-			  const struct sk_buff *skb,
-			  unsigned int dataoff,
-			  enum ip_conntrack_info ctinfo,
-			  u_int8_t pf,
-			  unsigned int hooknum,
-			  unsigned int *timeouts)
-{
-	/* If we've seen traffic both ways, this is some kind of UDP
-	   stream. Extend timeout. */
-	if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
-		nf_ct_refresh_acct(ct, ctinfo, skb,
-				   timeouts[UDPLITE_CT_REPLIED]);
-		/* Also, more likely to be important, and not a probe */
-		if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
-			nf_conntrack_event_cache(IPCT_ASSURED, ct);
-	} else {
-		nf_ct_refresh_acct(ct, ctinfo, skb,
-				   timeouts[UDPLITE_CT_UNREPLIED]);
-	}
-	return NF_ACCEPT;
-}
-
-/* Called when a new connection for this protocol found. */
-static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
-			unsigned int dataoff, unsigned int *timeouts)
-{
-	return true;
-}
-
-static int udplite_error(struct net *net, struct nf_conn *tmpl,
-			 struct sk_buff *skb,
-			 unsigned int dataoff,
-			 enum ip_conntrack_info *ctinfo,
-			 u_int8_t pf,
-			 unsigned int hooknum)
-{
-	unsigned int udplen = skb->len - dataoff;
-	const struct udphdr *hdr;
-	struct udphdr _hdr;
-	unsigned int cscov;
-
-	/* Header is too small? */
-	hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
-	if (hdr == NULL) {
-		if (LOG_INVALID(net, IPPROTO_UDPLITE))
-			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
-				      "nf_ct_udplite: short packet ");
-		return -NF_ACCEPT;
-	}
-
-	cscov = ntohs(hdr->len);
-	if (cscov == 0)
-		cscov = udplen;
-	else if (cscov < sizeof(*hdr) || cscov > udplen) {
-		if (LOG_INVALID(net, IPPROTO_UDPLITE))
-			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
-				      "nf_ct_udplite: invalid checksum coverage ");
-		return -NF_ACCEPT;
-	}
-
-	/* UDPLITE mandates checksums */
-	if (!hdr->check) {
-		if (LOG_INVALID(net, IPPROTO_UDPLITE))
-			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
-				      "nf_ct_udplite: checksum missing ");
-		return -NF_ACCEPT;
-	}
-
-	/* Checksum invalid? Ignore. */
-	if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
-	    nf_checksum_partial(skb, hooknum, dataoff, cscov, IPPROTO_UDP,
-				pf)) {
-		if (LOG_INVALID(net, IPPROTO_UDPLITE))
-			nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
-				      "nf_ct_udplite: bad UDPLite checksum ");
-		return -NF_ACCEPT;
-	}
-
-	return NF_ACCEPT;
-}
-
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
-
-#include <linux/netfilter/nfnetlink.h>
-#include <linux/netfilter/nfnetlink_cttimeout.h>
-
-static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[],
-					 struct net *net, void *data)
-{
-	unsigned int *timeouts = data;
-	struct nf_udplite_net *un = udplite_pernet(net);
-
-	/* set default timeouts for UDPlite. */
-	timeouts[UDPLITE_CT_UNREPLIED] = un->timeouts[UDPLITE_CT_UNREPLIED];
-	timeouts[UDPLITE_CT_REPLIED] = un->timeouts[UDPLITE_CT_REPLIED];
-
-	if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) {
-		timeouts[UDPLITE_CT_UNREPLIED] =
-		    ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_UNREPLIED])) * HZ;
-	}
-	if (tb[CTA_TIMEOUT_UDPLITE_REPLIED]) {
-		timeouts[UDPLITE_CT_REPLIED] =
-		    ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_REPLIED])) * HZ;
-	}
-	return 0;
-}
-
-static int
-udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
-{
-	const unsigned int *timeouts = data;
-
-	if (nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
-			 htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ)) ||
-	    nla_put_be32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
-			 htonl(timeouts[UDPLITE_CT_REPLIED] / HZ)))
-		goto nla_put_failure;
-	return 0;
-
-nla_put_failure:
-	return -ENOSPC;
-}
-
-static const struct nla_policy
-udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = {
-	[CTA_TIMEOUT_UDPLITE_UNREPLIED]	= { .type = NLA_U32 },
-	[CTA_TIMEOUT_UDPLITE_REPLIED]	= { .type = NLA_U32 },
-};
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table udplite_sysctl_table[] = {
-	{
-		.procname	= "nf_conntrack_udplite_timeout",
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_jiffies,
-	},
-	{
-		.procname	= "nf_conntrack_udplite_timeout_stream",
-		.maxlen		= sizeof(unsigned int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec_jiffies,
-	},
-	{ }
-};
-#endif /* CONFIG_SYSCTL */
-
-static int udplite_kmemdup_sysctl_table(struct nf_proto_net *pn,
-					struct nf_udplite_net *un)
-{
-#ifdef CONFIG_SYSCTL
-	if (pn->ctl_table)
-		return 0;
-
-	pn->ctl_table = kmemdup(udplite_sysctl_table,
-				sizeof(udplite_sysctl_table),
-				GFP_KERNEL);
-	if (!pn->ctl_table)
-		return -ENOMEM;
-
-	pn->ctl_table[0].data = &un->timeouts[UDPLITE_CT_UNREPLIED];
-	pn->ctl_table[1].data = &un->timeouts[UDPLITE_CT_REPLIED];
-#endif
-	return 0;
-}
-
-static int udplite_init_net(struct net *net, u_int16_t proto)
-{
-	struct nf_udplite_net *un = udplite_pernet(net);
-	struct nf_proto_net *pn = &un->pn;
-
-	if (!pn->users) {
-		int i;
-
-		for (i = 0 ; i < UDPLITE_CT_MAX; i++)
-			un->timeouts[i] = udplite_timeouts[i];
-	}
-
-	return udplite_kmemdup_sysctl_table(pn, un);
-}
-
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
-{
-	.l3proto		= PF_INET,
-	.l4proto		= IPPROTO_UDPLITE,
-	.name			= "udplite",
-	.allow_clash		= true,
-	.pkt_to_tuple		= udplite_pkt_to_tuple,
-	.invert_tuple		= udplite_invert_tuple,
-	.print_tuple		= udplite_print_tuple,
-	.packet			= udplite_packet,
-	.get_timeouts		= udplite_get_timeouts,
-	.new			= udplite_new,
-	.error			= udplite_error,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
-	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
-	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
-	.nla_policy		= nf_ct_port_nla_policy,
-#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
-	.ctnl_timeout		= {
-		.nlattr_to_obj	= udplite_timeout_nlattr_to_obj,
-		.obj_to_nlattr	= udplite_timeout_obj_to_nlattr,
-		.nlattr_max	= CTA_TIMEOUT_UDPLITE_MAX,
-		.obj_size	= sizeof(unsigned int) *
-					CTA_TIMEOUT_UDPLITE_MAX,
-		.nla_policy	= udplite_timeout_nla_policy,
-	},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-	.init_net		= udplite_init_net,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite4);
-
-struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
-{
-	.l3proto		= PF_INET6,
-	.l4proto		= IPPROTO_UDPLITE,
-	.name			= "udplite",
-	.allow_clash		= true,
-	.pkt_to_tuple		= udplite_pkt_to_tuple,
-	.invert_tuple		= udplite_invert_tuple,
-	.print_tuple		= udplite_print_tuple,
-	.packet			= udplite_packet,
-	.get_timeouts		= udplite_get_timeouts,
-	.new			= udplite_new,
-	.error			= udplite_error,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-	.tuple_to_nlattr	= nf_ct_port_tuple_to_nlattr,
-	.nlattr_tuple_size	= nf_ct_port_nlattr_tuple_size,
-	.nlattr_to_tuple	= nf_ct_port_nlattr_to_tuple,
-	.nla_policy		= nf_ct_port_nla_policy,
-#endif
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
-	.ctnl_timeout		= {
-		.nlattr_to_obj	= udplite_timeout_nlattr_to_obj,
-		.obj_to_nlattr	= udplite_timeout_obj_to_nlattr,
-		.nlattr_max	= CTA_TIMEOUT_UDPLITE_MAX,
-		.obj_size	= sizeof(unsigned int) *
-					CTA_TIMEOUT_UDPLITE_MAX,
-		.nla_policy	= udplite_timeout_nla_policy,
-	},
-#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
-	.init_net		= udplite_init_net,
-};
-EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
@@ -642,6 +642,9 @@ static int __init nf_conntrack_standalone_init(void)
 	if (ret < 0)
 		goto out_start;
 
+	BUILD_BUG_ON(SKB_NFCT_PTRMASK != NFCT_PTRMASK);
+	BUILD_BUG_ON(NFCT_INFOMASK <= IP_CT_NUMBER);
+
 #ifdef CONFIG_SYSCTL
 	nf_ct_netfilter_header =
 		register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);
@@ -15,6 +15,9 @@
 
 #define NFLOGGER_NAME_LEN		64
 
+int sysctl_nf_log_all_netns __read_mostly;
+EXPORT_SYMBOL(sysctl_nf_log_all_netns);
+
 static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
 static DEFINE_MUTEX(nf_log_mutex);
 
@@ -413,6 +416,18 @@ static const struct file_operations nflog_file_ops = {
 #ifdef CONFIG_SYSCTL
 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
 static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
+static struct ctl_table_header *nf_log_sysctl_fhdr;
+
+static struct ctl_table nf_log_sysctl_ftable[] = {
+	{
+		.procname	= "nf_log_all_netns",
+		.data		= &sysctl_nf_log_all_netns,
+		.maxlen		= sizeof(sysctl_nf_log_all_netns),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{ }
+};
 
 static int nf_log_proc_dostring(struct ctl_table *table, int write,
 			 void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -482,6 +497,10 @@ static int netfilter_log_sysctl_init(struct net *net)
 			nf_log_sysctl_table[i].extra1 =
 				(void *)(unsigned long) i;
 		}
+		nf_log_sysctl_fhdr = register_net_sysctl(net, "net/netfilter",
+							 nf_log_sysctl_ftable);
+		if (!nf_log_sysctl_fhdr)
+			goto err_freg;
 	}
 
 	for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
@@ -498,6 +517,9 @@ static int netfilter_log_sysctl_init(struct net *net)
 err_reg:
 	if (!net_eq(net, &init_net))
 		kfree(table);
+	else
+		unregister_net_sysctl_table(nf_log_sysctl_fhdr);
+err_freg:
 err_alloc:
 	return -ENOMEM;
 }
@@ -510,6 +532,8 @@ static void netfilter_log_sysctl_exit(struct net *net)
 	unregister_net_sysctl_table(net->nf.nf_log_dir_header);
 	if (!net_eq(net, &init_net))
 		kfree(table);
+	else
+		unregister_net_sysctl_table(nf_log_sysctl_fhdr);
 }
 #else
 static int netfilter_log_sysctl_init(struct net *net)
@@ -60,7 +60,7 @@ static void mangle_contents(struct sk_buff *skb,
 		__skb_trim(skb, skb->len + rep_len - match_len);
 	}
 
-	if (nf_ct_l3num((struct nf_conn *)skb->nfct) == NFPROTO_IPV4) {
+	if (nf_ct_l3num((struct nf_conn *)skb_nfct(skb)) == NFPROTO_IPV4) {
 		/* fix IP hdr checksum information */
 		ip_hdr(skb)->tot_len = htons(skb->len);
 		ip_send_check(ip_hdr(skb));
@@ -30,20 +30,15 @@ udp_unique_tuple(const struct nf_nat_l3proto *l3proto,
 			       &udp_port_rover);
 }
 
-static bool
-udp_manip_pkt(struct sk_buff *skb,
-	      const struct nf_nat_l3proto *l3proto,
-	      unsigned int iphdroff, unsigned int hdroff,
-	      const struct nf_conntrack_tuple *tuple,
-	      enum nf_nat_manip_type maniptype)
+static void
+__udp_manip_pkt(struct sk_buff *skb,
+	      const struct nf_nat_l3proto *l3proto,
+	      unsigned int iphdroff, struct udphdr *hdr,
+	      const struct nf_conntrack_tuple *tuple,
+	      enum nf_nat_manip_type maniptype, bool do_csum)
 {
-	struct udphdr *hdr;
 	__be16 *portptr, newport;
 
-	if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
-		return false;
-	hdr = (struct udphdr *)(skb->data + hdroff);
-
 	if (maniptype == NF_NAT_MANIP_SRC) {
 		/* Get rid of src port */
 		newport = tuple->src.u.udp.port;
@@ -53,7 +48,7 @@ udp_manip_pkt(struct sk_buff *skb,
 		newport = tuple->dst.u.udp.port;
 		portptr = &hdr->dest;
 	}
-	if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+	if (do_csum) {
 		l3proto->csum_update(skb, iphdroff, &hdr->check,
 				     tuple, maniptype);
 		inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
@@ -62,9 +57,68 @@ udp_manip_pkt(struct sk_buff *skb,
 			hdr->check = CSUM_MANGLED_0;
 	}
 	*portptr = newport;
+}
+
+static bool udp_manip_pkt(struct sk_buff *skb,
+			  const struct nf_nat_l3proto *l3proto,
+			  unsigned int iphdroff, unsigned int hdroff,
+			  const struct nf_conntrack_tuple *tuple,
+			  enum nf_nat_manip_type maniptype)
+{
+	struct udphdr *hdr;
+	bool do_csum;
+
+	if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+		return false;
+
+	hdr = (struct udphdr *)(skb->data + hdroff);
+	do_csum = hdr->check || skb->ip_summed == CHECKSUM_PARTIAL;
+
+	__udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, do_csum);
 	return true;
 }
 
+#ifdef CONFIG_NF_NAT_PROTO_UDPLITE
+static u16 udplite_port_rover;
+
+static bool udplite_manip_pkt(struct sk_buff *skb,
+			      const struct nf_nat_l3proto *l3proto,
+			      unsigned int iphdroff, unsigned int hdroff,
+			      const struct nf_conntrack_tuple *tuple,
+			      enum nf_nat_manip_type maniptype)
+{
+	struct udphdr *hdr;
+
+	if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
+		return false;
+
+	hdr = (struct udphdr *)(skb->data + hdroff);
+	__udp_manip_pkt(skb, l3proto, iphdroff, hdr, tuple, maniptype, true);
+	return true;
+}
+
+static void
+udplite_unique_tuple(const struct nf_nat_l3proto *l3proto,
+		     struct nf_conntrack_tuple *tuple,
+		     const struct nf_nat_range *range,
+		     enum nf_nat_manip_type maniptype,
+		     const struct nf_conn *ct)
+{
+	nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
+				    &udplite_port_rover);
+}
+
+const struct nf_nat_l4proto nf_nat_l4proto_udplite = {
+	.l4proto		= IPPROTO_UDPLITE,
+	.manip_pkt		= udplite_manip_pkt,
+	.in_range		= nf_nat_l4proto_in_range,
+	.unique_tuple		= udplite_unique_tuple,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
+	.nlattr_to_range	= nf_nat_l4proto_nlattr_to_range,
+#endif
+};
+#endif /* CONFIG_NF_NAT_PROTO_UDPLITE */
+
 const struct nf_nat_l4proto nf_nat_l4proto_udp = {
 	.l4proto		= IPPROTO_UDP,
 	.manip_pkt		= udp_manip_pkt,
@@ -1,73 +0,0 @@
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2008 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/types.h>
-#include <linux/udp.h>
-
-#include <linux/netfilter.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_l3proto.h>
-#include <net/netfilter/nf_nat_l4proto.h>
-
-static u16 udplite_port_rover;
-
-static void
-udplite_unique_tuple(const struct nf_nat_l3proto *l3proto,
-		     struct nf_conntrack_tuple *tuple,
-		     const struct nf_nat_range *range,
-		     enum nf_nat_manip_type maniptype,
-		     const struct nf_conn *ct)
-{
-	nf_nat_l4proto_unique_tuple(l3proto, tuple, range, maniptype, ct,
-				    &udplite_port_rover);
-}
-
-static bool
-udplite_manip_pkt(struct sk_buff *skb,
-		  const struct nf_nat_l3proto *l3proto,
-		  unsigned int iphdroff, unsigned int hdroff,
-		  const struct nf_conntrack_tuple *tuple,
-		  enum nf_nat_manip_type maniptype)
-{
-	struct udphdr *hdr;
-	__be16 *portptr, newport;
-
-	if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
-		return false;
-
-	hdr = (struct udphdr *)(skb->data + hdroff);
-
-	if (maniptype == NF_NAT_MANIP_SRC) {
-		/* Get rid of source port */
-		newport = tuple->src.u.udp.port;
-		portptr = &hdr->source;
-	} else {
-		/* Get rid of dst port */
-		newport = tuple->dst.u.udp.port;
-		portptr = &hdr->dest;
-	}
-
-	l3proto->csum_update(skb, iphdroff, &hdr->check, tuple, maniptype);
-	inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport, false);
-	if (!hdr->check)
-		hdr->check = CSUM_MANGLED_0;
-
-	*portptr = newport;
-	return true;
-}
-
-const struct nf_nat_l4proto nf_nat_l4proto_udplite = {
-	.l4proto		= IPPROTO_UDPLITE,
-	.manip_pkt		= udplite_manip_pkt,
-	.in_range		= nf_nat_l4proto_in_range,
-	.unique_tuple		= udplite_unique_tuple,
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
-	.nlattr_to_range	= nf_nat_l4proto_nlattr_to_range,
-#endif
-};
@@ -576,6 +576,28 @@ err:
 	return err;
 }
 
+static void _nf_tables_table_disable(struct net *net,
+				     const struct nft_af_info *afi,
+				     struct nft_table *table,
+				     u32 cnt)
+{
+	struct nft_chain *chain;
+	u32 i = 0;
+
+	list_for_each_entry(chain, &table->chains, list) {
+		if (!nft_is_active_next(net, chain))
+			continue;
+		if (!(chain->flags & NFT_BASE_CHAIN))
+			continue;
+
+		if (cnt && i++ == cnt)
+			break;
+
+		nf_unregister_net_hooks(net, nft_base_chain(chain)->ops,
+					afi->nops);
+	}
+}
+
 static int nf_tables_table_enable(struct net *net,
 				  const struct nft_af_info *afi,
 				  struct nft_table *table)
@@ -598,18 +620,8 @@ static int nf_tables_table_enable(struct net *net,
 	}
 	return 0;
 err:
-	list_for_each_entry(chain, &table->chains, list) {
-		if (!nft_is_active_next(net, chain))
-			continue;
-		if (!(chain->flags & NFT_BASE_CHAIN))
-			continue;
-
-		if (i-- <= 0)
-			break;
-
-		nf_unregister_net_hooks(net, nft_base_chain(chain)->ops,
-					afi->nops);
-	}
+	if (i)
+		_nf_tables_table_disable(net, afi, table, i);
 	return err;
 }
 
@@ -617,17 +629,7 @@ static void nf_tables_table_disable(struct net *net,
 				   const struct nft_af_info *afi,
 				   struct nft_table *table)
 {
-	struct nft_chain *chain;
-
-	list_for_each_entry(chain, &table->chains, list) {
-		if (!nft_is_active_next(net, chain))
-			continue;
-		if (!(chain->flags & NFT_BASE_CHAIN))
-			continue;
-
-		nf_unregister_net_hooks(net, nft_base_chain(chain)->ops,
-					afi->nops);
-	}
+	_nf_tables_table_disable(net, afi, table, 0);
 }
 
 static int nf_tables_updtable(struct nft_ctx *ctx)
@@ -696,10 +698,7 @@ static int nf_tables_newtable(struct net *net, struct sock *nlsk,
 	if (IS_ERR(table)) {
 		if (PTR_ERR(table) != -ENOENT)
 			return PTR_ERR(table);
-		table = NULL;
-	}
-
-	if (table != NULL) {
+	} else {
 		if (nlh->nlmsg_flags & NLM_F_EXCL)
 			return -EEXIST;
 		if (nlh->nlmsg_flags & NLM_F_REPLACE)
@@ -2966,10 +2965,7 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk,
 	if (IS_ERR(set)) {
 		if (PTR_ERR(set) != -ENOENT)
 			return PTR_ERR(set);
-		set = NULL;
-	}
-
-	if (set != NULL) {
+	} else {
 		if (nlh->nlmsg_flags & NLM_F_EXCL)
 			return -EEXIST;
 		if (nlh->nlmsg_flags & NLM_F_REPLACE)
@@ -4163,10 +4159,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
 		if (err != -ENOENT)
 			return err;
 
-		obj = NULL;
-	}
-
-	if (obj != NULL) {
+	} else {
 		if (nlh->nlmsg_flags & NLM_F_EXCL)
 			return -EEXIST;
 
@@ -129,6 +129,22 @@ static void nft_ct_get_eval(const struct nft_expr *expr,
 		memcpy(dest, &count, sizeof(count));
 		return;
 	}
+	case NFT_CT_AVGPKT: {
+		const struct nf_conn_acct *acct = nf_conn_acct_find(ct);
+		u64 avgcnt = 0, bcnt = 0, pcnt = 0;
+
+		if (acct) {
+			pcnt = nft_ct_get_eval_counter(acct->counter,
+						       NFT_CT_PKTS, priv->dir);
+			bcnt = nft_ct_get_eval_counter(acct->counter,
+						       NFT_CT_BYTES, priv->dir);
+			if (pcnt != 0)
+				avgcnt = div64_u64(bcnt, pcnt);
+		}
+
+		memcpy(dest, &avgcnt, sizeof(avgcnt));
+		return;
+	}
 	case NFT_CT_L3PROTOCOL:
 		*dest = nf_ct_l3num(ct);
 		return;
@@ -316,6 +332,7 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
 		break;
 	case NFT_CT_BYTES:
 	case NFT_CT_PKTS:
+	case NFT_CT_AVGPKT:
 		/* no direction? return sum of original + reply */
 		if (tb[NFTA_CT_DIRECTION] == NULL)
 			priv->dir = IP_CT_DIR_MAX;
@@ -346,7 +363,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
 	if (err < 0)
 		return err;
 
-	if (priv->key == NFT_CT_BYTES || priv->key == NFT_CT_PKTS)
+	if (priv->key == NFT_CT_BYTES ||
+	    priv->key == NFT_CT_PKTS ||
+	    priv->key == NFT_CT_AVGPKT)
 		nf_ct_set_acct(ctx->net, true);
 
 	return 0;
@@ -445,6 +464,7 @@ static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
 		break;
 	case NFT_CT_BYTES:
 	case NFT_CT_PKTS:
+	case NFT_CT_AVGPKT:
 		if (priv->dir < IP_CT_DIR_MAX &&
 		    nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
 			goto nla_put_failure;
@@ -534,8 +554,7 @@ static void nft_notrack_eval(const struct nft_expr *expr,
 
 	ct = nf_ct_untracked_get();
 	atomic_inc(&ct->ct_general.use);
-	skb->nfct = &ct->ct_general;
-	skb->nfctinfo = IP_CT_NEW;
+	nf_ct_set(skb, ct, IP_CT_NEW);
 }
 
 static struct nft_expr_type nft_notrack_type;
@@ -154,13 +154,36 @@ void nft_meta_get_eval(const struct nft_expr *expr,
 			*dest = PACKET_BROADCAST;
 			break;
 		case NFPROTO_IPV6:
-			if (ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF)
-				*dest = PACKET_MULTICAST;
-			else
-				*dest = PACKET_BROADCAST;
+			*dest = PACKET_MULTICAST;
+			break;
+		case NFPROTO_NETDEV:
+			switch (skb->protocol) {
+			case htons(ETH_P_IP): {
+				int noff = skb_network_offset(skb);
+				struct iphdr *iph, _iph;
+
+				iph = skb_header_pointer(skb, noff,
+							 sizeof(_iph), &_iph);
+				if (!iph)
+					goto err;
+
+				if (ipv4_is_multicast(iph->daddr))
+					*dest = PACKET_MULTICAST;
+				else
+					*dest = PACKET_BROADCAST;
+
+				break;
+			}
+			case htons(ETH_P_IPV6):
+				*dest = PACKET_MULTICAST;
+				break;
+			default:
+				WARN_ON_ONCE(1);
+				goto err;
+			}
 			break;
 		default:
-			WARN_ON(1);
+			WARN_ON_ONCE(1);
 			goto err;
 		}
 		break;
@@ -262,6 +262,60 @@ struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
 }
 EXPORT_SYMBOL_GPL(xt_request_find_target);
 
+
+static int xt_obj_to_user(u16 __user *psize, u16 size,
+			  void __user *pname, const char *name,
+			  u8 __user *prev, u8 rev)
+{
+	if (put_user(size, psize))
+		return -EFAULT;
+	if (copy_to_user(pname, name, strlen(name) + 1))
+		return -EFAULT;
+	if (put_user(rev, prev))
+		return -EFAULT;
+
+	return 0;
+}
+
+#define XT_OBJ_TO_USER(U, K, TYPE, C_SIZE)				\
+	xt_obj_to_user(&U->u.TYPE##_size, C_SIZE ? : K->u.TYPE##_size,	\
+		       U->u.user.name, K->u.kernel.TYPE->name,		\
+		       &U->u.user.revision, K->u.kernel.TYPE->revision)
+
+int xt_data_to_user(void __user *dst, const void *src,
+		    int usersize, int size)
+{
+	usersize = usersize ? : size;
+	if (copy_to_user(dst, src, usersize))
+		return -EFAULT;
+	if (usersize != size && clear_user(dst + usersize, size - usersize))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xt_data_to_user);
+
+#define XT_DATA_TO_USER(U, K, TYPE, C_SIZE)				\
+	xt_data_to_user(U->data, K->data,				\
+			K->u.kernel.TYPE->usersize,			\
+			C_SIZE ? : K->u.kernel.TYPE->TYPE##size)
+
+int xt_match_to_user(const struct xt_entry_match *m,
+		     struct xt_entry_match __user *u)
+{
+	return XT_OBJ_TO_USER(u, m, match, 0) ||
+	       XT_DATA_TO_USER(u, m, match, 0);
+}
+EXPORT_SYMBOL_GPL(xt_match_to_user);
+
+int xt_target_to_user(const struct xt_entry_target *t,
+		      struct xt_entry_target __user *u)
+{
+	return XT_OBJ_TO_USER(u, t, target, 0) ||
+	       XT_DATA_TO_USER(u, t, target, 0);
+}
+EXPORT_SYMBOL_GPL(xt_target_to_user);
+
 static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
 {
 	const struct xt_match *m;
@@ -565,17 +619,14 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
 	int off = xt_compat_match_offset(match);
 	u_int16_t msize = m->u.user.match_size - off;
 
-	if (copy_to_user(cm, m, sizeof(*cm)) ||
-	    put_user(msize, &cm->u.user.match_size) ||
-	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
-			 strlen(m->u.kernel.match->name) + 1))
+	if (XT_OBJ_TO_USER(cm, m, match, msize))
 		return -EFAULT;
 
 	if (match->compat_to_user) {
 		if (match->compat_to_user((void __user *)cm->data, m->data))
 			return -EFAULT;
 	} else {
-		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
+		if (XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
 			return -EFAULT;
 	}
 
@@ -923,17 +974,14 @@ int xt_compat_target_to_user(const struct xt_entry_target *t,
 	int off = xt_compat_target_offset(target);
 	u_int16_t tsize = t->u.user.target_size - off;
 
-	if (copy_to_user(ct, t, sizeof(*ct)) ||
-	    put_user(tsize, &ct->u.user.target_size) ||
-	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
-			 strlen(t->u.kernel.target->name) + 1))
+	if (XT_OBJ_TO_USER(ct, t, target, tsize))
 		return -EFAULT;
 
 	if (target->compat_to_user) {
 		if (target->compat_to_user((void __user *)ct->data, t->data))
 			return -EFAULT;
 	} else {
-		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
+		if (XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
 			return -EFAULT;
 	}
 
|
@ -23,15 +23,14 @@
|
||||||
static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct)
|
static inline int xt_ct_target(struct sk_buff *skb, struct nf_conn *ct)
|
||||||
{
|
{
|
||||||
/* Previously seen (loopback)? Ignore. */
|
/* Previously seen (loopback)? Ignore. */
|
||||||
if (skb->nfct != NULL)
|
if (skb->_nfct != 0)
|
||||||
return XT_CONTINUE;
|
return XT_CONTINUE;
|
||||||
|
|
||||||
/* special case the untracked ct : we want the percpu object */
|
/* special case the untracked ct : we want the percpu object */
|
||||||
if (!ct)
|
if (!ct)
|
||||||
ct = nf_ct_untracked_get();
|
ct = nf_ct_untracked_get();
|
||||||
atomic_inc(&ct->ct_general.use);
|
atomic_inc(&ct->ct_general.use);
|
||||||
skb->nfct = &ct->ct_general;
|
nf_ct_set(skb, ct, IP_CT_NEW);
|
||||||
skb->nfctinfo = IP_CT_NEW;
|
|
||||||
|
|
||||||
return XT_CONTINUE;
|
return XT_CONTINUE;
|
||||||
}
|
}
|
||||||
|
@@ -373,6 +372,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
 		.name       = "CT",
 		.family     = NFPROTO_UNSPEC,
 		.targetsize = sizeof(struct xt_ct_target_info),
+		.usersize   = offsetof(struct xt_ct_target_info, ct),
 		.checkentry = xt_ct_tg_check_v0,
 		.destroy    = xt_ct_tg_destroy_v0,
 		.target     = xt_ct_target_v0,
@@ -384,6 +384,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
 		.family     = NFPROTO_UNSPEC,
 		.revision   = 1,
 		.targetsize = sizeof(struct xt_ct_target_info_v1),
+		.usersize   = offsetof(struct xt_ct_target_info, ct),
 		.checkentry = xt_ct_tg_check_v1,
 		.destroy    = xt_ct_tg_destroy_v1,
 		.target     = xt_ct_target_v1,
@@ -395,6 +396,7 @@ static struct xt_target xt_ct_tg_reg[] __read_mostly = {
 		.family     = NFPROTO_UNSPEC,
 		.revision   = 2,
 		.targetsize = sizeof(struct xt_ct_target_info_v1),
+		.usersize   = offsetof(struct xt_ct_target_info, ct),
 		.checkentry = xt_ct_tg_check_v2,
 		.destroy    = xt_ct_tg_destroy_v1,
 		.target     = xt_ct_target_v1,

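Each .usersize above is the offsetof() of the first kernel-only member, so only the user-configurable head of the info structure is copied back when the ruleset is dumped. As an illustration of the layout this assumes, here is a hypothetical extension fragment (names invented, registration fields other than the relevant ones omitted):

#include <linux/netfilter/x_tables.h>
#include <linux/stddef.h>

/* Hypothetical info blob: user-set fields first, kernel-only state last. */
struct example_tginfo {
	__u32 flags;		/* filled in by iptables */
	__u32 timeout;		/* filled in by iptables */
	void *priv;		/* set up by the kernel, never shown to user space */
};

static struct xt_target example_tg __read_mostly = {
	.name       = "EXAMPLE",
	.family     = NFPROTO_UNSPEC,
	.targetsize = sizeof(struct example_tginfo),
	/* dump everything up to, but not including, the kernel pointer */
	.usersize   = offsetof(struct example_tginfo, priv),
	.me         = THIS_MODULE,
};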
@@ -407,12 +409,11 @@ static unsigned int
 notrack_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	/* Previously seen (loopback)? Ignore. */
-	if (skb->nfct != NULL)
+	if (skb->_nfct != 0)
 		return XT_CONTINUE;

-	skb->nfct = &nf_ct_untracked_get()->ct_general;
-	skb->nfctinfo = IP_CT_NEW;
-	nf_conntrack_get(skb->nfct);
+	nf_ct_set(skb, nf_ct_untracked_get(), IP_CT_NEW);
+	nf_conntrack_get(skb_nfct(skb));

 	return XT_CONTINUE;
 }

@@ -162,6 +162,7 @@ static struct xt_target xt_rateest_tg_reg __read_mostly = {
 	.checkentry = xt_rateest_tg_checkentry,
 	.destroy    = xt_rateest_tg_destroy,
 	.targetsize = sizeof(struct xt_rateest_target_info),
+	.usersize   = offsetof(struct xt_rateest_target_info, est),
 	.me         = THIS_MODULE,
 };

@@ -133,6 +133,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
 		.family     = NFPROTO_IPV4,
 		.target     = tee_tg4,
 		.targetsize = sizeof(struct xt_tee_tginfo),
+		.usersize   = offsetof(struct xt_tee_tginfo, priv),
 		.checkentry = tee_tg_check,
 		.destroy    = tee_tg_destroy,
 		.me         = THIS_MODULE,
@@ -144,6 +145,7 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
 		.family     = NFPROTO_IPV6,
 		.target     = tee_tg6,
 		.targetsize = sizeof(struct xt_tee_tginfo),
+		.usersize   = offsetof(struct xt_tee_tginfo, priv),
 		.checkentry = tee_tg_check,
 		.destroy    = tee_tg_destroy,
 		.me         = THIS_MODULE,

@@ -110,6 +110,7 @@ static struct xt_match bpf_mt_reg[] __read_mostly = {
 		.match      = bpf_mt,
 		.destroy    = bpf_mt_destroy,
 		.matchsize  = sizeof(struct xt_bpf_info),
+		.usersize   = offsetof(struct xt_bpf_info, filter),
 		.me         = THIS_MODULE,
 	},
 	{
@@ -120,6 +121,7 @@ static struct xt_match bpf_mt_reg[] __read_mostly = {
 		.match      = bpf_mt_v1,
 		.destroy    = bpf_mt_destroy_v1,
 		.matchsize  = sizeof(struct xt_bpf_info_v1),
+		.usersize   = offsetof(struct xt_bpf_info_v1, filter),
 		.me         = THIS_MODULE,
 	},
 };

@@ -122,6 +122,7 @@ static struct xt_match cgroup_mt_reg[] __read_mostly = {
 		.checkentry = cgroup_mt_check_v1,
 		.match      = cgroup_mt_v1,
 		.matchsize  = sizeof(struct xt_cgroup_info_v1),
+		.usersize   = offsetof(struct xt_cgroup_info_v1, priv),
 		.destroy    = cgroup_mt_destroy_v1,
 		.me         = THIS_MODULE,
 		.hooks      = (1 << NF_INET_LOCAL_OUT) |

@@ -218,7 +218,7 @@ count_tree(struct net *net, struct rb_root *root,
 		int diff;
 		bool addit;

-		rbconn = container_of(*rbnode, struct xt_connlimit_rb, node);
+		rbconn = rb_entry(*rbnode, struct xt_connlimit_rb, node);

 		parent = *rbnode;
 		diff = same_source_net(addr, mask, &rbconn->addr, family);
@@ -398,7 +398,7 @@ static void destroy_tree(struct rb_root *r)
 	struct rb_node *node;

 	while ((node = rb_first(r)) != NULL) {
-		rbconn = container_of(node, struct xt_connlimit_rb, node);
+		rbconn = rb_entry(node, struct xt_connlimit_rb, node);

 		rb_erase(node, r);

@@ -431,6 +431,7 @@ static struct xt_match connlimit_mt_reg __read_mostly = {
 	.checkentry = connlimit_mt_check,
 	.match      = connlimit_mt,
 	.matchsize  = sizeof(struct xt_connlimit_info),
+	.usersize   = offsetof(struct xt_connlimit_info, data),
 	.destroy    = connlimit_mt_destroy,
 	.me         = THIS_MODULE,
 };

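The container_of() to rb_entry() switch in xt_connlimit is a cosmetic cleanup: rb_entry() is simply the rbtree-flavoured spelling of the same cast. A tiny standalone illustration (hypothetical struct and helper):

#include <linux/rbtree.h>
#include <linux/kernel.h>

struct limit_node {
	struct rb_node node;
	unsigned int count;
};

static unsigned int first_count(struct rb_root *root)
{
	struct rb_node *n = rb_first(root);

	/* rb_entry(ptr, type, member) expands to container_of(ptr, type, member) */
	return n ? rb_entry(n, struct limit_node, node)->count : 0;
}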
@@ -838,6 +838,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
 		.family     = NFPROTO_IPV4,
 		.match      = hashlimit_mt_v1,
 		.matchsize  = sizeof(struct xt_hashlimit_mtinfo1),
+		.usersize   = offsetof(struct xt_hashlimit_mtinfo1, hinfo),
 		.checkentry = hashlimit_mt_check_v1,
 		.destroy    = hashlimit_mt_destroy_v1,
 		.me         = THIS_MODULE,
@@ -848,6 +849,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
 		.family     = NFPROTO_IPV4,
 		.match      = hashlimit_mt,
 		.matchsize  = sizeof(struct xt_hashlimit_mtinfo2),
+		.usersize   = offsetof(struct xt_hashlimit_mtinfo2, hinfo),
 		.checkentry = hashlimit_mt_check,
 		.destroy    = hashlimit_mt_destroy,
 		.me         = THIS_MODULE,
@@ -859,6 +861,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
 		.family     = NFPROTO_IPV6,
 		.match      = hashlimit_mt_v1,
 		.matchsize  = sizeof(struct xt_hashlimit_mtinfo1),
+		.usersize   = offsetof(struct xt_hashlimit_mtinfo1, hinfo),
 		.checkentry = hashlimit_mt_check_v1,
 		.destroy    = hashlimit_mt_destroy_v1,
 		.me         = THIS_MODULE,
@@ -869,6 +872,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
 		.family     = NFPROTO_IPV6,
 		.match      = hashlimit_mt,
 		.matchsize  = sizeof(struct xt_hashlimit_mtinfo2),
+		.usersize   = offsetof(struct xt_hashlimit_mtinfo2, hinfo),
 		.checkentry = hashlimit_mt_check,
 		.destroy    = hashlimit_mt_destroy,
 		.me         = THIS_MODULE,

@@ -192,6 +192,8 @@ static struct xt_match limit_mt_reg __read_mostly = {
 	.compatsize       = sizeof(struct compat_xt_rateinfo),
 	.compat_from_user = limit_mt_compat_from_user,
 	.compat_to_user   = limit_mt_compat_to_user,
+#else
+	.usersize         = offsetof(struct xt_rateinfo, prev),
 #endif
 	.me               = THIS_MODULE,
 };

@@ -33,8 +33,7 @@ pkttype_mt(const struct sk_buff *skb, struct xt_action_param *par)
 	else if (xt_family(par) == NFPROTO_IPV4 &&
 		 ipv4_is_multicast(ip_hdr(skb)->daddr))
 		type = PACKET_MULTICAST;
-	else if (xt_family(par) == NFPROTO_IPV6 &&
-		 ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF)
+	else if (xt_family(par) == NFPROTO_IPV6)
 		type = PACKET_MULTICAST;
 	else
 		type = PACKET_BROADCAST;

@@ -73,6 +73,7 @@ static struct xt_match quota_mt_reg __read_mostly = {
 	.checkentry = quota_mt_check,
 	.destroy    = quota_mt_destroy,
 	.matchsize  = sizeof(struct xt_quota_info),
+	.usersize   = offsetof(struct xt_quota_info, master),
 	.me         = THIS_MODULE,
 };

@@ -133,6 +133,7 @@ static struct xt_match xt_rateest_mt_reg __read_mostly = {
 	.checkentry = xt_rateest_mt_checkentry,
 	.destroy    = xt_rateest_mt_destroy,
 	.matchsize  = sizeof(struct xt_rateest_match_info),
+	.usersize   = offsetof(struct xt_rateest_match_info, est1),
 	.me         = THIS_MODULE,
 };

@@ -77,6 +77,7 @@ static struct xt_match xt_string_mt_reg __read_mostly = {
 	.match     = string_mt,
 	.destroy   = string_mt_destroy,
 	.matchsize = sizeof(struct xt_string_info),
+	.usersize  = offsetof(struct xt_string_info, config),
 	.me        = THIS_MODULE,
 };

@@ -460,8 +460,7 @@ ovs_ct_find_existing(struct net *net, const struct nf_conntrack_zone *zone,

 	ct = nf_ct_tuplehash_to_ctrack(h);

-	skb->nfct = &ct->ct_general;
-	skb->nfctinfo = ovs_ct_get_info(h);
+	nf_ct_set(skb, ct, ovs_ct_get_info(h));
 	return ct;
 }

@@ -722,11 +721,10 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,

 	/* Associate skb with specified zone. */
 	if (tmpl) {
-		if (skb->nfct)
-			nf_conntrack_put(skb->nfct);
+		if (skb_nfct(skb))
+			nf_conntrack_put(skb_nfct(skb));
 		nf_conntrack_get(&tmpl->ct_general);
-		skb->nfct = &tmpl->ct_general;
-		skb->nfctinfo = IP_CT_NEW;
+		nf_ct_set(skb, tmpl, IP_CT_NEW);
 	}

 	err = nf_conntrack_in(net, info->family,
@@ -820,7 +818,7 @@ static int ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
 		if (err)
 			return err;

-		ct = (struct nf_conn *)skb->nfct;
+		ct = (struct nf_conn *)skb_nfct(skb);
 		if (ct)
 			nf_ct_deliver_cached_events(ct);
 	}

@@ -129,7 +129,7 @@ static u32 flow_get_mark(const struct sk_buff *skb)
 static u32 flow_get_nfct(const struct sk_buff *skb)
 {
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-	return addr_fold(skb->nfct);
+	return addr_fold(skb_nfct(skb));
 #else
 	return 0;
 #endif