netfilter: trivial code cleanup and doc changes
Changes made while reading through the netfilter code: add a hint about how the conntrack nf_conn refcnt is accessed, and rename repl_hash to reply_hash for readability.

Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Reviewed-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Commit: b476b72a0f
Parent: 52af2bfcc0
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -73,7 +73,13 @@ struct nf_conn_help {
 
 struct nf_conn {
 	/* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
-	   plus 1 for any connection(s) we are `master' for */
+	 * plus 1 for any connection(s) we are `master' for
+	 *
+	 * Hint, SKB address this struct and refcnt via skb->nfct and
+	 * helpers nf_conntrack_get() and nf_conntrack_put().
+	 * Helper nf_ct_put() equals nf_conntrack_put() by dec refcnt,
+	 * beware nf_ct_get() is different and don't inc refcnt.
+	 */
 	struct nf_conntrack ct_general;
 
 	spinlock_t lock;
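Not part of the patch: a minimal sketch of how the hint in the new comment is typically followed by conntrack users, assuming the helpers of this kernel generation (nf_ct_get(), nf_conntrack_get(), nf_ct_put()); the function name example_inspect_ct() is hypothetical.

/* Sketch only, not from this commit: illustrates the refcnt hint above.
 * example_inspect_ct() is a hypothetical caller. */
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack.h>

static void example_inspect_ct(struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	/* nf_ct_get() only reads the conntrack attached via skb->nfct;
	 * it does NOT increment the refcnt. */
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return;

	/* To keep the conntrack alive independently of the skb, take an
	 * explicit reference on ct->ct_general... */
	nf_conntrack_get(&ct->ct_general);

	/* ... use ct while holding the reference ... */

	/* ...then drop it; nf_ct_put(ct) decrements the refcnt just like
	 * nf_conntrack_put(&ct->ct_general). */
	nf_ct_put(ct);
}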
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -408,21 +408,21 @@ EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
 
 static void __nf_conntrack_hash_insert(struct nf_conn *ct,
 				       unsigned int hash,
-				       unsigned int repl_hash)
+				       unsigned int reply_hash)
 {
 	struct net *net = nf_ct_net(ct);
 
 	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
 			   &net->ct.hash[hash]);
 	hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
-			   &net->ct.hash[repl_hash]);
+			   &net->ct.hash[reply_hash]);
 }
 
 int
 nf_conntrack_hash_check_insert(struct nf_conn *ct)
 {
 	struct net *net = nf_ct_net(ct);
-	unsigned int hash, repl_hash;
+	unsigned int hash, reply_hash;
 	struct nf_conntrack_tuple_hash *h;
 	struct hlist_nulls_node *n;
 	u16 zone;
@@ -430,7 +430,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
 	zone = nf_ct_zone(ct);
 	hash = hash_conntrack(net, zone,
 			      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
-	repl_hash = hash_conntrack(net, zone,
+	reply_hash = hash_conntrack(net, zone,
 			      &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
 	spin_lock_bh(&nf_conntrack_lock);
@@ -441,7 +441,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
 				      &h->tuple) &&
 		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
 			goto out;
-	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
+	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 				      &h->tuple) &&
 		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
@@ -451,7 +451,7 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
 	smp_wmb();
 	/* The caller holds a reference to this object */
 	atomic_set(&ct->ct_general.use, 2);
-	__nf_conntrack_hash_insert(ct, hash, repl_hash);
+	__nf_conntrack_hash_insert(ct, hash, reply_hash);
 	NF_CT_STAT_INC(net, insert);
 	spin_unlock_bh(&nf_conntrack_lock);
 
@@ -483,7 +483,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
 int
 __nf_conntrack_confirm(struct sk_buff *skb)
 {
-	unsigned int hash, repl_hash;
+	unsigned int hash, reply_hash;
 	struct nf_conntrack_tuple_hash *h;
 	struct nf_conn *ct;
 	struct nf_conn_help *help;
@@ -507,7 +507,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	/* reuse the hash saved before */
 	hash = *(unsigned long *)&ct->tuplehash[IP_CT_DIR_REPLY].hnnode.pprev;
 	hash = hash_bucket(hash, net);
-	repl_hash = hash_conntrack(net, zone,
+	reply_hash = hash_conntrack(net, zone,
 			      &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
 
 	/* We're not in hash table, and we refuse to set up related
@@ -540,7 +540,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 				      &h->tuple) &&
 		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
 			goto out;
-	hlist_nulls_for_each_entry(h, n, &net->ct.hash[repl_hash], hnnode)
+	hlist_nulls_for_each_entry(h, n, &net->ct.hash[reply_hash], hnnode)
 		if (nf_ct_tuple_equal(&ct->tuplehash[IP_CT_DIR_REPLY].tuple,
 				      &h->tuple) &&
 		    zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
@@ -570,7 +570,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	 * guarantee that no other CPU can find the conntrack before the above
 	 * stores are visible.
 	 */
-	__nf_conntrack_hash_insert(ct, hash, repl_hash);
+	__nf_conntrack_hash_insert(ct, hash, reply_hash);
 	NF_CT_STAT_INC(net, insert);
 	spin_unlock_bh(&nf_conntrack_lock);
 
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -417,7 +417,7 @@ out:
 	return ret;
 }
 
-int nf_ct_expect_related_report(struct nf_conntrack_expect *expect, 
+int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
 				u32 portid, int report)
 {
 	int ret;