netfilter: nft_counter: convert it to use per-cpu counters
This patch converts the existing seqlock to per-cpu counters. Suggested-by: Eric Dumazet <eric.dumazet@gmail.com> Suggested-by: Patrick McHardy <kaber@trash.net> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
This commit is contained in:
Parent
d92cff89a0
Commit
0c45e76960
|
@ -18,39 +18,59 @@
|
||||||
#include <net/netfilter/nf_tables.h>
|
#include <net/netfilter/nf_tables.h>
|
||||||
|
|
||||||
struct nft_counter {
|
struct nft_counter {
|
||||||
seqlock_t lock;
|
|
||||||
u64 bytes;
|
u64 bytes;
|
||||||
u64 packets;
|
u64 packets;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
struct nft_counter_percpu {
|
||||||
|
struct nft_counter counter;
|
||||||
|
struct u64_stats_sync syncp;
|
||||||
|
};
|
||||||
|
|
||||||
|
struct nft_counter_percpu_priv {
|
||||||
|
struct nft_counter_percpu __percpu *counter;
|
||||||
|
};
|
||||||
|
|
||||||
static void nft_counter_eval(const struct nft_expr *expr,
|
static void nft_counter_eval(const struct nft_expr *expr,
|
||||||
struct nft_regs *regs,
|
struct nft_regs *regs,
|
||||||
const struct nft_pktinfo *pkt)
|
const struct nft_pktinfo *pkt)
|
||||||
{
|
{
|
||||||
struct nft_counter *priv = nft_expr_priv(expr);
|
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
|
||||||
|
struct nft_counter_percpu *this_cpu;
|
||||||
|
|
||||||
write_seqlock_bh(&priv->lock);
|
local_bh_disable();
|
||||||
priv->bytes += pkt->skb->len;
|
this_cpu = this_cpu_ptr(priv->counter);
|
||||||
priv->packets++;
|
u64_stats_update_begin(&this_cpu->syncp);
|
||||||
write_sequnlock_bh(&priv->lock);
|
this_cpu->counter.bytes += pkt->skb->len;
|
||||||
|
this_cpu->counter.packets++;
|
||||||
|
u64_stats_update_end(&this_cpu->syncp);
|
||||||
|
local_bh_enable();
|
||||||
}
|
}
|
||||||
|
|
||||||
static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
|
static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
|
||||||
{
|
{
|
||||||
struct nft_counter *priv = nft_expr_priv(expr);
|
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
|
||||||
|
struct nft_counter_percpu *cpu_stats;
|
||||||
|
struct nft_counter total;
|
||||||
|
u64 bytes, packets;
|
||||||
unsigned int seq;
|
unsigned int seq;
|
||||||
u64 bytes;
|
int cpu;
|
||||||
u64 packets;
|
|
||||||
|
|
||||||
do {
|
memset(&total, 0, sizeof(total));
|
||||||
seq = read_seqbegin(&priv->lock);
|
for_each_possible_cpu(cpu) {
|
||||||
bytes = priv->bytes;
|
cpu_stats = per_cpu_ptr(priv->counter, cpu);
|
||||||
packets = priv->packets;
|
do {
|
||||||
} while (read_seqretry(&priv->lock, seq));
|
seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
|
||||||
|
bytes = cpu_stats->counter.bytes;
|
||||||
|
packets = cpu_stats->counter.packets;
|
||||||
|
} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
|
||||||
|
|
||||||
if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(bytes)))
|
total.packets += packets;
|
||||||
goto nla_put_failure;
|
total.bytes += bytes;
|
||||||
if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(packets)))
|
}
|
||||||
|
|
||||||
|
if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
|
||||||
|
nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
|
||||||
goto nla_put_failure;
|
goto nla_put_failure;
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
@ -67,23 +87,44 @@ static int nft_counter_init(const struct nft_ctx *ctx,
|
||||||
const struct nft_expr *expr,
|
const struct nft_expr *expr,
|
||||||
const struct nlattr * const tb[])
|
const struct nlattr * const tb[])
|
||||||
{
|
{
|
||||||
struct nft_counter *priv = nft_expr_priv(expr);
|
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
|
||||||
|
struct nft_counter_percpu __percpu *cpu_stats;
|
||||||
|
struct nft_counter_percpu *this_cpu;
|
||||||
|
|
||||||
if (tb[NFTA_COUNTER_PACKETS])
|
cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu);
|
||||||
priv->packets = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
|
if (cpu_stats == NULL)
|
||||||
if (tb[NFTA_COUNTER_BYTES])
|
return ENOMEM;
|
||||||
priv->bytes = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
|
|
||||||
|
|
||||||
seqlock_init(&priv->lock);
|
preempt_disable();
|
||||||
|
this_cpu = this_cpu_ptr(cpu_stats);
|
||||||
|
if (tb[NFTA_COUNTER_PACKETS]) {
|
||||||
|
this_cpu->counter.packets =
|
||||||
|
be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));
|
||||||
|
}
|
||||||
|
if (tb[NFTA_COUNTER_BYTES]) {
|
||||||
|
this_cpu->counter.bytes =
|
||||||
|
be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_BYTES]));
|
||||||
|
}
|
||||||
|
preempt_enable();
|
||||||
|
priv->counter = cpu_stats;
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void nft_counter_destroy(const struct nft_ctx *ctx,
|
||||||
|
const struct nft_expr *expr)
|
||||||
|
{
|
||||||
|
struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
|
||||||
|
|
||||||
|
free_percpu(priv->counter);
|
||||||
|
}
|
||||||
|
|
||||||
static struct nft_expr_type nft_counter_type;
|
static struct nft_expr_type nft_counter_type;
|
||||||
static const struct nft_expr_ops nft_counter_ops = {
|
static const struct nft_expr_ops nft_counter_ops = {
|
||||||
.type = &nft_counter_type,
|
.type = &nft_counter_type,
|
||||||
.size = NFT_EXPR_SIZE(sizeof(struct nft_counter)),
|
.size = NFT_EXPR_SIZE(sizeof(struct nft_counter_percpu_priv)),
|
||||||
.eval = nft_counter_eval,
|
.eval = nft_counter_eval,
|
||||||
.init = nft_counter_init,
|
.init = nft_counter_init,
|
||||||
|
.destroy = nft_counter_destroy,
|
||||||
.dump = nft_counter_dump,
|
.dump = nft_counter_dump,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
Loading…
Reference in a new issue