netfilter: x_tables: align per cpu xt_counter
Let's force a 16-byte alignment on xt_counter percpu allocations, so that
the bytes and packets counters sit in the same cache line.

Since xt_counters is exported to user space, we cannot add __aligned(16)
to the structure itself.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Florian Westphal <fw@strlen.de>
Acked-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Parent: 01555e74bd
Commit: a1a56aaa07
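For reference, the counter structure the message refers to is the existing UAPI definition in include/uapi/linux/netfilter/x_tables.h, shown here for context and not part of this diff. Because its layout is part of the user-space ABI, a __aligned(16) attribute cannot be added to it, so the alignment has to come from the per-cpu allocator instead:

struct xt_counters {
	__u64 pcnt, bcnt;			/* Packet and byte counters */
};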
@@ -356,7 +356,8 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
  * so nothing needs to be done there.
  *
  * xt_percpu_counter_alloc returns the address of the percpu
- * counter, or 0 on !SMP.
+ * counter, or 0 on !SMP. We force an alignment of 16 bytes
+ * so that bytes/packets share a common cache line.
  *
  * Hence caller must use IS_ERR_VALUE to check for error, this
  * allows us to return 0 for single core systems without forcing
@@ -365,7 +366,8 @@ static inline unsigned long ifname_compare_aligned(const char *_a,
 static inline u64 xt_percpu_counter_alloc(void)
 {
 	if (nr_cpu_ids > 1) {
-		void __percpu *res = alloc_percpu(struct xt_counters);
+		void __percpu *res = __alloc_percpu(sizeof(struct xt_counters),
+						    sizeof(struct xt_counters));
 
 		if (res == NULL)
 			return (u64) -ENOMEM;
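As a minimal usage sketch (not part of the patch; the helper names and the calling context below are assumptions for illustration), this is roughly how the 16-byte-aligned per-cpu counters would be updated on the hot path and folded into a total for user space. With the forced alignment, a given CPU's pcnt and bcnt always share one cache line, so the per-packet update touches a single line:

#include <linux/percpu.h>
#include <linux/netfilter/x_tables.h>

/* Hot path: assumed to run from a netfilter hook, i.e. with preemption
 * already disabled, so this_cpu_ptr() is safe to use directly. */
static void example_counter_update(struct xt_counters __percpu *cnt,
				   unsigned int bytes)
{
	struct xt_counters *c = this_cpu_ptr(cnt);

	c->bcnt += bytes;	/* byte counter */
	c->pcnt++;		/* packet counter, same cache line as bcnt */
}

/* Slow path: fold the per-cpu counters into one total, e.g. when copying
 * counters back to user space. */
static void example_counter_fold(struct xt_counters __percpu *cnt,
				 struct xt_counters *total)
{
	int cpu;

	total->pcnt = 0;
	total->bcnt = 0;
	for_each_possible_cpu(cpu) {
		const struct xt_counters *c = per_cpu_ptr(cnt, cpu);

		total->pcnt += c->pcnt;
		total->bcnt += c->bcnt;
	}
}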