net: better pcpu data alignment
Tunnels can force an alignment of their percpu data to reduce the number of cache lines used in the fast path, or read in .ndo_get_stats(). percpu_alloc() is a very fine grained allocator, so any small hole will be used anyway. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Родитель
2bc8ca40f9
Коммит
8ce120f118
|
@ -27,8 +27,8 @@
|
|||
|
||||
struct veth_net_stats {
|
||||
u64 rx_packets;
|
||||
u64 tx_packets;
|
||||
u64 rx_bytes;
|
||||
u64 tx_packets;
|
||||
u64 tx_bytes;
|
||||
u64 rx_dropped;
|
||||
struct u64_stats_sync syncp;
|
||||
|
|
|
@ -171,7 +171,7 @@ struct pcpu_tstats {
|
|||
unsigned long rx_bytes;
|
||||
unsigned long tx_packets;
|
||||
unsigned long tx_bytes;
|
||||
};
|
||||
} __attribute__((aligned(4*sizeof(unsigned long))));
|
||||
|
||||
static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
|
||||
{
|
||||
|
|
|
@ -148,7 +148,7 @@ struct pcpu_tstats {
|
|||
unsigned long rx_bytes;
|
||||
unsigned long tx_packets;
|
||||
unsigned long tx_bytes;
|
||||
};
|
||||
} __attribute__((aligned(4*sizeof(unsigned long))));
|
||||
|
||||
static struct net_device_stats *ipip_get_stats(struct net_device *dev)
|
||||
{
|
||||
|
|
|
@ -93,7 +93,7 @@ struct pcpu_tstats {
|
|||
unsigned long rx_bytes;
|
||||
unsigned long tx_packets;
|
||||
unsigned long tx_bytes;
|
||||
};
|
||||
} __attribute__((aligned(4*sizeof(unsigned long))));
|
||||
|
||||
static struct net_device_stats *ip6_get_stats(struct net_device *dev)
|
||||
{
|
||||
|
|
|
@ -91,7 +91,7 @@ struct pcpu_tstats {
|
|||
unsigned long rx_bytes;
|
||||
unsigned long tx_packets;
|
||||
unsigned long tx_bytes;
|
||||
};
|
||||
} __attribute__((aligned(4*sizeof(unsigned long))));
|
||||
|
||||
static struct net_device_stats *ipip6_get_stats(struct net_device *dev)
|
||||
{
|
||||
|
|
Загрузка…
Ссылка в новой задаче