skbuff: pass the result of data ksize to __build_skb_around
Avoid calling ksize() again in __build_skb_around() by passing the result of the ksize() call on data down to __build_skb_around(). An nginx stress test shows this change can reduce ksize() CPU usage and give a small performance boost.

Signed-off-by: Li RongQing <lirongqing@baidu.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent db4278c55f
Commit a5df6333f1
@@ -394,8 +394,9 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 {
 	struct kmem_cache *cache;
 	struct sk_buff *skb;
-	u8 *data;
+	unsigned int osize;
 	bool pfmemalloc;
+	u8 *data;
 
 	cache = (flags & SKB_ALLOC_FCLONE)
 		? skbuff_fclone_cache : skbuff_head_cache;
 
@@ -427,7 +428,8 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * Put skb_shared_info exactly at the end of allocated zone,
 	 * to allow max possible filling before reallocation.
 	 */
-	size = SKB_WITH_OVERHEAD(ksize(data));
+	osize = ksize(data);
+	size = SKB_WITH_OVERHEAD(osize);
 	prefetchw(data + size);
 
 	/*
@@ -436,7 +438,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 	 * the tail pointer in struct sk_buff!
 	 */
 	memset(skb, 0, offsetof(struct sk_buff, tail));
-	__build_skb_around(skb, data, 0);
+	__build_skb_around(skb, data, osize);
 	skb->pfmemalloc = pfmemalloc;
 
 	if (flags & SKB_ALLOC_FCLONE) {
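For context, here is a minimal user-space sketch of the pattern this patch applies in __alloc_skb() (which lives in net/core/skbuff.c). The names below (struct buf, build_buf_around, query_usable_size) are hypothetical stand-ins, not the kernel API: the caller queries the usable allocation size once and hands the result to the builder, instead of having the builder query it a second time.

/*
 * Sketch only: illustrates "compute the size once, pass it down",
 * using a fake allocator-size query in place of ksize().
 */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	unsigned char *data;
	size_t size;		/* usable size of data */
};

/* Stand-in for ksize(): pretend the allocator rounded us up to 256 bytes. */
static size_t query_usable_size(void *p)
{
	(void)p;
	return 256;
}

/* Old style: the builder re-queries the size when the caller passes 0. */
static void build_buf_around_requery(struct buf *b, unsigned char *data,
				     size_t osize)
{
	b->data = data;
	b->size = osize ? osize : query_usable_size(data);	/* extra lookup */
}

/* New style: the caller always passes the size it already computed. */
static void build_buf_around(struct buf *b, unsigned char *data, size_t osize)
{
	b->data = data;
	b->size = osize;					/* no second lookup */
}

int main(void)
{
	unsigned char *data = malloc(200);
	struct buf b;
	size_t osize;

	if (!data)
		return 1;

	osize = query_usable_size(data);	/* queried once, like ksize(data) */

	build_buf_around_requery(&b, data, 0);	/* old: forces a second lookup */
	build_buf_around(&b, data, osize);	/* new: reuses the cached result */

	printf("usable size: %zu\n", b.size);
	free(data);
	return 0;
}

In the patch itself, osize holds the value ksize(data) returned in __alloc_skb(), and passing it as the third argument of __build_skb_around() lets that helper skip calling ksize() on the same buffer again.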