bnx2x: avoid two atomic ops per page on x86

Commit 4cace675d6 ("bnx2x: Alloc 4k fragment for each rx ring buffer
element") added extra put_page() and get_page() calls on arches where
PAGE_SIZE=4K, such as x86.

Reorder the page reference handling to avoid this overhead.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
Cc: Yuval Mintz <Yuval.Mintz@cavium.com>
Cc: Ariel Elior <ariel.elior@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
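
For illustration, here is a minimal user-space sketch of the reordered reference
counting described above. It is not the driver code: struct page,
alloc_page_stub(), get_page() and put_page() below are simplified stand-ins for
the kernel API, and struct alloc_pool only mirrors struct bnx2x_alloc_pool.
With PAGE_SIZE=4K a single SGE fragment consumes the whole page, so the
get_page() branch is never taken and the pool's page reference simply migrates
to the SGE buffer.

	/* Stand-ins for the kernel page API; illustrative only. */
	#include <stdatomic.h>
	#include <stdlib.h>

	#define PAGE_SIZE	4096
	#define SGE_PAGE_SIZE	4096	/* one SGE fragment fills the page on x86 */

	struct page {
		atomic_int refcount;
	};

	static struct page *alloc_page_stub(void)
	{
		struct page *page = malloc(sizeof(*page));

		if (page)
			atomic_init(&page->refcount, 1);
		return page;
	}

	static void get_page(struct page *page)
	{
		atomic_fetch_add(&page->refcount, 1);
	}

	static void put_page(struct page *page)
	{
		if (atomic_fetch_sub(&page->refcount, 1) == 1)
			free(page);
	}

	struct alloc_pool {		/* mirrors struct bnx2x_alloc_pool */
		struct page *page;
		unsigned int offset;
	};

	/*
	 * Reordered scheme: the SGE buffer inherits the reference the pool
	 * already holds; get_page() is taken only when part of the page is
	 * still left for the pool, which never happens when
	 * PAGE_SIZE == SGE_PAGE_SIZE.
	 */
	static int alloc_rx_sge(struct alloc_pool *pool, struct page **sge_page)
	{
		if (!pool->page) {
			pool->page = alloc_page_stub();
			if (!pool->page)
				return -1;
			pool->offset = 0;
		}

		*sge_page = pool->page;		/* hand the reference out */
		pool->offset += SGE_PAGE_SIZE;

		if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
			get_page(pool->page);	/* pool keeps using the page */
		else
			pool->page = NULL;	/* reference now owned by the SGE */
		return 0;
	}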
Author: Eric Dumazet, 2017-01-20 08:25:34 -08:00; committed by David S. Miller
Parent: 41e8c70ee1
Commit: b9032741e4
1 changed file with 5 additions and 10 deletions

drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c

@@ -549,14 +549,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	struct bnx2x_alloc_pool *pool = &fp->page_pool;
 	dma_addr_t mapping;
 
-	if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
-
-		/* put page reference used by the memory pool, since we
-		 * won't be using this page as the mempool anymore.
-		 */
-		if (pool->page)
-			put_page(pool->page);
-
+	if (!pool->page) {
 		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
 		if (unlikely(!pool->page))
 			return -ENOMEM;
@@ -571,7 +564,6 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		return -ENOMEM;
 	}
 
-	get_page(pool->page);
 	sw_buf->page = pool->page;
 	sw_buf->offset = pool->offset;
 
@@ -581,7 +573,10 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
 
 	pool->offset += SGE_PAGE_SIZE;
-
+	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
+		get_page(pool->page);
+	else
+		pool->page = NULL;
 	return 0;
 }
 
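
For completeness, a hypothetical caller for the sketch above (again
illustrative, not driver code). The SGE consumer eventually drops the
reference it was handed, much like the driver's free path does with
put_page(); on arches with larger pages the pool keeps taking a get_page()
reference as long as part of the page remains available.

	/* Illustrative only: one allocation from an empty pool, then release. */
	int main(void)
	{
		struct alloc_pool pool = { .page = NULL, .offset = 0 };
		struct page *sge_page;

		if (alloc_rx_sge(&pool, &sge_page))
			return 1;

		/* With 4K pages the pool no longer holds the page, so this
		 * single put frees it; no extra get_page()/put_page() pair.
		 */
		put_page(sge_page);
		return 0;
	}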