netdev: octeon_mgmt: Fix race condition freeing TX buffers.
Under heavy load the TX cleanup tasklet and xmit threads would race and try to free too many buffers.

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent: 62538d2490
Commit: 4d30b8013b
@@ -189,12 +189,19 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
 
 	mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
 	while (mix_orcnt.s.orcnt) {
+		spin_lock_irqsave(&p->tx_list.lock, flags);
+
+		mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
+
+		if (mix_orcnt.s.orcnt == 0) {
+			spin_unlock_irqrestore(&p->tx_list.lock, flags);
+			break;
+		}
+
 		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
 					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
 					DMA_BIDIRECTIONAL);
 
-		spin_lock_irqsave(&p->tx_list.lock, flags);
-
 		re.d64 = p->tx_ring[p->tx_next_clean];
 		p->tx_next_clean =
 			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
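The hunk moves the decision to free a buffer under p->tx_list.lock: the MIX_ORCNT count of completed TX buffers is re-read after the lock is taken, and the loop bails out if another context already cleaned everything. Below is a minimal user-space sketch of that re-check-under-the-lock pattern, not the driver code itself; the names (pending_count, cleanup_lock, clean_one_buffer) and the pthread setup are illustrative assumptions standing in for the tasklet/xmit contexts and the hardware counter.

/* race_recheck.c: sketch of the fix's locking pattern.  Build: cc -pthread race_recheck.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t cleanup_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int pending_count = 8;	/* stands in for the MIX_ORCNT count */
static int freed_total;			/* protected by cleanup_lock */

static void clean_one_buffer(void)
{
	/* stand-in for unmapping the DMA buffer and freeing the skb */
	freed_total++;
}

static void *cleanup_context(void *arg)
{
	(void)arg;

	/* unlocked snapshot, like the first cvmx_read_csr() in the driver */
	while (atomic_load(&pending_count) > 0) {
		pthread_mutex_lock(&cleanup_lock);

		/*
		 * Re-check under the lock: the other context may have freed
		 * the remaining buffers between the test above and acquiring
		 * the lock.  Without this, both contexts can pass the outer
		 * test and try to free the same buffer.
		 */
		if (atomic_load(&pending_count) == 0) {
			pthread_mutex_unlock(&cleanup_lock);
			break;
		}

		atomic_fetch_sub(&pending_count, 1);
		clean_one_buffer();

		pthread_mutex_unlock(&cleanup_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, cleanup_context, NULL);
	pthread_create(&b, NULL, cleanup_context, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* with the re-check, exactly 8 buffers are freed, never more */
	printf("freed %d buffers\n", freed_total);
	return 0;
}

With the count checked only outside the lock, as in the pre-fix loop, both contexts could observe a non-zero count and each attempt a free, releasing more buffers than the hardware had actually returned; the re-check under the lock is what closes that window.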