serial: sh-sci: Fix TX DMA buffer flushing and workqueue races
When uart_flush_buffer() is called, the .flush_buffer() callback zeroes
the tx_dma_len field.  This may race with the work queue function
handling transmit DMA requests:

  1. If the buffer is flushed before the first DMA API call,
     dmaengine_prep_slave_single() may be called with a zero length,
     causing the DMA request to never complete, leading to messages
     like:

         rcar-dmac e7300000.dma-controller: Channel Address Error happen

     and, with debug enabled:

         sh-sci e6e88000.serial: sci_dma_tx_work_fn: ffff800639b55000: 0...0, cookie 126

     and DMA timeouts.

  2. If the buffer is flushed after the first DMA API call, but before
     the second, dma_sync_single_for_device() may be called with a zero
     length, causing the transmit data not to be flushed to RAM, and
     leading to stale data being output.

Fix this by:

  1. Letting sci_dma_tx_work_fn() return immediately if the transmit
     buffer is empty,
  2. Extending the critical section to cover all DMA preparational work,
     so tx_dma_len stays consistent for all of it,
  3. Using local copies of circ_buf.head and circ_buf.tail, to make sure
     they match the actual operation above.

Reported-by: Eugeniu Rosca <erosca@de.adit-jv.com>
Suggested-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
Reviewed-by: Eugeniu Rosca <erosca@de.adit-jv.com>
Tested-by: Eugeniu Rosca <erosca@de.adit-jv.com>
Link: https://lore.kernel.org/r/20190624123540.20629-2-geert+renesas@glider.be
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
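For clarity, here is a condensed sketch of the transmit work function after the fix, assembled from the diff below; it abbreviates the error handling and the switch-to-PIO fallback and is meant only to illustrate the locking pattern, not to reproduce the driver source verbatim:

	/* Snapshot the circular-buffer indices and compute the DMA length
	 * while holding the port lock, so a concurrent .flush_buffer()
	 * cannot change them or zero tx_dma_len underneath us.
	 */
	spin_lock_irq(&port->lock);
	head = xmit->head;
	tail = xmit->tail;
	buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1));
	s->tx_dma_len = min_t(unsigned int,
			      CIRC_CNT(head, tail, UART_XMIT_SIZE),
			      CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
	if (!s->tx_dma_len) {
		/* Transmit buffer has been flushed: nothing to do */
		spin_unlock_irq(&port->lock);
		return;
	}

	/* All remaining DMA preparation and submission also happens under
	 * the lock; it is only dropped after the descriptor has been
	 * submitted (or on the error paths, before falling back to PIO).
	 */
	desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
				   DMA_TO_DEVICE);
	desc->callback = sci_dma_tx_complete;
	desc->callback_param = s;
	s->cookie_tx = dmaengine_submit(desc);
	spin_unlock_irq(&port->lock);

	dma_async_issue_pending(chan);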
@@ -1398,6 +1398,7 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
 	struct circ_buf *xmit = &port->state->xmit;
 	unsigned long flags;
 	dma_addr_t buf;
+	int head, tail;
 
 	/*
 	 * DMA is idle now.
@@ -1407,16 +1408,23 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
 	 * consistent xmit buffer state.
 	 */
 	spin_lock_irq(&port->lock);
-	buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
+	head = xmit->head;
+	tail = xmit->tail;
+	buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1));
 	s->tx_dma_len = min_t(unsigned int,
-			      CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
-			      CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
-	spin_unlock_irq(&port->lock);
+			      CIRC_CNT(head, tail, UART_XMIT_SIZE),
+			      CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
+	if (!s->tx_dma_len) {
+		/* Transmit buffer has been flushed */
+		spin_unlock_irq(&port->lock);
+		return;
+	}
 
 	desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
 					   DMA_MEM_TO_DEV,
 					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 	if (!desc) {
+		spin_unlock_irq(&port->lock);
 		dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
 		goto switch_to_pio;
 	}
@@ -1424,18 +1432,18 @@ static void sci_dma_tx_work_fn(struct work_struct *work)
 	dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
 				   DMA_TO_DEVICE);
 
-	spin_lock_irq(&port->lock);
 	desc->callback = sci_dma_tx_complete;
 	desc->callback_param = s;
-	spin_unlock_irq(&port->lock);
 	s->cookie_tx = dmaengine_submit(desc);
 	if (dma_submit_error(s->cookie_tx)) {
+		spin_unlock_irq(&port->lock);
 		dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
 		goto switch_to_pio;
 	}
 
+	spin_unlock_irq(&port->lock);
 	dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
-		__func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
+		__func__, xmit->buf, tail, head, s->cookie_tx);
 
 	dma_async_issue_pending(chan);
 	return;