From f79abb627f033c85a6088231f20c85bc4a9bd757 Mon Sep 17 00:00:00 2001
From: Zhang Wei
Date: Tue, 18 Mar 2008 18:45:00 -0700
Subject: [PATCH 1/2] fsldma: Fix the DMA halt when using DMA_INTERRUPT async_tx transfer.

The DMA_INTERRUPT async_tx is a NULL transfer, thus the BCR (byte
count register) is 0. When a transfer is started with a byte count of
zero, the DMA controller triggers a PE (programming error) event and
halts, rather than raising a normal interrupt. Add special handling
for the PE event and a DMA_INTERRUPT async_tx case to the self-test.

Signed-off-by: Zhang Wei
Signed-off-by: Andrew Morton
Signed-off-by: Dan Williams
---
 drivers/dma/fsldma.c | 30 ++++++++++++++++++++++++++++++
 drivers/dma/fsldma.h |  1 +
 2 files changed, 31 insertions(+)

diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index ad2f938597e2..72692309398a 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -123,6 +123,11 @@ static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
 	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
 }
 
+static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
+{
+	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
+}
+
 static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
 {
 	u32 sr = get_sr(fsl_chan);
@@ -426,6 +431,9 @@ fsl_dma_prep_interrupt(struct dma_chan *chan)
 	new->async_tx.cookie = -EBUSY;
 	new->async_tx.ack = 0;
 
+	/* Insert the link descriptor to the LD ring */
+	list_add_tail(&new->node, &new->async_tx.tx_list);
+
 	/* Set End-of-link to the last link descriptor of new list*/
 	set_ld_eol(fsl_chan, new);
 
@@ -701,6 +709,23 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
 	if (stat & FSL_DMA_SR_TE)
 		dev_err(fsl_chan->dev, "Transfer Error!\n");
 
+	/* Programming Error
+	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
+	 * trigger a PE interrupt.
+	 */
+	if (stat & FSL_DMA_SR_PE) {
+		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
+		if (get_bcr(fsl_chan) == 0) {
+			/* BCR register is 0, this is a DMA_INTERRUPT
+			 * async_tx.
+			 * Now, update the completed cookie, and continue
+			 * the next uncompleted transfer.
+			 */
+			fsl_dma_update_completed_cookie(fsl_chan);
+			fsl_chan_xfer_ld_queue(fsl_chan);
+		}
+		stat &= ~FSL_DMA_SR_PE;
+	}
+
 	/* If the link descriptor segment transfer finishes,
 	 * we will recycle the used descriptor.
 	 */
@@ -841,6 +866,11 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
 	tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
 	async_tx_ack(tx3);
 
+	/* Interrupt tx test */
+	tx1 = fsl_dma_prep_interrupt(chan);
+	async_tx_ack(tx1);
+	cookie = fsl_dma_tx_submit(tx1);
+
 	/* Test exchanging the prepared tx sort */
 	cookie = fsl_dma_tx_submit(tx3);
 	cookie = fsl_dma_tx_submit(tx2);
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index ba78c42121ba..fddd6aee2a63 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -40,6 +40,7 @@
 #define FSL_DMA_MR_EOTIE	0x00000080
 
 #define FSL_DMA_SR_CH		0x00000020
+#define FSL_DMA_SR_PE		0x00000010
 #define FSL_DMA_SR_CB		0x00000004
 #define FSL_DMA_SR_TE		0x00000080
 #define FSL_DMA_SR_EOSI		0x00000002
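As background for the self-test hunk above: a DMA_INTERRUPT descriptor
is how an async_tx client requests a completion interrupt without
moving any data, which is why its byte count (BCR) is zero. Below is a
minimal sketch of such a submission, assuming the dmaengine API of
this kernel (tx->callback, tx->callback_param, tx->tx_submit()). It
mirrors the driver-internal self-test; issue_interrupt_tx() and its
completion plumbing are hypothetical helpers, not part of the patch.

	#include <linux/completion.h>
	#include <linux/dmaengine.h>

	/* Illustrative only: runs from the DMA tasklet once the NULL
	 * transfer is completed via the PE handling added above. */
	static void interrupt_tx_callback(void *param)
	{
		complete((struct completion *)param);
	}

	static dma_cookie_t issue_interrupt_tx(struct dma_chan *chan,
					       struct completion *done)
	{
		struct dma_async_tx_descriptor *tx;

		/* A DMA_INTERRUPT descriptor has BCR == 0, so it raises
		 * a PE event rather than a normal completion interrupt;
		 * the handler above turns that back into forward
		 * progress on the LD queue. */
		tx = fsl_dma_prep_interrupt(chan);
		if (!tx)
			return -ENOMEM;

		tx->callback = interrupt_tx_callback;
		tx->callback_param = done;
		return tx->tx_submit(tx);
	}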
From 8d8002f642886ae256a3c5d70fe8aff4faf3631a Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Tue, 18 Mar 2008 21:23:59 -0700
Subject: [PATCH 2/2] async_tx: avoid the async xor_zero_sum path when src_cnt > device->max_xor

If the channel cannot perform the operation in one call to
->device_prep_dma_zero_sum, then fall back to the xor+page_is_zero
path. This only affects users with arrays larger than 16 devices on
iop13xx or 32 devices on iop3xx.

Cc:
Cc: Neil Brown
Signed-off-by: Dan Williams
---
 crypto/async_tx/async_xor.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 7a9db353f198..1c445c7bdab7 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -271,7 +271,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 
 	BUG_ON(src_cnt <= 1);
 
-	if (device) {
+	if (device && src_cnt <= device->max_xor) {
 		dma_addr_t *dma_src = (dma_addr_t *) src_list;
 		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
 		int i;
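As background for the one-line change above: a condensed sketch of the
decision inside async_xor_zero_sum(), with descriptor waiting and
dependency chaining elided. The identifiers (device->max_xor,
->device_prep_dma_zero_sum(), async_xor(), page_is_zero()) come from
crypto/async_tx/async_xor.c of this era; the shape below is an
abbreviation, not the literal function body.

	if (device && src_cnt <= device->max_xor) {
		/* The engine can check every source in a single
		 * descriptor, so let hardware compute the zero-sum
		 * directly. */
		tx = device->device_prep_dma_zero_sum(chan, dma_src,
						      src_cnt, len, result,
						      dma_prep_flags);
	} else {
		/* Too many sources for one call (or no channel at
		 * all): xor all sources into dest, then test the
		 * result for zero on the CPU. */
		tx = async_xor(dest, src_list, offset, src_cnt, len,
			       xor_flags, depend_tx, NULL, NULL);
		/* ... wait for tx to complete ... */
		*result = page_is_zero(dest, offset, len) ? 0 : 1;
	}

Before this patch, a channel was used whenever it existed; with more
sources than max_xor (17+ on iop13xx, 33+ on iop3xx) the single
->device_prep_dma_zero_sum call could not cover the operation, so the
patch routes those cases through the fallback branch instead.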