fsldma: use channel name in printk output

This makes debugging the driver much easier when multiple channels are
running concurrently. In addition, you can see how much descriptor
memory each channel has allocated via the dmapool API in sysfs.

Signed-off-by: Ira W. Snyder <iws@ovro.caltech.edu>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
This commit is contained in:
Ira Snyder 2011-03-03 07:54:55 +00:00 committed by Dan Williams
Parent e8bd84df27
Commit b158471ef6
2 changed files: 36 additions and 34 deletions

View file

@@ -37,7 +37,12 @@
#include "fsldma.h" #include "fsldma.h"
static const char msg_ld_oom[] = "No free memory for link descriptor\n"; #define chan_dbg(chan, fmt, arg...) \
dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...) \
dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
static const char msg_ld_oom[] = "No free memory for link descriptor";
/* /*
* Register Helpers * Register Helpers
@@ -207,7 +212,7 @@ static void dma_halt(struct fsldma_chan *chan)
} }
if (!dma_is_idle(chan)) if (!dma_is_idle(chan))
dev_err(chan->dev, "DMA halt timeout!\n"); chan_err(chan, "DMA halt timeout!\n");
} }
/** /**
@@ -405,7 +410,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
if (!desc) { if (!desc) {
dev_dbg(chan->dev, "out of memory for link desc\n"); chan_dbg(chan, "out of memory for link descriptor\n");
return NULL; return NULL;
} }
@@ -439,13 +444,11 @@ static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
* We need the descriptor to be aligned to 32bytes * We need the descriptor to be aligned to 32bytes
* for meeting FSL DMA specification requirement. * for meeting FSL DMA specification requirement.
*/ */
chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", chan->desc_pool = dma_pool_create(chan->name, chan->dev,
chan->dev,
sizeof(struct fsl_desc_sw), sizeof(struct fsl_desc_sw),
__alignof__(struct fsl_desc_sw), 0); __alignof__(struct fsl_desc_sw), 0);
if (!chan->desc_pool) { if (!chan->desc_pool) {
dev_err(chan->dev, "unable to allocate channel %d " chan_err(chan, "unable to allocate descriptor pool\n");
"descriptor pool\n", chan->id);
return -ENOMEM; return -ENOMEM;
} }
@@ -491,7 +494,7 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
struct fsldma_chan *chan = to_fsl_chan(dchan); struct fsldma_chan *chan = to_fsl_chan(dchan);
unsigned long flags; unsigned long flags;
dev_dbg(chan->dev, "Free all channel resources.\n"); chan_dbg(chan, "free all channel resources\n");
spin_lock_irqsave(&chan->desc_lock, flags); spin_lock_irqsave(&chan->desc_lock, flags);
fsldma_free_desc_list(chan, &chan->ld_pending); fsldma_free_desc_list(chan, &chan->ld_pending);
fsldma_free_desc_list(chan, &chan->ld_running); fsldma_free_desc_list(chan, &chan->ld_running);
@@ -514,7 +517,7 @@ fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
new = fsl_dma_alloc_descriptor(chan); new = fsl_dma_alloc_descriptor(chan);
if (!new) { if (!new) {
dev_err(chan->dev, msg_ld_oom); chan_err(chan, "%s\n", msg_ld_oom);
return NULL; return NULL;
} }
@@ -551,11 +554,11 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
/* Allocate the link descriptor from DMA pool */ /* Allocate the link descriptor from DMA pool */
new = fsl_dma_alloc_descriptor(chan); new = fsl_dma_alloc_descriptor(chan);
if (!new) { if (!new) {
dev_err(chan->dev, msg_ld_oom); chan_err(chan, "%s\n", msg_ld_oom);
goto fail; goto fail;
} }
#ifdef FSL_DMA_LD_DEBUG #ifdef FSL_DMA_LD_DEBUG
dev_dbg(chan->dev, "new link desc alloc %p\n", new); chan_dbg(chan, "new link desc alloc %p\n", new);
#endif #endif
copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
@@ -639,11 +642,11 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
/* allocate and populate the descriptor */ /* allocate and populate the descriptor */
new = fsl_dma_alloc_descriptor(chan); new = fsl_dma_alloc_descriptor(chan);
if (!new) { if (!new) {
dev_err(chan->dev, msg_ld_oom); chan_err(chan, "%s\n", msg_ld_oom);
goto fail; goto fail;
} }
#ifdef FSL_DMA_LD_DEBUG #ifdef FSL_DMA_LD_DEBUG
dev_dbg(chan->dev, "new link desc alloc %p\n", new); chan_dbg(chan, "new link desc alloc %p\n", new);
#endif #endif
set_desc_cnt(chan, &new->hw, len); set_desc_cnt(chan, &new->hw, len);
@@ -815,7 +818,7 @@ static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
spin_lock_irqsave(&chan->desc_lock, flags); spin_lock_irqsave(&chan->desc_lock, flags);
if (list_empty(&chan->ld_running)) { if (list_empty(&chan->ld_running)) {
dev_dbg(chan->dev, "no running descriptors\n"); chan_dbg(chan, "no running descriptors\n");
goto out_unlock; goto out_unlock;
} }
@@ -863,7 +866,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
spin_lock_irqsave(&chan->desc_lock, flags); spin_lock_irqsave(&chan->desc_lock, flags);
dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie); chan_dbg(chan, "chan completed_cookie = %d\n", chan->completed_cookie);
list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
dma_async_tx_callback callback; dma_async_tx_callback callback;
void *callback_param; void *callback_param;
@@ -879,7 +882,7 @@ static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
callback_param = desc->async_tx.callback_param; callback_param = desc->async_tx.callback_param;
if (callback) { if (callback) {
spin_unlock_irqrestore(&chan->desc_lock, flags); spin_unlock_irqrestore(&chan->desc_lock, flags);
dev_dbg(chan->dev, "LD %p callback\n", desc); chan_dbg(chan, "LD %p callback\n", desc);
callback(callback_param); callback(callback_param);
spin_lock_irqsave(&chan->desc_lock, flags); spin_lock_irqsave(&chan->desc_lock, flags);
} }
@@ -913,7 +916,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
* don't need to do any work at all * don't need to do any work at all
*/ */
if (list_empty(&chan->ld_pending)) { if (list_empty(&chan->ld_pending)) {
dev_dbg(chan->dev, "no pending LDs\n"); chan_dbg(chan, "no pending LDs\n");
goto out_unlock; goto out_unlock;
} }
@@ -923,7 +926,7 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
* at the end of the current transaction * at the end of the current transaction
*/ */
if (!dma_is_idle(chan)) { if (!dma_is_idle(chan)) {
dev_dbg(chan->dev, "DMA controller still busy\n"); chan_dbg(chan, "DMA controller still busy\n");
goto out_unlock; goto out_unlock;
} }
@@ -1003,14 +1006,14 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
/* save and clear the status register */ /* save and clear the status register */
stat = get_sr(chan); stat = get_sr(chan);
set_sr(chan, stat); set_sr(chan, stat);
dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat); chan_dbg(chan, "irq: stat = 0x%x\n", stat);
stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
if (!stat) if (!stat)
return IRQ_NONE; return IRQ_NONE;
if (stat & FSL_DMA_SR_TE) if (stat & FSL_DMA_SR_TE)
dev_err(chan->dev, "Transfer Error!\n"); chan_err(chan, "Transfer Error!\n");
/* /*
* Programming Error * Programming Error
@@ -1018,7 +1021,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
* triger a PE interrupt. * triger a PE interrupt.
*/ */
if (stat & FSL_DMA_SR_PE) { if (stat & FSL_DMA_SR_PE) {
dev_dbg(chan->dev, "irq: Programming Error INT\n"); chan_dbg(chan, "irq: Programming Error INT\n");
if (get_bcr(chan) == 0) { if (get_bcr(chan) == 0) {
/* BCR register is 0, this is a DMA_INTERRUPT async_tx. /* BCR register is 0, this is a DMA_INTERRUPT async_tx.
* Now, update the completed cookie, and continue the * Now, update the completed cookie, and continue the
@@ -1035,8 +1038,8 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
* we will recycle the used descriptor. * we will recycle the used descriptor.
*/ */
if (stat & FSL_DMA_SR_EOSI) { if (stat & FSL_DMA_SR_EOSI) {
dev_dbg(chan->dev, "irq: End-of-segments INT\n"); chan_dbg(chan, "irq: End-of-segments INT\n");
dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n", chan_dbg(chan, "irq: clndar 0x%llx, nlndar 0x%llx\n",
(unsigned long long)get_cdar(chan), (unsigned long long)get_cdar(chan),
(unsigned long long)get_ndar(chan)); (unsigned long long)get_ndar(chan));
stat &= ~FSL_DMA_SR_EOSI; stat &= ~FSL_DMA_SR_EOSI;
@@ -1048,7 +1051,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
* and start the next transfer if it exist. * and start the next transfer if it exist.
*/ */
if (stat & FSL_DMA_SR_EOCDI) { if (stat & FSL_DMA_SR_EOCDI) {
dev_dbg(chan->dev, "irq: End-of-Chain link INT\n"); chan_dbg(chan, "irq: End-of-Chain link INT\n");
stat &= ~FSL_DMA_SR_EOCDI; stat &= ~FSL_DMA_SR_EOCDI;
update_cookie = 1; update_cookie = 1;
xfer_ld_q = 1; xfer_ld_q = 1;
@@ -1060,7 +1063,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
* prepare next transfer. * prepare next transfer.
*/ */
if (stat & FSL_DMA_SR_EOLNI) { if (stat & FSL_DMA_SR_EOLNI) {
dev_dbg(chan->dev, "irq: End-of-link INT\n"); chan_dbg(chan, "irq: End-of-link INT\n");
stat &= ~FSL_DMA_SR_EOLNI; stat &= ~FSL_DMA_SR_EOLNI;
xfer_ld_q = 1; xfer_ld_q = 1;
} }
@@ -1070,9 +1073,9 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data)
if (xfer_ld_q) if (xfer_ld_q)
fsl_chan_xfer_ld_queue(chan); fsl_chan_xfer_ld_queue(chan);
if (stat) if (stat)
dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat); chan_dbg(chan, "irq: unhandled sr 0x%08x\n", stat);
dev_dbg(chan->dev, "irq: Exit\n"); chan_dbg(chan, "irq: Exit\n");
tasklet_schedule(&chan->tasklet); tasklet_schedule(&chan->tasklet);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
@@ -1128,7 +1131,7 @@ static void fsldma_free_irqs(struct fsldma_device *fdev)
for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
chan = fdev->chan[i]; chan = fdev->chan[i];
if (chan && chan->irq != NO_IRQ) { if (chan && chan->irq != NO_IRQ) {
dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id); chan_dbg(chan, "free per-channel IRQ\n");
free_irq(chan->irq, chan); free_irq(chan->irq, chan);
} }
} }
@@ -1155,19 +1158,16 @@ static int fsldma_request_irqs(struct fsldma_device *fdev)
continue; continue;
if (chan->irq == NO_IRQ) { if (chan->irq == NO_IRQ) {
dev_err(fdev->dev, "no interrupts property defined for " chan_err(chan, "interrupts property missing in device tree\n");
"DMA channel %d. Please fix your "
"device tree\n", chan->id);
ret = -ENODEV; ret = -ENODEV;
goto out_unwind; goto out_unwind;
} }
dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id); chan_dbg(chan, "request per-channel IRQ\n");
ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
"fsldma-chan", chan); "fsldma-chan", chan);
if (ret) { if (ret) {
dev_err(fdev->dev, "unable to request IRQ for DMA " chan_err(chan, "unable to request per-channel IRQ\n");
"channel %d\n", chan->id);
goto out_unwind; goto out_unwind;
} }
} }
@@ -1242,6 +1242,7 @@ static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
fdev->chan[chan->id] = chan; fdev->chan[chan->id] = chan;
tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
/* Initialize the channel */ /* Initialize the channel */
dma_init(chan); dma_init(chan);

View file

@@ -135,6 +135,7 @@ struct fsldma_device {
#define FSL_DMA_CHAN_START_EXT 0x00002000 #define FSL_DMA_CHAN_START_EXT 0x00002000
struct fsldma_chan { struct fsldma_chan {
char name[8]; /* Channel name */
struct fsldma_chan_regs __iomem *regs; struct fsldma_chan_regs __iomem *regs;
dma_cookie_t completed_cookie; /* The maximum cookie completed */ dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */ spinlock_t desc_lock; /* Descriptor operation lock */