dmaengine fixes for 4.4-rc6

This has fixes spread thru driver, notably among them:
 - edma fixes for recent edma DT changes which went into 4.4
 - odd fixes for at_hdmac
 - minor fixes on bc dma and mic dma
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJWcE7VAAoJEHwUBw8lI4NH+IsP+gJEq1+xwC+Qni+oW0hUirwd
jltn0PZiwawsBFFxj8ZoKBxRGcpLIG0YI0k/umdpeZE3bxg/IfffpBtZfZGSF8Gr
w5lFZab2tyTjThc7PpjkGJB0ks/Dv1qlPZRx2+SoRq1IZP3ROv7i5HcTjr0pYWur
PfGq7EkeBGxyVPeElSa7VhfzimyiDz/SS77ZgOPCagnu99rWc1A+bXGvTO367E1E
IugN3+ndfykIHw4I3WBuVO+IC3yyXvgE21LyTIsb81iCs/ZzB3Cijb8jR3dpmtlK
VoFJQwAdlJHw7J7pDWhMvM8HMYIErmLbFqZbDi6PHBe6ZYkLOP3z5QVIc67l1KIN
vzIDUDvSLrFrRZ06691A6+/3yhI/g+FdlBaLeWwpcdJbmXHoEen2HrcQAF4ZUTRw
RZQeDtgze1iBeqbzEeO5+esBcAxc2PUFKQHpt/vEB1kHoA9/KjUg5L8Mkjj6o/Xz
uwoolopYJwI9H8rKZnX25F89N8R8cNrPXIe+qiCqPQj9cg2bmXzSTHFPdbI+t5bN
YdOuV1qiYfzFPKStoQEjgmYEDduHw7ndUjGuw4CXGF0ctcYlkLbP3KATfLh4MLJb
KpQ2GguIjOixlwtx/v9ovRKtqjYH5Egxa6TPiXm/MNVP2NXIXW+OG4x6OcKkLD9/
KEDo6XywrAc0VkL+1EEy
=r53B
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-fix-4.4-rc6' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "This has fixes spread thru driver, notably among them:

   - edma fixes for recent edma DT changes which went into 4.4

   - odd fixes for at_hdmac

   - minor fixes on bc dma and mic dma"

* tag 'dmaengine-fix-4.4-rc6' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: at_xdmac: fix at_xdmac_prep_dma_memcpy()
  dmaengine: edma: DT: Change reserved slot array from 16bit to 32bit type
  dmaengine: edma: DT: Change memcpy channel array from 16bit to 32bit type
  dmaengine: mic_x100: add missing spin_unlock
  dmaengine: bcm2835-dma: Convert to use DMA pool
  dmaengine: at_xdmac: fix bad behavior in interleaved mode
  dmaengine: at_xdmac: fix false condition for memset_sg transfers
  dmaengine: at_xdmac: fix macro typo

Documentation/devicetree/bindings/dma/ti-edma.txt

@@ -22,8 +22,7 @@ Required properties:
 Optional properties:
 - ti,hwmods: Name of the hwmods associated to the eDMA CC
 - ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow
-                           these channels will be SW triggered channels. The list must
-                           contain 16 bits numbers, see example.
+                           these channels will be SW triggered channels. See example.
 - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
                                 the driver, they are allocated to be used by for example the
                                 DSP. See example.
@@ -56,10 +55,9 @@ edma: edma@49000000 {
         ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>;
 
         /* Channel 20 and 21 is allocated for memcpy */
-        ti,edma-memcpy-channels = /bits/ 16 <20 21>;
-        /* The following PaRAM slots are reserved: 35-45 and 100-110 */
-        ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>,
-                                       /bits/ 16 <100 10>;
+        ti,edma-memcpy-channels = <20 21>;
+        /* The following PaRAM slots are reserved: 35-44 and 100-109 */
+        ti,edma-reserved-slot-ranges = <35 10>, <100 10>;
 };
 
 edma_tptc0: tptc@49800000 {

drivers/dma/at_xdmac.c

@@ -156,7 +156,7 @@
 #define AT_XDMAC_CC_WRIP                (0x1 << 23)  /* Write in Progress (read only) */
 #define   AT_XDMAC_CC_WRIP_DONE         (0x0 << 23)
 #define   AT_XDMAC_CC_WRIP_IN_PROGRESS  (0x1 << 23)
-#define AT_XDMAC_CC_PERID(i)            (0x7f & (h) << 24)  /* Channel Peripheral Identifier */
+#define AT_XDMAC_CC_PERID(i)            (0x7f & (i) << 24)  /* Channel Peripheral Identifier */
 #define AT_XDMAC_CDS_MSP                0x2C  /* Channel Data Stride Memory Set Pattern */
 #define AT_XDMAC_CSUS                   0x30  /* Channel Source Microblock Stride */
 #define AT_XDMAC_CDUS                   0x34  /* Channel Destination Microblock Stride */
@@ -965,7 +965,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
                                                NULL,
                                                src_addr, dst_addr,
                                                xt, xt->sgl);
-                for (i = 0; i < xt->numf; i++)
+
+                /* Length of the block is (BLEN+1) microblocks. */
+                for (i = 0; i < xt->numf - 1; i++)
                         at_xdmac_increment_block_count(chan, first);
 
                 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
@@ -1086,6 +1088,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                 /* Check remaining length and change data width if needed. */
                 dwidth = at_xdmac_align_width(chan,
                                               src_addr | dst_addr | xfer_size);
+                chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
                 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
 
                 ublen = xfer_size >> dwidth;
@@ -1333,7 +1336,7 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
                  * since we don't care about the stride anymore.
                  */
                 if ((i == (sg_len - 1)) &&
-                    sg_dma_len(ppsg) == sg_dma_len(psg)) {
+                    sg_dma_len(psg) == sg_dma_len(sg)) {
                         dev_dbg(chan2dev(chan),
                                 "%s: desc 0x%p can be merged with desc 0x%p\n",
                                 __func__, desc, pdesc);

drivers/dma/bcm2835-dma.c

@@ -31,6 +31,7 @@
  */
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -62,6 +63,11 @@ struct bcm2835_dma_cb {
         uint32_t pad[2];
 };
 
+struct bcm2835_cb_entry {
+        struct bcm2835_dma_cb *cb;
+        dma_addr_t paddr;
+};
+
 struct bcm2835_chan {
         struct virt_dma_chan vc;
         struct list_head node;
@@ -72,18 +78,18 @@ struct bcm2835_chan {
 
         int ch;
         struct bcm2835_desc *desc;
+        struct dma_pool *cb_pool;
 
         void __iomem *chan_base;
         int irq_number;
 };
 
 struct bcm2835_desc {
+        struct bcm2835_chan *c;
         struct virt_dma_desc vd;
         enum dma_transfer_direction dir;
 
-        unsigned int control_block_size;
-        struct bcm2835_dma_cb *control_block_base;
-        dma_addr_t control_block_base_phys;
+        struct bcm2835_cb_entry *cb_list;
 
         unsigned int frames;
         size_t size;
@@ -143,10 +149,13 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc(
 static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
 {
         struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
-        dma_free_coherent(desc->vd.tx.chan->device->dev,
-                        desc->control_block_size,
-                        desc->control_block_base,
-                        desc->control_block_base_phys);
+        int i;
+
+        for (i = 0; i < desc->frames; i++)
+                dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
+                              desc->cb_list[i].paddr);
+
+        kfree(desc->cb_list);
         kfree(desc);
 }
 
@@ -199,7 +208,7 @@ static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
 
         c->desc = d = to_bcm2835_dma_desc(&vd->tx);
 
-        writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+        writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
         writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
 }
 
@@ -232,9 +241,16 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
 {
         struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+        struct device *dev = c->vc.chan.device->dev;
 
-        dev_dbg(c->vc.chan.device->dev,
-                        "Allocating DMA channel %d\n", c->ch);
+        dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
+
+        c->cb_pool = dma_pool_create(dev_name(dev), dev,
+                                     sizeof(struct bcm2835_dma_cb), 0, 0);
+        if (!c->cb_pool) {
+                dev_err(dev, "unable to allocate descriptor pool\n");
+                return -ENOMEM;
+        }
 
         return request_irq(c->irq_number,
                         bcm2835_dma_callback, 0, "DMA IRQ", c);
@@ -246,6 +262,7 @@ static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
 
         vchan_free_chan_resources(&c->vc);
         free_irq(c->irq_number, c);
+        dma_pool_destroy(c->cb_pool);
 
         dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
 }
@@ -261,8 +278,7 @@ static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
         size_t size;
 
         for (size = i = 0; i < d->frames; i++) {
-                struct bcm2835_dma_cb *control_block =
-                        &d->control_block_base[i];
+                struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
                 size_t this_size = control_block->length;
                 dma_addr_t dma;
 
@@ -343,6 +359,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
         dma_addr_t dev_addr;
         unsigned int es, sync_type;
         unsigned int frame;
+        int i;
 
         /* Grab configuration */
         if (!is_slave_direction(direction)) {
@@ -374,27 +391,31 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
         if (!d)
                 return NULL;
 
+        d->c = c;
         d->dir = direction;
         d->frames = buf_len / period_len;
 
-        /* Allocate memory for control blocks */
-        d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
-        d->control_block_base = dma_zalloc_coherent(chan->device->dev,
-                        d->control_block_size, &d->control_block_base_phys,
-                        GFP_NOWAIT);
-
-        if (!d->control_block_base) {
+        d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL);
+        if (!d->cb_list) {
                 kfree(d);
                 return NULL;
         }
+        /* Allocate memory for control blocks */
+        for (i = 0; i < d->frames; i++) {
+                struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+                cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
+                                               &cb_entry->paddr);
+                if (!cb_entry->cb)
+                        goto error_cb;
+        }
 
         /*
          * Iterate over all frames, create a control block
          * for each frame and link them together.
          */
         for (frame = 0; frame < d->frames; frame++) {
-                struct bcm2835_dma_cb *control_block =
-                        &d->control_block_base[frame];
+                struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb;
 
                 /* Setup adresses */
                 if (d->dir == DMA_DEV_TO_MEM) {
@@ -428,12 +449,21 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
                  * This DMA engine driver currently only supports cyclic DMA.
                  * Therefore, wrap around at number of frames.
                  */
-                control_block->next = d->control_block_base_phys +
-                        sizeof(struct bcm2835_dma_cb)
-                        * ((frame + 1) % d->frames);
+                control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
         }
 
         return vchan_tx_prep(&c->vc, &d->vd, flags);
+error_cb:
+        i--;
+        for (; i >= 0; i--) {
+                struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+                dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
+        }
+
+        kfree(d->cb_list);
+        kfree(d);
+        return NULL;
 }
 
 static int bcm2835_dma_slave_config(struct dma_chan *chan,

drivers/dma/edma.c

@@ -1752,16 +1752,14 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
         return ret;
 }
 
-static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels)
+static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
 {
-        s16 *memcpy_ch = memcpy_channels;
-
         if (!memcpy_channels)
                 return false;
-        while (*memcpy_ch != -1) {
-                if (*memcpy_ch == ch_num)
+        while (*memcpy_channels != -1) {
+                if (*memcpy_channels == ch_num)
                         return true;
-                memcpy_ch++;
+                memcpy_channels++;
         }
         return false;
 }
@@ -1775,7 +1773,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
 {
         struct dma_device *s_ddev = &ecc->dma_slave;
         struct dma_device *m_ddev = NULL;
-        s16 *memcpy_channels = ecc->info->memcpy_channels;
+        s32 *memcpy_channels = ecc->info->memcpy_channels;
         int i, j;
 
         dma_cap_zero(s_ddev->cap_mask);
@@ -1996,16 +1994,16 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
         prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
         if (prop) {
                 const char pname[] = "ti,edma-memcpy-channels";
-                size_t nelm = sz / sizeof(s16);
-                s16 *memcpy_ch;
+                size_t nelm = sz / sizeof(s32);
+                s32 *memcpy_ch;
 
-                memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16),
+                memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
                                          GFP_KERNEL);
                 if (!memcpy_ch)
                         return ERR_PTR(-ENOMEM);
 
-                ret = of_property_read_u16_array(dev->of_node, pname,
-                                                 (u16 *)memcpy_ch, nelm);
+                ret = of_property_read_u32_array(dev->of_node, pname,
                                                 (u32 *)memcpy_ch, nelm);
                 if (ret)
                         return ERR_PTR(ret);
 
@@ -2017,31 +2015,50 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
                                 &sz);
         if (prop) {
                 const char pname[] = "ti,edma-reserved-slot-ranges";
+                u32 (*tmp)[2];
                 s16 (*rsv_slots)[2];
-                size_t nelm = sz / sizeof(*rsv_slots);
+                size_t nelm = sz / sizeof(*tmp);
                 struct edma_rsv_info *rsv_info;
+                int i;
 
                 if (!nelm)
                         return info;
 
-                rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
-                if (!rsv_info)
+                tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
+                if (!tmp)
                         return ERR_PTR(-ENOMEM);
 
+                rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
+                if (!rsv_info) {
+                        kfree(tmp);
+                        return ERR_PTR(-ENOMEM);
+                }
+
                 rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
                                          GFP_KERNEL);
-                if (!rsv_slots)
+                if (!rsv_slots) {
+                        kfree(tmp);
                         return ERR_PTR(-ENOMEM);
+                }
 
-                ret = of_property_read_u16_array(dev->of_node, pname,
-                                                 (u16 *)rsv_slots, nelm * 2);
-                if (ret)
+                ret = of_property_read_u32_array(dev->of_node, pname,
+                                                 (u32 *)tmp, nelm * 2);
+                if (ret) {
+                        kfree(tmp);
                         return ERR_PTR(ret);
+                }
 
+                for (i = 0; i < nelm; i++) {
+                        rsv_slots[i][0] = tmp[i][0];
+                        rsv_slots[i][1] = tmp[i][1];
+                }
                 rsv_slots[nelm][0] = -1;
                 rsv_slots[nelm][1] = -1;
+
                 info->rsv = rsv_info;
                 info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
+
+                kfree(tmp);
         }
 
         return info;

drivers/dma/mic_x100_dma.c

@@ -317,6 +317,7 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
         struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
         struct device *dev = mic_dma_ch_to_device(mic_ch);
         int result;
+        struct dma_async_tx_descriptor *tx = NULL;
 
         if (!len && !flags)
                 return NULL;
@@ -324,10 +325,13 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
         spin_lock(&mic_ch->prep_lock);
         result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
         if (result >= 0)
-                return allocate_tx(mic_ch);
-        dev_err(dev, "Error enqueueing dma, error=%d\n", result);
+                tx = allocate_tx(mic_ch);
+
+        if (!tx)
+                dev_err(dev, "Error enqueueing dma, error=%d\n", result);
+
         spin_unlock(&mic_ch->prep_lock);
-        return NULL;
+        return tx;
 }
 
 static struct dma_async_tx_descriptor *
@@ -335,13 +339,14 @@ mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
 {
         struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
         int ret;
+        struct dma_async_tx_descriptor *tx = NULL;
 
         spin_lock(&mic_ch->prep_lock);
         ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
         if (!ret)
-                return allocate_tx(mic_ch);
+                tx = allocate_tx(mic_ch);
         spin_unlock(&mic_ch->prep_lock);
-        return NULL;
+        return tx;
 }
 
 /* Return the status of the transaction */

include/linux/platform_data/edma.h

@@ -72,7 +72,7 @@ struct edma_soc_info {
         struct edma_rsv_info *rsv;
 
         /* List of channels allocated for memcpy, terminated with -1 */
-        s16 *memcpy_channels;
+        s32 *memcpy_channels;
 
         s8 (*queue_priority_mapping)[2];
         const s16 (*xbar_chans)[2];