dmaengine: k3: Split device_control
Split the device_control callback of the Hisilicon K3 DMA driver to make
use of the newly introduced callbacks, which will eventually be used to
retrieve slave capabilities.

Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Parent: 701c1edbb4
Commit: db08425ebd
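For context, this series replaces the dmaengine framework's single multiplexed device_control() callback with dedicated per-operation callbacks on struct dma_device. A simplified sketch of the before/after callback shape (illustrative only, not copied verbatim from dmaengine.h):

	/* Before: one callback dispatching on a command code. */
	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg);

	/* After: one type-safe callback per operation. */
	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	int (*device_terminate_all)(struct dma_chan *chan);

This patch performs that split for the Hisilicon K3 driver: k3_dma_control() is broken up into k3_dma_config(), k3_dma_terminate_all(), k3_dma_pause() and k3_dma_resume(), each registered in probe.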
@@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
 	num = 0;
 
 	if (!c->ccfg) {
-		/* default is memtomem, without calling device_control */
+		/* default is memtomem, without calling device_config */
 		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
 		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
 		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
@@ -523,20 +523,13 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
 	return vchan_tx_prep(&c->vc, &ds->vd, flags);
 }
 
-static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-	unsigned long arg)
+static int k3_dma_config(struct dma_chan *chan,
+			 struct dma_slave_config *cfg)
 {
 	struct k3_dma_chan *c = to_k3_chan(chan);
-	struct k3_dma_dev *d = to_k3_dma(chan->device);
-	struct dma_slave_config *cfg = (void *)arg;
-	struct k3_dma_phy *p = c->phy;
-	unsigned long flags;
 	u32 maxburst = 0, val = 0;
 	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
-	LIST_HEAD(head);
 
-	switch (cmd) {
-	case DMA_SLAVE_CONFIG:
 		if (cfg == NULL)
 			return -EINVAL;
 		c->dir = cfg->direction;
@@ -573,9 +566,18 @@ static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
 		/* specific request line */
 		c->ccfg |= c->vc.chan.chan_id << 4;
-		break;
 
-	case DMA_TERMINATE_ALL:
+	return 0;
+}
+
+static int k3_dma_terminate_all(struct dma_chan *chan)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_dev *d = to_k3_dma(chan->device);
+	struct k3_dma_phy *p = c->phy;
+	unsigned long flags;
+	LIST_HEAD(head);
+
 		dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);
 
 		/* Prevent this channel being scheduled */
@@ -595,9 +597,16 @@ static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 		}
 		spin_unlock_irqrestore(&c->vc.lock, flags);
 		vchan_dma_desc_free_list(&c->vc, &head);
-		break;
 
-	case DMA_PAUSE:
+	return 0;
+}
+
+static int k3_dma_pause(struct dma_chan *chan)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_dev *d = to_k3_dma(chan->device);
+	struct k3_dma_phy *p = c->phy;
+
 		dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
 		if (c->status == DMA_IN_PROGRESS) {
 			c->status = DMA_PAUSED;
@@ -609,9 +618,17 @@ static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 				spin_unlock(&d->lock);
 			}
 		}
-		break;
 
-	case DMA_RESUME:
+	return 0;
+}
+
+static int k3_dma_resume(struct dma_chan *chan)
+{
+	struct k3_dma_chan *c = to_k3_chan(chan);
+	struct k3_dma_dev *d = to_k3_dma(chan->device);
+	struct k3_dma_phy *p = c->phy;
+	unsigned long flags;
+
 		dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
 		spin_lock_irqsave(&c->vc.lock, flags);
 		if (c->status == DMA_PAUSED) {
@@ -625,10 +642,7 @@ static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 			}
 		}
 		spin_unlock_irqrestore(&c->vc.lock, flags);
-		break;
-	default:
-		return -ENXIO;
-	}
+
 	return 0;
 }
 
@@ -720,7 +734,10 @@ static int k3_dma_probe(struct platform_device *op)
 	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
 	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
 	d->slave.device_issue_pending = k3_dma_issue_pending;
-	d->slave.device_control = k3_dma_control;
+	d->slave.device_config = k3_dma_config;
+	d->slave.device_pause = k3_dma_pause;
+	d->slave.device_resume = k3_dma_resume;
+	d->slave.device_terminate_all = k3_dma_terminate_all;
 	d->slave.copy_align = DMA_ALIGN;
 
 	/* init virtual channel */
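For reference, a hypothetical client-side sketch (the channel, helper name and FIFO address below are placeholders, not taken from this driver) of how a slave consumer reaches these operations through the standard dmaengine wrappers; once the core is converted by this series, each wrapper lands in the corresponding per-operation callback registered above:

	#include <linux/dmaengine.h>

	/* Hypothetical consumer sketch -- example_use() and fifo_phys are placeholders. */
	static int example_use(struct dma_chan *chan, dma_addr_t fifo_phys)
	{
		struct dma_slave_config cfg = {
			.direction      = DMA_MEM_TO_DEV,
			.dst_addr       = fifo_phys,
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst   = 16,
		};
		int ret;

		ret = dmaengine_slave_config(chan, &cfg);	/* -> device_config (k3_dma_config) */
		if (ret)
			return ret;

		ret = dmaengine_pause(chan);		/* -> device_pause (k3_dma_pause) */
		if (ret)
			return ret;

		ret = dmaengine_resume(chan);		/* -> device_resume (k3_dma_resume) */
		if (ret)
			return ret;

		return dmaengine_terminate_all(chan);	/* -> device_terminate_all (k3_dma_terminate_all) */
	}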