dmaengine: remove BUG_ON while registering devices
The DMAengine core uses BUG_ON to check for mandatory operations and for operations implied by the advertised capabilities. Replace these BUG_ON checks with graceful error logging and error returns.

Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Parent: 99380edf9e
Commit: 3eeb515636
@@ -923,28 +923,85 @@ int dma_async_device_register(struct dma_device *device)
 		return -ENODEV;
 
 	/* validate device routines */
-	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
-		!device->device_prep_dma_memcpy);
-	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
-		!device->device_prep_dma_xor);
-	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
-		!device->device_prep_dma_xor_val);
-	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
-		!device->device_prep_dma_pq);
-	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
-		!device->device_prep_dma_pq_val);
-	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
-		!device->device_prep_dma_memset);
-	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
-		!device->device_prep_dma_interrupt);
-	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
-		!device->device_prep_dma_cyclic);
-	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
-		!device->device_prep_interleaved_dma);
+	if (!device->dev) {
+		pr_err("DMAdevice must have dev\n");
+		return -EIO;
+	}
 
-	BUG_ON(!device->device_tx_status);
-	BUG_ON(!device->device_issue_pending);
-	BUG_ON(!device->dev);
+	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_MEMCPY");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_XOR");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_XOR_VAL");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_PQ");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_PQ_VAL");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_MEMSET");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_INTERRUPT");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_CYCLIC");
+		return -EIO;
+	}
+
+	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
+		dev_err(device->dev,
+			"Device claims capability %s, but op is not defined\n",
+			"DMA_INTERLEAVE");
+		return -EIO;
+	}
+
+	if (!device->device_tx_status) {
+		dev_err(device->dev, "Device tx_status is not defined\n");
+		return -EIO;
+	}
+
+	if (!device->device_issue_pending) {
+		dev_err(device->dev, "Device issue_pending is not defined\n");
+		return -EIO;
+	}
 
 	/* note: this only matters in the
 	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
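With this change, a malformed struct dma_device no longer crashes the kernel at registration time; dma_async_device_register() logs which check failed and returns -EIO, so drivers are expected to check and propagate the return value. The sketch below is not part of this commit: the foo_* driver, its structure, and its callbacks are hypothetical placeholders, and only the dmaengine structures and dma_async_device_register() itself come from the kernel API shown in the diff above.

/*
 * Minimal sketch (assumptions: hypothetical foo_* driver; callback
 * implementations exist elsewhere in the driver) of a probe path under the
 * new behaviour: a misconfigured dma_device is rejected with -EIO instead
 * of triggering BUG_ON, so the caller checks and propagates the error.
 */
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_dma {
	struct dma_device ddev;
};

static struct dma_async_tx_descriptor *foo_prep_memcpy(struct dma_chan *chan,
		dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags);
static enum dma_status foo_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate);
static void foo_issue_pending(struct dma_chan *chan);

static int foo_dma_probe(struct platform_device *pdev)
{
	struct foo_dma *fd;
	int ret;

	fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return -ENOMEM;

	/* Advertise only capabilities that have a matching prep callback. */
	dma_cap_set(DMA_MEMCPY, fd->ddev.cap_mask);
	fd->ddev.device_prep_dma_memcpy = foo_prep_memcpy;

	/* These are now validated with error returns instead of BUG_ON. */
	fd->ddev.dev = &pdev->dev;
	fd->ddev.device_tx_status = foo_tx_status;
	fd->ddev.device_issue_pending = foo_issue_pending;

	ret = dma_async_device_register(&fd->ddev);
	if (ret) {
		/* The core has already logged which validation failed. */
		dev_err(&pdev->dev, "DMA device registration failed: %d\n", ret);
		return ret;
	}

	return 0;
}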