crypto: ccp - Use devres interface to allocate PCI/iomap and cleanup

Update pci and platform files to use devres interface to allocate the PCI
and iomap resources. Also add helper functions to consolidate module init,
exit and power management code duplication.

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Acked-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Brijesh Singh 2017-07-06 09:59:13 -05:00 коммит произвёл Herbert Xu
Родитель a0a613abe1
Коммит 970e8303cb
5 изменённых файлов: 108 добавлений и 140 удалений

Просмотреть файл

@ -586,6 +586,13 @@ static const struct ccp_actions ccp3_actions = {
.irqhandler = ccp_irq_handler,
};
/* Driver data for a version-3 CCP discovered on the platform bus:
 * no version-specific setup hook, ccp3 action table, and the register
 * block begins at the start of the mapped region (offset 0).
 */
const struct ccp_vdata ccpv3_platform = {
.version = CCP_VERSION(3, 0),
.setup = NULL,
.perform = &ccp3_actions,
.offset = 0,
};
const struct ccp_vdata ccpv3 = {
.version = CCP_VERSION(3, 0),
.setup = NULL,

Просмотреть файл

@ -539,8 +539,69 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
return ccp->cmd_q_count == suspended;
}
/* Suspend the CCP: flag the device as suspending, kick every queue
 * kthread so it can observe the flag, then wait until all queues have
 * reported themselves suspended.  Always returns 0.
 */
int ccp_dev_suspend(struct ccp_device *ccp, pm_message_t state)
{
	unsigned long irqflags;
	unsigned int q;

	spin_lock_irqsave(&ccp->cmd_lock, irqflags);

	ccp->suspending = 1;

	/* Nudge each queue kthread so it notices the suspending flag */
	for (q = 0; q < ccp->cmd_q_count; q++)
		wake_up_process(ccp->cmd_q[q].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, irqflags);

	/* Block until every queue has acknowledged the suspend */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}
/* Resume the CCP: clear the suspending flag, mark each queue as no
 * longer suspended, and wake its kthread.  Always returns 0.
 */
int ccp_dev_resume(struct ccp_device *ccp)
{
	unsigned long irqflags;
	unsigned int q;

	spin_lock_irqsave(&ccp->cmd_lock, irqflags);

	ccp->suspending = 0;

	/* Restart every queue kthread */
	for (q = 0; q < ccp->cmd_q_count; q++) {
		ccp->cmd_q[q].suspended = 0;
		wake_up_process(ccp->cmd_q[q].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, irqflags);

	return 0;
}
#endif
int ccp_dev_init(struct ccp_device *ccp)
{
ccp->io_regs = ccp->io_map + ccp->vdata->offset;
if (ccp->vdata->setup)
ccp->vdata->setup(ccp);
return ccp->vdata->perform->init(ccp);
}
/* Common device teardown: dispatch to the version-specific destroy
 * routine.  A NULL device (e.g. probe failed before allocation) is
 * silently tolerated.
 */
void ccp_dev_destroy(struct ccp_device *ccp)
{
	if (ccp)
		ccp->vdata->perform->destroy(ccp);
}
static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86

Просмотреть файл

@ -652,6 +652,11 @@ void ccp_dmaengine_unregister(struct ccp_device *ccp);
void ccp5_debugfs_setup(struct ccp_device *ccp);
void ccp5_debugfs_destroy(void);
int ccp_dev_init(struct ccp_device *ccp);
void ccp_dev_destroy(struct ccp_device *ccp);
int ccp_dev_suspend(struct ccp_device *ccp, pm_message_t state);
int ccp_dev_resume(struct ccp_device *ccp);
/* Structure for computation functions that are device-specific */
struct ccp_actions {
int (*aes)(struct ccp_op *);
@ -679,6 +684,7 @@ struct ccp_vdata {
const unsigned int offset;
};
extern const struct ccp_vdata ccpv3_platform;
extern const struct ccp_vdata ccpv3;
extern const struct ccp_vdata ccpv5a;
extern const struct ccp_vdata ccpv5b;

Просмотреть файл

@ -150,28 +150,13 @@ static void ccp_free_irqs(struct ccp_device *ccp)
ccp->irq = 0;
}
/* Validate the PCI BAR named by the driver data: it must be a
 * memory-mapped resource large enough to hold the register block
 * (vdata offset plus 0x800 bytes).  Returns the BAR index on
 * success, -EIO otherwise.
 */
static int ccp_find_mmio_area(struct ccp_device *ccp)
{
	struct pci_dev *pdev = to_pci_dev(ccp->dev);
	unsigned int bar = ccp->vdata->bar;

	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
		return -EIO;

	if (pci_resource_len(pdev, bar) < ccp->vdata->offset + 0x800)
		return -EIO;

	return bar;
}
static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ccp_device *ccp;
struct ccp_pci *ccp_pci;
struct device *dev = &pdev->dev;
unsigned int bar;
void __iomem * const *iomap_table;
int bar_mask;
int ret;
ret = -ENOMEM;
@ -193,65 +178,55 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
ret = pci_request_regions(pdev, "ccp");
ret = pcim_enable_device(pdev);
if (ret) {
dev_err(dev, "pci_request_regions failed (%d)\n", ret);
dev_err(dev, "pcim_enable_device failed (%d)\n", ret);
goto e_err;
}
ret = pci_enable_device(pdev);
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
ret = pcim_iomap_regions(pdev, bar_mask, "ccp");
if (ret) {
dev_err(dev, "pci_enable_device failed (%d)\n", ret);
goto e_regions;
dev_err(dev, "pcim_iomap_regions failed (%d)\n", ret);
goto e_err;
}
iomap_table = pcim_iomap_table(pdev);
if (!iomap_table) {
dev_err(dev, "pcim_iomap_table failed\n");
ret = -ENOMEM;
goto e_err;
}
ccp->io_map = iomap_table[ccp->vdata->bar];
if (!ccp->io_map) {
dev_err(dev, "ioremap failed\n");
ret = -ENOMEM;
goto e_err;
}
pci_set_master(pdev);
ret = ccp_find_mmio_area(ccp);
if (ret < 0)
goto e_device;
bar = ret;
ret = -EIO;
ccp->io_map = pci_iomap(pdev, bar, 0);
if (!ccp->io_map) {
dev_err(dev, "pci_iomap failed\n");
goto e_device;
}
ccp->io_regs = ccp->io_map + ccp->vdata->offset;
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
if (ret) {
ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
ret);
goto e_iomap;
goto e_err;
}
}
dev_set_drvdata(dev, ccp);
if (ccp->vdata->setup)
ccp->vdata->setup(ccp);
ret = ccp->vdata->perform->init(ccp);
ret = ccp_dev_init(ccp);
if (ret)
goto e_iomap;
goto e_err;
dev_notice(dev, "enabled\n");
return 0;
e_iomap:
pci_iounmap(pdev, ccp->io_map);
e_device:
pci_disable_device(pdev);
e_regions:
pci_release_regions(pdev);
e_err:
dev_notice(dev, "initialization failed\n");
return ret;
@ -265,13 +240,7 @@ static void ccp_pci_remove(struct pci_dev *pdev)
if (!ccp)
return;
ccp->vdata->perform->destroy(ccp);
pci_iounmap(pdev, ccp->io_map);
pci_disable_device(pdev);
pci_release_regions(pdev);
ccp_dev_destroy(ccp);
dev_notice(dev, "disabled\n");
}
@ -281,47 +250,16 @@ static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct device *dev = &pdev->dev;
struct ccp_device *ccp = dev_get_drvdata(dev);
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&ccp->cmd_lock, flags);
ccp->suspending = 1;
/* Wake all the queue kthreads to prepare for suspend */
for (i = 0; i < ccp->cmd_q_count; i++)
wake_up_process(ccp->cmd_q[i].kthread);
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
/* Wait for all queue kthreads to say they're done */
while (!ccp_queues_suspended(ccp))
wait_event_interruptible(ccp->suspend_queue,
ccp_queues_suspended(ccp));
return 0;
return ccp_dev_suspend(ccp, state);
}
static int ccp_pci_resume(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct ccp_device *ccp = dev_get_drvdata(dev);
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&ccp->cmd_lock, flags);
ccp->suspending = 0;
/* Wake up all the kthreads */
for (i = 0; i < ccp->cmd_q_count; i++) {
ccp->cmd_q[i].suspended = 0;
wake_up_process(ccp->cmd_q[i].kthread);
}
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
return 0;
return ccp_dev_resume(ccp);
}
#endif

Просмотреть файл

@ -104,19 +104,6 @@ static void ccp_free_irqs(struct ccp_device *ccp)
free_irq(ccp->irq, dev);
}
/* Look up the platform device's first MMIO resource and verify it is
 * large enough (at least 0x800 bytes) for the register block.
 * Returns the resource on success, NULL otherwise.
 */
static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
{
	struct platform_device *pdev = to_platform_device(ccp->dev);
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res || resource_size(res) < 0x800)
		return NULL;

	return res;
}
static int ccp_platform_probe(struct platform_device *pdev)
{
struct ccp_device *ccp;
@ -146,7 +133,7 @@ static int ccp_platform_probe(struct platform_device *pdev)
ccp->get_irq = ccp_get_irqs;
ccp->free_irq = ccp_free_irqs;
ior = ccp_find_mmio_area(ccp);
ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ccp->io_map = devm_ioremap_resource(dev, ior);
if (IS_ERR(ccp->io_map)) {
ret = PTR_ERR(ccp->io_map);
@ -174,7 +161,7 @@ static int ccp_platform_probe(struct platform_device *pdev)
dev_set_drvdata(dev, ccp);
ret = ccp->vdata->perform->init(ccp);
ret = ccp_dev_init(ccp);
if (ret)
goto e_err;
@ -192,7 +179,7 @@ static int ccp_platform_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct ccp_device *ccp = dev_get_drvdata(dev);
ccp->vdata->perform->destroy(ccp);
ccp_dev_destroy(ccp);
dev_notice(dev, "disabled\n");
@ -205,47 +192,16 @@ static int ccp_platform_suspend(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct ccp_device *ccp = dev_get_drvdata(dev);
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&ccp->cmd_lock, flags);
ccp->suspending = 1;
/* Wake all the queue kthreads to prepare for suspend */
for (i = 0; i < ccp->cmd_q_count; i++)
wake_up_process(ccp->cmd_q[i].kthread);
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
/* Wait for all queue kthreads to say they're done */
while (!ccp_queues_suspended(ccp))
wait_event_interruptible(ccp->suspend_queue,
ccp_queues_suspended(ccp));
return 0;
return ccp_dev_suspend(ccp, state);
}
static int ccp_platform_resume(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ccp_device *ccp = dev_get_drvdata(dev);
unsigned long flags;
unsigned int i;
spin_lock_irqsave(&ccp->cmd_lock, flags);
ccp->suspending = 0;
/* Wake up all the kthreads */
for (i = 0; i < ccp->cmd_q_count; i++) {
ccp->cmd_q[i].suspended = 0;
wake_up_process(ccp->cmd_q[i].kthread);
}
spin_unlock_irqrestore(&ccp->cmd_lock, flags);
return 0;
return ccp_dev_resume(ccp);
}
#endif
@ -260,7 +216,7 @@ MODULE_DEVICE_TABLE(acpi, ccp_acpi_match);
#ifdef CONFIG_OF
static const struct of_device_id ccp_of_match[] = {
{ .compatible = "amd,ccp-seattle-v1a",
.data = (const void *)&ccpv3 },
.data = (const void *)&ccpv3_platform },
{ },
};
MODULE_DEVICE_TABLE(of, ccp_of_match);