Replace all DMA_nBIT_MASK macros with DMA_BIT_MASK(n)
This is the second pass over the old DMA_nBIT_MASK macros. There are not many of them left, so I have put them all into one patch. I hope this is the last round; after this, the definitions of the old DMA_nBIT_MASK macros can be removed.

Signed-off-by: Yang Hongyang <yanghy@cn.fujitsu.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Tony Lindgren <tony@atomide.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Greg KH <greg@kroah.com>
Cc: Takashi Iwai <tiwai@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 316cb4ef3e
Commit: e930438c42
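For context, the DMA_BIT_MASK(n) macro that this patch standardizes on computes an n-bit DMA mask at compile time. Below is a minimal sketch of its definition as found in include/linux/dma-mapping.h of this era, shown for illustration only; it is not part of the patch:

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/*
 * The fixed-width macros being phased out expand to the same values, e.g.:
 *   DMA_32BIT_MASK == DMA_BIT_MASK(32) == 0x00000000ffffffffULL
 *   DMA_40BIT_MASK == DMA_BIT_MASK(40) == 0x000000ffffffffffULL
 *   DMA_64BIT_MASK == DMA_BIT_MASK(64) == 0xffffffffffffffffULL
 */

The (n) == 64 special case avoids the undefined behaviour of shifting a 64-bit value by 64 bits.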

@@ -131,14 +131,14 @@ static struct musb_hdrc_platform_data musb_plat = {
 	.power = 50, /* up to 100 mA */
 };

-static u64 musb_dmamask = DMA_32BIT_MASK;
+static u64 musb_dmamask = DMA_BIT_MASK(32);

 static struct platform_device musb_device = {
 	.name = "musb_hdrc",
 	.id = -1,
 	.dev = {
 		.dma_mask = &musb_dmamask,
-		.coherent_dma_mask = DMA_32BIT_MASK,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = &musb_plat,
 	},
 	.num_resources = ARRAY_SIZE(musb_resources),
@@ -146,14 +146,14 @@ static struct platform_device musb_device = {
 };

 #ifdef CONFIG_NOP_USB_XCEIV
-static u64 nop_xceiv_dmamask = DMA_32BIT_MASK;
+static u64 nop_xceiv_dmamask = DMA_BIT_MASK(32);

 static struct platform_device nop_xceiv_device = {
 	.name = "nop_usb_xceiv",
 	.id = -1,
 	.dev = {
 		.dma_mask = &nop_xceiv_dmamask,
-		.coherent_dma_mask = DMA_32BIT_MASK,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = NULL,
 	},
 };

@@ -16,7 +16,7 @@ EXPORT_SYMBOL(swiotlb);
 static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
 					  dma_addr_t *dma_handle, gfp_t gfp)
 {
-	if (dev->coherent_dma_mask != DMA_64BIT_MASK)
+	if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
 		gfp |= GFP_DMA;
 	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
 }

@@ -1059,7 +1059,7 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
 		goto out;
 	}

-	err = pci_set_dma_mask(dev, DMA_32BIT_MASK);
+	err = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
 	if (err) {
 		dev_warn(&dev->dev, "Failed to set 32-bit DMA mask\n");
 		goto out;

@@ -3505,7 +3505,7 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
 	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
 	   CCISS commands, so they must be allocated from the lower 4GiB of
 	   memory. */
-	err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (err) {
 		iounmap(vaddr);
 		return -ENOMEM;

@@ -2532,8 +2532,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
 	 * various kernel subsystems to support the mechanics required by a
 	 * fixed-high-32-bit system.
 	 */
-	if ((pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) ||
-	    (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK) != 0)) {
+	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
 		dev_err(&pdev->dev, "No usable DMA configuration,aborting\n");
 		goto err_dma;
 	}

@@ -1821,11 +1821,11 @@ static int __devinit be_probe(struct pci_dev *pdev,

 	be_msix_enable(adapter);

-	status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 	if (!status) {
 		netdev->features |= NETIF_F_HIGHDMA;
 	} else {
-		status = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (status) {
 			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
 			goto free_netdev;

@@ -2591,13 +2591,13 @@ static int
 jme_pci_dma64(struct pci_dev *pdev)
 {
 	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
-			!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
-		if (!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+			!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
 			return 1;

 	if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 &&
-			!pci_set_dma_mask(pdev, DMA_40BIT_MASK))
-		if (!pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK))
+			!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
+		if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)))
 			return 1;

 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))

@@ -93,14 +93,14 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (pci_enable_device(pdev))
 		return -EIO;

-	ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));

 	if (ret) {
 		printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
 		goto bad;
 	}

-	ret = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));

 	if (ret) {
 		printk(KERN_ERR "ath9k: 32-bit DMA consistent "

@@ -492,8 +492,8 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
 		goto err_disable_dev;
 	}

-	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) ||
-	    pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
+	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
 		dev_err(&pdev->dev, "No suitable DMA available\n");
 		goto err_free_reg;
 	}

@@ -2234,10 +2234,10 @@ static int twa_resume(struct pci_dev *pdev)
 	pci_set_master(pdev);
 	pci_try_set_mwi(pdev);

-	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)
-	    || pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
-		if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)
-		    || pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
 			TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
 			retval = -ENODEV;
 			goto out_disable_device;

@@ -1378,7 +1378,7 @@ int aac_get_adapter_info(struct aac_dev* dev)
 	if (dev->nondasd_support && !dev->in_reset)
 		printk(KERN_INFO "%s%d: Non-DASD support enabled.\n",dev->name, dev->id);

-	if (dma_get_required_mask(&dev->pdev->dev) > DMA_32BIT_MASK)
+	if (dma_get_required_mask(&dev->pdev->dev) > DMA_BIT_MASK(32))
 		dev->needs_dac = 1;
 	dev->dac_support = 0;
 	if ((sizeof(dma_addr_t) > 4) && dev->needs_dac &&

@@ -855,9 +855,9 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
 	if (sizeof(dma_addr_t) > 4) {
 		const uint64_t required_mask =
 		    dma_get_required_mask(&pdev->dev);
-		if ((required_mask > DMA_32BIT_MASK) && !pci_set_dma_mask(pdev,
-		    DMA_64BIT_MASK) && !pci_set_consistent_dma_mask(pdev,
-		    DMA_64BIT_MASK)) {
+		if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev,
+		    DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev,
+		    DMA_BIT_MASK(64))) {
 			ioc->base_add_sg_single = &_base_add_sg_single_64;
 			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
 			desc = "64";
@@ -865,8 +865,8 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
 		}
 	}

-	if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)
-	    && !pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
 		ioc->base_add_sg_single = &_base_add_sg_single_32;
 		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
 		desc = "32";

@@ -1000,7 +1000,7 @@ static int __devinit b3dfg_probe(struct pci_dev *pdev,

 	pci_set_master(pdev);

-	r = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	r = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 	if (r) {
 		dev_err(&pdev->dev, "no usable DMA configuration\n");
 		goto err_free_res;

@@ -36,14 +36,14 @@ struct nop_usb_xceiv {
 	struct device *dev;
 };

-static u64 nop_xceiv_dmamask = DMA_32BIT_MASK;
+static u64 nop_xceiv_dmamask = DMA_BIT_MASK(32);

 static struct platform_device nop_xceiv_device = {
 	.name = "nop_usb_xceiv",
 	.id = -1,
 	.dev = {
 		.dma_mask = &nop_xceiv_dmamask,
-		.coherent_dma_mask = DMA_32BIT_MASK,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 		.platform_data = NULL,
 	},
 };

@@ -2260,11 +2260,11 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
 	gcap &= ~0x01;

 	/* allow 64bit DMA address if supported by H/W */
-	if ((gcap & 0x01) && !pci_set_dma_mask(pci, DMA_64BIT_MASK))
-		pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK);
+	if ((gcap & 0x01) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
+		pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
 	else {
-		pci_set_dma_mask(pci, DMA_32BIT_MASK);
-		pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK);
+		pci_set_dma_mask(pci, DMA_BIT_MASK(32));
+		pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
 	}

 	/* read number of streams from GCAP register instead of using
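
Several of the hunks above repeat the same idiom: try a 64-bit DMA mask first and fall back to 32-bit if that fails. A minimal sketch of that pattern using the new macro is shown below; the helper name is hypothetical and the code is not taken from any file touched by this patch.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe helper: prefer 64-bit DMA, fall back to 32-bit. */
static int example_set_dma_masks(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;	/* 64-bit streaming and coherent DMA available */

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		return 0;	/* fall back to 32-bit DMA */

	dev_err(&pdev->dev, "No usable DMA configuration\n");
	return -EIO;
}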