dma-mapping: replace all DMA_64BIT_MASK macro with DMA_BIT_MASK(64)
Replace all DMA_64BIT_MASK macro with DMA_BIT_MASK(64)

Signed-off-by: Yang Hongyang <yanghy@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: 8a59f5d252
Commit: 6a35528a83
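The substitution below is purely mechanical: DMA_BIT_MASK(n) builds a mask with the low n bits set, and n == 64 is special-cased so that DMA_BIT_MASK(64) evaluates to the same all-ones value the old DMA_64BIT_MASK constant had (shifting a 64-bit integer by 64 would be undefined behaviour). A minimal sketch of the helper and of the usual probe-time fallback pattern follows; the macro name EXAMPLE_DMA_BIT_MASK and the function example_set_dma_mask are illustrative only — the real definition lives in <linux/dma-mapping.h>.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Roughly how the generic helper is defined; n == 64 is special-cased
 * because a shift of a 64-bit value by 64 is undefined behaviour. */
#define EXAMPLE_DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Illustrative probe-time pattern used by most drivers touched here:
 * ask for a 64-bit DMA mask first, fall back to 32-bit otherwise. */
static int example_set_dma_mask(struct pci_dev *pdev)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;
	return pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
}

Because the macro expands to a plain integer constant, the replacement does not change any generated code; it only standardizes how the mask is written.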
@@ -307,7 +307,7 @@ static struct resource iop13xx_adma_2_resources[] = {
 }
 };
 
-static u64 iop13xx_adma_dmamask = DMA_64BIT_MASK;
+static u64 iop13xx_adma_dmamask = DMA_BIT_MASK(64);
 static struct iop_adma_platform_data iop13xx_adma_0_data = {
 .hw_id = 0,
 .pool_size = PAGE_SIZE,
@@ -331,7 +331,7 @@ static struct platform_device iop13xx_adma_0_channel = {
 .resource = iop13xx_adma_0_resources,
 .dev = {
 .dma_mask = &iop13xx_adma_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *) &iop13xx_adma_0_data,
 },
 };
@@ -343,7 +343,7 @@ static struct platform_device iop13xx_adma_1_channel = {
 .resource = iop13xx_adma_1_resources,
 .dev = {
 .dma_mask = &iop13xx_adma_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *) &iop13xx_adma_1_data,
 },
 };
@@ -355,7 +355,7 @@ static struct platform_device iop13xx_adma_2_channel = {
 .resource = iop13xx_adma_2_resources,
 .dev = {
 .dma_mask = &iop13xx_adma_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *) &iop13xx_adma_2_data,
 },
 };
@@ -151,7 +151,7 @@ static struct resource iop13xx_tpmi_3_resources[] = {
 }
 };
 
-u64 iop13xx_tpmi_mask = DMA_64BIT_MASK;
+u64 iop13xx_tpmi_mask = DMA_BIT_MASK(64);
 static struct platform_device iop13xx_tpmi_0_device = {
 .name = "iop-tpmi",
 .id = 0,
@@ -159,7 +159,7 @@ static struct platform_device iop13xx_tpmi_0_device = {
 .resource = iop13xx_tpmi_0_resources,
 .dev = {
 .dma_mask = &iop13xx_tpmi_mask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 },
 };
 
@@ -170,7 +170,7 @@ static struct platform_device iop13xx_tpmi_1_device = {
 .resource = iop13xx_tpmi_1_resources,
 .dev = {
 .dma_mask = &iop13xx_tpmi_mask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 },
 };
 
@@ -181,7 +181,7 @@ static struct platform_device iop13xx_tpmi_2_device = {
 .resource = iop13xx_tpmi_2_resources,
 .dev = {
 .dma_mask = &iop13xx_tpmi_mask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 },
 };
 
@@ -192,7 +192,7 @@ static struct platform_device iop13xx_tpmi_3_device = {
 .resource = iop13xx_tpmi_3_resources,
 .dev = {
 .dma_mask = &iop13xx_tpmi_mask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 },
 };
 
@@ -559,7 +559,7 @@ static struct platform_device kirkwood_xor00_channel = {
 .resource = kirkwood_xor00_resources,
 .dev = {
 .dma_mask = &kirkwood_xor_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *)&kirkwood_xor00_data,
 },
 };
@@ -585,7 +585,7 @@ static struct platform_device kirkwood_xor01_channel = {
 .resource = kirkwood_xor01_resources,
 .dev = {
 .dma_mask = &kirkwood_xor_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *)&kirkwood_xor01_data,
 },
 };
@@ -657,7 +657,7 @@ static struct platform_device kirkwood_xor10_channel = {
 .resource = kirkwood_xor10_resources,
 .dev = {
 .dma_mask = &kirkwood_xor_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *)&kirkwood_xor10_data,
 },
 };
@@ -683,7 +683,7 @@ static struct platform_device kirkwood_xor11_channel = {
 .resource = kirkwood_xor11_resources,
 .dev = {
 .dma_mask = &kirkwood_xor_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *)&kirkwood_xor11_data,
 },
 };
@@ -486,7 +486,7 @@ static struct platform_device orion5x_xor0_channel = {
 .resource = orion5x_xor0_resources,
 .dev = {
 .dma_mask = &orion5x_xor_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *)&orion5x_xor0_data,
 },
 };
@@ -512,7 +512,7 @@ static struct platform_device orion5x_xor1_channel = {
 .resource = orion5x_xor1_resources,
 .dev = {
 .dma_mask = &orion5x_xor_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *)&orion5x_xor1_data,
 },
 };
@@ -143,7 +143,7 @@ struct platform_device iop3xx_dma_0_channel = {
 .resource = iop3xx_dma_0_resources,
 .dev = {
 .dma_mask = &iop3xx_adma_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *) &iop3xx_dma_0_data,
 },
 };
@@ -155,7 +155,7 @@ struct platform_device iop3xx_dma_1_channel = {
 .resource = iop3xx_dma_1_resources,
 .dev = {
 .dma_mask = &iop3xx_adma_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *) &iop3xx_dma_1_data,
 },
 };
@@ -167,7 +167,7 @@ struct platform_device iop3xx_aau_channel = {
 .resource = iop3xx_aau_resources,
 .dev = {
 .dma_mask = &iop3xx_adma_dmamask,
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .platform_data = (void *) &iop3xx_aau_data,
 },
 };
@@ -349,7 +349,7 @@ static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
 u64 sn_dma_get_required_mask(struct device *dev)
 {
-return DMA_64BIT_MASK;
+return DMA_BIT_MASK(64);
 }
 EXPORT_SYMBOL_GPL(sn_dma_get_required_mask);
 
@@ -644,7 +644,7 @@ static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
 
 static int dma_fixed_dma_supported(struct device *dev, u64 mask)
 {
-return mask == DMA_64BIT_MASK;
+return mask == DMA_BIT_MASK(64);
 }
 
 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
@@ -2405,8 +2405,8 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
 int rc;
 
 if (using_dac &&
-!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (rc) {
 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 if (rc) {
@@ -3913,8 +3913,8 @@ static int pci_go_64(struct pci_dev *pdev)
 {
 int rc;
 
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (rc) {
 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 if (rc) {
@@ -584,8 +584,8 @@ static int qs_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
 int rc, have_64bit_bus = (bus_info & QS_HPHY_64BIT);
 
 if (have_64bit_bus &&
-!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (rc) {
 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 if (rc) {
@@ -1297,8 +1297,8 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 host->iomap = iomap;
 
 /* configure and activate the device */
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (rc) {
 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 if (rc) {
@@ -1372,8 +1372,8 @@ static bool DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T
 dma_addr_t CommandMailboxDMA;
 DAC960_V2_CommandStatus_T CommandStatus;
 
-if (!pci_set_dma_mask(Controller->PCIDevice, DMA_64BIT_MASK))
-Controller->BounceBufferLimit = DMA_64BIT_MASK;
+if (!pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(64)))
+Controller->BounceBufferLimit = DMA_BIT_MASK(64);
 else if (!pci_set_dma_mask(Controller->PCIDevice, DMA_32BIT_MASK))
 Controller->BounceBufferLimit = DMA_32BIT_MASK;
 else
@@ -3637,7 +3637,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 hba[i]->pdev = pdev;
 
 /* configure PCI DMA stuff */
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK))
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
 dac = 1;
 else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK))
 dac = 0;
@@ -1586,9 +1586,9 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 goto err_out;
 
 #ifdef IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
-rc = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (!rc) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (rc) {
 printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n",
 pci_name(pdev));
@@ -829,7 +829,7 @@ static int __devinit mm_pci_probe(struct pci_dev *dev,
 dev_printk(KERN_INFO, &dev->dev,
 "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n");
 
-if (pci_set_dma_mask(dev, DMA_64BIT_MASK) &&
+if (pci_set_dma_mask(dev, DMA_BIT_MASK(64)) &&
 pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
 dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n");
 return -ENOMEM;
@@ -98,13 +98,13 @@ static int __devinit ioat_probe(struct pci_dev *pdev,
 if (err)
 goto err_request_regions;
 
-err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (err)
 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 if (err)
 goto err_set_dma_mask;
 
-err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (err)
 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 if (err)
@@ -178,7 +178,7 @@ static int __init i7300_idle_ioat_selftest(u8 *ctl,
 
 static struct device dummy_dma_dev = {
 .init_name = "fallback device",
-.coherent_dma_mask = DMA_64BIT_MASK,
+.coherent_dma_mask = DMA_BIT_MASK(64),
 .dma_mask = &dummy_dma_dev.coherent_dma_mask,
 };
 
@@ -989,7 +989,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 }
 
 if ((sizeof(dma_addr_t) > 4)) {
-ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
+ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
 if (ret < 0) {
 printk(KERN_ERR PFX "64b DMA configuration failed\n");
 goto bail2;
@@ -470,7 +470,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 goto bail_disable;
 }
 
-ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (ret) {
 /*
 * if the 64 bit setup fails, try 32 bit. Some systems
@@ -496,7 +496,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 }
 }
 else {
-ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (ret)
 dev_info(&pdev->dev,
 "Unable to set DMA consistent mask "
@@ -1016,7 +1016,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
 
 pci_set_master(pdev);
 
-err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (err) {
 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@@ -1025,7 +1025,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
 goto err_free_res;
 }
 }
-err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (err) {
 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
 "consistent PCI DMA mask.\n");
@@ -478,12 +478,12 @@ static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_i
 }
 
 if ((sizeof(dma_addr_t) > 4)) {
-ret = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
+ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
 if (ret < 0) {
 printk(KERN_ERR PFX "64b DMA mask configuration failed\n");
 goto bail2;
 }
-ret = pci_set_consistent_dma_mask(pcidev, DMA_64BIT_MASK);
+ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
 if (ret) {
 printk(KERN_ERR PFX "64b DMA consistent mask configuration failed\n");
 goto bail2;
@@ -1534,8 +1534,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
 
 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
 
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)
-&& !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
 ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
 ioc->name));
@@ -185,7 +185,7 @@ int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
 int dma_64 = 0;
 
 mutex_lock(&mem_lock);
-if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_64BIT_MASK)) {
+if ((sizeof(dma_addr_t) > 4) && (pdev->dma_mask == DMA_BIT_MASK(64))) {
 dma_64 = 1;
 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
 mutex_unlock(&mem_lock);
@@ -196,7 +196,7 @@ int i2o_dma_alloc(struct device *dev, struct i2o_dma *addr, size_t len)
 addr->virt = dma_alloc_coherent(dev, len, &addr->phys, GFP_KERNEL);
 
 if ((sizeof(dma_addr_t) > 4) && dma_64)
-if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
+if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
 printk(KERN_WARNING "i2o: unable to set 64-bit DMA");
 mutex_unlock(&mem_lock);
 
@@ -397,7 +397,7 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
 }
 #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
 if (sizeof(dma_addr_t) > 4) {
-if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
+if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
 printk(KERN_INFO "%s: 64-bit DMA unavailable\n",
 c->name);
 else {
@@ -1929,8 +1929,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
 
 /* Configure DMA attributes. */
 if ((sizeof(dma_addr_t) > 4) &&
-!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) &&
-!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 pci_using_dac = 1;
 } else {
 pci_using_dac = 0;
@@ -1161,7 +1161,7 @@ static int __devinit ace_init(struct net_device *dev)
 /*
 * Configure DMA attributes.
 */
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 ap->pci_using_dac = 1;
 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
 ap->pci_using_dac = 0;
@@ -7527,7 +7527,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 if (CHIP_NUM(bp) == CHIP_NUM_5708)
 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
 else
-persist_dma_mask = dma_mask = DMA_64BIT_MASK;
+persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
 
 /* Configure DMA attributes. */
 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
@@ -10979,9 +10979,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 goto err_out_release;
 }
 
-if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
+if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
 bp->flags |= USING_DAC_FLAG;
-if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
+if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
 " failed, aborting\n");
 rc = -EIO;
@@ -5074,10 +5074,10 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 
 
 /* Configure DMA attributes. */
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 pci_using_dac = 1;
 err = pci_set_consistent_dma_mask(pdev,
-DMA_64BIT_MASK);
+DMA_BIT_MASK(64));
 if (err < 0) {
 dev_err(&pdev->dev, "Unable to obtain 64-bit DMA "
 "for consistent allocations\n");
@@ -1056,10 +1056,10 @@ static int __devinit init_one(struct pci_dev *pdev,
 goto out_disable_pdev;
 }
 
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 pci_using_dac = 1;
 
-if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
 CH_ERR("%s: unable to obtain 64-bit DMA for "
 "consistent allocations\n", pci_name(pdev));
 err = -ENODEV;
@@ -3038,9 +3038,9 @@ static int __devinit init_one(struct pci_dev *pdev,
 goto out_release_regions;
 }
 
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 pci_using_dac = 1;
-err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (err) {
 dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
 "coherent allocations\n");
@@ -962,8 +962,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 if (err)
 return err;
 
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
-!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
 pci_using_dac = 1;
 } else {
 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@@ -4763,9 +4763,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 return err;
 
 pci_using_dac = 0;
-err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (!err) {
-err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (!err)
 pci_using_dac = 1;
 } else {
@@ -1154,9 +1154,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 return err;
 
 pci_using_dac = 0;
-err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (!err) {
-err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (!err)
 pci_using_dac = 1;
 } else {
@@ -1226,10 +1226,10 @@ static int __devinit ioc3_probe(struct pci_dev *pdev,
 int err, pci_using_dac;
 
 /* Configure DMA attributes. */
-err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (!err) {
 pci_using_dac = 1;
-err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (err < 0) {
 printk(KERN_ERR "%s: Unable to obtain 64 bit DMA "
 "for consistent allocations\n", pci_name(pdev));
@@ -365,8 +365,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 if (err)
 return err;
 
-if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
-!(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
+if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
+!(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
 pci_using_dac = 1;
 } else {
 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
@@ -4509,8 +4509,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 if (err)
 return err;
 
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK) &&
-!pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
 pci_using_dac = 1;
 } else {
 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@@ -1076,7 +1076,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 pci_set_master(pdev);
 
-err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (err) {
 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@@ -1085,7 +1085,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 goto err_release_bar2;
 }
 }
-err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (err) {
 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
 "consistent PCI DMA mask.\n");
@@ -3792,7 +3792,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 pci_set_master(pdev);
 dac_enabled = 1;
-status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (status != 0) {
 dac_enabled = 0;
 dev_err(&pdev->dev,
@@ -3804,7 +3804,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
 goto abort_with_enabled;
 }
-(void)pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+(void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
 &mgp->cmd_bus, GFP_KERNEL);
 if (mgp->cmd == NULL)
@@ -1973,7 +1973,7 @@ static int __devinit ns83820_init_one(struct pci_dev *pci_dev,
 
 /* See if we can set the dma mask early on; failure is fatal. */
 if (sizeof(dma_addr_t) == 8 &&
-!pci_set_dma_mask(pci_dev, DMA_64BIT_MASK)) {
+!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
 using_dac = 1;
 } else if (!pci_set_dma_mask(pci_dev, DMA_32BIT_MASK)) {
 using_dac = 0;
@@ -3934,9 +3934,9 @@ static int __devinit ql3xxx_probe(struct pci_dev *pdev,
 
 pci_set_master(pdev);
 
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 pci_using_dac = 1;
-err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
 pci_using_dac = 0;
 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
@@ -3726,9 +3726,9 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
 }
 
 pci_set_master(pdev);
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 set_bit(QL_DMA64, &qdev->flags);
-err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 } else {
 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 if (!err)
@@ -2046,7 +2046,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 tp->cp_cmd = PCIMulRW | RxChkSum;
 
 if ((sizeof(dma_addr_t) > 4) &&
-!pci_set_dma_mask(pdev, DMA_64BIT_MASK) && use_dac) {
+!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
 tp->cp_cmd |= PCIDAC;
 dev->features |= NETIF_F_HIGHDMA;
 } else {
@@ -7775,11 +7775,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 return ret;
 }
 
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
 dma_flag = TRUE;
 if (pci_set_consistent_dma_mask
-(pdev, DMA_64BIT_MASK)) {
+(pdev, DMA_BIT_MASK(64))) {
 DBG_PRINT(ERR_DBG,
 "Unable to obtain 64bit DMA for \
 consistent allocations\n");
@@ -3912,9 +3912,9 @@ static int __devinit skge_probe(struct pci_dev *pdev,
 
 pci_set_master(pdev);
 
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 using_dac = 1;
-err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
 using_dac = 0;
 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
@@ -4374,9 +4374,9 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 pci_set_master(pdev);
 
 if (sizeof(dma_addr_t) > sizeof(u32) &&
-!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
+!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))) {
 using_dac = 1;
-err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (err < 0) {
 dev_err(&pdev->dev, "unable to obtain 64 bit DMA "
 "for consistent allocations\n");
@@ -3042,7 +3042,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 */
 if (pdev->vendor == PCI_VENDOR_ID_SUN &&
 pdev->device == PCI_DEVICE_ID_SUN_GEM &&
-!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 pci_using_dac = 1;
 } else {
 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@@ -1941,8 +1941,8 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 if ((err = pci_enable_device(pdev))) /* it trigers interrupt, dunno why. */
 goto err_pci; /* it's not a problem though */
 
-if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK)) &&
-!(err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))) {
+if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
+!(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
 pci_using_dac = 1;
 } else {
 if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
@@ -31,6 +31,7 @@
 #include <linux/vmalloc.h>
 #include <linux/firmware.h>
 #include <asm/byteorder.h>
+#include <linux/dma-mapping.h>
 
 /* Compile Time Switches */
 /* start */
@@ -98,10 +99,6 @@
 #define READ_REG(pp, reg) readl(pp->pBdxRegs + reg)
 #define WRITE_REG(pp, reg, val) writel(val, pp->pBdxRegs + reg)
 
-#ifndef DMA_64BIT_MASK
-# define DMA_64BIT_MASK 0xffffffffffffffffULL
-#endif
-
 #ifndef DMA_32BIT_MASK
 # define DMA_32BIT_MASK 0x00000000ffffffffULL
 #endif
@@ -13232,10 +13232,10 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
 #ifdef CONFIG_HIGHMEM
-dma_mask = DMA_64BIT_MASK;
+dma_mask = DMA_BIT_MASK(64);
 #endif
 } else
-persist_dma_mask = dma_mask = DMA_64BIT_MASK;
+persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
 
 /* Configure DMA attributes. */
 if (dma_mask > DMA_32BIT_MASK) {
@@ -1180,7 +1180,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 #if 0
 // dma_supported() is deeply broken on almost all architectures
 // possible with some EHCI controllers
-if (dma_supported (&udev->dev, DMA_64BIT_MASK))
+if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
 net->features |= NETIF_F_HIGHDMA;
 #endif
 
@@ -772,7 +772,7 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
 
 tmp = b43_read32(dev, SSB_TMSHIGH);
 if (tmp & SSB_TMSHIGH_DMA64)
-return DMA_64BIT_MASK;
+return DMA_BIT_MASK(64);
 mmio_base = b43_dmacontroller_base(0, 0);
 b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
 tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
@@ -788,7 +788,7 @@ static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
 return B43_DMA_30BIT;
 if (dmamask == DMA_32BIT_MASK)
 return B43_DMA_32BIT;
-if (dmamask == DMA_64BIT_MASK)
+if (dmamask == DMA_BIT_MASK(64))
 return B43_DMA_64BIT;
 B43_WARN_ON(1);
 return B43_DMA_30BIT;
@@ -999,7 +999,7 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
 err = ssb_dma_set_mask(dev->dev, mask);
 if (!err)
 break;
-if (mask == DMA_64BIT_MASK) {
+if (mask == DMA_BIT_MASK(64)) {
 mask = DMA_32BIT_MASK;
 fallback = 1;
 continue;
@@ -846,7 +846,7 @@ static u64 supported_dma_mask(struct b43legacy_wldev *dev)
 
 tmp = b43legacy_read32(dev, SSB_TMSHIGH);
 if (tmp & SSB_TMSHIGH_DMA64)
-return DMA_64BIT_MASK;
+return DMA_BIT_MASK(64);
 mmio_base = b43legacy_dmacontroller_base(0, 0);
 b43legacy_write32(dev,
 mmio_base + B43legacy_DMA32_TXCTL,
@@ -865,7 +865,7 @@ static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
 return B43legacy_DMA_30BIT;
 if (dmamask == DMA_32BIT_MASK)
 return B43legacy_DMA_32BIT;
-if (dmamask == DMA_64BIT_MASK)
+if (dmamask == DMA_BIT_MASK(64))
 return B43legacy_DMA_64BIT;
 B43legacy_WARN_ON(1);
 return B43legacy_DMA_30BIT;
@@ -1042,7 +1042,7 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
 err = ssb_dma_set_mask(dev->dev, mask);
 if (!err)
 break;
-if (mask == DMA_64BIT_MASK) {
+if (mask == DMA_BIT_MASK(64)) {
 mask = DMA_32BIT_MASK;
 fallback = 1;
 continue;
@@ -57,7 +57,7 @@
 
 #define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK)
-#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK)
+#define DMA_64BIT_PFN IOVA_PFN(DMA_BIT_MASK(64))
 
 /* global iommu list, set NULL for ignored DMAR units */
 static struct intel_iommu **g_iommus;
@@ -2016,8 +2016,8 @@ static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id
 pci_set_master(pdev);
 pci_try_set_mwi(pdev);
 
-if (pci_set_dma_mask(pdev, DMA_64BIT_MASK)
-|| pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+|| pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)
 || pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK)) {
 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
@@ -1402,8 +1402,8 @@ int aac_get_adapter_info(struct aac_dev* dev)
 }
 
 if(dev->dac_support != 0) {
-if (!pci_set_dma_mask(dev->pdev, DMA_64BIT_MASK) &&
-!pci_set_consistent_dma_mask(dev->pdev, DMA_64BIT_MASK)) {
+if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64)) &&
+!pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
 if (!dev->in_reset)
 printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
 dev->name, dev->id);
@@ -195,7 +195,7 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 const u64 required_mask = dma_get_required_mask(dev);
 
 if (required_mask > DMA_39BIT_MASK &&
-dma_set_mask(dev, DMA_64BIT_MASK) == 0)
+dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
 ahd->flags |= AHD_64BIT_ADDRESSING;
 else if (required_mask > DMA_32BIT_MASK &&
 dma_set_mask(dev, DMA_39BIT_MASK) == 0)
@@ -790,8 +790,8 @@ static int __devinit asd_pci_probe(struct pci_dev *dev,
 goto Err_remove;
 
 err = -ENODEV;
-if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)
-&& !pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK))
+if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))
+&& !pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64)))
 ;
 else if (!pci_set_dma_mask(dev, DMA_32BIT_MASK)
 && !pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK))
@@ -393,7 +393,7 @@ static int arcmsr_probe(struct pci_dev *pdev,
 acb = (struct AdapterControlBlock *)host->hostdata;
 memset(acb, 0, sizeof (struct AdapterControlBlock));
 
-error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (error) {
 error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 if (error) {
@@ -1014,7 +1014,7 @@ static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev
 * See if we should enable dma64 mode.
 */
 if (sizeof(dma_addr_t) > 4 &&
-pci_set_dma_mask(pDev, DMA_64BIT_MASK) == 0) {
+pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
 if (dma_get_required_mask(&pDev->dev) > DMA_32BIT_MASK)
 dma64 = 1;
 }
@@ -5030,7 +5030,7 @@ static int __devinit gdth_pci_probe_one(gdth_pci_str *pcistr,
 }
 } else {
 shp->max_cmd_len = 16;
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
 printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum);
 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
 printk(KERN_WARNING "GDT-PCI %d: "
@@ -958,7 +958,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
 pci_set_master(pcidev);
 
 /* Enable 64bit DMA if possible */
-if (pci_set_dma_mask(pcidev, DMA_64BIT_MASK)) {
+if (pci_set_dma_mask(pcidev, DMA_BIT_MASK(64))) {
 if (pci_set_dma_mask(pcidev, DMA_32BIT_MASK)) {
 printk(KERN_ERR "hptiop: fail to set dma_mask\n");
 goto disable_pci_device;
@@ -7048,7 +7048,7 @@ ips_init_phase1(struct pci_dev *pci_dev, int *indexPtr)
 * are guaranteed to be < 4G.
 */
 if (IPS_ENABLE_DMA64 && IPS_HAS_ENH_SGLIST(ha) &&
-!pci_set_dma_mask(ha->pcidev, DMA_64BIT_MASK)) {
+!pci_set_dma_mask(ha->pcidev, DMA_BIT_MASK(64))) {
 (ha)->flags |= IPS_HA_ENH_SG;
 } else {
 if (pci_set_dma_mask(ha->pcidev, DMA_32BIT_MASK) != 0) {
@@ -2660,7 +2660,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 pci_save_state(pdev);
 pci_try_set_mwi(pdev);
 
-if (pci_set_dma_mask(phba->pcidev, DMA_64BIT_MASK) != 0)
+if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0)
 if (pci_set_dma_mask(phba->pcidev, DMA_32BIT_MASK) != 0)
 goto out_idr_remove;
 
@@ -4793,7 +4793,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
 /* Set the Mode of addressing to 64 bit if we can */
 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
-pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 adapter->has_64bit_addr = 1;
 } else {
 pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@@ -900,7 +900,7 @@ megaraid_init_mbox(adapter_t *adapter)
 adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
 (adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
 adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
-if (pci_set_dma_mask(adapter->pdev, DMA_64BIT_MASK)) {
+if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
 con_log(CL_ANN, (KERN_WARNING
 "megaraid: DMA mask for 64-bit failed\n"));
 
@@ -2497,7 +2497,7 @@ megasas_set_dma_mask(struct pci_dev *pdev)
 * All our contollers are capable of performing 64-bit DMA
 */
 if (IS_DMA64) {
-if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
+if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
 
 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0)
 goto fail_set_dma_mask;
@@ -875,8 +875,8 @@ static int pci_go_64(struct pci_dev *pdev)
 {
 int rc;
 
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
-rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (rc) {
 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
 if (rc) {
@@ -4275,7 +4275,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 ha->devnum = devnum; /* specifies microcode load address */
 
 #ifdef QLA_64BIT_PTR
-if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
+if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
 if (pci_set_dma_mask(ha->pdev, DMA_32BIT_MASK)) {
 printk(KERN_WARNING "scsi(%li): Unable to set a "
 "suitable DMA mask - aborting\n", ha->host_no);
@@ -1176,10 +1176,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
 /* Assume a 32bit DMA mask. */
 ha->flags.enable_64bit_addressing = 0;
 
-if (!dma_set_mask(&ha->pdev->dev, DMA_64BIT_MASK)) {
+if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
 /* Any upper-dword bits set? */
 if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
-!pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
+!pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
 /* Ok, a 64bit DMA mask is applicable. */
 ha->flags.enable_64bit_addressing = 1;
 ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
@@ -1369,8 +1369,8 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha)
 int retval;
 
 /* Update our PCI device dma_mask for full 64 bit mask */
-if (pci_set_dma_mask(ha->pdev, DMA_64BIT_MASK) == 0) {
-if (pci_set_consistent_dma_mask(ha->pdev, DMA_64BIT_MASK)) {
+if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64)) == 0) {
+if (pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
 dev_dbg(&ha->pdev->dev,
 "Failed to set 64 bit PCI consistent mask; "
 "using 32 bit.\n");
@@ -1395,8 +1395,8 @@ static int stex_set_dma_mask(struct pci_dev * pdev)
 {
 int ret;
 
-if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)
-&& !pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK))
+if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
 return 0;
 ret = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
 if (!ret)
@@ -1094,7 +1094,7 @@ do { \
 (data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len); \
 } while (0)
 #elif SYM_CONF_DMA_ADDRESSING_MODE == 2
-#define DMA_DAC_MASK DMA_64BIT_MASK
+#define DMA_DAC_MASK DMA_BIT_MASK(64)
 int sym_lookup_dmap(struct sym_hcb *np, u32 h, int s);
 static inline void
 sym_build_sge(struct sym_hcb *np, struct sym_tblmove *data, u64 badd, int len)
@@ -617,9 +617,9 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
 pci_set_master(pdev);
 
 #ifdef USE_64BIT_DMA
-ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
 if (!ret) {
-ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
 if (ret < 0) {
 printk(KERN_WARNING "%s: Unable to obtain 64 bit DMA "
 "for consistent allocations\n",
@@ -849,8 +849,8 @@ static int __devinit probe(struct pci_dev *dev, const struct pci_device_id *id)
 #if 1 /* @todo For now, disable 64-bit, because I do not understand the implications (DAC!) */
 /* query for DMA transfer */
 /* @see Documentation/PCI/PCI-DMA-mapping.txt */
-if (!pci_set_dma_mask(dev, DMA_64BIT_MASK)) {
-pci_set_consistent_dma_mask(dev, DMA_64BIT_MASK);
+if (!pci_set_dma_mask(dev, DMA_BIT_MASK(64))) {
+pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
 /* use 64-bit DMA */
 printk(KERN_DEBUG "Using a 64-bit DMA mask.\n");
 } else
@@ -371,9 +371,9 @@ static int __devinit slic_entry_probe(struct pci_dev *pcidev,
 printk(KERN_DEBUG "%s\n", slic_proc_version);
 }
 
-err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK);
+err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
 if (err) {
-err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK);
+err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
 if (err)
 goto err_out_disable_pci;
 }
@@ -934,8 +934,8 @@ static int sxg_entry_probe(struct pci_dev *pcidev,
 
 pci_read_config_byte(pcidev, PCI_REVISION_ID, &revision_id);
 
-if (!(err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK))) {
-DBG_ERROR("pci_set_dma_mask(DMA_64BIT_MASK) successful\n");
+if (!(err = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64)))) {
+DBG_ERROR("pci_set_dma_mask(DMA_BIT_MASK(64)) successful\n");
 } else {
 if ((err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK))) {
 DBG_ERROR
@@ -622,7 +622,7 @@ static int ehci_run (struct usb_hcd *hcd)
 ehci_writel(ehci, 0, &ehci->regs->segment);
 #if 0
 // this is deeply broken on almost all architectures
-if (!dma_set_mask(hcd->self.controller, DMA_64BIT_MASK))
+if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64)))
 ehci_info(ehci, "enabled 64bit DMA\n");
 #endif
 }
@@ -160,8 +160,8 @@ static int whci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 pci_enable_msi(pci);
 pci_set_master(pci);
 err = -ENXIO;
-if (!pci_set_dma_mask(pci, DMA_64BIT_MASK))
-pci_set_consistent_dma_mask(pci, DMA_64BIT_MASK);
+if (!pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
+pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64));
 else if (!pci_set_dma_mask(pci, DMA_32BIT_MASK))
 pci_set_consistent_dma_mask(pci, DMA_32BIT_MASK);
 else