sparc64/pci_sun4v: fix ATU checks for large DMA masks

Now that we allow drivers to always set larger-than-required DMA masks,
we need to be a little more careful in the sun4v PCI IOMMU driver about
when to select ATU support - a larger DMA mask can be set even when the
platform does not support ATU, so we always have to check that it is
available before using it.  Add a little helper for that and use it in
all the places where we make ATU usage decisions based on the DMA mask.

Fixes: 24132a419c ("sparc64/pci_sun4v: allow large DMA masks")
Reported-by: Meelis Roos <mroos@linux.ee>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Meelis Roos <mroos@linux.ee>
Acked-by: David S. Miller <davem@davemloft.net>
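
For illustration only (a minimal sketch drawn from the hunks below, not
part of the patch itself): the old call sites picked the mapping table
purely from the DMA mask, so a 64-bit mask on a sun4v system without ATU
ended up pointing at a non-existent ATU table; the helper makes the
availability check explicit.

	/* Helper introduced by this patch: use the ATU only if the platform
	 * provides one and the mask actually needs more than 32 bits. */
	static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
	{
		return iommu->atu && mask > DMA_BIT_MASK(32);
	}

	/* Before: table choice based on the mask alone; &atu->tbl is bogus
	 * when the platform has no ATU. */
	if (mask <= DMA_BIT_MASK(32))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	/* After: a large mask on a non-ATU platform falls back to the
	 * legacy IOMMU table. */
	if (!iommu_use_atu(iommu, mask))
		tbl = &iommu->tbl;
	else
		tbl = &iommu->atu->tbl;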
Christoph Hellwig 2019-04-03 21:34:34 +02:00
Parent 15ade5d2e7
Commit 2a29e9f6b9
1 changed file with 11 additions and 9 deletions


@@ -73,6 +73,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
 	p->npages = 0;
 }
 
+static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
+{
+	return iommu->atu && mask > DMA_BIT_MASK(32);
+}
+
 /* Interrupts must be disabled. */
 static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 {
@@ -92,7 +97,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
 	prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
 
 	while (npages != 0) {
-		if (mask <= DMA_BIT_MASK(32) || !pbm->iommu->atu) {
+		if (!iommu_use_atu(pbm->iommu, mask)) {
 			num = pci_sun4v_iommu_map(devhandle,
 						  HV_PCI_TSBID(0, entry),
 						  npages,
@@ -179,7 +184,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	unsigned long flags, order, first_page, npages, n;
 	unsigned long prot = 0;
 	struct iommu *iommu;
-	struct atu *atu;
 	struct iommu_map_table *tbl;
 	struct page *page;
 	void *ret;
@@ -205,13 +209,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
 	memset((char *)first_page, 0, PAGE_SIZE << order);
 
 	iommu = dev->archdata.iommu;
-	atu = iommu->atu;
-
 	mask = dev->coherent_dma_mask;
-	if (mask <= DMA_BIT_MASK(32) || !atu)
+	if (!iommu_use_atu(iommu, mask))
 		tbl = &iommu->tbl;
 	else
-		tbl = &atu->tbl;
+		tbl = &iommu->atu->tbl;
 
 	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
 				      (unsigned long)(-1), 0);
@@ -333,7 +335,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
 	atu = iommu->atu;
 	devhandle = pbm->devhandle;
 
-	if (dvma <= DMA_BIT_MASK(32)) {
+	if (!iommu_use_atu(iommu, dvma)) {
 		tbl = &iommu->tbl;
 		iotsb_num = 0; /* we don't care for legacy iommu */
 	} else {
@@ -374,7 +376,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
 	npages >>= IO_PAGE_SHIFT;
 
 	mask = *dev->dma_mask;
-	if (mask <= DMA_BIT_MASK(32))
+	if (!iommu_use_atu(iommu, mask))
 		tbl = &iommu->tbl;
 	else
 		tbl = &atu->tbl;
@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
 
 	mask = *dev->dma_mask;
-	if (mask <= DMA_BIT_MASK(32))
+	if (!iommu_use_atu(iommu, mask))
 		tbl = &iommu->tbl;
 	else
 		tbl = &atu->tbl;