Fix a sparc64 sun4v_pci regression introduced in this merge window,
 and a dma-debug stacktrace regression from the big refactor last
 merge window.
 -----BEGIN PGP SIGNATURE-----
 
 iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAlywq+gLHGhjaEBsc3Qu
 ZGUACgkQD55TZVIEUYPXSg//XPtl1nRBygmt4USrcwU7n0sSB5/THnh4uVUF/ugW
 wus6lJ+AIwoSpUv2VTGAifx0GOYIC7SHtEaGxBUkklwMygD+GhOm6uPtjsczI466
 aUt3R1w88Anz0bTFLSZ4z3ESlAejHN7IgZZXjLEpmsTgvz5JwXCN/DbEc8wBNMfe
 NF9QbEk4i6RaVHr1PA5dao8jQ2BIO7WLNOxZjm8IRHs2+wgX3vAs4ohjLQyFc6dp
 TdVcP3oe9Ere8pkvFp1om/fr185pnEKn01fuG7lf3QA3JFkY7KAa9FxVOumzHZhh
 tt/0rnoCYgzOjg+SVYEadqaH9rneNriKER+JBz9J5OOhRP/W5OuSsJkd9V6qSUEp
 xO/Ie9tqF1KZI8+hDh70OE18h3+b1g/aGpS8KxOqa3J548TbzOcKnzfsL3T5rIlH
 zW+mw1Z14PZ4Z0cXSH214fkVoZphF80A9cdZMYKiR9GKo1Jw0jBkw4qMvBocP3r+
 zf53nXQewbRrnQ5Rn1dFDlWTUdazS+kXFXFmt69i22JEBjbTd8ljT53uLzlP+8BW
 sW9Nuc1yybgtW83F8kLLuetY+z8fo9SurxSE31ogUhVRV+hWCPsv8I6HtLZYbKFQ
 Ygc1xdjL+d6N5ywnZ2s5fDAtHA/TKE+6wDGTHlD3GoNpfNvOLVDl8EjLcUuK/uW9
 uOE=
 =Ohx3
 -----END PGP SIGNATURE-----

Merge tag 'dma-mapping-5.1-1' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:
 "Fix a sparc64 sun4v_pci regression introduced in this merge window,
  and a dma-debug stacktrace regression from the big refactor last
  merge window"

* tag 'dma-mapping-5.1-1' of git://git.infradead.org/users/hch/dma-mapping:
  dma-debug: only skip one stackframe entry
  sparc64/pci_sun4v: fix ATU checks for large DMA masks
This commit is contained in:
Linus Torvalds 2019-04-12 08:25:16 -07:00
Родитель 4876191cbe 8c5165430c
Коммит 8ee15f3248
2 изменённых файла: 12 добавлений и 10 удалений

Просмотреть файл

@ -73,6 +73,11 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns
p->npages = 0; p->npages = 0;
} }
/*
 * Decide whether DMA for the given mask should go through the ATU
 * (Address Translation Unit) table rather than the legacy IOMMU table.
 * True only when the platform actually provides an ATU (iommu->atu is
 * non-NULL) and the device's DMA mask exceeds 32 bits.
 */
static inline bool iommu_use_atu(struct iommu *iommu, u64 mask)
{
	if (mask <= DMA_BIT_MASK(32))
		return false;
	return iommu->atu != NULL;
}
/* Interrupts must be disabled. */ /* Interrupts must be disabled. */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask) static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{ {
@ -92,7 +97,7 @@ static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
while (npages != 0) { while (npages != 0) {
if (mask <= DMA_BIT_MASK(32) || !pbm->iommu->atu) { if (!iommu_use_atu(pbm->iommu, mask)) {
num = pci_sun4v_iommu_map(devhandle, num = pci_sun4v_iommu_map(devhandle,
HV_PCI_TSBID(0, entry), HV_PCI_TSBID(0, entry),
npages, npages,
@ -179,7 +184,6 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
unsigned long flags, order, first_page, npages, n; unsigned long flags, order, first_page, npages, n;
unsigned long prot = 0; unsigned long prot = 0;
struct iommu *iommu; struct iommu *iommu;
struct atu *atu;
struct iommu_map_table *tbl; struct iommu_map_table *tbl;
struct page *page; struct page *page;
void *ret; void *ret;
@ -205,13 +209,11 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
memset((char *)first_page, 0, PAGE_SIZE << order); memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu; iommu = dev->archdata.iommu;
atu = iommu->atu;
mask = dev->coherent_dma_mask; mask = dev->coherent_dma_mask;
if (mask <= DMA_BIT_MASK(32) || !atu) if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl; tbl = &iommu->tbl;
else else
tbl = &atu->tbl; tbl = &iommu->atu->tbl;
entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0); (unsigned long)(-1), 0);
@ -333,7 +335,7 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
atu = iommu->atu; atu = iommu->atu;
devhandle = pbm->devhandle; devhandle = pbm->devhandle;
if (dvma <= DMA_BIT_MASK(32)) { if (!iommu_use_atu(iommu, dvma)) {
tbl = &iommu->tbl; tbl = &iommu->tbl;
iotsb_num = 0; /* we don't care for legacy iommu */ iotsb_num = 0; /* we don't care for legacy iommu */
} else { } else {
@ -374,7 +376,7 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
npages >>= IO_PAGE_SHIFT; npages >>= IO_PAGE_SHIFT;
mask = *dev->dma_mask; mask = *dev->dma_mask;
if (mask <= DMA_BIT_MASK(32)) if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl; tbl = &iommu->tbl;
else else
tbl = &atu->tbl; tbl = &atu->tbl;
@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT; IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
mask = *dev->dma_mask; mask = *dev->dma_mask;
if (mask <= DMA_BIT_MASK(32)) if (!iommu_use_atu(iommu, mask))
tbl = &iommu->tbl; tbl = &iommu->tbl;
else else
tbl = &atu->tbl; tbl = &atu->tbl;

Просмотреть файл

@ -706,7 +706,7 @@ static struct dma_debug_entry *dma_entry_alloc(void)
#ifdef CONFIG_STACKTRACE #ifdef CONFIG_STACKTRACE
entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES;
entry->stacktrace.entries = entry->st_entries; entry->stacktrace.entries = entry->st_entries;
entry->stacktrace.skip = 2; entry->stacktrace.skip = 1;
save_stack_trace(&entry->stacktrace); save_stack_trace(&entry->stacktrace);
#endif #endif