dma-direct: support PCI P2PDMA pages in dma-direct map_sg
Add PCI P2PDMA support for dma_direct_map_sg() so that it can map
PCI P2PDMA pages directly without a hack in the callers. This allows
for heterogeneous SGLs that contain both P2PDMA and regular pages.

A P2PDMA page may have three possible outcomes when being mapped:
  1) If the data path between the two devices doesn't go through the
     root port, then it should be mapped with a PCI bus address.
  2) If the data path goes through the host bridge, it should be mapped
     normally, as though it were a CPU physical address.
  3) It is not possible for the two devices to communicate and thus
     the mapping operation should fail (and it will return -EREMOTEIO).

SGL segments that contain PCI bus addresses are marked with
sg_dma_mark_bus_address() and are ignored when unmapped.

P2PDMA mappings are also failed if swiotlb needs to be used on the
mapping.

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Parent: 7c2645a2a3
Commit: f02ad36d4f
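As an illustration of what this change enables for callers, here is a minimal, hypothetical sketch of a driver mapping a heterogeneous SG table (host pages mixed with P2PDMA pages). It is not part of the patch: the helper name map_mixed_sgt() and the debug print are assumptions for the example; dma_map_sgtable(), for_each_sgtable_dma_sg() and sg_is_dma_bus_address() are the existing interfaces this series builds on.

/*
 * Illustrative sketch only, not part of this patch: map an sg_table that
 * mixes regular host pages and PCI P2PDMA pages. The helper name is
 * hypothetical; the dma-mapping calls are the ones this series extends.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int map_mixed_sgt(struct device *dev, struct sg_table *sgt)
{
	struct scatterlist *sg;
	int i, ret;

	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
	if (ret == -EREMOTEIO)
		return ret;	/* outcome 3: the two devices cannot reach each other */
	if (ret)
		return ret;	/* ordinary mapping failure */

	for_each_sgtable_dma_sg(sgt, sg, i) {
		/*
		 * Segments mapped with a PCI bus address (outcome 1) are
		 * flagged in the SGL; they still carry a dma_address and
		 * dma_len usable for programming the DMA engine.
		 */
		if (sg_is_dma_bus_address(sg))
			pr_debug("segment %d uses a PCI bus address: %pad\n",
				 i, &sg_dma_address(sg));
	}
	return 0;
}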
kernel/dma/direct.c

@@ -454,29 +454,60 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 		arch_sync_dma_for_cpu_all();
 }
 
+/*
+ * Unmaps segments, except for ones marked as pci_p2pdma which do not
+ * require any further action as they contain a bus address.
+ */
 void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *sg;
 	int i;
 
-	for_each_sg(sgl, sg, nents, i)
-		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
-				attrs);
+	for_each_sg(sgl, sg, nents, i) {
+		if (sg_is_dma_bus_address(sg))
+			sg_dma_unmark_bus_address(sg);
+		else
+			dma_direct_unmap_page(dev, sg->dma_address,
+					      sg_dma_len(sg), dir, attrs);
+	}
 }
 #endif
 
 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 		enum dma_data_direction dir, unsigned long attrs)
 {
-	int i;
+	struct pci_p2pdma_map_state p2pdma_state = {};
+	enum pci_p2pdma_map_type map;
 	struct scatterlist *sg;
+	int i, ret;
 
 	for_each_sg(sgl, sg, nents, i) {
+		if (is_pci_p2pdma_page(sg_page(sg))) {
+			map = pci_p2pdma_map_segment(&p2pdma_state, dev, sg);
+			switch (map) {
+			case PCI_P2PDMA_MAP_BUS_ADDR:
+				continue;
+			case PCI_P2PDMA_MAP_THRU_HOST_BRIDGE:
+				/*
+				 * Any P2P mapping that traverses the PCI
+				 * host bridge must be mapped with CPU physical
+				 * address and not PCI bus addresses. This is
+				 * done with dma_direct_map_page() below.
+				 */
+				break;
+			default:
+				ret = -EREMOTEIO;
+				goto out_unmap;
+			}
+		}
+
 		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
 				sg->offset, sg->length, dir, attrs);
-		if (sg->dma_address == DMA_MAPPING_ERROR)
+		if (sg->dma_address == DMA_MAPPING_ERROR) {
+			ret = -EIO;
 			goto out_unmap;
+		}
 		sg_dma_len(sg) = sg->length;
 	}
 
@@ -484,7 +515,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 
 out_unmap:
 	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
-	return -EIO;
+	return ret;
 }
 
 dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,

kernel/dma/direct.h

@@ -8,6 +8,7 @@
 #define _KERNEL_DMA_DIRECT_H
 
 #include <linux/dma-direct.h>
+#include <linux/memremap.h>
 
 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
@@ -87,10 +88,15 @@ static inline dma_addr_t dma_direct_map_page(struct device *dev,
 	phys_addr_t phys = page_to_phys(page) + offset;
 	dma_addr_t dma_addr = phys_to_dma(dev, phys);
 
-	if (is_swiotlb_force_bounce(dev))
+	if (is_swiotlb_force_bounce(dev)) {
+		if (is_pci_p2pdma_page(page))
+			return DMA_MAPPING_ERROR;
 		return swiotlb_map(dev, phys, size, dir, attrs);
+	}
 
 	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+		if (is_pci_p2pdma_page(page))
+			return DMA_MAPPING_ERROR;
 		if (is_swiotlb_active(dev))
 			return swiotlb_map(dev, phys, size, dir, attrs);
 
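For completeness, the matching teardown for the caller-side sketch above, again only a hypothetical example: because dma_direct_unmap_sg() now skips segments flagged as bus addresses (first hunk above), the caller unmaps the whole table uniformly and needs no P2PDMA-specific cleanup.

/* Illustrative sketch only: teardown for the mapping example above. */
static void unmap_mixed_sgt(struct device *dev, struct sg_table *sgt)
{
	/* Bus-address segments are skipped internally by dma_direct_unmap_sg(). */
	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
}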