- fix dma coherent mmap in nommu (me)
  - more AMD SEV fallout (David Rientjes, me)
  - fix alignment in dma_common_*_remap (Eric Auger)
 -----BEGIN PGP SIGNATURE-----
 
 iQI/BAABCgApFiEEgdbnc3r/njty3Iq9D55TZVIEUYMFAl72+VsLHGhjaEBsc3Qu
 ZGUACgkQD55TZVIEUYMVaw//VgQbKUfTsuCZt+ZZqIY5nd6YajexoC+X051yC7/8
 YtdGqAa2RuutoHwUhTcqzvrSsCqthNCeeZ3yBUS/SQwyoQy3szrEwNXnRboNdwgq
 xebuTOra3MIRSWJzFHL+PNQjkaGSoQroSJHEeVZOUdYchE+sNh/pZxQoPU8ImcOe
 iVB+6nDJga+CpbKVi6oaGs8EISHtYkt1yHOeAhTxlqPkmP1tvsOZFgvMQBPCq4Rz
 QlqcVilDb0fPl2pnLy1LTbgAC8yPs7phrf9KBVUqCptfTLAv1nkwI9WpX8zFmkDo
 KapepEr9bkAHcq+gNcUOSiKr3K1bMF41numZ5zi6PnEJ/bHsPEotzwf05GrKY0Ci
 vMNpWL5QIcaMECe8Q8jrelgoDK0614vp8k7U+1CXmgpyF3lf5+zXwJyYLSgcf2PI
 2ryJnnib3jYORe80VVHc76CpX5Z5Ez6IaaDP/3rNsexLW/Ip3mhwqUDEYNCvMN+P
 qYJ8GrmqGAbMrhifvxVRL0ur73kIKE2s4l7xznd7p0Nj6ToAdMYnmrKUZEhMTPD9
 UcpzK9omgT51qAsByEggT97eDYzQSqYfh0OxAUJwML/8AXa7nJVdFo9ipHCVal6x
 tEuWpAMBe9YRBDaPUgu3vf8VNagv7YCzJmLnPFS7KvYJ0siw5r6ZxdXfkE2cG9o2
 DyI=
 =qAJQ
 -----END PGP SIGNATURE-----

Merge tag 'dma-mapping-5.8-4' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:

 - fix dma coherent mmap in nommu (me)

 - more AMD SEV fallout (David Rientjes, me)

 - fix alignment in dma_common_*_remap (Eric Auger)

* tag 'dma-mapping-5.8-4' of git://git.infradead.org/users/hch/dma-mapping:
  dma-remap: align the size in dma_common_*_remap()
  dma-mapping: DMA_COHERENT_POOL should select GENERIC_ALLOCATOR
  dma-direct: add missing set_memory_decrypted() for coherent mapping
  dma-direct: check return value when encrypting or decrypting memory
  dma-direct: re-encrypt memory if dma_direct_alloc_pages() fails
  dma-direct: always align allocation size in dma_direct_alloc_pages()
  dma-direct: mark __dma_direct_alloc_pages static
  dma-direct: re-enable mmap for !CONFIG_MMU
Committed by: Linus Torvalds, 2020-06-27 13:06:22 -07:00
Parents: 4e99b32169 8e36baf97b
Commit: f05baa066d
4 changed files, 39 additions and 30 deletions
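
For readers wondering what the nommu mmap fix means in practice: drivers hand coherent DMA buffers to user space through dma_mmap_coherent(), which for dma-direct devices ends up in dma_direct_mmap(); on !CONFIG_MMU kernels that path had been stubbed out to return -ENXIO. The sketch below is not part of this merge; it is a minimal, hypothetical .mmap handler (the my_dev structure and its fields are invented) showing the call that the re-enabled path serves.

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical driver state; only the three DMA fields matter here. */
struct my_dev {
        struct device *dev;
        void *cpu_addr;         /* kernel address of the coherent buffer */
        dma_addr_t dma_addr;    /* device-visible address of the buffer */
        size_t size;
};

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct my_dev *md = file->private_data;

        /*
         * dma_mmap_coherent() reaches dma_direct_mmap() for dma-direct
         * devices; before this fix the !CONFIG_MMU stub returned -ENXIO.
         */
        return dma_mmap_coherent(md->dev, vma, md->cpu_addr, md->dma_addr,
                                 md->size);
}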

include/linux/dma-direct.h

@@ -77,8 +77,6 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
 void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
-struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
-		gfp_t gfp, unsigned long attrs);
 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs);

kernel/dma/Kconfig

@@ -71,15 +71,16 @@ config SWIOTLB
 # in the pagetables
 #
 config DMA_NONCOHERENT_MMAP
+	default y if !MMU
 	bool
 
 config DMA_COHERENT_POOL
+	select GENERIC_ALLOCATOR
 	bool
 
 config DMA_REMAP
 	bool
 	depends on MMU
-	select GENERIC_ALLOCATOR
 	select DMA_NONCOHERENT_MMAP
 
 config DMA_DIRECT_REMAP
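
A note on the GENERIC_ALLOCATOR select above: the coherent atomic pool in kernel/dma/pool.c is built on the gen_pool allocator from lib/genalloc.c, which is only compiled when GENERIC_ALLOCATOR is enabled, so DMA_COHERENT_POOL has to pull it in itself rather than relying on DMA_REMAP. The fragment below is an illustrative sketch of that style of gen_pool usage, not code from this merge; the example_* names are invented.

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/numa.h>

static struct gen_pool *example_pool;

/* Carve a preallocated coherent region into a gen_pool for later use. */
static int example_pool_init(void *vaddr, phys_addr_t phys, size_t size)
{
        example_pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
        if (!example_pool)
                return -ENOMEM;
        return gen_pool_add_virt(example_pool, (unsigned long)vaddr, phys,
                                 size, NUMA_NO_NODE);
}

/* Hand out a piece of the pool and report its physical address. */
static void *example_pool_alloc(size_t size, phys_addr_t *phys)
{
        unsigned long addr = gen_pool_alloc(example_pool, size);

        if (!addr)
                return NULL;
        *phys = gen_pool_virt_to_phys(example_pool, addr);
        return (void *)addr;
}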

kernel/dma/direct.c

@@ -109,14 +109,15 @@ static inline bool dma_should_free_from_pool(struct device *dev,
 	return false;
 }
 
-struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
+static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp, unsigned long attrs)
 {
-	size_t alloc_size = PAGE_ALIGN(size);
 	int node = dev_to_node(dev);
 	struct page *page = NULL;
 	u64 phys_limit;
 
+	WARN_ON_ONCE(!PAGE_ALIGNED(size));
+
 	if (attrs & DMA_ATTR_NO_WARN)
 		gfp |= __GFP_NOWARN;
@@ -124,14 +125,14 @@ struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 	gfp &= ~__GFP_ZERO;
 	gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
 			&phys_limit);
-	page = dma_alloc_contiguous(dev, alloc_size, gfp);
+	page = dma_alloc_contiguous(dev, size, gfp);
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
-		dma_free_contiguous(dev, page, alloc_size);
+		dma_free_contiguous(dev, page, size);
 		page = NULL;
 	}
 again:
 	if (!page)
-		page = alloc_pages_node(node, gfp, get_order(alloc_size));
+		page = alloc_pages_node(node, gfp, get_order(size));
 	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
 		dma_free_contiguous(dev, page, size);
 		page = NULL;
@@ -157,9 +158,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 {
 	struct page *page;
 	void *ret;
+	int err;
 
+	size = PAGE_ALIGN(size);
+
 	if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
-		ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
+		ret = dma_alloc_from_pool(dev, size, &page, gfp);
 		if (!ret)
 			return NULL;
 		goto done;
@@ -183,14 +187,20 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	     dma_alloc_need_uncached(dev, attrs)) ||
 	    (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
 		/* remove any dirty cache lines on the kernel alias */
-		arch_dma_prep_coherent(page, PAGE_ALIGN(size));
+		arch_dma_prep_coherent(page, size);
 
 		/* create a coherent mapping */
-		ret = dma_common_contiguous_remap(page, PAGE_ALIGN(size),
+		ret = dma_common_contiguous_remap(page, size,
 				dma_pgprot(dev, PAGE_KERNEL, attrs),
 				__builtin_return_address(0));
 		if (!ret)
 			goto out_free_pages;
+		if (force_dma_unencrypted(dev)) {
+			err = set_memory_decrypted((unsigned long)ret,
+						   1 << get_order(size));
+			if (err)
+				goto out_free_pages;
+		}
 		memset(ret, 0, size);
 		goto done;
 	}
@@ -207,8 +217,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}
 
 	ret = page_address(page);
-	if (force_dma_unencrypted(dev))
-		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
+	if (force_dma_unencrypted(dev)) {
+		err = set_memory_decrypted((unsigned long)ret,
+					   1 << get_order(size));
+		if (err)
+			goto out_free_pages;
+	}
 	memset(ret, 0, size);
@@ -217,7 +231,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		arch_dma_prep_coherent(page, size);
 		ret = arch_dma_set_uncached(ret, size);
 		if (IS_ERR(ret))
-			goto out_free_pages;
+			goto out_encrypt_pages;
 	}
 done:
 	if (force_dma_unencrypted(dev))
@@ -225,6 +239,15 @@ done:
 	else
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 	return ret;
+
+out_encrypt_pages:
+	if (force_dma_unencrypted(dev)) {
+		err = set_memory_encrypted((unsigned long)page_address(page),
+					   1 << get_order(size));
+		/* If memory cannot be re-encrypted, it must be leaked */
+		if (err)
+			return NULL;
+	}
 out_free_pages:
 	dma_free_contiguous(dev, page, size);
 	return NULL;
@@ -459,7 +482,6 @@ int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
 	return ret;
 }
 
-#ifdef CONFIG_MMU
 bool dma_direct_can_mmap(struct device *dev)
 {
 	return dev_is_dma_coherent(dev) ||
@@ -485,19 +507,6 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
 			user_count << PAGE_SHIFT, vma->vm_page_prot);
 }
-#else /* CONFIG_MMU */
-bool dma_direct_can_mmap(struct device *dev)
-{
-	return false;
-}
-
-int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
-		void *cpu_addr, dma_addr_t dma_addr, size_t size,
-		unsigned long attrs)
-{
-	return -ENXIO;
-}
-#endif /* CONFIG_MMU */
 
 int dma_direct_supported(struct device *dev, u64 mask)
 {
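
The SEV-related changes above share one pattern: every set_memory_decrypted() call has its return value checked, and when the allocation fails after memory has already been decrypted, the pages are re-encrypted before being freed, or deliberately leaked if re-encryption itself fails, since returning decrypted pages to the page allocator would be worse. Below is a simplified, self-contained sketch of that pattern; the demo_* helpers are invented, while set_memory_decrypted(), set_memory_encrypted() and the page allocator calls are the real interfaces.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/set_memory.h>

/* Allocate pages and share them (e.g. with a hypervisor) as decrypted. */
static void *demo_alloc_decrypted(size_t size)
{
        unsigned int order = get_order(size);
        struct page *page = alloc_pages(GFP_KERNEL, order);
        void *vaddr;

        if (!page)
                return NULL;
        vaddr = page_address(page);

        /* Decryption can fail, so the return value must be checked. */
        if (set_memory_decrypted((unsigned long)vaddr, 1 << order)) {
                __free_pages(page, order);
                return NULL;
        }
        return vaddr;
}

/* Re-encrypt before freeing; leak the pages if that is impossible. */
static void demo_free_decrypted(void *vaddr, size_t size)
{
        unsigned int order = get_order(size);

        if (set_memory_encrypted((unsigned long)vaddr, 1 << order))
                return; /* cannot hand decrypted pages back: leak them */
        free_pages((unsigned long)vaddr, order);
}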

kernel/dma/remap.c

@@ -24,7 +24,8 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 {
 	void *vaddr;
 
-	vaddr = vmap(pages, size >> PAGE_SHIFT, VM_DMA_COHERENT, prot);
+	vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,
+			VM_DMA_COHERENT, prot);
 	if (vaddr)
 		find_vm_area(vaddr)->pages = pages;
 	return vaddr;
@@ -37,7 +38,7 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 void *dma_common_contiguous_remap(struct page *page, size_t size,
 		pgprot_t prot, const void *caller)
 {
-	int count = size >> PAGE_SHIFT;
+	int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct page **pages;
 	void *vaddr;
 	int i;
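
The remap fix matters because callers may pass a size that is not a multiple of PAGE_SIZE: size >> PAGE_SHIFT rounds down, so vmap() could cover one page too few, while PAGE_ALIGN(size) >> PAGE_SHIFT rounds up to cover the whole buffer. A tiny illustration, assuming 4 KiB pages, with invented helper names:

#include <linux/mm.h>   /* PAGE_ALIGN, PAGE_SHIFT */

/* For size = 6144 (6 KiB): 6144 >> 12 == 1, one page short. */
static unsigned long pages_old(size_t size)
{
        return size >> PAGE_SHIFT;
}

/* For size = 6144: PAGE_ALIGN(6144) == 8192, and 8192 >> 12 == 2 pages. */
static unsigned long pages_fixed(size_t size)
{
        return PAGE_ALIGN(size) >> PAGE_SHIFT;
}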