xen/gntdev: Fix the abuse of underlying struct page in DMA-buf import
[ Upstream commit 2d2db7d40254d5fb53b11ebd703cd1ed0c5de7a1 ]

DO NOT access the underlying struct page of an sg table exported
by DMA-buf in dmabuf_imp_to_refs(), this is not allowed.
Please see drivers/dma-buf/dma-buf.c:mangle_sg_table() for details.

Fortunately, here (for special Xen device) we can avoid using pages
and calculate gfns directly from dma addresses provided by the sg table.

Suggested-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Oleksandr Tyshchenko <oleksandr_tyshchenko@epam.com>
Acked-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Acked-by: Daniel Vetter <daniel@ffwll.ch>
Link: https://lore.kernel.org/r/20240107103426.2038075-1-olekstysh@gmail.com
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Parent: 9b7d253b82
Commit: f7752e5f53
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
@@ -56,7 +57,7 @@ struct gntdev_dmabuf {
 
 	/* Number of pages this buffer has. */
 	int nr_pages;
-	/* Pages of this buffer. */
+	/* Pages of this buffer (only for dma-buf export). */
 	struct page **pages;
 };
 
@@ -490,7 +491,7 @@ out:
 /* DMA buffer import support. */
 
 static int
-dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
+dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
 				int count, int domid)
 {
 	grant_ref_t priv_gref_head;
@@ -513,7 +514,7 @@ dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
 		}
 
 		gnttab_grant_foreign_access_ref(cur_ref, domid,
-						xen_page_to_gfn(pages[i]), 0);
+						gfns[i], 0);
 		refs[i] = cur_ref;
 	}
 
@@ -535,7 +536,6 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
 
 static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
 {
-	kfree(gntdev_dmabuf->pages);
 	kfree(gntdev_dmabuf->u.imp.refs);
 	kfree(gntdev_dmabuf);
 }
@@ -555,12 +555,6 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
 	if (!gntdev_dmabuf->u.imp.refs)
 		goto fail;
 
-	gntdev_dmabuf->pages = kcalloc(count,
-				       sizeof(gntdev_dmabuf->pages[0]),
-				       GFP_KERNEL);
-	if (!gntdev_dmabuf->pages)
-		goto fail;
-
 	gntdev_dmabuf->nr_pages = count;
 
 	for (i = 0; i < count; i++)
@@ -582,7 +576,8 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
 	struct dma_buf *dma_buf;
 	struct dma_buf_attachment *attach;
 	struct sg_table *sgt;
-	struct sg_page_iter sg_iter;
+	struct sg_dma_page_iter sg_iter;
+	unsigned long *gfns;
 	int i;
 
 	dma_buf = dma_buf_get(fd);
@@ -630,26 +625,31 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
 
 	gntdev_dmabuf->u.imp.sgt = sgt;
 
-	/* Now convert sgt to array of pages and check for page validity. */
+	gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
+	if (!gfns) {
+		ret = ERR_PTR(-ENOMEM);
+		goto fail_unmap;
+	}
+
+	/*
+	 * Now convert sgt to array of gfns without accessing underlying pages.
+	 * It is not allowed to access the underlying struct page of an sg table
+	 * exported by DMA-buf, but since we deal with special Xen dma device here
+	 * (not a normal physical one) look at the dma addresses in the sg table
+	 * and then calculate gfns directly from them.
+	 */
 	i = 0;
-	for_each_sgtable_page(sgt, &sg_iter, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-		/*
-		 * Check if page is valid: this can happen if we are given
-		 * a page from VRAM or other resources which are not backed
-		 * by a struct page.
-		 */
-		if (!pfn_valid(page_to_pfn(page))) {
-			ret = ERR_PTR(-EINVAL);
-			goto fail_unmap;
-		}
-
-		gntdev_dmabuf->pages[i++] = page;
+	for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
+		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
+		unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
+
+		gfns[i++] = pfn_to_gfn(pfn);
 	}
 
-	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
+	ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
 						      gntdev_dmabuf->u.imp.refs,
 						      count, domid));
+	kfree(gfns);
 	if (IS_ERR(ret))
 		goto fail_end_access;
 
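For reference, the heart of the change is the last hunk: instead of collecting struct page pointers, which drivers/dma-buf/dma-buf.c:mangle_sg_table() deliberately scrambles to catch exactly this kind of abuse, the import path now reads the DMA addresses out of the sg table and converts each one to a gfn. The sketch below pulls that loop into a standalone helper purely to make the conversion chain easier to follow; the helper name dmabuf_imp_sgt_to_gfns() and its exact shape are illustrative and not part of the patch, while dma_to_phys(), XEN_PFN_DOWN(), bfn_to_pfn() and pfn_to_gfn() are the same kernel/Xen helpers used in the hunk above.

/*
 * Illustrative sketch only (not part of the patch): a hypothetical helper
 * mirroring the loop added above. It walks the DMA addresses of an imported
 * sg table and derives Xen gfns without ever touching the underlying
 * struct page.
 */
#include <linux/device.h>
#include <linux/dma-direct.h>	/* dma_to_phys() */
#include <linux/scatterlist.h>	/* for_each_sgtable_dma_page() */
#include <xen/page.h>		/* XEN_PFN_DOWN(), bfn_to_pfn(), pfn_to_gfn() */

static void dmabuf_imp_sgt_to_gfns(struct device *dev, struct sg_table *sgt,
				   unsigned long *gfns)
{
	struct sg_dma_page_iter sg_iter;
	int i = 0;

	for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
		/* DMA (device) address of this page of the mapping. */
		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
		/*
		 * DMA address -> physical address -> frame number, then
		 * treat it as a bus frame and translate to a local pfn.
		 */
		unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));

		/* Guest frame number later granted to the foreign domain. */
		gfns[i++] = pfn_to_gfn(pfn);
	}
}

The caller allocates the gfns array sized to the page count of the buffer, hands it to dmabuf_imp_grant_foreign_access() and frees it right after, as the patched dmabuf_imp_to_refs() does.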