When an IOMMU device is available on the platform bus, allocate an IOMMU
domain and attach the display controllers to it. The display controllers
can then scan out non-contiguous buffers by mapping them through the
IOMMU.

Signed-off-by: Thierry Reding <treding@nvidia.com>
Thierry Reding 2014-06-26 21:41:53 +02:00
Parent 1d1e6fe9b5
Commit df06b759f2
6 changed files with 309 additions and 34 deletions
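For reference, the attach-and-map flow the commit message describes maps onto the kernel's generic IOMMU API roughly as below. This is an illustrative sketch only, not part of the patch; the device pointer, physical address, I/O virtual address, and size are placeholder parameters. The patch itself does the same thing per display controller and per buffer, using drm_mm to manage the I/O virtual address space.

/*
 * Illustrative sketch (not part of this patch): allocate an IOMMU domain
 * for the platform bus, attach a device to it, and map one physically
 * contiguous region at a chosen I/O virtual address.
 */
#include <linux/iommu.h>
#include <linux/platform_device.h>

static int example_attach_and_map(struct device *dev, phys_addr_t phys,
				  unsigned long iova, size_t size)
{
	struct iommu_domain *domain;
	int err;

	if (!iommu_present(&platform_bus_type))
		return -ENODEV;

	/* note: iommu_domain_alloc() returns NULL on failure, not ERR_PTR() */
	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	err = iommu_attach_device(domain, dev);
	if (err < 0)
		goto free;

	/* map [phys, phys + size) at [iova, iova + size) */
	err = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE);
	if (err < 0)
		goto detach;

	return 0;

detach:
	iommu_detach_device(domain, dev);
free:
	iommu_domain_free(domain);
	return err;
}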

drivers/gpu/drm/tegra/dc.c

@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/iommu.h>
#include <linux/reset.h>
#include <soc/tegra/pmc.h>
@@ -1290,6 +1291,17 @@ static int tegra_dc_init(struct host1x_client *client)
struct tegra_drm *tegra = drm->dev_private;
int err;
if (tegra->domain) {
err = iommu_attach_device(tegra->domain, dc->dev);
if (err < 0) {
dev_err(dc->dev, "failed to attach to domain: %d\n",
err);
return err;
}
dc->domain = tegra->domain;
}
drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
drm_mode_crtc_set_gamma_size(&dc->base, 256);
drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
@@ -1347,6 +1359,11 @@ static int tegra_dc_exit(struct host1x_client *client)
return err;
}
if (dc->domain) {
iommu_detach_device(dc->domain, dc->dev);
dc->domain = NULL;
}
return 0;
}

drivers/gpu/drm/tegra/drm.c

@@ -8,6 +8,7 @@
*/
#include <linux/host1x.h>
#include <linux/iommu.h>
#include "drm.h"
#include "gem.h"
@@ -33,6 +34,17 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
if (!tegra)
return -ENOMEM;
if (iommu_present(&platform_bus_type)) {
tegra->domain = iommu_domain_alloc(&platform_bus_type);
		/* iommu_domain_alloc() returns NULL on failure, not ERR_PTR() */
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}
DRM_DEBUG("IOMMU context initialized\n");
drm_mm_init(&tegra->mm, 0, SZ_2G);
}
mutex_init(&tegra->clients_lock);
INIT_LIST_HEAD(&tegra->clients);
drm->dev_private = tegra;
@@ -76,6 +88,12 @@ fbdev:
tegra_drm_fb_free(drm);
config:
drm_mode_config_cleanup(drm);
if (tegra->domain) {
iommu_domain_free(tegra->domain);
drm_mm_takedown(&tegra->mm);
}
free:
kfree(tegra);
return err;
}
@@ -83,6 +101,7 @@ config:
static int tegra_drm_unload(struct drm_device *drm)
{
struct host1x_device *device = to_host1x_device(drm->dev);
struct tegra_drm *tegra = drm->dev_private;
int err;
drm_kms_helper_poll_fini(drm);
@@ -94,6 +113,11 @@ static int tegra_drm_unload(struct drm_device *drm)
if (err < 0)
return err;
if (tegra->domain) {
iommu_domain_free(tegra->domain);
drm_mm_takedown(&tegra->mm);
}
return 0;
}

drivers/gpu/drm/tegra/drm.h

@@ -39,6 +39,9 @@ struct tegra_fbdev {
struct tegra_drm {
struct drm_device *drm;
struct iommu_domain *domain;
struct drm_mm mm;
struct mutex clients_lock;
struct list_head clients;
@@ -121,6 +124,8 @@ struct tegra_dc {
struct drm_pending_vblank_event *event;
const struct tegra_dc_soc_info *soc;
struct iommu_domain *domain;
};
static inline struct tegra_dc *

drivers/gpu/drm/tegra/fb.c

@@ -65,8 +65,12 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
for (i = 0; i < fb->num_planes; i++) {
struct tegra_bo *bo = fb->planes[i];
-		if (bo)
+		if (bo) {
+			if (bo->pages && bo->vaddr)
+				vunmap(bo->vaddr);
+
			drm_gem_object_unreference_unlocked(&bo->gem);
+		}
}
drm_framebuffer_cleanup(framebuffer);
@@ -254,6 +258,16 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
offset = info->var.xoffset * bytes_per_pixel +
info->var.yoffset * fb->pitches[0];
if (bo->pages) {
bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,
pgprot_writecombine(PAGE_KERNEL));
if (!bo->vaddr) {
dev_err(drm->dev, "failed to vmap() framebuffer\n");
err = -ENOMEM;
goto destroy;
}
}
drm->mode_config.fb_base = (resource_size_t)bo->paddr;
info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;

drivers/gpu/drm/tegra/gem.c

@@ -14,6 +14,7 @@
*/
#include <linux/dma-buf.h>
#include <linux/iommu.h>
#include <drm/tegra_drm.h>
#include "drm.h"
@@ -91,6 +92,88 @@ static const struct host1x_bo_ops tegra_bo_ops = {
.kunmap = tegra_bo_kunmap,
};
/*
* A generic iommu_map_sg() function is being reviewed and will hopefully be
* merged soon. At that point this function can be dropped in favour of the
* one provided by the IOMMU API.
*/
static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents,
int prot)
{
struct scatterlist *s;
size_t offset = 0;
unsigned int i;
int err;
for_each_sg(sg, s, nents, i) {
phys_addr_t phys = page_to_phys(sg_page(s));
size_t length = s->offset + s->length;
err = iommu_map(domain, iova + offset, phys, length, prot);
if (err < 0) {
iommu_unmap(domain, iova, offset);
return err;
}
offset += length;
}
return offset;
}
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
int prot = IOMMU_READ | IOMMU_WRITE;
ssize_t err;
if (bo->mm)
return -EBUSY;
bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
if (!bo->mm)
return -ENOMEM;
err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
PAGE_SIZE, 0, 0, 0);
if (err < 0) {
dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
err);
goto free;
}
bo->paddr = bo->mm->start;
err = __iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
bo->sgt->nents, prot);
if (err < 0) {
dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
goto remove;
}
bo->size = err;
return 0;
remove:
drm_mm_remove_node(bo->mm);
free:
kfree(bo->mm);
return err;
}
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
if (!bo->mm)
return 0;
iommu_unmap(tegra->domain, bo->paddr, bo->size);
drm_mm_remove_node(bo->mm);
kfree(bo->mm);
return 0;
}
static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
size_t size)
{
@@ -121,9 +204,64 @@ free:
return ERR_PTR(err);
}
-static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
+static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
-	dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
+	if (bo->pages) {
+		drm_gem_put_pages(&bo->gem, bo->pages, true, true);
+		sg_free_table(bo->sgt);
+		kfree(bo->sgt);
+	} else {
+		dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
+				      bo->paddr);
+	}
}
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
size_t size)
{
bo->pages = drm_gem_get_pages(&bo->gem);
if (IS_ERR(bo->pages))
return PTR_ERR(bo->pages);
bo->num_pages = size >> PAGE_SHIFT;
bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
if (IS_ERR(bo->sgt)) {
drm_gem_put_pages(&bo->gem, bo->pages, false, false);
return PTR_ERR(bo->sgt);
}
return 0;
}
static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
size_t size)
{
struct tegra_drm *tegra = drm->dev_private;
int err;
if (tegra->domain) {
err = tegra_bo_get_pages(drm, bo, size);
if (err < 0)
return err;
err = tegra_bo_iommu_map(tegra, bo);
if (err < 0) {
tegra_bo_free(drm, bo);
return err;
}
} else {
bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
dev_err(drm->dev,
"failed to allocate buffer of size %zu\n",
size);
return -ENOMEM;
}
}
return 0;
}
struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
@@ -136,14 +274,9 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
if (IS_ERR(bo))
return bo;
-	bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
-					   GFP_KERNEL | __GFP_NOWARN);
-	if (!bo->vaddr) {
-		dev_err(drm->dev, "failed to allocate buffer with size %u\n",
-			size);
-		err = -ENOMEM;
-		goto err_dma;
-	}
+	err = tegra_bo_alloc(drm, bo, size);
+	if (err < 0)
+		goto release;
if (flags & DRM_TEGRA_GEM_CREATE_TILED)
bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
@@ -153,9 +286,9 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
return bo;
-err_dma:
+release:
drm_gem_object_release(&bo->gem);
kfree(bo);
return ERR_PTR(err);
}
@@ -186,6 +319,7 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
struct dma_buf *buf)
{
struct tegra_drm *tegra = drm->dev_private;
struct dma_buf_attachment *attach;
struct tegra_bo *bo;
int err;
@@ -213,12 +347,19 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
goto detach;
}
-	if (bo->sgt->nents > 1) {
-		err = -EINVAL;
-		goto detach;
+	if (tegra->domain) {
+		err = tegra_bo_iommu_map(tegra, bo);
+		if (err < 0)
+			goto detach;
+	} else {
+		if (bo->sgt->nents > 1) {
+			err = -EINVAL;
+			goto detach;
+		}
+
+		bo->paddr = sg_dma_address(bo->sgt->sgl);
	}
-	bo->paddr = sg_dma_address(bo->sgt->sgl);
bo->gem.import_attach = attach;
return bo;
@@ -237,14 +378,18 @@ free:
void tegra_bo_free_object(struct drm_gem_object *gem)
{
struct tegra_drm *tegra = gem->dev->dev_private;
struct tegra_bo *bo = to_tegra_bo(gem);
if (tegra->domain)
tegra_bo_iommu_unmap(tegra, bo);
if (gem->import_attach) {
dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
DMA_TO_DEVICE);
drm_prime_gem_destroy(gem, NULL);
} else {
tegra_bo_destroy(gem->dev, bo);
tegra_bo_free(gem->dev, bo);
}
drm_gem_object_release(gem);
@@ -299,14 +444,44 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
return 0;
}
static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *gem = vma->vm_private_data;
struct tegra_bo *bo = to_tegra_bo(gem);
struct page *page;
pgoff_t offset;
int err;
if (!bo->pages)
return VM_FAULT_SIGBUS;
offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
page = bo->pages[offset];
err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
switch (err) {
case -EAGAIN:
case 0:
case -ERESTARTSYS:
case -EINTR:
case -EBUSY:
return VM_FAULT_NOPAGE;
case -ENOMEM:
return VM_FAULT_OOM;
}
return VM_FAULT_SIGBUS;
}
const struct vm_operations_struct tegra_bo_vm_ops = {
.fault = tegra_bo_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
{
-	unsigned long vm_pgoff = vma->vm_pgoff;
struct drm_gem_object *gem;
struct tegra_bo *bo;
int ret;
@@ -318,18 +493,29 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
gem = vma->vm_private_data;
bo = to_tegra_bo(gem);
-	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_pgoff = 0;
+	if (!bo->pages) {
+		unsigned long vm_pgoff = vma->vm_pgoff;
-	ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr, bo->paddr,
-				    gem->size);
-	if (ret) {
-		drm_gem_vm_close(vma);
-		return ret;
+		vma->vm_flags &= ~VM_PFNMAP;
+		vma->vm_pgoff = 0;
+
+		ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
+					    bo->paddr, gem->size);
+		if (ret) {
+			drm_gem_vm_close(vma);
+			return ret;
+		}
+
+		vma->vm_pgoff = vm_pgoff;
+	} else {
+		pgprot_t prot = vm_get_page_prot(vma->vm_flags);
+
+		vma->vm_flags |= VM_MIXEDMAP;
+		vma->vm_flags &= ~VM_PFNMAP;
+
+		vma->vm_page_prot = pgprot_writecombine(prot);
	}
-	vma->vm_pgoff = vm_pgoff;
return 0;
}
@@ -345,21 +531,44 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
if (!sgt)
return NULL;
-	if (sg_alloc_table(sgt, 1, GFP_KERNEL)) {
-		kfree(sgt);
-		return NULL;
+	if (bo->pages) {
+		struct scatterlist *sg;
+		unsigned int i;
+
+		if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
+			goto free;
+
+		for_each_sg(sgt->sgl, sg, bo->num_pages, i)
+			sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
+
+		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
+			goto free;
+	} else {
+		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
+			goto free;
+
+		sg_dma_address(sgt->sgl) = bo->paddr;
+		sg_dma_len(sgt->sgl) = gem->size;
	}
-	sg_dma_address(sgt->sgl) = bo->paddr;
-	sg_dma_len(sgt->sgl) = gem->size;
return sgt;
free:
sg_free_table(sgt);
kfree(sgt);
return NULL;
}
static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
struct drm_gem_object *gem = attach->dmabuf->priv;
struct tegra_bo *bo = to_tegra_bo(gem);
if (bo->pages)
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
sg_free_table(sgt);
kfree(sgt);
}

drivers/gpu/drm/tegra/gem.h

@@ -38,6 +38,12 @@ struct tegra_bo {
dma_addr_t paddr;
void *vaddr;
struct drm_mm_node *mm;
unsigned long num_pages;
struct page **pages;
/* size of IOMMU mapping */
size_t size;
struct tegra_bo_tiling tiling;
};