drm/ttm: merge ttm_dma_tt back into ttm_tt
It makes no difference to kmalloc if the structure is 48 or 64 bytes in size.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/396950/
Parent: 230c079fdc
Commit: e34b8feeaa
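The size argument is easy to sanity-check: kmalloc() rounds every request up to the next slab size class (8, 16, 32, 64, 96, 128, ... on a default SLUB configuration), so a 48-byte and a 64-byte struct both come out of the same kmalloc-64 cache. Below is a standalone sketch of that rounding, a simplified stand-in for the kernel's kmalloc_index() rather than code from this patch:

#include <stdio.h>
#include <stddef.h>

/* Simplified mirror of SLUB's default kmalloc size classes:
 * 8, 16, 32, 64, 96, 128, 192, 256, 512, ...
 * (assumption: default config, no slab debugging or redzones)
 */
static size_t kmalloc_bucket(size_t size)
{
	const size_t classes[] = { 8, 16, 32, 64, 96, 128, 192, 256 };
	size_t i, b;

	for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++)
		if (size <= classes[i])
			return classes[i];
	for (b = 512; b < size; b <<= 1)
		;
	return b;
}

int main(void)
{
	/* The old ttm_tt (~48 bytes) and the merged one (~64 bytes)
	 * land in the same kmalloc-64 cache, so the merge costs no
	 * memory per object in practice.
	 */
	printf("48 bytes -> kmalloc-%zu\n", kmalloc_bucket(48)); /* 64 */
	printf("64 bytes -> kmalloc-%zu\n", kmalloc_bucket(64)); /* 64 */
	return 0;
}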
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -45,12 +45,10 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
 			       uint64_t *addr, uint64_t *flags)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct ttm_dma_tt *ttm;
 
 	switch (bo->tbo.mem.mem_type) {
 	case TTM_PL_TT:
-		ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
-		*addr = ttm->dma_address[0];
+		*addr = bo->tbo.ttm->dma_address[0];
 		break;
 	case TTM_PL_VRAM:
 		*addr = amdgpu_bo_gpu_offset(bo);
@@ -122,16 +120,14 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
 uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
-	struct ttm_dma_tt *ttm;
 
 	if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached)
 		return AMDGPU_BO_INVALID_OFFSET;
 
-	ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
-	if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
+	if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
 		return AMDGPU_BO_INVALID_OFFSET;
 
-	return adev->gmc.agp_start + ttm->dma_address[0];
+	return adev->gmc.agp_start + bo->ttm->dma_address[0];
 }
 
 /**
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -294,11 +294,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	cpu_addr = &job->ibs[0].ptr[num_dw];
 
 	if (mem->mem_type == TTM_PL_TT) {
-		struct ttm_dma_tt *dma;
 		dma_addr_t *dma_address;
 
-		dma = container_of(bo->ttm, struct ttm_dma_tt, ttm);
-		dma_address = &dma->dma_address[offset >> PAGE_SHIFT];
+		dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
 		r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
 				    cpu_addr);
 		if (r)
@@ -841,7 +839,7 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type)
  * TTM backend functions.
  */
 struct amdgpu_ttm_tt {
-	struct ttm_dma_tt	ttm;
+	struct ttm_tt	ttm;
 	struct drm_gem_object	*gobj;
 	u64			offset;
 	uint64_t		userptr;
@@ -1292,7 +1290,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
 	r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages);
 	if (r)
 		DRM_ERROR("failed to unbind %u pages at 0x%08llX\n",
-			  gtt->ttm.ttm.num_pages, gtt->offset);
+			  gtt->ttm.num_pages, gtt->offset);
 	gtt->bound = false;
 }
 
@@ -1306,7 +1304,7 @@ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
 	if (gtt->usertask)
 		put_task_struct(gtt->usertask);
 
-	ttm_dma_tt_fini(&gtt->ttm);
+	ttm_tt_fini(&gtt->ttm);
 	kfree(gtt);
 }
 
@@ -1340,7 +1338,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
 		kfree(gtt);
 		return NULL;
 	}
-	return &gtt->ttm.ttm;
+	return &gtt->ttm;
 }
 
 /**
@@ -1507,7 +1505,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 	/* Return false if no part of the ttm_tt object lies within
 	 * the range
 	 */
-	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+	size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE;
 	if (gtt->userptr > end || gtt->userptr + size <= start)
 		return false;
 
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -1781,7 +1781,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		resv = vm->root.base.bo->tbo.base.resv;
 	} else {
 		struct drm_gem_object *obj = &bo->tbo.base;
-		struct ttm_dma_tt *ttm;
 
 		resv = bo->tbo.base.resv;
 		if (obj->import_attach && bo_va->is_xgmi) {
@@ -1794,10 +1793,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 		}
 		mem = &bo->tbo.mem;
 		nodes = mem->mm_node;
-		if (mem->mem_type == TTM_PL_TT) {
-			ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
-			pages_addr = ttm->dma_address;
-		}
+		if (mem->mem_type == TTM_PL_TT)
+			pages_addr = bo->tbo.ttm->dma_address;
 	}
 
 	if (bo) {
drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -547,7 +547,7 @@ void
 nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
 	int i;
 
 	if (!ttm_dma)
@@ -557,7 +557,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
 	if (nvbo->force_coherent)
 		return;
 
-	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+	for (i = 0; i < ttm_dma->num_pages; i++)
 		dma_sync_single_for_device(drm->dev->dev,
 					   ttm_dma->dma_address[i],
 					   PAGE_SIZE, DMA_TO_DEVICE);
@@ -567,7 +567,7 @@ void
 nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 {
 	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
-	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
+	struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm;
 	int i;
 
 	if (!ttm_dma)
@@ -577,7 +577,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
 	if (nvbo->force_coherent)
 		return;
 
-	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
+	for (i = 0; i < ttm_dma->num_pages; i++)
 		dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i],
 					PAGE_SIZE, DMA_FROM_DEVICE);
 }
@@ -1309,7 +1309,7 @@ static int
 nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
 			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-	struct ttm_dma_tt *ttm_dma = (void *)ttm;
+	struct ttm_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
 	struct device *dev;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1345,7 +1345,7 @@ static void
 nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
 			  struct ttm_tt *ttm)
 {
-	struct ttm_dma_tt *ttm_dma = (void *)ttm;
+	struct ttm_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
 	struct device *dev;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -92,7 +92,7 @@ nouveau_mem_fini(struct nouveau_mem *mem)
 }
 
 int
-nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt)
+nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt)
 {
 	struct nouveau_mem *mem = nouveau_mem(reg);
 	struct nouveau_cli *cli = mem->cli;
@@ -116,8 +116,10 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt)
 		mem->comp = 0;
 	}
 
-	if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl;
-	else            args.dma = tt->dma_address;
+	if (tt->sg)
+		args.sgl = tt->sg->sgl;
+	else
+		args.dma = tt->dma_address;
 
 	mutex_lock(&drm->master.lock);
 	cli->base.super = true;
drivers/gpu/drm/nouveau/nouveau_mem.h
@@ -1,7 +1,7 @@
 #ifndef __NOUVEAU_MEM_H__
 #define __NOUVEAU_MEM_H__
 #include <drm/ttm/ttm_bo_api.h>
-struct ttm_dma_tt;
+struct ttm_tt;
 
 #include <nvif/mem.h>
 #include <nvif/vmm.h>
@@ -24,7 +24,7 @@ int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp,
 		    struct ttm_resource *);
 void nouveau_mem_del(struct ttm_resource *);
 int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page);
-int nouveau_mem_host(struct ttm_resource *, struct ttm_dma_tt *);
+int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *);
 void nouveau_mem_fini(struct nouveau_mem *);
 int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *);
 #endif
drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -11,7 +11,7 @@ struct nouveau_sgdma_be {
 	/* this has to be the first field so populate/unpopulated in
	 * nouve_bo.c works properly, otherwise have to move them here
	 */
-	struct ttm_dma_tt ttm;
+	struct ttm_tt ttm;
 	struct nouveau_mem *mem;
 };
 
@@ -23,7 +23,7 @@ nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 	if (ttm) {
 		nouveau_sgdma_unbind(bdev, ttm);
 		ttm_tt_destroy_common(bdev, ttm);
-		ttm_dma_tt_fini(&nvbe->ttm);
+		ttm_tt_fini(&nvbe->ttm);
 		kfree(nvbe);
 	}
 }
@@ -88,5 +88,5 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags)
 		kfree(nvbe);
 		return NULL;
 	}
-	return &nvbe->ttm.ttm;
+	return &nvbe->ttm;
 }
drivers/gpu/drm/qxl/qxl_ttm.c
@@ -116,7 +116,7 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
 	ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
 	if (ttm == NULL)
 		return NULL;
-	if (ttm_tt_init(ttm, bo, page_flags, ttm_cached)) {
+	if (ttm_dma_tt_init(ttm, bo, page_flags, ttm_cached)) {
 		kfree(ttm);
 		return NULL;
 	}
drivers/gpu/drm/radeon/radeon_ttm.c
@@ -437,7 +437,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
  * TTM backend functions.
  */
 struct radeon_ttm_tt {
-	struct ttm_dma_tt		ttm;
+	struct ttm_tt		ttm;
 	u64				offset;
 
 	uint64_t			userptr;
@@ -602,7 +602,7 @@ static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 	radeon_ttm_backend_unbind(bdev, ttm);
 	ttm_tt_destroy_common(bdev, ttm);
 
-	ttm_dma_tt_fini(&gtt->ttm);
+	ttm_tt_fini(&gtt->ttm);
 	kfree(gtt);
 }
 
@@ -640,7 +640,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
 		kfree(gtt);
 		return NULL;
 	}
-	return &gtt->ttm.ttm;
+	return &gtt->ttm;
 }
 
 static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
@@ -653,7 +653,7 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
 
 	if (!ttm)
 		return NULL;
-	return container_of(ttm, struct radeon_ttm_tt, ttm.ttm);
+	return container_of(ttm, struct radeon_ttm_tt, ttm);
 }
 
 static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
drivers/gpu/drm/ttm/ttm_bo.c
@@ -1192,7 +1192,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
 
 	size += ttm_round_pot(struct_size);
 	size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t)));
-	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+	size += ttm_round_pot(sizeof(struct ttm_tt));
 	return size;
 }
 EXPORT_SYMBOL(ttm_bo_dma_acc_size);
drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1081,28 +1081,28 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
 }
 EXPORT_SYMBOL(ttm_pool_unpopulate);
 
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt,
 			       struct ttm_operation_ctx *ctx)
 {
 	unsigned i, j;
 	int r;
 
-	r = ttm_pool_populate(&tt->ttm, ctx);
+	r = ttm_pool_populate(tt, ctx);
 	if (r)
 		return r;
 
-	for (i = 0; i < tt->ttm.num_pages; ++i) {
-		struct page *p = tt->ttm.pages[i];
+	for (i = 0; i < tt->num_pages; ++i) {
+		struct page *p = tt->pages[i];
 		size_t num_pages = 1;
 
-		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
-			if (++p != tt->ttm.pages[j])
+		for (j = i + 1; j < tt->num_pages; ++j) {
+			if (++p != tt->pages[j])
 				break;
 
 			++num_pages;
 		}
 
-		tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
+		tt->dma_address[i] = dma_map_page(dev, tt->pages[i],
 						  0, num_pages * PAGE_SIZE,
 						  DMA_BIDIRECTIONAL);
 		if (dma_mapping_error(dev, tt->dma_address[i])) {
@@ -1111,7 +1111,7 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
 					       PAGE_SIZE, DMA_BIDIRECTIONAL);
 				tt->dma_address[i] = 0;
 			}
-			ttm_pool_unpopulate(&tt->ttm);
+			ttm_pool_unpopulate(tt);
 			return -EFAULT;
 		}
 
@@ -1124,21 +1124,21 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
 }
 EXPORT_SYMBOL(ttm_populate_and_map_pages);
 
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt)
 {
 	unsigned i, j;
 
-	for (i = 0; i < tt->ttm.num_pages;) {
-		struct page *p = tt->ttm.pages[i];
+	for (i = 0; i < tt->num_pages;) {
+		struct page *p = tt->pages[i];
 		size_t num_pages = 1;
 
-		if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
+		if (!tt->dma_address[i] || !tt->pages[i]) {
 			++i;
 			continue;
 		}
 
-		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
-			if (++p != tt->ttm.pages[j])
+		for (j = i + 1; j < tt->num_pages; ++j) {
+			if (++p != tt->pages[j])
 				break;
 
 			++num_pages;
@@ -1149,7 +1149,7 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
 
 		i += num_pages;
 	}
-	ttm_pool_unpopulate(&tt->ttm);
+	ttm_pool_unpopulate(tt);
 }
 EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
 
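One detail worth calling out in ttm_populate_and_map_pages() above: the inner j loop walks forward while the struct page pointers stay adjacent, so each physically contiguous run is handed to dma_map_page() as a single larger mapping instead of one mapping per page. A standalone sketch of that coalescing idea, with plain integers standing in for struct page pointers (not kernel code):

#include <stdio.h>
#include <stddef.h>

static size_t run_length(const int *pfns, size_t i, size_t n)
{
	size_t len = 1;

	/* pfns[] stands in for the struct page * array; consecutive
	 * page frame numbers mean physically contiguous memory. */
	while (i + len < n && pfns[i + len] == pfns[i + len - 1] + 1)
		len++;
	return len;
}

int main(void)
{
	int pfns[] = { 10, 11, 12, 40, 41, 99 };
	size_t n = sizeof(pfns) / sizeof(pfns[0]);

	for (size_t i = 0; i < n; ) {
		size_t len = run_length(pfns, i, n);

		/* one dma_map_page() of len * PAGE_SIZE in the real code */
		printf("map %zu page(s) starting at pfn %d\n", len, pfns[i]);
		i += len;
	}
	return 0;
}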
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -832,11 +832,10 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
  * return dma_page pointer if success, otherwise NULL.
  */
 static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
-					       struct ttm_dma_tt *ttm_dma,
+					       struct ttm_tt *ttm,
 					       unsigned index)
 {
 	struct dma_page *d_page = NULL;
-	struct ttm_tt *ttm = &ttm_dma->ttm;
 	unsigned long irq_flags;
 	int count;
 
@@ -845,8 +844,8 @@ static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
 	if (count) {
 		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
 		ttm->pages[index] = d_page->p;
-		ttm_dma->dma_address[index] = d_page->dma;
-		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+		ttm->dma_address[index] = d_page->dma;
+		list_move_tail(&d_page->page_list, &ttm->pages_list);
 		pool->npages_in_use += 1;
 		pool->npages_free -= 1;
 	}
@@ -854,9 +853,8 @@ static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool,
 	return d_page;
 }
 
-static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
+static gfp_t ttm_dma_pool_gfp_flags(struct ttm_tt *ttm, bool huge)
 {
-	struct ttm_tt *ttm = &ttm_dma->ttm;
 	gfp_t gfp_flags;
 
 	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
@@ -883,11 +881,10 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
  * On success pages list will hold count number of correctly
 * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
 */
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev,
 		     struct ttm_operation_ctx *ctx)
 {
 	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-	struct ttm_tt *ttm = &ttm_dma->ttm;
 	unsigned long num_pages = ttm->num_pages;
 	struct dma_pool *pool;
 	struct dma_page *d_page;
@@ -901,7 +898,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx))
 		return -ENOMEM;
 
-	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	INIT_LIST_HEAD(&ttm->pages_list);
 	i = 0;
 
 	type = ttm_to_type(ttm->page_flags, ttm->caching);
@@ -912,7 +909,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 
 	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
 	if (!pool) {
-		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true);
+		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, true);
 
 		pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE);
 		if (IS_ERR_OR_NULL(pool))
@@ -922,21 +919,21 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 	while (num_pages >= HPAGE_PMD_NR) {
 		unsigned j;
 
-		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+		d_page = ttm_dma_pool_get_pages(pool, ttm, i);
 		if (!d_page)
 			break;
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						pool->size, ctx);
 		if (unlikely(ret != 0)) {
-			ttm_dma_unpopulate(ttm_dma, dev);
+			ttm_dma_unpopulate(ttm, dev);
 			return -ENOMEM;
 		}
 
 		d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT;
 		for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) {
 			ttm->pages[j] = ttm->pages[j - 1] + 1;
-			ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] +
+			ttm->dma_address[j] = ttm->dma_address[j - 1] +
 				PAGE_SIZE;
 		}
 
@@ -949,7 +946,7 @@ skip_huge:
 
 	pool = ttm_dma_find_pool(dev, type);
 	if (!pool) {
-		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false);
+		gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, false);
 
 		pool = ttm_dma_pool_init(dev, gfp_flags, type);
 		if (IS_ERR_OR_NULL(pool))
@@ -957,16 +954,16 @@ skip_huge:
 	}
 
 	while (num_pages) {
-		d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+		d_page = ttm_dma_pool_get_pages(pool, ttm, i);
 		if (!d_page) {
-			ttm_dma_unpopulate(ttm_dma, dev);
+			ttm_dma_unpopulate(ttm, dev);
 			return -ENOMEM;
 		}
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
 						pool->size, ctx);
 		if (unlikely(ret != 0)) {
-			ttm_dma_unpopulate(ttm_dma, dev);
+			ttm_dma_unpopulate(ttm, dev);
 			return -ENOMEM;
 		}
 
@@ -980,10 +977,9 @@ skip_huge:
 EXPORT_SYMBOL_GPL(ttm_dma_populate);
 
 /* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev)
 {
 	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
-	struct ttm_tt *ttm = &ttm_dma->ttm;
 	struct dma_pool *pool;
 	struct dma_page *d_page, *next;
 	enum pool_type type;
@@ -997,7 +993,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	pool = ttm_dma_find_pool(dev, type | IS_HUGE);
 	if (pool) {
 		count = 0;
-		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+		list_for_each_entry_safe(d_page, next, &ttm->pages_list,
 					 page_list) {
 			if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL))
 				continue;
@@ -1027,7 +1023,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 
 	/* make sure pages array match list and count number of pages */
 	count = 0;
-	list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list,
+	list_for_each_entry_safe(d_page, next, &ttm->pages_list,
 				 page_list) {
 		ttm->pages[count] = d_page->p;
 		count++;
@@ -1048,7 +1044,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 		pool->nfrees += count;
 	} else {
 		pool->npages_free += count;
-		list_splice(&ttm_dma->pages_list, &pool->free_list);
+		list_splice(&ttm->pages_list, &pool->free_list);
 		/*
 		 * Wait to have at at least NUM_PAGES_TO_ALLOC number of pages
 		 * to free in order to minimize calls to set_memory_wb().
@@ -1059,10 +1055,10 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 
-	INIT_LIST_HEAD(&ttm_dma->pages_list);
+	INIT_LIST_HEAD(&ttm->pages_list);
 	for (i = 0; i < ttm->num_pages; i++) {
 		ttm->pages[i] = NULL;
-		ttm_dma->dma_address[i] = 0;
+		ttm->dma_address[i] = 0;
 	}
 
 	/* shrink pool if necessary (only on !is_cached pools)*/
drivers/gpu/drm/ttm/ttm_tt.c
@@ -92,21 +92,22 @@ static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 	return 0;
 }
 
-static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-	ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages,
-					sizeof(*ttm->ttm.pages) +
-					sizeof(*ttm->dma_address),
-					GFP_KERNEL | __GFP_ZERO);
-	if (!ttm->ttm.pages)
+	ttm->pages = kvmalloc_array(ttm->num_pages,
+				    sizeof(*ttm->pages) +
+				    sizeof(*ttm->dma_address),
+				    GFP_KERNEL | __GFP_ZERO);
+	if (!ttm->pages)
 		return -ENOMEM;
-	ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
+
+	ttm->dma_address = (void *)(ttm->pages + ttm->num_pages);
 	return 0;
 }
 
-static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
+static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
 {
-	ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages,
+	ttm->dma_address = kvmalloc_array(ttm->num_pages,
 					  sizeof(*ttm->dma_address),
 					  GFP_KERNEL | __GFP_ZERO);
 	if (!ttm->dma_address)
@@ -138,8 +139,10 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
 	ttm->num_pages = bo->num_pages;
 	ttm->caching = ttm_cached;
 	ttm->page_flags = page_flags;
+	ttm->dma_address = NULL;
 	ttm->swap_storage = NULL;
 	ttm->sg = bo->sg;
+	INIT_LIST_HEAD(&ttm->pages_list);
 	ttm->caching = caching;
 }
 
@@ -158,20 +161,21 @@ EXPORT_SYMBOL(ttm_tt_init);
 
 void ttm_tt_fini(struct ttm_tt *ttm)
 {
-	kvfree(ttm->pages);
+	if (ttm->pages)
+		kvfree(ttm->pages);
+	else
+		kvfree(ttm->dma_address);
 	ttm->pages = NULL;
+	ttm->dma_address = NULL;
 }
 EXPORT_SYMBOL(ttm_tt_fini);
 
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
 		    uint32_t page_flags, enum ttm_caching caching)
 {
-	struct ttm_tt *ttm = &ttm_dma->ttm;
-
 	ttm_tt_init_fields(ttm, bo, page_flags, caching);
 
-	INIT_LIST_HEAD(&ttm_dma->pages_list);
-	if (ttm_dma_tt_alloc_page_directory(ttm_dma)) {
+	if (ttm_dma_tt_alloc_page_directory(ttm)) {
 		pr_err("Failed allocating page table\n");
 		return -ENOMEM;
 	}
@@ -179,19 +183,17 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_dma_tt_init);
 
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
 		   uint32_t page_flags, enum ttm_caching caching)
 {
-	struct ttm_tt *ttm = &ttm_dma->ttm;
 	int ret;
 
 	ttm_tt_init_fields(ttm, bo, page_flags, caching);
 
-	INIT_LIST_HEAD(&ttm_dma->pages_list);
 	if (page_flags & TTM_PAGE_FLAG_SG)
-		ret = ttm_sg_tt_alloc_page_directory(ttm_dma);
+		ret = ttm_sg_tt_alloc_page_directory(ttm);
 	else
-		ret = ttm_dma_tt_alloc_page_directory(ttm_dma);
+		ret = ttm_dma_tt_alloc_page_directory(ttm);
 	if (ret) {
 		pr_err("Failed allocating page table\n");
 		return -ENOMEM;
@@ -200,19 +202,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_sg_tt_init);
 
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
-{
-	struct ttm_tt *ttm = &ttm_dma->ttm;
-
-	if (ttm->pages)
-		kvfree(ttm->pages);
-	else
-		kvfree(ttm_dma->dma_address);
-	ttm->pages = NULL;
-	ttm_dma->dma_address = NULL;
-}
-EXPORT_SYMBOL(ttm_dma_tt_fini);
-
 int ttm_tt_swapin(struct ttm_tt *ttm)
 {
 	struct address_space *swap_space;
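The rewritten ttm_dma_tt_alloc_page_directory() above keeps TTM's single-allocation trick: one kvmalloc_array() call is sized for both directories, and the dma_addr_t array is carved out directly behind the struct page * array, which is also why ttm_tt_fini() only needs one kvfree(). A standalone sketch of the layout, with userspace stand-ins for the kernel types and allocators:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct page;			/* opaque, as in the kernel */
typedef uint64_t dma_addr_t;

static int alloc_page_directory(struct page ***pages,
				dma_addr_t **dma_address, size_t num_pages)
{
	/* one zeroed block big enough for both arrays */
	void *mem = calloc(num_pages,
			   sizeof(struct page *) + sizeof(dma_addr_t));
	if (!mem)
		return -1;

	*pages = mem;
	/* the DMA array starts where the page array ends */
	*dma_address = (dma_addr_t *)(*pages + num_pages);
	return 0;
}

int main(void)
{
	struct page **pages;
	dma_addr_t *dma;

	if (alloc_page_directory(&pages, &dma, 16))
		return 1;
	printf("pages=%p dma=%p (offset %zu bytes)\n",
	       (void *)pages, (void *)dma, (char *)dma - (char *)pages);
	free(pages);	/* one free releases both arrays, cf. ttm_tt_fini() */
	return 0;
}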
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -186,7 +186,7 @@ struct ttm_placement vmw_nonfixed_placement = {
 };
 
 struct vmw_ttm_tt {
-	struct ttm_dma_tt dma_ttm;
+	struct ttm_tt dma_ttm;
 	struct vmw_private *dev_priv;
 	int gmr_id;
 	struct vmw_mob *mob;
@@ -374,8 +374,8 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 		return 0;
 
 	vsgt->mode = dev_priv->map_mode;
-	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
-	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
+	vsgt->pages = vmw_tt->dma_ttm.pages;
+	vsgt->num_pages = vmw_tt->dma_ttm.num_pages;
 	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
 	vsgt->sgt = &vmw_tt->sgt;
 
@@ -483,7 +483,7 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
 const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
 {
 	struct vmw_ttm_tt *vmw_tt =
-		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+		container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
 
 	return &vmw_tt->vsgt;
 }
@@ -493,7 +493,7 @@ static int vmw_ttm_bind(struct ttm_bo_device *bdev,
 			struct ttm_tt *ttm, struct ttm_resource *bo_mem)
 {
 	struct vmw_ttm_tt *vmw_be =
-		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
 	int ret = 0;
 
 	if (!bo_mem)
@@ -537,7 +537,7 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
 			   struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_be =
-		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
 
 	if (!vmw_be->bound)
 		return;
@@ -562,13 +562,13 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev,
 static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_be =
-		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
 
 	vmw_ttm_unbind(bdev, ttm);
 	ttm_tt_destroy_common(bdev, ttm);
 	vmw_ttm_unmap_dma(vmw_be);
 	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
-		ttm_dma_tt_fini(&vmw_be->dma_ttm);
+		ttm_tt_fini(&vmw_be->dma_ttm);
 	else
 		ttm_tt_fini(ttm);
 
@@ -583,7 +583,7 @@ static int vmw_ttm_populate(struct ttm_bo_device *bdev,
 		struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct vmw_ttm_tt *vmw_tt =
-		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
 	int ret;
@@ -612,7 +612,7 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
 			       struct ttm_tt *ttm)
 {
 	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
-						 dma_ttm.ttm);
+						 dma_ttm);
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
 
@@ -650,12 +650,12 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
 		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags,
 				      ttm_cached);
 	else
-		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags,
+		ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
 				  ttm_cached);
 	if (unlikely(ret != 0))
 		goto out_no_init;
 
-	return &vmw_be->dma_ttm.ttm;
+	return &vmw_be->dma_ttm;
 out_no_init:
 	kfree(vmw_be);
 	return NULL;
@@ -813,7 +813,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv,
 	ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx);
 	if (likely(ret == 0)) {
 		struct vmw_ttm_tt *vmw_tt =
-			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm);
+			container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
 		ret = vmw_ttm_map_dma(vmw_tt);
 	}
 
include/drm/ttm/ttm_page_alloc.h
@@ -61,13 +61,13 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm);
 /**
  * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
  */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt,
 			       struct ttm_operation_ctx *ctx);
 
 /**
  * Unpopulates and DMA unmaps pages as part of a
  * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt);
 
 /**
  * Output the state of pools to debugfs file
@@ -90,9 +90,9 @@ void ttm_dma_page_alloc_fini(void);
  */
 int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
 
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+int ttm_dma_populate(struct ttm_tt *ttm_dma, struct device *dev,
 		     struct ttm_operation_ctx *ctx);
-void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+void ttm_dma_unpopulate(struct ttm_tt *ttm_dma, struct device *dev);
 
 #else
 static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
@@ -107,13 +107,13 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
 {
 	return 0;
 }
-static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
+static inline int ttm_dma_populate(struct ttm_tt *ttm_dma,
 				   struct device *dev,
 				   struct ttm_operation_ctx *ctx)
 {
 	return -ENOMEM;
 }
-static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma,
+static inline void ttm_dma_unpopulate(struct ttm_tt *ttm_dma,
 				      struct device *dev)
 {
 }
include/drm/ttm/ttm_tt.h
@@ -47,12 +47,13 @@ struct ttm_operation_ctx;
  * struct ttm_tt
  *
  * @pages: Array of pages backing the data.
+ * @page_flags: see TTM_PAGE_FLAG_*
  * @num_pages: Number of pages in the page array.
- * @bdev: Pointer to the current struct ttm_bo_device.
- * @be: Pointer to the ttm backend.
+ * @sg: for SG objects via dma-buf
+ * @dma_address: The DMA (bus) addresses of the pages
  * @swap_storage: Pointer to shmem struct file for swap storage.
- * @caching_state: The current caching state of the pages.
- * @state: The current binding state of the pages.
+ * @pages_list: used by some page allocation backend
+ * @caching: The current caching state of the pages.
 *
 * This is a structure holding the pages, caching- and aperture binding
 * status for a buffer object that isn't backed by fixed (VRAM / AGP)
@@ -62,8 +63,10 @@ struct ttm_tt {
 	struct page **pages;
 	uint32_t page_flags;
 	uint32_t num_pages;
-	struct sg_table *sg; /* for SG objects via dma-buf */
+	struct sg_table *sg;
+	dma_addr_t *dma_address;
 	struct file *swap_storage;
+	struct list_head pages_list;
 	enum ttm_caching caching;
 };
 
@@ -72,23 +75,6 @@ static inline bool ttm_tt_is_populated(struct ttm_tt *tt)
 	return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED;
 }
 
-/**
- * struct ttm_dma_tt
- *
- * @ttm: Base ttm_tt struct.
- * @dma_address: The DMA (bus) addresses of the pages
- * @pages_list: used by some page allocation backend
- *
- * This is a structure holding the pages, caching- and aperture binding
- * status for a buffer object that isn't backed by fixed (VRAM / AGP)
- * memory.
- */
-struct ttm_dma_tt {
-	struct ttm_tt ttm;
-	dma_addr_t *dma_address;
-	struct list_head pages_list;
-};
-
 /**
  * ttm_tt_create
  *
@@ -115,9 +101,9 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc);
  */
 int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
 		uint32_t page_flags, enum ttm_caching caching);
-int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_dma_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
 		    uint32_t page_flags, enum ttm_caching caching);
-int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
+int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo,
 		   uint32_t page_flags, enum ttm_caching caching);
 
 /**
@@ -128,7 +114,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo,
 * Free memory of ttm_tt structure
 */
 void ttm_tt_fini(struct ttm_tt *ttm);
-void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma);
 
 /**
  * ttm_ttm_destroy:
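With struct ttm_dma_tt gone from the header above, a driver wrapper now embeds plain struct ttm_tt and drops one level of member nesting, which is exactly the mechanical change repeated across the amdgpu, radeon, nouveau and vmwgfx hunks. A minimal sketch of the resulting pattern; my_ttm_tt and my_first_dma_addr() are hypothetical names, not code from this patch:

#include <linux/kernel.h>
#include <drm/ttm/ttm_tt.h>

/* hypothetical driver wrapper, analogous to amdgpu_ttm_tt/radeon_ttm_tt */
struct my_ttm_tt {
	struct ttm_tt ttm;	/* base struct now carries dma_address */
	void *driver_private;
};

static dma_addr_t my_first_dma_addr(struct ttm_tt *ttm)
{
	struct my_ttm_tt *gtt = container_of(ttm, struct my_ttm_tt, ttm);

	/* pre-merge the container_of() member was ttm.ttm, and fields
	 * like num_pages sat one level deeper at gtt->ttm.ttm.num_pages */
	return gtt->ttm.dma_address[0];
}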