drm/ttm: rename bo->mem and make it a pointer
When we want to decouple resource management from buffer management we need
to be able to handle resources separately.

Add a resource pointer and rename bo->mem so that all code needs to change to
access the pointer instead.

No functional change.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210430092508.60710-4-christian.koenig@amd.com
Parent: 9450129ed9
Commit: d3116756a7
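As an illustration of the rename that every hunk below applies, here is a minimal before/after sketch of the access pattern (the members bo->mem, bo->_mem and the new bo->resource pointer are taken from this patch; the surrounding variables are hypothetical):

    /* Before: the ttm_resource was an embedded member, so callers
     * took its address and reached fields with '.'. */
    struct ttm_resource *old_mem = &bo->mem;
    if (bo->mem.mem_type == TTM_PL_VRAM)
        offset = bo->mem.start << PAGE_SHIFT;

    /* After: bo->resource is a pointer (initialized to the still-embedded
     * bo->_mem in ttm_bo_init_reserved()), so callers dereference it. */
    struct ttm_resource *old_mem = bo->resource;
    if (bo->resource->mem_type == TTM_PL_VRAM)
        offset = bo->resource->start << PAGE_SHIFT;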
@@ -1666,7 +1666,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 * the next restore worker
 */
 if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
-bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
 is_invalid_userptr = true;

 ret = vm_validate_pt_pd_bos(avm);

@@ -4103,9 +4103,9 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {

 /* No need to recover an evicted BO */
-if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
-shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
-shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
+if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
+shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
 continue;

 r = amdgpu_bo_restore_shadow(shadow, &next);

@@ -226,12 +226,12 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 if (r)
 return ERR_PTR(r);

-} else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+} else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
 AMDGPU_GEM_DOMAIN_GTT)) {
 return ERR_PTR(-EBUSY);
 }

-switch (bo->tbo.mem.mem_type) {
+switch (bo->tbo.resource->mem_type) {
 case TTM_PL_TT:
 sgt = drm_prime_pages_to_sg(obj->dev,
 bo->tbo.ttm->pages,
@@ -245,8 +245,9 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 break;

 case TTM_PL_VRAM:
-r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
-bo->tbo.base.size, attach->dev, dir, &sgt);
+r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
+bo->tbo.base.size, attach->dev,
+dir, &sgt);
 if (r)
 return ERR_PTR(r);
 break;
@@ -436,7 +437,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
 struct amdgpu_vm_bo_base *bo_base;
 int r;

-if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
 return;

 r = ttm_bo_validate(&bo->tbo, &placement, &ctx);

@@ -101,7 +101,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
 {
 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

-switch (bo->tbo.mem.mem_type) {
+switch (bo->tbo.resource->mem_type) {
 case TTM_PL_TT:
 *addr = bo->tbo.ttm->dma_address[0];
 break;
@@ -112,7 +112,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
 *addr = 0;
 break;
 }
-*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
+*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
 amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
 }

@@ -122,7 +122,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 int r;

 spin_lock(&mgr->lock);
-if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
+if ((tbo->resource == mem || tbo->resource->mem_type != TTM_PL_TT) &&
 atomic64_read(&mgr->available) < mem->num_pages) {
 spin_unlock(&mgr->lock);
 return -ENOSPC;

@@ -362,14 +362,14 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
 if (cpu_addr)
 amdgpu_bo_kunmap(*bo_ptr);

-ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+ttm_resource_free(&(*bo_ptr)->tbo, (*bo_ptr)->tbo.resource);

 for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
 (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
 (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
 }
 r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
-&(*bo_ptr)->tbo.mem, &ctx);
+(*bo_ptr)->tbo.resource, &ctx);
 if (r)
 goto error;

@@ -573,15 +573,15 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 return r;

 if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+bo->tbo.resource->mem_type == TTM_PL_VRAM &&
+bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
 ctx.bytes_moved);
 else
 amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);

 if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
-bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 struct dma_fence *fence;

 r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
@@ -761,7 +761,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 if (r < 0)
 return r;

-r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
 if (r)
 return r;

@@ -884,8 +884,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);

 if (bo->tbo.pin_count) {
-uint32_t mem_type = bo->tbo.mem.mem_type;
-uint32_t mem_flags = bo->tbo.mem.placement;
+uint32_t mem_type = bo->tbo.resource->mem_type;
+uint32_t mem_flags = bo->tbo.resource->placement;

 if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
 return -EINVAL;
@@ -935,7 +935,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,

 ttm_bo_pin(&bo->tbo);

-domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
 atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
@@ -987,11 +987,11 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
 if (bo->tbo.base.import_attach)
 dma_buf_unpin(bo->tbo.base.import_attach);

-if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
 atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
 &adev->visible_pin_size);
-} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
 atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
 }
 }
@@ -1223,7 +1223,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 {
 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 struct amdgpu_bo *abo;
-struct ttm_resource *old_mem = &bo->mem;
+struct ttm_resource *old_mem = bo->resource;

 if (!amdgpu_bo_is_amdgpu_bo(bo))
 return;
@@ -1234,7 +1234,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 amdgpu_bo_kunmap(abo);

 if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
-bo->mem.mem_type != TTM_PL_SYSTEM)
+bo->resource->mem_type != TTM_PL_SYSTEM)
 dma_buf_move_notify(abo->tbo.base.dma_buf);

 /* remember the eviction */
@@ -1254,7 +1254,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
 {
 unsigned int domain;

-domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 switch (domain) {
 case AMDGPU_GEM_DOMAIN_VRAM:
 *vram_mem += amdgpu_bo_size(bo);
@@ -1296,7 +1296,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 if (bo->base.resv == &bo->base._resv)
 amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);

-if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+if (bo->resource->mem_type != TTM_PL_VRAM || !bo->resource->mm_node ||
 !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
 return;

@@ -1333,10 +1333,10 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 /* Remember that this BO was accessed by the CPU */
 abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

-if (bo->mem.mem_type != TTM_PL_VRAM)
+if (bo->resource->mem_type != TTM_PL_VRAM)
 return 0;

-offset = bo->mem.start << PAGE_SHIFT;
+offset = bo->resource->start << PAGE_SHIFT;
 if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
 return 0;

@@ -1359,9 +1359,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 else if (unlikely(r))
 return VM_FAULT_SIGBUS;

-offset = bo->mem.start << PAGE_SHIFT;
+offset = bo->resource->start << PAGE_SHIFT;
 /* this should never happen */
-if (bo->mem.mem_type == TTM_PL_VRAM &&
+if (bo->resource->mem_type == TTM_PL_VRAM &&
 (offset + bo->base.size) > adev->gmc.visible_vram_size)
 return VM_FAULT_SIGBUS;

@@ -1446,11 +1446,11 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
 */
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
-WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
 WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
 !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
-WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
-WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
+WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
 !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));

 return amdgpu_bo_gpu_offset_no_check(bo);
@@ -1468,8 +1468,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 uint64_t offset;

-offset = (bo->tbo.mem.start << PAGE_SHIFT) +
-amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);

 return amdgpu_gmc_sign_extend(offset);
 }
@@ -1522,7 +1522,7 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 unsigned int pin_count;
 u64 size;

-domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 switch (domain) {
 case AMDGPU_GEM_DOMAIN_VRAM:
 placement = "VRAM";

@@ -219,10 +219,10 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 struct amdgpu_res_cursor cursor;

-if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
+if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
 return false;

-amdgpu_res_first(&bo->tbo.mem, 0, amdgpu_bo_size(bo), &cursor);
+amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
 while (cursor.remaining) {
 if (cursor.start < adev->gmc.visible_vram_size)
 return true;

@@ -127,8 +127,8 @@ TRACE_EVENT(amdgpu_bo_create,

 TP_fast_assign(
 __entry->bo = bo;
-__entry->pages = bo->tbo.mem.num_pages;
-__entry->type = bo->tbo.mem.mem_type;
+__entry->pages = bo->tbo.resource->num_pages;
+__entry->type = bo->tbo.resource->mem_type;
 __entry->prefer = bo->preferred_domains;
 __entry->allow = bo->allowed_domains;
 __entry->visible = bo->flags;

@@ -125,7 +125,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 rcu_read_unlock();
 return;
 }
-switch (bo->mem.mem_type) {
+
+switch (bo->resource->mem_type) {
 case AMDGPU_PL_GDS:
 case AMDGPU_PL_GWS:
 case AMDGPU_PL_OA:
@@ -458,7 +459,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 {
 struct amdgpu_device *adev;
 struct amdgpu_bo *abo;
-struct ttm_resource *old_mem = &bo->mem;
+struct ttm_resource *old_mem = bo->resource;
 int r;

 if (new_mem->mem_type == TTM_PL_TT) {
@@ -490,7 +491,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 return r;

 amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
-ttm_resource_free(bo, &bo->mem);
+ttm_resource_free(bo, bo->resource);
 ttm_bo_assign_mem(bo, new_mem);
 goto out;
 }
@@ -599,7 +600,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 struct amdgpu_res_cursor cursor;

-amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
+amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
+&cursor);
 return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
 }

@@ -954,12 +956,12 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 uint64_t addr, flags;
 int r;

-if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
 return 0;

 addr = amdgpu_gmc_agp_addr(bo);
 if (addr != AMDGPU_BO_INVALID_OFFSET) {
-bo->mem.start = addr >> PAGE_SHIFT;
+bo->resource->start = addr >> PAGE_SHIFT;
 } else {

 /* allocate GART space */
@@ -970,7 +972,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 placements.fpfn = 0;
 placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
 placements.mem_type = TTM_PL_TT;
-placements.flags = bo->mem.placement;
+placements.flags = bo->resource->placement;

 r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
 if (unlikely(r))
@@ -987,8 +989,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 return r;
 }

-ttm_resource_free(bo, &bo->mem);
-bo->mem = tmp;
+ttm_resource_free(bo, bo->resource);
+ttm_bo_assign_mem(bo, &tmp);
 }

 return 0;
@@ -1009,7 +1011,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 if (!tbo->ttm)
 return 0;

-flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
+flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
 r = amdgpu_ttm_gart_bind(adev, tbo, flags);

 return r;
@@ -1322,7 +1324,7 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 const struct ttm_place *place)
 {
-unsigned long num_pages = bo->mem.num_pages;
+unsigned long num_pages = bo->resource->num_pages;
 struct amdgpu_res_cursor cursor;
 struct dma_resv_list *flist;
 struct dma_fence *f;
@@ -1346,7 +1348,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 }
 }

-switch (bo->mem.mem_type) {
+switch (bo->resource->mem_type) {
 case TTM_PL_TT:
 if (amdgpu_bo_is_amdgpu_bo(bo) &&
 amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
@@ -1355,7 +1357,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,

 case TTM_PL_VRAM:
 /* Check each drm MM node individually */
-amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT,
+amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
 &cursor);
 while (cursor.remaining) {
 if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
@@ -1397,10 +1399,10 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
 uint32_t value = 0;
 int ret = 0;

-if (bo->mem.mem_type != TTM_PL_VRAM)
+if (bo->resource->mem_type != TTM_PL_VRAM)
 return -EIO;

-amdgpu_res_first(&bo->mem, offset, len, &cursor);
+amdgpu_res_first(bo->resource, offset, len, &cursor);
 while (cursor.remaining) {
 uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
 uint64_t bytes = 4 - (cursor.start & 3);
@@ -1917,16 +1919,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 return -EINVAL;
 }

-if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+if (bo->tbo.resource->mem_type == TTM_PL_TT) {
 r = amdgpu_ttm_alloc_gart(&bo->tbo);
 if (r)
 return r;
 }

-num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;
+num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
 num_loops = 0;

-amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
 while (cursor.remaining) {
 num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
 amdgpu_res_next(&cursor, cursor.size);
@@ -1951,12 +1953,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 }
 }

-amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
 while (cursor.remaining) {
 uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
 uint64_t dst_addr = cursor.start;

-dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+dst_addr += amdgpu_ttm_domain_start(adev,
+bo->tbo.resource->mem_type);
 amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
 cur_size);

@@ -342,7 +342,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 amdgpu_vm_bo_idle(base);

 if (bo->preferred_domains &
-amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
 return;

 /*
@@ -657,11 +657,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 if (!bo->parent)
 continue;

-ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
+ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
 &vm->lru_bulk_move);
 if (bo->shadow)
 ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
-&bo->shadow->tbo.mem,
+bo->shadow->tbo.resource,
 &vm->lru_bulk_move);
 }
 spin_unlock(&adev->mman.bdev.lru_lock);
@@ -1818,10 +1818,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 struct drm_gem_object *gobj = dma_buf->priv;
 struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);

-if (abo->tbo.mem.mem_type == TTM_PL_VRAM)
+if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
 bo = gem_to_amdgpu_bo(gobj);
 }
-mem = &bo->tbo.mem;
+mem = bo->tbo.resource;
 if (mem->mem_type == TTM_PL_TT)
 pages_addr = bo->tbo.ttm->dma_address;
 }
@@ -1881,7 +1881,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 * next command submission.
 */
 if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
-uint32_t mem_type = bo->tbo.mem.mem_type;
+uint32_t mem_type = bo->tbo.resource->mem_type;

 if (!(bo->preferred_domains &
 amdgpu_mem_type_to_domain(mem_type)))

@@ -217,7 +217,7 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-struct ttm_resource *mem = &bo->tbo.mem;
+struct ttm_resource *mem = bo->tbo.resource;
 struct drm_mm_node *nodes = mem->mm_node;
 unsigned pages = mem->num_pages;
 u64 usage;

@@ -409,7 +409,7 @@ svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
 pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
 prange->svms, prange->start, prange->last);

-prange->ttm_res = &prange->svm_bo->bo->tbo.mem;
+prange->ttm_res = prange->svm_bo->bo->tbo.resource;
 return true;
 }

@@ -515,7 +515,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,

 svm_bo->bo = bo;
 prange->svm_bo = svm_bo;
-prange->ttm_res = &bo->tbo.mem;
+prange->ttm_res = bo->tbo.resource;
 prange->offset = 0;

 spin_lock(&svm_bo->list_lock);

@@ -40,12 +40,12 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
 const struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);

 drm_printf_indent(p, indent, "placement=");
-drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname));
+drm_print_bits(p, bo->resource->placement, plname, ARRAY_SIZE(plname));
 drm_printf(p, "\n");

-if (bo->mem.bus.is_iomem)
+if (bo->resource->bus.is_iomem)
 drm_printf_indent(p, indent, "bus.offset=%lx\n",
-(unsigned long)bo->mem.bus.offset);
+(unsigned long)bo->resource->bus.offset);
 }
 EXPORT_SYMBOL(drm_gem_ttm_print_info);

@@ -248,10 +248,10 @@ EXPORT_SYMBOL(drm_gem_vram_put);
 static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
 {
 /* Keep TTM behavior for now, remove when drivers are audited */
-if (WARN_ON_ONCE(!gbo->bo.mem.mm_node))
+if (WARN_ON_ONCE(!gbo->bo.resource->mm_node))
 return 0;

-return gbo->bo.mem.start;
+return gbo->bo.resource->start;
 }

 /**

@@ -312,7 +312,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
 NOUVEAU_GEM_DOMAIN_GART;
 else
-if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
+if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
 else
 init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

@@ -433,7 +433,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 if (nvbo->bo.pin_count) {
 bool error = evict;

-switch (bo->mem.mem_type) {
+switch (bo->resource->mem_type) {
 case TTM_PL_VRAM:
 error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
 break;
@@ -446,7 +446,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 if (error) {
 NV_ERROR(drm, "bo %p pinned elsewhere: "
 "0x%08x vs 0x%08x\n", bo,
-bo->mem.mem_type, domain);
+bo->resource->mem_type, domain);
 ret = -EBUSY;
 }
 ttm_bo_pin(&nvbo->bo);
@@ -467,7 +467,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)

 ttm_bo_pin(&nvbo->bo);

-switch (bo->mem.mem_type) {
+switch (bo->resource->mem_type) {
 case TTM_PL_VRAM:
 drm->gem.vram_available -= bo->base.size;
 break;
@@ -498,7 +498,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)

 ttm_bo_unpin(&nvbo->bo);
 if (!nvbo->bo.pin_count) {
-switch (bo->mem.mem_type) {
+switch (bo->resource->mem_type) {
 case TTM_PL_VRAM:
 drm->gem.vram_available += bo->base.size;
 break;
@@ -523,7 +523,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
 if (ret)
 return ret;

-ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);

 ttm_bo_unreserve(&nvbo->bo);
 return ret;
@@ -737,7 +737,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 {
 struct nouveau_bo *nvbo = nouveau_bo(bo);

-switch (bo->mem.mem_type) {
+switch (bo->resource->mem_type) {
 case TTM_PL_VRAM:
 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
 NOUVEAU_GEM_DOMAIN_CPU);
@@ -754,7 +754,7 @@ static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 struct ttm_resource *reg)
 {
-struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
 struct nouveau_mem *new_mem = nouveau_mem(reg);
 struct nvif_vmm *vmm = &drm->client.vmm.vmm;
 int ret;
@@ -809,7 +809,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
 mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
 ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
 if (ret == 0) {
-ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
+ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
 if (ret == 0) {
 ret = nouveau_fence_new(chan, false, &fence);
 if (ret == 0) {
@@ -969,7 +969,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 {
 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 struct nouveau_bo *nvbo = nouveau_bo(bo);
-struct ttm_resource *old_reg = &bo->mem;
+struct ttm_resource *old_reg = bo->resource;
 struct nouveau_drm_tile *new_tile = NULL;
 int ret = 0;

@@ -1009,7 +1009,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 if (old_reg->mem_type == TTM_PL_TT &&
 new_reg->mem_type == TTM_PL_SYSTEM) {
 nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
-ttm_resource_free(bo, &bo->mem);
+ttm_resource_free(bo, bo->resource);
 ttm_bo_assign_mem(bo, new_reg);
 goto out;
 }
@@ -1045,7 +1045,7 @@ out:
 }
 out_ntfy:
 if (ret) {
-nouveau_bo_move_ntfy(bo, &bo->mem);
+nouveau_bo_move_ntfy(bo, bo->resource);
 }
 return ret;
 }
@@ -1170,7 +1170,7 @@ out:
 list_del_init(&nvbo->io_reserve_lru);
 drm_vma_node_unmap(&nvbo->bo.base.vma_node,
 bdev->dev_mapping);
-nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
 goto retry;
 }

@@ -1200,12 +1200,12 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 /* as long as the bo isn't in vram, and isn't tiled, we've got
 * nothing to do here.
 */
-if (bo->mem.mem_type != TTM_PL_VRAM) {
+if (bo->resource->mem_type != TTM_PL_VRAM) {
 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
 !nvbo->kind)
 return 0;

-if (bo->mem.mem_type != TTM_PL_SYSTEM)
+if (bo->resource->mem_type != TTM_PL_SYSTEM)
 return 0;

 nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
@@ -1213,7 +1213,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 } else {
 /* make sure bo is in mappable vram */
 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
-bo->mem.start + bo->mem.num_pages < mappable)
+bo->resource->start + bo->resource->num_pages < mappable)
 return 0;

 for (i = 0; i < nvbo->placement.num_placement; ++i) {

@@ -212,7 +212,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 args.start = 0;
 args.limit = chan->vmm->vmm.limit - 1;
 } else
-if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
+if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
 if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
 /* nv04 vram pushbuf hack, retarget to its location in
 * the framebuffer bar rather than direct vram access..

@@ -378,7 +378,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 FBINFO_HWACCEL_FILLRECT |
 FBINFO_HWACCEL_IMAGEBLIT;
 info->fbops = &nouveau_fbcon_sw_ops;
-info->fix.smem_start = nvbo->bo.mem.bus.offset;
+info->fix.smem_start = nvbo->bo.resource->bus.offset;
 info->fix.smem_len = nvbo->bo.base.size;

 info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);

@@ -276,7 +276,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,

 if (is_power_of_2(nvbo->valid_domains))
 rep->domain = nvbo->valid_domains;
-else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 rep->domain = NOUVEAU_GEM_DOMAIN_GART;
 else
 rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -347,11 +347,11 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);

 if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
-bo->mem.mem_type == TTM_PL_VRAM)
+bo->resource->mem_type == TTM_PL_VRAM)
 pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;

 else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
-bo->mem.mem_type == TTM_PL_TT)
+bo->resource->mem_type == TTM_PL_TT)
 pref_domains |= NOUVEAU_GEM_DOMAIN_GART;

 else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
@@ -561,13 +561,13 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,

 if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 if (nvbo->offset == b->presumed.offset &&
-((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
 b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
-(nvbo->bo.mem.mem_type == TTM_PL_TT &&
+(nvbo->bo.resource->mem_type == TTM_PL_TT &&
 b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 continue;

-if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 else
 b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -681,7 +681,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 }

 if (!nvbo->kmap.virtual) {
-ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
 &nvbo->kmap);
 if (ret) {
 NV_PRINTK(err, cli, "failed kmap for reloc\n");
@@ -870,7 +870,7 @@ revalidate:
 if (unlikely(cmd != req->suffix0)) {
 if (!nvbo->kmap.virtual) {
 ret = ttm_bo_kmap(&nvbo->bo, 0,
-nvbo->bo.mem.
+nvbo->bo.resource->
 num_pages,
 &nvbo->kmap);
 if (ret) {

@@ -77,7 +77,7 @@ int
 nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
 struct nouveau_vma **pvma)
 {
-struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
+struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
 struct nouveau_vma *vma;
 struct nvif_vma tmp;
 int ret;
@@ -96,7 +96,7 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
 vma->fence = NULL;
 list_add_tail(&vma->head, &nvbo->vma_list);

-if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+if (nvbo->bo.resource->mem_type != TTM_PL_SYSTEM &&
 mem->mem.page == nvbo->page) {
 ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
 mem->mem.size, &tmp);

@@ -77,8 +77,8 @@ static int
 nv17_fence_context_new(struct nouveau_channel *chan)
 {
 struct nv10_fence_priv *priv = chan->drm->fence;
+struct ttm_resource *reg = priv->bo->bo.resource;
 struct nv10_fence_chan *fctx;
-struct ttm_resource *reg = &priv->bo->bo.mem;
 u32 start = reg->start * PAGE_SIZE;
 u32 limit = start + priv->bo->bo.base.size - 1;
 int ret = 0;

@@ -37,7 +37,7 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 {
 struct nv10_fence_priv *priv = chan->drm->fence;
 struct nv10_fence_chan *fctx;
-struct ttm_resource *reg = &priv->bo->bo.mem;
+struct ttm_resource *reg = priv->bo->bo.resource;
 u32 start = reg->start * PAGE_SIZE;
 u32 limit = start + priv->bo->bo.base.size - 1;
 int ret;

@@ -292,12 +292,12 @@ qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo,
 unsigned long offset)
 {
 struct qxl_memslot *slot =
-(bo->tbo.mem.mem_type == TTM_PL_VRAM)
+(bo->tbo.resource->mem_type == TTM_PL_VRAM)
 ? &qdev->main_slot : &qdev->surfaces_slot;

-/* TODO - need to hold one of the locks to read bo->tbo.mem.start */
+/* TODO - need to hold one of the locks to read bo->tbo.resource->start */

-return slot->high_bits | ((bo->tbo.mem.start << PAGE_SHIFT) + offset);
+return slot->high_bits | ((bo->tbo.resource->start << PAGE_SHIFT) + offset);
 }

 /* qxl_display.c */

@@ -212,14 +212,14 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
 struct io_mapping *map;
 struct dma_buf_map bo_map;

-if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
 map = qdev->vram_mapping;
-else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
+else if (bo->tbo.resource->mem_type == TTM_PL_PRIV)
 map = qdev->surface_mapping;
 else
 goto fallback;

-offset = bo->tbo.mem.start << PAGE_SHIFT;
+offset = bo->tbo.resource->start << PAGE_SHIFT;
 return io_mapping_map_atomic_wc(map, offset + page_offset);
 fallback:
 if (bo->kptr) {
@@ -266,8 +266,8 @@ int qxl_bo_vunmap(struct qxl_bo *bo)
 void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
 struct qxl_bo *bo, void *pmap)
 {
-if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
-(bo->tbo.mem.mem_type != TTM_PL_PRIV))
+if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) &&
+(bo->tbo.resource->mem_type != TTM_PL_PRIV))
 goto fallback;

 io_mapping_unmap_atomic(pmap);

@@ -131,7 +131,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
 qbo = to_qxl_bo(bo);
 qdev = to_qxl(qbo->tbo.base.dev);

-if (bo->mem.mem_type == TTM_PL_PRIV && qbo->surface_id)
+if (bo->resource->mem_type == TTM_PL_PRIV && qbo->surface_id)
 qxl_surface_evict(qdev, qbo, new_mem ? true : false);
 }

@@ -140,7 +140,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
 struct ttm_resource *new_mem,
 struct ttm_place *hop)
 {
-struct ttm_resource *old_mem = &bo->mem;
+struct ttm_resource *old_mem = bo->resource;
 int ret;

 qxl_bo_move_notify(bo, new_mem);

@@ -400,8 +400,8 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a,
 struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);

 /* Sort A before B if A is smaller. */
-return (int)la->robj->tbo.mem.num_pages -
-(int)lb->robj->tbo.mem.num_pages;
+return (int)la->robj->tbo.resource->num_pages -
+(int)lb->robj->tbo.resource->num_pages;
 }

 /**
@@ -516,7 +516,7 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 }

 r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
-&rdev->ring_tmp_bo.bo->tbo.mem);
+rdev->ring_tmp_bo.bo->tbo.resource);
 if (r)
 return r;

@@ -530,7 +530,7 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
 return -EINVAL;
 }

-r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
+r = radeon_vm_bo_update(rdev, bo_va, bo->tbo.resource);
 if (r)
 return r;

@@ -529,7 +529,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 else
 r = 0;

-cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
 args->domain = radeon_mem_type_to_domain(cur_placement);
 drm_gem_object_put(gobj);
 return r;
@@ -559,7 +559,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 r = ret;

 /* Flush HDP cache via MMIO if necessary */
-cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
+cur_placement = READ_ONCE(robj->tbo.resource->mem_type);
 if (rdev->asic->mmio_hdp_flush &&
 radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM)
 robj->rdev->asic->mmio_hdp_flush(rdev);
@@ -643,7 +643,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 goto error_free;

 list_for_each_entry(entry, &list, head) {
-domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
+domain = radeon_mem_type_to_domain(entry->bo->resource->mem_type);
 /* if anything is swapped out don't swap it in here,
 just abort and wait for the next CS */
 if (domain == RADEON_GEM_DOMAIN_CPU)
@@ -656,7 +656,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
 goto error_unlock;

 if (bo_va->it.start)
-r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
+r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);

 error_unlock:
 mutex_unlock(&bo_va->vm->mutex);
@@ -860,7 +860,7 @@ static int radeon_debugfs_gem_info_show(struct seq_file *m, void *unused)
 unsigned domain;
 const char *placement;

-domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
+domain = radeon_mem_type_to_domain(rbo->tbo.resource->mem_type);
 switch (domain) {
 case RADEON_GEM_DOMAIN_VRAM:
 placement = "VRAM";

@@ -76,7 +76,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)

 bo = container_of(tbo, struct radeon_bo, tbo);

-radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
+radeon_update_memory_usage(bo, bo->tbo.resource->mem_type, -1);

 mutex_lock(&bo->rdev->gem.mutex);
 list_del_init(&bo->list);
@@ -250,7 +250,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 }
 return 0;
 }
-r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
 if (r) {
 return r;
 }
@@ -359,7 +359,7 @@ void radeon_bo_unpin(struct radeon_bo *bo)
 {
 ttm_bo_unpin(&bo->tbo);
 if (!bo->tbo.pin_count) {
-if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
 bo->rdev->vram_pin_size -= radeon_bo_size(bo);
 else
 bo->rdev->gart_pin_size -= radeon_bo_size(bo);
@@ -506,7 +506,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
 u32 domain = lobj->preferred_domains;
 u32 allowed = lobj->allowed_domains;
 u32 current_domain =
-radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
+radeon_mem_type_to_domain(bo->tbo.resource->mem_type);

 /* Check if this buffer will be moved and don't move it
 * if we have moved too many buffers for this IB already.
@@ -605,7 +605,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo)

 out:
 radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
-bo->tbo.mem.start << PAGE_SHIFT,
+bo->tbo.resource->start << PAGE_SHIFT,
 bo->tbo.base.size);
 return 0;
 }
@@ -711,7 +711,7 @@ int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
 return 0;
 }

-if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
+if (bo->tbo.resource->mem_type != TTM_PL_VRAM) {
 if (!has_moved)
 return 0;

@@ -743,7 +743,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 if (!new_mem)
 return;

-radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
+radeon_update_memory_usage(rbo, bo->resource->mem_type, -1);
 radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
 }

@@ -760,11 +760,11 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 rbo = container_of(bo, struct radeon_bo, tbo);
 radeon_bo_check_tiling(rbo, 0, 0);
 rdev = rbo->rdev;
-if (bo->mem.mem_type != TTM_PL_VRAM)
+if (bo->resource->mem_type != TTM_PL_VRAM)
 return 0;

-size = bo->mem.num_pages << PAGE_SHIFT;
-offset = bo->mem.start << PAGE_SHIFT;
+size = bo->resource->num_pages << PAGE_SHIFT;
+offset = bo->resource->start << PAGE_SHIFT;
 if ((offset + size) <= rdev->mc.visible_vram_size)
 return 0;

@@ -786,7 +786,7 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
 r = ttm_bo_validate(bo, &rbo->placement, &ctx);
 } else if (likely(!r)) {
-offset = bo->mem.start << PAGE_SHIFT;
+offset = bo->resource->start << PAGE_SHIFT;
 /* this should never happen */
 if ((offset + size) > rdev->mc.visible_vram_size)
 return VM_FAULT_SIGBUS;

@@ -95,7 +95,7 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)

 rdev = radeon_get_rdev(bo->tbo.bdev);

-switch (bo->tbo.mem.mem_type) {
+switch (bo->tbo.resource->mem_type) {
 case TTM_PL_TT:
 start = rdev->mc.gtt_start;
 break;
@@ -104,7 +104,7 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
 break;
 }

-return (bo->tbo.mem.start << PAGE_SHIFT) + start;
+return (bo->tbo.resource->start << PAGE_SHIFT) + start;
 }

 static inline unsigned long radeon_bo_size(struct radeon_bo *bo)

@@ -154,7 +154,7 @@ static void radeon_unmap_vram_bos(struct radeon_device *rdev)
 return;

 list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
-if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
+if (bo->tbo.resource->mem_type == TTM_PL_VRAM)
 ttm_bo_unmap_virtual(&bo->tbo);
 }
 }

@@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create,

 TP_fast_assign(
 __entry->bo = bo;
-__entry->pages = bo->tbo.mem.num_pages;
+__entry->pages = bo->tbo.resource->num_pages;
 ),
 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages)
 );

@@ -98,12 +98,12 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
 return;
 }
 rbo = container_of(bo, struct radeon_bo, tbo);
-switch (bo->mem.mem_type) {
+switch (bo->resource->mem_type) {
 case TTM_PL_VRAM:
 if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
 else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
-bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
+bo->resource->start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
 unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
 int i;

@@ -195,9 +195,9 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 struct ttm_resource *new_mem,
 struct ttm_place *hop)
 {
+struct ttm_resource *old_mem = bo->resource;
 struct radeon_device *rdev;
 struct radeon_bo *rbo;
-struct ttm_resource *old_mem = &bo->mem;
 int r;

 if (new_mem->mem_type == TTM_PL_TT) {
@@ -229,7 +229,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict,
 if (old_mem->mem_type == TTM_PL_TT &&
 new_mem->mem_type == TTM_PL_SYSTEM) {
 radeon_ttm_tt_unbind(bo->bdev, bo->ttm);
-ttm_resource_free(bo, &bo->mem);
+ttm_resource_free(bo, bo->resource);
 ttm_bo_assign_mem(bo, new_mem);
 goto out;
 }

@@ -58,7 +58,7 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
 int i, mem_type;

 drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n",
-bo, bo->mem.num_pages, bo->base.size >> 10,
+bo, bo->resource->num_pages, bo->base.size >> 10,
 bo->base.size >> 20);
 for (i = 0; i < placement->num_placement; i++) {
 mem_type = placement->placement[i].mem_type;
@@ -109,7 +109,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
 bdev->funcs->del_from_lru_notify(bo);

 if (bulk && !bo->pin_count) {
-switch (bo->mem.mem_type) {
+switch (bo->resource->mem_type) {
 case TTM_PL_TT:
 ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
 break;
@@ -163,11 +163,13 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 struct ttm_operation_ctx *ctx,
 struct ttm_place *hop)
 {
+struct ttm_resource_manager *old_man, *new_man;
 struct ttm_device *bdev = bo->bdev;
-struct ttm_resource_manager *old_man = ttm_manager_type(bdev, bo->mem.mem_type);
-struct ttm_resource_manager *new_man = ttm_manager_type(bdev, mem->mem_type);
 int ret;

+old_man = ttm_manager_type(bdev, bo->resource->mem_type);
+new_man = ttm_manager_type(bdev, mem->mem_type);
+
 ttm_bo_unmap_virtual(bo);

 /*
@@ -200,7 +202,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 return 0;

 out_err:
-new_man = ttm_manager_type(bdev, bo->mem.mem_type);
+new_man = ttm_manager_type(bdev, bo->resource->mem_type);
 if (!new_man->use_tt)
 ttm_bo_tt_destroy(bo);

@@ -221,7 +223,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
 bo->bdev->funcs->delete_mem_notify(bo);

 ttm_bo_tt_destroy(bo);
-ttm_resource_free(bo, &bo->mem);
+ttm_resource_free(bo, bo->resource);
 }

 static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
@@ -417,7 +419,7 @@ static void ttm_bo_release(struct kref *kref)
 bo->bdev->funcs->release_notify(bo);

 drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
-ttm_mem_io_free(bdev, &bo->mem);
+ttm_mem_io_free(bdev, bo->resource);
 }

 if (!dma_resv_test_signaled_rcu(bo->base.resv, true) ||
@@ -438,7 +440,7 @@ static void ttm_bo_release(struct kref *kref)
 */
 if (bo->pin_count) {
 bo->pin_count = 0;
-ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
 }

 kref_init(&bo->kref);
@@ -534,8 +536,8 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 /* Don't evict this BO if it's outside of the
 * requested placement range
 */
-if (place->fpfn >= (bo->mem.start + bo->mem.num_pages) ||
-(place->lpfn && place->lpfn <= bo->mem.start))
+if (place->fpfn >= (bo->resource->start + bo->resource->num_pages) ||
+(place->lpfn && place->lpfn <= bo->resource->start))
 return false;

 return true;
@@ -851,7 +853,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 }

 error:
-if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count)
+if (bo->resource->mem_type == TTM_PL_SYSTEM && !bo->pin_count)
 ttm_bo_move_to_lru_tail_unlocked(bo);

 return ret;
@@ -987,7 +989,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 /*
 * Check whether we need to move buffer.
 */
-if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
+if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) {
 ret = ttm_bo_move_buffer(bo, placement, ctx);
 if (ret)
 return ret;
@@ -995,7 +997,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 /*
 * We might need to add a TTM.
 */
-if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+if (bo->resource->mem_type == TTM_PL_SYSTEM) {
 ret = ttm_tt_create(bo, true);
 if (ret)
 return ret;
@@ -1027,7 +1029,8 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 bo->bdev = bdev;
 bo->type = type;
 bo->page_alignment = page_alignment;
-ttm_resource_alloc(bo, &sys_mem, &bo->mem);
+bo->resource = &bo->_mem;
+ttm_resource_alloc(bo, &sys_mem, bo->resource);
 bo->moving = NULL;
 bo->pin_count = 0;
 bo->sg = sg;
@@ -1046,7 +1049,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
 if (bo->type == ttm_bo_type_device ||
 bo->type == ttm_bo_type_sg)
 ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node,
-bo->mem.num_pages);
+bo->resource->num_pages);

 /* passed reservation objects should already be locked,
 * since otherwise lockdep will be angered in radeon.
@@ -1108,7 +1111,7 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 struct ttm_device *bdev = bo->bdev;

 drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
-ttm_mem_io_free(bdev, &bo->mem);
+ttm_mem_io_free(bdev, bo->resource);
 }
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);

@@ -1165,7 +1168,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
 /*
 * Move to system cached
 */
-if (bo->mem.mem_type != TTM_PL_SYSTEM) {
+if (bo->resource->mem_type != TTM_PL_SYSTEM) {
 struct ttm_operation_ctx ctx = { false, false };
 struct ttm_resource evict_mem;
 struct ttm_place place, hop;

@@ -179,7 +179,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 struct ttm_device *bdev = bo->bdev;
 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
 struct ttm_tt *ttm = bo->ttm;
-struct ttm_resource *old_mem = &bo->mem;
+struct ttm_resource *old_mem = bo->resource;
 struct ttm_resource old_copy = *old_mem;
 void *old_iomap;
 void *new_iomap;
@@ -365,24 +365,23 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
 unsigned long size,
 struct ttm_bo_kmap_obj *map)
 {
-struct ttm_resource *mem = &bo->mem;
+struct ttm_resource *mem = bo->resource;

-if (bo->mem.bus.addr) {
+if (bo->resource->bus.addr) {
 map->bo_kmap_type = ttm_bo_map_premapped;
-map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
+map->virtual = ((u8 *)bo->resource->bus.addr) + offset;
 } else {
+resource_size_t res = bo->resource->bus.offset + offset;
+
 map->bo_kmap_type = ttm_bo_map_iomap;
 if (mem->bus.caching == ttm_write_combined)
-map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
-size);
+map->virtual = ioremap_wc(res, size);
 #ifdef CONFIG_X86
 else if (mem->bus.caching == ttm_cached)
-map->virtual = ioremap_cache(bo->mem.bus.offset + offset,
-size);
+map->virtual = ioremap_cache(res, size);
 #endif
 else
-map->virtual = ioremap(bo->mem.bus.offset + offset,
-size);
+map->virtual = ioremap(res, size);
 }
 return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -392,7 +391,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 unsigned long num_pages,
 struct ttm_bo_kmap_obj *map)
 {
-struct ttm_resource *mem = &bo->mem;
+struct ttm_resource *mem = bo->resource;
 struct ttm_operation_ctx ctx = {
 .interruptible = false,
 .no_wait_gpu = false
@@ -438,15 +437,15 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,

 map->virtual = NULL;
 map->bo = bo;
-if (num_pages > bo->mem.num_pages)
+if (num_pages > bo->resource->num_pages)
 return -EINVAL;
-if ((start_page + num_pages) > bo->mem.num_pages)
+if ((start_page + num_pages) > bo->resource->num_pages)
 return -EINVAL;

-ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+ret = ttm_mem_io_reserve(bo->bdev, bo->resource);
 if (ret)
 return ret;
-if (!bo->mem.bus.is_iomem) {
+if (!bo->resource->bus.is_iomem) {
 return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
 } else {
 offset = start_page << PAGE_SHIFT;
@@ -475,7 +474,7 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 default:
 BUG();
 }
-ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+ttm_mem_io_free(map->bo->bdev, map->bo->resource);
 map->virtual = NULL;
 map->page = NULL;
 }
@@ -483,7 +482,7 @@ EXPORT_SYMBOL(ttm_bo_kunmap);

 int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 {
-struct ttm_resource *mem = &bo->mem;
+struct ttm_resource *mem = bo->resource;
 int ret;

 ret = ttm_mem_io_reserve(bo->bdev, mem);
@@ -542,7 +541,7 @@ EXPORT_SYMBOL(ttm_bo_vmap);

 void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 {
-struct ttm_resource *mem = &bo->mem;
+struct ttm_resource *mem = bo->resource;

 if (dma_buf_map_is_null(map))
 return;
@@ -553,7 +552,7 @@ void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct dma_buf_map *map)
 iounmap(map->vaddr_iomem);
 dma_buf_map_clear(map);

-ttm_mem_io_free(bo->bdev, &bo->mem);
+ttm_mem_io_free(bo->bdev, bo->resource);
 }
 EXPORT_SYMBOL(ttm_bo_vunmap);

@@ -567,7 +566,7 @@ static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,

 if (!dst_use_tt)
 ttm_bo_tt_destroy(bo);
-ttm_resource_free(bo, &bo->mem);
+ttm_resource_free(bo, bo->resource);
 return 0;
 }

@@ -615,7 +614,9 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
 struct dma_fence *fence)
 {
 struct ttm_device *bdev = bo->bdev;
-struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+struct ttm_resource_manager *from;
+
+from = ttm_manager_type(bdev, bo->resource->mem_type);

 /**
 * BO doesn't have a TTM we need to bind/unbind. Just remember
@@ -628,7 +629,7 @@ static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
 }
 spin_unlock(&from->move_lock);

-ttm_resource_free(bo, &bo->mem);
+ttm_resource_free(bo, bo->resource);

 dma_fence_put(bo->moving);
 bo->moving = dma_fence_get(fence);
@@ -641,7 +642,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 struct ttm_resource *new_mem)
 {
 struct ttm_device *bdev = bo->bdev;
-struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->resource->mem_type);
 struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
 int ret = 0;

@@ -677,7 +678,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 if (ret)
 ttm_bo_wait(bo, false, false);

-ttm_resource_alloc(bo, &sys_mem, &bo->mem);
+ttm_resource_alloc(bo, &sys_mem, bo->resource);
 bo->ttm = NULL;

 dma_resv_unlock(&ghost->base._resv);

@@ -102,7 +102,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
if (bdev->funcs->io_mem_pfn)
return bdev->funcs->io_mem_pfn(bo, page_offset);

return (bo->mem.bus.offset >> PAGE_SHIFT) + page_offset;
return (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset;
}

/**

@@ -200,10 +200,10 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,

/* Fault should not cross bo boundary. */
page_offset &= ~(fault_page_size - 1);
if (page_offset + fault_page_size > bo->mem.num_pages)
if (page_offset + fault_page_size > bo->resource->num_pages)
goto out_fallback;

if (bo->mem.bus.is_iomem)
if (bo->resource->bus.is_iomem)
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
else
pfn = page_to_pfn(ttm->pages[page_offset]);

@@ -213,7 +213,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf,
goto out_fallback;

/* Check that memory is contiguous. */
if (!bo->mem.bus.is_iomem) {
if (!bo->resource->bus.is_iomem) {
for (i = 1; i < fault_page_size; ++i) {
if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i)
goto out_fallback;

@@ -299,7 +299,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
if (unlikely(ret != 0))
return ret;

err = ttm_mem_io_reserve(bdev, &bo->mem);
err = ttm_mem_io_reserve(bdev, bo->resource);
if (unlikely(err != 0))
return VM_FAULT_SIGBUS;

@@ -308,11 +308,11 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
page_last = vma_pages(vma) + vma->vm_pgoff -
drm_vma_node_start(&bo->base.vma_node);

if (unlikely(page_offset >= bo->mem.num_pages))
if (unlikely(page_offset >= bo->resource->num_pages))
return VM_FAULT_SIGBUS;

prot = ttm_io_prot(bo, &bo->mem, prot);
if (!bo->mem.bus.is_iomem) {
prot = ttm_io_prot(bo, bo->resource, prot);
if (!bo->resource->bus.is_iomem) {
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false,

@@ -337,7 +337,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
* first page.
*/
for (i = 0; i < num_prefault; ++i) {
if (bo->mem.bus.is_iomem) {
if (bo->resource->bus.is_iomem) {
pfn = ttm_bo_io_mem_pfn(bo, page_offset);
} else {
page = ttm->pages[page_offset];

@@ -521,14 +521,14 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
<< PAGE_SHIFT);
int ret;

if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages)
if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages)
return -EIO;

ret = ttm_bo_reserve(bo, true, false, NULL);
if (ret)
return ret;

switch (bo->mem.mem_type) {
switch (bo->resource->mem_type) {
case TTM_PL_SYSTEM:
if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(bo->ttm);
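In the fault path above, the PFN for an io-mapped BO is still derived the same way, only read through the resource pointer: the bus offset shifted down by PAGE_SHIFT plus the page offset into the BO, as in ttm_bo_io_mem_pfn(). A small sketch of that arithmetic with an assumed 4 KiB page size and made-up values (not the real fault handler):

```c
#include <stdio.h>

#define PAGE_SHIFT 12UL           /* assumed 4 KiB pages for the example */

struct bus_placement {            /* models the ttm_resource bus info */
	unsigned long offset;     /* bus address of the BO's first page */
	int is_iomem;
};

/* Mirrors the computation after the rename:
 * (bo->resource->bus.offset >> PAGE_SHIFT) + page_offset
 */
static unsigned long io_mem_pfn(const struct bus_placement *bus,
				unsigned long page_offset)
{
	return (bus->offset >> PAGE_SHIFT) + page_offset;
}

int main(void)
{
	struct bus_placement bus = { .offset = 0x40000000UL, .is_iomem = 1 };

	/* Page 3 of the BO lands 3 pages past the bus offset. */
	printf("pfn = 0x%lx\n", io_mem_pfn(&bus, 3));
	return 0;
}
```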
@@ -483,10 +483,10 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
d.src_addr = NULL;
d.dst_pages = dst->ttm->pages;
d.src_pages = src->ttm->pages;
d.dst_num_pages = dst->mem.num_pages;
d.src_num_pages = src->mem.num_pages;
d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL);
d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL);
d.dst_num_pages = dst->resource->num_pages;
d.src_num_pages = src->resource->num_pages;
d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL);
d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL);
d.diff = diff;

for (j = 0; j < h; ++j) {
@@ -103,7 +103,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
goto err;

if (buf->base.pin_count > 0)
ret = ttm_bo_mem_compat(placement, &bo->mem,
ret = ttm_bo_mem_compat(placement, bo->resource,
&new_flags) == true ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, placement, &ctx);

@@ -145,7 +145,7 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
goto err;

if (buf->base.pin_count > 0) {
ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource,
&new_flags) == true ? 0 : -EINVAL;
goto out_unreserve;
}

@@ -211,7 +211,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
uint32_t new_flags;

place = vmw_vram_placement.placement[0];
place.lpfn = bo->mem.num_pages;
place.lpfn = bo->resource->num_pages;
placement.num_placement = 1;
placement.placement = &place;
placement.num_busy_placement = 1;

@@ -227,22 +227,22 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
* In that case, evict it first because TTM isn't good at handling
* that situation.
*/
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->mem.num_pages &&
bo->mem.start > 0 &&
if (bo->resource->mem_type == TTM_PL_VRAM &&
bo->resource->start < bo->resource->num_pages &&
bo->resource->start > 0 &&
buf->base.pin_count == 0) {
ctx.interruptible = false;
(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
}

if (buf->base.pin_count > 0)
ret = ttm_bo_mem_compat(&placement, &bo->mem,
ret = ttm_bo_mem_compat(&placement, bo->resource,
&new_flags) == true ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, &placement, &ctx);

/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->mem.start != 0);
WARN_ON(ret == 0 && bo->resource->start != 0);
if (!ret)
vmw_bo_pin_reserved(buf, true);

@@ -293,11 +293,11 @@ err:
void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
SVGAGuestPtr *ptr)
{
if (bo->mem.mem_type == TTM_PL_VRAM) {
if (bo->resource->mem_type == TTM_PL_VRAM) {
ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
ptr->offset = bo->mem.start << PAGE_SHIFT;
ptr->offset = bo->resource->start << PAGE_SHIFT;
} else {
ptr->gmrId = bo->mem.start;
ptr->gmrId = bo->resource->start;
ptr->offset = 0;
}
}

@@ -316,7 +316,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)
struct ttm_place pl;
struct ttm_placement placement;
struct ttm_buffer_object *bo = &vbo->base;
uint32_t old_mem_type = bo->mem.mem_type;
uint32_t old_mem_type = bo->resource->mem_type;
int ret;

dma_resv_assert_held(bo->base.resv);

@@ -326,8 +326,8 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)

pl.fpfn = 0;
pl.lpfn = 0;
pl.mem_type = bo->mem.mem_type;
pl.flags = bo->mem.placement;
pl.mem_type = bo->resource->mem_type;
pl.flags = bo->resource->placement;

memset(&placement, 0, sizeof(placement));
placement.num_placement = 1;

@@ -335,7 +335,7 @@ void vmw_bo_pin_reserved(struct vmw_buffer_object *vbo, bool pin)

ret = ttm_bo_validate(bo, &placement, &ctx);

BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
BUG_ON(ret != 0 || bo->resource->mem_type != old_mem_type);

if (pin)
ttm_bo_pin(bo);

@@ -369,7 +369,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo)
if (virtual)
return virtual;

ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map);
ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map);
if (ret)
DRM_ERROR("Buffer object map failed: %d.\n", ret);

@@ -1197,7 +1197,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
* With other types of moves, the underlying pages stay the same,
* and the map can be kept.
*/
if (mem->mem_type == TTM_PL_VRAM || bo->mem.mem_type == TTM_PL_VRAM)
if (mem->mem_type == TTM_PL_VRAM || bo->resource->mem_type == TTM_PL_VRAM)
vmw_bo_unmap(vbo);

/*

@@ -1205,6 +1205,6 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo,
* read back all resource content first, and unbind the MOB from
* the resource.
*/
if (mem->mem_type != VMW_PL_MOB && bo->mem.mem_type == VMW_PL_MOB)
if (mem->mem_type != VMW_PL_MOB && bo->resource->mem_type == VMW_PL_MOB)
vmw_resource_unbind_list(vbo);
}
@@ -576,11 +576,11 @@ static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

if (bo->mem.mem_type == TTM_PL_VRAM) {
if (bo->resource->mem_type == TTM_PL_VRAM) {
cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
cmd->body.guestResult.offset = bo->mem.start << PAGE_SHIFT;
cmd->body.guestResult.offset = bo->resource->start << PAGE_SHIFT;
} else {
cmd->body.guestResult.gmrId = bo->mem.start;
cmd->body.guestResult.gmrId = bo->resource->start;
cmd->body.guestResult.offset = 0;
}

@@ -621,8 +621,8 @@ static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = cid;
cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
cmd->body.mobid = bo->mem.start;
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd->body.mobid = bo->resource->start;
cmd->body.offset = 0;

vmw_cmd_commit(dev_priv, sizeof(*cmd));
@@ -889,7 +889,7 @@ static int vmw_cmdbuf_space_pool(struct vmw_cmdbuf_man *man,
header->cmd = man->map + offset;
if (man->using_mob) {
cb_hdr->flags = SVGA_CB_FLAG_MOB;
cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start;
cb_hdr->ptr.mob.mobid = man->cmd_space->resource->start;
cb_hdr->ptr.mob.mobOffset = offset;
} else {
cb_hdr->ptr.pa = (u64)man->handle + (u64)offset;
@@ -346,7 +346,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;

BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))

@@ -355,7 +355,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
cmd->body.mobid = bo->mem.start;
cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));

@@ -385,7 +385,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
uint8_t *cmd;

BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

mutex_lock(&dev_priv->binding_mutex);
vmw_binding_state_scrub(uctx->cbs);

@@ -513,7 +513,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;

BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))

@@ -522,7 +522,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
cmd->body.mobid = bo->mem.start;
cmd->body.mobid = bo->resource->start;
cmd->body.validContents = res->backup_dirty;
res->backup_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));

@@ -594,7 +594,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
uint8_t *cmd;

BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

mutex_lock(&dev_priv->binding_mutex);
vmw_dx_context_scrub_cotables(res, readback);
@@ -173,7 +173,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
SVGA3dCmdDXSetCOTable body;
} *cmd;

WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
dma_resv_assert_held(bo->base.resv);

cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));

@@ -181,12 +181,12 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
return -ENOMEM;

WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
WARN_ON(bo->resource->mem_type != VMW_PL_MOB);
cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
cmd->body.type = vcotbl->type;
cmd->body.mobid = bo->mem.start;
cmd->body.mobid = bo->resource->start;
cmd->body.validSizeInBytes = vcotbl->size_read_back;

vmw_cmd_commit_flush(dev_priv, sizeof(*cmd));

@@ -315,7 +315,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
if (!vmw_resource_mob_attached(res))
return 0;

WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
WARN_ON_ONCE(bo->resource->mem_type != VMW_PL_MOB);
dma_resv_assert_held(bo->base.resv);

mutex_lock(&dev_priv->binding_mutex);

@@ -431,7 +431,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
* Do a page by page copy of COTables. This eliminates slow vmap()s.
* This should really be a TTM utility.
*/
for (i = 0; i < old_bo->mem.num_pages; ++i) {
for (i = 0; i < old_bo->resource->num_pages; ++i) {
bool dummy;

ret = ttm_bo_kmap(old_bo, i, 1, &old_map);
@@ -735,7 +735,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = ctx_res->id;
cmd->body.mobid = dx_query_mob->base.mem.start;
cmd->body.mobid = dx_query_mob->base.resource->start;
vmw_cmd_commit(dev_priv, sizeof(*cmd));

vmw_context_bind_dx_query(ctx_res, dx_query_mob);

@@ -1046,7 +1046,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,

if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

if (unlikely(new_query_bo->base.mem.num_pages > 4)) {
if (unlikely(new_query_bo->base.resource->num_pages > 4)) {
VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}

@@ -3710,16 +3710,16 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)

list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
bo = &reloc->vbo->base;
switch (bo->mem.mem_type) {
switch (bo->resource->mem_type) {
case TTM_PL_VRAM:
reloc->location->offset += bo->mem.start << PAGE_SHIFT;
reloc->location->offset += bo->resource->start << PAGE_SHIFT;
reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
break;
case VMW_PL_GMR:
reloc->location->gmrId = bo->mem.start;
reloc->location->gmrId = bo->resource->start;
break;
case VMW_PL_MOB:
*reloc->mob_loc = bo->mem.start;
*reloc->mob_loc = bo->resource->start;
break;
default:
BUG();
@@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo,
int vmw_bo_dirty_add(struct vmw_buffer_object *vbo)
{
struct vmw_bo_dirty *dirty = vbo->dirty;
pgoff_t num_pages = vbo->base.mem.num_pages;
pgoff_t num_pages = vbo->base.resource->num_pages;
size_t size, acc_size;
int ret;
static struct ttm_operation_ctx ctx = {

@@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf)
return ret;

page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node);
if (unlikely(page_offset >= bo->mem.num_pages)) {
if (unlikely(page_offset >= bo->resource->num_pages)) {
ret = VM_FAULT_SIGBUS;
goto out_unlock;
}

@@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf)

page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
if (page_offset >= bo->mem.num_pages ||
if (page_offset >= bo->resource->num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {

@@ -529,7 +529,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf,

page_offset = vmf->pgoff -
drm_vma_node_start(&bo->base.vma_node);
if (page_offset >= bo->mem.num_pages ||
if (page_offset >= bo->resource->num_pages ||
vmw_resources_clean(vbo, page_offset,
page_offset + PAGE_SIZE,
&allowed_prefault)) {
@@ -254,7 +254,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
} *cmd;
struct ttm_buffer_object *bo = val_buf->bo;

BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))

@@ -263,7 +263,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.shid = res->id;
cmd->body.mobid = bo->mem.start;
cmd->body.mobid = bo->resource->start;
cmd->body.offsetInBytes = res->backup_offset;
res->backup_dirty = false;
vmw_cmd_commit(dev_priv, sizeof(*cmd));

@@ -282,7 +282,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
} *cmd;
struct vmw_fence_obj *fence;

BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);

cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL))

@@ -402,7 +402,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = shader->ctx->id;
cmd->body.shid = shader->id;
cmd->body.mobid = res->backup->base.mem.start;
cmd->body.mobid = res->backup->base.resource->start;
cmd->body.offsetInBytes = res->backup_offset;
vmw_cmd_commit(dev_priv, sizeof(*cmd));

@@ -450,7 +450,7 @@ static int vmw_dx_shader_bind(struct vmw_resource *res,
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;

BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
vmw_dx_shader_unscrub(res);
mutex_unlock(&dev_priv->binding_mutex);

@@ -513,7 +513,7 @@ static int vmw_dx_shader_unbind(struct vmw_resource *res,
struct vmw_fence_obj *fence;
int ret;

BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
BUG_ON(res->backup->base.resource->mem_type != VMW_PL_MOB);

mutex_lock(&dev_priv->binding_mutex);
ret = vmw_dx_shader_scrub(res);
@@ -106,7 +106,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res)
cmd->header.id = SVGA_3D_CMD_DX_BIND_STREAMOUTPUT;
cmd->header.size = sizeof(cmd->body);
cmd->body.soid = so->id;
cmd->body.mobid = res->backup->base.mem.start;
cmd->body.mobid = res->backup->base.resource->start;
cmd->body.offsetInBytes = res->backup_offset;
cmd->body.sizeInBytes = so->size;
vmw_cmd_commit(dev_priv, sizeof(*cmd));

@@ -142,7 +142,7 @@ static int vmw_dx_streamoutput_bind(struct vmw_resource *res,
struct ttm_buffer_object *bo = val_buf->bo;
int ret;

if (WARN_ON(bo->mem.mem_type != VMW_PL_MOB))
if (WARN_ON(bo->resource->mem_type != VMW_PL_MOB))
return -EINVAL;

mutex_lock(&dev_priv->binding_mutex);

@@ -197,7 +197,7 @@ static int vmw_dx_streamoutput_unbind(struct vmw_resource *res, bool readback,
struct vmw_fence_obj *fence;
int ret;

if (WARN_ON(res->backup->base.mem.mem_type != VMW_PL_MOB))
if (WARN_ON(res->backup->base.resource->mem_type != VMW_PL_MOB))
return -EINVAL;

mutex_lock(&dev_priv->binding_mutex);
@@ -1212,7 +1212,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
uint32_t submit_size;
struct ttm_buffer_object *bo = val_buf->bo;

BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);

@@ -1223,7 +1223,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
cmd1->header.size = sizeof(cmd1->body);
cmd1->body.sid = res->id;
cmd1->body.mobid = bo->mem.start;
cmd1->body.mobid = bo->resource->start;
if (res->backup_dirty) {
cmd2 = (void *) &cmd1[1];
cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE;

@@ -1266,7 +1266,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
uint8_t *cmd;

BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
@@ -719,7 +719,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem,
struct ttm_place *hop)
{
struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->resource->mem_type);
struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type);
int ret;

@@ -729,10 +729,10 @@ static int vmw_move(struct ttm_buffer_object *bo,
return ret;
}

vmw_move_notify(bo, &bo->mem, new_mem);
vmw_move_notify(bo, bo->resource, new_mem);

if (old_man->use_tt && new_man->use_tt) {
if (bo->mem.mem_type == TTM_PL_SYSTEM) {
if (bo->resource->mem_type == TTM_PL_SYSTEM) {
ttm_bo_assign_mem(bo, new_mem);
return 0;
}

@@ -741,7 +741,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
goto fail;

vmw_ttm_unbind(bo->bdev, bo->ttm);
ttm_resource_free(bo, &bo->mem);
ttm_resource_free(bo, bo->resource);
ttm_bo_assign_mem(bo, new_mem);
return 0;
} else {

@@ -751,7 +751,7 @@ static int vmw_move(struct ttm_buffer_object *bo,
}
return 0;
fail:
vmw_move_notify(bo, new_mem, &bo->mem);
vmw_move_notify(bo, new_mem, bo->resource);
return ret;
}
@@ -136,7 +136,8 @@ struct ttm_buffer_object {
* Members protected by the bo::resv::reserved lock.
*/

struct ttm_resource mem;
struct ttm_resource *resource;
struct ttm_resource _mem;
struct ttm_tt *ttm;
bool deleted;
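The header change above is the core of the patch: struct ttm_buffer_object now exposes a struct ttm_resource *resource pointer, while the old embedded structure is kept as a transitional _mem member; presumably the pointer is made to point at _mem until resources are allocated and managed separately. A rough user-space model of that intermediate layout follows; the stand-in types and the bo_model_init() helper are assumptions for illustration, not kernel API:

```c
#include <stdio.h>

struct resource_model {                  /* stands in for struct ttm_resource */
	unsigned int mem_type;
	unsigned long num_pages;
};

struct bo_model {                        /* stands in for struct ttm_buffer_object */
	struct resource_model *resource; /* what all code now dereferences */
	struct resource_model _mem;      /* transitional embedded storage */
};

/* Hypothetical init: point the new pointer at the embedded placeholder. */
static void bo_model_init(struct bo_model *bo)
{
	bo->resource = &bo->_mem;
	bo->resource->mem_type = 0;      /* e.g. a system placement */
	bo->resource->num_pages = 0;
}

int main(void)
{
	struct bo_model bo;

	bo_model_init(&bo);
	bo._mem.num_pages = 8;                    /* writes through _mem ... */
	printf("%lu\n", bo.resource->num_pages);  /* ... are seen via the pointer */
	return 0;
}
```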
@@ -181,14 +181,14 @@ static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
spin_lock(&bo->bdev->lru_lock);
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
spin_unlock(&bo->bdev->lru_lock);
}

static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
bo->mem = *new_mem;
bo->_mem = *new_mem;
new_mem->mm_node = NULL;
}
@@ -202,7 +202,7 @@ static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
static inline void ttm_bo_move_null(struct ttm_buffer_object *bo,
struct ttm_resource *new_mem)
{
struct ttm_resource *old_mem = &bo->mem;
struct ttm_resource *old_mem = bo->resource;

WARN_ON(old_mem->mm_node != NULL);
ttm_bo_assign_mem(bo, new_mem);
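The ttm_bo_assign_mem() and ttm_bo_move_null() hunks show why the transitional _mem member exists: assigning a new placement still copies into the embedded storage, and since bo->resource presumably points at that storage during this stage of the series, readers of the pointer see the update immediately. A minimal sketch of that interplay, with simplified stand-in types and that pointer-to-_mem assumption made explicit:

```c
#include <stdio.h>
#include <stddef.h>

struct resource_model {                  /* stands in for struct ttm_resource */
	unsigned int mem_type;
	void *mm_node;
};

struct bo_model {                        /* stands in for struct ttm_buffer_object */
	struct resource_model *resource; /* consumers read through this */
	struct resource_model _mem;      /* transitional backing storage */
};

/* Mirrors the shape of ttm_bo_assign_mem() after the rename: copy into
 * _mem and steal the allocator node from the source, assuming bo->resource
 * still points at bo->_mem at this point in the series.
 */
static void assign_mem(struct bo_model *bo, struct resource_model *new_mem)
{
	bo->_mem = *new_mem;
	new_mem->mm_node = NULL;
}

int main(void)
{
	struct bo_model bo = { .resource = &bo._mem };
	struct resource_model vram = { .mem_type = 2, .mm_node = &bo };

	assign_mem(&bo, &vram);
	printf("mem_type via pointer: %u\n", bo.resource->mem_type);
	printf("source node cleared: %d\n", vram.mm_node == NULL);
	return 0;
}
```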