drm/radeon: use embedded gem object
Drop drm_gem_object from radeon_bo, use the ttm_buffer_object.base instead. Build tested only.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20190805140119.7337-5-kraxel@redhat.com
This commit is contained in:
Parent: e0828d54c8
Commit: ce77038fda
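The pattern behind the change: the GEM object is embedded inside the TTM buffer object, so the driver object can still be recovered with container_of() via tbo.base instead of carrying a separate gem_base copy. Below is a minimal user-space sketch of that pattern; the struct definitions are simplified stand-ins, not the real kernel structs, and only the macro shape mirrors the updated gem_to_radeon_bo in the diff.

/* sketch.c - embedded-object pattern, simplified stand-in structs */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_gem_object {                 /* stand-in, not the kernel struct */
        size_t size;
};

struct ttm_buffer_object {              /* stand-in: GEM object embedded as .base */
        struct drm_gem_object base;
};

struct radeon_bo {                      /* stand-in: GEM object now lives at tbo.base */
        struct ttm_buffer_object tbo;
        int surface_reg;
};

/* Same shape as the updated macro in radeon.h. */
#define gem_to_radeon_bo(gobj) \
        container_of((gobj), struct radeon_bo, tbo.base)

int main(void)
{
        struct radeon_bo bo = { .surface_reg = -1 };
        struct drm_gem_object *gobj = &bo.tbo.base;

        /* container_of() recovers the enclosing radeon_bo from the embedded GEM object. */
        printf("round trip ok: %d\n", gem_to_radeon_bo(gobj) == &bo);
        return 0;
}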
drivers/gpu/drm/radeon/radeon.h

@@ -505,7 +505,6 @@ struct radeon_bo {
         struct list_head va;
         /* Constant after initialization */
         struct radeon_device *rdev;
-        struct drm_gem_object gem_base;
 
         struct ttm_bo_kmap_obj dma_buf_vmap;
         pid_t pid;
@@ -513,7 +512,7 @@ struct radeon_bo {
         struct radeon_mn *mn;
         struct list_head mn_list;
 };
-#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)
 
 int radeon_gem_debugfs_init(struct radeon_device *rdev);
 
drivers/gpu/drm/radeon/radeon_cs.c

@@ -443,7 +443,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
                         if (bo == NULL)
                                 continue;
 
-                        drm_gem_object_put_unlocked(&bo->gem_base);
+                        drm_gem_object_put_unlocked(&bo->tbo.base);
                 }
         }
         kfree(parser->track);
drivers/gpu/drm/radeon/radeon_display.c

@@ -275,7 +275,7 @@ static void radeon_unpin_work_func(struct work_struct *__work)
         } else
                 DRM_ERROR("failed to reserve buffer after flip\n");
 
-        drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
+        drm_gem_object_put_unlocked(&work->old_rbo->tbo.base);
         kfree(work);
 }
 
@@ -607,7 +607,7 @@ pflip_cleanup:
         radeon_bo_unreserve(new_rbo);
 
 cleanup:
-        drm_gem_object_put_unlocked(&work->old_rbo->gem_base);
+        drm_gem_object_put_unlocked(&work->old_rbo->tbo.base);
         dma_fence_put(work->fence);
         kfree(work);
         return r;
drivers/gpu/drm/radeon/radeon_gem.c

@@ -83,7 +83,7 @@ retry:
                 }
                 return r;
         }
-        *obj = &robj->gem_base;
+        *obj = &robj->tbo.base;
         robj->pid = task_pid_nr(current);
 
         mutex_lock(&rdev->gem.mutex);
drivers/gpu/drm/radeon/radeon_object.c

@@ -85,9 +85,9 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
         mutex_unlock(&bo->rdev->gem.mutex);
         radeon_bo_clear_surface_reg(bo);
         WARN_ON_ONCE(!list_empty(&bo->va));
-        if (bo->gem_base.import_attach)
-                drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
-        drm_gem_object_release(&bo->gem_base);
+        if (bo->tbo.base.import_attach)
+                drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+        drm_gem_object_release(&bo->tbo.base);
         kfree(bo);
 }
 
@@ -209,7 +209,7 @@ int radeon_bo_create(struct radeon_device *rdev,
         bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
         if (bo == NULL)
                 return -ENOMEM;
-        drm_gem_private_object_init(rdev->ddev, &bo->gem_base, size);
+        drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
         bo->rdev = rdev;
         bo->surface_reg = -1;
         INIT_LIST_HEAD(&bo->list);
@@ -262,7 +262,7 @@ int radeon_bo_create(struct radeon_device *rdev,
         r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
                         &bo->placement, page_align, !kernel, acc_size,
                         sg, resv, &radeon_ttm_bo_destroy);
-        bo->gem_base.resv = bo->tbo.resv;
+        bo->tbo.base.resv = bo->tbo.resv;
         up_read(&rdev->pm.mclk_lock);
         if (unlikely(r != 0)) {
                 return r;
@@ -443,13 +443,13 @@ void radeon_bo_force_delete(struct radeon_device *rdev)
         dev_err(rdev->dev, "Userspace still has active objects !\n");
         list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
                 dev_err(rdev->dev, "%p %p %lu %lu force free\n",
-                        &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
-                        *((unsigned long *)&bo->gem_base.refcount));
+                        &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
+                        *((unsigned long *)&bo->tbo.base.refcount));
                 mutex_lock(&bo->rdev->gem.mutex);
                 list_del_init(&bo->list);
                 mutex_unlock(&bo->rdev->gem.mutex);
                 /* this should unref the ttm bo */
-                drm_gem_object_put_unlocked(&bo->gem_base);
+                drm_gem_object_put_unlocked(&bo->tbo.base);
         }
 }
 
drivers/gpu/drm/radeon/radeon_prime.c

@@ -80,7 +80,7 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
         mutex_unlock(&rdev->gem.mutex);
 
         bo->prime_shared_count = 1;
-        return &bo->gem_base;
+        return &bo->tbo.base;
 }
 
 int radeon_gem_prime_pin(struct drm_gem_object *obj)
drivers/gpu/drm/radeon/radeon_ttm.c

@@ -184,7 +184,7 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
 
         if (radeon_ttm_tt_has_userptr(bo->ttm))
                 return -EPERM;
-        return drm_vma_node_verify_access(&rbo->gem_base.vma_node,
+        return drm_vma_node_verify_access(&rbo->tbo.base.vma_node,
                                           filp->private_data);
 }
 