drm/ttm: Document and optimize ttm_bo_pipeline_gutting()
If the bo is idle when calling ttm_bo_pipeline_gutting(), we unnecessarily create a ghost object and push it out to delayed destroy. Fix this by adding a path for idle, and document the function. Also avoid having the bo end up in a bad state vulnerable to user-space triggered kernel BUGs if the call to ttm_tt_create() fails. Finally reuse ttm_bo_pipeline_gutting() in ttm_bo_evict(). Cc: Christian König <christian.koenig@amd.com> Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Reviewed-by: Christian König <christian.koenig@amd.com> Link: https://lore.kernel.org/r/20210602083818.241793-7-thomas.hellstrom@linux.intel.com
This commit is contained in:
Parent
053c57696c
Commit
a3be8cd70f
|
@ -503,10 +503,15 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
|
|||
bdev->funcs->evict_flags(bo, &placement);
|
||||
|
||||
if (!placement.num_placement && !placement.num_busy_placement) {
|
||||
ttm_bo_wait(bo, false, false);
|
||||
ret = ttm_bo_wait(bo, true, false);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ttm_bo_cleanup_memtype_use(bo);
|
||||
return ttm_tt_create(bo, false);
|
||||
/*
|
||||
* Since we've already synced, this frees backing store
|
||||
* immediately.
|
||||
*/
|
||||
return ttm_bo_pipeline_gutting(bo);
|
||||
}
|
||||
|
||||
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
|
||||
|
@ -947,13 +952,8 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
|
|||
/*
|
||||
* Remove the backing store if no placement is given.
|
||||
*/
|
||||
if (!placement->num_placement && !placement->num_busy_placement) {
|
||||
ret = ttm_bo_pipeline_gutting(bo);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return ttm_tt_create(bo, false);
|
||||
}
|
||||
if (!placement->num_placement && !placement->num_busy_placement)
|
||||
return ttm_bo_pipeline_gutting(bo);
|
||||
|
||||
/*
|
||||
* Check whether we need to move buffer.
|
||||
|
|
|
@ -566,26 +566,70 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
|
|||
}
|
||||
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
|
||||
|
||||
/**
|
||||
* ttm_bo_pipeline_gutting - purge the contents of a bo
|
||||
* @bo: The buffer object
|
||||
*
|
||||
* Purge the contents of a bo, async if the bo is not idle.
|
||||
* After a successful call, the bo is left unpopulated in
|
||||
* system placement. The function may wait uninterruptible
|
||||
* for idle on OOM.
|
||||
*
|
||||
* Return: 0 if successful, negative error code on failure.
|
||||
*/
|
||||
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
|
||||
{
|
||||
static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
|
||||
struct ttm_buffer_object *ghost;
|
||||
struct ttm_tt *ttm;
|
||||
int ret;
|
||||
|
||||
ret = ttm_buffer_object_transfer(bo, &ghost);
|
||||
/* If already idle, no need for ghost object dance. */
|
||||
ret = ttm_bo_wait(bo, false, true);
|
||||
if (ret != -EBUSY) {
|
||||
if (!bo->ttm) {
|
||||
/* See comment below about clearing. */
|
||||
ret = ttm_tt_create(bo, true);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
ttm_tt_unpopulate(bo->bdev, bo->ttm);
|
||||
if (bo->type == ttm_bo_type_device)
|
||||
ttm_tt_mark_for_clear(bo->ttm);
|
||||
}
|
||||
ttm_resource_free(bo, &bo->resource);
|
||||
return ttm_resource_alloc(bo, &sys_mem, &bo->resource);
|
||||
}
|
||||
|
||||
/*
|
||||
* We need an unpopulated ttm_tt after giving our current one,
|
||||
* if any, to the ghost object. And we can't afford to fail
|
||||
* creating one *after* the operation. If the bo subsequently gets
|
||||
* resurrected, make sure it's cleared (if ttm_bo_type_device)
|
||||
* to avoid leaking sensitive information to user-space.
|
||||
*/
|
||||
|
||||
ttm = bo->ttm;
|
||||
bo->ttm = NULL;
|
||||
ret = ttm_tt_create(bo, true);
|
||||
swap(bo->ttm, ttm);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = ttm_buffer_object_transfer(bo, &ghost);
|
||||
if (ret) {
|
||||
ttm_tt_destroy(bo->bdev, ttm);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv);
|
||||
/* Last resort, wait for the BO to be idle when we are OOM */
|
||||
if (ret)
|
||||
ttm_bo_wait(bo, false, false);
|
||||
|
||||
ret = ttm_resource_alloc(bo, &sys_mem, &bo->resource);
|
||||
bo->ttm = NULL;
|
||||
|
||||
dma_resv_unlock(&ghost->base._resv);
|
||||
ttm_bo_put(ghost);
|
||||
bo->ttm = ttm;
|
||||
|
||||
return ret;
|
||||
return ttm_resource_alloc(bo, &sys_mem, &bo->resource);
|
||||
}
|
||||
|
|
|
@ -170,6 +170,19 @@ int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_oper
|
|||
*/
|
||||
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
|
||||
|
||||
/**
|
||||
* ttm_tt_mark_for_clear - Mark pages for clearing on populate.
|
||||
*
|
||||
* @ttm: Pointer to the ttm_tt structure
|
||||
*
|
||||
* Marks pages for clearing so that the next time the page vector is
|
||||
* populated, the pages will be cleared.
|
||||
*/
|
||||
static inline void ttm_tt_mark_for_clear(struct ttm_tt *ttm)
|
||||
{
|
||||
ttm->page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
|
||||
}
|
||||
|
||||
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
|
||||
|
||||
struct ttm_kmap_iter *ttm_kmap_iter_tt_init(struct ttm_kmap_iter_tt *iter_tt,
|
||||
|
|
Loading…
Reference in new issue