drm/ttm: wait for BO idle in ttm_bo_move_memcpy

When we want to pipeline accelerated moves, the memcpy fallback path needs to wait for the BO to be idle before the CPU touches its memory.
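The fallback in question is the CPU copy used when a driver's accelerated move is unavailable or fails. A minimal sketch of that calling pattern, assuming a hypothetical driver callback example_bo_move() and helper example_bo_move_accel() (both made up for illustration; only ttm_bo_move_memcpy() and its new signature come from this patch):

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical accelerated-copy helper, not part of this patch. */
static int example_bo_move_accel(struct ttm_buffer_object *bo, bool evict,
                                 bool no_wait_gpu, struct ttm_mem_reg *new_mem);

/* Hypothetical driver ->move() callback, for illustration only. */
static int example_bo_move(struct ttm_buffer_object *bo, bool evict,
                           bool interruptible, bool no_wait_gpu,
                           struct ttm_mem_reg *new_mem)
{
        int r;

        /* Preferred path: copy with the GPU; the move may be pipelined,
         * i.e. this can return before the copy has actually finished.
         */
        r = example_bo_move_accel(bo, evict, no_wait_gpu, new_mem);
        if (!r)
                return 0;

        /* Fallback path: copy with the CPU.  ttm_bo_move_memcpy() now
         * waits for the BO to be idle first, so the CPU copy cannot race
         * with GPU work that is still using the buffer.
         */
        return ttm_bo_move_memcpy(bo, evict, interruptible,
                                  no_wait_gpu, new_mem);
}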

Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Christian König 2016-06-06 10:17:54 +02:00 committed by Alex Deucher
Parent 88932a7be2
Commit 77dfc28bad
7 changed files with 18 additions and 7 deletions

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

@@ -433,7 +433,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, evict, interruptible,
+				       no_wait_gpu, new_mem);
 		if (r) {
 			return r;
 		}

drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -1328,7 +1328,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	/* Fallback to software copy. */
 	ret = ttm_bo_wait(bo, intr, no_wait_gpu);
 	if (ret == 0)
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+		ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);
 
 out:
 	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {

drivers/gpu/drm/qxl/qxl_ttm.c

@@ -361,7 +361,8 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
 		qxl_move_null(bo, new_mem);
 		return 0;
 	}
-	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+	return ttm_bo_move_memcpy(bo, evict, interruptible,
+				  no_wait_gpu, new_mem);
 }
 
 static void qxl_bo_move_notify(struct ttm_buffer_object *bo,

drivers/gpu/drm/radeon/radeon_ttm.c

@@ -445,7 +445,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
 
 	if (r) {
 memcpy:
-		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
+		r = ttm_bo_move_memcpy(bo, evict, interruptible,
+				       no_wait_gpu, new_mem);
 		if (r) {
 			return r;
 		}

drivers/gpu/drm/ttm/ttm_bo.c

@@ -359,7 +359,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 		ret = bdev->driver->move(bo, evict, interruptible,
 					 no_wait_gpu, mem);
 	else
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem);
+		ret = ttm_bo_move_memcpy(bo, evict, interruptible,
+					 no_wait_gpu, mem);
 
 	if (ret) {
 		if (bdev->driver->move_notify) {

drivers/gpu/drm/ttm/ttm_bo_util.c

@@ -320,7 +320,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool no_wait_gpu,
+		       bool evict, bool interruptible,
+		       bool no_wait_gpu,
 		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -336,6 +337,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	unsigned long add = 0;
 	int dir;
 
+	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
+	if (ret)
+		return ret;
+
 	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
 	if (ret)
 		return ret;
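For reference, the two parameters consumed by the new wait follow the usual ttm_bo_wait() semantics. A rough sketch of a hypothetical caller (example_cpu_fallback() is made up for illustration, and the error codes describe general TTM wait behaviour rather than anything added by this patch):

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical caller, for illustration only. */
static int example_cpu_fallback(struct ttm_buffer_object *bo, bool evict,
                                bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
        /* interruptible = true: the wait sleeps interruptibly and may
         * return -ERESTARTSYS if a signal arrives before the BO is idle.
         * no_wait_gpu = true: the wait does not block and returns -EBUSY
         * while the GPU is still using the BO.
         * Only once the BO is idle does the CPU copy actually run.
         */
        return ttm_bo_move_memcpy(bo, evict, true, no_wait_gpu, new_mem);
}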

include/drm/ttm/ttm_bo_driver.h

@@ -970,6 +970,7 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  *
  * @bo: A pointer to a struct ttm_buffer_object.
  * @evict: 1: This is an eviction. Don't try to pipeline.
+ * @interruptible: Sleep interruptible if waiting.
  * @no_wait_gpu: Return immediately if the GPU is busy.
  * @new_mem: struct ttm_mem_reg indicating where to move.
  *
@@ -984,7 +985,8 @@ extern int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
  */
 
 extern int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-			      bool evict, bool no_wait_gpu,
+			      bool evict, bool interruptible,
+			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem);
 
 /**