drm/ttm: revise ttm_bo_move_to_lru_tail to support bulk moves

When moving a BO to the end of the LRU, optionally remember its position in a
bulk move structure. All BOs moved this way end up between that structure's
"first" and "last" entries, so they can later be moved to the LRU tail
together in one bulk operation.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Huang Rui <ray.huang@amd.com>
Tested-by: Mike Lothian <mike@fireburn.co.uk>
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Acked-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Christian König authored 2018-08-06 17:05:30 +08:00, committed by Alex Deucher
Parent: 8c7655a0fd
Commit: 9a2779528e
3 changed files with 34 additions and 6 deletions
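For context: the "first"/"last" bookkeeping mentioned above lives in struct
ttm_lru_bulk_move, which this patch only forward-declares; it is defined by a
companion TTM patch in the same series. As a reference sketch (not part of
this diff), it keeps one remembered range per LRU list and priority level:

/* One remembered range per LRU list, filled in by ttm_bo_move_to_lru_tail(). */
struct ttm_lru_bulk_move_pos {
	struct ttm_buffer_object *first;
	struct ttm_buffer_object *last;
};

/* Ranges per memory type and BO priority (layout from the companion patch). */
struct ttm_lru_bulk_move {
	struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
	struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
	struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
};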

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -297,9 +297,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		if (bo->parent) {
 			spin_lock(&glob->lru_lock);
-			ttm_bo_move_to_lru_tail(&bo->tbo);
+			ttm_bo_move_to_lru_tail(&bo->tbo, NULL);
 			if (bo->shadow)
-				ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
+				ttm_bo_move_to_lru_tail(&bo->shadow->tbo, NULL);
 			spin_unlock(&glob->lru_lock);
 		}
@@ -319,9 +319,9 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		if (!bo->parent)
 			continue;
-		ttm_bo_move_to_lru_tail(&bo->tbo);
+		ttm_bo_move_to_lru_tail(&bo->tbo, NULL);
 		if (bo->shadow)
-			ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
+			ttm_bo_move_to_lru_tail(&bo->shadow->tbo, NULL);
 	}
 	spin_unlock(&glob->lru_lock);

drivers/gpu/drm/ttm/ttm_bo.c

@@ -214,12 +214,36 @@ void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
 }
 EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
 
-void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
+static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos,
+				     struct ttm_buffer_object *bo)
+{
+	if (!pos->first)
+		pos->first = bo;
+	pos->last = bo;
+}
+
+void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
+			     struct ttm_lru_bulk_move *bulk)
 {
 	reservation_object_assert_held(bo->resv);
 
 	ttm_bo_del_from_lru(bo);
 	ttm_bo_add_to_lru(bo);
+
+	if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+		switch (bo->mem.mem_type) {
+		case TTM_PL_TT:
+			ttm_bo_bulk_move_set_pos(&bulk->tt[bo->priority], bo);
+			break;
+
+		case TTM_PL_VRAM:
+			ttm_bo_bulk_move_set_pos(&bulk->vram[bo->priority], bo);
+			break;
+		}
+
+		if (bo->ttm && !(bo->ttm->page_flags &
+				 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED)))
+			ttm_bo_bulk_move_set_pos(&bulk->swap[bo->priority], bo);
+	}
 }
 EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
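To show how the new parameter is meant to be used, here is a minimal,
hypothetical caller sketch (not part of this patch). The my_vm/my_bo types and
the bo_list field are made-up illustration names, the splice helper
ttm_bo_bulk_move_lru_tail() only arrives in a later patch of this series, and
all BOs are assumed to share a reservation object that the caller already
holds, which is what satisfies the reservation_object_assert_held() check.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/* Hypothetical per-driver bookkeeping, for illustration only. */
struct my_bo {
	struct ttm_buffer_object tbo;
	struct list_head vm_list;
};

struct my_vm {
	struct list_head bo_list;		/* all BOs of this VM */
	struct ttm_lru_bulk_move lru_bulk_move;	/* remembered LRU positions */
};

static void my_vm_move_to_lru_tail(struct ttm_bo_global *glob, struct my_vm *vm)
{
	struct my_bo *entry;

	/* Start with an empty set of remembered ranges. */
	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));

	spin_lock(&glob->lru_lock);
	list_for_each_entry(entry, &vm->bo_list, vm_list) {
		/* Record where each BO ends up; bulk collects [first, last]. */
		ttm_bo_move_to_lru_tail(&entry->tbo, &vm->lru_bulk_move);
	}
	/*
	 * A follow-up patch adds ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move)
	 * here to move all remembered ranges to the LRU tails in one step.
	 */
	spin_unlock(&glob->lru_lock);
}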

include/drm/ttm/ttm_bo_api.h

@@ -51,6 +51,8 @@ struct ttm_placement;
 struct ttm_place;
 
+struct ttm_lru_bulk_move;
+
 /**
  * struct ttm_bus_placement
  *
@@ -405,12 +407,14 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
  * ttm_bo_move_to_lru_tail
  *
  * @bo: The buffer object.
+ * @bulk: optional bulk move structure to remember BO positions
  *
  * Move this BO to the tail of all lru lists used to lookup and reserve an
  * object. This function must be called with struct ttm_bo_global::lru_lock
  * held, and is used to make a BO less likely to be considered for eviction.
  */
-void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo);
+void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
+			     struct ttm_lru_bulk_move *bulk);
 
 /**
  * ttm_bo_lock_delayed_workqueue