drm/ttm: enable swapout for reserved BOs during allocation
If the bo shares the same reservation object, do not lock it again at swapout time, so that it remains possible to swap it out. v2: refine the commit message. Reviewed-by: Thomas Hellström <thellstrom@vmware.com> Reviewed-by: Christian König <christian.koenig@amd.com> Reviewed-by: Chunming Zhou <david1.zhou@amd.com> Signed-off-by: Roger He <Hongbo.He@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Parent
d5769ba315
Commit
dc947770cf
|
@ -1699,18 +1699,20 @@ EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
|
|||
* A buffer object shrink method that tries to swap out the first
|
||||
* buffer object on the bo_global::swap_lru list.
|
||||
*/
|
||||
int ttm_bo_swapout(struct ttm_bo_global *glob)
|
||||
int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
|
||||
{
|
||||
struct ttm_buffer_object *bo;
|
||||
int ret = -EBUSY;
|
||||
bool locked;
|
||||
unsigned i;
|
||||
|
||||
spin_lock(&glob->lru_lock);
|
||||
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
|
||||
list_for_each_entry(bo, &glob->swap_lru[i], swap) {
|
||||
ret = reservation_object_trylock(bo->resv) ? 0 : -EBUSY;
|
||||
if (!ret)
|
||||
if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked)) {
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!ret)
|
||||
break;
|
||||
|
@ -1786,7 +1788,12 @@ EXPORT_SYMBOL(ttm_bo_swapout);
|
|||
|
||||
void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
|
||||
{
|
||||
while (ttm_bo_swapout(bdev->glob) == 0)
|
||||
struct ttm_operation_ctx ctx = {
|
||||
.interruptible = false,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
|
||||
while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
|
||||
;
|
||||
}
|
||||
EXPORT_SYMBOL(ttm_bo_swapout_all);
|
||||
|
|
|
@ -211,7 +211,7 @@ static bool ttm_zones_above_swap_target(struct ttm_mem_global *glob,
|
|||
*/
|
||||
|
||||
static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
|
||||
uint64_t extra)
|
||||
uint64_t extra, struct ttm_operation_ctx *ctx)
|
||||
{
|
||||
int ret;
|
||||
|
||||
|
@ -219,7 +219,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
|
|||
|
||||
while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
|
||||
spin_unlock(&glob->lock);
|
||||
ret = ttm_bo_swapout(glob->bo_glob);
|
||||
ret = ttm_bo_swapout(glob->bo_glob, ctx);
|
||||
spin_lock(&glob->lock);
|
||||
if (unlikely(ret != 0))
|
||||
break;
|
||||
|
@ -230,10 +230,14 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
|
|||
|
||||
static void ttm_shrink_work(struct work_struct *work)
|
||||
{
|
||||
struct ttm_operation_ctx ctx = {
|
||||
.interruptible = false,
|
||||
.no_wait_gpu = false
|
||||
};
|
||||
struct ttm_mem_global *glob =
|
||||
container_of(work, struct ttm_mem_global, work);
|
||||
|
||||
ttm_shrink(glob, true, 0ULL);
|
||||
ttm_shrink(glob, true, 0ULL, &ctx);
|
||||
}
|
||||
|
||||
static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
|
||||
|
@ -520,7 +524,7 @@ static int ttm_mem_global_alloc_zone(struct ttm_mem_global *glob,
|
|||
return -ENOMEM;
|
||||
if (unlikely(count-- == 0))
|
||||
return -ENOMEM;
|
||||
ttm_shrink(glob, false, memory + (memory >> 2) + 16);
|
||||
ttm_shrink(glob, false, memory + (memory >> 2) + 16, ctx);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -752,7 +752,8 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
|
|||
const char __user *wbuf, char __user *rbuf,
|
||||
size_t count, loff_t *f_pos, bool write);
|
||||
|
||||
int ttm_bo_swapout(struct ttm_bo_global *glob);
|
||||
int ttm_bo_swapout(struct ttm_bo_global *glob,
|
||||
struct ttm_operation_ctx *ctx);
|
||||
void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
|
||||
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
|
||||
#endif
|
||||
|
|
Loading…
Reference in a new issue