drm/amdgpu: clean up non-scheduler code path (v2)

The non-scheduler code path is no longer supported.

v2: agd: rebased on upstream

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Ken Wang <Qingqing.Wang@amd.com>
Reviewed-by: Monk Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Parent: be86c606b5
Commit: cadf97b196
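Before this patch, every submission site branched on the amdgpu_enable_scheduler module parameter and carried a synchronous fallback; the patch deletes the fallback, leaving the SW GPU scheduler as the only submission path. A condensed before/after sketch of the pattern being removed, pieced together from the hunks below (error handling abbreviated):

        /* Before: each submission site branched on the module parameter. */
        if (amdgpu_enable_scheduler) {
                /* async path: wrap the IBs in an amdgpu_job and queue it
                 * to the per-ring scheduler entity */
                amd_sched_entity_push_job(&job->base);
        } else {
                /* sync fallback: submit the IBs directly to the ring and
                 * clean up in place */
                r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
        }

        /* After: the scheduler path runs unconditionally. */
        amd_sched_entity_push_job(&job->base);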
drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -82,7 +82,6 @@ extern int amdgpu_vm_size;
 extern int amdgpu_vm_block_size;
 extern int amdgpu_vm_fault_stop;
 extern int amdgpu_vm_debug;
-extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_enable_semaphores;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -813,7 +813,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	if (r)
 		goto out;
 
-	if (amdgpu_enable_scheduler && parser.num_ibs) {
+	if (parser.num_ibs) {
 		struct amdgpu_ring * ring = parser.ibs->ring;
 		struct amd_sched_fence *fence;
 		struct amdgpu_job *job;
@@ -858,15 +858,6 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 
 		trace_amdgpu_cs_ioctl(job);
 		amd_sched_entity_push_job(&job->base);
-
-	} else {
-		struct amdgpu_fence *fence;
-
-		r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
-				       parser.filp);
-		fence = parser.ibs[parser.num_ibs - 1].fence;
-		parser.fence = fence_get(&fence->base);
-		cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
 	}
 
 out:
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -45,29 +45,27 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 		ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
 			amdgpu_sched_jobs * i;
 	}
-	if (amdgpu_enable_scheduler) {
-		/* create context entity for each ring */
-		for (i = 0; i < adev->num_rings; i++) {
-			struct amd_sched_rq *rq;
-			if (pri >= AMD_SCHED_MAX_PRIORITY) {
-				kfree(ctx->fences);
-				return -EINVAL;
-			}
-			rq = &adev->rings[i]->sched.sched_rq[pri];
-			r = amd_sched_entity_init(&adev->rings[i]->sched,
-						  &ctx->rings[i].entity,
-						  rq, amdgpu_sched_jobs);
-			if (r)
-				break;
-		}
-
-		if (i < adev->num_rings) {
-			for (j = 0; j < i; j++)
-				amd_sched_entity_fini(&adev->rings[j]->sched,
-						      &ctx->rings[j].entity);
-			kfree(ctx->fences);
-			return r;
-		}
+	/* create context entity for each ring */
+	for (i = 0; i < adev->num_rings; i++) {
+		struct amd_sched_rq *rq;
+		if (pri >= AMD_SCHED_MAX_PRIORITY) {
+			kfree(ctx->fences);
+			return -EINVAL;
+		}
+		rq = &adev->rings[i]->sched.sched_rq[pri];
+		r = amd_sched_entity_init(&adev->rings[i]->sched,
+					  &ctx->rings[i].entity,
+					  rq, amdgpu_sched_jobs);
+		if (r)
+			break;
+	}
+
+	if (i < adev->num_rings) {
+		for (j = 0; j < i; j++)
+			amd_sched_entity_fini(&adev->rings[j]->sched,
+					      &ctx->rings[j].entity);
+		kfree(ctx->fences);
+		return r;
 	}
 	return 0;
 }
@@ -85,11 +83,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 			fence_put(ctx->rings[i].fences[j]);
 	kfree(ctx->fences);
 
-	if (amdgpu_enable_scheduler) {
-		for (i = 0; i < adev->num_rings; i++)
-			amd_sched_entity_fini(&adev->rings[i]->sched,
-					      &ctx->rings[i].entity);
-	}
+	for (i = 0; i < adev->num_rings; i++)
+		amd_sched_entity_fini(&adev->rings[i]->sched,
+				      &ctx->rings[i].entity);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -78,7 +78,6 @@ int amdgpu_vm_block_size = -1;
 int amdgpu_vm_fault_stop = 0;
 int amdgpu_vm_debug = 0;
 int amdgpu_exp_hw_support = 0;
-int amdgpu_enable_scheduler = 1;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_powerplay = -1;
@@ -152,9 +151,6 @@ module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);
 MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
 module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 
-MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)");
-module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
-
 MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
 module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
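For reference, the knob removed above was a read-only (0444) module parameter, so it could only be set at load time, e.g. with "modprobe amdgpu enable_scheduler=0". After this patch the scheduler can no longer be disabled, while the sched_jobs and sched_hw_submission tunables remain.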
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -472,6 +472,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 {
 	int i, r;
+	long timeout;
 
 	ring->fence_drv.cpu_addr = NULL;
 	ring->fence_drv.gpu_addr = 0;
@@ -486,26 +487,24 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 
 	init_waitqueue_head(&ring->fence_drv.fence_queue);
 
-	if (amdgpu_enable_scheduler) {
-		long timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
-		if (timeout == 0) {
-			/*
-			 * FIXME:
-			 * Delayed workqueue cannot use it directly,
-			 * so the scheduler will not use delayed workqueue if
-			 * MAX_SCHEDULE_TIMEOUT is set.
-			 * Currently keep it simple and silly.
-			 */
-			timeout = MAX_SCHEDULE_TIMEOUT;
-		}
-		r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
-				   amdgpu_sched_hw_submission,
-				   timeout, ring->name);
-		if (r) {
-			DRM_ERROR("Failed to create scheduler on ring %s.\n",
-				  ring->name);
-			return r;
-		}
+	timeout = msecs_to_jiffies(amdgpu_lockup_timeout);
+	if (timeout == 0) {
+		/*
+		 * FIXME:
+		 * Delayed workqueue cannot use it directly,
+		 * so the scheduler will not use delayed workqueue if
+		 * MAX_SCHEDULE_TIMEOUT is set.
+		 * Currently keep it simple and silly.
+		 */
+		timeout = MAX_SCHEDULE_TIMEOUT;
+	}
+	r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+			   amdgpu_sched_hw_submission,
+			   timeout, ring->name);
+	if (r) {
+		DRM_ERROR("Failed to create scheduler on ring %s.\n",
+			  ring->name);
+		return r;
 	}
 
 	return 0;
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -199,10 +199,6 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
 		return r;
 	}
 
-	if (!amdgpu_enable_scheduler && ib->ctx)
-		ib->sequence = amdgpu_ctx_add_fence(ib->ctx, ring,
-						    &ib->fence->base);
-
 	/* wrap the last IB with fence */
 	if (ib->user) {
 		uint64_t addr = amdgpu_bo_gpu_offset(ib->user->bo);
drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -76,33 +76,25 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
 					 void *owner,
 					 struct fence **f)
 {
-	int r = 0;
-	if (amdgpu_enable_scheduler) {
-		struct amdgpu_job *job =
-			kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
-		if (!job)
-			return -ENOMEM;
-		job->base.sched = &ring->sched;
-		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
-		job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
-		if (!job->base.s_fence) {
-			kfree(job);
-			return -ENOMEM;
-		}
-		*f = fence_get(&job->base.s_fence->base);
-
-		job->adev = adev;
-		job->ibs = ibs;
-		job->num_ibs = num_ibs;
-		job->owner = owner;
-		job->free_job = free_job;
-		amd_sched_entity_push_job(&job->base);
-	} else {
-		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
-		if (r)
-			return r;
-		*f = fence_get(&ibs[num_ibs - 1].fence->base);
-	}
-	return r;
+	struct amdgpu_job *job =
+		kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
+	if (!job)
+		return -ENOMEM;
+	job->base.sched = &ring->sched;
+	job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+	job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
+	if (!job->base.s_fence) {
+		kfree(job);
+		return -ENOMEM;
+	}
+	*f = fence_get(&job->base.s_fence->base);
+
+	job->adev = adev;
+	job->ibs = ibs;
+	job->num_ibs = num_ibs;
+	job->owner = owner;
+	job->free_job = free_job;
+	amd_sched_entity_push_job(&job->base);
+
+	return 0;
 }
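With the synchronous fallback gone, the kernel helper above always hands the IBs to the scheduler along with a free_job callback, so a finished job's IBs are released by the scheduler's completion path rather than at the call site. That is why, in the UVD/VCE/VM hunks below, the "if (amdgpu_enable_scheduler) return 0;" early-outs collapse to a plain "return 0;" and the trailing amdgpu_ib_free()/kfree() calls disappear from the success paths.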
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1070,10 +1070,6 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 	if (r)
 		goto error_free;
 
-	if (!amdgpu_enable_scheduler) {
-		amdgpu_ib_free(adev, ib);
-		kfree(ib);
-	}
 	return 0;
 error_free:
 	amdgpu_ib_free(adev, ib);
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -895,11 +895,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
 		*fence = fence_get(f);
 	amdgpu_bo_unref(&bo);
 	fence_put(f);
-	if (amdgpu_enable_scheduler)
-		return 0;
 
-	amdgpu_ib_free(ring->adev, ib);
-	kfree(ib);
 	return 0;
 err2:
 	amdgpu_ib_free(ring->adev, ib);
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -432,8 +432,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	if (fence)
 		*fence = fence_get(f);
 	fence_put(f);
-	if (amdgpu_enable_scheduler)
-		return 0;
+	return 0;
 err:
 	amdgpu_ib_free(adev, ib);
 	kfree(ib);
@@ -499,8 +498,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	if (fence)
 		*fence = fence_get(f);
 	fence_put(f);
-	if (amdgpu_enable_scheduler)
-		return 0;
+	return 0;
 err:
 	amdgpu_ib_free(adev, ib);
 	kfree(ib);
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -401,8 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	if (!r)
 		amdgpu_bo_fence(bo, fence, true);
 	fence_put(fence);
-	if (amdgpu_enable_scheduler)
-		return 0;
+	return 0;
 
 error_free:
 	amdgpu_ib_free(adev, ib);
@@ -536,7 +535,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		fence_put(fence);
 	}
 
-	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
+	if (ib->length_dw == 0) {
 		amdgpu_ib_free(adev, ib);
 		kfree(ib);
 	}
@@ -819,10 +818,6 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 		*fence = fence_get(f);
 	}
 	fence_put(f);
-	if (!amdgpu_enable_scheduler) {
-		amdgpu_ib_free(adev, ib);
-		kfree(ib);
-	}
 	return 0;
 
 error_free: