drm/amd: abstract kernel rq and normal rq to priority of run queue

Allows us to set priorities in the scheduler.

Signed-off-by: Chunming Zhou <David1.Zhou@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Parent: ccba7691a5
Commit: d033a6de80
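In short: the scheduler's two fixed run queues (kernel_rq and sched_rq) become a single array indexed by the new amd_sched_priority enum, so initialization and entity selection can loop over priority levels instead of special-casing the kernel queue. A minimal sketch of the resulting access pattern (names taken from the diff below):

	/* before: two dedicated run-queue fields, chosen by a bool */
	rq = kernel ? &sched->kernel_rq : &sched->sched_rq;

	/* after: one array indexed by priority */
	rq = &sched->sched_rq[pri];	/* pri < AMD_SCHED_MAX_PRIORITY */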
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -1044,7 +1044,7 @@ struct amdgpu_ctx_mgr {
 	struct idr	ctx_handles;
 };
 
-int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 		    struct amdgpu_ctx *ctx);
 void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
 
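A hypothetical caller of the new declaration (mirroring the call sites changed below; the error value follows from the -EINVAL check added in amdgpu_ctx_init):

	struct amdgpu_ctx ctx;
	int r;

	r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, &ctx);
	if (r)	/* e.g. -EINVAL for an out-of-range priority */
		return r;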
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c

@@ -25,7 +25,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 		    struct amdgpu_ctx *ctx)
 {
 	unsigned i, j;
@@ -42,10 +42,9 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
 	/* create context entity for each ring */
 	for (i = 0; i < adev->num_rings; i++) {
 		struct amd_sched_rq *rq;
-		if (kernel)
-			rq = &adev->rings[i]->sched.kernel_rq;
-		else
-			rq = &adev->rings[i]->sched.sched_rq;
+		if (pri >= AMD_SCHED_MAX_PRIORITY)
+			return -EINVAL;
+		rq = &adev->rings[i]->sched.sched_rq[pri];
 		r = amd_sched_entity_init(&adev->rings[i]->sched,
 					  &ctx->rings[i].entity,
 					  rq, amdgpu_sched_jobs);
@@ -103,7 +102,7 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 		return r;
 	}
 	*id = (uint32_t)r;
-	r = amdgpu_ctx_init(adev, false, ctx);
+	r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx);
 	mutex_unlock(&mgr->lock);
 
 	return r;
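The old bool flag maps onto the new enum one-to-one. A hypothetical helper illustrating the correspondence (not part of this commit; amdgpu_to_sched_priority is an assumed name):

	static inline enum amd_sched_priority
	amdgpu_to_sched_priority(bool kernel)
	{
		/* true used to select kernel_rq; false the normal queue */
		return kernel ? AMD_SCHED_PRIORITY_KERNEL
			      : AMD_SCHED_PRIORITY_NORMAL;
	}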
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -1528,7 +1528,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		return r;
 	}
 
-	r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
+	r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx);
 	if (r) {
 		dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
 		return r;
drivers/gpu/drm/amd/scheduler/gpu_scheduler.c

@@ -348,14 +348,17 @@ static struct amd_sched_entity *
 amd_sched_select_entity(struct amd_gpu_scheduler *sched)
 {
 	struct amd_sched_entity *entity;
+	int i;
 
 	if (!amd_sched_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	entity = amd_sched_rq_select_entity(&sched->kernel_rq);
-	if (entity == NULL)
-		entity = amd_sched_rq_select_entity(&sched->sched_rq);
+	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
+		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
+		if (entity)
+			break;
+	}
 
 	return entity;
 }
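Because AMD_SCHED_PRIORITY_KERNEL is 0, the ascending loop polls the kernel queue first and only falls back to normal priority when no kernel entity is runnable, which is exactly what the replaced two-step lookup did:

	/* old, fixed two-level form (removed above) */
	entity = amd_sched_rq_select_entity(&sched->kernel_rq);
	if (entity == NULL)
		entity = amd_sched_rq_select_entity(&sched->sched_rq);

The loop form generalizes this strict-priority policy to any number of levels.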
@@ -477,12 +480,13 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 		   struct amd_sched_backend_ops *ops,
 		   unsigned hw_submission, long timeout, const char *name)
 {
+	int i;
 	sched->ops = ops;
 	sched->hw_submission_limit = hw_submission;
 	sched->name = name;
 	sched->timeout = timeout;
-	amd_sched_rq_init(&sched->sched_rq);
-	amd_sched_rq_init(&sched->kernel_rq);
+	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
+		amd_sched_rq_init(&sched->sched_rq[i]);
 
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h

@@ -104,6 +104,12 @@ struct amd_sched_backend_ops {
 	struct fence *(*run_job)(struct amd_sched_job *sched_job);
 };
 
+enum amd_sched_priority {
+	AMD_SCHED_PRIORITY_KERNEL = 0,
+	AMD_SCHED_PRIORITY_NORMAL,
+	AMD_SCHED_MAX_PRIORITY
+};
+
 /**
  * One scheduler is implemented for each hardware ring
  */
@@ -112,8 +118,7 @@ struct amd_gpu_scheduler {
 	uint32_t		hw_submission_limit;
 	long			timeout;
 	const char		*name;
-	struct amd_sched_rq	sched_rq;
-	struct amd_sched_rq	kernel_rq;
+	struct amd_sched_rq	sched_rq[AMD_SCHED_MAX_PRIORITY];
 	wait_queue_head_t	wake_up_worker;
 	wait_queue_head_t	job_scheduled;
 	atomic_t		hw_rq_count;
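Since amd_sched_init() and amd_sched_select_entity() both iterate up to AMD_SCHED_MAX_PRIORITY, a later patch could introduce another level by extending the enum alone. A hypothetical sketch (AMD_SCHED_PRIORITY_HIGH is an assumption, not part of this commit):

	enum amd_sched_priority {
		AMD_SCHED_PRIORITY_KERNEL = 0,
		AMD_SCHED_PRIORITY_HIGH,	/* hypothetical new level */
		AMD_SCHED_PRIORITY_NORMAL,
		AMD_SCHED_MAX_PRIORITY
	};

The array size, the init loop, and the selection order would all follow automatically from the enum.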