drm/amdgpu: move the context from the IBs into the job

We only have one context for all IBs.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Christian König 2016-05-06 15:57:42 +02:00 committed by Alex Deucher
Parent f153d2867b
Commit 92f250989b
4 changed files with 24 additions and 17 deletions

View file

@@ -743,7 +743,6 @@ struct amdgpu_ib {
struct amdgpu_user_fence *user;
unsigned vm_id;
uint64_t vm_pd_addr;
uint64_t ctx;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
uint32_t oa_base, oa_size;
@@ -1262,6 +1261,7 @@ struct amdgpu_job {
struct fence *fence; /* the hw fence */
uint32_t num_ibs;
void *owner;
uint64_t ctx;
struct amdgpu_user_fence uf;
};
#define to_amdgpu_job(sched_job) \

View file

@@ -741,7 +741,6 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
ib->length_dw = chunk_ib->ib_bytes / 4;
ib->flags = chunk_ib->flags;
ib->ctx = parser->ctx->rings[ring->idx].entity.fence_context;
j++;
}
@@ -840,6 +839,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_ring *ring = p->job->ring;
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
struct fence *fence;
struct amdgpu_job *job;
int r;
@@ -848,16 +848,16 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
p->job = NULL;
r = amd_sched_job_init(&job->base, &ring->sched,
&p->ctx->rings[ring->idx].entity,
amdgpu_job_timeout_func,
amdgpu_job_free_func,
p->filp, &fence);
entity, amdgpu_job_timeout_func,
amdgpu_job_free_func,
p->filp, &fence);
if (r) {
amdgpu_job_free(job);
return r;
}
job->owner = p->filp;
job->ctx = entity->fence_context;
p->fence = fence_get(fence);
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
job->ibs[job->num_ibs - 1].sequence = cs->out.handle;

View file

@@ -121,18 +121,26 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib *ib = &ibs[0];
struct fence *hwf;
struct amdgpu_vm *vm = NULL;
unsigned i, patch_offset = ~0;
bool skip_preamble, need_ctx_switch;
unsigned patch_offset = ~0;
struct amdgpu_vm *vm;
struct fence *hwf;
uint64_t ctx;
unsigned i;
int r = 0;
if (num_ibs == 0)
return -EINVAL;
if (job) /* for domain0 job like ring test, ibs->job is not assigned */
/* ring tests don't use a job */
if (job) {
vm = job->vm;
ctx = job->ctx;
} else {
vm = NULL;
ctx = 0;
}
if (!ring->ready) {
dev_err(adev->dev, "couldn't schedule ib\n");
@@ -170,8 +178,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
/* always set cond_exec_polling to CONTINUE */
*ring->cond_exe_cpu_addr = 1;
skip_preamble = ring->current_ctx == ib->ctx;
need_ctx_switch = ring->current_ctx != ib->ctx;
skip_preamble = ring->current_ctx == ctx;
need_ctx_switch = ring->current_ctx != ctx;
for (i = 0; i < num_ibs; ++i) {
ib = &ibs[i];
@@ -209,7 +217,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
ring->current_ctx = ibs->ctx;
ring->current_ctx = ctx;
amdgpu_ring_commit(ring);
return 0;
}

View file

@@ -122,14 +122,13 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
return -EINVAL;
r = amd_sched_job_init(&job->base, &ring->sched,
entity,
amdgpu_job_timeout_func,
amdgpu_job_free_func,
owner, &fence);
entity, amdgpu_job_timeout_func,
amdgpu_job_free_func, owner, &fence);
if (r)
return r;
job->owner = owner;
job->ctx = entity->fence_context;
*f = fence_get(fence);
amd_sched_entity_push_job(&job->base);