drm/amdgpu: modify sdma start sequence

We should first halt the engine, then do the register
programming, then unhalt the engine, and finally run the
ring test.

This helps fix a driver reload hang issue on the SDMA
ring.

The original sequence is wrong: it programs the engine
after unhalting it, which leads to faulty behavior when
reloading the driver after it has been unloaded.

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Monk Liu 2016-05-25 16:57:14 +08:00 committed by Alex Deucher
Parent d72f7c0685
Commit 505dfe76cd
3 changed files with 24 additions and 8 deletions

View file

@ -448,7 +448,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true; ring->ready = true;
}
cik_sdma_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring); r = amdgpu_ring_test_ring(ring);
if (r) { if (r) {
ring->ready = false; ring->ready = false;
@ -531,8 +536,8 @@ static int cik_sdma_start(struct amdgpu_device *adev)
if (r) if (r)
return r; return r;
/* unhalt the MEs */ /* halt the engine before programing */
cik_sdma_enable(adev, true); cik_sdma_enable(adev, false);
/* start the gfx rings and rlc compute queues */ /* start the gfx rings and rlc compute queues */
r = cik_sdma_gfx_resume(adev); r = cik_sdma_gfx_resume(adev);

View file

@ -491,7 +491,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true; ring->ready = true;
}
sdma_v2_4_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring); r = amdgpu_ring_test_ring(ring);
if (r) { if (r) {
ring->ready = false; ring->ready = false;
@ -582,8 +586,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
return -EINVAL; return -EINVAL;
} }
/* unhalt the MEs */ /* halt the engine before programing */
sdma_v2_4_enable(adev, true); sdma_v2_4_enable(adev, false);
/* start the gfx rings and rlc compute queues */ /* start the gfx rings and rlc compute queues */
r = sdma_v2_4_gfx_resume(adev); r = sdma_v2_4_gfx_resume(adev);

View file

@ -713,7 +713,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
ring->ready = true; ring->ready = true;
}
/* unhalt the MEs */
sdma_v3_0_enable(adev, true);
/* enable sdma ring preemption */
sdma_v3_0_ctx_switch_enable(adev, true);
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
r = amdgpu_ring_test_ring(ring); r = amdgpu_ring_test_ring(ring);
if (r) { if (r) {
ring->ready = false; ring->ready = false;
@ -806,10 +814,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
} }
} }
/* unhalt the MEs */ /* disble sdma engine before programing it */
sdma_v3_0_enable(adev, true); sdma_v3_0_ctx_switch_enable(adev, false);
/* enable sdma ring preemption */ sdma_v3_0_enable(adev, false);
sdma_v3_0_ctx_switch_enable(adev, true);
/* start the gfx rings and rlc compute queues */ /* start the gfx rings and rlc compute queues */
r = sdma_v3_0_gfx_resume(adev); r = sdma_v3_0_gfx_resume(adev);