drm/radeon: never unpin UVD bo v3

Changing the UVD BO's offset on suspend/resume doesn't work because the VCPU
internally keeps pointers to it. Just keep it always pinned and save the
contents manually instead.

Fixes: https://bugs.freedesktop.org/show_bug.cgi?id=66425

v2: fix compiler warning
v3: fix CIK support

Note: a version of this patch needs to go to stable.

Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Authored by Christian König on 2013-07-12 10:18:09 -04:00; committed by Alex Deucher
Parent: c9a6ca4abd
Commit: 9cc2e0e9f1
5 changed files, 65 additions and 68 deletions
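For orientation, the heart of the change is the new suspend/resume pair added to radeon_uvd.c (shown in full in the hunks below). This is a condensed restatement with explanatory comments added here; it assumes the usual radeon driver headers are in scope and, like the patch itself, it does not check the kmalloc result:

int radeon_uvd_suspend(struct radeon_device *rdev)
{
        unsigned size;

        if (rdev->uvd.vcpu_bo == NULL)
                return 0;

        /* The VCPU BO stays pinned in VRAM; only its contents are saved,
         * so the GPU address the VCPU has cached never changes. */
        size = radeon_bo_size(rdev->uvd.vcpu_bo);
        rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
        memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size);

        return 0;
}

int radeon_uvd_resume(struct radeon_device *rdev)
{
        if (rdev->uvd.vcpu_bo == NULL)
                return -EINVAL;

        /* Copy the saved image back into the still-pinned BO */
        if (rdev->uvd.saved_bo != NULL) {
                unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo);
                memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size);
                kfree(rdev->uvd.saved_bo);
                rdev->uvd.saved_bo = NULL;
        }

        return 0;
}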

@@ -6978,7 +6978,7 @@ int cik_uvd_resume(struct radeon_device *rdev)
         /* programm the VCPU memory controller bits 0-27 */
         addr = rdev->uvd.gpu_addr >> 3;
-        size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+        size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
         WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
         WREG32(UVD_VCPU_CACHE_SIZE0, size);

@@ -1460,6 +1460,8 @@ struct radeon_uvd {
         struct radeon_bo *vcpu_bo;
         void *cpu_addr;
         uint64_t gpu_addr;
+        void *saved_bo;
+        unsigned fw_size;
         atomic_t handles[RADEON_MAX_UVD_HANDLES];
         struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
         struct delayed_work idle_work;
@@ -2054,7 +2056,6 @@ struct radeon_device {
         const struct firmware *rlc_fw; /* r6/700 RLC firmware */
         const struct firmware *mc_fw;  /* NI MC firmware */
         const struct firmware *ce_fw;  /* SI CE firmware */
-        const struct firmware *uvd_fw; /* UVD firmware */
         const struct firmware *mec_fw; /* CIK MEC firmware */
         const struct firmware *sdma_fw; /* CIK SDMA firmware */
         const struct firmware *smc_fw; /* SMC firmware */

@@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
         } else {
                 /* put fence directly behind firmware */
-                index = ALIGN(rdev->uvd_fw->size, 8);
+                index = ALIGN(rdev->uvd.fw_size, 8);
                 rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
                 rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
         }

@@ -56,6 +56,7 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work);
 int radeon_uvd_init(struct radeon_device *rdev)
 {
+        const struct firmware *fw;
         unsigned long bo_size;
         const char *fw_name;
         int i, r;
@@ -104,14 +105,14 @@ int radeon_uvd_init(struct radeon_device *rdev)
                 return -EINVAL;
         }
-        r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
+        r = request_firmware(&fw, fw_name, rdev->dev);
         if (r) {
                 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
                         fw_name);
                 return r;
         }
-        bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
+        bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) +
                   RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
         r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
                              RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
@@ -120,64 +121,6 @@ int radeon_uvd_init(struct radeon_device *rdev)
                 return r;
         }
-        r = radeon_uvd_resume(rdev);
-        if (r)
-                return r;
-        memset(rdev->uvd.cpu_addr, 0, bo_size);
-        memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
-        r = radeon_uvd_suspend(rdev);
-        if (r)
-                return r;
-        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-                atomic_set(&rdev->uvd.handles[i], 0);
-                rdev->uvd.filp[i] = NULL;
-        }
-        return 0;
-}
-void radeon_uvd_fini(struct radeon_device *rdev)
-{
-        radeon_uvd_suspend(rdev);
-        radeon_bo_unref(&rdev->uvd.vcpu_bo);
-}
-int radeon_uvd_suspend(struct radeon_device *rdev)
-{
-        int r;
-        if (rdev->uvd.vcpu_bo == NULL)
-                return 0;
-        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
-        if (!r) {
-                radeon_bo_kunmap(rdev->uvd.vcpu_bo);
-                radeon_bo_unpin(rdev->uvd.vcpu_bo);
-                rdev->uvd.cpu_addr = NULL;
-                if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
-                        radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
-                }
-                radeon_bo_unreserve(rdev->uvd.vcpu_bo);
-                if (rdev->uvd.cpu_addr) {
-                        radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
-                } else {
-                        rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
-                }
-        }
-        return r;
-}
-int radeon_uvd_resume(struct radeon_device *rdev)
-{
-        int r;
-        if (rdev->uvd.vcpu_bo == NULL)
-                return -EINVAL;
         r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
         if (r) {
                 radeon_bo_unref(&rdev->uvd.vcpu_bo);
@@ -185,10 +128,6 @@ int radeon_uvd_resume(struct radeon_device *rdev)
                 return r;
         }
-        /* Have been pin in cpu unmap unpin */
-        radeon_bo_kunmap(rdev->uvd.vcpu_bo);
-        radeon_bo_unpin(rdev->uvd.vcpu_bo);
         r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
                           &rdev->uvd.gpu_addr);
         if (r) {
@@ -206,6 +145,63 @@
         radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+        rdev->uvd.fw_size = fw->size;
+        memset(rdev->uvd.cpu_addr, 0, bo_size);
+        memcpy(rdev->uvd.cpu_addr, fw->data, fw->size);
+        release_firmware(fw);
+        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+                atomic_set(&rdev->uvd.handles[i], 0);
+                rdev->uvd.filp[i] = NULL;
+        }
         return 0;
 }
+void radeon_uvd_fini(struct radeon_device *rdev)
+{
+        int r;
+        if (rdev->uvd.vcpu_bo == NULL)
+                return;
+        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
+        if (!r) {
+                radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+                radeon_bo_unpin(rdev->uvd.vcpu_bo);
+                radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+        }
+        radeon_bo_unref(&rdev->uvd.vcpu_bo);
+}
+int radeon_uvd_suspend(struct radeon_device *rdev)
+{
+        unsigned size;
+        if (rdev->uvd.vcpu_bo == NULL)
+                return 0;
+        size = radeon_bo_size(rdev->uvd.vcpu_bo);
+        rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
+        memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size);
+        return 0;
+}
+int radeon_uvd_resume(struct radeon_device *rdev)
+{
+        if (rdev->uvd.vcpu_bo == NULL)
+                return -EINVAL;
+        if (rdev->uvd.saved_bo != NULL) {
+                unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo);
+                memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size);
+                kfree(rdev->uvd.saved_bo);
+                rdev->uvd.saved_bo = NULL;
+        }
+        return 0;
+}

@@ -813,7 +813,7 @@ int rv770_uvd_resume(struct radeon_device *rdev)
         /* programm the VCPU memory controller bits 0-27 */
         addr = rdev->uvd.gpu_addr >> 3;
-        size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+        size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
         WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
         WREG32(UVD_VCPU_CACHE_SIZE0, size);