drm/amdgpu: Keep a bo-reference per-attachment

For now they all reference the same BO. For correct DMA mappings they will
refer to different BOs per-GPU.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Oak Zeng <Oak.Zeng@amd.com>
Acked-by: Ramesh Errabolu <Ramesh.Errabolu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
Felix Kuehling 2021-04-08 22:27:34 -04:00, committed by Alex Deucher
Parent c780b2eedb
Commit 4e94272f8a
1 changed file with 17 additions and 5 deletions

View file

@@ -491,11 +491,11 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 		struct amdgpu_vm *vm, bool is_aql,
 		struct kfd_mem_attachment **p_attachment)
 {
-	int ret;
+	unsigned long bo_size = mem->bo->tbo.base.size;
+	uint64_t va = mem->va;
 	struct kfd_mem_attachment *attachment;
-	struct amdgpu_bo *bo = mem->bo;
-	uint64_t va = mem->va;
-	unsigned long bo_size = bo->tbo.base.size;
+	struct amdgpu_bo *bo;
+	int ret;
 
 	if (!va) {
 		pr_err("Invalid VA when adding BO to VM\n");
@@ -512,6 +512,14 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 	pr_debug("\t add VA 0x%llx - 0x%llx to vm %p\n", va,
 			va + bo_size, vm);
 
+	/* FIXME: For now all attachments use the same BO. This is incorrect
+	 * because one BO can only have one DMA mapping for one GPU. We need
+	 * one BO per GPU, e.g. a DMABuf import with dynamic attachment. This
+	 * will be addressed one BO-type at a time in subsequent patches.
+	 */
+	bo = mem->bo;
+	drm_gem_object_get(&bo->tbo.base);
+
 	/* Add BO to VM internal data structures*/
 	attachment->bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 	if (!attachment->bo_va) {
@@ -531,7 +539,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 
 	/* Allocate validate page tables if needed */
 	ret = vm_validate_pt_pd_bos(vm);
-	if (ret) {
+	if (unlikely(ret)) {
 		pr_err("validate_pt_pd_bos() failed\n");
 		goto err_alloc_pts;
 	}
@@ -542,15 +550,19 @@ err_alloc_pts:
 	amdgpu_vm_bo_rmv(adev, attachment->bo_va);
 	list_del(&attachment->list);
 err_vmadd:
+	drm_gem_object_put(&bo->tbo.base);
 	kfree(attachment);
 	return ret;
 }
 
 static void kfd_mem_detach(struct kfd_mem_attachment *attachment)
 {
+	struct amdgpu_bo *bo = attachment->bo_va->base.bo;
+
 	pr_debug("\t remove VA 0x%llx in entry %p\n",
 			attachment->va, attachment);
 	amdgpu_vm_bo_rmv(attachment->adev, attachment->bo_va);
+	drm_gem_object_put(&bo->tbo.base);
 	list_del(&attachment->list);
 	kfree(attachment);
 }