drm/amdgpu: cleanup VM coding style
Fix the indentation and move the VM functions to the structures.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Parent: eeed25ab83
Commit: 8b4fb00b5d
@@ -916,8 +916,8 @@ struct amdgpu_ring {
 #define AMDGPU_VM_FAULT_STOP_ALWAYS	2
 
 struct amdgpu_vm_pt {
-	struct amdgpu_bo		*bo;
-	uint64_t			addr;
+	struct amdgpu_bo	*bo;
+	uint64_t		addr;
 };
 
 struct amdgpu_vm_id {
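struct amdgpu_vm_pt, re-indented above, simply pairs a page-table buffer object with the GPU address of that BO. As a minimal illustration (a hypothetical debug helper, not part of this patch), dumping such an array could look like:

/* Hypothetical debug helper, not part of this patch: print the backing BO
 * and address of each page-table entry.  The caller is assumed to hand in
 * the VM's amdgpu_vm_pt array and its length.
 */
static void amdgpu_vm_pt_dump(struct amdgpu_vm_pt *entries, unsigned count)
{
	unsigned i;

	for (i = 0; i < count; ++i)
		pr_debug("pt[%u]: bo=%p addr=0x%016llx\n", i,
			 entries[i].bo, (unsigned long long)entries[i].addr);
}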
@@ -959,19 +959,60 @@ struct amdgpu_vm {
 };
 
 struct amdgpu_vm_manager {
-	struct fence			*active[AMDGPU_NUM_VM];
-	uint32_t			max_pfn;
+	struct fence		*active[AMDGPU_NUM_VM];
+	uint32_t		max_pfn;
 	/* number of VMIDs */
-	unsigned			nvm;
+	unsigned		nvm;
 	/* vram base address for page table entry */
-	u64				vram_base_offset;
+	u64			vram_base_offset;
 	/* is vm enabled? */
-	bool				enabled;
+	bool			enabled;
 	/* vm pte handling */
 	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
 	struct amdgpu_ring			*vm_pte_funcs_ring;
 };
 
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
+					       struct amdgpu_vm *vm,
+					       struct list_head *head);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync);
+void amdgpu_vm_flush(struct amdgpu_ring *ring,
+		     struct amdgpu_vm *vm,
+		     struct fence *updates);
+void amdgpu_vm_fence(struct amdgpu_device *adev,
+		     struct amdgpu_vm *vm,
+		     struct fence *fence);
+uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+			  struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			     struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+			struct amdgpu_bo_va *bo_va,
+			struct ttm_mem_reg *mem);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+			     struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+				       struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+				      struct amdgpu_vm *vm,
+				      struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+		     struct amdgpu_bo_va *bo_va,
+		     uint64_t addr, uint64_t offset,
+		     uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+		       struct amdgpu_bo_va *bo_va,
+		       uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+		      struct amdgpu_bo_va *bo_va);
+int amdgpu_vm_free_job(struct amdgpu_job *job);
+
 /*
  * context related structures
  */
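With the declarations now grouped next to the structures they operate on, the intended lifecycle of the interface is easier to follow. Below is a hedged sketch of a typical caller, using only the prototypes above, with locking and most error handling omitted; the function name and parameters are illustrative and not part of the patch.

/* Hypothetical usage sketch, not part of this patch: create a per-process
 * VM, map one BO into it at a caller-chosen GPU virtual address and tear
 * everything down again.  Reservation/locking of the BOs is omitted.
 */
static int example_vm_lifecycle(struct amdgpu_device *adev,
				struct amdgpu_vm *vm, struct amdgpu_bo *bo,
				uint64_t gpu_addr, uint64_t size,
				uint32_t pte_flags)
{
	struct amdgpu_bo_va *bo_va;
	int r;

	r = amdgpu_vm_init(adev, vm);		/* sets up the page directory */
	if (r)
		return r;

	bo_va = amdgpu_vm_bo_add(adev, vm, bo);	/* track the BO in this VM */
	if (!bo_va) {
		amdgpu_vm_fini(adev, vm);
		return -ENOMEM;
	}

	/* request a mapping of the whole BO at gpu_addr */
	r = amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0, size, pte_flags);
	if (!r)
		r = amdgpu_vm_bo_unmap(adev, bo_va, gpu_addr);

	amdgpu_vm_bo_rmv(adev, bo_va);		/* drop the bo_va again */
	amdgpu_vm_fini(adev, vm);		/* free page tables and directory */
	return r;
}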
@@ -2311,49 +2352,6 @@ int amdgpu_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
 long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);
 
-/*
- * vm
- */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
-					       struct amdgpu_vm *vm,
-					       struct list_head *head);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-		     struct amdgpu_vm *vm,
-		     struct fence *fence);
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-			  struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
-			     struct amdgpu_vm *vm, struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
-			struct amdgpu_bo_va *bo_va,
-			struct ttm_mem_reg *mem);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
-				       struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
-				      struct amdgpu_vm *vm,
-				      struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
-		     struct amdgpu_bo_va *bo_va,
-		     uint64_t addr, uint64_t offset,
-		     uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
-		       struct amdgpu_bo_va *bo_va,
-		       uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
-		      struct amdgpu_bo_va *bo_va);
-int amdgpu_vm_free_job(struct amdgpu_job *job);
 /*
  * functions used by amdgpu_encoder.c
  */