drm/radeon: Remove rdev->gart.pages_addr array

radeon_vm_map_gart can use rdev->gart.pages_entry instead.

Also move the masking of the page address to radeon_vm_map_gart from its
callers.

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Michel Dänzer <michel.daenzer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Michel Dänzer 2015-01-21 17:36:37 +09:00 committed by Alex Deucher
Parent 5636d2f842
Commit 16653dbae0
6 changed files with 2 additions and 20 deletions
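
For context, a minimal sketch of how the helper reads after this patch, pieced together from the radeon_vm.c hunk below; the explanatory comments are mine, not part of the patch.

uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
	uint64_t result;

	/* page table offset: pages_entry is indexed per GPU page */
	result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
	/* drop the low PTE flag bits, so callers no longer have to mask */
	result &= ~RADEON_GPU_PAGE_MASK;

	return result;
}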

drivers/gpu/drm/radeon/cik_sdma.c

@@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
 	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 		if (flags & R600_PTE_SYSTEM) {
 			value = radeon_vm_map_gart(rdev, addr);
-			value &= 0xFFFFFFFFFFFFF000ULL;
 		} else if (flags & R600_PTE_VALID) {
 			value = addr;
 		} else {

drivers/gpu/drm/radeon/ni_dma.c

@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
 	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 		if (flags & R600_PTE_SYSTEM) {
 			value = radeon_vm_map_gart(rdev, addr);
-			value &= 0xFFFFFFFFFFFFF000ULL;
 		} else if (flags & R600_PTE_VALID) {
 			value = addr;
 		} else {

drivers/gpu/drm/radeon/radeon.h

@@ -646,7 +646,6 @@ struct radeon_gart {
 	unsigned			num_cpu_pages;
 	unsigned			table_size;
 	struct page			**pages;
-	dma_addr_t			*pages_addr;
 	uint64_t			*pages_entry;
 	bool				ready;
 };

drivers/gpu/drm/radeon/radeon_gart.c

@@ -251,7 +251,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
 			rdev->gart.pages[p] = NULL;
-			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
 			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
 				rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
 				if (rdev->gart.ptr) {
@@ -296,7 +295,6 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
 	for (i = 0; i < pages; i++, p++) {
-		rdev->gart.pages_addr[p] = dma_addr[i];
 		rdev->gart.pages[p] = pagelist[i];
 		page_base = dma_addr[i];
 		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
@@ -347,12 +345,6 @@ int radeon_gart_init(struct radeon_device *rdev)
 		radeon_gart_fini(rdev);
 		return -ENOMEM;
 	}
-	rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
-					rdev->gart.num_cpu_pages);
-	if (rdev->gart.pages_addr == NULL) {
-		radeon_gart_fini(rdev);
-		return -ENOMEM;
-	}
 	rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
 					 rdev->gart.num_gpu_pages);
 	if (rdev->gart.pages_entry == NULL) {
@@ -360,8 +352,6 @@ int radeon_gart_init(struct radeon_device *rdev)
 		return -ENOMEM;
 	}
 	/* set GART entry to point to the dummy page by default */
-	for (i = 0; i < rdev->gart.num_cpu_pages; i++)
-		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
 	for (i = 0; i < rdev->gart.num_gpu_pages; i++)
 		rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
 	return 0;
@@ -382,10 +372,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
 	}
 	rdev->gart.ready = false;
 	vfree(rdev->gart.pages);
-	vfree(rdev->gart.pages_addr);
 	vfree(rdev->gart.pages_entry);
 	rdev->gart.pages = NULL;
-	rdev->gart.pages_addr = NULL;
 	rdev->gart.pages_entry = NULL;
 
 	radeon_dummy_page_fini(rdev);

drivers/gpu/drm/radeon/radeon_vm.c

@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
 	uint64_t result;
 
 	/* page table offset */
-	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
-	/* in case cpu page size != gpu page size*/
-	result |= addr & (~PAGE_MASK);
+	result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
+	result &= ~RADEON_GPU_PAGE_MASK;
 
 	return result;
 }

drivers/gpu/drm/radeon/si_dma.c

@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
 	for (; ndw > 0; ndw -= 2, --count, pe += 8) {
 		if (flags & R600_PTE_SYSTEM) {
 			value = radeon_vm_map_gart(rdev, addr);
-			value &= 0xFFFFFFFFFFFFF000ULL;
 		} else if (flags & R600_PTE_VALID) {
 			value = addr;
 		} else {