ARM: DMA: Fix struct page iterator in dma_cache_maint() to work with sparsemem
Subhash Jadavani reported this partial backtrace:

  Now consider this call stack from MMC block driver (this is on the
  ARMv7 based board):

  [<c001b50c>] (v7_dma_inv_range+0x30/0x48) from [<c0017b8c>] (dma_cache_maint_page+0x1c4/0x24c)
  [<c0017b8c>] (dma_cache_maint_page+0x1c4/0x24c) from [<c0017c28>] (___dma_page_cpu_to_dev+0x14/0x1c)
  [<c0017c28>] (___dma_page_cpu_to_dev+0x14/0x1c) from [<c0017ff8>] (dma_map_sg+0x3c/0x114)

This is caused by incrementing the struct page pointer, and running off
the end of the sparsemem page array.  Fix this by incrementing by pfn
instead, and converting the pfn to a struct page.

Cc: <stable@vger.kernel.org>
Suggested-by: James Bottomley <JBottomley@Parallels.com>
Tested-by: Subhash Jadavani <subhashj@codeaurora.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Parent: 6f16f4998f
Commit: 15653371c6
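The gist of the fix, pulled out of the diff below as a minimal standalone sketch (the helper name walk_contiguous_pages and its body are illustrative only, not part of the patch): with CONFIG_SPARSEMEM the mem_map is split into per-section arrays, so pointer arithmetic on a struct page can run off the end of one section's array even though the next page frame is perfectly valid. Doing the arithmetic on the pfn and converting back with pfn_to_page() stays correct across section boundaries.

#include <linux/mm.h>

/*
 * Illustrative only: iterate over physically contiguous pages by pfn,
 * looking up the struct page each time, rather than incrementing the
 * struct page pointer itself.
 */
static void walk_contiguous_pages(struct page *page, unsigned long offset,
				  size_t size)
{
	unsigned long pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	size_t left = size;

	offset %= PAGE_SIZE;		/* offset within the first page */

	do {
		size_t len = left;

		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		page = pfn_to_page(pfn);	/* valid across sparsemem sections */
		/* ... operate on 'len' bytes of 'page' at 'offset' here ... */

		offset = 0;		/* subsequent pages start at offset 0 */
		pfn++;			/* advance by frame number, not by page pointer */
		left -= len;
	} while (left);
}

This mirrors the design choice in the patch: page_to_pfn()/pfn_to_page() are the portable way to step between neighbouring page frames, whereas incrementing the struct page pointer is only guaranteed to stay inside a valid array under flatmem (or sparsemem with vmemmap).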
@@ -774,25 +774,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
 {
+	unsigned long pfn;
+	size_t left = size;
+
+	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+	offset %= PAGE_SIZE;
+
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
 	 * pages.  But we still need to process highmem pages individually.
 	 * If highmem is not configured then the bulk of this loop gets
 	 * optimized out.
 	 */
-	size_t left = size;
 	do {
 		size_t len = left;
 		void *vaddr;
 
+		page = pfn_to_page(pfn);
+
 		if (PageHighMem(page)) {
-			if (len + offset > PAGE_SIZE) {
-				if (offset >= PAGE_SIZE) {
-					page += offset / PAGE_SIZE;
-					offset %= PAGE_SIZE;
-				}
+			if (len + offset > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
-			}
 			vaddr = kmap_high_get(page);
 			if (vaddr) {
 				vaddr += offset;
@@ -809,7 +811,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			op(vaddr, len, dir);
 		}
 		offset = 0;
-		page++;
+		pfn++;
 		left -= len;
 	} while (left);
 }