parisc: Fix cache routines to ignore vma's with an invalid pfn
The parisc architecture does not have a pte special bit. As a result, special mappings are handled with the VM_PFNMAP and VM_MIXEDMAP flags. VM_MIXEDMAP mappings may or may not have a "struct page" backing: when pfn_valid() is false, there is no "struct page" backing; otherwise, they are treated as normal pages.

The FireGL driver uses VM_MIXEDMAP without a backing "struct page". This caused a panic due to a TLB data miss in update_mmu_cache, which appeared to be in the code generated for page_address(). We were in fact using a very circular bit of code to determine the physical address of the PFN in various cache routines, and that code is not valid when there is no "struct page" backing. The needed address can be determined directly from the PFN itself, without going through the "struct page".

This patch updates update_mmu_cache(), flush_cache_mm(), flush_cache_range() and flush_cache_page() to check pfn_valid() and to compute the PFN's physical and virtual addresses directly.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: <stable@vger.kernel.org> # 3.10
Signed-off-by: Helge Deller <deller@gmx.de>
Parent: 06693f305e
Commit: 50861f5a02
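For background, here is a minimal sketch of how a user mapping with no "struct page" backing can arise. All mydrv_* names are hypothetical (this is not the FireGL driver's code); the kernel APIs shown (vm_insert_mixed, the vma-based fault signature) are the 3.x-era ones this commit targets. A driver like this produces ptes whose pfn fails pfn_valid(), which is exactly the case the cache routines below must tolerate.

#include <linux/mm.h>

/* Hypothetical driver state: a device memory range with no struct page. */
static unsigned long mydrv_base_pfn;

static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = mydrv_base_pfn + vmf->pgoff;

	/* Insert a raw pfn: pfn_valid(pfn) is false for this range, so
	   core MM must not assume a struct page backs the mapping. */
	if (vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct mydrv_vm_ops = {
	.fault	= mydrv_fault,
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_MIXEDMAP;	/* backing struct page is optional */
	vma->vm_ops = &mydrv_vm_ops;
	return 0;
}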
@@ -71,18 +71,27 @@ flush_cache_all_local(void)
 }
 EXPORT_SYMBOL(flush_cache_all_local);
 
+/* Virtual address of pfn.  */
+#define pfn_va(pfn)	__va(PFN_PHYS(pfn))
+
 void
 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	struct page *page = pte_page(*ptep);
+	unsigned long pfn = pte_pfn(*ptep);
+	struct page *page;
 
-	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
-	    test_bit(PG_dcache_dirty, &page->flags)) {
+	/* We don't have pte special.  As a result, we can be called with
+	   an invalid pfn and we don't need to flush the kernel dcache page.
+	   This occurs with FireGL card in C8000.  */
+	if (!pfn_valid(pfn))
+		return;
 
-		flush_kernel_dcache_page(page);
+	page = pfn_to_page(pfn);
+	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+		flush_kernel_dcache_page_addr(pfn_va(pfn));
 		clear_bit(PG_dcache_dirty, &page->flags);
 	} else if (parisc_requires_coherency())
-		flush_kernel_dcache_page(page);
+		flush_kernel_dcache_page_addr(pfn_va(pfn));
 }
 
 void
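A note on the new helper: pfn_va() deliberately never dereferences a struct page. Expanded with the generic kernel definitions (assumed here for illustration):

	/* pfn_va(pfn) == __va(PFN_PHYS(pfn)) */
	/* PFN_PHYS(pfn): (phys_addr_t)(pfn) << PAGE_SHIFT   */
	/* __va(paddr):   kernel linear-map alias of paddr   */

By contrast, the old flush_kernel_dcache_page(page) had to derive the virtual address via page_address(page), i.e. through the struct page, which is what faulted when no struct page existed.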
@@ -495,44 +504,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
 
 void flush_cache_mm(struct mm_struct *mm)
 {
+	struct vm_area_struct *vma;
+	pgd_t *pgd;
+
 	/* Flushing the whole cache on each cpu takes forever on
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
-	if (mm_total_size(mm) < parisc_cache_flush_threshold) {
-		struct vm_area_struct *vma;
-
-		if (mm->context == mfsp(3)) {
-			for (vma = mm->mmap; vma; vma = vma->vm_next) {
-				flush_user_dcache_range_asm(vma->vm_start,
-					vma->vm_end);
-				if (vma->vm_flags & VM_EXEC)
-					flush_user_icache_range_asm(
-					  vma->vm_start, vma->vm_end);
-			}
-		} else {
-			pgd_t *pgd = mm->pgd;
-
-			for (vma = mm->mmap; vma; vma = vma->vm_next) {
-				unsigned long addr;
-
-				for (addr = vma->vm_start; addr < vma->vm_end;
-				     addr += PAGE_SIZE) {
-					pte_t *ptep = get_ptep(pgd, addr);
-					if (ptep != NULL) {
-						pte_t pte = *ptep;
-						__flush_cache_page(vma, addr,
-						  page_to_phys(pte_page(pte)));
-					}
-				}
-			}
-		}
-		return;
-	}
-
-#ifdef CONFIG_SMP
-	flush_cache_all();
-#else
-	flush_cache_all_local();
-#endif
+	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+		flush_cache_all();
+		return;
+	}
+
+	if (mm->context == mfsp(3)) {
+		for (vma = mm->mmap; vma; vma = vma->vm_next) {
+			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
+			if ((vma->vm_flags & VM_EXEC) == 0)
+				continue;
+			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+		}
+		return;
+	}
+
+	pgd = mm->pgd;
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		unsigned long addr;
+
+		for (addr = vma->vm_start; addr < vma->vm_end;
+		     addr += PAGE_SIZE) {
+			unsigned long pfn;
+			pte_t *ptep = get_ptep(pgd, addr);
+			if (!ptep)
+				continue;
+			pfn = pte_pfn(*ptep);
+			if (!pfn_valid(pfn))
+				continue;
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+		}
+	}
 }
 
 void
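For normal pages the new per-page computation is equivalent to the old one; only the invalid-pfn case changes behavior. A quick sanity check of the identity, assuming the usual generic definitions of these helpers:

	/* pte_page(pte) is pfn_to_page(pte_pfn(pte)), and
	   page_to_phys(page) is PFN_PHYS(page_to_pfn(page)), so for a
	   valid pfn:
		page_to_phys(pte_page(pte)) == PFN_PHYS(pte_pfn(pte))
	   The right-hand side never touches mem_map, so it is also safe
	   when pfn_valid() is false; such pfns are now skipped anyway. */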
@@ -556,33 +563,32 @@ flush_user_icache_range(unsigned long start, unsigned long end)
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
+	unsigned long addr;
+	pgd_t *pgd;
+
 	BUG_ON(!vma->vm_mm->context);
 
-	if ((end - start) < parisc_cache_flush_threshold) {
-		if (vma->vm_mm->context == mfsp(3)) {
-			flush_user_dcache_range_asm(start, end);
-			if (vma->vm_flags & VM_EXEC)
-				flush_user_icache_range_asm(start, end);
-		} else {
-			unsigned long addr;
-			pgd_t *pgd = vma->vm_mm->pgd;
-
-			for (addr = start & PAGE_MASK; addr < end;
-			     addr += PAGE_SIZE) {
-				pte_t *ptep = get_ptep(pgd, addr);
-				if (ptep != NULL) {
-					pte_t pte = *ptep;
-					flush_cache_page(vma,
-					   addr, pte_pfn(pte));
-				}
-			}
-		}
-	} else {
-#ifdef CONFIG_SMP
+	if ((end - start) >= parisc_cache_flush_threshold) {
 		flush_cache_all();
-#else
-		flush_cache_all_local();
-#endif
+		return;
 	}
+
+	if (vma->vm_mm->context == mfsp(3)) {
+		flush_user_dcache_range_asm(start, end);
+		if (vma->vm_flags & VM_EXEC)
+			flush_user_icache_range_asm(start, end);
+		return;
+	}
+
+	pgd = vma->vm_mm->pgd;
+	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+		unsigned long pfn;
+		pte_t *ptep = get_ptep(pgd, addr);
+		if (!ptep)
+			continue;
+		pfn = pte_pfn(*ptep);
+		if (pfn_valid(pfn))
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+	}
 }
 
@@ -591,9 +597,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 {
 	BUG_ON(!vma->vm_mm->context);
 
-	flush_tlb_page(vma, vmaddr);
-	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
-
+	if (pfn_valid(pfn)) {
+		flush_tlb_page(vma, vmaddr);
+		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+	}
 }
 
 #ifdef CONFIG_PARISC_TMPALIAS