sh: Partial revert of copy/clear_user_highpage() optimizations.

These still require more testing, so revert them for now. We keep the
off-by-1 in the fixmap colouring and drop the rest.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Paul Mundt 2009-12-04 15:14:52 +09:00
Parent 8144a7dd51
Commit 7e01c94998
1 changed file with 13 additions and 53 deletions
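
A note on the aliasing test that gates the cache flushes in the hunks below: the SH-4 data cache is virtually indexed, so when the cache (or a cache way) is larger than a page, two virtual mappings of the same physical page can land in different cache "colours" and hold stale copies independently. pages_do_alias() is the predicate the code uses for that. What follows is a minimal illustrative sketch of the idea, assuming a 16KB direct-mapped data cache and 4KB pages; the names, PAGE_SIZE and the mask value are placeholders for this note, while the real kernel derives the mask from the probed boot_cpu_data.dcache geometry:

/* Illustrative sketch only -- not the kernel's definition. */
#define SKETCH_PAGE_SIZE	4096UL
#define SKETCH_ALIAS_MASK	0x3000UL	/* 16KB / 4KB pages -> 2 colour bits */

static inline unsigned long sketch_pages_do_alias(unsigned long addr1,
						  unsigned long addr2)
{
	/*
	 * Non-zero when the colour-selecting address bits differ, i.e.
	 * when the two mappings index different cache lines and a flush
	 * is needed before the other alias can safely see the data.
	 */
	return (addr1 ^ addr2) & SKETCH_ALIAS_MASK;
}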

arch/sh/mm/cache.c

@@ -46,18 +46,6 @@ static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
 	preempt_enable();
 }
 
-/*
- * copy_to_user_page
- * @vma: vm_area_struct holding the pages
- * @page: struct page
- * @vaddr: user space address
- * @dst: address of page in kernel space (possibly from kmap)
- * @src: source address in kernel logical memory
- * @len: length of data in bytes (may be less than PAGE_SIZE)
- *
- * Copy data into the address space of a process other than the current
- * process (eg for ptrace).
- */
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
@@ -93,49 +81,28 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 	}
 }
 
-/*
- * copy_user_highpage
- * @to: destination page
- * @from: source page
- * @vaddr: address of pages in user address space
- * @vma: vm_area_struct holding the pages
- *
- * This is used in COW implementation to copy data from page @from to
- * page @to. @from was previousl mapped at @vaddr, and @to will be.
- * As this is used only in the COW implementation, this means that the
- * source is unmodified, and so we don't have to worry about cache
- * aliasing on that side.
- */
-#ifdef CONFIG_HIGHMEM
-/*
- * If we ever have a real highmem system, this code will need fixing
- * (as will clear_user/clear_user_highmem), because the kmap potentitally
- * creates another alias risk.
- */
-#error This code is broken with real HIGHMEM
-#endif
 void copy_user_highpage(struct page *to, struct page *from,
 			unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *vfrom, *vto;
 
 	vto = kmap_atomic(to, KM_USER1);
-	vfrom = kmap_atomic(from, KM_USER0);
-
-	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
-		__flush_invalidate_region(vto, PAGE_SIZE);
 
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
 	    !test_bit(PG_dcache_dirty, &from->flags)) {
-		void *vto_coloured = kmap_coherent(to, vaddr);
-		copy_page(vto_coloured, vfrom);
-		kunmap_coherent(vto_coloured);
-	} else
+		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
+		kunmap_coherent(vfrom);
+	} else {
+		vfrom = kmap_atomic(from, KM_USER0);
+		copy_page(vto, vfrom);
+		kunmap_atomic(vfrom, KM_USER0);
+	}
+
+	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+		__flush_purge_region(vto, PAGE_SIZE);
 
-	kunmap_atomic(vfrom, KM_USER0);
 	kunmap_atomic(vto, KM_USER1);
-
 	/* Make sure this page is cleared on other CPU's too before using it */
 	smp_wmb();
 }
@@ -145,17 +112,10 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
 {
 	void *kaddr = kmap_atomic(page, KM_USER0);
 
-	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) {
-		void *vto;
+	clear_page(kaddr);
 
-		/* Kernel alias may have modified data in the cache. */
-		__flush_invalidate_region(kaddr, PAGE_SIZE);
-
-		vto = kmap_coherent(page, vaddr);
-		clear_page(vto);
-		kunmap_coherent(vto);
-	} else
-		clear_page(kaddr);
+	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
+		__flush_purge_region(kaddr, PAGE_SIZE);
 
 	kunmap_atomic(kaddr, KM_USER0);
 }
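
For readability, here is clear_user_highpage() as it reads after this revert, reassembled from the context and added lines of the last hunk; the comments are editorial, not in the source. copy_user_highpage() follows the same shape: prefer a kmap_coherent() mapping of the source when the page is mapped and its cache state is clean, fall back to a plain kernel mapping otherwise, then purge the destination's kernel alias if it clashes with the user address.

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	/* Clear the page through the kernel-side mapping... */
	clear_page(kaddr);

	/*
	 * ...then write back and invalidate the kernel alias if it maps
	 * to a different cache colour than the user address will use.
	 */
	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}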