MN10300: Rename __flush_tlb*() to local_flush_tlb*()
Rename __flush_tlb*() to local_flush_tlb*() as it's more appropriate, and
ready to differentiate local from global TLB flushes when SMP is introduced.

Whilst we're at it, get rid of __flush_tlb_global() and make
local_flush_tlb_page() take an mm_struct pointer rather than a VMA pointer.

Signed-off-by: David Howells <dhowells@redhat.com>
Parent: 8f19e3daf3
Commit: 492e675116
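For orientation before the diff: after this change the generic flush_tlb_page()
entry point is a macro that unwraps the VMA to its mm_struct and calls the
renamed per-CPU primitive. A minimal sketch of the resulting caller-facing
surface, taken from the tlbflush.h hunks below:

	/* Post-commit API surface (see the tlbflush.h hunks below):
	 * the arch-level primitive takes the mm_struct directly, and
	 * the generic wrapper extracts it from the VMA. */
	#define flush_tlb_page(vma, addr) \
		local_flush_tlb_page((vma)->vm_mm, addr)

	extern void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr);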
@@ -87,7 +87,7 @@ static inline unsigned long __kmap_atomic(struct page *page)
 	BUG();
 #endif
 	set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
-	__flush_tlb_one(vaddr);
+	local_flush_tlb_one(vaddr);
 
 	return vaddr;
 }
@@ -116,7 +116,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
	 * this pte without first remap it
	 */
	pte_clear(kmap_pte - idx);
-	__flush_tlb_one(vaddr);
+	local_flush_tlb_one(vaddr);
	}
 #endif
	pagefault_enable();
@@ -58,7 +58,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
 	if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
 		/* we exhausted the TLB PIDs of this version on this CPU, so we
 		 * flush this CPU's TLB in its entirety and start new cycle */
-		flush_tlb_all();
+		local_flush_tlb_all();
 
 		/* fix the TLB version if needed (we avoid version #0 so as to
 		 * distingush MMU_NO_CONTEXT) */
@@ -13,21 +13,37 @@
 
 #include <asm/processor.h>
 
-#define __flush_tlb()						\
-do {								\
-	int w;							\
-	__asm__ __volatile__					\
-		("	mov %1,%0		\n"		\
-		 "	or %2,%0		\n"		\
-		 "	mov %0,%1		\n"		\
-		 : "=d"(w)					\
-		 : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)	\
-		 : "cc", "memory"				\
-		 );						\
-} while (0)
+/**
+ * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb(void)
+{
+	int w;
+	asm volatile(
+		"	mov %1,%0	\n"
+		"	or %2,%0	\n"
+		"	mov %0,%1	\n"
+		: "=d"(w)
+		: "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
+		: "cc", "memory");
+}
 
-#define __flush_tlb_all() __flush_tlb()
-#define __flush_tlb_one(addr) __flush_tlb()
+/**
+ * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
+ */
+#define local_flush_tlb_all()		local_flush_tlb()
+
+/**
+ * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
+ */
+#define local_flush_tlb_one(addr)	local_flush_tlb()
+
+/**
+ * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
+ * @mm: The MM to flush for
+ * @addr: The address of the target page in RAM (not its page struct)
+ */
+extern void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr);
 
 
 /*
@@ -43,14 +59,14 @@ do { \
 #define flush_tlb_all()				\
 do {						\
	preempt_disable();			\
-	__flush_tlb_all();			\
+	local_flush_tlb_all();			\
	preempt_enable();			\
 } while (0)
 
 #define flush_tlb_mm(mm)			\
 do {						\
	preempt_disable();			\
-	__flush_tlb_all();			\
+	local_flush_tlb_all();			\
	preempt_enable();			\
 } while (0)
@@ -59,13 +75,13 @@ do { \
	unsigned long __s __attribute__((unused)) = (start);	\
	unsigned long __e __attribute__((unused)) = (end);	\
	preempt_disable();					\
-	__flush_tlb_all();					\
+	local_flush_tlb_all();					\
	preempt_enable();					\
 } while (0)
 
-#define __flush_tlb_global()	flush_tlb_all()
-#define flush_tlb()		flush_tlb_all()
+#define flush_tlb_page(vma, addr)	local_flush_tlb_page((vma)->vm_mm, addr)
+#define flush_tlb()			flush_tlb_all()
 
 #define flush_tlb_kernel_range(start, end)	\
 do {						\
	unsigned long __s __attribute__((unused)) = (start);	\
@@ -73,8 +89,6 @@ do { \
	flush_tlb_all();			\
 } while (0)
 
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-
 #define flush_tlb_pgtables(mm, start, end) do {} while (0)
 
 #endif /* _ASM_TLBFLUSH_H */
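The preempt_disable()/preempt_enable() bracketing in the flush_tlb_*() wrappers
above pins the flush to a single CPU, which is what makes the local/global
split useful later. A hypothetical sketch, not part of this commit, of how an
SMP-era flush_tlb_all() might grow on top of the renamed primitive:

	/* Hypothetical only: this commit prepares the naming, it does
	 * not implement SMP. A future global flush might look like: */
	static inline void flush_tlb_all(void)
	{
		preempt_disable();		/* stay on this CPU */
		local_flush_tlb_all();		/* flush the local TLB */
		/* ...and an IPI (e.g. via smp_call_function()) would
		 * ask the other CPUs to do their own local flush */
		preempt_enable();
	}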
@@ -73,7 +73,7 @@ void __init paging_init(void)
	/* pass the memory from the bootmem allocator to the main allocator */
	free_area_init(zones_size);
 
-	__flush_tlb_all();
+	local_flush_tlb_all();
 }
 
 /*
@@ -23,7 +23,7 @@ unsigned long mmu_context_cache[NR_CPUS] = {
 /*
  * flush the specified TLB entry
  */
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
 {
	unsigned long pteu, cnx, flags;
 
@@ -33,7 +33,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
	 * interference from vmalloc'd regions */
	local_irq_save(flags);
 
-	cnx = mm_context(vma->vm_mm);
+	cnx = mm_context(mm);
 
	if (cnx != MMU_NO_CONTEXT) {
		pteu = addr | (cnx & 0x000000ffUL);
@@ -59,7 +59,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
-	__flush_tlb_one(vaddr);
+	local_flush_tlb_one(vaddr);
 }
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)