ARM: 6405/1: Handle __flush_icache_all for CONFIG_SMP_ON_UP

Do this by adding flush_icache_all to cache_fns for ARMv6 and ARMv7.
As flush_icache_all may need to be called from flush_kern_cache_all,
add it as the first entry in cache_fns.

Note that we can now remove the ARM_ERRATA_411920 dependency
on !SMP, so it can be selected on UP ARMv6 processors such
as omap2.
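
For readers less familiar with the kernel's multi-implementation cache handling, the sketch below is a small user-space analogue of the dispatch pattern this change extends: a table of function pointers (cpu_cache_fns in the kernel) is filled in once the CPU type is known, and macros such as __cpuc_flush_icache_all simply forward to the chosen entry. All names and the probing logic here are simplified illustrations, not kernel code.

#include <stdio.h>

struct cache_fns {
	void (*flush_icache_all)(void);	/* new first entry in this patch */
	void (*flush_kern_all)(void);
};

static void v6_flush_icache_all(void) { puts("v6: invalidate entire I-cache"); }
static void v6_flush_kern_all(void)   { puts("v6: clean+invalidate all caches"); }
static void v7_flush_icache_all(void) { puts("v7: invalidate I-cache (inner shareable on SMP)"); }
static void v7_flush_kern_all(void)   { puts("v7: clean+invalidate all caches"); }

static const struct cache_fns v6_cache_fns = { v6_flush_icache_all, v6_flush_kern_all };
static const struct cache_fns v7_cache_fns = { v7_flush_icache_all, v7_flush_kern_all };

/* Filled in at "boot"; the kernel equivalent is the global cpu_cache. */
static struct cache_fns cpu_cache;

/* Kernel equivalent: #define __cpuc_flush_icache_all cpu_cache.flush_icache_all */
#define flush_icache_all() cpu_cache.flush_icache_all()

int main(void)
{
	int cpu_is_v7 = 1;	/* pretend we probed the CPU at boot */

	cpu_cache = cpu_is_v7 ? v7_cache_fns : v6_cache_fns;
	flush_icache_all();	/* dispatches to v7_flush_icache_all */
	return 0;
}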

Signed-off-by: Tony Lindgren <tony@atomide.com>
Signed-off-by: Anand Gadiyar <gadiyar@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Tony Lindgren 2010-09-21 17:16:40 +01:00, committed by Russell King
Parent: f9e417e901
Commit: 81d11955bf
4 changed files with 77 additions and 27 deletions

arch/arm/Kconfig

@@ -1002,7 +1002,7 @@ endif
 config ARM_ERRATA_411920
 	bool "ARM errata: Invalidation of the Instruction Cache operation can fail"
-	depends on CPU_V6 && !SMP
+	depends on CPU_V6
 	help
 	  Invalidation of the Instruction Cache operation can
 	  fail. This erratum is present in 1136 (before r1p4), 1156 and 1176.

arch/arm/include/asm/cacheflush.h

@@ -156,6 +156,12 @@
  *	Please note that the implementation of these, and the required
  *	effects are cache-type (VIVT/VIPT/PIPT) specific.
  *
+ *	flush_icache_all()
+ *
+ *		Unconditionally clean and invalidate the entire icache.
+ *		Currently only needed for cache-v6.S and cache-v7.S, see
+ *		__flush_icache_all for the generic implementation.
+ *
  *	flush_kern_all()
  *
  *		Unconditionally clean and invalidate the entire cache.
@@ -206,6 +212,7 @@
  */
 struct cpu_cache_fns {
+	void (*flush_icache_all)(void);
 	void (*flush_kern_all)(void);
 	void (*flush_user_all)(void);
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
@@ -227,6 +234,7 @@ struct cpu_cache_fns {
 extern struct cpu_cache_fns cpu_cache;
 
+#define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
 #define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
 #define __cpuc_flush_user_all		cpu_cache.flush_user_all
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
@@ -246,6 +254,7 @@ extern struct cpu_cache_fns cpu_cache;
 #else
 
+#define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all)
 #define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
 #define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
@@ -253,6 +262,7 @@ extern struct cpu_cache_fns cpu_cache;
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
 #define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
+extern void __cpuc_flush_icache_all(void);
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
@@ -291,6 +301,37 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 /*
  * Convert calls to our calling convention.
  */
+
+/* Invalidate I-cache */
+#define __flush_icache_all_generic()				\
+	asm("mcr	p15, 0, %0, c7, c5, 0"			\
+	    : : "r" (0));
+
+/* Invalidate I-cache inner shareable */
+#define __flush_icache_all_v7_smp()				\
+	asm("mcr	p15, 0, %0, c7, c1, 0"			\
+	    : : "r" (0));
+
+/*
+ * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
+ * will fall through to use __flush_icache_all_generic.
+ */
+#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) ||	\
+	defined(CONFIG_SMP_ON_UP)
+#define __flush_icache_preferred	__cpuc_flush_icache_all
+#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
+#define __flush_icache_preferred	__flush_icache_all_v7_smp
+#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
+#define __flush_icache_preferred	__cpuc_flush_icache_all
+#else
+#define __flush_icache_preferred	__flush_icache_all_generic
+#endif
+
+static inline void __flush_icache_all(void)
+{
+	__flush_icache_preferred();
+}
+
 #define flush_cache_all()		__cpuc_flush_kern_all()
 
 static inline void vivt_flush_cache_mm(struct mm_struct *mm)
@@ -366,21 +407,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
 
-static inline void __flush_icache_all(void)
-{
-#ifdef CONFIG_ARM_ERRATA_411920
-	extern void v6_icache_inval_all(void);
-	v6_icache_inval_all();
-#elif defined(CONFIG_SMP) && __LINUX_ARM_ARCH__ >= 7
-	asm("mcr	p15, 0, %0, c7, c1, 0	@ invalidate I-cache inner shareable\n"
-	    :
-	    : "r" (0));
-#else
-	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
-	    :
-	    : "r" (0));
-#endif
-}
 static inline void flush_kernel_vmap_range(void *addr, int size)
 {
 	if ((cache_is_vivt() || cache_is_vipt_aliasing()))
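
To make the new selection logic concrete, here is a stand-alone sketch of how the __flush_icache_preferred ladder added in this header collapses for one configuration. The CONFIG_* macros are defined by hand and the MCR instructions are replaced by prints purely for illustration; a UP ARMv7-only build falls through to the generic invalidate, exactly as the comment in the hunk states.

#include <stdio.h>

#define CONFIG_CPU_V7		1	/* pretend: ARMv7-only UP build */
#define __LINUX_ARM_ARCH__	7
/* CONFIG_CPU_V6, CONFIG_SMP, CONFIG_SMP_ON_UP, CONFIG_ARM_ERRATA_411920 unset */

#define __flush_icache_all_generic()	puts("mcr p15, 0, r0, c7, c5, 0")
#define __flush_icache_all_v7_smp()	puts("mcr p15, 0, r0, c7, c1, 0")
#define __cpuc_flush_icache_all()	puts("cpu_cache.flush_icache_all()")

#if (defined(CONFIG_CPU_V7) && defined(CONFIG_CPU_V6)) || defined(CONFIG_SMP_ON_UP)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
#define __flush_icache_preferred	__flush_icache_all_v7_smp
#elif __LINUX_ARM_ARCH__ == 6 && defined(CONFIG_ARM_ERRATA_411920)
#define __flush_icache_preferred	__cpuc_flush_icache_all
#else
#define __flush_icache_preferred	__flush_icache_all_generic
#endif

int main(void)
{
	/* UP ARMv7-only falls through to the generic MCR, as the comment says. */
	__flush_icache_preferred();
	return 0;
}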

arch/arm/mm/cache-v6.S

@@ -21,18 +21,22 @@
 #define D_CACHE_LINE_SIZE	32
 #define BTB_FLUSH_SIZE	8
 
-#ifdef CONFIG_ARM_ERRATA_411920
 /*
- * Invalidate the entire I cache (this code is a workaround for the ARM1136
- * erratum 411920 - Invalidate Instruction Cache operation can fail. This
- * erratum is present in 1136, 1156 and 1176. It does not affect the MPCore.
+ *	v6_flush_icache_all()
+ *
+ *	Flush the whole I-cache.
  *
- * Registers:
- *   r0 - set to 0
- *   r1 - corrupted
+ *	ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail.
+ *	This erratum is present in 1136, 1156 and 1176. It does not affect the
+ *	MPCore.
+ *
+ *	Registers:
+ *	r0 - set to 0
+ *	r1 - corrupted
  */
-ENTRY(v6_icache_inval_all)
+ENTRY(v6_flush_icache_all)
 	mov	r0, #0
+#ifdef CONFIG_ARM_ERRATA_411920
 	mrs	r1, cpsr
 	cpsid	ifa				@ disable interrupts
 	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
@@ -43,8 +47,11 @@ ENTRY(v6_flush_icache_all)
 	.rept	11				@ ARM Ltd recommends at least
 	nop					@ 11 NOPs
 	.endr
-	mov	pc, lr
+#else
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I-cache
 #endif
+	mov	pc, lr
+ENDPROC(v6_flush_icache_all)
 
 /*
  * v6_flush_cache_all()
@@ -60,7 +67,7 @@ ENTRY(v6_flush_kern_cache_all)
 #ifndef CONFIG_ARM_ERRATA_411920
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
 #else
-	b	v6_icache_inval_all
+	b	v6_flush_icache_all
 #endif
 #else
 	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
@@ -138,7 +145,7 @@ ENTRY(v6_coherent_user_range)
 #ifndef CONFIG_ARM_ERRATA_411920
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
 #else
-	b	v6_icache_inval_all
+	b	v6_flush_icache_all
 #endif
 #else
 	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
@@ -312,6 +319,7 @@ ENDPROC(v6_dma_unmap_area)
 	.type	v6_cache_fns, #object
 ENTRY(v6_cache_fns)
+	.long	v6_flush_icache_all
 	.long	v6_flush_kern_cache_all
 	.long	v6_flush_user_cache_all
 	.long	v6_flush_user_cache_range

arch/arm/mm/cache-v7.S

@@ -17,6 +17,21 @@
 #include "proc-macros.S"
 
+/*
+ *	v7_flush_icache_all()
+ *
+ *	Flush the whole I-cache.
+ *
+ *	Registers:
+ *	r0 - set to 0
+ */
+ENTRY(v7_flush_icache_all)
+	mov	r0, #0
+	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)		@ invalidate I-cache inner shareable
+	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)		@ I+BTB cache invalidate
+	mov	pc, lr
+ENDPROC(v7_flush_icache_all)
+
 /*
  *	v7_flush_dcache_all()
  *
@@ -303,6 +318,7 @@ ENDPROC(v7_dma_unmap_area)
 	.type	v7_cache_fns, #object
 ENTRY(v7_cache_fns)
+	.long	v7_flush_icache_all
 	.long	v7_flush_kern_cache_all
 	.long	v7_flush_user_cache_all
 	.long	v7_flush_user_cache_range
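
A closing observation on the two cache_fns tables: the ".long" entries in cache-v6.S and cache-v7.S are consumed positionally through struct cpu_cache_fns, so the new pointer must occupy the same slot in the C struct and in both assembly tables (here, the first). The user-space sketch below, with illustrative names only, documents that layout constraint with compile-time checks.

#include <assert.h>
#include <stddef.h>

struct cpu_cache_fns_sketch {
	void (*flush_icache_all)(void);		/* must match the first .long */
	void (*flush_kern_all)(void);		/* must match the second .long */
	void (*flush_user_all)(void);
};

/* If a member is reordered without reordering the tables, calls land on the
 * wrong function; these checks document the expected layout. */
static_assert(offsetof(struct cpu_cache_fns_sketch, flush_icache_all) == 0,
	      "flush_icache_all must be the first entry");
static_assert(offsetof(struct cpu_cache_fns_sketch, flush_kern_all) ==
	      sizeof(void (*)(void)),
	      "flush_kern_all must immediately follow flush_icache_all");

int main(void) { return 0; }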