powerpc/cache: add cache flush operation for various e500
Various e500 cores have different cache architectures, so they need different cache flush operations. Therefore, add a callback, cpu_down_flush(), to struct cpu_spec. The flush operation for the specific kind of e500 is selected at init time; the callback flushes all caches inside the current CPU.

Signed-off-by: Chenhui Zhao <chenhui.zhao@freescale.com>
Signed-off-by: Tang Yuantian <Yuantian.Tang@feescale.com>
Signed-off-by: Scott Wood <oss@buserror.net>
Parent: ebb9d30a6a
Commit: e7affb1dba
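In short, the hard-wired __flush_disable_L1() call on the CPU-down paths is replaced by an indirect call through the CPU spec matched at boot. A minimal sketch of that pattern, using simplified stand-in types rather than the real struct cpu_spec definition:

    struct cpu_spec_sketch {
        /* ... identification and setup fields ... */
        void (*cpu_down_flush)(void);   /* flush caches inside the current cpu */
    };

    /* set once during CPU identification at boot */
    static struct cpu_spec_sketch *cur_spec_sketch;

    static void cpu_down_path_sketch(void)
    {
        /* one indirect call selects the e500v2/e500mc/e5500/e6500 specific flush */
        cur_spec_sketch->cpu_down_flush();
    }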
@@ -30,8 +30,6 @@ extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

extern void __flush_disable_L1(void);

extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
@@ -43,6 +43,11 @@ extern int machine_check_e500(struct pt_regs *regs);
extern int machine_check_e200(struct pt_regs *regs);
extern int machine_check_47x(struct pt_regs *regs);

extern void cpu_down_flush_e500v2(void);
extern void cpu_down_flush_e500mc(void);
extern void cpu_down_flush_e5500(void);
extern void cpu_down_flush_e6500(void);

/* NOTE WELL: Update identify_cpu() if fields are added or removed! */
struct cpu_spec {
	/* CPU is matched via (PVR & pvr_mask) == pvr_value */
@@ -59,6 +64,9 @@ struct cpu_spec {
	unsigned int	icache_bsize;
	unsigned int	dcache_bsize;

	/* flush caches inside the current cpu */
	void (*cpu_down_flush)(void);

	/* number of performance monitor counters */
	unsigned int	num_pmcs;
	enum powerpc_pmc_type pmc_type;
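Note that the new member is only populated for the e500 entries in cpu_specs[]; the 85xx call sites changed at the bottom of this commit invoke it unconditionally because every matched e500 spec now sets the pointer. A generic caller would more likely guard it, roughly as sketched here:

    #include <asm/cputable.h>

    /* Sketch of a guarded call through the new member (not part of this commit). */
    static inline void cpu_down_flush_current(void)
    {
        if (cur_cpu_spec->cpu_down_flush)
            cur_cpu_spec->cpu_down_flush();
    }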
@@ -376,6 +376,7 @@ int main(void)
	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
	DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore));
	DEFINE(CPU_DOWN_FLUSH, offsetof(struct cpu_spec, cpu_down_flush));

	DEFINE(pbe_address, offsetof(struct pbe, address));
	DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
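The asm-offsets entry just exports the byte offset of the new member as an assembler-visible constant (CPU_DOWN_FLUSH), presumably so assembly code can reach the pointer from a struct cpu_spec address; no assembly user appears in this diff. A stand-alone illustration of what DEFINE()/offsetof() produces, using a stand-in struct rather than the real cpu_spec:

    #include <stddef.h>
    #include <stdio.h>

    struct cpu_spec_sketch {
        unsigned int icache_bsize;
        unsigned int dcache_bsize;
        void (*cpu_down_flush)(void);
    };

    int main(void)
    {
        /* the generated header carries a line of the form "#define CPU_DOWN_FLUSH <offset>" */
        printf("#define CPU_DOWN_FLUSH %zu\n",
               offsetof(struct cpu_spec_sketch, cpu_down_flush));
        return 0;
    }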
@@ -13,11 +13,13 @@
 *
 */

#include <asm/page.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/mmu-book3e.h>
#include <asm/asm-offsets.h>
#include <asm/mpc85xx.h>

_GLOBAL(__e500_icache_setup)
	mfspr	r0, SPRN_L1CSR1
@@ -233,3 +235,113 @@ _GLOBAL(__setup_cpu_e5500)
	mtlr	r5
	blr
#endif

/* flush L1 data cache, it can apply to e500v2, e500mc and e5500 */
_GLOBAL(flush_dcache_L1)
	mfmsr	r10
	wrteei	0

	mfspr	r3,SPRN_L1CFG0
	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 * log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6

	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	LOAD_REG_IMMEDIATE(r6, KERNELBASE)
	mr	r4, r6
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync
	mr	r4, r6
	mtctr	r7

1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	wrtee	r10

	blr

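flush_dcache_L1 sizes the L1 d-cache from L1CFG0, sets HID0[DCFA] (data cache flush assist), then makes one pass of loads from KERNELBASE to displace every dirty line and a second pass of dcbf over the same region. A C rendering of the loop structure follows; it is a sketch, not a drop-in replacement — interrupt masking, DCFA and the barriers are only hinted at, and the 8-way/13-loads figure comes from the comments above:

    /* wraps the "dcbf" instruction; PowerPC-only inline asm */
    static inline void dcbf_line(const void *p)
    {
        __asm__ __volatile__("dcbf 0, %0" : : "r"(p) : "memory");
    }

    void flush_dcache_l1_sketch(unsigned long kernelbase,
                                unsigned long cache_kib,   /* cache size in KiB, from L1CFG0 */
                                unsigned long block_size,  /* 32 or 64, from L1CFG0 */
                                unsigned long ways)        /* assumed 8 on these cores */
    {
        /* 13 loads per set so that, with HID0[DCFA] set, every way of every set
         * is displaced even under the pseudo-LRU replacement policy */
        unsigned long loads = cache_kib * 13 * (1024 / block_size) / ways;
        volatile const char *p = (volatile const char *)kernelbase;
        unsigned long i;

        for (i = 0; i < loads; i++)        /* pass 1: displace dirty lines */
            (void)p[i * block_size];

        for (i = 0; i < loads; i++)        /* pass 2: write back whatever remains */
            dcbf_line((const void *)(kernelbase + i * block_size));
    }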
has_L2_cache:
	/* skip L2 cache on P2040/P2040E as they have no L2 cache */
	mfspr	r3, SPRN_SVR
	/* shift right by 8 bits and clear E bit of SVR */
	rlwinm	r4, r3, 24, ~0x800

	lis	r3, SVR_P2040@h
	ori	r3, r3, SVR_P2040@l
	cmpw	r4, r3
	beq	1f

	li	r3, 1
	blr
1:
	li	r3, 0
	blr

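has_L2_cache only has to rule out P2040/P2040E: it shifts the SVR right by 8 bits, clears the E (security) bit and compares the result with SVR_P2040. The same test in C, with the bit position written out as an assumption rather than taken from the headers:

    #include <stdio.h>

    #define SVR_E_BIT 0x00080000u    /* security ("E") variant bit; assumed position */

    static int has_l2_cache_sketch(unsigned int svr, unsigned int svr_p2040)
    {
        /* drop the revision byte and the E bit, mirroring "rlwinm r4, r3, 24, ~0x800" */
        unsigned int soc = (svr >> 8) & ~(SVR_E_BIT >> 8);

        return soc != svr_p2040;     /* P2040/P2040E: no backside L2 to flush */
    }

    int main(void)
    {
        /* made-up SVR values, purely to exercise the helper */
        printf("%d\n", has_l2_cache_sketch(0x12345678, 0x123456));             /* 0: "P2040"  */
        printf("%d\n", has_l2_cache_sketch(0x12345678 | SVR_E_BIT, 0x123456)); /* 0: "P2040E" */
        printf("%d\n", has_l2_cache_sketch(0x99887766, 0x123456));             /* 1: has L2   */
        return 0;
    }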
/* flush backside L2 cache */
flush_backside_L2_cache:
	mflr	r10
	bl	has_L2_cache
	mtlr	r10
	cmpwi	r3, 0
	beq	2f

	/* Flush the L2 cache */
	mfspr	r3, SPRN_L2CSR0
	ori	r3, r3, L2CSR0_L2FL@l
	msync
	isync
	mtspr	SPRN_L2CSR0,r3
	isync

	/* check if it is complete */
1:	mfspr	r3,SPRN_L2CSR0
	andi.	r3, r3, L2CSR0_L2FL@l
	bne	1b
2:
	blr

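flush_backside_L2_cache requests a flush by setting L2CSR0[L2FL] and then polls until the hardware clears the bit again. The self-contained model below shows just that handshake; the "register" is an ordinary variable and the bit value is assumed for the model, not taken from the kernel headers:

    #define L2FL 0x00000800u          /* L2 flush request/busy bit; value assumed */

    static unsigned int fake_l2csr0;  /* stands in for the real L2CSR0 SPR */

    static unsigned int read_l2csr0(void) { return fake_l2csr0; }

    static void write_l2csr0(unsigned int v)
    {
        fake_l2csr0 = v;
        fake_l2csr0 &= ~L2FL;         /* model: hardware completes the flush, clears L2FL */
    }

    static void flush_backside_l2_sketch(void)
    {
        /* the real code brackets the SPR write with msync/isync */
        write_l2csr0(read_l2csr0() | L2FL);
        while (read_l2csr0() & L2FL)
            ;                         /* "check if it is complete" */
    }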
_GLOBAL(cpu_down_flush_e500v2)
	mflr	r0
	bl	flush_dcache_L1
	mtlr	r0
	blr

_GLOBAL(cpu_down_flush_e500mc)
_GLOBAL(cpu_down_flush_e5500)
	mflr	r0
	bl	flush_dcache_L1
	bl	flush_backside_L2_cache
	mtlr	r0
	blr

/* L1 Data Cache of e6500 contains no modified data, no flush is required */
_GLOBAL(cpu_down_flush_e6500)
	blr

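The per-core entry points only differ in how they compose the two primitives above, which is easiest to see in C (stubs stand in for the assembly routines):

    static void flush_dcache_L1_stub(void)   { /* displacement flush of the L1 d-cache */ }
    static void flush_backside_L2_stub(void) { /* set L2CSR0[L2FL] and poll */ }

    static void e500v2_down_flush(void)
    {
        flush_dcache_L1_stub();               /* private L1 d-cache only */
    }

    static void e500mc_or_e5500_down_flush(void)
    {
        flush_dcache_L1_stub();
        flush_backside_L2_stub();             /* these cores add a per-core backside L2 */
    }

    static void e6500_down_flush(void)
    {
        /* per the comment above, the e6500 L1 d-cache contains no modified data,
         * so the callback is a no-op */
    }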
@@ -2050,6 +2050,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
		.cpu_setup		= __setup_cpu_e500v2,
		.machine_check		= machine_check_e500,
		.platform		= "ppc8548",
		.cpu_down_flush		= cpu_down_flush_e500v2,
	},
#else
	{	/* e500mc */
@@ -2069,6 +2070,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
		.cpu_setup		= __setup_cpu_e500mc,
		.machine_check		= machine_check_e500mc,
		.platform		= "ppce500mc",
		.cpu_down_flush		= cpu_down_flush_e500mc,
	},
#endif /* CONFIG_PPC_E500MC */
#endif /* CONFIG_PPC32 */
@@ -2093,6 +2095,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
#endif
		.machine_check		= machine_check_e500mc,
		.platform		= "ppce5500",
		.cpu_down_flush		= cpu_down_flush_e5500,
	},
	{	/* e6500 */
		.pvr_mask		= 0xffff0000,
@@ -2115,6 +2118,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
#endif
		.machine_check		= machine_check_e500mc,
		.platform		= "ppce6500",
		.cpu_down_flush		= cpu_down_flush_e6500,
	},
#endif /* CONFIG_PPC_E500MC */
#ifdef CONFIG_PPC32
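These table entries are what make the selection happen "at init time": CPU identification walks cpu_specs[] and keeps the first entry whose (PVR & pvr_mask) == pvr_value, so cur_cpu_spec->cpu_down_flush ends up pointing at the right routine for the running core. A simplified sketch of that matching (the real identify_cpu() also copies and fixes up the chosen spec):

    struct spec_sketch {
        unsigned int pvr_mask;
        unsigned int pvr_value;
        void (*cpu_down_flush)(void);
    };

    static const struct spec_sketch *identify_cpu_sketch(const struct spec_sketch *specs,
                                                          unsigned int n, unsigned int pvr)
    {
        for (unsigned int i = 0; i < n; i++)
            if ((pvr & specs[i].pvr_mask) == specs[i].pvr_value)
                return &specs[i];
        return 0;    /* the real table ends with a catch-all entry instead */
    }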
@@ -1037,80 +1037,6 @@ _GLOBAL(set_context)
	isync			/* Force context change */
	blr

_GLOBAL(flush_dcache_L1)
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 * log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6

	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	lis	r4,KERNELBASE@h
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync
	lis	r4,KERNELBASE@h
	mtctr	r7

1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	blr

/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
_GLOBAL(__flush_disable_L1)
	mflr	r10
	bl	flush_dcache_L1	/* Flush L1 d-cache */
	mtlr	r10

	mfspr	r4, SPRN_L1CSR0	/* Invalidate and disable d-cache */
	li	r5, 2
	rlwimi	r4, r5, 0, 3

	msync
	isync
	mtspr	SPRN_L1CSR0, r4
	isync

1:	mfspr	r4, SPRN_L1CSR0	/* Wait for the invalidate to finish */
	andi.	r4, r4, 2
	bne	1b

	mfspr	r4, SPRN_L1CSR1	/* Invalidate and disable i-cache */
	li	r5, 2
	rlwimi	r4, r5, 0, 3

	mtspr	SPRN_L1CSR1, r4
	isync

	blr

#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
	.globl __secondary_start
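The hunk above drops the old copies: per this diff, flush_dcache_L1 moves into the e500 cpu_setup file shown earlier in this commit, and __flush_disable_L1 disappears along with its callers. For reference, the "invalidate and disable" step __flush_disable_L1 performed on L1CSR0/L1CSR1 amounts to the bit manipulation below (bit positions written out literally to match the li r5,2 / rlwimi r4,r5,0,3 sequence, not the kernel's macro names):

    static unsigned int l1csr_invalidate_and_disable(unsigned int csr)
    {
        const unsigned int CFI = 0x2u;    /* cache flash invalidate; hardware clears it when done */
        const unsigned int CE  = 0x1u;    /* cache enable */

        /* write the result back to the L1CSRx register, then poll until CFI drops */
        return (csr & ~CE) | CFI;
    }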
@@ -139,7 +139,8 @@ static void smp_85xx_mach_cpu_die(void)

	mtspr(SPRN_TCR, 0);

	__flush_disable_L1();
	cur_cpu_spec->cpu_down_flush();

	tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
	mtspr(SPRN_HID0, tmp);
	isync();
@@ -359,7 +360,7 @@ void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
	local_irq_disable();

	if (secondary) {
		__flush_disable_L1();
		cur_cpu_spec->cpu_down_flush();
		atomic_inc(&kexec_down_cpus);
		/* loop forever */
		while (1);