Merge branch 'misc' into devel

Conflicts:
	arch/arm/Kconfig
	arch/arm/common/Makefile
	arch/arm/kernel/Makefile
	arch/arm/kernel/smp.c

Commit 4073723acb
@@ -34,3 +34,5 @@ memory.txt
	- description of the virtual memory layout
nwfpe/
	- NWFPE floating point emulator documentation
swp_emulation
	- SWP/SWPB emulation handler/logging description
@@ -0,0 +1,27 @@
Software emulation of deprecated SWP instruction (CONFIG_SWP_EMULATE)
---------------------------------------------------------------------

ARMv6 architecture deprecates use of the SWP/SWPB instructions, and recommends
moving to the load-locked/store-conditional instructions LDREX and STREX.

ARMv7 multiprocessing extensions introduce the ability to disable these
instructions, triggering an undefined instruction exception when executed.
Trapped instructions are emulated using an LDREX/STREX or LDREXB/STREXB
sequence. If a memory access fault (an abort) occurs, a segmentation fault is
signalled to the triggering process.

/proc/cpu/swp_emulation holds some statistics/information, including the PID of
the last process to trigger the emulation to be invoked. For example:
---
Emulated SWP:		12
Emulated SWPB:		0
Aborted SWP{B}:		1
Last process:		314
---

NOTE: when accessing uncached shared regions, LDREX/STREX rely on an external
transaction monitoring block called a global monitor to maintain update
atomicity. If your system does not implement a global monitor, this option can
cause programs that perform SWP operations to uncached memory to deadlock, as
the STREX operation will always fail.
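(For reference, an illustrative sketch that is not part of this commit: the kind of LDREX/STREX retry loop described above, performing the same atomic swap as the deprecated "SWP r2, r1, [r0]" on ARMv6 and later. The register choice and GNU assembler syntax are assumptions made only for this example.)

	@ Illustration only: atomically swap r1 into [r0] and return the old
	@ value in r2 -- the same effect as the deprecated SWP r2, r1, [r0].
1:	ldrex	r2, [r0]		@ load-exclusive the current value
	strex	r3, r1, [r0]		@ attempt the store; r3 = 0 on success
	cmp	r3, #0			@ did the exclusive store succeed?
	bne	1b			@ no: lost exclusivity, retry
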
@ -2,6 +2,7 @@ config ARM
|
|||
bool
|
||||
default y
|
||||
select HAVE_AOUT
|
||||
select HAVE_DMA_API_DEBUG
|
||||
select HAVE_IDE
|
||||
select HAVE_MEMBLOCK
|
||||
select RTC_LIB
|
||||
|
@ -36,6 +37,9 @@ config ARM
|
|||
config HAVE_PWM
|
||||
bool
|
||||
|
||||
config MIGHT_HAVE_PCI
|
||||
bool
|
||||
|
||||
config SYS_SUPPORTS_APM_EMULATION
|
||||
bool
|
||||
|
||||
|
@ -226,7 +230,7 @@ config ARCH_INTEGRATOR
|
|||
bool "ARM Ltd. Integrator family"
|
||||
select ARM_AMBA
|
||||
select ARCH_HAS_CPUFREQ
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select ICST
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select PLAT_VERSATILE
|
||||
|
@ -236,7 +240,7 @@ config ARCH_INTEGRATOR
|
|||
config ARCH_REALVIEW
|
||||
bool "ARM Ltd. RealView family"
|
||||
select ARM_AMBA
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select HAVE_SCHED_CLOCK
|
||||
select ICST
|
||||
select GENERIC_CLOCKEVENTS
|
||||
|
@ -251,7 +255,7 @@ config ARCH_VERSATILE
|
|||
bool "ARM Ltd. Versatile family"
|
||||
select ARM_AMBA
|
||||
select ARM_VIC
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select HAVE_SCHED_CLOCK
|
||||
select ICST
|
||||
select GENERIC_CLOCKEVENTS
|
||||
|
@ -266,7 +270,7 @@ config ARCH_VEXPRESS
|
|||
select ARCH_WANT_OPTIONAL_GPIOLIB
|
||||
select ARM_AMBA
|
||||
select ARM_TIMER_SP804
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select HAVE_CLK
|
||||
select HAVE_SCHED_CLOCK
|
||||
|
@ -288,7 +292,7 @@ config ARCH_BCMRING
|
|||
depends on MMU
|
||||
select CPU_V6
|
||||
select ARM_AMBA
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select ARCH_WANT_OPTIONAL_GPIOLIB
|
||||
help
|
||||
|
@ -306,6 +310,7 @@ config ARCH_CNS3XXX
|
|||
select CPU_V6
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select ARM_GIC
|
||||
select MIGHT_HAVE_PCI
|
||||
select PCI_DOMAINS if PCI
|
||||
help
|
||||
Support for Cavium Networks CNS3XXX platform.
|
||||
|
@ -335,7 +340,7 @@ config ARCH_EP93XX
|
|||
select CPU_ARM920T
|
||||
select ARM_AMBA
|
||||
select ARM_VIC
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select ARCH_REQUIRE_GPIOLIB
|
||||
select ARCH_HAS_HOLES_MEMORYMODEL
|
||||
select ARCH_USES_GETTIMEOFFSET
|
||||
|
@ -355,14 +360,14 @@ config ARCH_MXC
|
|||
bool "Freescale MXC/iMX-based"
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select ARCH_REQUIRE_GPIOLIB
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
help
|
||||
Support for Freescale MXC/iMX-based family of processors
|
||||
|
||||
config ARCH_STMP3XXX
|
||||
bool "Freescale STMP3xxx"
|
||||
select CPU_ARM926T
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select ARCH_REQUIRE_GPIOLIB
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select USB_ARCH_HAS_EHCI
|
||||
|
@ -442,6 +447,7 @@ config ARCH_IXP4XX
|
|||
select GENERIC_GPIO
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select HAVE_SCHED_CLOCK
|
||||
select MIGHT_HAVE_PCI
|
||||
select DMABOUNCE if PCI
|
||||
help
|
||||
Support for Intel's IXP4XX (XScale) family of processors.
|
||||
|
@ -481,7 +487,7 @@ config ARCH_LPC32XX
|
|||
select HAVE_IDE
|
||||
select ARM_AMBA
|
||||
select USB_ARCH_HAS_OHCI
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select GENERIC_TIME
|
||||
select GENERIC_CLOCKEVENTS
|
||||
help
|
||||
|
@ -515,7 +521,7 @@ config ARCH_MMP
|
|||
bool "Marvell PXA168/910/MMP2"
|
||||
depends on MMU
|
||||
select ARCH_REQUIRE_GPIOLIB
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select HAVE_SCHED_CLOCK
|
||||
select TICK_ONESHOT
|
||||
|
@ -549,7 +555,7 @@ config ARCH_W90X900
|
|||
bool "Nuvoton W90X900 CPU"
|
||||
select CPU_ARM926T
|
||||
select ARCH_REQUIRE_GPIOLIB
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select GENERIC_CLOCKEVENTS
|
||||
help
|
||||
Support for Nuvoton (Winbond logic dept.) ARM9 processor,
|
||||
|
@ -563,19 +569,19 @@ config ARCH_W90X900
|
|||
config ARCH_NUC93X
|
||||
bool "Nuvoton NUC93X CPU"
|
||||
select CPU_ARM926T
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
help
|
||||
Support for Nuvoton (Winbond logic dept.) NUC93X MCU. The NUC93X is a
|
||||
low-power and high performance MPEG-4/JPEG multimedia controller chip.
|
||||
|
||||
config ARCH_TEGRA
|
||||
bool "NVIDIA Tegra"
|
||||
select CLKDEV_LOOKUP
|
||||
select GENERIC_TIME
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select GENERIC_GPIO
|
||||
select HAVE_CLK
|
||||
select HAVE_SCHED_CLOCK
|
||||
select COMMON_CLKDEV
|
||||
select ARCH_HAS_BARRIERS if CACHE_L2X0
|
||||
select ARCH_HAS_CPUFREQ
|
||||
help
|
||||
|
@ -585,7 +591,7 @@ config ARCH_TEGRA
|
|||
config ARCH_PNX4008
|
||||
bool "Philips Nexperia PNX4008 Mobile"
|
||||
select CPU_ARM926T
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select ARCH_USES_GETTIMEOFFSET
|
||||
help
|
||||
This enables support for Philips PNX4008 mobile platform.
|
||||
|
@ -595,7 +601,7 @@ config ARCH_PXA
|
|||
depends on MMU
|
||||
select ARCH_MTD_XIP
|
||||
select ARCH_HAS_CPUFREQ
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select ARCH_REQUIRE_GPIOLIB
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select HAVE_SCHED_CLOCK
|
||||
|
@ -774,7 +780,7 @@ config ARCH_TCC_926
|
|||
bool "Telechips TCC ARM926-based systems"
|
||||
select CPU_ARM926T
|
||||
select HAVE_CLK
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select GENERIC_CLOCKEVENTS
|
||||
help
|
||||
Support for Telechips TCC ARM926-based systems.
|
||||
|
@ -799,7 +805,7 @@ config ARCH_U300
|
|||
select ARM_AMBA
|
||||
select ARM_VIC
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select GENERIC_GPIO
|
||||
help
|
||||
Support for ST-Ericsson U300 series mobile platforms.
|
||||
|
@ -809,7 +815,7 @@ config ARCH_U8500
|
|||
select CPU_V7
|
||||
select ARM_AMBA
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select ARCH_REQUIRE_GPIOLIB
|
||||
help
|
||||
Support for ST-Ericsson's Ux500 architecture
|
||||
|
@ -819,7 +825,7 @@ config ARCH_NOMADIK
|
|||
select ARM_AMBA
|
||||
select ARM_VIC
|
||||
select CPU_ARM926T
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select ARCH_REQUIRE_GPIOLIB
|
||||
help
|
||||
|
@ -831,7 +837,7 @@ config ARCH_DAVINCI
|
|||
select ARCH_REQUIRE_GPIOLIB
|
||||
select ZONE_DMA
|
||||
select HAVE_IDE
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select GENERIC_ALLOCATOR
|
||||
select ARCH_HAS_HOLES_MEMORYMODEL
|
||||
help
|
||||
|
@ -852,7 +858,7 @@ config PLAT_SPEAR
|
|||
bool "ST SPEAr"
|
||||
select ARM_AMBA
|
||||
select ARCH_REQUIRE_GPIOLIB
|
||||
select COMMON_CLKDEV
|
||||
select CLKDEV_LOOKUP
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select HAVE_CLK
|
||||
help
|
||||
|
@ -1034,6 +1040,11 @@ config CPU_HAS_PMU
|
|||
default y
|
||||
bool
|
||||
|
||||
config MULTI_IRQ_HANDLER
|
||||
bool
|
||||
help
|
||||
Allow each machine to specify its own IRQ handler at run time.
|
||||
|
||||
if !MMU
|
||||
source "arch/arm/Kconfig-nommu"
|
||||
endif
|
||||
|
@ -1181,7 +1192,7 @@ config ISA_DMA_API
|
|||
bool
|
||||
|
||||
config PCI
|
||||
bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB || ARCH_IXP4XX || ARCH_KS8695 || MACH_ARMCORE || ARCH_CNS3XXX || SA1100_NANOENGINE
|
||||
bool "PCI support" if MIGHT_HAVE_PCI
|
||||
help
|
||||
Find out whether you have a PCI motherboard. PCI is the name of a
|
||||
bus system, i.e. the way the CPU talks to the other stuff inside
|
||||
|
@ -1253,7 +1264,7 @@ config SMP
|
|||
config SMP_ON_UP
|
||||
bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
|
||||
depends on EXPERIMENTAL
|
||||
depends on SMP && !XIP && !THUMB2_KERNEL
|
||||
depends on SMP && !XIP
|
||||
default y
|
||||
help
|
||||
SMP kernels contain instructions which fail on non-SMP processors.
|
||||
|
@ -1272,6 +1283,7 @@ config HAVE_ARM_SCU
|
|||
config HAVE_ARM_TWD
|
||||
bool
|
||||
depends on SMP
|
||||
select TICK_ONESHOT
|
||||
help
|
||||
This option enables support for the ARM timer and watchdog unit
|
||||
|
||||
|
@ -1335,7 +1347,7 @@ config HZ
|
|||
default 100
|
||||
|
||||
config THUMB2_KERNEL
|
||||
bool "Compile the kernel in Thumb-2 mode"
|
||||
bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
|
||||
depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL
|
||||
select AEABI
|
||||
select ARM_ASM_UNIFIED
|
||||
|
@ -1549,6 +1561,7 @@ config SECCOMP
|
|||
|
||||
config CC_STACKPROTECTOR
|
||||
bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
|
||||
depends on EXPERIMENTAL
|
||||
help
|
||||
This option turns on the -fstack-protector GCC feature. This
|
||||
feature puts, at the beginning of functions, a canary value on
|
||||
|
@ -1745,7 +1758,7 @@ config CPU_FREQ_S3C
|
|||
Internal configuration node for common cpufreq on Samsung SoC
|
||||
|
||||
config CPU_FREQ_S3C24XX
|
||||
bool "CPUfreq driver for Samsung S3C24XX series CPUs"
|
||||
bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)"
|
||||
depends on ARCH_S3C2410 && CPU_FREQ && EXPERIMENTAL
|
||||
select CPU_FREQ_S3C
|
||||
help
|
||||
|
@ -1757,7 +1770,7 @@ config CPU_FREQ_S3C24XX
|
|||
If in doubt, say N.
|
||||
|
||||
config CPU_FREQ_S3C24XX_PLL
|
||||
bool "Support CPUfreq changing of PLL frequency"
|
||||
bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)"
|
||||
depends on CPU_FREQ_S3C24XX && EXPERIMENTAL
|
||||
help
|
||||
Compile in support for changing the PLL frequency from the
|
||||
@ -31,7 +31,7 @@ config FRAME_POINTER
|
|||
reported is severely limited.
|
||||
|
||||
config ARM_UNWIND
|
||||
bool "Enable stack unwinding support"
|
||||
bool "Enable stack unwinding support (EXPERIMENTAL)"
|
||||
depends on AEABI && EXPERIMENTAL
|
||||
default y
|
||||
help
|
||||
@ -37,7 +37,3 @@ config SHARP_PARAM
|
|||
|
||||
config SHARP_SCOOP
|
||||
bool
|
||||
|
||||
config COMMON_CLKDEV
|
||||
bool
|
||||
select HAVE_CLK
|
||||
@ -328,7 +328,7 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
|
|||
* substitute the safe buffer for the unsafe one.
|
||||
* (basically move the buffer from an unsafe area to a safe one)
|
||||
*/
|
||||
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
|
||||
dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
|
||||
|
@ -338,7 +338,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
|
|||
|
||||
return map_single(dev, ptr, size, dir);
|
||||
}
|
||||
EXPORT_SYMBOL(dma_map_single);
|
||||
EXPORT_SYMBOL(__dma_map_single);
|
||||
|
||||
/*
|
||||
* see if a mapped address was really a "safe" buffer and if so, copy
|
||||
|
@ -346,7 +346,7 @@ EXPORT_SYMBOL(dma_map_single);
|
|||
* the safe buffer. (basically return things back to the way they
|
||||
* should be)
|
||||
*/
|
||||
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
|
||||
void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
|
||||
|
@ -354,9 +354,9 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
|
|||
|
||||
unmap_single(dev, dma_addr, size, dir);
|
||||
}
|
||||
EXPORT_SYMBOL(dma_unmap_single);
|
||||
EXPORT_SYMBOL(__dma_unmap_single);
|
||||
|
||||
dma_addr_t dma_map_page(struct device *dev, struct page *page,
|
||||
dma_addr_t __dma_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
|
||||
|
@ -372,7 +372,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
|
|||
|
||||
return map_single(dev, page_address(page) + offset, size, dir);
|
||||
}
|
||||
EXPORT_SYMBOL(dma_map_page);
|
||||
EXPORT_SYMBOL(__dma_map_page);
|
||||
|
||||
/*
|
||||
* see if a mapped address was really a "safe" buffer and if so, copy
|
||||
|
@ -380,7 +380,7 @@ EXPORT_SYMBOL(dma_map_page);
|
|||
* the safe buffer. (basically return things back to the way they
|
||||
* should be)
|
||||
*/
|
||||
void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
|
||||
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
|
||||
enum dma_data_direction dir)
|
||||
{
|
||||
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
|
||||
|
@ -388,7 +388,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
|
|||
|
||||
unmap_single(dev, dma_addr, size, dir);
|
||||
}
|
||||
EXPORT_SYMBOL(dma_unmap_page);
|
||||
EXPORT_SYMBOL(__dma_unmap_page);
|
||||
|
||||
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
|
||||
unsigned long off, size_t sz, enum dma_data_direction dir)
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
#endif
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/domain.h>
|
||||
|
||||
/*
|
||||
* Endian independent macros for shifting bytes within registers.
|
||||
|
@ -157,16 +158,24 @@
|
|||
#ifdef CONFIG_SMP
|
||||
#define ALT_SMP(instr...) \
|
||||
9998: instr
|
||||
/*
|
||||
* Note: if you get assembler errors from ALT_UP() when building with
|
||||
* CONFIG_THUMB2_KERNEL, you almost certainly need to use
|
||||
* ALT_SMP( W(instr) ... )
|
||||
*/
|
||||
#define ALT_UP(instr...) \
|
||||
.pushsection ".alt.smp.init", "a" ;\
|
||||
.long 9998b ;\
|
||||
instr ;\
|
||||
9997: instr ;\
|
||||
.if . - 9997b != 4 ;\
|
||||
.error "ALT_UP() content must assemble to exactly 4 bytes";\
|
||||
.endif ;\
|
||||
.popsection
|
||||
#define ALT_UP_B(label) \
|
||||
.equ up_b_offset, label - 9998b ;\
|
||||
.pushsection ".alt.smp.init", "a" ;\
|
||||
.long 9998b ;\
|
||||
b . + up_b_offset ;\
|
||||
W(b) . + up_b_offset ;\
|
||||
.popsection
|
||||
#else
|
||||
#define ALT_SMP(instr...)
|
||||
|
@ -177,16 +186,24 @@
|
|||
/*
|
||||
* SMP data memory barrier
|
||||
*/
|
||||
.macro smp_dmb
|
||||
.macro smp_dmb mode
|
||||
#ifdef CONFIG_SMP
|
||||
#if __LINUX_ARM_ARCH__ >= 7
|
||||
.ifeqs "\mode","arm"
|
||||
ALT_SMP(dmb)
|
||||
.else
|
||||
ALT_SMP(W(dmb))
|
||||
.endif
|
||||
#elif __LINUX_ARM_ARCH__ == 6
|
||||
ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
|
||||
#else
|
||||
#error Incompatible SMP platform
|
||||
#endif
|
||||
.ifeqs "\mode","arm"
|
||||
ALT_UP(nop)
|
||||
.else
|
||||
ALT_UP(W(nop))
|
||||
.endif
|
||||
#endif
|
||||
.endm
|
||||
|
||||
|
@ -206,12 +223,12 @@
|
|||
*/
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
|
||||
.macro usraccoff, instr, reg, ptr, inc, off, cond, abort
|
||||
.macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
|
||||
9999:
|
||||
.if \inc == 1
|
||||
\instr\cond\()bt \reg, [\ptr, #\off]
|
||||
\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
|
||||
.elseif \inc == 4
|
||||
\instr\cond\()t \reg, [\ptr, #\off]
|
||||
\instr\cond\()\t\().w \reg, [\ptr, #\off]
|
||||
.else
|
||||
.error "Unsupported inc macro argument"
|
||||
.endif
|
||||
|
@ -246,13 +263,13 @@
|
|||
|
||||
#else /* !CONFIG_THUMB2_KERNEL */
|
||||
|
||||
.macro usracc, instr, reg, ptr, inc, cond, rept, abort
|
||||
.macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
|
||||
.rept \rept
|
||||
9999:
|
||||
.if \inc == 1
|
||||
\instr\cond\()bt \reg, [\ptr], #\inc
|
||||
\instr\cond\()b\()\t \reg, [\ptr], #\inc
|
||||
.elseif \inc == 4
|
||||
\instr\cond\()t \reg, [\ptr], #\inc
|
||||
\instr\cond\()\t \reg, [\ptr], #\inc
|
||||
.else
|
||||
.error "Unsupported inc macro argument"
|
||||
.endif
|
||||
|
|
|
@ -23,4 +23,6 @@
|
|||
#define ARCH_SLAB_MINALIGN 8
|
||||
#endif
|
||||
|
||||
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
|
||||
|
||||
#endif
|
||||
|
|
|
@ -12,23 +12,13 @@
|
|||
#ifndef __ASM_CLKDEV_H
|
||||
#define __ASM_CLKDEV_H
|
||||
|
||||
struct clk;
|
||||
struct device;
|
||||
#include <linux/slab.h>
|
||||
|
||||
struct clk_lookup {
|
||||
struct list_head node;
|
||||
const char *dev_id;
|
||||
const char *con_id;
|
||||
struct clk *clk;
|
||||
};
|
||||
#include <mach/clkdev.h>
|
||||
|
||||
struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
|
||||
const char *dev_fmt, ...);
|
||||
|
||||
void clkdev_add(struct clk_lookup *cl);
|
||||
void clkdev_drop(struct clk_lookup *cl);
|
||||
|
||||
void clkdev_add_table(struct clk_lookup *, size_t);
|
||||
int clk_add_alias(const char *, const char *, char *, struct device *);
|
||||
static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
|
||||
{
|
||||
return kzalloc(size, GFP_KERNEL);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -5,24 +5,29 @@
|
|||
|
||||
#include <linux/mm_types.h>
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/dma-debug.h>
|
||||
|
||||
#include <asm-generic/dma-coherent.h>
|
||||
#include <asm/memory.h>
|
||||
|
||||
#ifdef __arch_page_to_dma
|
||||
#error Please update to __arch_pfn_to_dma
|
||||
#endif
|
||||
|
||||
/*
|
||||
* page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
|
||||
* used internally by the DMA-mapping API to provide DMA addresses. They
|
||||
* must not be used by drivers.
|
||||
* dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
|
||||
* functions used internally by the DMA-mapping API to provide DMA
|
||||
* addresses. They must not be used by drivers.
|
||||
*/
|
||||
#ifndef __arch_page_to_dma
|
||||
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
|
||||
#ifndef __arch_pfn_to_dma
|
||||
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
|
||||
{
|
||||
return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
|
||||
return (dma_addr_t)__pfn_to_bus(pfn);
|
||||
}
|
||||
|
||||
static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
|
||||
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
|
||||
{
|
||||
return pfn_to_page(__bus_to_pfn(addr));
|
||||
return __bus_to_pfn(addr);
|
||||
}
|
||||
|
||||
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
|
||||
|
@ -35,14 +40,14 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
|
|||
return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
|
||||
}
|
||||
#else
|
||||
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
|
||||
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
|
||||
{
|
||||
return __arch_page_to_dma(dev, page);
|
||||
return __arch_pfn_to_dma(dev, pfn);
|
||||
}
|
||||
|
||||
static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
|
||||
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
|
||||
{
|
||||
return __arch_dma_to_page(dev, addr);
|
||||
return __arch_dma_to_pfn(dev, addr);
|
||||
}
|
||||
|
||||
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
|
||||
|
@ -293,13 +298,13 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
|
|||
/*
|
||||
* The DMA API, implemented by dmabounce.c. See below for descriptions.
|
||||
*/
|
||||
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
|
||||
extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
|
||||
enum dma_data_direction);
|
||||
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
|
||||
extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
|
||||
enum dma_data_direction);
|
||||
extern dma_addr_t dma_map_page(struct device *, struct page *,
|
||||
extern dma_addr_t __dma_map_page(struct device *, struct page *,
|
||||
unsigned long, size_t, enum dma_data_direction);
|
||||
extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
|
||||
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
|
||||
enum dma_data_direction);
|
||||
|
||||
/*
|
||||
|
@ -323,6 +328,34 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
|
|||
}
|
||||
|
||||
|
||||
static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
__dma_single_cpu_to_dev(cpu_addr, size, dir);
|
||||
return virt_to_dma(dev, cpu_addr);
|
||||
}
|
||||
|
||||
static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
__dma_page_cpu_to_dev(page, offset, size, dir);
|
||||
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
|
||||
}
|
||||
|
||||
static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
|
||||
}
|
||||
|
||||
static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
|
||||
handle & ~PAGE_MASK, size, dir);
|
||||
}
|
||||
#endif /* CONFIG_DMABOUNCE */
|
||||
|
||||
/**
|
||||
* dma_map_single - map a single buffer for streaming DMA
|
||||
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
|
||||
|
@ -340,11 +373,16 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
|
|||
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
dma_addr_t addr;
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
|
||||
__dma_single_cpu_to_dev(cpu_addr, size, dir);
|
||||
addr = __dma_map_single(dev, cpu_addr, size, dir);
|
||||
debug_dma_map_page(dev, virt_to_page(cpu_addr),
|
||||
(unsigned long)cpu_addr & ~PAGE_MASK, size,
|
||||
dir, addr, true);
|
||||
|
||||
return virt_to_dma(dev, cpu_addr);
|
||||
return addr;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -364,11 +402,14 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
|
|||
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
|
||||
unsigned long offset, size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
dma_addr_t addr;
|
||||
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
|
||||
__dma_page_cpu_to_dev(page, offset, size, dir);
|
||||
addr = __dma_map_page(dev, page, offset, size, dir);
|
||||
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
|
||||
|
||||
return page_to_dma(dev, page) + offset;
|
||||
return addr;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -388,7 +429,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
|
|||
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
|
||||
debug_dma_unmap_page(dev, handle, size, dir, true);
|
||||
__dma_unmap_single(dev, handle, size, dir);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -408,10 +450,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
|
|||
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
|
||||
size_t size, enum dma_data_direction dir)
|
||||
{
|
||||
__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
|
||||
size, dir);
|
||||
debug_dma_unmap_page(dev, handle, size, dir, false);
|
||||
__dma_unmap_page(dev, handle, size, dir);
|
||||
}
|
||||
#endif /* CONFIG_DMABOUNCE */
|
||||
|
||||
/**
|
||||
* dma_sync_single_range_for_cpu
|
||||
|
@ -437,6 +478,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
|
|||
{
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
|
||||
debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
|
||||
|
||||
if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
|
||||
return;
|
||||
|
||||
|
@ -449,6 +492,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
|
|||
{
|
||||
BUG_ON(!valid_dma_direction(dir));
|
||||
|
||||
debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
|
||||
|
||||
if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
|
||||
return;
|
||||
|
||||
|
|
|
@ -45,13 +45,17 @@
|
|||
*/
|
||||
#define DOMAIN_NOACCESS 0
|
||||
#define DOMAIN_CLIENT 1
|
||||
#ifdef CONFIG_CPU_USE_DOMAINS
|
||||
#define DOMAIN_MANAGER 3
|
||||
#else
|
||||
#define DOMAIN_MANAGER 1
|
||||
#endif
|
||||
|
||||
#define domain_val(dom,type) ((type) << (2*(dom)))
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#ifdef CONFIG_MMU
|
||||
#ifdef CONFIG_CPU_USE_DOMAINS
|
||||
#define set_domain(x) \
|
||||
do { \
|
||||
__asm__ __volatile__( \
|
||||
|
@ -74,5 +78,28 @@
|
|||
#define modify_domain(dom,type) do { } while (0)
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Generate the T (user) versions of the LDR/STR and related
|
||||
* instructions (inline assembly)
|
||||
*/
|
||||
#ifdef CONFIG_CPU_USE_DOMAINS
|
||||
#define T(instr) #instr "t"
|
||||
#else
|
||||
#define T(instr) #instr
|
||||
#endif
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#else /* __ASSEMBLY__ */
|
||||
|
||||
/*
|
||||
* Generate the T (user) versions of the LDR/STR and related
|
||||
* instructions
|
||||
*/
|
||||
#ifdef CONFIG_CPU_USE_DOMAINS
|
||||
#define T(instr) instr ## t
|
||||
#else
|
||||
#define T(instr) instr
|
||||
#endif
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* !__ASM_PROC_DOMAIN_H */
|
||||
@ -0,0 +1,44 @@
|
|||
/*
|
||||
* Interrupt handling. Preserves r7, r8, r9
|
||||
*/
|
||||
.macro arch_irq_handler_default
|
||||
get_irqnr_preamble r5, lr
|
||||
1: get_irqnr_and_base r0, r6, r5, lr
|
||||
movne r1, sp
|
||||
@
|
||||
@ routine called with r0 = irq number, r1 = struct pt_regs *
|
||||
@
|
||||
adrne lr, BSYM(1b)
|
||||
bne asm_do_IRQ
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/*
|
||||
* XXX
|
||||
*
|
||||
* this macro assumes that irqstat (r6) and base (r5) are
|
||||
* preserved from get_irqnr_and_base above
|
||||
*/
|
||||
ALT_SMP(test_for_ipi r0, r6, r5, lr)
|
||||
ALT_UP_B(9997f)
|
||||
movne r1, sp
|
||||
adrne lr, BSYM(1b)
|
||||
bne do_IPI
|
||||
|
||||
#ifdef CONFIG_LOCAL_TIMERS
|
||||
test_for_ltirq r0, r6, r5, lr
|
||||
movne r0, sp
|
||||
adrne lr, BSYM(1b)
|
||||
bne do_local_timer
|
||||
#endif
|
||||
#endif
|
||||
9997:
|
||||
.endm
|
||||
|
||||
.macro arch_irq_handler, symbol_name
|
||||
.align 5
|
||||
.global \symbol_name
|
||||
\symbol_name:
|
||||
mov r4, lr
|
||||
arch_irq_handler_default
|
||||
mov pc, r4
|
||||
.endm
|
|
@ -13,12 +13,13 @@
|
|||
#include <linux/preempt.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/domain.h>
|
||||
|
||||
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
|
||||
__asm__ __volatile__( \
|
||||
"1: ldrt %1, [%2]\n" \
|
||||
"1: " T(ldr) " %1, [%2]\n" \
|
||||
" " insn "\n" \
|
||||
"2: strt %0, [%2]\n" \
|
||||
"2: " T(str) " %0, [%2]\n" \
|
||||
" mov %0, #0\n" \
|
||||
"3:\n" \
|
||||
" .pushsection __ex_table,\"a\"\n" \
|
||||
|
@ -97,10 +98,10 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
|
|||
pagefault_disable(); /* implies preempt_disable() */
|
||||
|
||||
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
|
||||
"1: ldrt %0, [%3]\n"
|
||||
"1: " T(ldr) " %0, [%3]\n"
|
||||
" teq %0, %1\n"
|
||||
" it eq @ explicit IT needed for the 2b label\n"
|
||||
"2: streqt %2, [%3]\n"
|
||||
"2: " T(streq) " %2, [%3]\n"
|
||||
"3:\n"
|
||||
" .pushsection __ex_table,\"a\"\n"
|
||||
" .align 3\n"
|
||||
@ -5,13 +5,31 @@
|
|||
#include <linux/threads.h>
|
||||
#include <asm/irq.h>
|
||||
|
||||
#define NR_IPI 5
|
||||
|
||||
typedef struct {
|
||||
unsigned int __softirq_pending;
|
||||
#ifdef CONFIG_LOCAL_TIMERS
|
||||
unsigned int local_timer_irqs;
|
||||
#endif
|
||||
#ifdef CONFIG_SMP
|
||||
unsigned int ipi_irqs[NR_IPI];
|
||||
#endif
|
||||
} ____cacheline_aligned irq_cpustat_t;
|
||||
|
||||
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
|
||||
|
||||
#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
|
||||
#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
u64 smp_irq_stat_cpu(unsigned int cpu);
|
||||
#else
|
||||
#define smp_irq_stat_cpu(cpu) 0
|
||||
#endif
|
||||
|
||||
#define arch_irq_stat_cpu smp_irq_stat_cpu
|
||||
|
||||
#if NR_IRQS > 512
|
||||
#define HARDIRQ_BITS 10
|
||||
#elif NR_IRQS > 256
|
||||
|
|
|
@ -30,7 +30,6 @@ asmlinkage void do_local_timer(struct pt_regs *);
|
|||
#include "smp_twd.h"
|
||||
|
||||
#define local_timer_ack() twd_timer_ack()
|
||||
#define local_timer_stop() twd_timer_stop()
|
||||
|
||||
#else
|
||||
|
||||
|
@ -40,11 +39,6 @@ asmlinkage void do_local_timer(struct pt_regs *);
|
|||
*/
|
||||
int local_timer_ack(void);
|
||||
|
||||
/*
|
||||
* Stop a local timer interrupt.
|
||||
*/
|
||||
void local_timer_stop(void);
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -52,12 +46,6 @@ void local_timer_stop(void);
|
|||
*/
|
||||
void local_timer_setup(struct clock_event_device *);
|
||||
|
||||
#else
|
||||
|
||||
static inline void local_timer_stop(void)
|
||||
{
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
|
@ -37,11 +37,20 @@ struct machine_desc {
|
|||
struct meminfo *);
|
||||
void (*reserve)(void);/* reserve mem blocks */
|
||||
void (*map_io)(void);/* IO mapping function */
|
||||
void (*init_early)(void);
|
||||
void (*init_irq)(void);
|
||||
struct sys_timer *timer; /* system tick timer */
|
||||
void (*init_machine)(void);
|
||||
#ifdef CONFIG_MULTI_IRQ_HANDLER
|
||||
void (*handle_irq)(struct pt_regs *);
|
||||
#endif
|
||||
};
|
||||
|
||||
/*
|
||||
* Current machine - only accessible during boot.
|
||||
*/
|
||||
extern struct machine_desc *machine_desc;
|
||||
|
||||
/*
|
||||
* Set of macros to define architecture features. This is built into
|
||||
* a table by the linker.
|
||||
|
|
|
@ -17,10 +17,12 @@ struct seq_file;
|
|||
/*
|
||||
* This is internal. Do not use it.
|
||||
*/
|
||||
extern unsigned int arch_nr_irqs;
|
||||
extern void (*init_arch_irq)(void);
|
||||
extern void init_FIQ(void);
|
||||
extern int show_fiq_list(struct seq_file *, void *);
|
||||
extern int show_fiq_list(struct seq_file *, int);
|
||||
|
||||
#ifdef CONFIG_MULTI_IRQ_HANDLER
|
||||
extern void (*handle_arch_irq)(struct pt_regs *);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* This is for easy migration, but should be changed in the source
|
||||
|
|
|
@ -43,7 +43,6 @@ struct sys_timer {
|
|||
#endif
|
||||
};
|
||||
|
||||
extern struct sys_timer *system_timer;
|
||||
extern void timer_tick(void);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -33,27 +33,23 @@ struct seq_file;
|
|||
/*
|
||||
* generate IPI list text
|
||||
*/
|
||||
extern void show_ipi_list(struct seq_file *p);
|
||||
extern void show_ipi_list(struct seq_file *, int);
|
||||
|
||||
/*
|
||||
* Called from assembly code, this handles an IPI.
|
||||
*/
|
||||
asmlinkage void do_IPI(struct pt_regs *regs);
|
||||
asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
|
||||
|
||||
/*
|
||||
* Setup the set of possible CPUs (via set_cpu_possible)
|
||||
*/
|
||||
extern void smp_init_cpus(void);
|
||||
|
||||
/*
|
||||
* Move global data into per-processor storage.
|
||||
*/
|
||||
extern void smp_store_cpu_info(unsigned int cpuid);
|
||||
|
||||
/*
|
||||
* Raise an IPI cross call on CPUs in callmap.
|
||||
*/
|
||||
extern void smp_cross_call(const struct cpumask *mask);
|
||||
extern void smp_cross_call(const struct cpumask *mask, int ipi);
|
||||
|
||||
/*
|
||||
* Boot a secondary CPU, and assign it the specified idle task.
|
||||
|
@ -72,6 +68,11 @@ asmlinkage void secondary_start_kernel(void);
|
|||
*/
|
||||
extern void platform_secondary_init(unsigned int cpu);
|
||||
|
||||
/*
|
||||
* Initialize cpu_possible map, and enable coherency
|
||||
*/
|
||||
extern void platform_smp_prepare_cpus(unsigned int);
|
||||
|
||||
/*
|
||||
* Initial data for bringing up a secondary CPU.
|
||||
*/
|
||||
|
@ -97,6 +98,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
|
|||
/*
|
||||
* show local interrupt info
|
||||
*/
|
||||
extern void show_local_irqs(struct seq_file *);
|
||||
extern void show_local_irqs(struct seq_file *, int);
|
||||
|
||||
#endif /* ifndef __ASM_ARM_SMP_H */
|
||||
@ -1,17 +0,0 @@
|
|||
#ifndef ASMARM_SMP_MIDR_H
|
||||
#define ASMARM_SMP_MIDR_H
|
||||
|
||||
#define hard_smp_processor_id() \
|
||||
({ \
|
||||
unsigned int cpunum; \
|
||||
__asm__("\n" \
|
||||
"1: mrc p15, 0, %0, c0, c0, 5\n" \
|
||||
" .pushsection \".alt.smp.init\", \"a\"\n"\
|
||||
" .long 1b\n" \
|
||||
" mov %0, #0\n" \
|
||||
" .popsection" \
|
||||
: "=r" (cpunum)); \
|
||||
cpunum &= 0x0F; \
|
||||
})
|
||||
|
||||
#endif
|
|
@ -22,7 +22,6 @@ struct clock_event_device;
|
|||
|
||||
extern void __iomem *twd_base;
|
||||
|
||||
void twd_timer_stop(void);
|
||||
int twd_timer_ack(void);
|
||||
void twd_timer_setup(struct clock_event_device *);
|
||||
|
||||
|
|
|
@ -124,6 +124,13 @@ extern unsigned int user_debug;
|
|||
#define vectors_high() (0)
|
||||
#endif
|
||||
|
||||
#if __LINUX_ARM_ARCH__ >= 7 || \
|
||||
(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
|
||||
#define sev() __asm__ __volatile__ ("sev" : : : "memory")
|
||||
#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
|
||||
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
|
||||
#endif
|
||||
|
||||
#if __LINUX_ARM_ARCH__ >= 7
|
||||
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
|
||||
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
|
||||
|
|
|
@ -46,4 +46,6 @@ static inline int in_exception_text(unsigned long ptr)
|
|||
extern void __init early_trap_init(void);
|
||||
extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
|
||||
|
||||
extern void *vectors_page;
|
||||
|
||||
#endif
|
||||
|
|
|
@ -227,7 +227,7 @@ do { \
|
|||
|
||||
#define __get_user_asm_byte(x,addr,err) \
|
||||
__asm__ __volatile__( \
|
||||
"1: ldrbt %1,[%2]\n" \
|
||||
"1: " T(ldrb) " %1,[%2],#0\n" \
|
||||
"2:\n" \
|
||||
" .pushsection .fixup,\"ax\"\n" \
|
||||
" .align 2\n" \
|
||||
|
@ -263,7 +263,7 @@ do { \
|
|||
|
||||
#define __get_user_asm_word(x,addr,err) \
|
||||
__asm__ __volatile__( \
|
||||
"1: ldrt %1,[%2]\n" \
|
||||
"1: " T(ldr) " %1,[%2],#0\n" \
|
||||
"2:\n" \
|
||||
" .pushsection .fixup,\"ax\"\n" \
|
||||
" .align 2\n" \
|
||||
|
@ -308,7 +308,7 @@ do { \
|
|||
|
||||
#define __put_user_asm_byte(x,__pu_addr,err) \
|
||||
__asm__ __volatile__( \
|
||||
"1: strbt %1,[%2]\n" \
|
||||
"1: " T(strb) " %1,[%2],#0\n" \
|
||||
"2:\n" \
|
||||
" .pushsection .fixup,\"ax\"\n" \
|
||||
" .align 2\n" \
|
||||
|
@ -341,7 +341,7 @@ do { \
|
|||
|
||||
#define __put_user_asm_word(x,__pu_addr,err) \
|
||||
__asm__ __volatile__( \
|
||||
"1: strt %1,[%2]\n" \
|
||||
"1: " T(str) " %1,[%2],#0\n" \
|
||||
"2:\n" \
|
||||
" .pushsection .fixup,\"ax\"\n" \
|
||||
" .align 2\n" \
|
||||
|
@ -366,10 +366,10 @@ do { \
|
|||
|
||||
#define __put_user_asm_dword(x,__pu_addr,err) \
|
||||
__asm__ __volatile__( \
|
||||
ARM( "1: strt " __reg_oper1 ", [%1], #4\n" ) \
|
||||
ARM( "2: strt " __reg_oper0 ", [%1]\n" ) \
|
||||
THUMB( "1: strt " __reg_oper1 ", [%1]\n" ) \
|
||||
THUMB( "2: strt " __reg_oper0 ", [%1, #4]\n" ) \
|
||||
ARM( "1: " T(str) " " __reg_oper1 ", [%1], #4\n" ) \
|
||||
ARM( "2: " T(str) " " __reg_oper0 ", [%1]\n" ) \
|
||||
THUMB( "1: " T(str) " " __reg_oper1 ", [%1]\n" ) \
|
||||
THUMB( "2: " T(str) " " __reg_oper0 ", [%1, #4]\n" ) \
|
||||
"3:\n" \
|
||||
" .pushsection .fixup,\"ax\"\n" \
|
||||
" .align 2\n" \
|
||||
|
|
|
@ -30,7 +30,7 @@ obj-$(CONFIG_ARTHUR) += arthur.o
|
|||
obj-$(CONFIG_ISA_DMA) += dma-isa.o
|
||||
obj-$(CONFIG_PCI) += bios32.o isa.o
|
||||
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
|
||||
obj-$(CONFIG_SMP) += smp.o
|
||||
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
|
||||
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
|
||||
obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
|
||||
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
|
||||
|
@ -44,6 +44,8 @@ obj-$(CONFIG_KGDB) += kgdb.o
|
|||
obj-$(CONFIG_ARM_UNWIND) += unwind.o
|
||||
obj-$(CONFIG_HAVE_TCM) += tcm.o
|
||||
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
|
||||
obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
|
||||
CFLAGS_swp_emulate.o := -Wa,-march=armv7-a
|
||||
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
|
||||
|
||||
obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
|
||||
|
|
|
@ -25,42 +25,22 @@
|
|||
#include <asm/tls.h>
|
||||
|
||||
#include "entry-header.S"
|
||||
#include <asm/entry-macro-multi.S>
|
||||
|
||||
/*
|
||||
* Interrupt handling. Preserves r7, r8, r9
|
||||
*/
|
||||
.macro irq_handler
|
||||
get_irqnr_preamble r5, lr
|
||||
1: get_irqnr_and_base r0, r6, r5, lr
|
||||
movne r1, sp
|
||||
@
|
||||
@ routine called with r0 = irq number, r1 = struct pt_regs *
|
||||
@
|
||||
adrne lr, BSYM(1b)
|
||||
bne asm_do_IRQ
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/*
|
||||
* XXX
|
||||
*
|
||||
* this macro assumes that irqstat (r6) and base (r5) are
|
||||
* preserved from get_irqnr_and_base above
|
||||
*/
|
||||
ALT_SMP(test_for_ipi r0, r6, r5, lr)
|
||||
ALT_UP_B(9997f)
|
||||
movne r0, sp
|
||||
adrne lr, BSYM(1b)
|
||||
bne do_IPI
|
||||
|
||||
#ifdef CONFIG_LOCAL_TIMERS
|
||||
test_for_ltirq r0, r6, r5, lr
|
||||
movne r0, sp
|
||||
adrne lr, BSYM(1b)
|
||||
bne do_local_timer
|
||||
#ifdef CONFIG_MULTI_IRQ_HANDLER
|
||||
ldr r5, =handle_arch_irq
|
||||
mov r0, sp
|
||||
ldr r5, [r5]
|
||||
adr lr, BSYM(9997f)
|
||||
teq r5, #0
|
||||
movne pc, r5
|
||||
#endif
|
||||
arch_irq_handler_default
|
||||
9997:
|
||||
#endif
|
||||
|
||||
.endm
|
||||
|
||||
#ifdef CONFIG_KPROBES
|
||||
|
@ -735,7 +715,7 @@ ENTRY(__switch_to)
|
|||
THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
|
||||
THUMB( str sp, [ip], #4 )
|
||||
THUMB( str lr, [ip], #4 )
|
||||
#ifdef CONFIG_MMU
|
||||
#ifdef CONFIG_CPU_USE_DOMAINS
|
||||
ldr r6, [r2, #TI_CPU_DOMAIN]
|
||||
#endif
|
||||
set_tls r3, r4, r5
|
||||
|
@ -744,7 +724,7 @@ ENTRY(__switch_to)
|
|||
ldr r8, =__stack_chk_guard
|
||||
ldr r7, [r7, #TSK_STACK_CANARY]
|
||||
#endif
|
||||
#ifdef CONFIG_MMU
|
||||
#ifdef CONFIG_CPU_USE_DOMAINS
|
||||
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
|
||||
#endif
|
||||
mov r5, r0
|
||||
|
@ -842,7 +822,7 @@ __kuser_helper_start:
|
|||
*/
|
||||
|
||||
__kuser_memory_barrier: @ 0xffff0fa0
|
||||
smp_dmb
|
||||
smp_dmb arm
|
||||
usr_ret lr
|
||||
|
||||
.align 5
|
||||
|
@ -959,7 +939,7 @@ kuser_cmpxchg_fixup:
|
|||
|
||||
#else
|
||||
|
||||
smp_dmb
|
||||
smp_dmb arm
|
||||
1: ldrex r3, [r2]
|
||||
subs r3, r3, r0
|
||||
strexeq r3, r1, [r2]
|
||||
|
@ -1245,3 +1225,9 @@ cr_alignment:
|
|||
.space 4
|
||||
cr_no_alignment:
|
||||
.space 4
|
||||
|
||||
#ifdef CONFIG_MULTI_IRQ_HANDLER
|
||||
.globl handle_arch_irq
|
||||
handle_arch_irq:
|
||||
.space 4
|
||||
#endif
|
||||
|
|
|
@ -45,6 +45,7 @@
|
|||
#include <asm/fiq.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/traps.h>
|
||||
|
||||
static unsigned long no_fiq_insn;
|
||||
|
||||
|
@ -67,17 +68,22 @@ static struct fiq_handler default_owner = {
|
|||
|
||||
static struct fiq_handler *current_fiq = &default_owner;
|
||||
|
||||
int show_fiq_list(struct seq_file *p, void *v)
|
||||
int show_fiq_list(struct seq_file *p, int prec)
|
||||
{
|
||||
if (current_fiq != &default_owner)
|
||||
seq_printf(p, "FIQ: %s\n", current_fiq->name);
|
||||
seq_printf(p, "%*s: %s\n", prec, "FIQ",
|
||||
current_fiq->name);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void set_fiq_handler(void *start, unsigned int length)
|
||||
{
|
||||
#if defined(CONFIG_CPU_USE_DOMAINS)
|
||||
memcpy((void *)0xffff001c, start, length);
|
||||
#else
|
||||
memcpy(vectors_page + 0x1c, start, length);
|
||||
#endif
|
||||
flush_icache_range(0xffff001c, 0xffff001c + length);
|
||||
if (!vectors_high())
|
||||
flush_icache_range(0x1c, 0x1c + length);
|
||||
|
|
|
@ -91,6 +91,11 @@ ENTRY(stext)
|
|||
movs r8, r5 @ invalid machine (r5=0)?
|
||||
THUMB( it eq ) @ force fixup-able long branch encoding
|
||||
beq __error_a @ yes, error 'a'
|
||||
|
||||
/*
|
||||
* r1 = machine no, r2 = atags,
|
||||
* r8 = machinfo, r9 = cpuid, r10 = procinfo
|
||||
*/
|
||||
bl __vet_atags
|
||||
#ifdef CONFIG_SMP_ON_UP
|
||||
bl __fixup_smp
|
||||
|
@ -387,19 +392,19 @@ ENDPROC(__turn_mmu_on)
|
|||
|
||||
#ifdef CONFIG_SMP_ON_UP
|
||||
__fixup_smp:
|
||||
mov r7, #0x00070000
|
||||
orr r6, r7, #0xff000000 @ mask 0xff070000
|
||||
orr r7, r7, #0x41000000 @ val 0x41070000
|
||||
and r0, r9, r6
|
||||
teq r0, r7 @ ARM CPU and ARMv6/v7?
|
||||
mov r4, #0x00070000
|
||||
orr r3, r4, #0xff000000 @ mask 0xff070000
|
||||
orr r4, r4, #0x41000000 @ val 0x41070000
|
||||
and r0, r9, r3
|
||||
teq r0, r4 @ ARM CPU and ARMv6/v7?
|
||||
bne __fixup_smp_on_up @ no, assume UP
|
||||
|
||||
orr r6, r6, #0x0000ff00
|
||||
orr r6, r6, #0x000000f0 @ mask 0xff07fff0
|
||||
orr r7, r7, #0x0000b000
|
||||
orr r7, r7, #0x00000020 @ val 0x4107b020
|
||||
and r0, r9, r6
|
||||
teq r0, r7 @ ARM 11MPCore?
|
||||
orr r3, r3, #0x0000ff00
|
||||
orr r3, r3, #0x000000f0 @ mask 0xff07fff0
|
||||
orr r4, r4, #0x0000b000
|
||||
orr r4, r4, #0x00000020 @ val 0x4107b020
|
||||
and r0, r9, r3
|
||||
teq r0, r4 @ ARM 11MPCore?
|
||||
moveq pc, lr @ yes, assume SMP
|
||||
|
||||
mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
|
||||
|
@ -408,15 +413,22 @@ __fixup_smp:
|
|||
|
||||
__fixup_smp_on_up:
|
||||
adr r0, 1f
|
||||
ldmia r0, {r3, r6, r7}
|
||||
ldmia r0, {r3 - r5}
|
||||
sub r3, r0, r3
|
||||
add r6, r6, r3
|
||||
add r7, r7, r3
|
||||
2: cmp r6, r7
|
||||
ldmia r6!, {r0, r4}
|
||||
strlo r4, [r0, r3]
|
||||
blo 2b
|
||||
mov pc, lr
|
||||
add r4, r4, r3
|
||||
add r5, r5, r3
|
||||
2: cmp r4, r5
|
||||
movhs pc, lr
|
||||
ldmia r4!, {r0, r6}
|
||||
ARM( str r6, [r0, r3] )
|
||||
THUMB( add r0, r0, r3 )
|
||||
#ifdef __ARMEB__
|
||||
THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
|
||||
#endif
|
||||
THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
|
||||
THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
|
||||
THUMB( strh r6, [r0] )
|
||||
b 2b
|
||||
ENDPROC(__fixup_smp)
|
||||
|
||||
.align
|
||||
|
|
|
@ -38,6 +38,7 @@
|
|||
#include <linux/ftrace.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/mach/arch.h>
|
||||
#include <asm/mach/irq.h>
|
||||
#include <asm/mach/time.h>
|
||||
|
||||
|
@ -48,8 +49,6 @@
|
|||
#define irq_finish(irq) do { } while (0)
|
||||
#endif
|
||||
|
||||
unsigned int arch_nr_irqs;
|
||||
void (*init_arch_irq)(void) __initdata = NULL;
|
||||
unsigned long irq_err_count;
|
||||
|
||||
int show_interrupts(struct seq_file *p, void *v)
|
||||
|
@ -58,11 +57,20 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||
struct irq_desc *desc;
|
||||
struct irqaction * action;
|
||||
unsigned long flags;
|
||||
int prec, n;
|
||||
|
||||
for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++)
|
||||
n *= 10;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
if (prec < 4)
|
||||
prec = 4;
|
||||
#endif
|
||||
|
||||
if (i == 0) {
|
||||
char cpuname[12];
|
||||
|
||||
seq_printf(p, " ");
|
||||
seq_printf(p, "%*s ", prec, "");
|
||||
for_each_present_cpu(cpu) {
|
||||
sprintf(cpuname, "CPU%d", cpu);
|
||||
seq_printf(p, " %10s", cpuname);
|
||||
|
@ -77,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||
if (!action)
|
||||
goto unlock;
|
||||
|
||||
seq_printf(p, "%3d: ", i);
|
||||
seq_printf(p, "%*d: ", prec, i);
|
||||
for_each_present_cpu(cpu)
|
||||
seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
|
||||
seq_printf(p, " %10s", desc->chip->name ? : "-");
|
||||
|
@ -90,13 +98,15 @@ unlock:
|
|||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
} else if (i == nr_irqs) {
|
||||
#ifdef CONFIG_FIQ
|
||||
show_fiq_list(p, v);
|
||||
show_fiq_list(p, prec);
|
||||
#endif
|
||||
#ifdef CONFIG_SMP
|
||||
show_ipi_list(p);
|
||||
show_local_irqs(p);
|
||||
show_ipi_list(p, prec);
|
||||
#endif
|
||||
seq_printf(p, "Err: %10lu\n", irq_err_count);
|
||||
#ifdef CONFIG_LOCAL_TIMERS
|
||||
show_local_irqs(p, prec);
|
||||
#endif
|
||||
seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -156,13 +166,13 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
|
|||
|
||||
void __init init_IRQ(void)
|
||||
{
|
||||
init_arch_irq();
|
||||
machine_desc->init_irq();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SPARSE_IRQ
|
||||
int __init arch_probe_nr_irqs(void)
|
||||
{
|
||||
nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
|
||||
nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
|
||||
return nr_irqs;
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -75,9 +75,9 @@ extern void reboot_setup(char *str);
|
|||
|
||||
unsigned int processor_id;
|
||||
EXPORT_SYMBOL(processor_id);
|
||||
unsigned int __machine_arch_type;
|
||||
unsigned int __machine_arch_type __read_mostly;
|
||||
EXPORT_SYMBOL(__machine_arch_type);
|
||||
unsigned int cacheid;
|
||||
unsigned int cacheid __read_mostly;
|
||||
EXPORT_SYMBOL(cacheid);
|
||||
|
||||
unsigned int __atags_pointer __initdata;
|
||||
|
@ -91,24 +91,24 @@ EXPORT_SYMBOL(system_serial_low);
|
|||
unsigned int system_serial_high;
|
||||
EXPORT_SYMBOL(system_serial_high);
|
||||
|
||||
unsigned int elf_hwcap;
|
||||
unsigned int elf_hwcap __read_mostly;
|
||||
EXPORT_SYMBOL(elf_hwcap);
|
||||
|
||||
|
||||
#ifdef MULTI_CPU
|
||||
struct processor processor;
|
||||
struct processor processor __read_mostly;
|
||||
#endif
|
||||
#ifdef MULTI_TLB
|
||||
struct cpu_tlb_fns cpu_tlb;
|
||||
struct cpu_tlb_fns cpu_tlb __read_mostly;
|
||||
#endif
|
||||
#ifdef MULTI_USER
|
||||
struct cpu_user_fns cpu_user;
|
||||
struct cpu_user_fns cpu_user __read_mostly;
|
||||
#endif
|
||||
#ifdef MULTI_CACHE
|
||||
struct cpu_cache_fns cpu_cache;
|
||||
struct cpu_cache_fns cpu_cache __read_mostly;
|
||||
#endif
|
||||
#ifdef CONFIG_OUTER_CACHE
|
||||
struct outer_cache_fns outer_cache;
|
||||
struct outer_cache_fns outer_cache __read_mostly;
|
||||
EXPORT_SYMBOL(outer_cache);
|
||||
#endif
|
||||
|
||||
|
@ -126,6 +126,7 @@ EXPORT_SYMBOL(elf_platform);
|
|||
static const char *cpu_name;
|
||||
static const char *machine_name;
|
||||
static char __initdata cmd_line[COMMAND_LINE_SIZE];
|
||||
struct machine_desc *machine_desc __initdata;
|
||||
|
||||
static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
|
||||
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
|
||||
|
@ -708,13 +709,11 @@ static struct init_tags {
|
|||
{ 0, ATAG_NONE }
|
||||
};
|
||||
|
||||
static void (*init_machine)(void) __initdata;
|
||||
|
||||
static int __init customize_machine(void)
|
||||
{
|
||||
/* customizes platform devices, or adds new ones */
|
||||
if (init_machine)
|
||||
init_machine();
|
||||
if (machine_desc->init_machine)
|
||||
machine_desc->init_machine();
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(customize_machine);
|
||||
|
@ -809,6 +808,7 @@ void __init setup_arch(char **cmdline_p)
|
|||
|
||||
setup_processor();
|
||||
mdesc = setup_machine(machine_arch_type);
|
||||
machine_desc = mdesc;
|
||||
machine_name = mdesc->name;
|
||||
|
||||
if (mdesc->soft_reboot)
|
||||
|
@ -868,13 +868,9 @@ void __init setup_arch(char **cmdline_p)
|
|||
cpu_init();
|
||||
tcm_init();
|
||||
|
||||
/*
|
||||
* Set up various architecture-specific pointers
|
||||
*/
|
||||
arch_nr_irqs = mdesc->nr_irqs;
|
||||
init_arch_irq = mdesc->init_irq;
|
||||
system_timer = mdesc->timer;
|
||||
init_machine = mdesc->init_machine;
|
||||
#ifdef CONFIG_MULTI_IRQ_HANDLER
|
||||
handle_arch_irq = mdesc->handle_irq;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_VT
|
||||
#if defined(CONFIG_VGA_CONSOLE)
|
||||
|
@ -884,6 +880,9 @@ void __init setup_arch(char **cmdline_p)
|
|||
#endif
|
||||
#endif
|
||||
early_trap_init();
|
||||
|
||||
if (mdesc->init_early)
|
||||
mdesc->init_early();
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include <linux/irq.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/completion.h>
|
||||
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/cacheflush.h>
|
||||
|
@ -38,7 +39,6 @@
|
|||
#include <asm/tlbflush.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/localtimer.h>
|
||||
#include <asm/smp_plat.h>
|
||||
|
||||
/*
|
||||
* as from 2.5, kernels no longer have an init_tasks structure
|
||||
|
@ -47,22 +47,8 @@
|
|||
*/
|
||||
struct secondary_data secondary_data;
|
||||
|
||||
/*
|
||||
* structures for inter-processor calls
|
||||
* - A collection of single bit ipi messages.
|
||||
*/
|
||||
struct ipi_data {
|
||||
spinlock_t lock;
|
||||
unsigned long ipi_count;
|
||||
unsigned long bits;
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
|
||||
.lock = SPIN_LOCK_UNLOCKED,
|
||||
};
|
||||
|
||||
enum ipi_msg_type {
|
||||
IPI_TIMER,
|
||||
IPI_TIMER = 2,
|
||||
IPI_RESCHEDULE,
|
||||
IPI_CALL_FUNC,
|
||||
IPI_CALL_FUNC_SINGLE,
|
||||
|
@ -178,8 +164,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
|
|||
barrier();
|
||||
}
|
||||
|
||||
if (!cpu_online(cpu))
|
||||
if (!cpu_online(cpu)) {
|
||||
pr_crit("CPU%u: failed to come online\n", cpu);
|
||||
ret = -EIO;
|
||||
}
|
||||
} else {
|
||||
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
|
||||
}
|
||||
|
||||
secondary_data.stack = NULL;
|
||||
|
@ -195,18 +185,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
|
|||
|
||||
pgd_free(&init_mm, pgd);
|
||||
|
||||
if (ret) {
|
||||
printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
|
||||
|
||||
/*
|
||||
* FIXME: We need to clean up the new idle thread. --rmk
|
||||
*/
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static void percpu_timer_stop(void);
|
||||
|
||||
/*
|
||||
* __cpu_disable runs on the processor to be shutdown.
|
||||
*/
|
||||
|
@ -234,7 +218,7 @@ int __cpu_disable(void)
|
|||
/*
|
||||
* Stop the local timer for this CPU.
|
||||
*/
|
||||
local_timer_stop();
|
||||
percpu_timer_stop();
|
||||
|
||||
/*
|
||||
* Flush user cache and TLB mappings, and then remove this CPU
|
||||
|
@ -253,12 +237,20 @@ int __cpu_disable(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static DECLARE_COMPLETION(cpu_died);
|
||||
|
||||
/*
|
||||
* called on the thread which is asking for a CPU to be shutdown -
|
||||
* waits until shutdown has completed, or it is timed out.
|
||||
*/
|
||||
void __cpu_die(unsigned int cpu)
|
||||
{
|
||||
if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
|
||||
pr_err("CPU%u: cpu didn't die\n", cpu);
|
||||
return;
|
||||
}
|
||||
printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
|
||||
|
||||
if (!platform_cpu_kill(cpu))
|
||||
printk("CPU%u: unable to kill\n", cpu);
|
||||
}
|
||||
|
@ -275,12 +267,17 @@ void __ref cpu_die(void)
|
|||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
|
||||
local_irq_disable();
|
||||
idle_task_exit();
|
||||
|
||||
local_irq_disable();
|
||||
mb();
|
||||
|
||||
/* Tell __cpu_die() that this CPU is now safe to dispose of */
|
||||
complete(&cpu_died);
|
||||
|
||||
/*
|
||||
* actual CPU shutdown procedure is at least platform (if not
|
||||
* CPU) specific
|
||||
* CPU) specific.
|
||||
*/
|
||||
platform_cpu_die(cpu);
|
||||
|
||||
|
@@ -290,12 +287,24 @@ void __ref cpu_die(void)
* to be repeated to undo the effects of taking the CPU offline.
*/
__asm__("mov sp, %0\n"
" mov fp, #0\n"
" b secondary_start_kernel"
:
: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
* Called by both boot and secondaries to move global data into
* per-processor storage.
*/
static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

cpu_info->loops_per_jiffy = loops_per_jiffy;
}

/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
@@ -320,6 +329,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)

cpu_init();
preempt_disable();
trace_hardirqs_off();

/*
* Give the platform a chance to do its own initialisation.
@@ -353,17 +363,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
cpu_idle();
}

/*
* Called by both boot and secondaries to move global data into
* per-processor storage.
*/
void __cpuinit smp_store_cpu_info(unsigned int cpuid)
{
struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
int cpu;
@@ -386,61 +385,80 @@ void __init smp_prepare_boot_cpu(void)
per_cpu(cpu_data, cpu).idle = current;
}

static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned long flags;
unsigned int cpu;
unsigned int ncores = num_possible_cpus();

local_irq_save(flags);

for_each_cpu(cpu, mask) {
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

spin_lock(&ipi->lock);
ipi->bits |= 1 << msg;
spin_unlock(&ipi->lock);
}
smp_store_cpu_info(smp_processor_id());

/*
* Call the platform specific cross-CPU call function.
* are we trying to boot more cores than exist?
*/
smp_cross_call(mask);
if (max_cpus > ncores)
max_cpus = ncores;

local_irq_restore(flags);
if (max_cpus > 1) {
/*
* Enable the local timer or broadcast device for the
* boot CPU, but only if we have more than one CPU.
*/
percpu_timer_setup();

/*
* Initialise the SCU if there are more than one CPU
* and let them know where to start.
*/
platform_smp_prepare_cpus(max_cpus);
}
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
send_ipi_message(mask, IPI_CALL_FUNC);
smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void show_ipi_list(struct seq_file *p)
static const char *ipi_types[NR_IPI] = {
#define S(x,s) [x - IPI_TIMER] = s
S(IPI_TIMER, "Timer broadcast interrupts"),
S(IPI_RESCHEDULE, "Rescheduling interrupts"),
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
};

void show_ipi_list(struct seq_file *p, int prec)
{
unsigned int cpu;
unsigned int cpu, i;

seq_puts(p, "IPI:");
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

for_each_present_cpu(cpu)
seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
for_each_present_cpu(cpu)
seq_printf(p, "%10u ",
__get_irq_stat(cpu, ipi_irqs[i]));

seq_putc(p, '\n');
seq_printf(p, " %s\n", ipi_types[i]);
}
}

void show_local_irqs(struct seq_file *p)
u64 smp_irq_stat_cpu(unsigned int cpu)
{
unsigned int cpu;
u64 sum = 0;
int i;

seq_printf(p, "LOC: ");
for (i = 0; i < NR_IPI; i++)
sum += __get_irq_stat(cpu, ipi_irqs[i]);

for_each_present_cpu(cpu)
seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);
#ifdef CONFIG_LOCAL_TIMERS
sum += __get_irq_stat(cpu, local_timer_irqs);
#endif

seq_putc(p, '\n');
return sum;
}

/*
@@ -463,18 +481,30 @@ asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
int cpu = smp_processor_id();

if (local_timer_ack()) {
irq_stat[cpu].local_timer_irqs++;
__inc_irq_stat(cpu, local_timer_irqs);
ipi_timer();
}

set_irq_regs(old_regs);
}

void show_local_irqs(struct seq_file *p, int prec)
{
unsigned int cpu;

seq_printf(p, "%*s: ", prec, "LOC");

for_each_present_cpu(cpu)
seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));

seq_printf(p, " Local timer interrupts\n");
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
send_ipi_message(mask, IPI_TIMER);
smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast NULL
@@ -511,6 +541,21 @@ void __cpuinit percpu_timer_setup(void)
local_timer_setup(evt);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
* The generic clock events code purposely does not stop the local timer
* on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
* manually here.
*/
static void percpu_timer_stop(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
}
#endif

static DEFINE_SPINLOCK(stop_lock);

/*
@@ -537,85 +582,70 @@ static void ipi_cpu_stop(unsigned int cpu)

/*
* Main handler for inter-processor interrupts
*
* For ARM, the ipimask now only identifies a single
* category of IPI (Bit 1 IPIs have been replaced by a
* different mechanism):
*
* Bit 0 - Inter-processor function call
*/
asmlinkage void __exception_irq_entry do_IPI(struct pt_regs *regs)
asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
unsigned int cpu = smp_processor_id();
struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
struct pt_regs *old_regs = set_irq_regs(regs);

ipi->ipi_count++;
if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

for (;;) {
unsigned long msgs;
switch (ipinr) {
case IPI_TIMER:
ipi_timer();
break;

spin_lock(&ipi->lock);
msgs = ipi->bits;
ipi->bits = 0;
spin_unlock(&ipi->lock);
case IPI_RESCHEDULE:
/*
* nothing more to do - eveything is
* done on the interrupt return path
*/
break;

if (!msgs)
break;
case IPI_CALL_FUNC:
generic_smp_call_function_interrupt();
break;

do {
unsigned nextmsg;
case IPI_CALL_FUNC_SINGLE:
generic_smp_call_function_single_interrupt();
break;

nextmsg = msgs & -msgs;
msgs &= ~nextmsg;
nextmsg = ffz(~nextmsg);
case IPI_CPU_STOP:
ipi_cpu_stop(cpu);
break;

switch (nextmsg) {
case IPI_TIMER:
ipi_timer();
break;

case IPI_RESCHEDULE:
/*
* nothing more to do - eveything is
* done on the interrupt return path
*/
break;

case IPI_CALL_FUNC:
generic_smp_call_function_interrupt();
break;

case IPI_CALL_FUNC_SINGLE:
generic_smp_call_function_single_interrupt();
break;

case IPI_CPU_STOP:
ipi_cpu_stop(cpu);
break;

default:
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
cpu, nextmsg);
break;
}
} while (msgs);
default:
printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
cpu, ipinr);
break;
}

set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);
if (!cpus_empty(mask))
send_ipi_message(&mask, IPI_CPU_STOP);
unsigned long timeout;

if (num_online_cpus() > 1) {
cpumask_t mask = cpu_online_map;
cpu_clear(smp_processor_id(), mask);

smp_cross_call(&mask, IPI_CPU_STOP);
}

/* Wait up to one second for other CPUs to stop */
timeout = USEC_PER_SEC;
while (num_online_cpus() > 1 && timeout--)
udelay(1);

if (num_online_cpus() > 1)
pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
@@ -625,128 +655,3 @@ int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}

static void
on_each_cpu_mask(void (*func)(void *), void *info, int wait,
const struct cpumask *mask)
{
preempt_disable();

smp_call_function_many(mask, func, info, wait);
if (cpumask_test_cpu(smp_processor_id(), mask))
func(info);

preempt_enable();
}

/**********************************************************************/

/*
* TLB operations
*/
struct tlb_args {
struct vm_area_struct *ta_vma;
unsigned long ta_start;
unsigned long ta_end;
};

static inline void ipi_flush_tlb_all(void *ignored)
{
local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
struct mm_struct *mm = (struct mm_struct *)arg;

local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;

local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;

local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;

local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;

local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
if (tlb_ops_need_broadcast())
on_each_cpu(ipi_flush_tlb_all, NULL, 1);
else
local_flush_tlb_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
if (tlb_ops_need_broadcast())
on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
else
local_flush_tlb_mm(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
if (tlb_ops_need_broadcast()) {
struct tlb_args ta;
ta.ta_vma = vma;
ta.ta_start = uaddr;
on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
} else
local_flush_tlb_page(vma, uaddr);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
if (tlb_ops_need_broadcast()) {
struct tlb_args ta;
ta.ta_start = kaddr;
on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
} else
local_flush_tlb_kernel_page(kaddr);
}

void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (tlb_ops_need_broadcast()) {
struct tlb_args ta;
ta.ta_vma = vma;
ta.ta_start = start;
ta.ta_end = end;
on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
} else
local_flush_tlb_range(vma, start, end);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
if (tlb_ops_need_broadcast()) {
struct tlb_args ta;
ta.ta_start = start;
ta.ta_end = end;
on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
} else
local_flush_tlb_kernel_range(start, end);
}
@@ -0,0 +1,139 @@
/*
* linux/arch/arm/kernel/smp_tlb.c
*
* Copyright (C) 2002 ARM Limited, All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/preempt.h>
#include <linux/smp.h>

#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

static void on_each_cpu_mask(void (*func)(void *), void *info, int wait,
const struct cpumask *mask)
{
preempt_disable();

smp_call_function_many(mask, func, info, wait);
if (cpumask_test_cpu(smp_processor_id(), mask))
func(info);

preempt_enable();
}

/**********************************************************************/

/*
* TLB operations
*/
struct tlb_args {
struct vm_area_struct *ta_vma;
unsigned long ta_start;
unsigned long ta_end;
};

static inline void ipi_flush_tlb_all(void *ignored)
{
local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
struct mm_struct *mm = (struct mm_struct *)arg;

local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;

local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;

local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;

local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;

local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
if (tlb_ops_need_broadcast())
on_each_cpu(ipi_flush_tlb_all, NULL, 1);
else
local_flush_tlb_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
if (tlb_ops_need_broadcast())
on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
else
local_flush_tlb_mm(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
if (tlb_ops_need_broadcast()) {
struct tlb_args ta;
ta.ta_vma = vma;
ta.ta_start = uaddr;
on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
} else
local_flush_tlb_page(vma, uaddr);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
if (tlb_ops_need_broadcast()) {
struct tlb_args ta;
ta.ta_start = kaddr;
on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
} else
local_flush_tlb_kernel_page(kaddr);
}

void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
if (tlb_ops_need_broadcast()) {
struct tlb_args ta;
ta.ta_vma = vma;
ta.ta_start = start;
ta.ta_end = end;
on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
} else
local_flush_tlb_range(vma, start, end);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
if (tlb_ops_need_broadcast()) {
struct tlb_args ta;
ta.ta_start = start;
ta.ta_end = end;
on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
} else
local_flush_tlb_kernel_range(start, end);
}
@@ -145,13 +145,3 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)

clockevents_register_device(clk);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
* take a local timer down
*/
void twd_timer_stop(void)
{
__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
}
#endif
@@ -0,0 +1,267 @@
/*
* linux/arch/arm/kernel/swp_emulate.c
*
* Copyright (C) 2009 ARM Limited
* __user_* functions adapted from include/asm/uaccess.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Implements emulation of the SWP/SWPB instructions using load-exclusive and
* store-exclusive for processors that have them disabled (or future ones that
* might not implement them).
*
* Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
* Where: Rt = destination
* Rt2 = source
* Rn = address
*/

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/perf_event.h>

#include <asm/traps.h>
#include <asm/uaccess.h>

/*
* Error-checking SWP macros implemented using ldrex{b}/strex{b}
*/
#define __user_swpX_asm(data, addr, res, temp, B) \
__asm__ __volatile__( \
" mov %2, %1\n" \
"0: ldrex"B" %1, [%3]\n" \
"1: strex"B" %0, %2, [%3]\n" \
" cmp %0, #0\n" \
" movne %0, %4\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %5\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 0b, 3b\n" \
" .long 1b, 3b\n" \
" .previous" \
: "=&r" (res), "+r" (data), "=&r" (temp) \
: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
: "cc", "memory")

#define __user_swp_asm(data, addr, res, temp) \
__user_swpX_asm(data, addr, res, temp, "")
#define __user_swpb_asm(data, addr, res, temp) \
__user_swpX_asm(data, addr, res, temp, "b")

/*
* Macros/defines for extracting register numbers from instruction.
*/
#define EXTRACT_REG_NUM(instruction, offset) \
(((instruction) & (0xf << (offset))) >> (offset))
#define RN_OFFSET 16
#define RT_OFFSET 12
#define RT2_OFFSET 0
/*
* Bit 22 of the instruction encoding distinguishes between
* the SWP and SWPB variants (bit set means SWPB).
*/
#define TYPE_SWPB (1 << 22)

static unsigned long swpcounter;
static unsigned long swpbcounter;
static unsigned long abtcounter;
static pid_t previous_pid;

#ifdef CONFIG_PROC_FS
static int proc_read_status(char *page, char **start, off_t off, int count,
int *eof, void *data)
{
char *p = page;
int len;

p += sprintf(p, "Emulated SWP:\t\t%lu\n", swpcounter);
p += sprintf(p, "Emulated SWPB:\t\t%lu\n", swpbcounter);
p += sprintf(p, "Aborted SWP{B}:\t\t%lu\n", abtcounter);
if (previous_pid != 0)
p += sprintf(p, "Last process:\t\t%d\n", previous_pid);

len = (p - page) - off;
if (len < 0)
len = 0;

*eof = (len <= count) ? 1 : 0;
*start = page + off;

return len;
}
#endif

/*
* Set up process info to signal segmentation fault - called on access error.
*/
static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
siginfo_t info;

if (find_vma(current->mm, addr) == NULL)
info.si_code = SEGV_MAPERR;
else
info.si_code = SEGV_ACCERR;

info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_addr = (void *) instruction_pointer(regs);

pr_debug("SWP{B} emulation: access caused memory abort!\n");
arm_notify_die("Illegal memory access", regs, &info, 0, 0);

abtcounter++;
}

static int emulate_swpX(unsigned int address, unsigned int *data,
unsigned int type)
{
unsigned int res = 0;

if ((type != TYPE_SWPB) && (address & 0x3)) {
/* SWP to unaligned address not permitted */
pr_debug("SWP instruction on unaligned pointer!\n");
return -EFAULT;
}

while (1) {
unsigned long temp;

/*
* Barrier required between accessing protected resource and
* releasing a lock for it. Legacy code might not have done
* this, and we cannot determine that this is not the case
* being emulated, so insert always.
*/
smp_mb();

if (type == TYPE_SWPB)
__user_swpb_asm(*data, address, res, temp);
else
__user_swp_asm(*data, address, res, temp);

if (likely(res != -EAGAIN) || signal_pending(current))
break;

cond_resched();
}

if (res == 0) {
/*
* Barrier also required between aquiring a lock for a
* protected resource and accessing the resource. Inserted for
* same reason as above.
*/
smp_mb();

if (type == TYPE_SWPB)
swpbcounter++;
else
swpcounter++;
}

return res;
}

/*
* swp_handler logs the id of calling process, dissects the instruction, sanity
* checks the memory location, calls emulate_swpX for the actual operation and
* deals with fixup/error handling before returning
*/
static int swp_handler(struct pt_regs *regs, unsigned int instr)
{
unsigned int address, destreg, data, type;
unsigned int res = 0;

perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);

if (current->pid != previous_pid) {
pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
current->comm, (unsigned long)current->pid);
previous_pid = current->pid;
}

address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)];
data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);

type = instr & TYPE_SWPB;

pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
EXTRACT_REG_NUM(instr, RN_OFFSET), address,
destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);

/* Check access in reasonable access range for both SWP and SWPB */
if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
pr_debug("SWP{B} emulation: access to %p not allowed!\n",
(void *)address);
res = -EFAULT;
} else {
res = emulate_swpX(address, &data, type);
}

if (res == 0) {
/*
* On successful emulation, revert the adjustment to the PC
* made in kernel/traps.c in order to resume execution at the
* instruction following the SWP{B}.
*/
regs->ARM_pc += 4;
regs->uregs[destreg] = data;
} else if (res == -EFAULT) {
/*
* Memory errors do not mean emulation failed.
* Set up signal info to return SEGV, then return OK
*/
set_segfault(regs, address);
}

return 0;
}

/*
* Only emulate SWP/SWPB executed in ARM state/User mode.
* The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE.
*/
static struct undef_hook swp_hook = {
.instr_mask = 0x0fb00ff0,
.instr_val = 0x01000090,
.cpsr_mask = MODE_MASK | PSR_T_BIT | PSR_J_BIT,
.cpsr_val = USR_MODE,
.fn = swp_handler
};

/*
* Register handler and create status file in /proc/cpu
* Invoked as late_initcall, since not needed before init spawned.
*/
static int __init swp_emulation_init(void)
{
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;

res = create_proc_entry("cpu/swp_emulation", S_IRUGO, NULL);

if (!res)
return -ENOMEM;

res->read_proc = proc_read_status;
#endif /* CONFIG_PROC_FS */

printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n");
register_undef_hook(&swp_hook);

return 0;
}

late_initcall(swp_emulation_init);
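Not part of the commit: a minimal user-space sketch showing the path this handler covers. On an ARMv7 CPU with SWP disabled and the emulation enabled, the SWP below traps as an undefined instruction, swp_handler() performs it with LDREX/STREX, and the counters in /proc/cpu/swp_emulation advance. The file name and program are hypothetical; it must be built for ARM state (e.g. gcc -marm) on a 32-bit ARM target.

/* swp_test.c - hypothetical exerciser, not part of this commit.
 * Executes one SWP so the undef hook above traps and emulates it;
 * compare /proc/cpu/swp_emulation before and after running it.
 */
#include <stdio.h>

int main(void)
{
	unsigned int lock = 1;	/* word-aligned and writable: passes access_ok() */
	unsigned int old;

	/* SWP old, 0, [&lock]: atomically load the old value and store 0 */
	__asm__ __volatile__("swp %0, %2, [%1]"
			     : "=&r" (old)
			     : "r" (&lock), "r" (0U)
			     : "memory");

	printf("old=%u lock=%u\n", old, lock);	/* expect old=1 lock=0 */
	return 0;
}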
@@ -30,12 +30,13 @@
#include <asm/leds.h>
#include <asm/thread_info.h>
#include <asm/stacktrace.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>

/*
* Our system timer.
*/
struct sys_timer *system_timer;
static struct sys_timer *system_timer;

#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
/* this needs a better home */
@@ -160,6 +161,7 @@ device_initcall(timer_init_sysfs);

void __init time_init(void)
{
system_timer = machine_desc->timer;
system_timer->init();
}
@@ -37,6 +37,8 @@

static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };

void *vectors_page;

#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

@@ -756,7 +758,11 @@ static void __init kuser_get_tls_init(unsigned long vectors)

void __init early_trap_init(void)
{
#if defined(CONFIG_CPU_USE_DOMAINS)
unsigned long vectors = CONFIG_VECTORS_BASE;
#else
unsigned long vectors = (unsigned long)vectors_page;
#endif
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
extern char __kuser_helper_start[], __kuser_helper_end[];
@@ -780,10 +786,10 @@ void __init early_trap_init(void)
* Copy signal return handlers into the vector page, and
* set sigreturn to be a pointer to these.
*/
memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
sizeof(sigreturn_codes));
memcpy((void *)KERN_RESTART_CODE, syscall_restart_code,
sizeof(syscall_restart_code));
memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
sigreturn_codes, sizeof(sigreturn_codes));
memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
syscall_restart_code, sizeof(syscall_restart_code));

flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
@@ -168,6 +168,7 @@ SECTIONS

NOSAVE_DATA
CACHELINE_ALIGNED_DATA(32)
READ_MOSTLY_DATA(32)

/*
* The exception fixup table (might need resorting at runtime)
@ -28,20 +28,21 @@
|
|||
*/
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/domain.h>
|
||||
|
||||
ENTRY(__get_user_1)
|
||||
1: ldrbt r2, [r0]
|
||||
1: T(ldrb) r2, [r0]
|
||||
mov r0, #0
|
||||
mov pc, lr
|
||||
ENDPROC(__get_user_1)
|
||||
|
||||
ENTRY(__get_user_2)
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
2: ldrbt r2, [r0]
|
||||
3: ldrbt r3, [r0, #1]
|
||||
2: T(ldrb) r2, [r0]
|
||||
3: T(ldrb) r3, [r0, #1]
|
||||
#else
|
||||
2: ldrbt r2, [r0], #1
|
||||
3: ldrbt r3, [r0]
|
||||
2: T(ldrb) r2, [r0], #1
|
||||
3: T(ldrb) r3, [r0]
|
||||
#endif
|
||||
#ifndef __ARMEB__
|
||||
orr r2, r2, r3, lsl #8
|
||||
|
@ -53,7 +54,7 @@ ENTRY(__get_user_2)
|
|||
ENDPROC(__get_user_2)
|
||||
|
||||
ENTRY(__get_user_4)
|
||||
4: ldrt r2, [r0]
|
||||
4: T(ldr) r2, [r0]
|
||||
mov r0, #0
|
||||
mov pc, lr
|
||||
ENDPROC(__get_user_4)
|
||||
|
|
|
@ -28,9 +28,10 @@
|
|||
*/
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/domain.h>
|
||||
|
||||
ENTRY(__put_user_1)
|
||||
1: strbt r2, [r0]
|
||||
1: T(strb) r2, [r0]
|
||||
mov r0, #0
|
||||
mov pc, lr
|
||||
ENDPROC(__put_user_1)
|
||||
|
@ -39,19 +40,19 @@ ENTRY(__put_user_2)
|
|||
mov ip, r2, lsr #8
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
#ifndef __ARMEB__
|
||||
2: strbt r2, [r0]
|
||||
3: strbt ip, [r0, #1]
|
||||
2: T(strb) r2, [r0]
|
||||
3: T(strb) ip, [r0, #1]
|
||||
#else
|
||||
2: strbt ip, [r0]
|
||||
3: strbt r2, [r0, #1]
|
||||
2: T(strb) ip, [r0]
|
||||
3: T(strb) r2, [r0, #1]
|
||||
#endif
|
||||
#else /* !CONFIG_THUMB2_KERNEL */
|
||||
#ifndef __ARMEB__
|
||||
2: strbt r2, [r0], #1
|
||||
3: strbt ip, [r0]
|
||||
2: T(strb) r2, [r0], #1
|
||||
3: T(strb) ip, [r0]
|
||||
#else
|
||||
2: strbt ip, [r0], #1
|
||||
3: strbt r2, [r0]
|
||||
2: T(strb) ip, [r0], #1
|
||||
3: T(strb) r2, [r0]
|
||||
#endif
|
||||
#endif /* CONFIG_THUMB2_KERNEL */
|
||||
mov r0, #0
|
||||
|
@ -59,18 +60,18 @@ ENTRY(__put_user_2)
|
|||
ENDPROC(__put_user_2)
|
||||
|
||||
ENTRY(__put_user_4)
|
||||
4: strt r2, [r0]
|
||||
4: T(str) r2, [r0]
|
||||
mov r0, #0
|
||||
mov pc, lr
|
||||
ENDPROC(__put_user_4)
|
||||
|
||||
ENTRY(__put_user_8)
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
5: strt r2, [r0]
|
||||
6: strt r3, [r0, #4]
|
||||
5: T(str) r2, [r0]
|
||||
6: T(str) r3, [r0, #4]
|
||||
#else
|
||||
5: strt r2, [r0], #4
|
||||
6: strt r3, [r0]
|
||||
5: T(str) r2, [r0], #4
|
||||
6: T(str) r3, [r0]
|
||||
#endif
|
||||
mov r0, #0
|
||||
mov pc, lr
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
#include <linux/linkage.h>
|
||||
#include <asm/assembler.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/domain.h>
|
||||
|
||||
.text
|
||||
|
||||
|
@ -31,11 +32,11 @@
|
|||
rsb ip, ip, #4
|
||||
cmp ip, #2
|
||||
ldrb r3, [r1], #1
|
||||
USER( strbt r3, [r0], #1) @ May fault
|
||||
USER( T(strb) r3, [r0], #1) @ May fault
|
||||
ldrgeb r3, [r1], #1
|
||||
USER( strgebt r3, [r0], #1) @ May fault
|
||||
USER( T(strgeb) r3, [r0], #1) @ May fault
|
||||
ldrgtb r3, [r1], #1
|
||||
USER( strgtbt r3, [r0], #1) @ May fault
|
||||
USER( T(strgtb) r3, [r0], #1) @ May fault
|
||||
sub r2, r2, ip
|
||||
b .Lc2u_dest_aligned
|
||||
|
||||
|
@ -58,7 +59,7 @@ ENTRY(__copy_to_user)
|
|||
addmi ip, r2, #4
|
||||
bmi .Lc2u_0nowords
|
||||
ldr r3, [r1], #4
|
||||
USER( strt r3, [r0], #4) @ May fault
|
||||
USER( T(str) r3, [r0], #4) @ May fault
|
||||
mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
|
||||
rsb ip, ip, #0
|
||||
movs ip, ip, lsr #32 - PAGE_SHIFT
|
||||
|
@ -87,18 +88,18 @@ USER( strt r3, [r0], #4) @ May fault
|
|||
stmneia r0!, {r3 - r4} @ Shouldnt fault
|
||||
tst ip, #4
|
||||
ldrne r3, [r1], #4
|
||||
strnet r3, [r0], #4 @ Shouldnt fault
|
||||
T(strne) r3, [r0], #4 @ Shouldnt fault
|
||||
ands ip, ip, #3
|
||||
beq .Lc2u_0fupi
|
||||
.Lc2u_0nowords: teq ip, #0
|
||||
beq .Lc2u_finished
|
||||
.Lc2u_nowords: cmp ip, #2
|
||||
ldrb r3, [r1], #1
|
||||
USER( strbt r3, [r0], #1) @ May fault
|
||||
USER( T(strb) r3, [r0], #1) @ May fault
|
||||
ldrgeb r3, [r1], #1
|
||||
USER( strgebt r3, [r0], #1) @ May fault
|
||||
USER( T(strgeb) r3, [r0], #1) @ May fault
|
||||
ldrgtb r3, [r1], #1
|
||||
USER( strgtbt r3, [r0], #1) @ May fault
|
||||
USER( T(strgtb) r3, [r0], #1) @ May fault
|
||||
b .Lc2u_finished
|
||||
|
||||
.Lc2u_not_enough:
|
||||
|
@ -119,7 +120,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
|
|||
mov r3, r7, pull #8
|
||||
ldr r7, [r1], #4
|
||||
orr r3, r3, r7, push #24
|
||||
USER( strt r3, [r0], #4) @ May fault
|
||||
USER( T(str) r3, [r0], #4) @ May fault
|
||||
mov ip, r0, lsl #32 - PAGE_SHIFT
|
||||
rsb ip, ip, #0
|
||||
movs ip, ip, lsr #32 - PAGE_SHIFT
|
||||
|
@ -154,18 +155,18 @@ USER( strt r3, [r0], #4) @ May fault
|
|||
movne r3, r7, pull #8
|
||||
ldrne r7, [r1], #4
|
||||
orrne r3, r3, r7, push #24
|
||||
strnet r3, [r0], #4 @ Shouldnt fault
|
||||
T(strne) r3, [r0], #4 @ Shouldnt fault
|
||||
ands ip, ip, #3
|
||||
beq .Lc2u_1fupi
|
||||
.Lc2u_1nowords: mov r3, r7, get_byte_1
|
||||
teq ip, #0
|
||||
beq .Lc2u_finished
|
||||
cmp ip, #2
|
||||
USER( strbt r3, [r0], #1) @ May fault
|
||||
USER( T(strb) r3, [r0], #1) @ May fault
|
||||
movge r3, r7, get_byte_2
|
||||
USER( strgebt r3, [r0], #1) @ May fault
|
||||
USER( T(strgeb) r3, [r0], #1) @ May fault
|
||||
movgt r3, r7, get_byte_3
|
||||
USER( strgtbt r3, [r0], #1) @ May fault
|
||||
USER( T(strgtb) r3, [r0], #1) @ May fault
|
||||
b .Lc2u_finished
|
||||
|
||||
.Lc2u_2fupi: subs r2, r2, #4
|
||||
|
@ -174,7 +175,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
|
|||
mov r3, r7, pull #16
|
||||
ldr r7, [r1], #4
|
||||
orr r3, r3, r7, push #16
|
||||
USER( strt r3, [r0], #4) @ May fault
|
||||
USER( T(str) r3, [r0], #4) @ May fault
|
||||
mov ip, r0, lsl #32 - PAGE_SHIFT
|
||||
rsb ip, ip, #0
|
||||
movs ip, ip, lsr #32 - PAGE_SHIFT
|
||||
|
@ -209,18 +210,18 @@ USER( strt r3, [r0], #4) @ May fault
|
|||
movne r3, r7, pull #16
|
||||
ldrne r7, [r1], #4
|
||||
orrne r3, r3, r7, push #16
|
||||
strnet r3, [r0], #4 @ Shouldnt fault
|
||||
T(strne) r3, [r0], #4 @ Shouldnt fault
|
||||
ands ip, ip, #3
|
||||
beq .Lc2u_2fupi
|
||||
.Lc2u_2nowords: mov r3, r7, get_byte_2
|
||||
teq ip, #0
|
||||
beq .Lc2u_finished
|
||||
cmp ip, #2
|
||||
USER( strbt r3, [r0], #1) @ May fault
|
||||
USER( T(strb) r3, [r0], #1) @ May fault
|
||||
movge r3, r7, get_byte_3
|
||||
USER( strgebt r3, [r0], #1) @ May fault
|
||||
USER( T(strgeb) r3, [r0], #1) @ May fault
|
||||
ldrgtb r3, [r1], #0
|
||||
USER( strgtbt r3, [r0], #1) @ May fault
|
||||
USER( T(strgtb) r3, [r0], #1) @ May fault
|
||||
b .Lc2u_finished
|
||||
|
||||
.Lc2u_3fupi: subs r2, r2, #4
|
||||
|
@ -229,7 +230,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
|
|||
mov r3, r7, pull #24
|
||||
ldr r7, [r1], #4
|
||||
orr r3, r3, r7, push #8
|
||||
USER( strt r3, [r0], #4) @ May fault
|
||||
USER( T(str) r3, [r0], #4) @ May fault
|
||||
mov ip, r0, lsl #32 - PAGE_SHIFT
|
||||
rsb ip, ip, #0
|
||||
movs ip, ip, lsr #32 - PAGE_SHIFT
|
||||
|
@ -264,18 +265,18 @@ USER( strt r3, [r0], #4) @ May fault
|
|||
movne r3, r7, pull #24
|
||||
ldrne r7, [r1], #4
|
||||
orrne r3, r3, r7, push #8
|
||||
strnet r3, [r0], #4 @ Shouldnt fault
|
||||
T(strne) r3, [r0], #4 @ Shouldnt fault
|
||||
ands ip, ip, #3
|
||||
beq .Lc2u_3fupi
|
||||
.Lc2u_3nowords: mov r3, r7, get_byte_3
|
||||
teq ip, #0
|
||||
beq .Lc2u_finished
|
||||
cmp ip, #2
|
||||
USER( strbt r3, [r0], #1) @ May fault
|
||||
USER( T(strb) r3, [r0], #1) @ May fault
|
||||
ldrgeb r3, [r1], #1
|
||||
USER( strgebt r3, [r0], #1) @ May fault
|
||||
USER( T(strgeb) r3, [r0], #1) @ May fault
|
||||
ldrgtb r3, [r1], #0
|
||||
USER( strgtbt r3, [r0], #1) @ May fault
|
||||
USER( T(strgtb) r3, [r0], #1) @ May fault
|
||||
b .Lc2u_finished
|
||||
ENDPROC(__copy_to_user)
|
||||
|
||||
|
@ -294,11 +295,11 @@ ENDPROC(__copy_to_user)
|
|||
.Lcfu_dest_not_aligned:
|
||||
rsb ip, ip, #4
|
||||
cmp ip, #2
|
||||
USER( ldrbt r3, [r1], #1) @ May fault
|
||||
USER( T(ldrb) r3, [r1], #1) @ May fault
|
||||
strb r3, [r0], #1
|
||||
USER( ldrgebt r3, [r1], #1) @ May fault
|
||||
USER( T(ldrgeb) r3, [r1], #1) @ May fault
|
||||
strgeb r3, [r0], #1
|
||||
USER( ldrgtbt r3, [r1], #1) @ May fault
|
||||
USER( T(ldrgtb) r3, [r1], #1) @ May fault
|
||||
strgtb r3, [r0], #1
|
||||
sub r2, r2, ip
|
||||
b .Lcfu_dest_aligned
|
||||
|
@ -321,7 +322,7 @@ ENTRY(__copy_from_user)
|
|||
.Lcfu_0fupi: subs r2, r2, #4
|
||||
addmi ip, r2, #4
|
||||
bmi .Lcfu_0nowords
|
||||
USER( ldrt r3, [r1], #4)
|
||||
USER( T(ldr) r3, [r1], #4)
|
||||
str r3, [r0], #4
|
||||
mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction
|
||||
rsb ip, ip, #0
|
||||
|
@ -350,18 +351,18 @@ USER( ldrt r3, [r1], #4)
|
|||
ldmneia r1!, {r3 - r4} @ Shouldnt fault
|
||||
stmneia r0!, {r3 - r4}
|
||||
tst ip, #4
|
||||
ldrnet r3, [r1], #4 @ Shouldnt fault
|
||||
T(ldrne) r3, [r1], #4 @ Shouldnt fault
|
||||
strne r3, [r0], #4
|
||||
ands ip, ip, #3
|
||||
beq .Lcfu_0fupi
|
||||
.Lcfu_0nowords: teq ip, #0
|
||||
beq .Lcfu_finished
|
||||
.Lcfu_nowords: cmp ip, #2
|
||||
USER( ldrbt r3, [r1], #1) @ May fault
|
||||
USER( T(ldrb) r3, [r1], #1) @ May fault
|
||||
strb r3, [r0], #1
|
||||
USER( ldrgebt r3, [r1], #1) @ May fault
|
||||
USER( T(ldrgeb) r3, [r1], #1) @ May fault
|
||||
strgeb r3, [r0], #1
|
||||
USER( ldrgtbt r3, [r1], #1) @ May fault
|
||||
USER( T(ldrgtb) r3, [r1], #1) @ May fault
|
||||
strgtb r3, [r0], #1
|
||||
b .Lcfu_finished
|
||||
|
||||
|
@ -374,7 +375,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
|
|||
|
||||
.Lcfu_src_not_aligned:
|
||||
bic r1, r1, #3
|
||||
USER( ldrt r7, [r1], #4) @ May fault
|
||||
USER( T(ldr) r7, [r1], #4) @ May fault
|
||||
cmp ip, #2
|
||||
bgt .Lcfu_3fupi
|
||||
beq .Lcfu_2fupi
|
||||
|
@ -382,7 +383,7 @@ USER( ldrt r7, [r1], #4) @ May fault
|
|||
addmi ip, r2, #4
|
||||
bmi .Lcfu_1nowords
|
||||
mov r3, r7, pull #8
|
||||
USER( ldrt r7, [r1], #4) @ May fault
|
||||
USER( T(ldr) r7, [r1], #4) @ May fault
|
||||
orr r3, r3, r7, push #24
|
||||
str r3, [r0], #4
|
||||
mov ip, r1, lsl #32 - PAGE_SHIFT
|
||||
|
@ -417,7 +418,7 @@ USER( ldrt r7, [r1], #4) @ May fault
|
|||
stmneia r0!, {r3 - r4}
|
||||
tst ip, #4
|
||||
movne r3, r7, pull #8
|
||||
USER( ldrnet r7, [r1], #4) @ May fault
|
||||
USER( T(ldrne) r7, [r1], #4) @ May fault
|
||||
orrne r3, r3, r7, push #24
|
||||
strne r3, [r0], #4
|
||||
ands ip, ip, #3
|
||||
|
@ -437,7 +438,7 @@ USER( ldrnet r7, [r1], #4) @ May fault
|
|||
addmi ip, r2, #4
|
||||
bmi .Lcfu_2nowords
|
||||
mov r3, r7, pull #16
|
||||
USER( ldrt r7, [r1], #4) @ May fault
|
||||
USER( T(ldr) r7, [r1], #4) @ May fault
|
||||
orr r3, r3, r7, push #16
|
||||
str r3, [r0], #4
|
||||
mov ip, r1, lsl #32 - PAGE_SHIFT
|
||||
|
@ -473,7 +474,7 @@ USER( ldrt r7, [r1], #4) @ May fault
|
|||
stmneia r0!, {r3 - r4}
|
||||
tst ip, #4
|
||||
movne r3, r7, pull #16
|
||||
USER( ldrnet r7, [r1], #4) @ May fault
|
||||
USER( T(ldrne) r7, [r1], #4) @ May fault
|
||||
orrne r3, r3, r7, push #16
|
||||
strne r3, [r0], #4
|
||||
ands ip, ip, #3
|
||||
|
@ -485,7 +486,7 @@ USER( ldrnet r7, [r1], #4) @ May fault
|
|||
strb r3, [r0], #1
|
||||
movge r3, r7, get_byte_3
|
||||
strgeb r3, [r0], #1
|
||||
USER( ldrgtbt r3, [r1], #0) @ May fault
|
||||
USER( T(ldrgtb) r3, [r1], #0) @ May fault
|
||||
strgtb r3, [r0], #1
|
||||
b .Lcfu_finished
|
||||
|
||||
|
@ -493,7 +494,7 @@ USER( ldrgtbt r3, [r1], #0) @ May fault
|
|||
addmi ip, r2, #4
|
||||
bmi .Lcfu_3nowords
|
||||
mov r3, r7, pull #24
|
||||
USER( ldrt r7, [r1], #4) @ May fault
|
||||
USER( T(ldr) r7, [r1], #4) @ May fault
|
||||
orr r3, r3, r7, push #8
|
||||
str r3, [r0], #4
|
||||
mov ip, r1, lsl #32 - PAGE_SHIFT
|
||||
|
@ -528,7 +529,7 @@ USER( ldrt r7, [r1], #4) @ May fault
|
|||
stmneia r0!, {r3 - r4}
|
||||
tst ip, #4
|
||||
movne r3, r7, pull #24
|
||||
USER( ldrnet r7, [r1], #4) @ May fault
|
||||
USER( T(ldrne) r7, [r1], #4) @ May fault
|
||||
orrne r3, r3, r7, push #8
|
||||
strne r3, [r0], #4
|
||||
ands ip, ip, #3
|
||||
|
@ -538,9 +539,9 @@ USER( ldrnet r7, [r1], #4) @ May fault
|
|||
beq .Lcfu_finished
|
||||
cmp ip, #2
|
||||
strb r3, [r0], #1
|
||||
USER( ldrgebt r3, [r1], #1) @ May fault
|
||||
USER( T(ldrgeb) r3, [r1], #1) @ May fault
|
||||
strgeb r3, [r0], #1
|
||||
USER( ldrgtbt r3, [r1], #1) @ May fault
|
||||
USER( T(ldrgtb) r3, [r1], #1) @ May fault
|
||||
strgtb r3, [r0], #1
|
||||
b .Lcfu_finished
|
||||
ENDPROC(__copy_from_user)
|
||||
|
|
|
@ -21,13 +21,12 @@
|
|||
#include <linux/string.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/clkdev.h>
|
||||
#include <mach/csp/hw_cfg.h>
|
||||
#include <mach/csp/chipcHw_def.h>
|
||||
#include <mach/csp/chipcHw_reg.h>
|
||||
#include <mach/csp/chipcHw_inline.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
|
||||
#include "clock.h"
|
||||
|
||||
#define clk_is_primary(x) ((x)->type & CLK_TYPE_PRIMARY)
|
||||
|
|
|
@ -30,10 +30,10 @@
|
|||
#include <linux/amba/bus.h>
|
||||
#include <linux/clocksource.h>
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <mach/csp/mm_addr.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <asm/clkdev.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/hardware/arm_timer.h>
|
||||
|
|
|
@ -3,6 +3,7 @@ menu "CNS3XXX platform type"
|
|||
|
||||
config MACH_CNS3420VB
|
||||
bool "Support for CNS3420 Validation Board"
|
||||
select MIGHT_HAVE_PCI
|
||||
help
|
||||
Include support for the Cavium Networks CNS3420 MPCore Platform
|
||||
Baseboard.
|
||||
|
|
|
@ -68,7 +68,7 @@
|
|||
#ifndef __ASSEMBLER__
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <asm/clkdev.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#define PLLSTAT_GOSTAT BIT(0)
|
||||
#define PLLCMD_GOSET BIT(0)
|
||||
|
|
|
@ -19,10 +19,10 @@
|
|||
#include <linux/string.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <mach/hardware.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <asm/div64.h>
|
||||
|
||||
|
||||
|
|
|
@ -22,8 +22,7 @@
|
|||
#include <linux/err.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <mach/clock.h>
|
||||
#include <mach/hardware.h>
|
||||
|
|
|
@ -21,11 +21,11 @@
|
|||
#include <linux/clk.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <mach/clock.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/common.h>
|
||||
#include <asm/clkdev.h>
|
||||
#include <asm/div64.h>
|
||||
|
||||
#define IO_ADDR_CCM(off) (MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
|
||||
|
|
|
@ -21,8 +21,8 @@
|
|||
#include <linux/clk.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <asm/div64.h>
|
||||
|
||||
#include <mach/clock.h>
|
||||
|
|
|
@ -4,6 +4,7 @@ menu "Integrator Options"
|
|||
|
||||
config ARCH_INTEGRATOR_AP
|
||||
bool "Support Integrator/AP and Integrator/PP2 platforms"
|
||||
select MIGHT_HAVE_PCI
|
||||
help
|
||||
Include support for the ARM(R) Integrator/AP and
|
||||
Integrator/PP2 platforms.
|
||||
|
|
|
@ -21,9 +21,8 @@
|
|||
#include <linux/amba/bus.h>
|
||||
#include <linux/amba/serial.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <mach/clkdev.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/platform.h>
|
||||
#include <asm/irq.h>
|
||||
|
|
|
@ -22,9 +22,8 @@
|
|||
#include <linux/amba/clcd.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <mach/clkdev.h>
|
||||
#include <asm/hardware/icst.h>
|
||||
#include <mach/lm.h>
|
||||
#include <mach/impd1.h>
|
||||
|
|
|
@ -21,9 +21,8 @@
|
|||
#include <linux/amba/mmci.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <mach/clkdev.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/platform.h>
|
||||
#include <asm/irq.h>
|
||||
|
|
|
@ -58,13 +58,13 @@ static inline unsigned long __lbus_to_virt(dma_addr_t x)
|
|||
__dma; \
|
||||
})
|
||||
|
||||
#define __arch_page_to_dma(dev, page) \
|
||||
#define __arch_pfn_to_dma(dev, pfn) \
|
||||
({ \
|
||||
/* __is_lbus_virt() can never be true for RAM pages */ \
|
||||
(dma_addr_t)page_to_phys(page); \
|
||||
(dma_addr_t)__pfn_to_phys(pfn); \
|
||||
})
|
||||
|
||||
#define __arch_dma_to_page(dev, addr) phys_to_page(addr)
|
||||
#define __arch_dma_to_pfn(dev, addr) __phys_to_pfn(addr)
|
||||
|
||||
#endif /* CONFIG_ARCH_IOP13XX */
|
||||
#endif /* !ASSEMBLY */
|
||||
|
|
|
@ -4,6 +4,7 @@ menu "Kendin/Micrel KS8695 Implementations"
|
|||
|
||||
config MACH_KS8695
|
||||
bool "KS8695 development board"
|
||||
select MIGHT_HAVE_PCI
|
||||
help
|
||||
Say 'Y' here if you want your kernel to run on the original
|
||||
Kendin-Micrel KS8695 development board.
|
||||
|
|
|
@ -35,17 +35,17 @@ extern struct bus_type platform_bus_type;
|
|||
__phys_to_virt(x) : __bus_to_virt(x)); })
|
||||
#define __arch_virt_to_dma(dev, x) ({ is_lbus_device(dev) ? \
|
||||
(dma_addr_t)__virt_to_phys(x) : (dma_addr_t)__virt_to_bus(x); })
|
||||
#define __arch_page_to_dma(dev, x) \
|
||||
({ dma_addr_t __dma = page_to_phys(page); \
|
||||
#define __arch_pfn_to_dma(dev, pfn) \
|
||||
({ dma_addr_t __dma = __pfn_to_phys(pfn); \
|
||||
if (!is_lbus_device(dev)) \
|
||||
__dma = __dma - PHYS_OFFSET + KS8695_PCIMEM_PA; \
|
||||
__dma; })
|
||||
|
||||
#define __arch_dma_to_page(dev, x) \
|
||||
#define __arch_dma_to_pfn(dev, x) \
|
||||
({ dma_addr_t __dma = x; \
|
||||
if (!is_lbus_device(dev)) \
|
||||
__dma += PHYS_OFFSET - KS8695_PCIMEM_PA; \
|
||||
phys_to_page(__dma); \
|
||||
__phys_to_pfn(__dma); \
|
||||
})
|
||||
|
||||
#endif
|
||||
|
|
|
@ -90,10 +90,9 @@
|
|||
#include <linux/clk.h>
|
||||
#include <linux/amba/bus.h>
|
||||
#include <linux/amba/clcd.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <mach/hardware.h>
|
||||
#include <asm/clkdev.h>
|
||||
#include <mach/clkdev.h>
|
||||
#include <mach/platform.h>
|
||||
#include "clock.h"
|
||||
#include "common.h"
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
struct clkops {
|
||||
void (*enable)(struct clk *);
|
||||
|
|
|
@ -31,9 +31,9 @@
|
|||
|
||||
#include <asm/hardware/gic.h>
|
||||
|
||||
static inline void smp_cross_call(const struct cpumask *mask)
|
||||
static inline void smp_cross_call(const struct cpumask *mask, int ipi)
|
||||
{
|
||||
gic_raise_softirq(mask, 1);
|
||||
gic_raise_softirq(mask, ipi);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -21,8 +21,7 @@
|
|||
#include <linux/list.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <mach/clock.h>
|
||||
#include <mach/hardware.h>
|
||||
|
|
|
@ -23,8 +23,8 @@
|
|||
#include <linux/clk.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <asm/div64.h>
|
||||
|
||||
#include <mach/clock.h>
|
||||
|
|
|
@ -21,8 +21,7 @@
|
|||
#include <linux/list.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <mach/clock.h>
|
||||
#include <mach/hardware.h>
|
||||
|
|
|
@ -14,8 +14,8 @@
|
|||
#include <linux/delay.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <asm/div64.h>
|
||||
|
||||
#include <mach/hardware.h>
|
||||
|
|
|
@ -2,12 +2,12 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <mach/clock.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/common.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <asm/bug.h>
|
||||
#include <asm/div64.h>
|
||||
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/clk.h>
|
||||
#include <asm/clkdev.h>
|
||||
#include <linux/clkdev.h>
|
||||
#include "clock.h"
|
||||
|
||||
/*
|
||||
|
|
|
@ -10,7 +10,7 @@
|
|||
* the Free Software Foundation; either version 2 of the License.
|
||||
*/
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
void nuc93x_clk_enable(struct clk *clk, int enable);
|
||||
void clks_register(struct clk_lookup *clks, size_t num);
|
||||
|
|
|
@ -17,9 +17,9 @@
|
|||
#include <linux/err.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/clkdev.h>
|
||||
|
||||
#include <plat/cpu.h>
|
||||
#include <plat/usb.h>
|
||||
|
|
|
@ -26,10 +26,10 @@
|
|||
#include <linux/clk.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <plat/cpu.h>
|
||||
#include <plat/clock.h>
|
||||
#include <asm/clkdev.h>
|
||||
|
||||
#include "clock.h"
|
||||
#include "prm.h"
|
||||
|
|
|
@ -17,16 +17,13 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/completion.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <mach/omap4-common.h>
|
||||
|
||||
static DECLARE_COMPLETION(cpu_killed);
|
||||
|
||||
int platform_cpu_kill(unsigned int cpu)
|
||||
{
|
||||
return wait_for_completion_timeout(&cpu_killed, 5000);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -35,15 +32,6 @@ int platform_cpu_kill(unsigned int cpu)
|
|||
*/
|
||||
void platform_cpu_die(unsigned int cpu)
|
||||
{
|
||||
unsigned int this_cpu = hard_smp_processor_id();
|
||||
|
||||
if (cpu != this_cpu) {
|
||||
pr_crit("platform_cpu_die running on %u, should be %u\n",
|
||||
this_cpu, cpu);
|
||||
BUG();
|
||||
}
|
||||
pr_notice("CPU%u: shutdown\n", cpu);
|
||||
complete(&cpu_killed);
|
||||
flush_cache_all();
|
||||
dsb();
|
||||
|
||||
|
|
|
@ -21,7 +21,6 @@
|
|||
#include <linux/io.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/localtimer.h>
|
||||
#include <asm/smp_scu.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/omap4-common.h>
|
||||
|
@ -29,22 +28,10 @@
|
|||
/* SCU base address */
|
||||
static void __iomem *scu_base;
|
||||
|
||||
/*
|
||||
* Use SCU config register to count number of cores
|
||||
*/
|
||||
static inline unsigned int get_core_count(void)
|
||||
{
|
||||
if (scu_base)
|
||||
return scu_get_core_count(scu_base);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(boot_lock);
|
||||
|
||||
void __cpuinit platform_secondary_init(unsigned int cpu)
|
||||
{
|
||||
trace_hardirqs_off();
|
||||
|
||||
/*
|
||||
* If any interrupts are already enabled for the primary
|
||||
* core (e.g. timer irq), then they will not have been enabled
|
||||
|
@ -76,7 +63,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
|
|||
omap_modify_auxcoreboot0(0x200, 0xfffffdff);
|
||||
flush_cache_all();
|
||||
smp_wmb();
|
||||
smp_cross_call(cpumask_of(cpu));
|
||||
smp_cross_call(cpumask_of(cpu), 1);
|
||||
|
||||
/*
|
||||
* Now the secondary core is starting up let it run its
|
||||
|
@ -118,25 +105,9 @@ void __init smp_init_cpus(void)
|
|||
scu_base = ioremap(OMAP44XX_SCU_BASE, SZ_256);
|
||||
BUG_ON(!scu_base);
|
||||
|
||||
ncores = get_core_count();
|
||||
|
||||
for (i = 0; i < ncores; i++)
|
||||
set_cpu_possible(i, true);
|
||||
}
|
||||
|
||||
void __init smp_prepare_cpus(unsigned int max_cpus)
|
||||
{
|
||||
unsigned int ncores = get_core_count();
|
||||
unsigned int cpu = smp_processor_id();
|
||||
int i;
|
||||
ncores = scu_get_core_count(scu_base);
|
||||
|
||||
/* sanity check */
|
||||
if (ncores == 0) {
|
||||
printk(KERN_ERR
|
||||
"OMAP4: strange core count of 0? Default to 1\n");
|
||||
ncores = 1;
|
||||
}
|
||||
|
||||
if (ncores > NR_CPUS) {
|
||||
printk(KERN_WARNING
|
||||
"OMAP4: no. of cores (%d) greater than configured "
|
||||
|
@ -144,13 +115,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
|
|||
ncores, NR_CPUS);
|
||||
ncores = NR_CPUS;
|
||||
}
|
||||
smp_store_cpu_info(cpu);
|
||||
|
||||
/*
|
||||
* are we trying to boot more cores than exist?
|
||||
*/
|
||||
if (max_cpus > ncores)
|
||||
max_cpus = ncores;
|
||||
for (i = 0; i < ncores; i++)
|
||||
set_cpu_possible(i, true);
|
||||
}
|
||||
|
||||
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
|
||||
{
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Initialise the present map, which describes the set of CPUs
|
||||
|
@ -159,18 +131,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
|
|||
for (i = 0; i < max_cpus; i++)
|
||||
set_cpu_present(i, true);
|
||||
|
||||
if (max_cpus > 1) {
|
||||
/*
|
||||
* Enable the local timer or broadcast device for the
|
||||
* boot CPU, but only if we have more than one CPU.
|
||||
*/
|
||||
percpu_timer_setup();
|
||||
|
||||
/*
|
||||
* Initialise the SCU and wake up the secondary core using
|
||||
* wakeup_secondary().
|
||||
*/
|
||||
scu_enable(scu_base);
|
||||
wakeup_secondary();
|
||||
}
|
||||
/*
|
||||
* Initialise the SCU and wake up the secondary core using
|
||||
* wakeup_secondary().
|
||||
*/
|
||||
scu_enable(scu_base);
|
||||
wakeup_secondary();
|
||||
}
|
||||
|
|
|
@ -21,8 +21,7 @@
|
|||
#include <linux/err.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/clock.h>
|
||||
|
|
|
@ -94,6 +94,7 @@ config MACH_ARMCORE
|
|||
select PXA27x
|
||||
select IWMMXT
|
||||
select PXA25x
|
||||
select MIGHT_HAVE_PCI
|
||||
|
||||
config MACH_EM_X270
|
||||
bool "CompuLab EM-x270 platform"
|
||||
|
|
|
@ -11,8 +11,8 @@
|
|||
#include <linux/spinlock.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <mach/pxa2xx-regs.h>
|
||||
#include <mach/hardware.h>
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
#include <asm/clkdev.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
struct clkops {
|
||||
void (*enable)(struct clk *);
|
||||
|
|
|
@ -30,8 +30,8 @@
|
|||
#include <linux/ata_platform.h>
|
||||
#include <linux/amba/mmci.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <asm/system.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <asm/irq.h>
|
||||
|
@ -47,7 +47,6 @@
|
|||
|
||||
#include <asm/hardware/gic.h>
|
||||
|
||||
#include <mach/clkdev.h>
|
||||
#include <mach/platform.h>
|
||||
#include <mach/irqs.h>
|
||||
#include <asm/hardware/timer-sp.h>
|
||||
|
|
|
@ -11,14 +11,11 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/completion.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
extern volatile int pen_release;
|
||||
|
||||
static DECLARE_COMPLETION(cpu_killed);
|
||||
|
||||
static inline void cpu_enter_lowpower(void)
|
||||
{
|
||||
unsigned int v;
|
||||
|
@ -34,10 +31,10 @@ static inline void cpu_enter_lowpower(void)
|
|||
" bic %0, %0, #0x20\n"
|
||||
" mcr p15, 0, %0, c1, c0, 1\n"
|
||||
" mrc p15, 0, %0, c1, c0, 0\n"
|
||||
" bic %0, %0, #0x04\n"
|
||||
" bic %0, %0, %2\n"
|
||||
" mcr p15, 0, %0, c1, c0, 0\n"
|
||||
: "=&r" (v)
|
||||
: "r" (0)
|
||||
: "r" (0), "Ir" (CR_C)
|
||||
: "cc");
|
||||
}
|
||||
|
||||
|
@ -46,17 +43,17 @@ static inline void cpu_leave_lowpower(void)
|
|||
unsigned int v;
|
||||
|
||||
asm volatile( "mrc p15, 0, %0, c1, c0, 0\n"
|
||||
" orr %0, %0, #0x04\n"
|
||||
" orr %0, %0, %1\n"
|
||||
" mcr p15, 0, %0, c1, c0, 0\n"
|
||||
" mrc p15, 0, %0, c1, c0, 1\n"
|
||||
" orr %0, %0, #0x20\n"
|
||||
" mcr p15, 0, %0, c1, c0, 1\n"
|
||||
: "=&r" (v)
|
||||
:
|
||||
: "Ir" (CR_C)
|
||||
: "cc");
|
||||
}
|
||||
|
||||
static inline void platform_do_lowpower(unsigned int cpu)
|
||||
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
|
||||
{
|
||||
/*
|
||||
* there is no power-control hardware on this platform, so all
|
||||
|
@ -80,22 +77,19 @@ static inline void platform_do_lowpower(unsigned int cpu)
|
|||
}
|
||||
|
||||
/*
|
||||
* getting here, means that we have come out of WFI without
|
||||
* Getting here, means that we have come out of WFI without
|
||||
* having been woken up - this shouldn't happen
|
||||
*
|
||||
* The trouble is, letting people know about this is not really
|
||||
* possible, since we are currently running incoherently, and
|
||||
* therefore cannot safely call printk() or anything else
|
||||
* Just note it happening - when we're woken, we can report
|
||||
* its occurrence.
|
||||
*/
|
||||
#ifdef DEBUG
|
||||
printk("CPU%u: spurious wakeup call\n", cpu);
|
||||
#endif
|
||||
(*spurious)++;
|
||||
}
|
||||
}
|
||||
|
||||
int platform_cpu_kill(unsigned int cpu)
|
||||
{
|
||||
return wait_for_completion_timeout(&cpu_killed, 5000);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -105,30 +99,22 @@ int platform_cpu_kill(unsigned int cpu)
|
|||
*/
|
||||
void platform_cpu_die(unsigned int cpu)
|
||||
{
|
||||
#ifdef DEBUG
|
||||
unsigned int this_cpu = hard_smp_processor_id();
|
||||
|
||||
if (cpu != this_cpu) {
|
||||
printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
|
||||
this_cpu, cpu);
|
||||
BUG();
|
||||
}
|
||||
#endif
|
||||
|
||||
printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
|
||||
complete(&cpu_killed);
|
||||
int spurious = 0;
|
||||
|
||||
/*
|
||||
* we're ready for shutdown now, so do it
|
||||
*/
|
||||
cpu_enter_lowpower();
|
||||
platform_do_lowpower(cpu);
|
||||
platform_do_lowpower(cpu, &spurious);
|
||||
|
||||
/*
|
||||
* bring this CPU back into the world of cache
|
||||
* coherency, and then restore interrupts
|
||||
*/
|
||||
cpu_leave_lowpower();
|
||||
|
||||
if (spurious)
|
||||
pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
|
||||
}
|
||||
|
||||
int platform_cpu_disable(unsigned int cpu)
|
||||
|
|
|
@ -2,14 +2,13 @@
|
|||
#define ASMARM_ARCH_SMP_H
|
||||
|
||||
#include <asm/hardware/gic.h>
|
||||
#include <asm/smp_mpidr.h>
|
||||
|
||||
/*
|
||||
* We use IRQ1 as the IPI
|
||||
*/
|
||||
static inline void smp_cross_call(const struct cpumask *mask)
|
||||
static inline void smp_cross_call(const struct cpumask *mask, int ipi)
|
||||
{
|
||||
gic_raise_softirq(mask, 1);
|
||||
gic_raise_softirq(mask, ipi);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@@ -19,7 +19,6 @@
#include <asm/cacheflush.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/localtimer.h>
#include <asm/unified.h>

#include <mach/board-eb.h>
@@ -37,6 +36,19 @@ extern void realview_secondary_startup(void);
 */
volatile int __cpuinitdata pen_release = -1;

/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not. This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
	outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
}

static void __iomem *scu_base_addr(void)
{
	if (machine_is_realview_eb_mp())
@@ -50,20 +62,10 @@ static void __iomem *scu_base_addr(void)
	return (void __iomem *)0;
}

static inline unsigned int get_core_count(void)
{
	void __iomem *scu_base = scu_base_addr();
	if (scu_base)
		return scu_get_core_count(scu_base);
	return 1;
}

static DEFINE_SPINLOCK(boot_lock);

void __cpuinit platform_secondary_init(unsigned int cpu)
{
	trace_hardirqs_off();

	/*
	 * if any interrupts are already enabled for the primary
	 * core (e.g. timer irq), then they will not have been enabled
@@ -75,8 +77,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
	 * let the primary processor know we're out of the
	 * pen, then head off into the C entry point
	 */
	pen_release = -1;
	smp_wmb();
	write_pen_release(-1);

	/*
	 * Synchronise with the boot thread.
@@ -103,20 +104,14 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	 * Note that "pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	pen_release = cpu;
	flush_cache_all();
	write_pen_release(cpu);

	/*
	 * XXX
	 *
	 * This is a later addition to the booting protocol: the
	 * bootMonitor now puts secondary cores into WFI, so
	 * poke_milo() no longer gets the cores moving; we need
	 * to send a soft interrupt to wake the secondary core.
	 * Use smp_cross_call() for this, since there's little
	 * point duplicating the code here
	 * Send the secondary CPU a soft interrupt, thereby causing
	 * the boot monitor to read the system wide flags register,
	 * and branch to the address found there.
	 */
	smp_cross_call(cpumask_of(cpu));
	smp_cross_call(cpumask_of(cpu), 1);

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
@@ -136,48 +131,18 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
	return pen_release != -1 ? -ENOSYS : 0;
}
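Because the pen_release handshake is spread over several hunks, it can help to see both ends in one place. A rough sketch of the protocol, using only the helpers visible above (release_from_pen() is an invented name; the timeout polling loop and the secondary's assembly spin loop in headsmp.S are elided):

static int release_from_pen(unsigned int cpu)
{
	write_pen_release(cpu);			/* primary: name the CPU allowed out of the pen */
	smp_cross_call(cpumask_of(cpu), 1);	/* primary: soft interrupt to wake it from WFI  */

	/*
	 * the secondary, once running, answers in platform_secondary_init():
	 *     write_pen_release(-1);
	 */

	return pen_release != -1 ? -ENOSYS : 0;	/* still in the pen => bringup failed */
}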
|
||||
static void __init poke_milo(void)
|
||||
{
|
||||
/* nobody is to be released from the pen yet */
|
||||
pen_release = -1;
|
||||
|
||||
/*
|
||||
* Write the address of secondary startup into the system-wide flags
|
||||
* register. The BootMonitor waits for this register to become
|
||||
* non-zero.
|
||||
*/
|
||||
__raw_writel(BSYM(virt_to_phys(realview_secondary_startup)),
|
||||
__io_address(REALVIEW_SYS_FLAGSSET));
|
||||
|
||||
mb();
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialise the CPU possible map early - this describes the CPUs
|
||||
* which may be present or become present in the system.
|
||||
*/
|
||||
void __init smp_init_cpus(void)
|
||||
{
|
||||
unsigned int i, ncores = get_core_count();
|
||||
void __iomem *scu_base = scu_base_addr();
|
||||
unsigned int i, ncores;
|
||||
|
||||
for (i = 0; i < ncores; i++)
|
||||
set_cpu_possible(i, true);
|
||||
}
|
||||
|
||||
void __init smp_prepare_cpus(unsigned int max_cpus)
|
||||
{
|
||||
unsigned int ncores = get_core_count();
|
||||
unsigned int cpu = smp_processor_id();
|
||||
int i;
|
||||
ncores = scu_base ? scu_get_core_count(scu_base) : 1;
|
||||
|
||||
/* sanity check */
|
||||
if (ncores == 0) {
|
||||
printk(KERN_ERR
|
||||
"Realview: strange CM count of 0? Default to 1\n");
|
||||
|
||||
ncores = 1;
|
||||
}
|
||||
|
||||
if (ncores > NR_CPUS) {
|
||||
printk(KERN_WARNING
|
||||
"Realview: no. of cores (%d) greater than configured "
|
||||
|
@ -186,13 +151,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
|
|||
ncores = NR_CPUS;
|
||||
}
|
||||
|
||||
smp_store_cpu_info(cpu);
|
||||
for (i = 0; i < ncores; i++)
|
||||
set_cpu_possible(i, true);
|
||||
}
|
||||
|
||||
/*
|
||||
* are we trying to boot more cores than exist?
|
||||
*/
|
||||
if (max_cpus > ncores)
|
||||
max_cpus = ncores;
|
||||
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
|
||||
{
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Initialise the present map, which describes the set of CPUs
|
||||
|
@ -201,21 +166,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
|
|||
for (i = 0; i < max_cpus; i++)
|
||||
set_cpu_present(i, true);
|
||||
|
||||
/*
|
||||
* Initialise the SCU if there are more than one CPU and let
|
||||
* them know where to start. Note that, on modern versions of
|
||||
* MILO, the "poke" doesn't actually do anything until each
|
||||
* individual core is sent a soft interrupt to get it out of
|
||||
* WFI
|
||||
*/
|
||||
if (max_cpus > 1) {
|
||||
/*
|
||||
* Enable the local timer or broadcast device for the
|
||||
* boot CPU, but only if we have more than one CPU.
|
||||
*/
|
||||
percpu_timer_setup();
|
||||
scu_enable(scu_base_addr());
|
||||
|
||||
scu_enable(scu_base_addr());
|
||||
poke_milo();
|
||||
}
|
||||
/*
|
||||
* Write the address of secondary startup into the
|
||||
* system-wide flags register. The BootMonitor waits
|
||||
* until it receives a soft interrupt, and then the
|
||||
* secondary CPU branches to this address.
|
||||
*/
|
||||
__raw_writel(BSYM(virt_to_phys(realview_secondary_startup)),
|
||||
__io_address(REALVIEW_SYS_FLAGSSET));
|
||||
}
|
||||
|
|
|
@@ -59,7 +59,7 @@ config MACH_JIVE
	  Say Y here if you are using the Logitech Jive.

config MACH_JIVE_SHOW_BOOTLOADER
	bool "Allow access to bootloader partitions in MTD"
	bool "Allow access to bootloader partitions in MTD (EXPERIMENTAL)"
	depends on MACH_JIVE && EXPERIMENTAL

config MACH_SMDK2413
@ -13,14 +13,11 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/completion.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
extern volatile int pen_release;
|
||||
|
||||
static DECLARE_COMPLETION(cpu_killed);
|
||||
|
||||
static inline void cpu_enter_lowpower(void)
|
||||
{
|
||||
unsigned int v;
|
||||
|
@ -33,13 +30,13 @@ static inline void cpu_enter_lowpower(void)
|
|||
* Turn off coherency
|
||||
*/
|
||||
" mrc p15, 0, %0, c1, c0, 1\n"
|
||||
" bic %0, %0, #0x20\n"
|
||||
" bic %0, %0, %2\n"
|
||||
" mcr p15, 0, %0, c1, c0, 1\n"
|
||||
" mrc p15, 0, %0, c1, c0, 0\n"
|
||||
" bic %0, %0, #0x04\n"
|
||||
" mcr p15, 0, %0, c1, c0, 0\n"
|
||||
: "=&r" (v)
|
||||
: "r" (0)
|
||||
: "r" (0), "Ir" (CR_C)
|
||||
: "cc");
|
||||
}
|
||||
|
||||
|
@ -49,17 +46,17 @@ static inline void cpu_leave_lowpower(void)
|
|||
|
||||
asm volatile(
|
||||
"mrc p15, 0, %0, c1, c0, 0\n"
|
||||
" orr %0, %0, #0x04\n"
|
||||
" orr %0, %0, %1\n"
|
||||
" mcr p15, 0, %0, c1, c0, 0\n"
|
||||
" mrc p15, 0, %0, c1, c0, 1\n"
|
||||
" orr %0, %0, #0x20\n"
|
||||
" mcr p15, 0, %0, c1, c0, 1\n"
|
||||
: "=&r" (v)
|
||||
:
|
||||
: "Ir" (CR_C)
|
||||
: "cc");
|
||||
}
|
||||
|
||||
static inline void platform_do_lowpower(unsigned int cpu)
|
||||
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
|
||||
{
|
||||
/*
|
||||
* there is no power-control hardware on this platform, so all
|
||||
|
@ -83,22 +80,19 @@ static inline void platform_do_lowpower(unsigned int cpu)
|
|||
}
|
||||
|
||||
/*
|
||||
* getting here, means that we have come out of WFI without
|
||||
* Getting here, means that we have come out of WFI without
|
||||
* having been woken up - this shouldn't happen
|
||||
*
|
||||
* The trouble is, letting people know about this is not really
|
||||
* possible, since we are currently running incoherently, and
|
||||
* therefore cannot safely call printk() or anything else
|
||||
* Just note it happening - when we're woken, we can report
|
||||
* its occurrence.
|
||||
*/
|
||||
#ifdef DEBUG
|
||||
printk(KERN_WARN "CPU%u: spurious wakeup call\n", cpu);
|
||||
#endif
|
||||
(*spurious)++;
|
||||
}
|
||||
}
|
||||
|
||||
int platform_cpu_kill(unsigned int cpu)
|
||||
{
|
||||
return wait_for_completion_timeout(&cpu_killed, 5000);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -108,30 +102,22 @@ int platform_cpu_kill(unsigned int cpu)
|
|||
*/
|
||||
void platform_cpu_die(unsigned int cpu)
|
||||
{
|
||||
#ifdef DEBUG
|
||||
unsigned int this_cpu = hard_smp_processor_id();
|
||||
|
||||
if (cpu != this_cpu) {
|
||||
printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
|
||||
this_cpu, cpu);
|
||||
BUG();
|
||||
}
|
||||
#endif
|
||||
|
||||
printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
|
||||
complete(&cpu_killed);
|
||||
int spurious = 0;
|
||||
|
||||
/*
|
||||
* we're ready for shutdown now, so do it
|
||||
*/
|
||||
cpu_enter_lowpower();
|
||||
platform_do_lowpower(cpu);
|
||||
platform_do_lowpower(cpu, &spurious);
|
||||
|
||||
/*
|
||||
* bring this CPU back into the world of cache
|
||||
* coherency, and then restore interrupts
|
||||
*/
|
||||
cpu_leave_lowpower();
|
||||
|
||||
if (spurious)
|
||||
pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
|
||||
}
|
||||
|
||||
int platform_cpu_disable(unsigned int cpu)
|
||||
|
|
|
@ -7,14 +7,13 @@
|
|||
#define ASM_ARCH_SMP_H __FILE__
|
||||
|
||||
#include <asm/hardware/gic.h>
|
||||
#include <asm/smp_mpidr.h>
|
||||
|
||||
/*
|
||||
* We use IRQ1 as the IPI
|
||||
*/
|
||||
static inline void smp_cross_call(const struct cpumask *mask)
|
||||
static inline void smp_cross_call(const struct cpumask *mask, int ipi)
|
||||
{
|
||||
gic_raise_softirq(mask, 1);
|
||||
gic_raise_softirq(mask, ipi);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
#include <linux/io.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/localtimer.h>
|
||||
#include <asm/smp_scu.h>
|
||||
#include <asm/unified.h>
|
||||
|
||||
|
@ -38,6 +37,19 @@ extern void s5pv310_secondary_startup(void);
|
|||
|
||||
volatile int __cpuinitdata pen_release = -1;
|
||||
|
||||
/*
|
||||
* Write pen_release in a way that is guaranteed to be visible to all
|
||||
* observers, irrespective of whether they're taking part in coherency
|
||||
* or not. This is necessary for the hotplug code to work reliably.
|
||||
*/
|
||||
static void write_pen_release(int val)
|
||||
{
|
||||
pen_release = val;
|
||||
smp_wmb();
|
||||
__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
|
||||
outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
|
||||
}
|
||||
|
||||
static void __iomem *scu_base_addr(void)
|
||||
{
|
||||
return (void __iomem *)(S5P_VA_SCU);
|
||||
|
@ -47,8 +59,6 @@ static DEFINE_SPINLOCK(boot_lock);
|
|||
|
||||
void __cpuinit platform_secondary_init(unsigned int cpu)
|
||||
{
|
||||
trace_hardirqs_off();
|
||||
|
||||
/*
|
||||
* if any interrupts are already enabled for the primary
|
||||
* core (e.g. timer irq), then they will not have been enabled
|
||||
|
@ -60,8 +70,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
|
|||
* let the primary processor know we're out of the
|
||||
* pen, then head off into the C entry point
|
||||
*/
|
||||
pen_release = -1;
|
||||
smp_wmb();
|
||||
write_pen_release(-1);
|
||||
|
||||
/*
|
||||
* Synchronise with the boot thread.
|
||||
|
@ -88,16 +97,14 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
|
|||
* Note that "pen_release" is the hardware CPU ID, whereas
|
||||
* "cpu" is Linux's internal ID.
|
||||
*/
|
||||
pen_release = cpu;
|
||||
__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
|
||||
outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
|
||||
write_pen_release(cpu);
|
||||
|
||||
/*
|
||||
* Send the secondary CPU a soft interrupt, thereby causing
|
||||
* the boot monitor to read the system wide flags register,
|
||||
* and branch to the address found there.
|
||||
*/
|
||||
smp_cross_call(cpumask_of(cpu));
|
||||
smp_cross_call(cpumask_of(cpu), 1);
|
||||
|
||||
timeout = jiffies + (1 * HZ);
|
||||
while (time_before(jiffies, timeout)) {
|
||||
|
@ -130,13 +137,6 @@ void __init smp_init_cpus(void)
|
|||
ncores = scu_base ? scu_get_core_count(scu_base) : 1;
|
||||
|
||||
/* sanity check */
|
||||
if (ncores == 0) {
|
||||
printk(KERN_ERR
|
||||
"S5PV310: strange CM count of 0? Default to 1\n");
|
||||
|
||||
ncores = 1;
|
||||
}
|
||||
|
||||
if (ncores > NR_CPUS) {
|
||||
printk(KERN_WARNING
|
||||
"S5PV310: no. of cores (%d) greater than configured "
|
||||
|
@ -149,18 +149,10 @@ void __init smp_init_cpus(void)
|
|||
set_cpu_possible(i, true);
|
||||
}
|
||||
|
||||
void __init smp_prepare_cpus(unsigned int max_cpus)
|
||||
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
|
||||
{
|
||||
unsigned int ncores = num_possible_cpus();
|
||||
unsigned int cpu = smp_processor_id();
|
||||
int i;
|
||||
|
||||
smp_store_cpu_info(cpu);
|
||||
|
||||
/* are we trying to boot more cores than exist? */
|
||||
if (max_cpus > ncores)
|
||||
max_cpus = ncores;
|
||||
|
||||
/*
|
||||
* Initialise the present map, which describes the set of CPUs
|
||||
* actually populated at the present time.
|
||||
|
@ -168,25 +160,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
|
|||
for (i = 0; i < max_cpus; i++)
|
||||
set_cpu_present(i, true);
|
||||
|
||||
scu_enable(scu_base_addr());
|
||||
|
||||
/*
|
||||
* Initialise the SCU if there are more than one CPU and let
|
||||
* them know where to start.
|
||||
* Write the address of secondary startup into the
|
||||
* system-wide flags register. The boot monitor waits
|
||||
* until it receives a soft interrupt, and then the
|
||||
* secondary CPU branches to this address.
|
||||
*/
|
||||
if (max_cpus > 1) {
|
||||
/*
|
||||
* Enable the local timer or broadcast device for the
|
||||
* boot CPU, but only if we have more than one CPU.
|
||||
*/
|
||||
percpu_timer_setup();
|
||||
|
||||
scu_enable(scu_base_addr());
|
||||
|
||||
/*
|
||||
* Write the address of secondary startup into the
|
||||
* system-wide flags register. The boot monitor waits
|
||||
* until it receives a soft interrupt, and then the
|
||||
* secondary CPU branches to this address.
|
||||
*/
|
||||
__raw_writel(BSYM(virt_to_phys(s5pv310_secondary_startup)), S5P_VA_SYSRAM);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -6,7 +6,7 @@ config ARCH_SH7367
	bool "SH-Mobile G3 (SH7367)"
	select CPU_V6
	select HAVE_CLK
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select SH_CLK_CPG
	select GENERIC_CLOCKEVENTS

@@ -14,7 +14,7 @@ config ARCH_SH7377
	bool "SH-Mobile G4 (SH7377)"
	select CPU_V7
	select HAVE_CLK
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select SH_CLK_CPG
	select GENERIC_CLOCKEVENTS

@@ -22,7 +22,7 @@ config ARCH_SH7372
	bool "SH-Mobile AP4 (SH7372)"
	select CPU_V7
	select HAVE_CLK
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select SH_CLK_CPG
	select GENERIC_CLOCKEVENTS

@@ -20,8 +20,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
#include <linux/clkdev.h>
#include <mach/common.h>
#include <asm/clkdev.h>

/* SH7367 registers */
#define RTFRQCR 0xe6150000
@@ -20,8 +20,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
#include <linux/clkdev.h>
#include <mach/common.h>
#include <asm/clkdev.h>

/* SH7372 registers */
#define FRQCRA 0xe6150000
@@ -20,8 +20,8 @@
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/sh_clk.h>
#include <linux/clkdev.h>
#include <mach/common.h>
#include <asm/clkdev.h>

/* SH7377 registers */
#define RTFRQCR 0xe6150000
@@ -12,8 +12,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include <asm/clkdev.h>
#include <linux/clkdev.h>

#include <mach/clock.h>
#include <mach/irqs.h>
@@ -25,7 +25,7 @@
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/regulator/consumer.h>
#include <asm/clkdev.h>
#include <linux/clkdev.h>

#include "clock.h"
#include "board.h"
@@ -21,7 +21,7 @@
#define __MACH_TEGRA_CLOCK_H

#include <linux/list.h>
#include <asm/clkdev.h>
#include <linux/clkdev.h>

#define DIV_BUS (1 << 0)
#define DIV_U71 (1 << 1)
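The clock hunks in this part of the merge are all the same mechanical move: clkdev declarations now come from <linux/clkdev.h> instead of <asm/clkdev.h>, selected in Kconfig via CLKDEV_LOOKUP rather than COMMON_CLKDEV. What a platform does with the header is unchanged. For orientation only, a minimal lookup-table registration, assuming the CLKDEV_INIT()/clkdev_add_table() helpers from <linux/clkdev.h> and using invented device and clock names:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/clkdev.h>	/* previously <asm/clkdev.h> */

extern struct clk example_uart_clk;	/* platform-defined clocks, assumed for the example */
extern struct clk example_pclk;

static struct clk_lookup example_lookups[] = {
	CLKDEV_INIT("dev:uart0", NULL, &example_uart_clk),
	CLKDEV_INIT(NULL, "apb_pclk", &example_pclk),
};

static void __init example_clk_init(void)
{
	/* register the table so clk_get() can resolve these names */
	clkdev_add_table(example_lookups, ARRAY_SIZE(example_lookups));
}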
@ -11,12 +11,9 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/completion.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
static DECLARE_COMPLETION(cpu_killed);
|
||||
|
||||
static inline void cpu_enter_lowpower(void)
|
||||
{
|
||||
unsigned int v;
|
||||
|
@ -29,13 +26,13 @@ static inline void cpu_enter_lowpower(void)
|
|||
* Turn off coherency
|
||||
*/
|
||||
" mrc p15, 0, %0, c1, c0, 1\n"
|
||||
" bic %0, %0, #0x20\n"
|
||||
" bic %0, %0, %2\n"
|
||||
" mcr p15, 0, %0, c1, c0, 1\n"
|
||||
" mrc p15, 0, %0, c1, c0, 0\n"
|
||||
" bic %0, %0, #0x04\n"
|
||||
" mcr p15, 0, %0, c1, c0, 0\n"
|
||||
: "=&r" (v)
|
||||
: "r" (0)
|
||||
: "r" (0), "Ir" (CR_C)
|
||||
: "cc");
|
||||
}
|
||||
|
||||
|
@ -45,17 +42,17 @@ static inline void cpu_leave_lowpower(void)
|
|||
|
||||
asm volatile(
|
||||
"mrc p15, 0, %0, c1, c0, 0\n"
|
||||
" orr %0, %0, #0x04\n"
|
||||
" orr %0, %0, %1\n"
|
||||
" mcr p15, 0, %0, c1, c0, 0\n"
|
||||
" mrc p15, 0, %0, c1, c0, 1\n"
|
||||
" orr %0, %0, #0x20\n"
|
||||
" mcr p15, 0, %0, c1, c0, 1\n"
|
||||
: "=&r" (v)
|
||||
:
|
||||
: "Ir" (CR_C)
|
||||
: "cc");
|
||||
}
|
||||
|
||||
static inline void platform_do_lowpower(unsigned int cpu)
|
||||
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
|
||||
{
|
||||
/*
|
||||
* there is no power-control hardware on this platform, so all
|
||||
|
@ -79,22 +76,19 @@ static inline void platform_do_lowpower(unsigned int cpu)
|
|||
/*}*/
|
||||
|
||||
/*
|
||||
* getting here, means that we have come out of WFI without
|
||||
* Getting here, means that we have come out of WFI without
|
||||
* having been woken up - this shouldn't happen
|
||||
*
|
||||
* The trouble is, letting people know about this is not really
|
||||
* possible, since we are currently running incoherently, and
|
||||
* therefore cannot safely call printk() or anything else
|
||||
* Just note it happening - when we're woken, we can report
|
||||
* its occurrence.
|
||||
*/
|
||||
#ifdef DEBUG
|
||||
printk(KERN_WARN "CPU%u: spurious wakeup call\n", cpu);
|
||||
#endif
|
||||
(*spurious)++;
|
||||
}
|
||||
}
|
||||
|
||||
int platform_cpu_kill(unsigned int cpu)
|
||||
{
|
||||
return wait_for_completion_timeout(&cpu_killed, 5000);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -104,30 +98,22 @@ int platform_cpu_kill(unsigned int cpu)
|
|||
*/
|
||||
void platform_cpu_die(unsigned int cpu)
|
||||
{
|
||||
#ifdef DEBUG
|
||||
unsigned int this_cpu = hard_smp_processor_id();
|
||||
|
||||
if (cpu != this_cpu) {
|
||||
printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
|
||||
this_cpu, cpu);
|
||||
BUG();
|
||||
}
|
||||
#endif
|
||||
|
||||
printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
|
||||
complete(&cpu_killed);
|
||||
int spurious = 0;
|
||||
|
||||
/*
|
||||
* we're ready for shutdown now, so do it
|
||||
*/
|
||||
cpu_enter_lowpower();
|
||||
platform_do_lowpower(cpu);
|
||||
platform_do_lowpower(cpu, &spurious);
|
||||
|
||||
/*
|
||||
* bring this CPU back into the world of cache
|
||||
* coherency, and then restore interrupts
|
||||
*/
|
||||
cpu_leave_lowpower();
|
||||
|
||||
if (spurious)
|
||||
pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
|
||||
}
|
||||
|
||||
int platform_cpu_disable(unsigned int cpu)
|
||||
|
|
|
@ -2,21 +2,13 @@
|
|||
#define ASMARM_ARCH_SMP_H
|
||||
|
||||
#include <asm/hardware/gic.h>
|
||||
#include <asm/smp_mpidr.h>
|
||||
|
||||
/*
|
||||
* We use IRQ1 as the IPI
|
||||
*/
|
||||
static inline void smp_cross_call(const struct cpumask *mask)
|
||||
{
|
||||
gic_raise_softirq(mask, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Do nothing on MPcore.
|
||||
*/
|
||||
static inline void smp_cross_call_done(cpumask_t callmap)
|
||||
static inline void smp_cross_call(const struct cpumask *mask, int ipi)
|
||||
{
|
||||
gic_raise_softirq(mask, ipi);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
#include <asm/cacheflush.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/localtimer.h>
|
||||
#include <asm/smp_scu.h>
|
||||
|
||||
#include <mach/iomap.h>
|
||||
|
@ -41,8 +40,6 @@ static void __iomem *scu_base = IO_ADDRESS(TEGRA_ARM_PERIF_BASE);
|
|||
|
||||
void __cpuinit platform_secondary_init(unsigned int cpu)
|
||||
{
|
||||
trace_hardirqs_off();
|
||||
|
||||
/*
|
||||
* if any interrupts are already enabled for the primary
|
||||
* core (e.g. timer irq), then they will not have been enabled
|
||||
|
@ -117,24 +114,20 @@ void __init smp_init_cpus(void)
|
|||
{
|
||||
unsigned int i, ncores = scu_get_core_count(scu_base);
|
||||
|
||||
if (ncores > NR_CPUS) {
|
||||
printk(KERN_ERR "Tegra: no. of cores (%u) greater than configured (%u), clipping\n",
|
||||
ncores, NR_CPUS);
|
||||
ncores = NR_CPUS;
|
||||
}
|
||||
|
||||
for (i = 0; i < ncores; i++)
|
||||
cpu_set(i, cpu_possible_map);
|
||||
}
|
||||
|
||||
void __init smp_prepare_cpus(unsigned int max_cpus)
|
||||
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
|
||||
{
|
||||
unsigned int ncores = scu_get_core_count(scu_base);
|
||||
unsigned int cpu = smp_processor_id();
|
||||
int i;
|
||||
|
||||
smp_store_cpu_info(cpu);
|
||||
|
||||
/*
|
||||
* are we trying to boot more cores than exist?
|
||||
*/
|
||||
if (max_cpus > ncores)
|
||||
max_cpus = ncores;
|
||||
|
||||
/*
|
||||
* Initialise the present map, which describes the set of CPUs
|
||||
* actually populated at the present time.
|
||||
|
@ -142,15 +135,5 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
|
|||
for (i = 0; i < max_cpus; i++)
|
||||
set_cpu_present(i, true);
|
||||
|
||||
/*
|
||||
* Initialise the SCU if there are more than one CPU and let
|
||||
* them know where to start. Note that, on modern versions of
|
||||
* MILO, the "poke" doesn't actually do anything until each
|
||||
* individual core is sent a soft interrupt to get it out of
|
||||
* WFI
|
||||
*/
|
||||
if (max_cpus > 1) {
|
||||
percpu_timer_setup();
|
||||
scu_enable(scu_base);
|
||||
}
|
||||
scu_enable(scu_base);
|
||||
}
|
||||
|
|
|
@ -24,8 +24,7 @@
|
|||
#include <linux/delay.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/hrtimer.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <mach/iomap.h>
|
||||
|
||||
|
|
|
@ -25,8 +25,8 @@
|
|||
#include <linux/timer.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/syscon.h>
|
||||
|
||||
|
|
|
@ -13,8 +13,7 @@
|
|||
#include <linux/err.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/clkdev.h>
|
||||
#include <linux/clkdev.h>
|
||||
|
||||
#include <plat/mtu.h>
|
||||
#include <mach/hardware.h>
|
||||
|
|
|
@ -23,7 +23,6 @@ ENTRY(u8500_secondary_startup)
|
|||
ldmia r4, {r5, r6}
|
||||
sub r4, r4, r5
|
||||
add r6, r6, r4
|
||||
dsb
|
||||
pen: ldr r7, [r6]
|
||||
cmp r7, r0
|
||||
bne pen
|
||||
|
|
|
@ -11,14 +11,11 @@
|
|||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/completion.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
extern volatile int pen_release;
|
||||
|
||||
static DECLARE_COMPLETION(cpu_killed);
|
||||
|
||||
static inline void platform_do_lowpower(unsigned int cpu)
|
||||
{
|
||||
flush_cache_all();
|
||||
|
@ -38,7 +35,7 @@ static inline void platform_do_lowpower(unsigned int cpu)
|
|||
|
||||
int platform_cpu_kill(unsigned int cpu)
|
||||
{
|
||||
return wait_for_completion_timeout(&cpu_killed, 5000);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -48,19 +45,6 @@ int platform_cpu_kill(unsigned int cpu)
|
|||
*/
|
||||
void platform_cpu_die(unsigned int cpu)
|
||||
{
|
||||
#ifdef DEBUG
|
||||
unsigned int this_cpu = hard_smp_processor_id();
|
||||
|
||||
if (cpu != this_cpu) {
|
||||
printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
|
||||
this_cpu, cpu);
|
||||
BUG();
|
||||
}
|
||||
#endif
|
||||
|
||||
printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
|
||||
complete(&cpu_killed);
|
||||
|
||||
/* directly enter low power state, skipping secure registers */
|
||||
platform_do_lowpower(cpu);
|
||||
}
|
||||
|
|
|
@ -10,7 +10,6 @@
|
|||
#define ASMARM_ARCH_SMP_H
|
||||
|
||||
#include <asm/hardware/gic.h>
|
||||
#include <asm/smp_mpidr.h>
|
||||
|
||||
/* This is required to wakeup the secondary core */
|
||||
extern void u8500_secondary_startup(void);
|
||||
|
@ -18,8 +17,8 @@ extern void u8500_secondary_startup(void);
|
|||
/*
|
||||
* We use IRQ1 as the IPI
|
||||
*/
|
||||
static inline void smp_cross_call(const struct cpumask *mask)
|
||||
static inline void smp_cross_call(const struct cpumask *mask, int ipi)
|
||||
{
|
||||
gic_raise_softirq(mask, 1);
|
||||
gic_raise_softirq(mask, ipi);
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -18,7 +18,6 @@
|
|||
#include <linux/io.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/localtimer.h>
|
||||
#include <asm/smp_scu.h>
|
||||
#include <mach/hardware.h>
|
||||
|
||||
|
@ -28,17 +27,23 @@
|
|||
*/
|
||||
volatile int __cpuinitdata pen_release = -1;
|
||||
|
||||
static unsigned int __init get_core_count(void)
|
||||
/*
|
||||
* Write pen_release in a way that is guaranteed to be visible to all
|
||||
* observers, irrespective of whether they're taking part in coherency
|
||||
* or not. This is necessary for the hotplug code to work reliably.
|
||||
*/
|
||||
static void write_pen_release(int val)
|
||||
{
|
||||
return scu_get_core_count(__io_address(UX500_SCU_BASE));
|
||||
pen_release = val;
|
||||
smp_wmb();
|
||||
__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
|
||||
outer_clean_range(__pa(&pen_release), __pa(&pen_release + 1));
|
||||
}
|
||||
|
||||
static DEFINE_SPINLOCK(boot_lock);
|
||||
|
||||
void __cpuinit platform_secondary_init(unsigned int cpu)
|
||||
{
|
||||
trace_hardirqs_off();
|
||||
|
||||
/*
|
||||
* if any interrupts are already enabled for the primary
|
||||
* core (e.g. timer irq), then they will not have been enabled
|
||||
|
@ -50,7 +55,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
|
|||
* let the primary processor know we're out of the
|
||||
* pen, then head off into the C entry point
|
||||
*/
|
||||
pen_release = -1;
|
||||
write_pen_release(-1);
|
||||
|
||||
/*
|
||||
* Synchronise with the boot thread.
|
||||
|
@ -74,11 +79,9 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
|
|||
* the holding pen - release it, then wait for it to flag
|
||||
* that it has been released by resetting pen_release.
|
||||
*/
|
||||
pen_release = cpu;
|
||||
__cpuc_flush_dcache_area((void *)&pen_release, sizeof(pen_release));
|
||||
outer_clean_range(__pa(&pen_release), __pa(&pen_release) + 1);
|
||||
write_pen_release(cpu);
|
||||
|
||||
smp_cross_call(cpumask_of(cpu));
|
||||
smp_cross_call(cpumask_of(cpu), 1);
|
||||
|
||||
timeout = jiffies + (1 * HZ);
|
||||
while (time_before(jiffies, timeout)) {
|
||||
|
@ -97,9 +100,6 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
|
|||
|
||||
static void __init wakeup_secondary(void)
|
||||
{
|
||||
/* nobody is to be released from the pen yet */
|
||||
pen_release = -1;
|
||||
|
||||
/*
|
||||
* write the address of secondary startup into the backup ram register
|
||||
* at offset 0x1FF4, then write the magic number 0xA1FEED01 to the
|
||||
|
@ -126,41 +126,27 @@ static void __init wakeup_secondary(void)
|
|||
*/
|
||||
void __init smp_init_cpus(void)
|
||||
{
|
||||
unsigned int i, ncores = get_core_count();
|
||||
unsigned int i, ncores;
|
||||
|
||||
ncores = scu_get_core_count(__io_address(UX500_SCU_BASE));
|
||||
|
||||
/* sanity check */
|
||||
if (ncores > NR_CPUS) {
|
||||
printk(KERN_WARNING
|
||||
"U8500: no. of cores (%d) greater than configured "
|
||||
"maximum of %d - clipping\n",
|
||||
ncores, NR_CPUS);
|
||||
ncores = NR_CPUS;
|
||||
}
|
||||
|
||||
for (i = 0; i < ncores; i++)
|
||||
set_cpu_possible(i, true);
|
||||
}
|
||||
|
||||
void __init smp_prepare_cpus(unsigned int max_cpus)
|
||||
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
|
||||
{
|
||||
unsigned int ncores = get_core_count();
|
||||
unsigned int cpu = smp_processor_id();
|
||||
int i;
|
||||
|
||||
/* sanity check */
|
||||
if (ncores == 0) {
|
||||
printk(KERN_ERR
|
||||
"U8500: strange CM count of 0? Default to 1\n");
|
||||
ncores = 1;
|
||||
}
|
||||
|
||||
if (ncores > num_possible_cpus()) {
|
||||
printk(KERN_WARNING
|
||||
"U8500: no. of cores (%d) greater than configured "
|
||||
"maximum of %d - clipping\n",
|
||||
ncores, num_possible_cpus());
|
||||
ncores = num_possible_cpus();
|
||||
}
|
||||
|
||||
smp_store_cpu_info(cpu);
|
||||
|
||||
/*
|
||||
* are we trying to boot more cores than exist?
|
||||
*/
|
||||
if (max_cpus > ncores)
|
||||
max_cpus = ncores;
|
||||
|
||||
/*
|
||||
* Initialise the present map, which describes the set of CPUs
|
||||
* actually populated at the present time.
|
||||
|
@ -168,13 +154,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
|
|||
for (i = 0; i < max_cpus; i++)
|
||||
set_cpu_present(i, true);
|
||||
|
||||
if (max_cpus > 1) {
|
||||
/*
|
||||
* Enable the local timer or broadcast device for the
|
||||
* boot CPU, but only if we have more than one CPU.
|
||||
*/
|
||||
percpu_timer_setup();
|
||||
scu_enable(__io_address(UX500_SCU_BASE));
|
||||
wakeup_secondary();
|
||||
}
|
||||
scu_enable(__io_address(UX500_SCU_BASE));
|
||||
wakeup_secondary();
|
||||
}
|
||||
|
|
|
@@ -4,6 +4,7 @@ menu "Versatile platform type"
config ARCH_VERSATILE_PB
	bool "Support Versatile/PB platform"
	select CPU_ARM926T
	select MIGHT_HAVE_PCI
	default y
	help
	  Include support for the ARM(R) Versatile/PB platform.
@@ -31,8 +31,8 @@
#include <linux/amba/pl022.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/clkdev.h>

#include <asm/clkdev.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/leds.h>
@@ -46,7 +46,6 @@
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/mach/map.h>
#include <mach/clkdev.h>
#include <mach/hardware.h>
#include <mach/platform.h>
#include <asm/hardware/timer-sp.h>