Merge branch 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm: (416 commits)
  ARM: DMA: add support for DMA debugging
  ARM: PL011: add DMA burst threshold support for ST variants
  ARM: PL011: Add support for transmit DMA
  ARM: PL011: Ensure IRQs are disabled in UART interrupt handler
  ARM: PL011: Separate hardware FIFO size from TTY FIFO size
  ARM: PL011: Allow better handling of vendor data
  ARM: PL011: Ensure error flags are clear at startup
  ARM: PL011: include revision number in boot-time port printk
  ARM: vexpress: add sched_clock() for Versatile Express
  ARM i.MX53: Make MX53 EVK bootable
  ARM i.MX53: Some bug fix about MX53 MSL code
  ARM: 6607/1: sa1100: Update platform device registration
  ARM: 6606/1: sa1100: Fix platform device registration
  ARM i.MX51: rename IPU irqs
  ARM i.MX51: Add ipu clock support
  ARM: imx/mx27_3ds: Add PMIC support
  ARM: DMA: Replace page_to_dma()/dma_to_page() with pfn_to_dma()/dma_to_pfn()
  mx51: fix usb clock support
  MX51: Add support for usb host 2
  arch/arm/plat-mxc/ehci.c: fix errors/typos
  ...
This commit is contained in:
Commit
3c0cb7c31c
@@ -34,3 +34,5 @@ memory.txt
	- description of the virtual memory layout
nwfpe/
	- NWFPE floating point emulator documentation
swp_emulation
	- SWP/SWPB emulation handler/logging description
@@ -0,0 +1,27 @@
Software emulation of deprecated SWP instruction (CONFIG_SWP_EMULATE)
---------------------------------------------------------------------

ARMv6 architecture deprecates use of the SWP/SWPB instructions, and recommends
moving to the load-locked/store-conditional instructions LDREX and STREX.

ARMv7 multiprocessing extensions introduce the ability to disable these
instructions, triggering an undefined instruction exception when executed.
Trapped instructions are emulated using an LDREX/STREX or LDREXB/STREXB
sequence. If a memory access fault (an abort) occurs, a segmentation fault is
signalled to the triggering process.

/proc/cpu/swp_emulation holds some statistics/information, including the PID of
the last process to trigger the emulation to be invoked. For example:
---
Emulated SWP:		12
Emulated SWPB:		0
Aborted SWP{B}:		1
Last process:		314
---

NOTE: when accessing uncached shared regions, LDREX/STREX rely on an external
transaction monitoring block called a global monitor to maintain update
atomicity. If your system does not implement a global monitor, this option can
cause programs that perform SWP operations to uncached memory to deadlock, as
the STREX operation will always fail.
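
For illustration (a minimal sketch, not part of the original documentation):
the atomic swap performed by the deprecated "swp r0, r2, [r1]" can be written
as an LDREX/STREX retry loop along these lines:
---
1:	ldrex	r0, [r1]	@ load-exclusive the current value
	strex	r3, r2, [r1]	@ try to store the new value; r3 = 0 on success
	cmp	r3, #0		@ did another observer break the exclusivity?
	bne	1b		@ if so, retry until the store succeeds
---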

arch/arm/Kconfig | 126

@@ -2,6 +2,7 @@ config ARM
	bool
	default y
	select HAVE_AOUT
	select HAVE_DMA_API_DEBUG
	select HAVE_IDE
	select HAVE_MEMBLOCK
	select RTC_LIB

@@ -14,6 +15,7 @@ config ARM
	select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
	select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
	select HAVE_DYNAMIC_FTRACE if (!XIP_KERNEL)
	select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
	select HAVE_GENERIC_DMA_COHERENT
	select HAVE_KERNEL_GZIP
	select HAVE_KERNEL_LZO

@@ -23,6 +25,7 @@ config ARM
	select PERF_USE_VMALLOC
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V7))
	select HAVE_C_RECORDMCOUNT
	help
	  The ARM series is a line of low-power-consumption RISC chip designs
	  licensed by ARM Ltd and targeted at embedded applications and

@@ -34,9 +37,15 @@ config ARM
config HAVE_PWM
	bool

config MIGHT_HAVE_PCI
	bool

config SYS_SUPPORTS_APM_EMULATION
	bool

config HAVE_SCHED_CLOCK
	bool

config GENERIC_GPIO
	bool

@@ -221,7 +230,7 @@ config ARCH_INTEGRATOR
	bool "ARM Ltd. Integrator family"
	select ARM_AMBA
	select ARCH_HAS_CPUFREQ
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select ICST
	select GENERIC_CLOCKEVENTS
	select PLAT_VERSATILE

@@ -231,7 +240,8 @@ config ARCH_INTEGRATOR
config ARCH_REALVIEW
	bool "ARM Ltd. RealView family"
	select ARM_AMBA
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select HAVE_SCHED_CLOCK
	select ICST
	select GENERIC_CLOCKEVENTS
	select ARCH_WANT_OPTIONAL_GPIOLIB

@@ -245,7 +255,8 @@ config ARCH_VERSATILE
	bool "ARM Ltd. Versatile family"
	select ARM_AMBA
	select ARM_VIC
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select HAVE_SCHED_CLOCK
	select ICST
	select GENERIC_CLOCKEVENTS
	select ARCH_WANT_OPTIONAL_GPIOLIB

@@ -259,9 +270,10 @@ config ARCH_VEXPRESS
	select ARCH_WANT_OPTIONAL_GPIOLIB
	select ARM_AMBA
	select ARM_TIMER_SP804
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select GENERIC_CLOCKEVENTS
	select HAVE_CLK
	select HAVE_SCHED_CLOCK
	select ICST
	select PLAT_VERSATILE
	help

@@ -280,7 +292,7 @@ config ARCH_BCMRING
	depends on MMU
	select CPU_V6
	select ARM_AMBA
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select GENERIC_CLOCKEVENTS
	select ARCH_WANT_OPTIONAL_GPIOLIB
	help

@@ -298,6 +310,7 @@ config ARCH_CNS3XXX
	select CPU_V6
	select GENERIC_CLOCKEVENTS
	select ARM_GIC
	select MIGHT_HAVE_PCI
	select PCI_DOMAINS if PCI
	help
	  Support for Cavium Networks CNS3XXX platform.

@@ -327,7 +340,7 @@ config ARCH_EP93XX
	select CPU_ARM920T
	select ARM_AMBA
	select ARM_VIC
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select ARCH_REQUIRE_GPIOLIB
	select ARCH_HAS_HOLES_MEMORYMODEL
	select ARCH_USES_GETTIMEOFFSET

@@ -347,14 +360,22 @@ config ARCH_MXC
	bool "Freescale MXC/iMX-based"
	select GENERIC_CLOCKEVENTS
	select ARCH_REQUIRE_GPIOLIB
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	help
	  Support for Freescale MXC/iMX-based family of processors

config ARCH_MXS
	bool "Freescale MXS-based"
	select GENERIC_CLOCKEVENTS
	select ARCH_REQUIRE_GPIOLIB
	select COMMON_CLKDEV
	help
	  Support for Freescale MXS-based family of processors

config ARCH_STMP3XXX
	bool "Freescale STMP3xxx"
	select CPU_ARM926T
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select ARCH_REQUIRE_GPIOLIB
	select GENERIC_CLOCKEVENTS
	select USB_ARCH_HAS_EHCI

@@ -433,6 +454,8 @@ config ARCH_IXP4XX
	select CPU_XSCALE
	select GENERIC_GPIO
	select GENERIC_CLOCKEVENTS
	select HAVE_SCHED_CLOCK
	select MIGHT_HAVE_PCI
	select DMABOUNCE if PCI
	help
	  Support for Intel's IXP4XX (XScale) family of processors.

@@ -472,7 +495,7 @@ config ARCH_LPC32XX
	select HAVE_IDE
	select ARM_AMBA
	select USB_ARCH_HAS_OHCI
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select GENERIC_TIME
	select GENERIC_CLOCKEVENTS
	help

@@ -506,8 +529,9 @@ config ARCH_MMP
	bool "Marvell PXA168/910/MMP2"
	depends on MMU
	select ARCH_REQUIRE_GPIOLIB
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select GENERIC_CLOCKEVENTS
	select HAVE_SCHED_CLOCK
	select TICK_ONESHOT
	select PLAT_PXA
	select SPARSE_IRQ

@@ -539,7 +563,7 @@ config ARCH_W90X900
	bool "Nuvoton W90X900 CPU"
	select CPU_ARM926T
	select ARCH_REQUIRE_GPIOLIB
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select GENERIC_CLOCKEVENTS
	help
	  Support for Nuvoton (Winbond logic dept.) ARM9 processor,

@@ -553,18 +577,19 @@ config ARCH_W90X900
config ARCH_NUC93X
	bool "Nuvoton NUC93X CPU"
	select CPU_ARM926T
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	help
	  Support for Nuvoton (Winbond logic dept.) NUC93X MCU. The NUC93X is a
	  low-power and high performance MPEG-4/JPEG multimedia controller chip.

config ARCH_TEGRA
	bool "NVIDIA Tegra"
	select CLKDEV_LOOKUP
	select GENERIC_TIME
	select GENERIC_CLOCKEVENTS
	select GENERIC_GPIO
	select HAVE_CLK
	select COMMON_CLKDEV
	select HAVE_SCHED_CLOCK
	select ARCH_HAS_BARRIERS if CACHE_L2X0
	select ARCH_HAS_CPUFREQ
	help

@@ -574,7 +599,7 @@ config ARCH_TEGRA
config ARCH_PNX4008
	bool "Philips Nexperia PNX4008 Mobile"
	select CPU_ARM926T
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select ARCH_USES_GETTIMEOFFSET
	help
	  This enables support for Philips PNX4008 mobile platform.

@@ -584,9 +609,10 @@ config ARCH_PXA
	depends on MMU
	select ARCH_MTD_XIP
	select ARCH_HAS_CPUFREQ
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select ARCH_REQUIRE_GPIOLIB
	select GENERIC_CLOCKEVENTS
	select HAVE_SCHED_CLOCK
	select TICK_ONESHOT
	select PLAT_PXA
	select SPARSE_IRQ

@@ -635,6 +661,7 @@ config ARCH_SA1100
	select CPU_FREQ
	select GENERIC_CLOCKEVENTS
	select HAVE_CLK
	select HAVE_SCHED_CLOCK
	select TICK_ONESHOT
	select ARCH_REQUIRE_GPIOLIB
	help

@@ -761,7 +788,7 @@ config ARCH_TCC_926
	bool "Telechips TCC ARM926-based systems"
	select CPU_ARM926T
	select HAVE_CLK
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select GENERIC_CLOCKEVENTS
	help
	  Support for Telechips TCC ARM926-based systems.

@@ -781,11 +808,12 @@ config ARCH_U300
	bool "ST-Ericsson U300 Series"
	depends on MMU
	select CPU_ARM926T
	select HAVE_SCHED_CLOCK
	select HAVE_TCM
	select ARM_AMBA
	select ARM_VIC
	select GENERIC_CLOCKEVENTS
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select GENERIC_GPIO
	help
	  Support for ST-Ericsson U300 series mobile platforms.

@@ -795,8 +823,9 @@ config ARCH_U8500
	select CPU_V7
	select ARM_AMBA
	select GENERIC_CLOCKEVENTS
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select ARCH_REQUIRE_GPIOLIB
	select ARCH_HAS_CPUFREQ
	help
	  Support for ST-Ericsson's Ux500 architecture

@@ -805,7 +834,7 @@ config ARCH_NOMADIK
	select ARM_AMBA
	select ARM_VIC
	select CPU_ARM926T
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select GENERIC_CLOCKEVENTS
	select ARCH_REQUIRE_GPIOLIB
	help

@@ -817,7 +846,7 @@ config ARCH_DAVINCI
	select ARCH_REQUIRE_GPIOLIB
	select ZONE_DMA
	select HAVE_IDE
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select GENERIC_ALLOCATOR
	select ARCH_HAS_HOLES_MEMORYMODEL
	help

@@ -829,6 +858,7 @@ config ARCH_OMAP
	select ARCH_REQUIRE_GPIOLIB
	select ARCH_HAS_CPUFREQ
	select GENERIC_CLOCKEVENTS
	select HAVE_SCHED_CLOCK
	select ARCH_HAS_HOLES_MEMORYMODEL
	help
	  Support for TI's OMAP platform (OMAP1/2/3/4).

@@ -837,7 +867,7 @@ config PLAT_SPEAR
	bool "ST SPEAr"
	select ARM_AMBA
	select ARCH_REQUIRE_GPIOLIB
	select COMMON_CLKDEV
	select CLKDEV_LOOKUP
	select GENERIC_CLOCKEVENTS
	select HAVE_CLK
	help

@@ -902,6 +932,8 @@ source "arch/arm/mach-mv78xx0/Kconfig"

source "arch/arm/plat-mxc/Kconfig"

source "arch/arm/mach-mxs/Kconfig"

source "arch/arm/mach-netx/Kconfig"

source "arch/arm/mach-nomadik/Kconfig"

@@ -982,9 +1014,11 @@ config ARCH_ACORN
config PLAT_IOP
	bool
	select GENERIC_CLOCKEVENTS
	select HAVE_SCHED_CLOCK

config PLAT_ORION
	bool
	select HAVE_SCHED_CLOCK

config PLAT_PXA
	bool

@@ -999,8 +1033,8 @@ source arch/arm/mm/Kconfig

config IWMMXT
	bool "Enable iWMMXt support"
	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK
	default y if PXA27x || PXA3xx || ARCH_MMP
	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
	default y if PXA27x || PXA3xx || PXA95x || ARCH_MMP
	help
	  Enable support for iWMMXt context switching at run time if
	  running on a CPU that supports it.

@@ -1017,6 +1051,11 @@ config CPU_HAS_PMU
	default y
	bool

config MULTI_IRQ_HANDLER
	bool
	help
	  Allow each machine to specify its own IRQ handler at run time.

if !MMU
source "arch/arm/Kconfig-nommu"
endif

@@ -1164,7 +1203,7 @@ config ISA_DMA_API
	bool

config PCI
	bool "PCI support" if ARCH_INTEGRATOR_AP || ARCH_VERSATILE_PB || ARCH_IXP4XX || ARCH_KS8695 || MACH_ARMCORE || ARCH_CNS3XXX
	bool "PCI support" if MIGHT_HAVE_PCI
	help
	  Find out whether you have a PCI motherboard. PCI is the name of a
	  bus system, i.e. the way the CPU talks to the other stuff inside

@@ -1175,6 +1214,12 @@ config PCI_DOMAINS
	bool
	depends on PCI

config PCI_NANOENGINE
	bool "BSE nanoEngine PCI support"
	depends on SA1100_NANOENGINE
	help
	  Enable PCI on the BSE nanoEngine board.

config PCI_SYSCALL
	def_bool PCI

@@ -1205,10 +1250,11 @@ config SMP
	depends on EXPERIMENTAL
	depends on GENERIC_CLOCKEVENTS
	depends on REALVIEW_EB_ARM11MP || REALVIEW_EB_A9MP || \
		 MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 ||\
		 ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4
		 MACH_REALVIEW_PB11MP || MACH_REALVIEW_PBX || ARCH_OMAP4 || \
		 ARCH_S5PV310 || ARCH_TEGRA || ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || \
		 ARCH_MSM_SCORPIONMP
	select USE_GENERIC_SMP_HELPERS
	select HAVE_ARM_SCU
	select HAVE_ARM_SCU if !ARCH_MSM_SCORPIONMP
	help
	  This enables support for systems with more than one CPU. If you have
	  a system with only one CPU, like most personal computers, say N. If

@@ -1229,7 +1275,7 @@ config SMP
config SMP_ON_UP
	bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)"
	depends on EXPERIMENTAL
	depends on SMP && !XIP && !THUMB2_KERNEL
	depends on SMP && !XIP
	default y
	help
	  SMP kernels contain instructions which fail on non-SMP processors.

@@ -1248,6 +1294,7 @@ config HAVE_ARM_SCU
config HAVE_ARM_TWD
	bool
	depends on SMP
	select TICK_ONESHOT
	help
	  This option enables support for the ARM timer and watchdog unit

@@ -1283,6 +1330,7 @@ config NR_CPUS
config HOTPLUG_CPU
	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
	depends on SMP && HOTPLUG && EXPERIMENTAL
	depends on !ARCH_MSM
	help
	  Say Y here to experiment with turning CPUs off and on. CPUs
	  can be controlled through /sys/devices/system/cpu.

@@ -1291,7 +1339,7 @@ config LOCAL_TIMERS
	bool "Use local timer interrupts"
	depends on SMP
	default y
	select HAVE_ARM_TWD
	select HAVE_ARM_TWD if !ARCH_MSM_SCORPIONMP
	help
	  Enable support for local timers on SMP platforms, rather than the
	  legacy IPI broadcast method. Local timers allow the system

@@ -1310,7 +1358,7 @@ config HZ
	default 100

config THUMB2_KERNEL
	bool "Compile the kernel in Thumb-2 mode"
	bool "Compile the kernel in Thumb-2 mode (EXPERIMENTAL)"
	depends on CPU_V7 && !CPU_V6 && EXPERIMENTAL
	select AEABI
	select ARM_ASM_UNIFIED

@@ -1524,6 +1572,7 @@ config SECCOMP

config CC_STACKPROTECTOR
	bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
	depends on EXPERIMENTAL
	help
	  This option turns on the -fstack-protector GCC feature. This
	  feature puts, at the beginning of functions, a canary value on

@@ -1650,6 +1699,19 @@ config ATAGS_PROC
	  Should the atags used to boot the kernel be exported in an "atags"
	  file in procfs. Useful with kexec.

config CRASH_DUMP
	bool "Build kdump crash kernel (EXPERIMENTAL)"
	depends on EXPERIMENTAL
	help
	  Generate crash dump after being started by kexec. This should
	  normally only be set in special crash dump kernels which are
	  loaded in the main kernel with kexec-tools into a specially
	  reserved region and then later executed after a crash by
	  kdump/kexec. The crash dump kernel must be compiled to a
	  memory address not used by the main kernel.

	  For more details see Documentation/kdump/kdump.txt

config AUTO_ZRELADDR
	bool "Auto calculation of the decompressed kernel image address"
	depends on !ZBOOT_ROM && !ARCH_U300

@@ -1707,7 +1769,7 @@ config CPU_FREQ_S3C
	  Internal configuration node for common cpufreq on Samsung SoC

config CPU_FREQ_S3C24XX
	bool "CPUfreq driver for Samsung S3C24XX series CPUs"
	bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)"
	depends on ARCH_S3C2410 && CPU_FREQ && EXPERIMENTAL
	select CPU_FREQ_S3C
	help

@@ -1719,7 +1781,7 @@ config CPU_FREQ_S3C24XX
	  If in doubt, say N.

config CPU_FREQ_S3C24XX_PLL
	bool "Support CPUfreq changing of PLL frequency"
	bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)"
	depends on CPU_FREQ_S3C24XX && EXPERIMENTAL
	help
	  Compile in support for changing the PLL frequency from the
@@ -23,7 +23,7 @@ config STRICT_DEVMEM
config FRAME_POINTER
	bool
	depends on !THUMB2_KERNEL
	default y if !ARM_UNWIND
	default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
	help
	  If you say N here, the resulting kernel will be slightly smaller and
	  faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,

@@ -31,7 +31,7 @@ config FRAME_POINTER
	  reported is severely limited.

config ARM_UNWIND
	bool "Enable stack unwinding support"
	bool "Enable stack unwinding support (EXPERIMENTAL)"
	depends on AEABI && EXPERIMENTAL
	default y
	help
@@ -154,10 +154,11 @@ machine-$(CONFIG_ARCH_MSM) := msm
machine-$(CONFIG_ARCH_MV78XX0) := mv78xx0
machine-$(CONFIG_ARCH_MX1) := imx
machine-$(CONFIG_ARCH_MX2) := imx
machine-$(CONFIG_ARCH_MX25) := mx25
machine-$(CONFIG_ARCH_MX25) := imx
machine-$(CONFIG_ARCH_MX3) := mx3
machine-$(CONFIG_ARCH_MX5) := mx5
machine-$(CONFIG_ARCH_MXC91231) := mxc91231
machine-$(CONFIG_ARCH_MXS) := mxs
machine-$(CONFIG_ARCH_NETX) := netx
machine-$(CONFIG_ARCH_NOMADIK) := nomadik
machine-$(CONFIG_ARCH_NS9XXX) := ns9xxx
@@ -45,6 +45,10 @@ else
endif
endif

ifeq ($(CONFIG_ARCH_SHMOBILE),y)
OBJS += head-shmobile.o
endif

#
# We now have a PIC decompressor implementation. Decompressors running
# from RAM should not define ZTEXTADDR. Decompressors running directly
@@ -0,0 +1,53 @@
/*
 * The head-file for SH-Mobile ARM platforms
 *
 * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
 * Simon Horman <horms@verge.net.au>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifdef CONFIG_ZBOOT_ROM

	.section ".start", "ax"

	/* load board-specific initialization code */
#include <mach/zboot.h>

	b	1f
__atags:@ tag #1
	.long	12		@ tag->hdr.size = tag_size(tag_core);
	.long	0x54410001	@ tag->hdr.tag = ATAG_CORE;
	.long	0		@ tag->u.core.flags = 0;
	.long	0		@ tag->u.core.pagesize = 0;
	.long	0		@ tag->u.core.rootdev = 0;
	@ tag #2
	.long	8		@ tag->hdr.size = tag_size(tag_mem32);
	.long	0x54410002	@ tag->hdr.tag = ATAG_MEM;
	.long	CONFIG_MEMORY_SIZE	@ tag->u.mem.size = CONFIG_MEMORY_SIZE;
	.long	CONFIG_MEMORY_START	@ tag->u.mem.start = CONFIG_MEMORY_START;
	@ tag #3
	.long	0		@ tag->hdr.size = 0
	.long	0		@ tag->hdr.tag = ATAG_NONE;
1:

	/* Set board ID necessary for boot */
	ldr	r7, 1f		@ Set machine type register
	adr	r8, __atags	@ Set atag register
	b	2f

1 :	.long MACH_TYPE
2 :

#endif /* CONFIG_ZBOOT_ROM */
@@ -37,7 +37,3 @@ config SHARP_PARAM

config SHARP_SCOOP
	bool

config COMMON_CLKDEV
	bool
	select HAVE_CLK
@@ -17,3 +17,4 @@ obj-$(CONFIG_ARCH_IXP2000) += uengine.o
obj-$(CONFIG_ARCH_IXP23XX) += uengine.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_COMMON_CLKDEV) += clkdev.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
@@ -328,7 +328,7 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",

@@ -338,7 +338,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,

	return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(__dma_map_single);

/*
 * see if a mapped address was really a "safe" buffer and if so, copy

@@ -346,7 +346,7 @@ EXPORT_SYMBOL(dma_map_single);
 * the safe buffer. (basically return things back to the way they
 * should be)
 */
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",

@@ -354,9 +354,9 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(__dma_unmap_single);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",

@@ -372,7 +372,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,

	return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);
EXPORT_SYMBOL(__dma_map_page);

/*
 * see if a mapped address was really a "safe" buffer and if so, copy

@@ -380,7 +380,7 @@ EXPORT_SYMBOL(dma_map_page);
 * the safe buffer. (basically return things back to the way they
 * should be)
 */
void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir)
{
	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",

@@ -388,7 +388,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,

	unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page);
EXPORT_SYMBOL(__dma_unmap_page);

int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		unsigned long off, size_t sz, enum dma_data_direction dir)
@@ -35,6 +35,9 @@

static DEFINE_SPINLOCK(irq_controller_lock);

/* Address of GIC 0 CPU interface */
void __iomem *gic_cpu_base_addr __read_mostly;

struct gic_chip_data {
	unsigned int irq_offset;
	void __iomem *dist_base;

@@ -45,7 +48,7 @@ struct gic_chip_data {
#define MAX_GIC_NR	1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR];
static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;

static inline void __iomem *gic_dist_base(unsigned int irq)
{

@@ -213,21 +216,16 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
	set_irq_chained_handler(irq, gic_handle_cascade_irq);
}

void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
	unsigned int irq_start)
static void __init gic_dist_init(struct gic_chip_data *gic,
	unsigned int irq_start)
{
	unsigned int gic_irqs, irq_limit, i;
	void __iomem *base = gic->dist_base;
	u32 cpumask = 1 << smp_processor_id();

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;

	gic_data[gic_nr].dist_base = base;
	gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;

	writel(0, base + GIC_DIST_CTRL);

	/*

@@ -267,7 +265,7 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
	/*
	 * Limit number of interrupts registered to the platform maximum
	 */
	irq_limit = gic_data[gic_nr].irq_offset + gic_irqs;
	irq_limit = gic->irq_offset + gic_irqs;
	if (WARN_ON(irq_limit > NR_IRQS))
		irq_limit = NR_IRQS;

@@ -276,7 +274,7 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
	 */
	for (i = irq_start; i < irq_limit; i++) {
		set_irq_chip(i, &gic_chip);
		set_irq_chip_data(i, &gic_data[gic_nr]);
		set_irq_chip_data(i, gic);
		set_irq_handler(i, handle_level_irq);
		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
	}

@@ -284,19 +282,12 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
	writel(1, base + GIC_DIST_CTRL);
}

void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base;
	void __iomem *dist_base = gic->dist_base;
	void __iomem *base = gic->cpu_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data[gic_nr].dist_base;
	BUG_ON(!dist_base);

	gic_data[gic_nr].cpu_base = base;

	/*
	 * Deal with the banked PPI and SGI interrupts - disable all
	 * PPI interrupts, ensure all SGI interrupts are enabled.

@@ -314,6 +305,42 @@ void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
	writel(1, base + GIC_CPU_CTRL);
}

void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
	void __iomem *dist_base, void __iomem *cpu_base)
{
	struct gic_chip_data *gic;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
	gic->dist_base = dist_base;
	gic->cpu_base = cpu_base;
	gic->irq_offset = (irq_start - 1) & ~31;

	if (gic_nr == 0)
		gic_cpu_base_addr = cpu_base;

	gic_dist_init(gic, irq_start);
	gic_cpu_init(gic);
}

void __cpuinit gic_secondary_init(unsigned int gic_nr)
{
	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic_cpu_init(&gic_data[gic_nr]);
}

void __cpuinit gic_enable_ppi(unsigned int irq)
{
	unsigned long flags;

	local_irq_save(flags);
	irq_to_desc(irq)->status |= IRQ_NOPROBE;
	gic_unmask_irq(irq);
	local_irq_restore(flags);
}

#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
@@ -1,5 +1,5 @@
/*
 * linux/arch/arm/plat-versatile/timer-sp.c
 * linux/arch/arm/common/timer-sp.c
 *
 * Copyright (C) 1999 - 2003 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd

@@ -26,8 +26,6 @@

#include <asm/hardware/arm_timer.h>

#include <plat/timer-sp.h>

/*
 * These timers are currently always setup to be clocked at 1MHz.
 */

@@ -46,7 +44,6 @@ static struct clocksource clocksource_sp804 = {
	.rating = 200,
	.read = sp804_read,
	.mask = CLOCKSOURCE_MASK(32),
	.shift = 20,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

@@ -63,8 +60,7 @@ void __init sp804_clocksource_init(void __iomem *base)
	writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
		clksrc_base + TIMER_CTRL);

	cs->mult = clocksource_khz2mult(TIMER_FREQ_KHZ, cs->shift);
	clocksource_register(cs);
	clocksource_register_khz(cs, TIMER_FREQ_KHZ);
}
@@ -84,6 +84,7 @@ CONFIG_SERIAL_IMX_CONSOLE=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_IMX=y
CONFIG_SPI=y
CONFIG_W1=y
CONFIG_W1_MASTER_MXC=y
CONFIG_W1_SLAVE_THERM=y
@@ -18,6 +18,7 @@
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>

/*
 * Endian independent macros for shifting bytes within registers.

@@ -157,16 +158,24 @@
#ifdef CONFIG_SMP
#define ALT_SMP(instr...) \
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...) \
	.pushsection ".alt.smp.init", "a" ;\
	.long	9998b ;\
	instr ;\
9997:	instr ;\
	.if . - 9997b != 4 ;\
	.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif ;\
	.popsection
#define ALT_UP_B(label) \
	.equ	up_b_offset, label - 9998b ;\
	.pushsection ".alt.smp.init", "a" ;\
	.long	9998b ;\
	b	. + up_b_offset ;\
	W(b)	. + up_b_offset ;\
	.popsection
#else
#define ALT_SMP(instr...)

@@ -177,16 +186,24 @@
/*
 * SMP data memory barrier
 */
	.macro	smp_dmb
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb)
	.else
	ALT_SMP(W(dmb))
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

@@ -206,12 +223,12 @@
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort
	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
9999:
	.if	\inc == 1
	\instr\cond\()bt \reg, [\ptr, #\off]
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()t \reg, [\ptr, #\off]
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

@@ -246,13 +263,13 @@

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()bt \reg, [\ptr], #\inc
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()t \reg, [\ptr], #\inc
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif
@@ -23,4 +23,6 @@
#define ARCH_SLAB_MINALIGN 8
#endif

#define __read_mostly __attribute__((__section__(".data..read_mostly")))

#endif
@@ -12,23 +12,13 @@
#ifndef __ASM_CLKDEV_H
#define __ASM_CLKDEV_H

struct clk;
struct device;
#include <linux/slab.h>

struct clk_lookup {
	struct list_head node;
	const char *dev_id;
	const char *con_id;
	struct clk *clk;
};
#include <mach/clkdev.h>

struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
	const char *dev_fmt, ...);

void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);

void clkdev_add_table(struct clk_lookup *, size_t);
int clk_add_alias(const char *, const char *, char *, struct device *);
static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
{
	return kzalloc(size, GFP_KERNEL);
}

#endif
@@ -5,24 +5,29 @@

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return pfn_to_page(__bus_to_pfn(addr));
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)

@@ -35,14 +40,14 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_page_to_dma(dev, page);
	return __arch_pfn_to_dma(dev, pfn);
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_page(dev, addr);
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)

@@ -293,13 +298,13 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
/*
 * The DMA API, implemented by dmabounce.c. See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*

@@ -323,6 +328,34 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
}


static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_cpu_to_dev(cpu_addr, size, dir);
	return virt_to_dma(dev, cpu_addr);
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices

@@ -340,11 +373,16 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	__dma_single_cpu_to_dev(cpu_addr, size, dir);
	addr = __dma_map_single(dev, cpu_addr, size, dir);
	debug_dma_map_page(dev, virt_to_page(cpu_addr),
			(unsigned long)cpu_addr & ~PAGE_MASK, size,
			dir, addr, true);

	return virt_to_dma(dev, cpu_addr);
	return addr;
}

/**

@@ -364,11 +402,14 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	__dma_page_cpu_to_dev(page, offset, size, dir);
	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return page_to_dma(dev, page) + offset;
	return addr;
}

/**

@@ -388,7 +429,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
	debug_dma_unmap_page(dev, handle, size, dir, true);
	__dma_unmap_single(dev, handle, size, dir);
}

/**

@@ -408,10 +450,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
		size, dir);
	debug_dma_unmap_page(dev, handle, size, dir, false);
	__dma_unmap_page(dev, handle, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_sync_single_range_for_cpu

@@ -437,6 +478,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

@@ -449,6 +492,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;
@@ -45,13 +45,17 @@
 */
#define DOMAIN_NOACCESS	0
#define DOMAIN_CLIENT	1
#ifdef CONFIG_CPU_USE_DOMAINS
#define DOMAIN_MANAGER	3
#else
#define DOMAIN_MANAGER	1
#endif

#define domain_val(dom,type)	((type) << (2*(dom)))

#ifndef __ASSEMBLY__

#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_USE_DOMAINS
#define set_domain(x) \
	do { \
	__asm__ __volatile__( \

@@ -74,5 +78,28 @@
#define modify_domain(dom,type)	do { } while (0)
#endif

/*
 * Generate the T (user) versions of the LDR/STR and related
 * instructions (inline assembly)
 */
#ifdef CONFIG_CPU_USE_DOMAINS
#define T(instr)	#instr "t"
#else
#define T(instr)	#instr
#endif
#endif /* !__ASSEMBLY__ */

#else /* __ASSEMBLY__ */

/*
 * Generate the T (user) versions of the LDR/STR and related
 * instructions
 */
#ifdef CONFIG_CPU_USE_DOMAINS
#define T(instr)	instr ## t
#else
#define T(instr)	instr
#endif

#endif /* __ASSEMBLY__ */

#endif /* !__ASM_PROC_DOMAIN_H */

@@ -99,6 +99,8 @@ struct elf32_hdr;
extern int elf_check_arch(const struct elf32_hdr *);
#define elf_check_arch elf_check_arch

#define vmcore_elf64_check_arch(x) (0)

extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
@@ -0,0 +1,44 @@
/*
 * Interrupt handling. Preserves r7, r8, r9
 */
	.macro	arch_irq_handler_default
	get_irqnr_preamble r5, lr
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, BSYM(1b)
	bne	asm_do_IRQ

#ifdef CONFIG_SMP
	/*
	 * XXX
	 *
	 * this macro assumes that irqstat (r6) and base (r5) are
	 * preserved from get_irqnr_and_base above
	 */
	ALT_SMP(test_for_ipi r0, r6, r5, lr)
	ALT_UP_B(9997f)
	movne	r1, sp
	adrne	lr, BSYM(1b)
	bne	do_IPI

#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, BSYM(1b)
	bne	do_local_timer
#endif
#endif
9997:
	.endm

	.macro	arch_irq_handler, symbol_name
	.align	5
	.global \symbol_name
\symbol_name:
	mov	r4, lr
	arch_irq_handler_default
	mov	pc, r4
	.endm
@@ -13,12 +13,13 @@
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
#include <asm/domain.h>

#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
	__asm__ __volatile__( \
	"1: ldrt %1, [%2]\n" \
	"1: " T(ldr) " %1, [%2]\n" \
	" " insn "\n" \
	"2: strt %0, [%2]\n" \
	"2: " T(str) " %0, [%2]\n" \
	" mov %0, #0\n" \
	"3:\n" \
	" .pushsection __ex_table,\"a\"\n" \

@@ -97,10 +98,10 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
	pagefault_disable();	/* implies preempt_disable() */

	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
	"1: ldrt %0, [%3]\n"
	"1: " T(ldr) " %0, [%3]\n"
	" teq %0, %1\n"
	" it eq @ explicit IT needed for the 2b label\n"
	"2: streqt %2, [%3]\n"
	"2: " T(streq) " %2, [%3]\n"
	"3:\n"
	" .pushsection __ex_table,\"a\"\n"
	" .align 3\n"
@@ -5,13 +5,31 @@
#include <linux/threads.h>
#include <asm/irq.h>

#define NR_IPI	5

typedef struct {
	unsigned int __softirq_pending;
#ifdef CONFIG_LOCAL_TIMERS
	unsigned int local_timer_irqs;
#endif
#ifdef CONFIG_SMP
	unsigned int ipi_irqs[NR_IPI];
#endif
} ____cacheline_aligned irq_cpustat_t;

#include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */

#define __inc_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)++
#define __get_irq_stat(cpu, member)	__IRQ_STAT(cpu, member)

#ifdef CONFIG_SMP
u64 smp_irq_stat_cpu(unsigned int cpu);
#else
#define smp_irq_stat_cpu(cpu)	0
#endif

#define arch_irq_stat_cpu	smp_irq_stat_cpu

#if NR_IRQS > 512
#define HARDIRQ_BITS	10
#elif NR_IRQS > 256
@@ -0,0 +1,75 @@
/*
 * arch/arm/include/asm/hardware/entry-macro-gic.S
 *
 * Low-level IRQ helper macros for GIC
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <asm/hardware/gic.h>

#ifndef HAVE_GET_IRQNR_PREAMBLE
	.macro	get_irqnr_preamble, base, tmp
	ldr	\base, =gic_cpu_base_addr
	ldr	\base, [\base]
	.endm
#endif

/*
 * The interrupt numbering scheme is defined in the
 * interrupt controller spec. To wit:
 *
 * Interrupts 0-15 are IPI
 * 16-28 are reserved
 * 29-31 are local. We allow 30 to be used for the watchdog.
 * 32-1020 are global
 * 1021-1022 are reserved
 * 1023 is "spurious" (no interrupt)
 *
 * For now, we ignore all local interrupts so only return an interrupt if it's
 * between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
 *
 * A simple read from the controller will tell us the number of the highest
 * priority enabled interrupt. We then just need to check whether it is in the
 * valid range for an IRQ (30-1020 inclusive).
 */

	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp

	ldr	\irqstat, [\base, #GIC_CPU_INTACK]
	/* bits 12-10 = src CPU, 9-0 = int # */

	ldr	\tmp, =1021
	bic	\irqnr, \irqstat, #0x1c00
	cmp	\irqnr, #29
	cmpcc	\irqnr, \irqnr
	cmpne	\irqnr, \tmp
	cmpcs	\irqnr, \irqnr
	.endm

/* We assume that irqstat (the raw value of the IRQ acknowledge
 * register) is preserved from the macro above.
 * If there is an IPI, we immediately signal end of interrupt on the
 * controller, since this requires the original irqstat value which
 * we won't easily be able to recreate later.
 */

	.macro	test_for_ipi, irqnr, irqstat, base, tmp
	bic	\irqnr, \irqstat, #0x1c00
	cmp	\irqnr, #16
	strcc	\irqstat, [\base, #GIC_CPU_EOI]
	cmpcs	\irqnr, \irqnr
	.endm

/* As above, this assumes that irqstat and base are preserved.. */

	.macro	test_for_ltirq, irqnr, irqstat, base, tmp
	bic	\irqnr, \irqstat, #0x1c00
	mov	\tmp, #0
	cmp	\irqnr, #29
	moveq	\tmp, #1
	streq	\irqstat, [\base, #GIC_CPU_EOI]
	cmp	\tmp, #0
	.endm
@@ -33,10 +33,13 @@
#define GIC_DIST_SOFTINT	0xf00

#ifndef __ASSEMBLY__
void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start);
void gic_cpu_init(unsigned int gic_nr, void __iomem *base);
extern void __iomem *gic_cpu_base_addr;

void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
void gic_secondary_init(unsigned int);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
void gic_enable_ppi(unsigned int);
#endif

#endif
@@ -20,8 +20,8 @@ struct arch_hw_breakpoint_ctrl {
struct arch_hw_breakpoint {
	u32 address;
	u32 trigger;
	struct perf_event *suspended_wp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct arch_hw_breakpoint_ctrl step_ctrl;
	struct arch_hw_breakpoint_ctrl ctrl;
};

static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
@@ -241,18 +241,15 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 *
 */
#ifndef __arch_ioremap
#define ioremap(cookie,size)		__arm_ioremap(cookie, size, MT_DEVICE)
#define ioremap_nocache(cookie,size)	__arm_ioremap(cookie, size, MT_DEVICE)
#define ioremap_cached(cookie,size)	__arm_ioremap(cookie, size, MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size)		__arm_ioremap(cookie, size, MT_DEVICE_WC)
#define iounmap(cookie)			__iounmap(cookie)
#else
#define __arch_ioremap			__arm_ioremap
#define __arch_iounmap			__iounmap
#endif

#define ioremap(cookie,size)		__arch_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_nocache(cookie,size)	__arch_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_cached(cookie,size)	__arch_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size)		__arch_ioremap((cookie), (size), MT_DEVICE_WC)
#define iounmap(cookie)			__arch_iounmap(cookie)
#endif
#define iounmap				__arch_iounmap

/*
 * io{read,write}{8,16,32} macros
@@ -33,10 +33,20 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
	} else {
		__asm__ __volatile__ ("stmia %0, {r0 - r15}"
				      : : "r" (&newregs->ARM_r0));
		__asm__ __volatile__ ("mrs %0, cpsr"
				      : "=r" (newregs->ARM_cpsr));
		__asm__ __volatile__ (
			"stmia %[regs_base], {r0-r12}\n\t"
			"mov %[_ARM_sp], sp\n\t"
			"str lr, %[_ARM_lr]\n\t"
			"adr %[_ARM_pc], 1f\n\t"
			"mrs %[_ARM_cpsr], cpsr\n\t"
		"1:"
			: [_ARM_pc] "=r" (newregs->ARM_pc),
			  [_ARM_cpsr] "=r" (newregs->ARM_cpsr),
			  [_ARM_sp] "=r" (newregs->ARM_sp),
			  [_ARM_lr] "=o" (newregs->ARM_lr)
			: [regs_base] "r" (&newregs->ARM_r0)
			: "memory"
		);
	}
}
@@ -30,7 +30,6 @@ asmlinkage void do_local_timer(struct pt_regs *);
#include "smp_twd.h"

#define local_timer_ack()	twd_timer_ack()
#define local_timer_stop()	twd_timer_stop()

#else

@@ -40,11 +39,6 @@ asmlinkage void do_local_timer(struct pt_regs *);
 */
int local_timer_ack(void);

/*
 * Stop a local timer interrupt.
 */
void local_timer_stop(void);

#endif

/*

@@ -52,12 +46,6 @@ void local_timer_stop(void);
 */
void local_timer_setup(struct clock_event_device *);

#else

static inline void local_timer_stop(void)
{
}

#endif

#endif
@@ -37,11 +37,20 @@ struct machine_desc {
				 struct meminfo *);
	void	(*reserve)(void);	/* reserve mem blocks */
	void	(*map_io)(void);	/* IO mapping function */
	void	(*init_early)(void);
	void	(*init_irq)(void);
	struct sys_timer *timer;	/* system tick timer */
	void	(*init_machine)(void);
#ifdef CONFIG_MULTI_IRQ_HANDLER
	void	(*handle_irq)(struct pt_regs *);
#endif
};

/*
 * Current machine - only accessible during boot.
 */
extern struct machine_desc *machine_desc;

/*
 * Set of macros to define architecture features. This is built into
 * a table by the linker.
@@ -17,10 +17,12 @@ struct seq_file;
/*
 * This is internal. Do not use it.
 */
extern unsigned int arch_nr_irqs;
extern void (*init_arch_irq)(void);
extern void init_FIQ(void);
extern int show_fiq_list(struct seq_file *, void *);
extern int show_fiq_list(struct seq_file *, int);

#ifdef CONFIG_MULTI_IRQ_HANDLER
extern void (*handle_arch_irq)(struct pt_regs *);
#endif

/*
 * This is for easy migration, but should be changed in the source
@@ -43,7 +43,6 @@ struct sys_timer {
#endif
};

extern struct sys_timer *system_timer;
extern void timer_tick(void);

#endif
@@ -8,11 +8,6 @@
struct unwind_table;

#ifdef CONFIG_ARM_UNWIND
struct arm_unwind_mapping {
	Elf_Shdr *unw_sec;
	Elf_Shdr *sec_text;
	struct unwind_table *unwind;
};
enum {
	ARM_SEC_INIT,
	ARM_SEC_DEVINIT,

@@ -21,14 +16,14 @@ enum {
	ARM_SEC_DEVEXIT,
	ARM_SEC_MAX,
};
struct mod_arch_specific {
	struct arm_unwind_mapping map[ARM_SEC_MAX];
};
#else
struct mod_arch_specific {
};
#endif

struct mod_arch_specific {
#ifdef CONFIG_ARM_UNWIND
	struct unwind_table *unwind[ARM_SEC_MAX];
#endif
};

/*
 * Include the ARM architecture version.
 */
@@ -151,13 +151,15 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);

typedef unsigned long pteval_t;

#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { pteval_t pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd[2]; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

@@ -175,7 +177,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
/*
 * .. while these make it easier on the compiler
 */
typedef unsigned long pte_t;
typedef pteval_t pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t[2];
typedef unsigned long pgprot_t;
@@ -30,14 +30,16 @@
#define pmd_free(mm, pmd)		do { } while (0)
#define pgd_populate(mm,pmd,pte)	BUG()

extern pgd_t *get_pgd_slow(struct mm_struct *mm);
extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);

#define pgd_alloc(mm)			get_pgd_slow(mm)
#define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)

static inline void clean_pte_table(pte_t *pte)
{
	clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE);
}

/*
 * Allocate one PTE table.
 *

@@ -45,14 +47,14 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 * into one table thus:
 *
 *  +------------+
 *  |  h/w pt 0  |
 *  +------------+
 *  |  h/w pt 1  |
 *  +------------+
 *  | Linux pt 0 |
 *  +------------+
 *  | Linux pt 1 |
 *  +------------+
 *  |  h/w pt 0  |
 *  +------------+
 *  |  h/w pt 1  |
 *  +------------+
 */
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)

@@ -60,10 +62,8 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
	pte_t *pte;

	pte = (pte_t *)__get_free_page(PGALLOC_GFP);
	if (pte) {
		clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
		pte += PTRS_PER_PTE;
	}
	if (pte)
		clean_pte_table(pte);

	return pte;
}

@@ -79,10 +79,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
	pte = alloc_pages(PGALLOC_GFP, 0);
#endif
	if (pte) {
		if (!PageHighMem(pte)) {
			void *page = page_address(pte);
			clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
		}
		if (!PageHighMem(pte))
			clean_pte_table(page_address(pte));
		pgtable_page_ctor(pte);
	}

@@ -94,10 +92,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	if (pte) {
		pte -= PTRS_PER_PTE;
	if (pte)
		free_page((unsigned long)pte);
	}
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)

@@ -106,8 +102,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
	__free_page(pte);
}

static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
				  unsigned long prot)
{
	unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot;
	pmdp[0] = __pmd(pmdval);
	pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
	flush_pmd_entry(pmdp);

@@ -122,20 +120,16 @@ static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
	unsigned long pte_ptr = (unsigned long)ptep;

	/*
	 * The pmd must be loaded with the physical
	 * address of the PTE table
	 * The pmd must be loaded with the physical address of the PTE table
	 */
	pte_ptr -= PTRS_PER_PTE * sizeof(void *);
	__pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
	__pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE);
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
	__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
	__pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
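For reference, the arithmetic in the new __pmd_populate() is easy to check in isolation: the two hardware PMD entries point at the two contiguous 1KB hardware tables that sit PTE_HWTABLE_OFF bytes into the PTE page. A user-space sketch (the _PAGE_KERNEL_TABLE value below is a placeholder, not the kernel's):

#include <assert.h>
#include <stdint.h>

#define PTE_HWTABLE_OFF		2048u
#define _PAGE_KERNEL_TABLE	0x11u	/* illustrative protection bits */

int main(void)
{
	uint32_t pte_phys = 0x80304000;	/* hypothetical PTE table page */
	uint32_t pmdval = (pte_phys + PTE_HWTABLE_OFF) | _PAGE_KERNEL_TABLE;

	/* pmdp[0] and pmdp[1] cover the two contiguous 1KB h/w tables */
	uint32_t pmd0 = pmdval;
	uint32_t pmd1 = pmdval + 256 * sizeof(uint32_t);

	assert(pmd1 - pmd0 == 1024);
	return 0;
}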
@@ -10,6 +10,7 @@
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>

@@ -54,7 +55,7 @@
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.) The second level contains two
 * hardware PTE tables arranged contiguously, followed by Linux versions
 * hardware PTE tables arranged contiguously, preceded by Linux versions
 * which contain the state information Linux needs. We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
@@ -62,15 +63,15 @@
 *
 *    pgd             pte
 * |        |
 * +--------+ +0
 * |        |-----> +------------+ +0
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +1024
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +2048
 * +--------+
 * |        |       +------------+ +0
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +3072
 * +--------+       | Linux pt 1 |
 * |        |       +------------+ +1024
 * +--------+ +0    | Linux pt 1 |
 * |        |-----> +------------+ +2048
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +3072
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
@@ -102,6 +103,10 @@
#define PTRS_PER_PMD		1
#define PTRS_PER_PGD		2048

#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(pte_t))
#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(u32))

/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
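Worked through with the values this header uses elsewhere (PTRS_PER_PTE == 512, 4-byte entries), the new constants put the Linux tables in the first 2KB of the page and the hardware tables at +2048. A standalone check:

#include <assert.h>
#include <stdint.h>

#define PTRS_PER_PTE		512
#define PTE_HWTABLE_PTRS	(PTRS_PER_PTE)
#define PTE_HWTABLE_OFF		(PTE_HWTABLE_PTRS * sizeof(uint32_t))	/* pte_t is 32-bit here */
#define PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(uint32_t))

int main(void)
{
	/* Linux PTEs occupy [0, 2048); the h/w tables start at +2048 */
	assert(PTE_HWTABLE_OFF == 2048);
	assert(PTE_HWTABLE_SIZE == 2048);
	return 0;
}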
@@ -112,13 +117,13 @@
#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)
#endif /* !__ASSEMBLY__ */

#define PMD_SIZE		(1UL << PMD_SHIFT)
@@ -133,8 +138,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 */
#define FIRST_USER_ADDRESS	PAGE_SIZE

#define FIRST_USER_PGD_NR	1
#define USER_PTRS_PER_PGD	((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * section address mask and size definitions.
@@ -161,30 +165,30 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 * The PTE table pointer refers to the hardware entries; the "Linux"
 * entries are stored 1024 bytes below.
 */
#define L_PTE_PRESENT		(1 << 0)
#define L_PTE_YOUNG		(1 << 1)
#define L_PTE_FILE		(1 << 2)	/* only when !PRESENT */
#define L_PTE_DIRTY		(1 << 6)
#define L_PTE_WRITE		(1 << 7)
#define L_PTE_USER		(1 << 8)
#define L_PTE_EXEC		(1 << 9)
#define L_PTE_SHARED		(1 << 10)	/* shared(v6), coherent(xsc3) */
#define L_PTE_PRESENT		(_AT(pteval_t, 1) << 0)
#define L_PTE_YOUNG		(_AT(pteval_t, 1) << 1)
#define L_PTE_FILE		(_AT(pteval_t, 1) << 2)	/* only when !PRESENT */
#define L_PTE_DIRTY		(_AT(pteval_t, 1) << 6)
#define L_PTE_RDONLY		(_AT(pteval_t, 1) << 7)
#define L_PTE_USER		(_AT(pteval_t, 1) << 8)
#define L_PTE_XN		(_AT(pteval_t, 1) << 9)
#define L_PTE_SHARED		(_AT(pteval_t, 1) << 10)	/* shared(v6), coherent(xsc3) */

/*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
 */
#define L_PTE_MT_UNCACHED	(0x00 << 2)	/* 0000 */
#define L_PTE_MT_BUFFERABLE	(0x01 << 2)	/* 0001 */
#define L_PTE_MT_WRITETHROUGH	(0x02 << 2)	/* 0010 */
#define L_PTE_MT_WRITEBACK	(0x03 << 2)	/* 0011 */
#define L_PTE_MT_MINICACHE	(0x06 << 2)	/* 0110 (sa1100, xscale) */
#define L_PTE_MT_WRITEALLOC	(0x07 << 2)	/* 0111 */
#define L_PTE_MT_DEV_SHARED	(0x04 << 2)	/* 0100 */
#define L_PTE_MT_DEV_NONSHARED	(0x0c << 2)	/* 1100 */
#define L_PTE_MT_DEV_WC		(0x09 << 2)	/* 1001 */
#define L_PTE_MT_DEV_CACHED	(0x0b << 2)	/* 1011 */
#define L_PTE_MT_MASK		(0x0f << 2)
#define L_PTE_MT_UNCACHED	(_AT(pteval_t, 0x00) << 2)	/* 0000 */
#define L_PTE_MT_BUFFERABLE	(_AT(pteval_t, 0x01) << 2)	/* 0001 */
#define L_PTE_MT_WRITETHROUGH	(_AT(pteval_t, 0x02) << 2)	/* 0010 */
#define L_PTE_MT_WRITEBACK	(_AT(pteval_t, 0x03) << 2)	/* 0011 */
#define L_PTE_MT_MINICACHE	(_AT(pteval_t, 0x06) << 2)	/* 0110 (sa1100, xscale) */
#define L_PTE_MT_WRITEALLOC	(_AT(pteval_t, 0x07) << 2)	/* 0111 */
#define L_PTE_MT_DEV_SHARED	(_AT(pteval_t, 0x04) << 2)	/* 0100 */
#define L_PTE_MT_DEV_NONSHARED	(_AT(pteval_t, 0x0c) << 2)	/* 1100 */
#define L_PTE_MT_DEV_WC		(_AT(pteval_t, 0x09) << 2)	/* 1001 */
#define L_PTE_MT_DEV_CACHED	(_AT(pteval_t, 0x0b) << 2)	/* 1011 */
#define L_PTE_MT_MASK		(_AT(pteval_t, 0x0f) << 2)

#ifndef __ASSEMBLY__

@@ -201,23 +205,44 @@ extern pgprot_t pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		pgprot_user
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_KERNEL		pgprot_kernel
#define PAGE_KERNEL_EXEC	_MOD_PROT(pgprot_kernel, L_PTE_EXEC)
#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif

#endif /* __ASSEMBLY__ */
@@ -255,94 +280,31 @@ extern pgprot_t pgprot_kernel;
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)

#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	(__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))
#define pte_offset_kernel(dir,addr)	(pmd_page_vaddr(*(dir)) + __pte_index(addr))
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#define pte_offset_map(dir,addr)	(__pte_map(dir) + __pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)
/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)

#ifndef CONFIG_HIGHPTE
#define __pte_map(dir)		pmd_page_vaddr(*(dir))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(dir)		((pte_t *)kmap_atomic(pmd_page(*(dir))) + PTRS_PER_PTE)
#define __pte_unmap(pte)	kunmap_atomic((pte - PTRS_PER_PTE))
#endif
#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (addr >= TASK_SIZE)
		set_pte_ext(ptep, pteval, 0);
	else {
		__sync_icache_dcache(pteval);
		set_pte_ext(ptep, pteval, PTE_EXT_NG);
	}
}
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(pte_val(pte) & L_PTE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)		(pte_val(pte) & L_PTE_EXEC)
#define pte_special(pte)	(0)
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)

#define pte_present_user(pte) \
	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
	 (L_PTE_PRESENT | L_PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)
#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_BUFFERABLE)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK|L_PTE_EXEC, L_PTE_MT_UNCACHED)
#endif
/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))

#define pmd_none(pmd)		(!pmd_val(pmd))
#define pmd_present(pmd)	(pmd_val(pmd))
@@ -364,12 +326,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	unsigned long ptr;

	ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
	ptr += PTRS_PER_PTE * sizeof(void *);

	return __va(ptr);
	return __va(pmd_val(pmd) & PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
@@ -377,46 +334,81 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end)	(end)

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd)		(0)
#define pgd_bad(pgd)		(0)
#define pgd_present(pgd)	(1)
#define pgd_clear(pgdp)		do { } while (0)
#define set_pgd(pgd,pgdp)	do { } while (0)
#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd)		pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte)	do { } while (0)
#else
#define __pte_map(pmd)		(pte_t *)kmap_atomic(pmd_page(*(pmd)))
#define __pte_unmap(pte)	kunmap_atomic(pte)
#endif

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		((addr) >> PGDIR_SHIFT)
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pgd_offset(mm, addr)	((mm)->pgd + pgd_index(addr))
#define pte_offset_kernel(pmd,addr)	(pmd_page_vaddr(*(pmd)) + pte_index(addr))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
#define pte_offset_map(pmd,addr)	(__pte_map(pmd) + pte_index(addr))
#define pte_unmap(pte)			__pte_unmap(pte)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr)	((pmd_t *)(dir))
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

/* Find an entry in the third-level page table.. */
#define __pte_index(addr)	(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	if (addr >= TASK_SIZE)
		set_pte_ext(ptep, pteval, 0);
	else {
		__sync_icache_dcache(pteval);
		set_pte_ext(ptep, pteval, PTE_EXT_NG);
	}
}

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_val(pte) & L_PTE_PRESENT)
#define pte_write(pte)		(!(pte_val(pte) & L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_val(pte) & L_PTE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & L_PTE_YOUNG)
#define pte_exec(pte)		(!(pte_val(pte) & L_PTE_XN))
#define pte_special(pte)	(0)

#define pte_present_user(pte) \
	((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
	 (L_PTE_PRESENT | L_PTE_USER))

#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY);
PTE_BIT_FUNC(mkwrite,   &= ~L_PTE_RDONLY);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);

static inline pte_t pte_mkspecial(pte_t pte) { return pte; }

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry. Swap entries are stored in the Linux
 * page tables as follows:
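Note the inverted sense of the new bits: pte_wrprotect() now sets L_PTE_RDONLY where it used to clear L_PTE_WRITE, and pte_write() tests for the bit being clear. A user-space sketch of the polarity, using the bit value defined above:

#include <assert.h>
#include <stdint.h>

#define L_PTE_RDONLY	(1u << 7)

static uint32_t pte_wrprotect(uint32_t pte)	{ return pte | L_PTE_RDONLY; }
static uint32_t pte_mkwrite(uint32_t pte)	{ return pte & ~L_PTE_RDONLY; }
static int pte_write(uint32_t pte)		{ return !(pte & L_PTE_RDONLY); }

int main(void)
{
	uint32_t pte = 0;	/* writable by default: RDONLY clear */

	assert(pte_write(pte));
	assert(!pte_write(pte_wrprotect(pte)));
	assert(pte_write(pte_mkwrite(pte_wrprotect(pte))));
	return 0;
}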
@@ -481,6 +473,9 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#define pgtable_cache_init() do { } while (0)

void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
void identity_mapping_del(pgd_t *, unsigned long, unsigned long);

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */
@@ -0,0 +1,118 @@
/*
 * sched_clock.h: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef ASM_SCHED_CLOCK
#define ASM_SCHED_CLOCK

#include <linux/kernel.h>
#include <linux/types.h>

struct clock_data {
	u64 epoch_ns;
	u32 epoch_cyc;
	u32 epoch_cyc_copy;
	u32 mult;
	u32 shift;
};

#define DEFINE_CLOCK_DATA(name)	struct clock_data name

static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
	return (cyc * mult) >> shift;
}

/*
 * Atomically update the sched_clock epoch. Your update callback will
 * be called from a timer before the counter wraps - read the current
 * counter value, and call this function to safely move the epochs
 * forward. Only use this from the update callback.
 */
static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
{
	unsigned long flags;
	u64 ns = cd->epoch_ns +
		cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);

	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_fixed_sched_clock().
	 */
	raw_local_irq_save(flags);
	cd->epoch_cyc = cyc;
	smp_wmb();
	cd->epoch_ns = ns;
	smp_wmb();
	cd->epoch_cyc_copy = cyc;
	raw_local_irq_restore(flags);
}

/*
 * If your clock rate is known at compile time, using this will allow
 * you to optimize the mult/shift loads away. This is paired with
 * init_fixed_sched_clock() to ensure that your mult/shift are correct.
 */
static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
	u32 cyc, u32 mask, u32 mult, u32 shift)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	/*
	 * Load the epoch_cyc and epoch_ns atomically. We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 */
	do {
		epoch_cyc = cd->epoch_cyc;
		smp_rmb();
		epoch_ns = cd->epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd->epoch_cyc_copy);

	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
}

/*
 * Otherwise, you need to use this, which will obtain the mult/shift
 * from the clock_data structure. Use init_sched_clock() with this.
 */
static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
	u32 cyc, u32 mask)
{
	return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
}

/*
 * Initialize the clock data - calculate the appropriate multiplier
 * and shift. Also setup a timer to ensure that the epoch is refreshed
 * at the appropriate time interval, which will call your update
 * handler.
 */
void init_sched_clock(struct clock_data *, void (*)(void),
	unsigned int, unsigned long);

/*
 * Use this initialization function rather than init_sched_clock() if
 * you're using cyc_to_fixed_sched_clock, which will warn if your
 * constants are incorrect.
 */
static inline void init_fixed_sched_clock(struct clock_data *cd,
	void (*update)(void), unsigned int bits, unsigned long rate,
	u32 mult, u32 shift)
{
	init_sched_clock(cd, update, bits, rate);
	if (cd->mult != mult || cd->shift != shift) {
		pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
			"sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
			mult, shift, cd->mult, cd->shift);
	}
}

#endif
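A minimal sketch of how a platform might hook a free-running 32-bit counter into these helpers; the counter base, the assumed 24MHz rate, and the my_* names are illustrative, not part of this header:

#include <linux/init.h>
#include <linux/io.h>
#include <asm/sched_clock.h>

static DEFINE_CLOCK_DATA(cd);
static void __iomem *my_counter_base;	/* hypothetical free-running counter */

static u32 read_my_counter(void)
{
	return readl_relaxed(my_counter_base);
}

unsigned long long notrace sched_clock(void)
{
	return cyc_to_sched_clock(&cd, read_my_counter(), (u32)~0);
}

static void notrace my_update_sched_clock(void)
{
	update_sched_clock(&cd, read_my_counter(), (u32)~0);
}

static void __init my_sched_clock_init(void)
{
	/* 32 valid counter bits at an assumed 24MHz input clock */
	init_sched_clock(&cd, my_update_sched_clock, 32, 24000000);
}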
@@ -33,27 +33,23 @@ struct seq_file;

/*
 * generate IPI list text
 */
extern void show_ipi_list(struct seq_file *p);
extern void show_ipi_list(struct seq_file *, int);

/*
 * Called from assembly code, this handles an IPI.
 */
asmlinkage void do_IPI(struct pt_regs *regs);
asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);

/*
 * Setup the set of possible CPUs (via set_cpu_possible)
 */
extern void smp_init_cpus(void);

/*
 * Move global data into per-processor storage.
 */
extern void smp_store_cpu_info(unsigned int cpuid);

/*
 * Raise an IPI cross call on CPUs in callmap.
 */
extern void smp_cross_call(const struct cpumask *mask);
extern void smp_cross_call(const struct cpumask *mask, int ipi);

/*
 * Boot a secondary CPU, and assign it the specified idle task.

@@ -72,6 +68,11 @@ asmlinkage void secondary_start_kernel(void);
 */
extern void platform_secondary_init(unsigned int cpu);

/*
 * Initialize cpu_possible map, and enable coherency
 */
extern void platform_smp_prepare_cpus(unsigned int);

/*
 * Initial data for bringing up a secondary CPU.
 */

@@ -97,6 +98,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
/*
 * show local interrupt info
 */
extern void show_local_irqs(struct seq_file *);
extern void show_local_irqs(struct seq_file *, int);

#endif /* ifndef __ASM_ARM_SMP_H */
@@ -1,17 +0,0 @@
#ifndef ASMARM_SMP_MIDR_H
#define ASMARM_SMP_MIDR_H

#define hard_smp_processor_id()						\
	({								\
		unsigned int cpunum;					\
		__asm__("\n"						\
			"1:	mrc	p15, 0, %0, c0, c0, 5\n"	\
			"	.pushsection \".alt.smp.init\", \"a\"\n"\
			"	.long	1b\n"				\
			"	mov	%0, #0\n"			\
			"	.popsection"				\
			: "=r" (cpunum));				\
		cpunum &= 0x0F;						\
	})

#endif
@@ -22,7 +22,6 @@ struct clock_event_device;

extern void __iomem *twd_base;

void twd_timer_stop(void);
int twd_timer_ack(void);
void twd_timer_setup(struct clock_event_device *);
@@ -63,6 +63,11 @@
#include <asm/outercache.h>

#define __exception	__attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry	__irq_entry
#else
#define __exception_irq_entry	__exception
#endif

struct thread_info;
struct task_struct;

@@ -119,6 +124,13 @@ extern unsigned int user_debug;
#define vectors_high()	(0)
#endif

#if __LINUX_ARM_ARCH__ >= 7 || \
	(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
#define sev()	__asm__ __volatile__ ("sev" : : : "memory")
#define wfe()	__asm__ __volatile__ ("wfe" : : : "memory")
#define wfi()	__asm__ __volatile__ ("wfi" : : : "memory")
#endif

#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
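A sketch of the usual pairing of these primitives: a waiter polls a flag and parks in WFE between polls, while the waker publishes the store and then issues SEV. This assumes ARMv6K/ARMv7 so the macros above are defined; it is modeled on, not copied from, the kernel's spin-wait loops:

/* Spin until *flag becomes non-zero, sleeping in WFE between polls. */
static inline void wait_for_flag(volatile int *flag)
{
	while (!*flag)
		wfe();	/* woken by SEV, an interrupt, or spuriously */
}

static inline void signal_flag(volatile int *flag)
{
	*flag = 1;
	dsb();	/* make the store visible before the event */
	sev();
}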
@@ -15,16 +15,37 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static inline int __in_irqentry_text(unsigned long ptr)
{
	extern char __irqentry_text_start[];
	extern char __irqentry_text_end[];

	return ptr >= (unsigned long)&__irqentry_text_start &&
	       ptr < (unsigned long)&__irqentry_text_end;
}
#else
static inline int __in_irqentry_text(unsigned long ptr)
{
	return 0;
}
#endif

static inline int in_exception_text(unsigned long ptr)
{
	extern char __exception_text_start[];
	extern char __exception_text_end[];
	int in;

	return ptr >= (unsigned long)&__exception_text_start &&
	       ptr < (unsigned long)&__exception_text_end;
	in = ptr >= (unsigned long)&__exception_text_start &&
	     ptr < (unsigned long)&__exception_text_end;

	return in ? : __in_irqentry_text(ptr);
}

extern void __init early_trap_init(void);
extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);

extern void *vectors_page;

#endif
@@ -227,7 +227,7 @@ do {									\

#define __get_user_asm_byte(x,addr,err)				\
	__asm__ __volatile__(					\
	"1:	ldrbt	%1,[%2]\n"				\
	"1:	" T(ldrb) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\

@@ -263,7 +263,7 @@ do {									\

#define __get_user_asm_word(x,addr,err)				\
	__asm__ __volatile__(					\
	"1:	ldrt	%1,[%2]\n"				\
	"1:	" T(ldr) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\

@@ -308,7 +308,7 @@ do {									\

#define __put_user_asm_byte(x,__pu_addr,err)			\
	__asm__ __volatile__(					\
	"1:	strbt	%1,[%2]\n"				\
	"1:	" T(strb) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\

@@ -341,7 +341,7 @@ do {									\

#define __put_user_asm_word(x,__pu_addr,err)			\
	__asm__ __volatile__(					\
	"1:	strt	%1,[%2]\n"				\
	"1:	" T(str) "	%1,[%2],#0\n"			\
	"2:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\

@@ -366,10 +366,10 @@ do {									\

#define __put_user_asm_dword(x,__pu_addr,err)			\
	__asm__ __volatile__(					\
 ARM(	"1:	strt	" __reg_oper1 ", [%1], #4\n"	)	\
 ARM(	"2:	strt	" __reg_oper0 ", [%1]\n"	)	\
 THUMB(	"1:	strt	" __reg_oper1 ", [%1]\n"	)	\
 THUMB(	"2:	strt	" __reg_oper0 ", [%1, #4]\n"	)	\
 ARM(	"1:	" T(str) "	" __reg_oper1 ", [%1], #4\n"	)	\
 ARM(	"2:	" T(str) "	" __reg_oper0 ", [%1]\n"	)	\
 THUMB(	"1:	" T(str) "	" __reg_oper1 ", [%1]\n"	)	\
 THUMB(	"2:	" T(str) "	" __reg_oper0 ", [%1, #4]\n"	)	\
	"3:\n"							\
	"	.pushsection .fixup,\"ax\"\n"			\
	"	.align	2\n"					\
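The T() helper used above comes from asm/domain.h elsewhere in this series and is not shown in this hunk; roughly, it stringizes the mnemonic and appends the unprivileged 't' suffix only when CONFIG_CPU_USE_DOMAINS is set, which is also why the operands gain a ',#0': the ldrt/strt forms are post-indexed. An approximation (assumed, not the verbatim definition):

#ifdef CONFIG_CPU_USE_DOMAINS
#define T(instr)	#instr "t"	/* T(ldr) -> "ldr" "t" == "ldrt" */
#else
#define T(instr)	#instr		/* plain privileged "ldr" */
#endif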
@@ -5,7 +5,7 @@
CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
AFLAGS_head.o        := -DTEXT_OFFSET=$(TEXT_OFFSET)

ifdef CONFIG_DYNAMIC_FTRACE
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
endif

@@ -29,10 +29,12 @@ obj-$(CONFIG_MODULES)		+= armksyms.o module.o
obj-$(CONFIG_ARTHUR)		+= arthur.o
obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
obj-$(CONFIG_PCI)		+= bios32.o isa.o
obj-$(CONFIG_SMP)		+= smp.o
obj-$(CONFIG_HAVE_SCHED_CLOCK)	+= sched_clock.o
obj-$(CONFIG_SMP)		+= smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
obj-$(CONFIG_HAVE_ARM_TWD)	+= smp_twd.o
obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
obj-$(CONFIG_KPROBES)		+= kprobes.o kprobes-decode.o
obj-$(CONFIG_ATAGS_PROC)	+= atags.o

@@ -42,6 +44,8 @@ obj-$(CONFIG_KGDB)		+= kgdb.o
obj-$(CONFIG_ARM_UNWIND)	+= unwind.o
obj-$(CONFIG_HAVE_TCM)		+= tcm.o
obj-$(CONFIG_CRASH_DUMP)	+= crash_dump.o
obj-$(CONFIG_SWP_EMULATE)	+= swp_emulate.o
CFLAGS_swp_emulate.o		:= -Wa,-march=armv7-a
obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o

obj-$(CONFIG_CRUNCH)		+= crunch.o crunch-bits.o

@@ -50,6 +54,7 @@ AFLAGS_crunch-bits.o	:= -Wa,-mcpu=ep9312
obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
obj-$(CONFIG_CPU_PJ4)		+= pj4-cp0.o
obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
obj-$(CONFIG_CPU_HAS_PMU)	+= pmu.o
obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
@@ -25,42 +25,22 @@
#include <asm/tls.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>

/*
 * Interrupt handling. Preserves r7, r8, r9
 */
	.macro	irq_handler
	get_irqnr_preamble r5, lr
1:	get_irqnr_and_base r0, r6, r5, lr
	movne	r1, sp
	@
	@ routine called with r0 = irq number, r1 = struct pt_regs *
	@
	adrne	lr, BSYM(1b)
	bne	asm_do_IRQ

#ifdef CONFIG_SMP
	/*
	 * XXX
	 *
	 * this macro assumes that irqstat (r6) and base (r5) are
	 * preserved from get_irqnr_and_base above
	 */
	ALT_SMP(test_for_ipi r0, r6, r5, lr)
	ALT_UP_B(9997f)
	movne	r0, sp
	adrne	lr, BSYM(1b)
	bne	do_IPI

#ifdef CONFIG_LOCAL_TIMERS
	test_for_ltirq r0, r6, r5, lr
	movne	r0, sp
	adrne	lr, BSYM(1b)
	bne	do_local_timer
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r5, =handle_arch_irq
	mov	r0, sp
	ldr	r5, [r5]
	adr	lr, BSYM(9997f)
	teq	r5, #0
	movne	pc, r5
#endif
	arch_irq_handler_default
9997:
#endif

	.endm

#ifdef CONFIG_KPROBES
@@ -198,6 +178,7 @@ __dabt_svc:
	@
	@ set desired IRQ state, then call main handler
	@
	debug_entry r1
	msr	cpsr_c, r9
	mov	r2, sp
	bl	do_DataAbort

@@ -324,6 +305,7 @@ __pabt_svc:
#else
	bl	CPU_PABORT_HANDLER
#endif
	debug_entry r1
	msr	cpsr_c, r9			@ Maybe enable interrupts
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

@@ -439,6 +421,7 @@ __dabt_usr:
	@
	@ IRQs on, then call the main handler
	@
	debug_entry r1
	enable_irq
	mov	r2, sp
	adr	lr, BSYM(ret_from_exception)

@@ -703,6 +686,7 @@ __pabt_usr:
#else
	bl	CPU_PABORT_HANDLER
#endif
	debug_entry r1
	enable_irq				@ Enable interrupts
	mov	r2, sp				@ regs
	bl	do_PrefetchAbort		@ call abort handler

@@ -735,7 +719,7 @@ ENTRY(__switch_to)
 THUMB(	stmia	ip!, {r4 - sl, fp}	)	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		)
 THUMB(	str	lr, [ip], #4		)
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5

@@ -744,7 +728,7 @@ ENTRY(__switch_to)
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0

@@ -842,7 +826,7 @@ __kuser_helper_start:
 */

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb
	smp_dmb	arm
	usr_ret	lr

	.align	5

@@ -959,7 +943,7 @@ kuser_cmpxchg_fixup:

#else

	smp_dmb
	smp_dmb	arm
1:	ldrex	r3, [r2]
	subs	r3, r3, r0
	strexeq	r3, r1, [r2]

@@ -1245,3 +1229,9 @@ cr_alignment:
	.space	4
cr_no_alignment:
	.space	4

#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif
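The storage above backs the handle_arch_irq pointer declared in asm/mach/irq.h. For illustration, a platform selecting CONFIG_MULTI_IRQ_HANDLER points it at its top-level C handler during IRQ init; the handler name below is hypothetical:

#include <asm/mach/irq.h>

extern void my_soc_handle_irq(struct pt_regs *regs);	/* hypothetical */

static void __init my_soc_init_irq(void)
{
#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = my_soc_handle_irq;
#endif
}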
@@ -147,98 +147,170 @@ ENDPROC(ret_from_fork)
#endif
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(__gnu_mcount_nc)
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
ENDPROC(__gnu_mcount_nc)

ENTRY(ftrace_caller)
	stmdb	sp!, {r0-r3, lr}
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE
	ldr	r1, [sp, #20]

	.global	ftrace_call
ftrace_call:
	bl	ftrace_stub
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
ENDPROC(ftrace_caller)

#ifdef CONFIG_OLD_MCOUNT
ENTRY(mcount)
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
ENDPROC(mcount)

ENTRY(ftrace_caller_old)
	stmdb	sp!, {r0-r3, lr}
	ldr	r1, [fp, #-4]
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl	ftrace_call_old
ftrace_call_old:
	bl	ftrace_stub
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}
ENDPROC(ftrace_caller_old)
#endif

#else

ENTRY(__gnu_mcount_nc)
	stmdb	sp!, {r0-r3, lr}
.macro __mcount suffix
	mcount_enter
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, .Lftrace_stub
	cmp	r0, r2
	bne	gnu_trace
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
	bne	1f

gnu_trace:
	ldr	r1, [sp, #20]			@ lr of instrumented routine
	mov	r0, lr
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	ldr	r1, =ftrace_graph_return
	ldr	r2, [r1]
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix

	ldr	r1, =ftrace_graph_entry
	ldr	r2, [r1]
	ldr	r0, =ftrace_graph_entry_stub
	cmp	r0, r2
	bne	ftrace_graph_caller\suffix
#endif

	mcount_exit

1:	mcount_get_lr	r1			@ lr of instrumented func
	mov	r0, lr				@ instrumented function
	sub	r0, r0, #MCOUNT_INSN_SIZE
	adr	lr, BSYM(1f)
	adr	lr, BSYM(2f)
	mov	pc, r2
1:
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
ENDPROC(__gnu_mcount_nc)
2:	mcount_exit
.endm

.macro __ftrace_caller suffix
	mcount_enter

	mcount_get_lr	r1			@ lr of instrumented func
	mov	r0, lr				@ instrumented function
	sub	r0, r0, #MCOUNT_INSN_SIZE

	.globl ftrace_call\suffix
ftrace_call\suffix:
	bl	ftrace_stub

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
	mov	r0, r0
#endif

	mcount_exit
.endm

.macro __ftrace_graph_caller
	sub	r0, fp, #4		@ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
	@ called from __ftrace_caller, saved in mcount_enter
	ldr	r1, [sp, #16]		@ instrumented routine (func)
#else
	@ called from __mcount, untouched in lr
	mov	r1, lr			@ instrumented routine (func)
#endif
	sub	r1, r1, #MCOUNT_INSN_SIZE
	mov	r2, fp			@ frame pointer
	bl	prepare_ftrace_return
	mcount_exit
.endm

#ifdef CONFIG_OLD_MCOUNT
/*
 * This is under an ifdef in order to force link-time errors for people trying
 * to build with !FRAME_POINTER with a GCC which doesn't use the new-style
 * mcount.
 * mcount
 */
ENTRY(mcount)
	stmdb	sp!, {r0-r3, lr}
	ldr	r0, =ftrace_trace_function
	ldr	r2, [r0]
	adr	r0, ftrace_stub
	cmp	r0, r2
	bne	trace
	ldr	lr, [fp, #-4]			@ restore lr
	ldmia	sp!, {r0-r3, pc}

trace:
	ldr	r1, [fp, #-4]			@ lr of instrumented routine
	mov	r0, lr
	sub	r0, r0, #MCOUNT_INSN_SIZE
	mov	lr, pc
	mov	pc, r2
	ldr	lr, [fp, #-4]			@ restore lr
.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [fp, #-4]
.endm

.macro mcount_exit
	ldr	lr, [fp, #-4]
	ldmia	sp!, {r0-r3, pc}
.endm

ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
	stmdb	sp!, {lr}
	ldr	lr, [fp, #-4]
	ldmia	sp!, {pc}
#else
	__mcount _old
#endif
ENDPROC(mcount)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
	__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif

#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif

/*
 * __gnu_mcount_nc
 */

.macro mcount_enter
	stmdb	sp!, {r0-r3, lr}
.endm

.macro mcount_get_lr reg
	ldr	\reg, [sp, #20]
.endm

.macro mcount_exit
	ldmia	sp!, {r0-r3, ip, lr}
	mov	pc, ip
.endm

ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
	mov	ip, lr
	ldmia	sp!, {lr}
	mov	pc, ip
#else
	__mcount
#endif
ENDPROC(__gnu_mcount_nc)

#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
	__ftrace_caller
ENDPROC(ftrace_caller)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
	__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif

.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	.globl return_to_handler
return_to_handler:
	stmdb	sp!, {r0-r3}
	mov	r0, fp			@ frame pointer
	bl	ftrace_return_to_handler
	mov	lr, r0			@ r0 has real ret addr
	ldmia	sp!, {r0-r3}
	mov	pc, lr
#endif

ENTRY(ftrace_stub)
.Lftrace_stub:
@@ -165,6 +165,25 @@
	.endm
#endif	/* !CONFIG_THUMB2_KERNEL */

	@
	@ Debug exceptions are taken as prefetch or data aborts.
	@ We must disable preemption during the handler so that
	@ we can access the debug registers safely.
	@
	.macro	debug_entry, fsr
#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
	ldr	r4, =0x40f		@ mask out fsr.fs
	and	r5, r4, \fsr
	cmp	r5, #2			@ debug exception
	bne	1f
	get_thread_info r10
	ldr	r6, [r10, #TI_PREEMPT]	@ get preempt count
	add	r11, r6, #1		@ increment it
	str	r11, [r10, #TI_PREEMPT]
1:
#endif
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
@@ -45,6 +45,7 @@
#include <asm/fiq.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/traps.h>

static unsigned long no_fiq_insn;

@@ -67,17 +68,22 @@ static struct fiq_handler default_owner = {

static struct fiq_handler *current_fiq = &default_owner;

int show_fiq_list(struct seq_file *p, void *v)
int show_fiq_list(struct seq_file *p, int prec)
{
	if (current_fiq != &default_owner)
		seq_printf(p, "FIQ: %s\n", current_fiq->name);
		seq_printf(p, "%*s: %s\n", prec, "FIQ",
			current_fiq->name);

	return 0;
}

void set_fiq_handler(void *start, unsigned int length)
{
#if defined(CONFIG_CPU_USE_DOMAINS)
	memcpy((void *)0xffff001c, start, length);
#else
	memcpy(vectors_page + 0x1c, start, length);
#endif
	flush_icache_range(0xffff001c, 0xffff001c + length);
	if (!vectors_high())
		flush_icache_range(0x1c, 0x1c + length);
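The new prec argument uses printf's "%*s" field-width-from-argument form so that "FIQ" lines up with the widest interrupt label in /proc/interrupts. A user-space illustration:

#include <stdio.h>

int main(void)
{
	int prec = 5;	/* width of the widest interrupt label */

	printf("%*s: %s\n", prec, "FIQ", "uart");	/* prints "  FIQ: uart" */
	return 0;
}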
@@ -24,6 +24,7 @@
#define	NOP		0xe8bd4000	/* pop {lr} */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_OLD_MCOUNT
#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)

@@ -59,9 +60,9 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
}
#endif

/* construct a branch (BL) instruction to addr */
#ifdef CONFIG_THUMB2_KERNEL
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
				       bool link)
{
	unsigned long s, j1, j2, i1, i2, imm10, imm11;
	unsigned long first, second;

@@ -83,15 +84,22 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
	j2 = (!i2) ^ s;

	first = 0xf000 | (s << 10) | imm10;
	second = 0xd000 | (j1 << 13) | (j2 << 11) | imm11;
	second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
	if (link)
		second |= 1 << 14;

	return (second << 16) | first;
}
#else
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
				       bool link)
{
	unsigned long opcode = 0xea000000;
	long offset;

	if (link)
		opcode |= 1 << 24;

	offset = (long)addr - (long)(pc + 8);
	if (unlikely(offset < -33554432 || offset > 33554428)) {
		/* Can't generate branches that far (from ARM ARM). Ftrace

@@ -103,10 +111,15 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)

	offset = (offset >> 2) & 0x00ffffff;

	return 0xeb000000 | offset;
	return opcode | offset;
}
#endif

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return ftrace_gen_branch(pc, addr, true);
}

static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new)
{
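The ARM-mode encoding above can be sanity-checked outside the kernel. A user-space mirror of ftrace_gen_branch() (range check omitted for brevity) with one worked value:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t gen_branch(uint32_t pc, uint32_t addr, int link)
{
	uint32_t opcode = 0xea000000;	/* B */
	int32_t offset;

	if (link)
		opcode |= 1 << 24;	/* BL */

	offset = (int32_t)addr - (int32_t)(pc + 8);	/* ARM PC reads 8 ahead */
	offset = (offset >> 2) & 0x00ffffff;		/* word offset, 24 bits */

	return opcode | (uint32_t)offset;
}

int main(void)
{
	/* BL from 0x8000 to 0x8010: (0x8010 - 0x8008) / 4 = 2 offset words */
	assert(gen_branch(0x8000, 0x8010, 1) == 0xeb000002);
	printf("%08x\n", gen_branch(0x8000, 0x8010, 1));
	return 0;
}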
@@ -193,3 +206,83 @@ int __init ftrace_dyn_arch_init(void *data)

	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer);
	if (err == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);

static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new);
}

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_OLD_MCOUNT
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
					     ftrace_graph_caller_old,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
@@ -91,6 +91,11 @@ ENTRY(stext)
	movs	r8, r5				@ invalid machine (r5=0)?
 THUMB(	it	eq )				@ force fixup-able long branch encoding
	beq	__error_a			@ yes, error 'a'

	/*
	 * r1 = machine no, r2 = atags,
	 * r8 = machinfo, r9 = cpuid, r10 = procinfo
	 */
	bl	__vet_atags
#ifdef CONFIG_SMP_ON_UP
	bl	__fixup_smp

@@ -387,19 +392,19 @@ ENDPROC(__turn_mmu_on)

#ifdef CONFIG_SMP_ON_UP
__fixup_smp:
	mov	r7, #0x00070000
	orr	r6, r7, #0xff000000	@ mask 0xff070000
	orr	r7, r7, #0x41000000	@ val 0x41070000
	and	r0, r9, r6
	teq	r0, r7			@ ARM CPU and ARMv6/v7?
	mov	r4, #0x00070000
	orr	r3, r4, #0xff000000	@ mask 0xff070000
	orr	r4, r4, #0x41000000	@ val 0x41070000
	and	r0, r9, r3
	teq	r0, r4			@ ARM CPU and ARMv6/v7?
	bne	__fixup_smp_on_up	@ no, assume UP

	orr	r6, r6, #0x0000ff00
	orr	r6, r6, #0x000000f0	@ mask 0xff07fff0
	orr	r7, r7, #0x0000b000
	orr	r7, r7, #0x00000020	@ val 0x4107b020
	and	r0, r9, r6
	teq	r0, r7			@ ARM 11MPCore?
	orr	r3, r3, #0x0000ff00
	orr	r3, r3, #0x000000f0	@ mask 0xff07fff0
	orr	r4, r4, #0x0000b000
	orr	r4, r4, #0x00000020	@ val 0x4107b020
	and	r0, r9, r3
	teq	r0, r4			@ ARM 11MPCore?
	moveq	pc, lr			@ yes, assume SMP

	mrc	p15, 0, r0, c0, c0, 5	@ read MPIDR

@@ -408,15 +413,22 @@ __fixup_smp:

__fixup_smp_on_up:
	adr	r0, 1f
	ldmia	r0, {r3, r6, r7}
	ldmia	r0, {r3 - r5}
	sub	r3, r0, r3
	add	r6, r6, r3
	add	r7, r7, r3
2:	cmp	r6, r7
	ldmia	r6!, {r0, r4}
	strlo	r4, [r0, r3]
	blo	2b
	mov	pc, lr
	add	r4, r4, r3
	add	r5, r5, r3
2:	cmp	r4, r5
	movhs	pc, lr
	ldmia	r4!, {r0, r6}
 ARM(	str	r6, [r0, r3]	)
 THUMB(	add	r0, r0, r3	)
#ifdef __ARMEB__
 THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
#endif
 THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
 THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
 THUMB(	strh	r6, [r0]	)
	b	2b
ENDPROC(__fixup_smp)

	.align
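A user-space model of the __fixup_smp_on_up loop above: a table of (location, replacement) pairs emitted into .alt.smp.init is walked, and each site is patched with its UP-safe instruction. Addresses are simple array indices here rather than relocated kernel addresses, and the opcodes are illustrative:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct fixup { size_t loc; uint32_t insn; };

static void fixup_smp_on_up(uint32_t *text, const struct fixup *f, size_t n)
{
	for (size_t i = 0; i < n; i++)
		text[f[i].loc] = f[i].insn;	/* the "str r6, [r0, r3]" analogue */
}

int main(void)
{
	uint32_t text[4] = { 0xe59f0000, 0xee070fba, 0, 0 };
	const struct fixup table[] = { { 1, 0xe1a00000 } };	/* patch to NOP */

	fixup_smp_on_up(text, table, 1);
	assert(text[1] == 0xe1a00000);
	return 0;
}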
@ -24,6 +24,7 @@
|
|||
#define pr_fmt(fmt) "hw-breakpoint: " fmt
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>

@@ -44,6 +45,7 @@ static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_reserved_brps;
static int core_num_wrps;

/* Debug architecture version. */

@@ -52,87 +54,6 @@ static u8 debug_arch;

/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len;

/* Determine number of BRP registers available. */
static int get_num_brps(void)
{
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 24) & 0xf) + 1;
}

/* Determine number of WRP registers available. */
static int get_num_wrps(void)
{
	/*
	 * FIXME: When a watchpoint fires, the only way to work out which
	 * watchpoint it was is by disassembling the faulting instruction
	 * and working out the address of the memory access.
	 *
	 * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
	 * based addresses.
	 *
	 * For the time being, we only report 1 watchpoint register so we
	 * always know which watchpoint fired. In the future we can either
	 * add a disassembler and address generation emulator, or we can
	 * insert a check to see if the DFAR is set on watchpoint exception
	 * entry [the ARM ARM states that the DFAR is UNKNOWN, but
	 * experience shows that it is set on some implementations].
	 */

#if 0
	u32 didr, wrps;
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 28) & 0xf) + 1;
#endif

	return 1;
}

int hw_breakpoint_slots(int type)
{
	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warning("unknown slot type: %d\n", type);
		return 0;
	}
}

/* Determine debug architecture. */
static u8 get_debug_arch(void)
{
	u32 didr;

	/* Do we implement the extended CPUID interface? */
	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
		pr_warning("CPUID feature registers not supported. "
			   "Assuming v6 debug is present.\n");
		return ARM_DEBUG_ARCH_V6;
	}

	ARM_DBG_READ(c0, 0, didr);
	return (didr >> 16) & 0xf;
}

/* Does this core support mismatch breakpoints? */
static int core_has_mismatch_bps(void)
{
	return debug_arch >= ARM_DEBUG_ARCH_V7_ECP14 && core_num_brps > 1;
}

u8 arch_get_debug_arch(void)
{
	return debug_arch;
}

#define READ_WB_REG_CASE(OP2, M, VAL) \
	case ((OP2 << 4) + M): \
		ARM_DBG_READ(c ## M, OP2, VAL); \

@@ -210,6 +131,94 @@ static void write_wb_reg(int n, u32 val)
	isb();
}

/* Determine debug architecture. */
static u8 get_debug_arch(void)
{
	u32 didr;

	/* Do we implement the extended CPUID interface? */
	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
		pr_warning("CPUID feature registers not supported. "
			   "Assuming v6 debug is present.\n");
		return ARM_DEBUG_ARCH_V6;
	}

	ARM_DBG_READ(c0, 0, didr);
	return (didr >> 16) & 0xf;
}

u8 arch_get_debug_arch(void)
{
	return debug_arch;
}

/* Determine number of BRP registers available. */
static int get_num_brp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 24) & 0xf) + 1;
}

/* Does this core support mismatch breakpoints? */
static int core_has_mismatch_brps(void)
{
	return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
		get_num_brp_resources() > 1);
}

/* Determine number of usable WRPs available. */
static int get_num_wrps(void)
{
	/*
	 * FIXME: When a watchpoint fires, the only way to work out which
	 * watchpoint it was is by disassembling the faulting instruction
	 * and working out the address of the memory access.
	 *
	 * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
	 * based addresses.
	 *
	 * Providing we have more than 1 breakpoint register, we only report
	 * a single watchpoint register for the time being. This way, we always
	 * know which watchpoint fired. In the future we can either add a
	 * disassembler and address generation emulator, or we can insert a
	 * check to see if the DFAR is set on watchpoint exception entry
	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
	 * that it is set on some implementations].
	 */

#if 0
	int wrps;
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	wrps = ((didr >> 28) & 0xf) + 1;
#endif
	int wrps = 1;

	if (core_has_mismatch_brps() && wrps >= get_num_brp_resources())
		wrps = get_num_brp_resources() - 1;

	return wrps;
}

/* We reserve one breakpoint for each watchpoint. */
static int get_num_reserved_brps(void)
{
	if (core_has_mismatch_brps())
		return get_num_wrps();
	return 0;
}

/* Determine number of usable BRPs available. */
static int get_num_brps(void)
{
	int brps = get_num_brp_resources();
	if (core_has_mismatch_brps())
		brps -= get_num_reserved_brps();
	return brps;
}

/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can

@@ -230,8 +239,12 @@ static int enable_monitor_mode(void)
		goto out;
	}

	/* If monitor mode is already enabled, just return. */
	if (dscr & ARM_DSCR_MDBGEN)
		goto out;

	/* Write to the corresponding DSCR. */
	switch (debug_arch) {
	switch (get_debug_arch()) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));

@@ -246,15 +259,30 @@ static int enable_monitor_mode(void)

	/* Check that the write made it through. */
	ARM_DBG_READ(c1, 0, dscr);
	if (WARN_ONCE(!(dscr & ARM_DSCR_MDBGEN),
			"failed to enable monitor mode.")) {
	if (!(dscr & ARM_DSCR_MDBGEN))
		ret = -EPERM;
	}

out:
	return ret;
}

int hw_breakpoint_slots(int type)
{
	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warning("unknown slot type: %d\n", type);
		return 0;
	}
}

/*
 * Check if 8-bit byte-address select is available.
 * This clobbers WRP 0.

@@ -268,9 +296,6 @@ static u8 get_max_wp_len(void)
	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
		goto out;

	if (enable_monitor_mode())
		goto out;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.len = ARM_BREAKPOINT_LEN_8;
	ctrl_reg = encode_ctrl_reg(ctrl);

@@ -289,23 +314,6 @@ u8 arch_get_max_wp_len(void)
	return max_watchpoint_len;
}

/*
 * Handler for reactivating a suspended watchpoint when the single
 * step `mismatch' breakpoint is triggered.
 */
static void wp_single_step_handler(struct perf_event *bp, int unused,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	perf_event_enable(counter_arch_bp(bp)->suspended_wp);
	unregister_hw_breakpoint(bp);
}

static int bp_is_single_step(struct perf_event *bp)
{
	return bp->overflow_handler == wp_single_step_handler;
}

/*
 * Install a perf counter breakpoint.
 */

@@ -314,30 +322,41 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, ctrl_base, val_base, ret = 0;
	u32 addr, ctrl;

	/* Ensure that we are in monitor mode and halting mode is disabled. */
	ret = enable_monitor_mode();
	if (ret)
		goto out;

	addr = info->address;
	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_base = ARM_BASE_BCR;
		val_base = ARM_BASE_BVR;
		slots = __get_cpu_var(bp_on_reg);
		max_slots = core_num_brps - 1;

		if (bp_is_single_step(bp)) {
			info->ctrl.mismatch = 1;
			i = max_slots;
			slots[i] = bp;
			goto setup;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
		if (info->step_ctrl.enabled) {
			/* Override the breakpoint data with the step data. */
			addr = info->trigger & ~0x3;
			ctrl = encode_ctrl_reg(info->step_ctrl);
		}
	} else {
		/* Watchpoint */
		ctrl_base = ARM_BASE_WCR;
		val_base = ARM_BASE_WVR;
		slots = __get_cpu_var(wp_on_reg);
		if (info->step_ctrl.enabled) {
			/* Install into the reserved breakpoint region. */
			ctrl_base = ARM_BASE_BCR + core_num_brps;
			val_base = ARM_BASE_BVR + core_num_brps;
			/* Override the watchpoint data with the step data. */
			addr = info->trigger & ~0x3;
			ctrl = encode_ctrl_reg(info->step_ctrl);
		} else {
			ctrl_base = ARM_BASE_WCR;
			val_base = ARM_BASE_WVR;
		}
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

@@ -355,12 +374,11 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
		goto out;
	}

setup:
	/* Setup the address register. */
	write_wb_reg(val_base + i, info->address);
	write_wb_reg(val_base + i, addr);

	/* Setup the control register. */
	write_wb_reg(ctrl_base + i, encode_ctrl_reg(info->ctrl) | 0x1);
	write_wb_reg(ctrl_base + i, ctrl);

out:
	return ret;

@@ -375,18 +393,15 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		base = ARM_BASE_BCR;
		slots = __get_cpu_var(bp_on_reg);
		max_slots = core_num_brps - 1;

		if (bp_is_single_step(bp)) {
			i = max_slots;
			slots[i] = NULL;
			goto reset;
		}
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		base = ARM_BASE_WCR;
		slots = __get_cpu_var(wp_on_reg);
		if (info->step_ctrl.enabled)
			base = ARM_BASE_BCR + core_num_brps;
		else
			base = ARM_BASE_WCR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

@@ -403,7 +418,6 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
		return;

reset:
	/* Reset the control register. */
	write_wb_reg(base + i, 0);
}

@@ -537,12 +551,23 @@ static int arch_build_bp_info(struct perf_event *bp)
		return -EINVAL;
	}

	/*
	 * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
	 * by the hardware and must be aligned to the appropriate number of
	 * bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
		return -EINVAL;

	/* Address */
	info->address = bp->attr.bp_addr;

	/* Privilege */
	info->ctrl.privilege = ARM_BREAKPOINT_USER;
	if (arch_check_bp_in_kernelspace(bp) && !bp_is_single_step(bp))
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

	/* Enabled? */

@@ -561,7 +586,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret = 0;
	u32 bytelen, max_len, offset, alignment_mask = 0x3;
	u32 offset, alignment_mask = 0x3;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);

@@ -571,84 +596,85 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
	/* Check address alignment. */
	if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
		alignment_mask = 0x7;
	if (info->address & alignment_mask) {
		/*
		 * Try to fix the alignment. This may result in a length
		 * that is too large, so we must check for that.
		 */
		bytelen = get_hbp_len(info->ctrl.len);
		max_len = info->ctrl.type == ARM_BREAKPOINT_EXECUTE ? 4 :
				max_watchpoint_len;

		if (max_len >= 8)
			offset = info->address & 0x7;
		else
			offset = info->address & 0x3;

		if (bytelen > (1 << ((max_len - (offset + 1)) >> 1))) {
			ret = -EFBIG;
			goto out;
		}

		info->ctrl.len <<= offset;
		info->address &= ~offset;

		pr_debug("breakpoint alignment fixup: length = 0x%x, "
			 "address = 0x%x\n", info->ctrl.len, info->address);
	offset = info->address & alignment_mask;
	switch (offset) {
	case 0:
		/* Aligned */
		break;
	case 1:
		/* Allow single byte watchpoint. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
	case 2:
		/* Allow halfword watchpoints and breakpoints. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
	default:
		ret = -EINVAL;
		goto out;
	}

	info->address &= ~alignment_mask;
	info->ctrl.len <<= offset;

	/*
	 * Currently we rely on an overflow handler to take
	 * care of single-stepping the breakpoint when it fires.
	 * In the case of userspace breakpoints on a core with V7 debug,
	 * we can use the mismatch feature as a poor-man's hardware single-step.
	 * we can use the mismatch feature as a poor-man's hardware
	 * single-step, but this only works for per-task breakpoints.
	 */
	if (WARN_ONCE(!bp->overflow_handler &&
		(arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_bps()),
		(arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
		 || !bp->hw.bp_target),
			"overflow handler required but none found")) {
		ret = -EINVAL;
		goto out;
	}
out:
	return ret;
}

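[Note: the switch statement above re-encodes an unaligned watchpoint as a byte-address-select within the aligned word. A worked example in plain C, assuming the usual BAS-style ARM_BREAKPOINT_LEN_* encodings (one bit per byte; the macro names below are local stand-ins, not the kernel's):]

	#include <assert.h>
	#include <stdint.h>

	/* Assumed BAS encodings: one select bit per byte of the word. */
	#define LEN_1 0x1
	#define LEN_2 0x3

	int main(void)
	{
		uint32_t address = 0x8002;	/* halfword watchpoint at offset 2 */
		uint32_t len = LEN_2;
		uint32_t alignment_mask = 0x3;
		uint32_t offset = address & alignment_mask;	/* 2 */

		/* Same transformation as the fixup above. */
		len <<= offset;			/* 0x3 << 2 == 0xc: bytes 2-3 */
		address &= ~alignment_mask;	/* watch the aligned word 0x8000 */

		assert(len == 0xc && address == 0x8000);
		return 0;
	}
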
|
||||
static void update_mismatch_flag(int idx, int flag)
|
||||
/*
|
||||
* Enable/disable single-stepping over the breakpoint bp at address addr.
|
||||
*/
|
||||
static void enable_single_step(struct perf_event *bp, u32 addr)
|
||||
{
|
||||
struct perf_event *bp = __get_cpu_var(bp_on_reg[idx]);
|
||||
struct arch_hw_breakpoint *info;
|
||||
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
|
||||
|
||||
if (bp == NULL)
|
||||
return;
|
||||
arch_uninstall_hw_breakpoint(bp);
|
||||
info->step_ctrl.mismatch = 1;
|
||||
info->step_ctrl.len = ARM_BREAKPOINT_LEN_4;
|
||||
info->step_ctrl.type = ARM_BREAKPOINT_EXECUTE;
|
||||
info->step_ctrl.privilege = info->ctrl.privilege;
|
||||
info->step_ctrl.enabled = 1;
|
||||
info->trigger = addr;
|
||||
arch_install_hw_breakpoint(bp);
|
||||
}
|
||||
|
||||
info = counter_arch_bp(bp);
|
||||
|
||||
/* Update the mismatch field to enter/exit `single-step' mode */
|
||||
if (!bp->overflow_handler && info->ctrl.mismatch != flag) {
|
||||
info->ctrl.mismatch = flag;
|
||||
write_wb_reg(ARM_BASE_BCR + idx, encode_ctrl_reg(info->ctrl) | 0x1);
|
||||
}
|
||||
static void disable_single_step(struct perf_event *bp)
|
||||
{
|
||||
arch_uninstall_hw_breakpoint(bp);
|
||||
counter_arch_bp(bp)->step_ctrl.enabled = 0;
|
||||
arch_install_hw_breakpoint(bp);
|
||||
}
|
||||
|
||||
static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
|
||||
{
|
||||
int i;
|
||||
struct perf_event *bp, **slots = __get_cpu_var(wp_on_reg);
|
||||
struct perf_event *wp, **slots;
|
||||
struct arch_hw_breakpoint *info;
|
||||
struct perf_event_attr attr;
|
||||
|
||||
slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
|
||||
|
||||
/* Without a disassembler, we can only handle 1 watchpoint. */
|
||||
BUG_ON(core_num_wrps > 1);
|
||||
|
||||
hw_breakpoint_init(&attr);
|
||||
attr.bp_addr = regs->ARM_pc & ~0x3;
|
||||
attr.bp_len = HW_BREAKPOINT_LEN_4;
|
||||
attr.bp_type = HW_BREAKPOINT_X;
|
||||
|
||||
for (i = 0; i < core_num_wrps; ++i) {
|
||||
rcu_read_lock();
|
||||
|
||||
if (slots[i] == NULL) {
|
||||
wp = slots[i];
|
||||
|
||||
if (wp == NULL) {
|
||||
rcu_read_unlock();
|
||||
continue;
|
||||
}
|
||||
|
@ -658,87 +684,121 @@ static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
|
|||
* single watchpoint, we can set the trigger to the lowest
|
||||
* possible faulting address.
|
||||
*/
|
||||
info = counter_arch_bp(slots[i]);
|
||||
info->trigger = slots[i]->attr.bp_addr;
|
||||
info = counter_arch_bp(wp);
|
||||
info->trigger = wp->attr.bp_addr;
|
||||
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
|
||||
perf_bp_event(slots[i], regs);
|
||||
perf_bp_event(wp, regs);
|
||||
|
||||
/*
|
||||
* If no overflow handler is present, insert a temporary
|
||||
* mismatch breakpoint so we can single-step over the
|
||||
* watchpoint trigger.
|
||||
*/
|
||||
if (!slots[i]->overflow_handler) {
|
||||
bp = register_user_hw_breakpoint(&attr,
|
||||
wp_single_step_handler,
|
||||
current);
|
||||
counter_arch_bp(bp)->suspended_wp = slots[i];
|
||||
perf_event_disable(slots[i]);
|
||||
}
|
||||
if (!wp->overflow_handler)
|
||||
enable_single_step(wp, instruction_pointer(regs));
|
||||
|
||||
rcu_read_unlock();
|
||||
}
|
||||
}
|
||||
|
||||
static void watchpoint_single_step_handler(unsigned long pc)
|
||||
{
|
||||
int i;
|
||||
struct perf_event *wp, **slots;
|
||||
struct arch_hw_breakpoint *info;
|
||||
|
||||
slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
|
||||
|
||||
for (i = 0; i < core_num_reserved_brps; ++i) {
|
||||
rcu_read_lock();
|
||||
|
||||
wp = slots[i];
|
||||
|
||||
if (wp == NULL)
|
||||
goto unlock;
|
||||
|
||||
info = counter_arch_bp(wp);
|
||||
if (!info->step_ctrl.enabled)
|
||||
goto unlock;
|
||||
|
||||
/*
|
||||
* Restore the original watchpoint if we've completed the
|
||||
* single-step.
|
||||
*/
|
||||
if (info->trigger != pc)
|
||||
disable_single_step(wp);
|
||||
|
||||
unlock:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
}
|
||||
|
||||
static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
|
||||
{
|
||||
int i;
|
||||
int mismatch;
|
||||
u32 ctrl_reg, val, addr;
|
||||
struct perf_event *bp, **slots = __get_cpu_var(bp_on_reg);
|
||||
struct perf_event *bp, **slots;
|
||||
struct arch_hw_breakpoint *info;
|
||||
struct arch_hw_breakpoint_ctrl ctrl;
|
||||
|
||||
slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
|
||||
|
||||
/* The exception entry code places the amended lr in the PC. */
|
||||
addr = regs->ARM_pc;
|
||||
|
||||
/* Check the currently installed breakpoints first. */
|
||||
for (i = 0; i < core_num_brps; ++i) {
|
||||
rcu_read_lock();
|
||||
|
||||
bp = slots[i];
|
||||
|
||||
if (bp == NULL) {
|
||||
rcu_read_unlock();
|
||||
continue;
|
||||
}
|
||||
if (bp == NULL)
|
||||
goto unlock;
|
||||
|
||||
mismatch = 0;
|
||||
info = counter_arch_bp(bp);
|
||||
|
||||
/* Check if the breakpoint value matches. */
|
||||
val = read_wb_reg(ARM_BASE_BVR + i);
|
||||
if (val != (addr & ~0x3))
|
||||
goto unlock;
|
||||
goto mismatch;
|
||||
|
||||
/* Possible match, check the byte address select to confirm. */
|
||||
ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
|
||||
decode_ctrl_reg(ctrl_reg, &ctrl);
|
||||
if ((1 << (addr & 0x3)) & ctrl.len) {
|
||||
mismatch = 1;
|
||||
info = counter_arch_bp(bp);
|
||||
info->trigger = addr;
|
||||
}
|
||||
|
||||
unlock:
|
||||
if ((mismatch && !info->ctrl.mismatch) || bp_is_single_step(bp)) {
|
||||
pr_debug("breakpoint fired: address = 0x%x\n", addr);
|
||||
perf_bp_event(bp, regs);
|
||||
if (!bp->overflow_handler)
|
||||
enable_single_step(bp, addr);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
update_mismatch_flag(i, mismatch);
|
||||
mismatch:
|
||||
/* If we're stepping a breakpoint, it can now be restored. */
|
||||
if (info->step_ctrl.enabled)
|
||||
disable_single_step(bp);
|
||||
unlock:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/* Handle any pending watchpoint single-step breakpoints. */
|
||||
watchpoint_single_step_handler(addr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Called from either the Data Abort Handler [watchpoint] or the
|
||||
* Prefetch Abort Handler [breakpoint].
|
||||
* Prefetch Abort Handler [breakpoint] with preemption disabled.
|
||||
*/
|
||||
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
int ret = 1; /* Unhandled fault. */
|
||||
int ret = 0;
|
||||
u32 dscr;
|
||||
|
||||
/* We must be called with preemption disabled. */
|
||||
WARN_ON(preemptible());
|
||||
|
||||
/* We only handle watchpoints and hardware breakpoints. */
|
||||
ARM_DBG_READ(c1, 0, dscr);
|
||||
|
||||
|
@ -753,25 +813,47 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
|
|||
watchpoint_handler(addr, regs);
|
||||
break;
|
||||
default:
|
||||
goto out;
|
||||
ret = 1; /* Unhandled fault. */
|
||||
}
|
||||
|
||||
ret = 0;
|
||||
out:
|
||||
/*
|
||||
* Re-enable preemption after it was disabled in the
|
||||
* low-level exception handling code.
|
||||
*/
|
||||
preempt_enable();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* One-time initialisation.
|
||||
*/
|
||||
static void __init reset_ctrl_regs(void *unused)
|
||||
static void reset_ctrl_regs(void *unused)
|
||||
{
|
||||
int i;
|
||||
|
||||
/*
|
||||
* v7 debug contains save and restore registers so that debug state
|
||||
* can be maintained across low-power modes without leaving
|
||||
* the debug logic powered up. It is IMPLEMENTATION DEFINED whether
|
||||
* we can write to the debug registers out of reset, so we must
|
||||
* unlock the OS Lock Access Register to avoid taking undefined
|
||||
* instruction exceptions later on.
|
||||
*/
|
||||
if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
|
||||
/*
|
||||
* Unconditionally clear the lock by writing a value
|
||||
* other than 0xC5ACCE55 to the access register.
|
||||
*/
|
||||
asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
|
||||
isb();
|
||||
}
|
||||
|
||||
if (enable_monitor_mode())
|
||||
return;
|
||||
|
||||
for (i = 0; i < core_num_brps; ++i) {
|
||||
/* We must also reset any reserved registers. */
|
||||
for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) {
|
||||
write_wb_reg(ARM_BASE_BCR + i, 0UL);
|
||||
write_wb_reg(ARM_BASE_BVR + i, 0UL);
|
||||
}
|
||||
|
@ -782,45 +864,57 @@ static void __init reset_ctrl_regs(void *unused)
|
|||
}
|
||||
}
|
||||
|
||||
static int __cpuinit dbg_reset_notify(struct notifier_block *self,
|
||||
unsigned long action, void *cpu)
|
||||
{
|
||||
if (action == CPU_ONLINE)
|
||||
smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block __cpuinitdata dbg_reset_nb = {
|
||||
.notifier_call = dbg_reset_notify,
|
||||
};
|
||||
|
||||
static int __init arch_hw_breakpoint_init(void)
|
||||
{
|
||||
int ret = 0;
|
||||
u32 dscr;
|
||||
|
||||
debug_arch = get_debug_arch();
|
||||
|
||||
if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
|
||||
pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
|
||||
ret = -ENODEV;
|
||||
goto out;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Determine how many BRPs/WRPs are available. */
|
||||
core_num_brps = get_num_brps();
|
||||
core_num_reserved_brps = get_num_reserved_brps();
|
||||
core_num_wrps = get_num_wrps();
|
||||
|
||||
pr_info("found %d breakpoint and %d watchpoint registers.\n",
|
||||
core_num_brps, core_num_wrps);
|
||||
core_num_brps + core_num_reserved_brps, core_num_wrps);
|
||||
|
||||
if (core_has_mismatch_bps())
|
||||
pr_info("1 breakpoint reserved for watchpoint single-step.\n");
|
||||
if (core_num_reserved_brps)
|
||||
pr_info("%d breakpoint(s) reserved for watchpoint "
|
||||
"single-step.\n", core_num_reserved_brps);
|
||||
|
||||
ARM_DBG_READ(c1, 0, dscr);
|
||||
if (dscr & ARM_DSCR_HDBGEN) {
|
||||
pr_warning("halting debug mode enabled. Assuming maximum "
|
||||
"watchpoint size of 4 bytes.");
|
||||
} else {
|
||||
/* Work out the maximum supported watchpoint length. */
|
||||
max_watchpoint_len = get_max_wp_len();
|
||||
pr_info("maximum watchpoint size is %u bytes.\n",
|
||||
max_watchpoint_len);
|
||||
|
||||
/*
|
||||
* Reset the breakpoint resources. We assume that a halting
|
||||
* debugger will leave the world in a nice state for us.
|
||||
*/
|
||||
smp_call_function(reset_ctrl_regs, NULL, 1);
|
||||
reset_ctrl_regs(NULL);
|
||||
|
||||
/* Work out the maximum supported watchpoint length. */
|
||||
max_watchpoint_len = get_max_wp_len();
|
||||
pr_info("maximum watchpoint size is %u bytes.\n",
|
||||
max_watchpoint_len);
|
||||
}
|
||||
|
||||
/* Register debug fault handler. */
|
||||
|
@ -829,8 +923,9 @@ static int __init arch_hw_breakpoint_init(void)
|
|||
hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
|
||||
"breakpoint debug exception");
|
||||
|
||||
out:
|
||||
return ret;
|
||||
/* Register hotplug notifier. */
|
||||
register_cpu_notifier(&dbg_reset_nb);
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(arch_hw_breakpoint_init);
|
||||
|
||||
|
|
|
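[Note: all of the resource accounting in the file above comes from single-field reads of the debug ID register (DIDR). A standalone sketch of the decoding, using the same shifts and masks as get_debug_arch() and get_num_brp_resources(); the sample DIDR value is made up:]

	#include <stdint.h>
	#include <stdio.h>

	/* Field positions as used by the ARM_DBG_READ(c0, 0, didr) callers. */
	static unsigned didr_debug_arch(uint32_t didr) { return (didr >> 16) & 0xf; }
	static unsigned didr_num_brps(uint32_t didr) { return ((didr >> 24) & 0xf) + 1; }
	static unsigned didr_num_wrps(uint32_t didr) { return ((didr >> 28) & 0xf) + 1; }

	int main(void)
	{
		uint32_t didr = 0x35141015;	/* hypothetical example value */
		unsigned brps = didr_num_brps(didr);	/* 6 */
		unsigned wrps = didr_num_wrps(didr);	/* 4 */

		/* A mismatch-capable core reserves one BRP per usable WRP. */
		unsigned usable_brps = brps > 1 ? brps - wrps : brps;

		printf("arch=%u brps=%u wrps=%u usable brps=%u\n",
		       didr_debug_arch(didr), brps, wrps, usable_brps);
		return 0;
	}
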
@ -35,8 +35,10 @@
|
|||
#include <linux/list.h>
|
||||
#include <linux/kallsyms.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/ftrace.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/mach/arch.h>
|
||||
#include <asm/mach/irq.h>
|
||||
#include <asm/mach/time.h>
|
||||
|
||||
|
@ -47,8 +49,6 @@
|
|||
#define irq_finish(irq) do { } while (0)
|
||||
#endif
|
||||
|
||||
unsigned int arch_nr_irqs;
|
||||
void (*init_arch_irq)(void) __initdata = NULL;
|
||||
unsigned long irq_err_count;
|
||||
|
||||
int show_interrupts(struct seq_file *p, void *v)
|
||||
|
@ -57,11 +57,20 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||
struct irq_desc *desc;
|
||||
struct irqaction * action;
|
||||
unsigned long flags;
|
||||
int prec, n;
|
||||
|
||||
for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++)
|
||||
n *= 10;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
if (prec < 4)
|
||||
prec = 4;
|
||||
#endif
|
||||
|
||||
if (i == 0) {
|
||||
char cpuname[12];
|
||||
|
||||
seq_printf(p, " ");
|
||||
seq_printf(p, "%*s ", prec, "");
|
||||
for_each_present_cpu(cpu) {
|
||||
sprintf(cpuname, "CPU%d", cpu);
|
||||
seq_printf(p, " %10s", cpuname);
|
||||
|
@ -76,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v)
|
|||
if (!action)
|
||||
goto unlock;
|
||||
|
||||
seq_printf(p, "%3d: ", i);
|
||||
seq_printf(p, "%*d: ", prec, i);
|
||||
for_each_present_cpu(cpu)
|
||||
seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
|
||||
seq_printf(p, " %10s", desc->chip->name ? : "-");
|
||||
|
@ -89,13 +98,15 @@ unlock:
|
|||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
} else if (i == nr_irqs) {
|
||||
#ifdef CONFIG_FIQ
|
||||
show_fiq_list(p, v);
|
||||
show_fiq_list(p, prec);
|
||||
#endif
|
||||
#ifdef CONFIG_SMP
|
||||
show_ipi_list(p);
|
||||
show_local_irqs(p);
|
||||
show_ipi_list(p, prec);
|
||||
#endif
|
||||
seq_printf(p, "Err: %10lu\n", irq_err_count);
|
||||
#ifdef CONFIG_LOCAL_TIMERS
|
||||
show_local_irqs(p, prec);
|
||||
#endif
|
||||
seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -105,7 +116,8 @@ unlock:
|
|||
* come via this function. Instead, they should provide their
|
||||
* own 'handler'
|
||||
*/
|
||||
asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
|
||||
asmlinkage void __exception_irq_entry
|
||||
asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
|
||||
{
|
||||
struct pt_regs *old_regs = set_irq_regs(regs);
|
||||
|
||||
|
@ -154,13 +166,13 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
|
|||
|
||||
void __init init_IRQ(void)
|
||||
{
|
||||
init_arch_irq();
|
||||
machine_desc->init_irq();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SPARSE_IRQ
|
||||
int __init arch_probe_nr_irqs(void)
|
||||
{
|
||||
nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
|
||||
nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
|
||||
return nr_irqs;
|
||||
}
|
||||
#endif
|
||||
|
|
|
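[Note: the prec loop above picks a printf field width wide enough for the largest interrupt number. The same arithmetic as a tiny standalone program; nr_irqs is a sample value, not taken from the patch:]

	#include <stdio.h>

	int main(void)
	{
		int nr_irqs = 1100;	/* sample value for illustration */
		int prec, n;

		/* Identical to the loop in show_interrupts(): grow the field
		 * width until 10^prec can represent every IRQ number. */
		for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++)
			n *= 10;

		printf("nr_irqs=%d -> prec=%d\n", nr_irqs, prec);	/* prec=4 */
		return 0;
	}
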
@@ -19,6 +19,14 @@
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#if defined(CONFIG_CPU_PJ4)
#define PJ4(code...) code
#define XSC(code...)
#else
#define PJ4(code...)
#define XSC(code...) code
#endif

#define MMX_WR0 (0x00)
#define MMX_WR1 (0x08)
#define MMX_WR2 (0x10)

@@ -58,11 +66,17 @@

ENTRY(iwmmxt_task_enable)

	mrc p15, 0, r2, c15, c1, 0
	tst r2, #0x3 @ CP0 and CP1 accessible?
	XSC(mrc p15, 0, r2, c15, c1, 0)
	PJ4(mrc p15, 0, r2, c1, c0, 2)
	@ CP0 and CP1 accessible?
	XSC(tst r2, #0x3)
	PJ4(tst r2, #0xf)
	movne pc, lr @ if so no business here
	orr r2, r2, #0x3 @ enable access to CP0 and CP1
	mcr p15, 0, r2, c15, c1, 0
	@ enable access to CP0 and CP1
	XSC(orr r2, r2, #0x3)
	XSC(mcr p15, 0, r2, c15, c1, 0)
	PJ4(orr r2, r2, #0xf)
	PJ4(mcr p15, 0, r2, c1, c0, 2)

	ldr r3, =concan_owner
	add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area

@@ -179,17 +193,26 @@ ENTRY(iwmmxt_task_disable)
	teqne r1, r2 @ or specified one?
	bne 1f @ no: quit

	mrc p15, 0, r4, c15, c1, 0
	orr r4, r4, #0x3 @ enable access to CP0 and CP1
	mcr p15, 0, r4, c15, c1, 0
	@ enable access to CP0 and CP1
	XSC(mrc p15, 0, r4, c15, c1, 0)
	XSC(orr r4, r4, #0xf)
	XSC(mcr p15, 0, r4, c15, c1, 0)
	PJ4(mrc p15, 0, r4, c1, c0, 2)
	PJ4(orr r4, r4, #0x3)
	PJ4(mcr p15, 0, r4, c1, c0, 2)

	mov r0, #0 @ nothing to load
	str r0, [r3] @ no more current owner
	mrc p15, 0, r2, c2, c0, 0
	mov r2, r2 @ cpwait
	bl concan_save

	bic r4, r4, #0x3 @ disable access to CP0 and CP1
	mcr p15, 0, r4, c15, c1, 0
	@ disable access to CP0 and CP1
	XSC(bic r4, r4, #0x3)
	XSC(mcr p15, 0, r4, c15, c1, 0)
	PJ4(bic r4, r4, #0xf)
	PJ4(mcr p15, 0, r4, c1, c0, 2)

	mrc p15, 0, r2, c2, c0, 0
	mov r2, r2 @ cpwait

@@ -277,8 +300,11 @@ ENTRY(iwmmxt_task_restore)
 */
ENTRY(iwmmxt_task_switch)

	mrc p15, 0, r1, c15, c1, 0
	tst r1, #0x3 @ CP0 and CP1 accessible?
	XSC(mrc p15, 0, r1, c15, c1, 0)
	PJ4(mrc p15, 0, r1, c1, c0, 2)
	@ CP0 and CP1 accessible?
	XSC(tst r1, #0x3)
	PJ4(tst r1, #0xf)
	bne 1f @ yes: block them for next task

	ldr r2, =concan_owner

@@ -287,8 +313,11 @@ ENTRY(iwmmxt_task_switch)
	teq r2, r3 @ next task owns it?
	movne pc, lr @ no: leave Concan disabled

1:	eor r1, r1, #3 @ flip Concan access
	mcr p15, 0, r1, c15, c1, 0
1:	@ flip Concan access
	XSC(eor r1, r1, #0x3)
	XSC(mcr p15, 0, r1, c15, c1, 0)
	PJ4(eor r1, r1, #0xf)
	PJ4(mcr p15, 0, r1, c1, c0, 2)

	mrc p15, 0, r1, c2, c0, 0
	sub pc, lr, r1, lsr #32 @ cpwait and return

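[Note: the XSC()/PJ4() pairs in the file above are a build-time switch: exactly one of the two macros expands to its argument, so only the coprocessor-access sequence for the configured CPU is assembled. In miniature, with the same #defines, in C (the helper name is illustrative):]

	/* Mirrors the definitions at the top of the file: on PJ4 builds the
	 * PJ4() lines survive and the XSC() lines vanish, and vice versa. */
	#if defined(CONFIG_CPU_PJ4)
	#define PJ4(code...) code
	#define XSC(code...)
	#else
	#define PJ4(code...)
	#define XSC(code...) code
	#endif

	/* Only one of the two statements below is compiled in. */
	static inline unsigned int read_cp_access(void)
	{
		unsigned int v;
		XSC(asm volatile("mrc p15, 0, %0, c15, c1, 0" : "=r" (v)));
		PJ4(asm volatile("mrc p15, 0, %0, c1, c0, 2" : "=r" (v)));
		return v;
	}
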
@@ -23,6 +23,8 @@ extern unsigned long kexec_indirection_page;
extern unsigned long kexec_mach_type;
extern unsigned long kexec_boot_atags;

static atomic_t waiting_for_crash_ipi;

/*
 * Provide a dummy crash_notes definition while crash dump arrives to arm.
 * This prevents breakage of crash_notes attribute in kernel/ksysfs.c.

@@ -37,9 +39,37 @@ void machine_kexec_cleanup(struct kimage *image)
{
}

void machine_crash_nonpanic_core(void *unused)
{
	struct pt_regs regs;

	crash_setup_regs(&regs, NULL);
	printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
	       smp_processor_id());
	crash_save_cpu(&regs, smp_processor_id());
	flush_cache_all();

	atomic_dec(&waiting_for_crash_ipi);
	while (1)
		cpu_relax();
}

void machine_crash_shutdown(struct pt_regs *regs)
{
	unsigned long msecs;

	local_irq_disable();

	atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
	smp_call_function(machine_crash_nonpanic_core, NULL, false);
	msecs = 1000; /* Wait at most a second for the other cpus to stop */
	while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
		mdelay(1);
		msecs--;
	}
	if (atomic_read(&waiting_for_crash_ipi) > 0)
		printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n");

	crash_save_cpu(regs, smp_processor_id());

	printk(KERN_INFO "Loading crashdump kernel...\n");

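[Note: machine_crash_shutdown() above coordinates the non-panicking CPUs with a plain atomic countdown: arm the counter, IPI everyone, poll for at most a second while each target checks in on its way into a parking loop. A userspace analogue of the same handshake, with C11 threads standing in for CPUs and IPIs; all names here are illustrative:]

	#include <stdatomic.h>
	#include <stdio.h>
	#include <threads.h>
	#include <time.h>

	static atomic_int waiting;

	static int secondary(void *arg)
	{
		(void)arg;
		/* ...save state, flush caches... then check in and park. */
		atomic_fetch_sub(&waiting, 1);
		return 0;
	}

	int main(void)
	{
		enum { NCPUS = 4 };
		thrd_t t[NCPUS - 1];
		int i, msecs;

		atomic_store(&waiting, NCPUS - 1);
		for (i = 0; i < NCPUS - 1; i++)		/* "send the IPI" */
			thrd_create(&t[i], secondary, NULL);

		/* Wait at most a second, as machine_crash_shutdown() does. */
		for (msecs = 1000; atomic_load(&waiting) > 0 && msecs; msecs--)
			thrd_sleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);

		if (atomic_load(&waiting) > 0)
			printf("some CPUs did not react\n");
		for (i = 0; i < NCPUS - 1; i++)
			thrd_join(t[i], NULL);
		return 0;
	}
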
@@ -67,35 +67,6 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
			      char *secstrings,
			      struct module *mod)
{
#ifdef CONFIG_ARM_UNWIND
	Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
	struct arm_unwind_mapping *maps = mod->arch.map;

	for (s = sechdrs; s < sechdrs_end; s++) {
		char const *secname = secstrings + s->sh_name;

		if (strcmp(".ARM.exidx.init.text", secname) == 0)
			maps[ARM_SEC_INIT].unw_sec = s;
		else if (strcmp(".ARM.exidx.devinit.text", secname) == 0)
			maps[ARM_SEC_DEVINIT].unw_sec = s;
		else if (strcmp(".ARM.exidx", secname) == 0)
			maps[ARM_SEC_CORE].unw_sec = s;
		else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
			maps[ARM_SEC_EXIT].unw_sec = s;
		else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
			maps[ARM_SEC_DEVEXIT].unw_sec = s;
		else if (strcmp(".init.text", secname) == 0)
			maps[ARM_SEC_INIT].sec_text = s;
		else if (strcmp(".devinit.text", secname) == 0)
			maps[ARM_SEC_DEVINIT].sec_text = s;
		else if (strcmp(".text", secname) == 0)
			maps[ARM_SEC_CORE].sec_text = s;
		else if (strcmp(".exit.text", secname) == 0)
			maps[ARM_SEC_EXIT].sec_text = s;
		else if (strcmp(".devexit.text", secname) == 0)
			maps[ARM_SEC_DEVEXIT].sec_text = s;
	}
#endif
	return 0;
}

@@ -300,41 +271,69 @@ apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
	return -ENOEXEC;
}

struct mod_unwind_map {
	const Elf_Shdr *unw_sec;
	const Elf_Shdr *txt_sec;
};

int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
		    struct module *mod)
{
#ifdef CONFIG_ARM_UNWIND
static void register_unwind_tables(struct module *mod)
{
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
	struct mod_unwind_map maps[ARM_SEC_MAX];
	int i;
	for (i = 0; i < ARM_SEC_MAX; ++i) {
		struct arm_unwind_mapping *map = &mod->arch.map[i];
		if (map->unw_sec && map->sec_text)
			map->unwind = unwind_table_add(map->unw_sec->sh_addr,
						       map->unw_sec->sh_size,
						       map->sec_text->sh_addr,
						       map->sec_text->sh_size);

	memset(maps, 0, sizeof(maps));

	for (s = sechdrs; s < sechdrs_end; s++) {
		const char *secname = secstrs + s->sh_name;

		if (!(s->sh_flags & SHF_ALLOC))
			continue;

		if (strcmp(".ARM.exidx.init.text", secname) == 0)
			maps[ARM_SEC_INIT].unw_sec = s;
		else if (strcmp(".ARM.exidx.devinit.text", secname) == 0)
			maps[ARM_SEC_DEVINIT].unw_sec = s;
		else if (strcmp(".ARM.exidx", secname) == 0)
			maps[ARM_SEC_CORE].unw_sec = s;
		else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
			maps[ARM_SEC_EXIT].unw_sec = s;
		else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
			maps[ARM_SEC_DEVEXIT].unw_sec = s;
		else if (strcmp(".init.text", secname) == 0)
			maps[ARM_SEC_INIT].txt_sec = s;
		else if (strcmp(".devinit.text", secname) == 0)
			maps[ARM_SEC_DEVINIT].txt_sec = s;
		else if (strcmp(".text", secname) == 0)
			maps[ARM_SEC_CORE].txt_sec = s;
		else if (strcmp(".exit.text", secname) == 0)
			maps[ARM_SEC_EXIT].txt_sec = s;
		else if (strcmp(".devexit.text", secname) == 0)
			maps[ARM_SEC_DEVEXIT].txt_sec = s;
	}
}

static void unregister_unwind_tables(struct module *mod)
{
	int i = ARM_SEC_MAX;
	while (--i >= 0)
		unwind_table_del(mod->arch.map[i].unwind);
}
#else
static inline void register_unwind_tables(struct module *mod) { }
static inline void unregister_unwind_tables(struct module *mod) { }
	for (i = 0; i < ARM_SEC_MAX; i++)
		if (maps[i].unw_sec && maps[i].txt_sec)
			mod->arch.unwind[i] =
				unwind_table_add(maps[i].unw_sec->sh_addr,
						 maps[i].unw_sec->sh_size,
						 maps[i].txt_sec->sh_addr,
						 maps[i].txt_sec->sh_size);
#endif

int
module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
		struct module *module)
{
	register_unwind_tables(module);
	return 0;
}

void
module_arch_cleanup(struct module *mod)
{
	unregister_unwind_tables(mod);
#ifdef CONFIG_ARM_UNWIND
	int i;

	for (i = 0; i < ARM_SEC_MAX; i++)
		if (mod->arch.unwind[i])
			unwind_table_del(mod->arch.unwind[i]);
#endif
}

(Diff for the next file not shown because of its large size.)

@@ -0,0 +1,672 @@
/*
 * ARMv6 Performance counter handling code.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *	- change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *	  effectively stops the counter from counting.
 *	- disable the counter's interrupt generation (each counter has its
 *	  own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *	- enable the counter's interrupt generation.
 *	- set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */

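[Note: a compact illustration of the stop/start dance the comment above describes, using local stand-ins for the ARMV6_PMCR_* constants defined further down in this file (the macro and function names here are illustrative, not the driver's):]

	#define PMCR_COUNT0_IEN		(1 << 4)
	#define PMCR_EVT_COUNT0_SHIFT	20
	#define PMCR_EVT_COUNT0_MASK	(0xFFUL << PMCR_EVT_COUNT0_SHIFT)
	#define EVT_NOP			0x20UL	/* ETMEXTOUT[0]: never counts */

	/* Stop counter 0: point it at the dead event and mask its interrupt. */
	static unsigned long pmcr_disable_count0(unsigned long pmcr)
	{
		pmcr &= ~(PMCR_COUNT0_IEN | PMCR_EVT_COUNT0_MASK);
		return pmcr | (EVT_NOP << PMCR_EVT_COUNT0_SHIFT);
	}

	/* Re-enable counter 0 for a new event, e.g. 0x7 (instructions). */
	static unsigned long pmcr_enable_count0(unsigned long pmcr, unsigned long event)
	{
		pmcr &= ~PMCR_EVT_COUNT0_MASK;
		return pmcr | (event << PMCR_EVT_COUNT0_SHIFT) | PMCR_COUNT0_IEN;
	}
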
|
||||
#ifdef CONFIG_CPU_V6
|
||||
enum armv6_perf_types {
|
||||
ARMV6_PERFCTR_ICACHE_MISS = 0x0,
|
||||
ARMV6_PERFCTR_IBUF_STALL = 0x1,
|
||||
ARMV6_PERFCTR_DDEP_STALL = 0x2,
|
||||
ARMV6_PERFCTR_ITLB_MISS = 0x3,
|
||||
ARMV6_PERFCTR_DTLB_MISS = 0x4,
|
||||
ARMV6_PERFCTR_BR_EXEC = 0x5,
|
||||
ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
|
||||
ARMV6_PERFCTR_INSTR_EXEC = 0x7,
|
||||
ARMV6_PERFCTR_DCACHE_HIT = 0x9,
|
||||
ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
|
||||
ARMV6_PERFCTR_DCACHE_MISS = 0xB,
|
||||
ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
|
||||
ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
|
||||
ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
|
||||
ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
|
||||
ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
|
||||
ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
|
||||
ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
|
||||
ARMV6_PERFCTR_NOP = 0x20,
|
||||
};
|
||||
|
||||
enum armv6_counters {
|
||||
ARMV6_CYCLE_COUNTER = 1,
|
||||
ARMV6_COUNTER0,
|
||||
ARMV6_COUNTER1,
|
||||
};
|
||||
|
||||
/*
|
||||
* The hardware events that we support. We do support cache operations but
|
||||
* we have harvard caches and no way to combine instruction and data
|
||||
* accesses/misses in hardware.
|
||||
*/
|
||||
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
|
||||
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
|
||||
};
|
||||
|
||||
static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
|
||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
|
||||
[C(L1D)] = {
|
||||
/*
|
||||
* The performance counters don't differentiate between read
|
||||
* and write accesses/misses so this isn't strictly correct,
|
||||
* but it's the best we can do. Writes and reads get
|
||||
* combined.
|
||||
*/
|
||||
[C(OP_READ)] = {
|
||||
[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
|
||||
[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
|
||||
},
|
||||
[C(OP_WRITE)] = {
|
||||
[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
|
||||
[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
|
||||
},
|
||||
[C(OP_PREFETCH)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
},
|
||||
[C(L1I)] = {
|
||||
[C(OP_READ)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
|
||||
},
|
||||
[C(OP_WRITE)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
|
||||
},
|
||||
[C(OP_PREFETCH)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
},
|
||||
[C(LL)] = {
|
||||
[C(OP_READ)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
[C(OP_WRITE)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
[C(OP_PREFETCH)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
},
|
||||
[C(DTLB)] = {
|
||||
/*
|
||||
* The ARM performance counters can count micro DTLB misses,
|
||||
* micro ITLB misses and main TLB misses. There isn't an event
|
||||
* for TLB misses, so use the micro misses here and if users
|
||||
* want the main TLB misses they can use a raw counter.
|
||||
*/
|
||||
[C(OP_READ)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
|
||||
},
|
||||
[C(OP_WRITE)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
|
||||
},
|
||||
[C(OP_PREFETCH)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
},
|
||||
[C(ITLB)] = {
|
||||
[C(OP_READ)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
|
||||
},
|
||||
[C(OP_WRITE)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
|
||||
},
|
||||
[C(OP_PREFETCH)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
},
|
||||
[C(BPU)] = {
|
||||
[C(OP_READ)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
[C(OP_WRITE)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
[C(OP_PREFETCH)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
enum armv6mpcore_perf_types {
|
||||
ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0,
|
||||
ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1,
|
||||
ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2,
|
||||
ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3,
|
||||
ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4,
|
||||
ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5,
|
||||
ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6,
|
||||
ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7,
|
||||
ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8,
|
||||
ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
|
||||
ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB,
|
||||
ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
|
||||
ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD,
|
||||
ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
|
||||
ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF,
|
||||
ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10,
|
||||
ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
|
||||
ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12,
|
||||
ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13,
|
||||
ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF,
|
||||
};
|
||||
|
||||
/*
|
||||
* The hardware events that we support. We do support cache operations but
|
||||
* we have harvard caches and no way to combine instruction and data
|
||||
* accesses/misses in hardware.
|
||||
*/
|
||||
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
|
||||
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
|
||||
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
|
||||
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
|
||||
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
|
||||
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
|
||||
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
|
||||
};
|
||||
|
||||
static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
|
||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
|
||||
[C(L1D)] = {
|
||||
[C(OP_READ)] = {
|
||||
[C(RESULT_ACCESS)] =
|
||||
ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
|
||||
[C(RESULT_MISS)] =
|
||||
ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
|
||||
},
|
||||
[C(OP_WRITE)] = {
|
||||
[C(RESULT_ACCESS)] =
|
||||
ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
|
||||
[C(RESULT_MISS)] =
|
||||
ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
|
||||
},
|
||||
[C(OP_PREFETCH)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
},
|
||||
[C(L1I)] = {
|
||||
[C(OP_READ)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
|
||||
},
|
||||
[C(OP_WRITE)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
|
||||
},
|
||||
[C(OP_PREFETCH)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
},
|
||||
[C(LL)] = {
|
||||
[C(OP_READ)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
[C(OP_WRITE)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
[C(OP_PREFETCH)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
},
|
||||
[C(DTLB)] = {
|
||||
/*
|
||||
* The ARM performance counters can count micro DTLB misses,
|
||||
* micro ITLB misses and main TLB misses. There isn't an event
|
||||
* for TLB misses, so use the micro misses here and if users
|
||||
* want the main TLB misses they can use a raw counter.
|
||||
*/
|
||||
[C(OP_READ)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
|
||||
},
|
||||
[C(OP_WRITE)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
|
||||
},
|
||||
[C(OP_PREFETCH)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
},
|
||||
[C(ITLB)] = {
|
||||
[C(OP_READ)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
|
||||
},
|
||||
[C(OP_WRITE)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
|
||||
},
|
||||
[C(OP_PREFETCH)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
},
|
||||
[C(BPU)] = {
|
||||
[C(OP_READ)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
[C(OP_WRITE)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
[C(OP_PREFETCH)] = {
|
||||
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
|
||||
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
|
||||
},
|
||||
},
|
||||
};
|
||||
|
||||
static inline unsigned long
|
||||
armv6_pmcr_read(void)
|
||||
{
|
||||
u32 val;
|
||||
asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void
|
||||
armv6_pmcr_write(unsigned long val)
|
||||
{
|
||||
asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
|
||||
}
|
||||
|
||||
#define ARMV6_PMCR_ENABLE (1 << 0)
|
||||
#define ARMV6_PMCR_CTR01_RESET (1 << 1)
|
||||
#define ARMV6_PMCR_CCOUNT_RESET (1 << 2)
|
||||
#define ARMV6_PMCR_CCOUNT_DIV (1 << 3)
|
||||
#define ARMV6_PMCR_COUNT0_IEN (1 << 4)
|
||||
#define ARMV6_PMCR_COUNT1_IEN (1 << 5)
|
||||
#define ARMV6_PMCR_CCOUNT_IEN (1 << 6)
|
||||
#define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8)
|
||||
#define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9)
|
||||
#define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10)
|
||||
#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20
|
||||
#define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
|
||||
#define ARMV6_PMCR_EVT_COUNT1_SHIFT 12
|
||||
#define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
|
||||
|
||||
#define ARMV6_PMCR_OVERFLOWED_MASK \
|
||||
(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
|
||||
ARMV6_PMCR_CCOUNT_OVERFLOW)
|
||||
|
||||
static inline int
|
||||
armv6_pmcr_has_overflowed(unsigned long pmcr)
|
||||
{
|
||||
return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
|
||||
}
|
||||
|
||||
static inline int
|
||||
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
|
||||
enum armv6_counters counter)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
if (ARMV6_CYCLE_COUNTER == counter)
|
||||
ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
|
||||
else if (ARMV6_COUNTER0 == counter)
|
||||
ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
|
||||
else if (ARMV6_COUNTER1 == counter)
|
||||
ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
|
||||
else
|
||||
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline u32
|
||||
armv6pmu_read_counter(int counter)
|
||||
{
|
||||
unsigned long value = 0;
|
||||
|
||||
if (ARMV6_CYCLE_COUNTER == counter)
|
||||
asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
|
||||
else if (ARMV6_COUNTER0 == counter)
|
||||
asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
|
||||
else if (ARMV6_COUNTER1 == counter)
|
||||
asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
|
||||
else
|
||||
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
|
||||
|
||||
return value;
|
||||
}
|
||||
|
||||
static inline void
|
||||
armv6pmu_write_counter(int counter,
|
||||
u32 value)
|
||||
{
|
||||
if (ARMV6_CYCLE_COUNTER == counter)
|
||||
asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
|
||||
else if (ARMV6_COUNTER0 == counter)
|
||||
asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
|
||||
else if (ARMV6_COUNTER1 == counter)
|
||||
asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
|
||||
else
|
||||
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
|
||||
}
|
||||
|
||||
static void
|
||||
armv6pmu_enable_event(struct hw_perf_event *hwc,
|
||||
int idx)
|
||||
{
|
||||
unsigned long val, mask, evt, flags;
|
||||
|
||||
if (ARMV6_CYCLE_COUNTER == idx) {
|
||||
mask = 0;
|
||||
evt = ARMV6_PMCR_CCOUNT_IEN;
|
||||
} else if (ARMV6_COUNTER0 == idx) {
|
||||
mask = ARMV6_PMCR_EVT_COUNT0_MASK;
|
||||
evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
|
||||
ARMV6_PMCR_COUNT0_IEN;
|
||||
} else if (ARMV6_COUNTER1 == idx) {
|
||||
mask = ARMV6_PMCR_EVT_COUNT1_MASK;
|
||||
evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
|
||||
ARMV6_PMCR_COUNT1_IEN;
|
||||
} else {
|
||||
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Mask out the current event and set the counter to count the event
|
||||
* that we're interested in.
|
||||
*/
|
||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
||||
val = armv6_pmcr_read();
|
||||
val &= ~mask;
|
||||
val |= evt;
|
||||
armv6_pmcr_write(val);
|
||||
raw_spin_unlock_irqrestore(&pmu_lock, flags);
|
||||
}
|
||||
|
||||
static irqreturn_t
|
||||
armv6pmu_handle_irq(int irq_num,
|
||||
void *dev)
|
||||
{
|
||||
unsigned long pmcr = armv6_pmcr_read();
|
||||
struct perf_sample_data data;
|
||||
struct cpu_hw_events *cpuc;
|
||||
struct pt_regs *regs;
|
||||
int idx;
|
||||
|
||||
if (!armv6_pmcr_has_overflowed(pmcr))
|
||||
return IRQ_NONE;
|
||||
|
||||
regs = get_irq_regs();
|
||||
|
||||
/*
|
||||
* The interrupts are cleared by writing the overflow flags back to
|
||||
* the control register. All of the other bits don't have any effect
|
||||
* if they are rewritten, so write the whole value back.
|
||||
*/
|
||||
armv6_pmcr_write(pmcr);
|
||||
|
||||
perf_sample_data_init(&data, 0);
|
||||
|
||||
cpuc = &__get_cpu_var(cpu_hw_events);
|
||||
for (idx = 0; idx <= armpmu->num_events; ++idx) {
|
||||
struct perf_event *event = cpuc->events[idx];
|
||||
struct hw_perf_event *hwc;
|
||||
|
||||
if (!test_bit(idx, cpuc->active_mask))
|
||||
continue;
|
||||
|
||||
/*
|
||||
* We have a single interrupt for all counters. Check that
|
||||
* each counter has overflowed before we process it.
|
||||
*/
|
||||
if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
|
||||
continue;
|
||||
|
||||
hwc = &event->hw;
|
||||
armpmu_event_update(event, hwc, idx);
|
||||
data.period = event->hw.last_period;
|
||||
if (!armpmu_event_set_period(event, hwc, idx))
|
||||
continue;
|
||||
|
||||
if (perf_event_overflow(event, 0, &data, regs))
|
||||
armpmu->disable(hwc, idx);
|
||||
}
|
||||
|
||||
/*
|
||||
* Handle the pending perf events.
|
||||
*
|
||||
* Note: this call *must* be run with interrupts disabled. For
|
||||
* platforms that can have the PMU interrupts raised as an NMI, this
|
||||
* will not work.
|
||||
*/
|
||||
irq_work_run();
|
||||
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static void
|
||||
armv6pmu_start(void)
|
||||
{
|
||||
unsigned long flags, val;
|
||||
|
||||
raw_spin_lock_irqsave(&pmu_lock, flags);
|
||||
val = armv6_pmcr_read();
|
||||
val |= ARMV6_PMCR_ENABLE;
|
||||
armv6_pmcr_write(val);
|
||||
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
armv6pmu_stop(void)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static int
armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
		       struct hw_perf_event *event)
{
	/* Always place a cycle counter into the cycle counter. */
	if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV6_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * counter0 and counter1.
		 */
		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
			return ARMV6_COUNTER1;

		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
			return ARMV6_COUNTER0;

		/* The counters are all in use. */
		return -EAGAIN;
	}
}

static void
armv6pmu_disable_event(struct hw_perf_event *hwc,
		       int idx)
{
	unsigned long val, mask, evt, flags;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask = ARMV6_PMCR_CCOUNT_IEN;
		evt = 0;
	} else if (ARMV6_COUNTER0 == idx) {
		mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
		evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
	} else if (ARMV6_COUNTER1 == idx) {
		mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
		evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the number
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
			      int idx)
{
	unsigned long val, mask, flags, evt = 0;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask = ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask = ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask = ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
	 * simply disable the interrupt reporting.
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static const struct arm_pmu armv6pmu = {
	.id			= ARM_PERF_PMU_ID_V6,
	.name			= "v6",
	.handle_irq		= armv6pmu_handle_irq,
	.enable			= armv6pmu_enable_event,
	.disable		= armv6pmu_disable_event,
	.read_counter		= armv6pmu_read_counter,
	.write_counter		= armv6pmu_write_counter,
	.get_event_idx		= armv6pmu_get_event_idx,
	.start			= armv6pmu_start,
	.stop			= armv6pmu_stop,
	.cache_map		= &armv6_perf_cache_map,
	.event_map		= &armv6_perf_map,
	.raw_event_mask		= 0xFF,
	.num_events		= 3,
	.max_period		= (1LLU << 32) - 1,
};

static const struct arm_pmu *__init armv6pmu_init(void)
{
	return &armv6pmu;
}

/*
 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
 * that some of the events have different enumerations and that there is no
 * *hack* to stop the programmable counters. To stop the counters we simply
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */
static const struct arm_pmu armv6mpcore_pmu = {
	.id			= ARM_PERF_PMU_ID_V6MP,
	.name			= "v6mpcore",
	.handle_irq		= armv6pmu_handle_irq,
	.enable			= armv6pmu_enable_event,
	.disable		= armv6mpcore_pmu_disable_event,
	.read_counter		= armv6pmu_read_counter,
	.write_counter		= armv6pmu_write_counter,
	.get_event_idx		= armv6pmu_get_event_idx,
	.start			= armv6pmu_start,
	.stop			= armv6pmu_stop,
	.cache_map		= &armv6mpcore_perf_cache_map,
	.event_map		= &armv6mpcore_perf_map,
	.raw_event_mask		= 0xFF,
	.num_events		= 3,
	.max_period		= (1LLU << 32) - 1,
};

static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
	return &armv6mpcore_pmu;
}
#else
static const struct arm_pmu *__init armv6pmu_init(void)
{
	return NULL;
}

static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
	return NULL;
}
#endif	/* CONFIG_CPU_V6 */

@@ -0,0 +1,906 @@
/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */
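
/*
 * Note on event numbering: the common events below (0x00-0x12) are the
 * architecturally defined ARMv7 PMU events, while the 0x40+ ranges in the
 * Cortex-A8/A9 enums are implementation specific, so the same number can
 * mean different things on different cores. 0xFF never reaches the
 * hardware; it is used within this file to route an event onto the
 * dedicated cycle counter.
 */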

#ifdef CONFIG_CPU_V7
/* Common ARMv7 event types */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR		= 0x00,
	ARMV7_PERFCTR_IFETCH_MISS		= 0x01,
	ARMV7_PERFCTR_ITLB_MISS			= 0x02,
	ARMV7_PERFCTR_DCACHE_REFILL		= 0x03,
	ARMV7_PERFCTR_DCACHE_ACCESS		= 0x04,
	ARMV7_PERFCTR_DTLB_REFILL		= 0x05,
	ARMV7_PERFCTR_DREAD			= 0x06,
	ARMV7_PERFCTR_DWRITE			= 0x07,

	ARMV7_PERFCTR_EXC_TAKEN			= 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED		= 0x0A,
	ARMV7_PERFCTR_CID_WRITE			= 0x0B,
	/* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE			= 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH		= 0x0D,
	ARMV7_PERFCTR_UNALIGNED_ACCESS		= 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED	= 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES		= 0x11,

	ARMV7_PERFCTR_PC_BRANCH_MIS_USED	= 0x12,

	ARMV7_PERFCTR_CPU_CYCLES		= 0xFF
};

/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_PERFCTR_INSTR_EXECUTED		= 0x08,

	ARMV7_PERFCTR_PC_PROC_RETURN		= 0x0E,

	ARMV7_PERFCTR_WRITE_BUFFER_FULL		= 0x40,
	ARMV7_PERFCTR_L2_STORE_MERGED		= 0x41,
	ARMV7_PERFCTR_L2_STORE_BUFF		= 0x42,
	ARMV7_PERFCTR_L2_ACCESS			= 0x43,
	ARMV7_PERFCTR_L2_CACH_MISS		= 0x44,
	ARMV7_PERFCTR_AXI_READ_CYCLES		= 0x45,
	ARMV7_PERFCTR_AXI_WRITE_CYCLES		= 0x46,
	ARMV7_PERFCTR_MEMORY_REPLAY		= 0x47,
	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY	= 0x48,
	ARMV7_PERFCTR_L1_DATA_MISS		= 0x49,
	ARMV7_PERFCTR_L1_INST_MISS		= 0x4A,
	ARMV7_PERFCTR_L1_DATA_COLORING		= 0x4B,
	ARMV7_PERFCTR_L1_NEON_DATA		= 0x4C,
	ARMV7_PERFCTR_L1_NEON_CACH_DATA		= 0x4D,
	ARMV7_PERFCTR_L2_NEON			= 0x4E,
	ARMV7_PERFCTR_L2_NEON_HIT		= 0x4F,
	ARMV7_PERFCTR_L1_INST			= 0x50,
	ARMV7_PERFCTR_PC_RETURN_MIS_PRED	= 0x51,
	ARMV7_PERFCTR_PC_BRANCH_FAILED		= 0x52,
	ARMV7_PERFCTR_PC_BRANCH_TAKEN		= 0x53,
	ARMV7_PERFCTR_PC_BRANCH_EXECUTED	= 0x54,
	ARMV7_PERFCTR_OP_EXECUTED		= 0x55,
	ARMV7_PERFCTR_CYCLES_INST_STALL		= 0x56,
	ARMV7_PERFCTR_CYCLES_INST		= 0x57,
	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL	= 0x58,
	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL	= 0x59,
	ARMV7_PERFCTR_NEON_CYCLES		= 0x5A,

	ARMV7_PERFCTR_PMU0_EVENTS		= 0x70,
	ARMV7_PERFCTR_PMU1_EVENTS		= 0x71,
	ARMV7_PERFCTR_PMU_EVENTS		= 0x72,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC	= 0x40,
	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC	= 0x41,
	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC	= 0x42,

	ARMV7_PERFCTR_COHERENT_LINE_MISS	= 0x50,
	ARMV7_PERFCTR_COHERENT_LINE_HIT		= 0x51,

	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES	= 0x60,
	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES	= 0x61,
	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES	= 0x62,
	ARMV7_PERFCTR_STREX_EXECUTED_PASSED	= 0x63,
	ARMV7_PERFCTR_STREX_EXECUTED_FAILED	= 0x64,
	ARMV7_PERFCTR_DATA_EVICTION		= 0x65,
	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST	= 0x66,
	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY		= 0x67,
	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE	= 0x68,

	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS	= 0x6E,

	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST	= 0x70,
	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST	= 0x71,
	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST	= 0x72,
	ARMV7_PERFCTR_FP_EXECUTED_INST		= 0x73,
	ARMV7_PERFCTR_NEON_EXECUTED_INST	= 0x74,

	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES	= 0x80,
	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES	= 0x81,
	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES	= 0x82,
	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES	= 0x83,
	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES	= 0x84,
	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES	= 0x85,
	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES	= 0x86,

	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES	= 0x8A,
	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES	= 0x8B,

	ARMV7_PERFCTR_ISB_INST			= 0x90,
	ARMV7_PERFCTR_DSB_INST			= 0x91,
	ARMV7_PERFCTR_DMB_INST			= 0x92,
	ARMV7_PERFCTR_EXT_INTERRUPTS		= 0x93,

	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED	= 0xA0,
	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED	= 0xA1,
	ARMV7_PERFCTR_PLE_FIFO_FLUSH		= 0xA2,
	ARMV7_PERFCTR_PLE_RQST_COMPLETED	= 0xA3,
	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW		= 0xA4,
	ARMV7_PERFCTR_PLE_RQST_PROG		= 0xA5
};

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]		= HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported.
		 * If users want the DTLB refills misses a raw counter
		 * must be used.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		=
					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV7_PERFCTR_COHERENT_LINE_HIT,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV7_PERFCTR_COHERENT_LINE_MISS,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported.
		 * If users want the DTLB refills misses a raw counter
		 * must be used.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]
					= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Perf Events counters
 */
enum armv7_counters {
	ARMV7_CYCLE_COUNTER		= 1,	/* Cycle counter */
	ARMV7_COUNTER0			= 2,	/* First event counter */
};

/*
 * The cycle counter is ARMV7_CYCLE_COUNTER.
 * The first event counter is ARMV7_COUNTER0.
 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
 */
#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
#define	ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define	ARMV7_PMNC_N_MASK	0x1f
#define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * Available counters
 */
#define ARMV7_CNT0		0	/* First event counter */
#define ARMV7_CCNT		31	/* Cycle counter */

/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)
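
/*
 * e.g. the first event counter, ARMV7_COUNTER0 (= 2), maps to hardware
 * counter number 0, so the helpers below subtract ARMV7_EVENT_CNT_TO_CNTx
 * from a perf counter index before programming the counter selection
 * register (PMSELR).
 */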

/*
 * CNTENS: counters enable reg
 */
#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)

/*
 * CNTENC: counters disable reg
 */
#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)

/*
 * INTENS: counters overflow interrupt enable reg
 */
#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)

/*
 * INTENC: counters overflow interrupt disable reg
 */
#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)

/*
 * EVTSEL: Event selection reg
 */
#define	ARMV7_EVTSEL_MASK	0xff		/* Mask for writable bits */

/*
 * SELECT: Counter selection reg
 */
#define	ARMV7_SELECT_MASK	0x1f		/* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

static inline unsigned long armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(unsigned long val)
{
	val &= ARMV7_PMNC_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
					enum armv7_counters counter)
{
	int ret = 0;

	if (counter == ARMV7_CYCLE_COUNTER)
		ret = pmnc & ARMV7_FLAG_C;
	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
		ret = pmnc & ARMV7_FLAG_P(counter);
	else
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), counter);

	return ret;
}

static inline int armv7_pmnc_select_counter(unsigned int idx)
{
	u32 val;

	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
		pr_err("CPU%u selecting wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);
		return -1;
	}

	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));

	return idx;
}

static inline u32 armv7pmu_read_counter(int idx)
{
	unsigned long value = 0;

	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mrc p15, 0, %0, c9, c13, 2"
				     : "=r" (value));
	} else
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);

	return value;
}

static inline void armv7pmu_write_counter(int idx, u32 value)
{
	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mcr p15, 0, %0, c9, c13, 2"
				     : : "r" (value));
	} else
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
}

static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTSEL_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}

static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENS_C;
	else
		val = ARMV7_CNTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));

	return idx;
}

static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENC_C;
	else
		val = ARMV7_CNTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));

	return idx;
}

static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter"
			" interrupt enable %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENS_C;
	else
		val = ARMV7_INTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));

	return idx;
}

static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter"
			" interrupt enable %d\n", smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENC_C;
	else
		val = ARMV7_INTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));

	return idx;
}
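
/*
 * The overflow flag register (PMOVSR) is write-one-to-clear: writing back
 * the value just read acknowledges exactly the overflows that were observed
 * and no others.
 */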
static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}

#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
	}
}
#endif

static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't need to set the event if it's a cycle count
	 */
	if (idx != ARMV7_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv7pmu_start(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void armv7pmu_stop(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	int idx;

	/* Always place a cycle counter into the cycle counter. */
	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * the events counters
		 */
		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
			if (!test_and_set_bit(idx, cpuc->used_mask))
				return idx;
		}

		/* The counters are all in use. */
		return -EAGAIN;
	}
}

static struct arm_pmu armv7pmu = {
	.handle_irq		= armv7pmu_handle_irq,
	.enable			= armv7pmu_enable_event,
	.disable		= armv7pmu_disable_event,
	.read_counter		= armv7pmu_read_counter,
	.write_counter		= armv7pmu_write_counter,
	.get_event_idx		= armv7pmu_get_event_idx,
	.start			= armv7pmu_start,
	.stop			= armv7pmu_stop,
	.raw_event_mask		= 0xFF,
	.max_period		= (1LLU << 32) - 1,
};
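
/*
 * Unlike the v6 PMUs above, armv7pmu is deliberately non-const: the
 * Cortex-A8/A9 init functions below fill in .id, .name, the event maps and
 * .num_events (probed from the PMNC N field) before handing it back.
 */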

static u32 __init armv7_reset_read_pmnc(void)
{
	u32 nb_cnt;

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}

static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
	armv7pmu.name		= "ARMv7 Cortex-A8";
	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
	armv7pmu.event_map	= &armv7_a8_perf_map;
	armv7pmu.num_events	= armv7_reset_read_pmnc();
	return &armv7pmu;
}

static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
	armv7pmu.name		= "ARMv7 Cortex-A9";
	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
	armv7pmu.event_map	= &armv7_a9_perf_map;
	armv7pmu.num_events	= armv7_reset_read_pmnc();
	return &armv7pmu;
}
#else
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
	return NULL;
}

static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
	return NULL;
}
#endif	/* CONFIG_CPU_V7 */

@@ -0,0 +1,807 @@
/*
 * ARMv5 [xscale] Performance counter handling code.
 *
 * Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * Based on the previous xscale OProfile code.
 *
 * There are two variants of the xscale PMU that we support:
 * 	- xscale1pmu: 2 event counters and a cycle counter
 * 	- xscale2pmu: 4 event counters and a cycle counter
 * The two variants share event definitions, but have different
 * PMU structures.
 */

#ifdef CONFIG_CPU_XSCALE
enum xscale_perf_types {
	XSCALE_PERFCTR_ICACHE_MISS		= 0x00,
	XSCALE_PERFCTR_ICACHE_NO_DELIVER	= 0x01,
	XSCALE_PERFCTR_DATA_STALL		= 0x02,
	XSCALE_PERFCTR_ITLB_MISS		= 0x03,
	XSCALE_PERFCTR_DTLB_MISS		= 0x04,
	XSCALE_PERFCTR_BRANCH			= 0x05,
	XSCALE_PERFCTR_BRANCH_MISS		= 0x06,
	XSCALE_PERFCTR_INSTRUCTION		= 0x07,
	XSCALE_PERFCTR_DCACHE_FULL_STALL	= 0x08,
	XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG	= 0x09,
	XSCALE_PERFCTR_DCACHE_ACCESS		= 0x0A,
	XSCALE_PERFCTR_DCACHE_MISS		= 0x0B,
	XSCALE_PERFCTR_DCACHE_WRITE_BACK	= 0x0C,
	XSCALE_PERFCTR_PC_CHANGED		= 0x0D,
	XSCALE_PERFCTR_BCU_REQUEST		= 0x10,
	XSCALE_PERFCTR_BCU_FULL			= 0x11,
	XSCALE_PERFCTR_BCU_DRAIN		= 0x12,
	XSCALE_PERFCTR_BCU_ECC_NO_ELOG		= 0x14,
	XSCALE_PERFCTR_BCU_1_BIT_ERR		= 0x15,
	XSCALE_PERFCTR_RMW			= 0x16,
	/* XSCALE_PERFCTR_CCNT is not hardware defined */
	XSCALE_PERFCTR_CCNT			= 0xFE,
	XSCALE_PERFCTR_UNUSED			= 0xFF,
};
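
/*
 * 0xFE never reaches the hardware: xscale{1,2}pmu_get_event_idx() below
 * intercept it and steer the event onto the dedicated cycle counter, while
 * 0xFF is what gets programmed into an event slot to park it.
 */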

enum xscale_counters {
	XSCALE_CYCLE_COUNTER	= 1,
	XSCALE_COUNTER0,
	XSCALE_COUNTER1,
	XSCALE_COUNTER2,
	XSCALE_COUNTER3,
};

static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = XSCALE_PERFCTR_CCNT,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = XSCALE_PERFCTR_INSTRUCTION,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = XSCALE_PERFCTR_BRANCH_MISS,
	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
};

static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					   [PERF_COUNT_HW_CACHE_OP_MAX]
					   [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= XSCALE_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DCACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

#define	XSCALE_PMU_ENABLE	0x001
#define XSCALE_PMN_RESET	0x002
#define	XSCALE_CCNT_RESET	0x004
#define	XSCALE_PMU_RESET	(XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64	0x008

#define XSCALE1_OVERFLOWED_MASK	0x700
#define XSCALE1_CCOUNT_OVERFLOW	0x400
#define XSCALE1_COUNT0_OVERFLOW	0x100
#define XSCALE1_COUNT1_OVERFLOW	0x200
#define XSCALE1_CCOUNT_INT_EN	0x040
#define XSCALE1_COUNT0_INT_EN	0x010
#define XSCALE1_COUNT1_INT_EN	0x020
#define XSCALE1_COUNT0_EVT_SHFT	12
#define XSCALE1_COUNT0_EVT_MASK	(0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT	20
#define XSCALE1_COUNT1_EVT_MASK	(0xff << XSCALE1_COUNT1_EVT_SHFT)

static inline u32
xscale1pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
	return val;
}

static inline void
xscale1pmu_write_pmnc(u32 val)
{
	/* upper 4bits and 7, 11 are write-as-0 */
	val &= 0xffff77f;
	asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}

static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
					enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}

static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * NOTE: there's an A stepping erratum that states if an overflow
	 *       bit already exists and another occurs, the previous
	 *       Overflow bit gets cleared. There's no workaround.
	 *	 Fixed in B stepping or later.
	 */
	pmnc = xscale1pmu_read_pmnc();

	/*
	 * Write the value back to clear the overflow flags. Overflow
	 * flags remain in pmnc for use below. We also disable the PMU
	 * while we process the interrupt.
	 */
	xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
		return IRQ_NONE;

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

static void
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = 0;
		evt = XSCALE1_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
			XSCALE1_COUNT0_INT_EN;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_EVT_MASK;
		evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
			XSCALE1_COUNT1_INT_EN;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long val, mask, evt, flags;

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		mask = XSCALE1_CCOUNT_INT_EN;
		evt = 0;
		break;
	case XSCALE_COUNTER0:
		mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
		evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~mask;
	val |= evt;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static int
xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
			struct hw_perf_event *event)
{
	if (XSCALE_PERFCTR_CCNT == event->config_base) {
		if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return XSCALE_CYCLE_COUNTER;
	} else {
		if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
			return XSCALE_COUNTER1;

		if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
			return XSCALE_COUNTER0;

		return -EAGAIN;
	}
}

static void
xscale1pmu_start(void)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val |= XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale1pmu_stop(void)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale1pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static inline u32
xscale1pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void
xscale1pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
		break;
	}
}

static const struct arm_pmu xscale1pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE1,
	.name		= "xscale1",
	.handle_irq	= xscale1pmu_handle_irq,
	.enable		= xscale1pmu_enable_event,
	.disable	= xscale1pmu_disable_event,
	.read_counter	= xscale1pmu_read_counter,
	.write_counter	= xscale1pmu_write_counter,
	.get_event_idx	= xscale1pmu_get_event_idx,
	.start		= xscale1pmu_start,
	.stop		= xscale1pmu_stop,
	.cache_map	= &xscale_perf_cache_map,
	.event_map	= &xscale_perf_map,
	.raw_event_mask	= 0xFF,
	.num_events	= 3,
	.max_period	= (1LLU << 32) - 1,
};

static const struct arm_pmu *__init xscale1pmu_init(void)
{
	return &xscale1pmu;
}

#define XSCALE2_OVERFLOWED_MASK	0x01f
#define XSCALE2_CCOUNT_OVERFLOW	0x001
#define XSCALE2_COUNT0_OVERFLOW	0x002
#define XSCALE2_COUNT1_OVERFLOW	0x004
#define XSCALE2_COUNT2_OVERFLOW	0x008
#define XSCALE2_COUNT3_OVERFLOW	0x010
#define XSCALE2_CCOUNT_INT_EN	0x001
#define XSCALE2_COUNT0_INT_EN	0x002
#define XSCALE2_COUNT1_INT_EN	0x004
#define XSCALE2_COUNT2_INT_EN	0x008
#define XSCALE2_COUNT3_INT_EN	0x010
#define XSCALE2_COUNT0_EVT_SHFT	0
#define XSCALE2_COUNT0_EVT_MASK	(0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT	8
#define XSCALE2_COUNT1_EVT_MASK	(0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT	16
#define XSCALE2_COUNT2_EVT_MASK	(0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT	24
#define XSCALE2_COUNT3_EVT_MASK	(0xff << XSCALE2_COUNT3_EVT_SHFT)
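
/*
 * Note the layout difference from xscale1: the second-generation PMU moves
 * event selection and interrupt enables out of PMNC into dedicated
 * event-select, interrupt-enable and overflow-flag registers, one bit or
 * byte per counter, which is why the helpers below read-modify-write
 * several registers instead of a single packed PMNC.
 */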

static inline u32
xscale2pmu_read_pmnc(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
	/* bits 1-2 and 4-23 are read-unpredictable */
	return val & 0xff000009;
}

static inline void
xscale2pmu_write_pmnc(u32 val)
{
	/* bits 4-23 are write-as-0, 24-31 are write ignored */
	val &= 0xf;
	asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_overflow_flags(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
	asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}

static inline u32
xscale2pmu_read_event_select(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
	return val;
}

static inline void
xscale2pmu_write_event_select(u32 val)
{
	asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}

static inline u32
xscale2pmu_read_int_enable(void)
{
	u32 val;
	asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
	return val;
}

static void
xscale2pmu_write_int_enable(u32 val)
{
	asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}

static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
					enum xscale_counters counter)
{
	int ret = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
		break;
	case XSCALE_COUNTER0:
		ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
		break;
	case XSCALE_COUNTER1:
		ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
		break;
	case XSCALE_COUNTER2:
		ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
		break;
	case XSCALE_COUNTER3:
		ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
	}

	return ret;
}

static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc, of_flags;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/* Disable the PMU. */
	pmnc = xscale2pmu_read_pmnc();
	xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);

	/* Check the overflow flag register. */
	of_flags = xscale2pmu_read_overflow_flags();
	if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
		return IRQ_NONE;

	/* Clear the overflow bits. */
	xscale2pmu_write_overflow_flags(of_flags);

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	irq_work_run();

	/*
	 * Re-enable the PMU.
	 */
	pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(pmnc);

	return IRQ_HANDLED;
}

static void
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien |= XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien |= XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien |= XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien |= XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien |= XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags, ien, evtsel;

	ien = xscale2pmu_read_int_enable();
	evtsel = xscale2pmu_read_event_select();

	switch (idx) {
	case XSCALE_CYCLE_COUNTER:
		ien &= ~XSCALE2_CCOUNT_INT_EN;
		break;
	case XSCALE_COUNTER0:
		ien &= ~XSCALE2_COUNT0_INT_EN;
		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
		break;
	case XSCALE_COUNTER1:
		ien &= ~XSCALE2_COUNT1_INT_EN;
		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
		break;
	case XSCALE_COUNTER2:
		ien &= ~XSCALE2_COUNT2_INT_EN;
		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
		break;
	case XSCALE_COUNTER3:
		ien &= ~XSCALE2_COUNT3_INT_EN;
		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
		break;
	default:
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&pmu_lock, flags);
	xscale2pmu_write_event_select(evtsel);
	xscale2pmu_write_int_enable(ien);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
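
/*
 * Reuse the xscale1 allocator for the cycle counter and counters 0/1, then
 * fall back to the two extra xscale2 counters, mirroring the highest-first
 * order used by the xscale1 version.
 */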
static int
xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
			struct hw_perf_event *event)
{
	int idx = xscale1pmu_get_event_idx(cpuc, event);
	if (idx >= 0)
		goto out;

	if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
		idx = XSCALE_COUNTER3;
	else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
		idx = XSCALE_COUNTER2;
out:
	return idx;
}

static void
xscale2pmu_start(void)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
	val |= XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
xscale2pmu_stop(void)
{
	unsigned long flags, val;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = xscale2pmu_read_pmnc();
	val &= ~XSCALE_PMU_ENABLE;
	xscale2pmu_write_pmnc(val);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static inline u32
xscale2pmu_read_counter(int counter)
{
	u32 val = 0;

	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
		break;
	}

	return val;
}

static inline void
xscale2pmu_write_counter(int counter, u32 val)
{
	switch (counter) {
	case XSCALE_CYCLE_COUNTER:
		asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER0:
		asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER1:
		asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER2:
		asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
		break;
	case XSCALE_COUNTER3:
		asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
		break;
	}
}

static const struct arm_pmu xscale2pmu = {
	.id		= ARM_PERF_PMU_ID_XSCALE2,
	.name		= "xscale2",
	.handle_irq	= xscale2pmu_handle_irq,
	.enable		= xscale2pmu_enable_event,
	.disable	= xscale2pmu_disable_event,
	.read_counter	= xscale2pmu_read_counter,
	.write_counter	= xscale2pmu_write_counter,
	.get_event_idx	= xscale2pmu_get_event_idx,
	.start		= xscale2pmu_start,
	.stop		= xscale2pmu_stop,
	.cache_map	= &xscale_perf_cache_map,
	.event_map	= &xscale_perf_map,
	.raw_event_mask	= 0xFF,
	.num_events	= 5,
	.max_period	= (1LLU << 32) - 1,
};

static const struct arm_pmu *__init xscale2pmu_init(void)
{
	return &xscale2pmu;
}
#else
static const struct arm_pmu *__init xscale1pmu_init(void)
{
	return NULL;
}

static const struct arm_pmu *__init xscale2pmu_init(void)
{
	return NULL;
}
#endif	/* CONFIG_CPU_XSCALE */

@@ -0,0 +1,94 @@
/*
 * linux/arch/arm/kernel/pj4-cp0.c
 *
 * PJ4 iWMMXt coprocessor context switching and handling
 *
 * Copyright (c) 2010 Marvell International Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/thread_notify.h>

static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
{
	struct thread_info *thread = t;

	switch (cmd) {
	case THREAD_NOTIFY_FLUSH:
		/*
		 * flush_thread() zeroes thread->fpstate, so no need
		 * to do anything here.
		 *
		 * FALLTHROUGH: Ensure we don't try to overwrite our newly
		 * initialised state information on the first fault.
		 */

	case THREAD_NOTIFY_EXIT:
		iwmmxt_task_release(thread);
		break;

	case THREAD_NOTIFY_SWITCH:
		iwmmxt_task_switch(thread);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block iwmmxt_notifier_block = {
	.notifier_call	= iwmmxt_do,
};

static u32 __init pj4_cp_access_read(void)
{
	u32 value;

	__asm__ __volatile__ (
		"mrc	p15, 0, %0, c1, c0, 2\n\t"
		: "=r" (value));
	return value;
}
|
||||
|
||||
static void __init pj4_cp_access_write(u32 value)
|
||||
{
|
||||
u32 temp;
|
||||
|
||||
__asm__ __volatile__ (
|
||||
"mcr p15, 0, %1, c1, c0, 2\n\t"
|
||||
"mrc p15, 0, %0, c1, c0, 2\n\t"
|
||||
"mov %0, %0\n\t"
|
||||
"sub pc, pc, #4\n\t"
|
||||
: "=r" (temp) : "r" (value));
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
|
||||
* switch code handle iWMMXt context switching.
|
||||
*/
|
||||
static int __init pj4_cp0_init(void)
|
||||
{
|
||||
u32 cp_access;
|
||||
|
||||
cp_access = pj4_cp_access_read() & ~0xf;
|
||||
pj4_cp_access_write(cp_access);
|
||||
|
||||
printk(KERN_INFO "PJ4 iWMMXt coprocessor enabled.\n");
|
||||
elf_hwcap |= HWCAP_IWMMXT;
|
||||
thread_register_notifier(&iwmmxt_notifier_block);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
late_initcall(pj4_cp0_init);
|
|
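The register read and written above is the Coprocessor Access Control Register (CP15 c1/c0/2), which grants two access-control bits per coprocessor. A sketch of why the init code masks with ~0xf (macro name illustrative, not from this file):

/* Two bits per coprocessor; 3 = full access. CP0/CP1 carry iWMMXt on PJ4. */
#define CP_ACCESS_FULL(cp)	(3 << ((cp) * 2))

/* Clearing these bits makes the first iWMMXt use trap, so call_fpe() and
 * the lazy-switch code can load the right task state on demand. */
u32 deny_iwmmxt_mask = ~(CP_ACCESS_FULL(0) | CP_ACCESS_FULL(1));	/* == ~0xf */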
@@ -1060,8 +1060,8 @@ static int ptrace_sethbpregs(struct task_struct *tsk, long num,
 		goto out;

	if ((gen_type & implied_type) != gen_type) {
-			ret = -EINVAL;
-			goto out;
+		ret = -EINVAL;
+		goto out;
	}

	attr.bp_len	= gen_len;
@@ -0,0 +1,69 @@
/*
 * sched_clock.c: support for extending counters to full 64-bit ns counter
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>

#include <asm/sched_clock.h>

static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
static void (*sched_clock_update_fn)(void);

static void sched_clock_poll(unsigned long wrap_ticks)
{
	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
	sched_clock_update_fn();
}

void __init init_sched_clock(struct clock_data *cd, void (*update)(void),
	unsigned int clock_bits, unsigned long rate)
{
	unsigned long r, w;
	u64 res, wrap;
	char r_unit;

	sched_clock_update_fn = update;

	/* calculate the mult/shift to convert counter ticks to ns. */
	clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 60);

	r = rate;
	if (r >= 4000000) {
		r /= 1000000;
		r_unit = 'M';
	} else {
		r /= 1000;
		r_unit = 'k';
	}

	/* calculate how many ns until we wrap */
	wrap = cyc_to_ns((1ULL << clock_bits) - 1, cd->mult, cd->shift);
	do_div(wrap, NSEC_PER_MSEC);
	w = wrap;

	/* calculate the ns resolution of this counter */
	res = cyc_to_ns(1ULL, cd->mult, cd->shift);
	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
		clock_bits, r, r_unit, res, w);

	/*
	 * Start the timer to keep sched_clock() properly updated and
	 * sets the initial epoch.
	 */
	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
	sched_clock_poll(sched_clock_timer.data);

	/*
	 * Ensure that sched_clock() starts off at 0ns
	 */
	cd->epoch_ns = 0;
}
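A minimal sketch of the platform side that this file expects, assuming a 32-bit free-running up-counter readable at some virtual address. struct clock_data and init_sched_clock() come from the file above; update_sched_clock() and cyc_to_sched_clock() are assumed to be provided by <asm/sched_clock.h> in this series; all other names are illustrative.

static struct clock_data cd;
static void __iomem *ctr_base;		/* hypothetical counter register */

/* Called from the poll timer above, before the counter wraps. */
static void example_update_sched_clock(void)
{
	update_sched_clock(&cd, readl(ctr_base), ~0u);
}

unsigned long long notrace sched_clock(void)
{
	return cyc_to_sched_clock(&cd, readl(ctr_base), ~0u);
}

void __init example_sched_clock_init(unsigned long rate)
{
	/* 32 counter bits, input clock at 'rate' Hz */
	init_sched_clock(&cd, example_update_sched_clock, 32, rate);
}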
@@ -75,9 +75,9 @@ extern void reboot_setup(char *str);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
-unsigned int __machine_arch_type;
+unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
-unsigned int cacheid;
+unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

@@ -91,24 +91,24 @@ EXPORT_SYMBOL(system_serial_low);
unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

-unsigned int elf_hwcap;
+unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
-struct processor processor;
+struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
-struct cpu_tlb_fns cpu_tlb;
+struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
-struct cpu_user_fns cpu_user;
+struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
-struct cpu_cache_fns cpu_cache;
+struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
-struct outer_cache_fns outer_cache;
+struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

@@ -126,6 +126,7 @@ EXPORT_SYMBOL(elf_platform);
static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
+struct machine_desc *machine_desc __initdata;

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };

@@ -708,13 +709,11 @@ static struct init_tags {
	{ 0, ATAG_NONE }
};

-static void (*init_machine)(void) __initdata;
-
static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
-	if (init_machine)
-		init_machine();
+	if (machine_desc->init_machine)
+		machine_desc->init_machine();
	return 0;
}
arch_initcall(customize_machine);

@@ -809,6 +808,7 @@ void __init setup_arch(char **cmdline_p)

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
+	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)

@@ -868,13 +868,9 @@ void __init setup_arch(char **cmdline_p)
	cpu_init();
	tcm_init();

-	/*
-	 * Set up various architecture-specific pointers
-	 */
-	arch_nr_irqs = mdesc->nr_irqs;
-	init_arch_irq = mdesc->init_irq;
-	system_timer = mdesc->timer;
-	init_machine = mdesc->init_machine;
+#ifdef CONFIG_MULTI_IRQ_HANDLER
+	handle_arch_irq = mdesc->handle_irq;
+#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)

@@ -884,6 +880,9 @@ void __init setup_arch(char **cmdline_p)
#endif
#endif
+	early_trap_init();

+	if (mdesc->init_early)
+		mdesc->init_early();
}
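A sketch of the board-file side under these changes: the kernel now keeps the machine_desc around and calls init_early()/init_machine() through it instead of cached function pointers. Board and symbol names here are illustrative, not a real machine.

MACHINE_START(EXAMPLE, "Example Board")
	.boot_params	= 0x00000100,
	.init_early	= example_init_early,	/* new hook, run at the end of setup_arch() */
	.init_irq	= example_init_irq,
	.timer		= &example_timer,
	.init_machine	= example_init_machine,	/* run via customize_machine() */
MACHINE_END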
@@ -16,6 +16,7 @@
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
+#include <linux/ftrace.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/cpu.h>

@@ -24,6 +25,7 @@
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/clockchips.h>
+#include <linux/completion.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>

@@ -37,7 +39,6 @@
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/localtimer.h>
-#include <asm/smp_plat.h>

/*
 * as from 2.5, kernels no longer have an init_tasks structure
@@ -46,64 +47,14 @@
 */
struct secondary_data secondary_data;

-/*
- * structures for inter-processor calls
- * - A collection of single bit ipi messages.
- */
-struct ipi_data {
-	spinlock_t lock;
-	unsigned long ipi_count;
-	unsigned long bits;
-};
-
-static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
-	.lock	= SPIN_LOCK_UNLOCKED,
-};
-
enum ipi_msg_type {
-	IPI_TIMER,
+	IPI_TIMER = 2,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
	IPI_CPU_STOP,
};

-static inline void identity_mapping_add(pgd_t *pgd, unsigned long start,
-	unsigned long end)
-{
-	unsigned long addr, prot;
-	pmd_t *pmd;
-
-	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
-	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
-		prot |= PMD_BIT4;
-
-	for (addr = start & PGDIR_MASK; addr < end;) {
-		pmd = pmd_offset(pgd + pgd_index(addr), addr);
-		pmd[0] = __pmd(addr | prot);
-		addr += SECTION_SIZE;
-		pmd[1] = __pmd(addr | prot);
-		addr += SECTION_SIZE;
-		flush_pmd_entry(pmd);
-		outer_clean_range(__pa(pmd), __pa(pmd + 1));
-	}
-}
-
-static inline void identity_mapping_del(pgd_t *pgd, unsigned long start,
-	unsigned long end)
-{
-	unsigned long addr;
-	pmd_t *pmd;
-
-	for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) {
-		pmd = pmd_offset(pgd + pgd_index(addr), addr);
-		pmd[0] = __pmd(0);
-		pmd[1] = __pmd(0);
-		clean_pmd_entry(pmd);
-		outer_clean_range(__pa(pmd), __pa(pmd + 1));
-	}
-}
-
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -177,8 +128,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
			barrier();
		}

-		if (!cpu_online(cpu))
+		if (!cpu_online(cpu)) {
+			pr_crit("CPU%u: failed to come online\n", cpu);
			ret = -EIO;
+		}
+	} else {
+		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
	}

	secondary_data.stack = NULL;
@@ -194,18 +149,12 @@ int __cpuinit __cpu_up(unsigned int cpu)

	pgd_free(&init_mm, pgd);

-	if (ret) {
-		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
-
-		/*
-		 * FIXME: We need to clean up the new idle thread. --rmk
-		 */
-	}
-
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
+static void percpu_timer_stop(void);

/*
 * __cpu_disable runs on the processor to be shutdown.
 */
@@ -233,7 +182,7 @@ int __cpu_disable(void)
	/*
	 * Stop the local timer for this CPU.
	 */
-	local_timer_stop();
+	percpu_timer_stop();

	/*
	 * Flush user cache and TLB mappings, and then remove this CPU
@@ -252,12 +201,20 @@ int __cpu_disable(void)
	return 0;
}

+static DECLARE_COMPLETION(cpu_died);
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
void __cpu_die(unsigned int cpu)
{
+	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+		pr_err("CPU%u: cpu didn't die\n", cpu);
+		return;
+	}
	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);

	if (!platform_cpu_kill(cpu))
		printk("CPU%u: unable to kill\n", cpu);
}
@@ -274,12 +231,17 @@ void __ref cpu_die(void)
{
	unsigned int cpu = smp_processor_id();

-	local_irq_disable();
	idle_task_exit();

+	local_irq_disable();
+	mb();
+
+	/* Tell __cpu_die() that this CPU is now safe to dispose of */
+	complete(&cpu_died);
+
	/*
	 * actual CPU shutdown procedure is at least platform (if not
-	 * CPU) specific
+	 * CPU) specific.
	 */
	platform_cpu_die(cpu);

@@ -289,12 +251,24 @@ void __ref cpu_die(void)
	 * to be repeated to undo the effects of taking the CPU offline.
	 */
	__asm__("mov	sp, %0\n"
+	"	mov	fp, #0\n"
	"	b	secondary_start_kernel"
		:
		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
}
#endif /* CONFIG_HOTPLUG_CPU */

+/*
+ * Called by both boot and secondaries to move global data into
+ * per-processor storage.
+ */
+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
+
+	cpu_info->loops_per_jiffy = loops_per_jiffy;
+}
+
/*
 * This is the secondary CPU boot entry. We're using this CPUs
 * idle thread stack, but a set of temporary page tables.
@@ -319,6 +293,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)

	cpu_init();
	preempt_disable();
+	trace_hardirqs_off();

	/*
	 * Give the platform a chance to do its own initialisation.
@@ -352,17 +327,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
	cpu_idle();
}

-/*
- * Called by both boot and secondaries to move global data into
- * per-processor storage.
- */
-void __cpuinit smp_store_cpu_info(unsigned int cpuid)
-{
-	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
-
-	cpu_info->loops_per_jiffy = loops_per_jiffy;
-}
-
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu;
@@ -385,61 +349,80 @@ void __init smp_prepare_boot_cpu(void)
	per_cpu(cpu_data, cpu).idle = current;
}

-static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
+void __init smp_prepare_cpus(unsigned int max_cpus)
{
-	unsigned long flags;
-	unsigned int cpu;
+	unsigned int ncores = num_possible_cpus();

-	local_irq_save(flags);
+	smp_store_cpu_info(smp_processor_id());

-	for_each_cpu(cpu, mask) {
-		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
-
-		spin_lock(&ipi->lock);
-		ipi->bits |= 1 << msg;
-		spin_unlock(&ipi->lock);
-	}
-
	/*
-	 * Call the platform specific cross-CPU call function.
+	 * are we trying to boot more cores than exist?
	 */
-	smp_cross_call(mask);
+	if (max_cpus > ncores)
+		max_cpus = ncores;

-	local_irq_restore(flags);
+	if (max_cpus > 1) {
+		/*
+		 * Enable the local timer or broadcast device for the
+		 * boot CPU, but only if we have more than one CPU.
+		 */
+		percpu_timer_setup();
+
+		/*
+		 * Initialise the SCU if there are more than one CPU
+		 * and let them know where to start.
+		 */
+		platform_smp_prepare_cpus(max_cpus);
+	}
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
-	send_ipi_message(mask, IPI_CALL_FUNC);
+	smp_cross_call(mask, IPI_CALL_FUNC);
}

void arch_send_call_function_single_ipi(int cpu)
{
-	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

-void show_ipi_list(struct seq_file *p)
+static const char *ipi_types[NR_IPI] = {
+#define S(x,s)	[x - IPI_TIMER] = s
+	S(IPI_TIMER, "Timer broadcast interrupts"),
+	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
+	S(IPI_CALL_FUNC, "Function call interrupts"),
+	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
+	S(IPI_CPU_STOP, "CPU stop interrupts"),
+};
+
+void show_ipi_list(struct seq_file *p, int prec)
{
-	unsigned int cpu;
+	unsigned int cpu, i;

-	seq_puts(p, "IPI:");
+	for (i = 0; i < NR_IPI; i++) {
+		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);

-	for_each_present_cpu(cpu)
-		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
+		for_each_present_cpu(cpu)
+			seq_printf(p, "%10u ",
+				   __get_irq_stat(cpu, ipi_irqs[i]));

-	seq_putc(p, '\n');
+		seq_printf(p, " %s\n", ipi_types[i]);
+	}
}

-void show_local_irqs(struct seq_file *p)
+u64 smp_irq_stat_cpu(unsigned int cpu)
{
-	unsigned int cpu;
+	u64 sum = 0;
+	int i;

-	seq_printf(p, "LOC: ");
+	for (i = 0; i < NR_IPI; i++)
+		sum += __get_irq_stat(cpu, ipi_irqs[i]);

-	for_each_present_cpu(cpu)
-		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);
+#ifdef CONFIG_LOCAL_TIMERS
+	sum += __get_irq_stat(cpu, local_timer_irqs);
+#endif

-	seq_putc(p, '\n');
+	return sum;
}

/*
@@ -456,24 +439,36 @@ static void ipi_timer(void)
}

#ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void __exception do_local_timer(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	int cpu = smp_processor_id();

	if (local_timer_ack()) {
-		irq_stat[cpu].local_timer_irqs++;
+		__inc_irq_stat(cpu, local_timer_irqs);
		ipi_timer();
	}

	set_irq_regs(old_regs);
}
+
+void show_local_irqs(struct seq_file *p, int prec)
+{
+	unsigned int cpu;
+
+	seq_printf(p, "%*s: ", prec, "LOC");
+
+	for_each_present_cpu(cpu)
+		seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));
+
+	seq_printf(p, " Local timer interrupts\n");
+}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
static void smp_timer_broadcast(const struct cpumask *mask)
{
-	send_ipi_message(mask, IPI_TIMER);
+	smp_cross_call(mask, IPI_TIMER);
}
#else
#define smp_timer_broadcast	NULL
@@ -510,6 +505,21 @@ void __cpuinit percpu_timer_setup(void)
	local_timer_setup(evt);
}

+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The generic clock events code purposely does not stop the local timer
+ * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
+ * manually here.
+ */
+static void percpu_timer_stop(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
+
+	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+}
+#endif
+
static DEFINE_SPINLOCK(stop_lock);

/*
@@ -536,85 +546,70 @@ static void ipi_cpu_stop(unsigned int cpu)

/*
 * Main handler for inter-processor interrupts
- *
- * For ARM, the ipimask now only identifies a single
- * category of IPI (Bit 1 IPIs have been replaced by a
- * different mechanism):
- *
- *  Bit 0 - Inter-processor function call
 */
-asmlinkage void __exception do_IPI(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
-	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
	struct pt_regs *old_regs = set_irq_regs(regs);

-	ipi->ipi_count++;
+	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
+		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);

-	for (;;) {
-		unsigned long msgs;
+	switch (ipinr) {
+	case IPI_TIMER:
+		ipi_timer();
+		break;

-		spin_lock(&ipi->lock);
-		msgs = ipi->bits;
-		ipi->bits = 0;
-		spin_unlock(&ipi->lock);
+	case IPI_RESCHEDULE:
+		/*
+		 * nothing more to do - everything is
+		 * done on the interrupt return path
+		 */
+		break;

-		if (!msgs)
-			break;
+	case IPI_CALL_FUNC:
+		generic_smp_call_function_interrupt();
+		break;

-		do {
-			unsigned nextmsg;
+	case IPI_CALL_FUNC_SINGLE:
+		generic_smp_call_function_single_interrupt();
+		break;

-			nextmsg = msgs & -msgs;
-			msgs &= ~nextmsg;
-			nextmsg = ffz(~nextmsg);
+	case IPI_CPU_STOP:
+		ipi_cpu_stop(cpu);
+		break;

-			switch (nextmsg) {
-			case IPI_TIMER:
-				ipi_timer();
-				break;
-
-			case IPI_RESCHEDULE:
-				/*
-				 * nothing more to do - everything is
-				 * done on the interrupt return path
-				 */
-				break;
-
-			case IPI_CALL_FUNC:
-				generic_smp_call_function_interrupt();
-				break;
-
-			case IPI_CALL_FUNC_SINGLE:
-				generic_smp_call_function_single_interrupt();
-				break;
-
-			case IPI_CPU_STOP:
-				ipi_cpu_stop(cpu);
-				break;
-
-			default:
-				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
-				       cpu, nextmsg);
-				break;
-			}
-		} while (msgs);
+	default:
+		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
+		       cpu, ipinr);
+		break;
	}

	set_irq_regs(old_regs);
}

void smp_send_reschedule(int cpu)
{
-	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

void smp_send_stop(void)
{
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_ipi_message(&mask, IPI_CPU_STOP);
+	unsigned long timeout;
+
+	if (num_online_cpus() > 1) {
+		cpumask_t mask = cpu_online_map;
+		cpu_clear(smp_processor_id(), mask);
+
+		smp_cross_call(&mask, IPI_CPU_STOP);
+	}
+
+	/* Wait up to one second for other CPUs to stop */
+	timeout = USEC_PER_SEC;
+	while (num_online_cpus() > 1 && timeout--)
+		udelay(1);
+
+	if (num_online_cpus() > 1)
+		pr_warning("SMP: failed to stop secondary CPUs\n");
}

/*
@@ -624,128 +619,3 @@ int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
-
-static void
-on_each_cpu_mask(void (*func)(void *), void *info, int wait,
-	const struct cpumask *mask)
-{
-	preempt_disable();
-
-	smp_call_function_many(mask, func, info, wait);
-	if (cpumask_test_cpu(smp_processor_id(), mask))
-		func(info);
-
-	preempt_enable();
-}
-
-/**********************************************************************/
-
-/*
- * TLB operations
- */
-struct tlb_args {
-	struct vm_area_struct *ta_vma;
-	unsigned long ta_start;
-	unsigned long ta_end;
-};
-
-static inline void ipi_flush_tlb_all(void *ignored)
-{
-	local_flush_tlb_all();
-}
-
-static inline void ipi_flush_tlb_mm(void *arg)
-{
-	struct mm_struct *mm = (struct mm_struct *)arg;
-
-	local_flush_tlb_mm(mm);
-}
-
-static inline void ipi_flush_tlb_page(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
-}
-
-static inline void ipi_flush_tlb_kernel_page(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_kernel_page(ta->ta_start);
-}
-
-static inline void ipi_flush_tlb_range(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
-}
-
-static inline void ipi_flush_tlb_kernel_range(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
-}
-
-void flush_tlb_all(void)
-{
-	if (tlb_ops_need_broadcast())
-		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
-	else
-		local_flush_tlb_all();
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	if (tlb_ops_need_broadcast())
-		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
-	else
-		local_flush_tlb_mm(mm);
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_vma = vma;
-		ta.ta_start = uaddr;
-		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
-	} else
-		local_flush_tlb_page(vma, uaddr);
-}
-
-void flush_tlb_kernel_page(unsigned long kaddr)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_start = kaddr;
-		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
-	} else
-		local_flush_tlb_kernel_page(kaddr);
-}
-
-void flush_tlb_range(struct vm_area_struct *vma,
-		     unsigned long start, unsigned long end)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_vma = vma;
-		ta.ta_start = start;
-		ta.ta_end = end;
-		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
-	} else
-		local_flush_tlb_range(vma, start, end);
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_start = start;
-		ta.ta_end = end;
-		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
-	} else
-		local_flush_tlb_kernel_range(start, end);
-}
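The reworked do_IPI() above receives the IPI number from the low-level entry code, so smp_cross_call() now carries that number down to the interrupt controller. A sketch of the platform side as assumed by this series; on GIC-based machines the mach/smp.h wrapper is essentially:

/* Illustrative only: forward the IPI number to the GIC's software interrupt. */
static inline void smp_cross_call(const struct cpumask *mask, int ipi)
{
	gic_raise_softirq(mask, ipi);
}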
@@ -0,0 +1,139 @@
/*
 * linux/arch/arm/kernel/smp_tlb.c
 *
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/preempt.h>
#include <linux/smp.h>

#include <asm/smp_plat.h>
#include <asm/tlbflush.h>

static void on_each_cpu_mask(void (*func)(void *), void *info, int wait,
	const struct cpumask *mask)
{
	preempt_disable();

	smp_call_function_many(mask, func, info, wait);
	if (cpumask_test_cpu(smp_processor_id(), mask))
		func(info);

	preempt_enable();
}

/**********************************************************************/

/*
 * TLB operations
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

void flush_tlb_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	else
		local_flush_tlb_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
	else
		local_flush_tlb_mm(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = uaddr;
		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_page(vma, uaddr);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = kaddr;
		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
	} else
		local_flush_tlb_kernel_page(kaddr);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
	} else
		local_flush_tlb_range(vma, start, end);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
}
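The on_each_cpu_mask() helper above is the core of the file: it runs func on every CPU in mask, calling it directly on the local CPU (no self-IPI) and via IPI everywhere else. A minimal usage sketch mirroring flush_tlb_mm(); the wrapper name is illustrative since the helper is static to this file:

static void example_flush_mm(void *arg)
{
	/* runs on each target CPU, possibly in IPI context */
	local_flush_tlb_mm((struct mm_struct *)arg);
}

static void example_broadcast_flush(struct mm_struct *mm)
{
	/* wait=1: return only after every CPU has completed the flush */
	on_each_cpu_mask(example_flush_mm, mm, 1, mm_cpumask(mm));
}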
@@ -127,8 +127,6 @@ static void __cpuinit twd_calibrate_rate(void)
 */
void __cpuinit twd_timer_setup(struct clock_event_device *clk)
{
-	unsigned long flags;
-
	twd_calibrate_rate();

	clk->name = "local_timer";

@@ -143,20 +141,7 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);

	/* Make sure our local interrupt controller has this enabled */
-	local_irq_save(flags);
-	irq_to_desc(clk->irq)->status |= IRQ_NOPROBE;
-	get_irq_chip(clk->irq)->unmask(clk->irq);
-	local_irq_restore(flags);
+	gic_enable_ppi(clk->irq);

	clockevents_register_device(clk);
}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * take a local timer down
- */
-void twd_timer_stop(void)
-{
-	__raw_writel(0, twd_base + TWD_TIMER_CONTROL);
-}
-#endif
@@ -0,0 +1,267 @@
/*
 *  linux/arch/arm/kernel/swp_emulate.c
 *
 *  Copyright (C) 2009 ARM Limited
 *  __user_* functions adapted from include/asm/uaccess.h
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Implements emulation of the SWP/SWPB instructions using load-exclusive and
 *  store-exclusive for processors that have them disabled (or future ones that
 *  might not implement them).
 *
 *  Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
 *  Where: Rt  = destination
 *	   Rt2 = source
 *	   Rn  = address
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/perf_event.h>

#include <asm/traps.h>
#include <asm/uaccess.h>

/*
 * Error-checking SWP macros implemented using ldrex{b}/strex{b}
 */
#define __user_swpX_asm(data, addr, res, temp, B)		\
	__asm__ __volatile__(					\
	"	mov		%2, %1\n"			\
	"0:	ldrex"B"	%1, [%3]\n"			\
	"1:	strex"B"	%0, %2, [%3]\n"			\
	"	cmp		%0, #0\n"			\
	"	movne		%0, %4\n"			\
	"2:\n"							\
	"	.section	 .fixup,\"ax\"\n"		\
	"	.align		2\n"				\
	"3:	mov		%0, %5\n"			\
	"	b		2b\n"				\
	"	.previous\n"					\
	"	.section	 __ex_table,\"a\"\n"		\
	"	.align		3\n"				\
	"	.long		0b, 3b\n"			\
	"	.long		1b, 3b\n"			\
	"	.previous"					\
	: "=&r" (res), "+r" (data), "=&r" (temp)		\
	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT)		\
	: "cc", "memory")

#define __user_swp_asm(data, addr, res, temp) \
	__user_swpX_asm(data, addr, res, temp, "")
#define __user_swpb_asm(data, addr, res, temp) \
	__user_swpX_asm(data, addr, res, temp, "b")

/*
 * Macros/defines for extracting register numbers from instruction.
 */
#define EXTRACT_REG_NUM(instruction, offset) \
	(((instruction) & (0xf << (offset))) >> (offset))
#define RN_OFFSET  16
#define RT_OFFSET  12
#define RT2_OFFSET  0
/*
 * Bit 22 of the instruction encoding distinguishes between
 * the SWP and SWPB variants (bit set means SWPB).
 */
#define TYPE_SWPB (1 << 22)

static unsigned long swpcounter;
static unsigned long swpbcounter;
static unsigned long abtcounter;
static pid_t         previous_pid;

#ifdef CONFIG_PROC_FS
static int proc_read_status(char *page, char **start, off_t off, int count,
			    int *eof, void *data)
{
	char *p = page;
	int len;

	p += sprintf(p, "Emulated SWP:\t\t%lu\n", swpcounter);
	p += sprintf(p, "Emulated SWPB:\t\t%lu\n", swpbcounter);
	p += sprintf(p, "Aborted SWP{B}:\t\t%lu\n", abtcounter);
	if (previous_pid != 0)
		p += sprintf(p, "Last process:\t\t%d\n", previous_pid);

	len = (p - page) - off;
	if (len < 0)
		len = 0;

	*eof = (len <= count) ? 1 : 0;
	*start = page + off;

	return len;
}
#endif

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
	siginfo_t info;

	if (find_vma(current->mm, addr) == NULL)
		info.si_code = SEGV_MAPERR;
	else
		info.si_code = SEGV_ACCERR;

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_addr  = (void *) instruction_pointer(regs);

	pr_debug("SWP{B} emulation: access caused memory abort!\n");
	arm_notify_die("Illegal memory access", regs, &info, 0, 0);

	abtcounter++;
}

static int emulate_swpX(unsigned int address, unsigned int *data,
			unsigned int type)
{
	unsigned int res = 0;

	if ((type != TYPE_SWPB) && (address & 0x3)) {
		/* SWP to unaligned address not permitted */
		pr_debug("SWP instruction on unaligned pointer!\n");
		return -EFAULT;
	}

	while (1) {
		unsigned long temp;

		/*
		 * Barrier required between accessing protected resource and
		 * releasing a lock for it. Legacy code might not have done
		 * this, and we cannot determine that this is not the case
		 * being emulated, so insert always.
		 */
		smp_mb();

		if (type == TYPE_SWPB)
			__user_swpb_asm(*data, address, res, temp);
		else
			__user_swp_asm(*data, address, res, temp);

		if (likely(res != -EAGAIN) || signal_pending(current))
			break;

		cond_resched();
	}

	if (res == 0) {
		/*
		 * Barrier also required between acquiring a lock for a
		 * protected resource and accessing the resource. Inserted for
		 * same reason as above.
		 */
		smp_mb();

		if (type == TYPE_SWPB)
			swpbcounter++;
		else
			swpcounter++;
	}

	return res;
}

/*
 * swp_handler logs the id of calling process, dissects the instruction, sanity
 * checks the memory location, calls emulate_swpX for the actual operation and
 * deals with fixup/error handling before returning
 */
static int swp_handler(struct pt_regs *regs, unsigned int instr)
{
	unsigned int address, destreg, data, type;
	unsigned int res = 0;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);

	if (current->pid != previous_pid) {
		pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
			 current->comm, (unsigned long)current->pid);
		previous_pid = current->pid;
	}

	address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)];
	data	= regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
	destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);

	type = instr & TYPE_SWPB;

	pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
		 EXTRACT_REG_NUM(instr, RN_OFFSET), address,
		 destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);

	/* Check access in reasonable access range for both SWP and SWPB */
	if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
		pr_debug("SWP{B} emulation: access to %p not allowed!\n",
			 (void *)address);
		res = -EFAULT;
	} else {
		res = emulate_swpX(address, &data, type);
	}

	if (res == 0) {
		/*
		 * On successful emulation, revert the adjustment to the PC
		 * made in kernel/traps.c in order to resume execution at the
		 * instruction following the SWP{B}.
		 */
		regs->ARM_pc += 4;
		regs->uregs[destreg] = data;
	} else if (res == -EFAULT) {
		/*
		 * Memory errors do not mean emulation failed.
		 * Set up signal info to return SEGV, then return OK
		 */
		set_segfault(regs, address);
	}

	return 0;
}

/*
 * Only emulate SWP/SWPB executed in ARM state/User mode.
 * The kernel must be SWP free and SWP{B} does not exist in Thumb/ThumbEE.
 */
static struct undef_hook swp_hook = {
	.instr_mask = 0x0fb00ff0,
	.instr_val  = 0x01000090,
	.cpsr_mask  = MODE_MASK | PSR_T_BIT | PSR_J_BIT,
	.cpsr_val   = USR_MODE,
	.fn	    = swp_handler
};

/*
 * Register handler and create status file in /proc/cpu
 * Invoked as late_initcall, since not needed before init spawned.
 */
static int __init swp_emulation_init(void)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *res;

	res = create_proc_entry("cpu/swp_emulation", S_IRUGO, NULL);

	if (!res)
		return -ENOMEM;

	res->read_proc = proc_read_status;
#endif /* CONFIG_PROC_FS */

	printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n");
	register_undef_hook(&swp_hook);

	return 0;
}

late_initcall(swp_emulation_init);
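The userspace view of this handler, as a small illustrative test: with CONFIG_SWP_EMULATE enabled and SWP disabled in hardware, the instruction below takes an undefined-instruction trap, matches swp_hook, and is emulated by swp_handler() above.

static unsigned int lockvar;		/* hypothetical lock word */

unsigned int swp_test(unsigned int newval)
{
	unsigned int oldval;

	/* deprecated atomic swap: oldval = lockvar; lockvar = newval */
	__asm__ __volatile__("swp %0, %1, [%2]"
			     : "=&r" (oldval)
			     : "r" (newval), "r" (&lockvar)
			     : "memory");
	return oldval;
}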
@@ -30,12 +30,13 @@
#include <asm/leds.h>
#include <asm/thread_info.h>
#include <asm/stacktrace.h>
+#include <asm/mach/arch.h>
#include <asm/mach/time.h>

/*
 * Our system timer.
 */
-struct sys_timer *system_timer;
+static struct sys_timer *system_timer;

#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
/* this needs a better home */

@@ -160,6 +161,7 @@ device_initcall(timer_init_sysfs);

void __init time_init(void)
{
+	system_timer = machine_desc->timer;
	system_timer->init();
}
@@ -37,6 +37,8 @@

static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };

+void *vectors_page;
+
#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

@@ -708,19 +710,19 @@ void __readwrite_bug(const char *fn)
}
EXPORT_SYMBOL(__readwrite_bug);

-void __pte_error(const char *file, int line, unsigned long val)
+void __pte_error(const char *file, int line, pte_t pte)
{
-	printk("%s:%d: bad pte %08lx.\n", file, line, val);
+	printk("%s:%d: bad pte %08lx.\n", file, line, pte_val(pte));
}

-void __pmd_error(const char *file, int line, unsigned long val)
+void __pmd_error(const char *file, int line, pmd_t pmd)
{
-	printk("%s:%d: bad pmd %08lx.\n", file, line, val);
+	printk("%s:%d: bad pmd %08lx.\n", file, line, pmd_val(pmd));
}

-void __pgd_error(const char *file, int line, unsigned long val)
+void __pgd_error(const char *file, int line, pgd_t pgd)
{
-	printk("%s:%d: bad pgd %08lx.\n", file, line, val);
+	printk("%s:%d: bad pgd %08lx.\n", file, line, pgd_val(pgd));
}

asmlinkage void __div0(void)

@@ -756,7 +758,11 @@ static void __init kuser_get_tls_init(unsigned long vectors)

void __init early_trap_init(void)
{
+#if defined(CONFIG_CPU_USE_DOMAINS)
	unsigned long vectors = CONFIG_VECTORS_BASE;
+#else
+	unsigned long vectors = (unsigned long)vectors_page;
+#endif
	extern char __stubs_start[], __stubs_end[];
	extern char __vectors_start[], __vectors_end[];
	extern char __kuser_helper_start[], __kuser_helper_end[];

@@ -780,10 +786,10 @@ void __init early_trap_init(void)
	 * Copy signal return handlers into the vector page, and
	 * set sigreturn to be a pointer to these.
	 */
-	memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
-	       sizeof(sigreturn_codes));
-	memcpy((void *)KERN_RESTART_CODE, syscall_restart_code,
-	       sizeof(syscall_restart_code));
+	memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
+	       sigreturn_codes, sizeof(sigreturn_codes));
+	memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+	       syscall_restart_code, sizeof(syscall_restart_code));

	flush_icache_range(vectors, vectors + PAGE_SIZE);
	modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
@@ -101,6 +101,7 @@ SECTIONS
			__exception_text_start = .;
			*(.exception.text)
			__exception_text_end = .;
+			IRQENTRY_TEXT
			TEXT_TEXT
			SCHED_TEXT
			LOCK_TEXT

@@ -167,6 +168,7 @@ SECTIONS

		NOSAVE_DATA
		CACHELINE_ALIGNED_DATA(32)
+		READ_MOSTLY_DATA(32)

		/*
		 * The exception fixup table (might need resorting at runtime)
@@ -28,20 +28,21 @@
 */
#include <linux/linkage.h>
#include <asm/errno.h>
+#include <asm/domain.h>

ENTRY(__get_user_1)
-1:	ldrbt	r2, [r0]
+1:	T(ldrb)	r2, [r0]
	mov	r0, #0
	mov	pc, lr
ENDPROC(__get_user_1)

ENTRY(__get_user_2)
#ifdef CONFIG_THUMB2_KERNEL
-2:	ldrbt	r2, [r0]
-3:	ldrbt	r3, [r0, #1]
+2:	T(ldrb)	r2, [r0]
+3:	T(ldrb)	r3, [r0, #1]
#else
-2:	ldrbt	r2, [r0], #1
-3:	ldrbt	r3, [r0]
+2:	T(ldrb)	r2, [r0], #1
+3:	T(ldrb)	r3, [r0]
#endif
#ifndef __ARMEB__
	orr	r2, r2, r3, lsl #8

@@ -53,7 +54,7 @@ ENTRY(__get_user_2)
ENDPROC(__get_user_2)

ENTRY(__get_user_4)
-4:	ldrt	r2, [r0]
+4:	T(ldr)	r2, [r0]
	mov	r0, #0
	mov	pc, lr
ENDPROC(__get_user_4)
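The T() wrappers in these uaccess conversions come from the new <asm/domain.h> include: when CONFIG_CPU_USE_DOMAINS is set the kernel keeps using the user-mode ldrt/strt instruction variants, otherwise the plain instructions are emitted. A sketch of the header's intent (not a verbatim copy of it):

#ifdef CONFIG_CPU_USE_DOMAINS
#define T(instr)	instr ## t	/* e.g. T(ldrb) -> ldrbt, forces user-mode access */
#else
#define T(instr)	instr		/* plain access; user pages checked by other means */
#endif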
@@ -28,9 +28,10 @@
 */
#include <linux/linkage.h>
#include <asm/errno.h>
+#include <asm/domain.h>

ENTRY(__put_user_1)
-1:	strbt	r2, [r0]
+1:	T(strb)	r2, [r0]
	mov	r0, #0
	mov	pc, lr
ENDPROC(__put_user_1)

@@ -39,19 +40,19 @@ ENTRY(__put_user_2)
	mov	ip, r2, lsr #8
#ifdef CONFIG_THUMB2_KERNEL
#ifndef __ARMEB__
-2:	strbt	r2, [r0]
-3:	strbt	ip, [r0, #1]
+2:	T(strb)	r2, [r0]
+3:	T(strb)	ip, [r0, #1]
#else
-2:	strbt	ip, [r0]
-3:	strbt	r2, [r0, #1]
+2:	T(strb)	ip, [r0]
+3:	T(strb)	r2, [r0, #1]
#endif
#else	/* !CONFIG_THUMB2_KERNEL */
#ifndef __ARMEB__
-2:	strbt	r2, [r0], #1
-3:	strbt	ip, [r0]
+2:	T(strb)	r2, [r0], #1
+3:	T(strb)	ip, [r0]
#else
-2:	strbt	ip, [r0], #1
-3:	strbt	r2, [r0]
+2:	T(strb)	ip, [r0], #1
+3:	T(strb)	r2, [r0]
#endif
#endif	/* CONFIG_THUMB2_KERNEL */
	mov	r0, #0

@@ -59,18 +60,18 @@ ENTRY(__put_user_2)
ENDPROC(__put_user_2)

ENTRY(__put_user_4)
-4:	strt	r2, [r0]
+4:	T(str)	r2, [r0]
	mov	r0, #0
	mov	pc, lr
ENDPROC(__put_user_4)

ENTRY(__put_user_8)
#ifdef CONFIG_THUMB2_KERNEL
-5:	strt	r2, [r0]
-6:	strt	r3, [r0, #4]
+5:	T(str)	r2, [r0]
+6:	T(str)	r3, [r0, #4]
#else
-5:	strt	r2, [r0], #4
-6:	strt	r3, [r0]
+5:	T(str)	r2, [r0], #4
+6:	T(str)	r3, [r0]
#endif
	mov	r0, #0
	mov	pc, lr
@@ -14,6 +14,7 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/errno.h>
+#include <asm/domain.h>

	.text

@@ -31,11 +32,11 @@
		rsb	ip, ip, #4
		cmp	ip, #2
		ldrb	r3, [r1], #1
-USER(		strbt	r3, [r0], #1)			@ May fault
+USER(		T(strb)	r3, [r0], #1)			@ May fault
		ldrgeb	r3, [r1], #1
-USER(		strgebt	r3, [r0], #1)			@ May fault
+USER(		T(strgeb) r3, [r0], #1)			@ May fault
		ldrgtb	r3, [r1], #1
-USER(		strgtbt	r3, [r0], #1)			@ May fault
+USER(		T(strgtb) r3, [r0], #1)			@ May fault
		sub	r2, r2, ip
		b	.Lc2u_dest_aligned

@@ -58,7 +59,7 @@ ENTRY(__copy_to_user)
		addmi	ip, r2, #4
		bmi	.Lc2u_0nowords
		ldr	r3, [r1], #4
-USER(		strt	r3, [r0], #4)			@ May fault
+USER(		T(str)	r3, [r0], #4)			@ May fault
		mov	ip, r0, lsl #32 - PAGE_SHIFT	@ On each page, use a ld/st??t instruction
		rsb	ip, ip, #0
		movs	ip, ip, lsr #32 - PAGE_SHIFT

@@ -87,18 +88,18 @@ USER(		T(str)	r3, [r0], #4)			@ May fault
		stmneia	r0!, {r3 - r4}			@ Shouldn't fault
		tst	ip, #4
		ldrne	r3, [r1], #4
-		strnet	r3, [r0], #4			@ Shouldn't fault
+		T(strne) r3, [r0], #4			@ Shouldn't fault
		ands	ip, ip, #3
		beq	.Lc2u_0fupi
.Lc2u_0nowords:	teq	ip, #0
		beq	.Lc2u_finished
.Lc2u_nowords:	cmp	ip, #2
		ldrb	r3, [r1], #1
-USER(		strbt	r3, [r0], #1)			@ May fault
+USER(		T(strb)	r3, [r0], #1)			@ May fault
		ldrgeb	r3, [r1], #1
-USER(		strgebt	r3, [r0], #1)			@ May fault
+USER(		T(strgeb) r3, [r0], #1)			@ May fault
		ldrgtb	r3, [r1], #1
-USER(		strgtbt	r3, [r0], #1)			@ May fault
+USER(		T(strgtb) r3, [r0], #1)			@ May fault
		b	.Lc2u_finished

.Lc2u_not_enough:

@@ -119,7 +120,7 @@ USER(		T(strgtb) r3, [r0], #1)			@ May fault
		mov	r3, r7, pull #8
		ldr	r7, [r1], #4
		orr	r3, r3, r7, push #24
-USER(		strt	r3, [r0], #4)			@ May fault
+USER(		T(str)	r3, [r0], #4)			@ May fault
		mov	ip, r0, lsl #32 - PAGE_SHIFT
		rsb	ip, ip, #0
		movs	ip, ip, lsr #32 - PAGE_SHIFT

@@ -154,18 +155,18 @@ USER(		T(str)	r3, [r0], #4)			@ May fault
		movne	r3, r7, pull #8
		ldrne	r7, [r1], #4
		orrne	r3, r3, r7, push #24
-		strnet	r3, [r0], #4			@ Shouldn't fault
+		T(strne) r3, [r0], #4			@ Shouldn't fault
		ands	ip, ip, #3
		beq	.Lc2u_1fupi
.Lc2u_1nowords:	mov	r3, r7, get_byte_1
		teq	ip, #0
		beq	.Lc2u_finished
		cmp	ip, #2
-USER(		strbt	r3, [r0], #1)			@ May fault
+USER(		T(strb)	r3, [r0], #1)			@ May fault
		movge	r3, r7, get_byte_2
-USER(		strgebt	r3, [r0], #1)			@ May fault
+USER(		T(strgeb) r3, [r0], #1)			@ May fault
		movgt	r3, r7, get_byte_3
-USER(		strgtbt	r3, [r0], #1)			@ May fault
+USER(		T(strgtb) r3, [r0], #1)			@ May fault
		b	.Lc2u_finished

.Lc2u_2fupi:	subs	r2, r2, #4

@@ -174,7 +175,7 @@ USER(		T(strgtb) r3, [r0], #1)			@ May fault
		mov	r3, r7, pull #16
		ldr	r7, [r1], #4
		orr	r3, r3, r7, push #16
-USER(		strt	r3, [r0], #4)			@ May fault
+USER(		T(str)	r3, [r0], #4)			@ May fault
		mov	ip, r0, lsl #32 - PAGE_SHIFT
		rsb	ip, ip, #0
		movs	ip, ip, lsr #32 - PAGE_SHIFT

@@ -209,18 +210,18 @@ USER(		T(str)	r3, [r0], #4)			@ May fault
		movne	r3, r7, pull #16
		ldrne	r7, [r1], #4
		orrne	r3, r3, r7, push #16
-		strnet	r3, [r0], #4			@ Shouldn't fault
+		T(strne) r3, [r0], #4			@ Shouldn't fault
		ands	ip, ip, #3
		beq	.Lc2u_2fupi
.Lc2u_2nowords:	mov	r3, r7, get_byte_2
		teq	ip, #0
		beq	.Lc2u_finished
		cmp	ip, #2
-USER(		strbt	r3, [r0], #1)			@ May fault
+USER(		T(strb)	r3, [r0], #1)			@ May fault
		movge	r3, r7, get_byte_3
-USER(		strgebt	r3, [r0], #1)			@ May fault
+USER(		T(strgeb) r3, [r0], #1)			@ May fault
		ldrgtb	r3, [r1], #0
-USER(		strgtbt	r3, [r0], #1)			@ May fault
+USER(		T(strgtb) r3, [r0], #1)			@ May fault
		b	.Lc2u_finished

.Lc2u_3fupi:	subs	r2, r2, #4

@@ -229,7 +230,7 @@ USER(		T(strgtb) r3, [r0], #1)			@ May fault
		mov	r3, r7, pull #24
		ldr	r7, [r1], #4
		orr	r3, r3, r7, push #8
-USER(		strt	r3, [r0], #4)			@ May fault
+USER(		T(str)	r3, [r0], #4)			@ May fault
		mov	ip, r0, lsl #32 - PAGE_SHIFT
		rsb	ip, ip, #0
		movs	ip, ip, lsr #32 - PAGE_SHIFT

@@ -264,18 +265,18 @@ USER(		T(str)	r3, [r0], #4)			@ May fault
		movne	r3, r7, pull #24
		ldrne	r7, [r1], #4
		orrne	r3, r3, r7, push #8
-		strnet	r3, [r0], #4			@ Shouldn't fault
+		T(strne) r3, [r0], #4			@ Shouldn't fault
		ands	ip, ip, #3
		beq	.Lc2u_3fupi
.Lc2u_3nowords:	mov	r3, r7, get_byte_3
		teq	ip, #0
		beq	.Lc2u_finished
		cmp	ip, #2
-USER(		strbt	r3, [r0], #1)			@ May fault
+USER(		T(strb)	r3, [r0], #1)			@ May fault
		ldrgeb	r3, [r1], #1
-USER(		strgebt	r3, [r0], #1)			@ May fault
+USER(		T(strgeb) r3, [r0], #1)			@ May fault
		ldrgtb	r3, [r1], #0
-USER(		strgtbt	r3, [r0], #1)			@ May fault
+USER(		T(strgtb) r3, [r0], #1)			@ May fault
		b	.Lc2u_finished
ENDPROC(__copy_to_user)

@@ -294,11 +295,11 @@ ENDPROC(__copy_to_user)
.Lcfu_dest_not_aligned:
		rsb	ip, ip, #4
		cmp	ip, #2
-USER(		ldrbt	r3, [r1], #1)			@ May fault
+USER(		T(ldrb)	r3, [r1], #1)			@ May fault
		strb	r3, [r0], #1
-USER(		ldrgebt	r3, [r1], #1)			@ May fault
+USER(		T(ldrgeb) r3, [r1], #1)			@ May fault
		strgeb	r3, [r0], #1
-USER(		ldrgtbt	r3, [r1], #1)			@ May fault
+USER(		T(ldrgtb) r3, [r1], #1)			@ May fault
		strgtb	r3, [r0], #1
		sub	r2, r2, ip
		b	.Lcfu_dest_aligned

@@ -321,7 +322,7 @@ ENTRY(__copy_from_user)
.Lcfu_0fupi:	subs	r2, r2, #4
		addmi	ip, r2, #4
		bmi	.Lcfu_0nowords
-USER(		ldrt	r3, [r1], #4)
+USER(		T(ldr)	r3, [r1], #4)
		str	r3, [r0], #4
		mov	ip, r1, lsl #32 - PAGE_SHIFT	@ On each page, use a ld/st??t instruction
		rsb	ip, ip, #0

@@ -350,18 +351,18 @@ USER(		T(ldr)	r3, [r1], #4)
		ldmneia	r1!, {r3 - r4}			@ Shouldn't fault
		stmneia	r0!, {r3 - r4}
		tst	ip, #4
-		ldrnet	r3, [r1], #4			@ Shouldn't fault
+		T(ldrne) r3, [r1], #4			@ Shouldn't fault
		strne	r3, [r0], #4
		ands	ip, ip, #3
		beq	.Lcfu_0fupi
.Lcfu_0nowords:	teq	ip, #0
		beq	.Lcfu_finished
.Lcfu_nowords:	cmp	ip, #2
-USER(		ldrbt	r3, [r1], #1)			@ May fault
+USER(		T(ldrb)	r3, [r1], #1)			@ May fault
		strb	r3, [r0], #1
-USER(		ldrgebt	r3, [r1], #1)			@ May fault
+USER(		T(ldrgeb) r3, [r1], #1)			@ May fault
		strgeb	r3, [r0], #1
-USER(		ldrgtbt	r3, [r1], #1)			@ May fault
+USER(		T(ldrgtb) r3, [r1], #1)			@ May fault
		strgtb	r3, [r0], #1
		b	.Lcfu_finished

@@ -374,7 +375,7 @@ USER(		T(ldrgtb) r3, [r1], #1)			@ May fault

.Lcfu_src_not_aligned:
		bic	r1, r1, #3
-USER(		ldrt	r7, [r1], #4)			@ May fault
+USER(		T(ldr)	r7, [r1], #4)			@ May fault
		cmp	ip, #2
		bgt	.Lcfu_3fupi
		beq	.Lcfu_2fupi

@@ -382,7 +383,7 @@ USER(		T(ldr)	r7, [r1], #4)			@ May fault
		addmi	ip, r2, #4
		bmi	.Lcfu_1nowords
		mov	r3, r7, pull #8
-USER(		ldrt	r7, [r1], #4)			@ May fault
+USER(		T(ldr)	r7, [r1], #4)			@ May fault
		orr	r3, r3, r7, push #24
		str	r3, [r0], #4
		mov	ip, r1, lsl #32 - PAGE_SHIFT

@@ -417,7 +418,7 @@ USER(		T(ldr)	r7, [r1], #4)			@ May fault
		stmneia	r0!, {r3 - r4}
		tst	ip, #4
		movne	r3, r7, pull #8
-USER(		ldrnet	r7, [r1], #4)			@ May fault
+USER(		T(ldrne) r7, [r1], #4)			@ May fault
		orrne	r3, r3, r7, push #24
		strne	r3, [r0], #4
		ands	ip, ip, #3

@@ -437,7 +438,7 @@ USER(		T(ldrne) r7, [r1], #4)			@ May fault
		addmi	ip, r2, #4
		bmi	.Lcfu_2nowords
		mov	r3, r7, pull #16
-USER(		ldrt	r7, [r1], #4)			@ May fault
+USER(		T(ldr)	r7, [r1], #4)			@ May fault
		orr	r3, r3, r7, push #16
		str	r3, [r0], #4
		mov	ip, r1, lsl #32 - PAGE_SHIFT

@@ -473,7 +474,7 @@ USER(		T(ldr)	r7, [r1], #4)			@ May fault
		stmneia	r0!, {r3 - r4}
		tst	ip, #4
		movne	r3, r7, pull #16
-USER(		ldrnet	r7, [r1], #4)			@ May fault
+USER(		T(ldrne) r7, [r1], #4)			@ May fault
		orrne	r3, r3, r7, push #16
		strne	r3, [r0], #4
		ands	ip, ip, #3

@@ -485,7 +486,7 @@ USER(		T(ldrne) r7, [r1], #4)			@ May fault
		strb	r3, [r0], #1
		movge	r3, r7, get_byte_3
		strgeb	r3, [r0], #1
-USER(		ldrgtbt	r3, [r1], #0)			@ May fault
+USER(		T(ldrgtb) r3, [r1], #0)			@ May fault
		strgtb	r3, [r0], #1
		b	.Lcfu_finished

@@ -493,7 +494,7 @@ USER(		T(ldrgtb) r3, [r1], #0)			@ May fault
		addmi	ip, r2, #4
		bmi	.Lcfu_3nowords
		mov	r3, r7, pull #24
-USER(		ldrt	r7, [r1], #4)			@ May fault
+USER(		T(ldr)	r7, [r1], #4)			@ May fault
		orr	r3, r3, r7, push #8
		str	r3, [r0], #4
		mov	ip, r1, lsl #32 - PAGE_SHIFT

@@ -528,7 +529,7 @@ USER(		T(ldr)	r7, [r1], #4)			@ May fault
		stmneia	r0!, {r3 - r4}
		tst	ip, #4
		movne	r3, r7, pull #24
-USER(		ldrnet	r7, [r1], #4)			@ May fault
+USER(		T(ldrne) r7, [r1], #4)			@ May fault
		orrne	r3, r3, r7, push #8
		strne	r3, [r0], #4
		ands	ip, ip, #3

@@ -538,9 +539,9 @@ USER(		T(ldrne) r7, [r1], #4)			@ May fault
		beq	.Lcfu_finished
		cmp	ip, #2
		strb	r3, [r0], #1
-USER(		ldrgebt	r3, [r1], #1)			@ May fault
+USER(		T(ldrgeb) r3, [r1], #1)			@ May fault
		strgeb	r3, [r0], #1
-USER(		ldrgtbt	r3, [r1], #1)			@ May fault
+USER(		T(ldrgtb) r3, [r1], #1)			@ May fault
		strgtb	r3, [r0], #1
		b	.Lcfu_finished
ENDPROC(__copy_from_user)
@@ -101,7 +101,6 @@ static struct clocksource clk32k = {
	.rating		= 150,
	.read		= read_clk32k,
	.mask		= CLOCKSOURCE_MASK(20),
-	.shift		= 10,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

@@ -201,8 +200,7 @@ void __init at91rm9200_timer_init(void)
	clockevents_register_device(&clkevt);

	/* register clocksource */
-	clk32k.mult = clocksource_hz2mult(AT91_SLOW_CLOCK, clk32k.shift);
-	clocksource_register(&clk32k);
+	clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
}

struct sys_timer at91rm9200_timer = {

@@ -51,7 +51,6 @@ static struct clocksource pit_clk = {
	.name		= "pit",
	.rating		= 175,
	.read		= read_pit_clk,
-	.shift		= 20,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

@@ -163,10 +162,9 @@ static void __init at91sam926x_pit_init(void)
	 * Register clocksource.  The high order bits of PIV are unused,
	 * so this isn't a 32-bit counter unless we get clockevent irqs.
	 */
-	pit_clk.mult = clocksource_hz2mult(pit_rate, pit_clk.shift);
	bits = 12 /* PICNT */ + ilog2(pit_cycle) /* PIV */;
	pit_clk.mask = CLOCKSOURCE_MASK(bits);
-	clocksource_register(&pit_clk);
+	clocksource_register_hz(&pit_clk, pit_rate);

	/* Set up irq handler */
	setup_irq(AT91_ID_SYS, &at91sam926x_pit_irq);
@@ -21,13 +21,12 @@
 #include <linux/string.h>
 #include <linux/clk.h>
 #include <linux/spinlock.h>
+#include <linux/clkdev.h>
 #include <mach/csp/hw_cfg.h>
 #include <mach/csp/chipcHw_def.h>
 #include <mach/csp/chipcHw_reg.h>
 #include <mach/csp/chipcHw_inline.h>
 
-#include <asm/clkdev.h>
-
 #include "clock.h"
 
 #define clk_is_primary(x) ((x)->type & CLK_TYPE_PRIMARY)

@@ -30,10 +30,10 @@
 #include <linux/amba/bus.h>
 #include <linux/clocksource.h>
 #include <linux/clockchips.h>
+#include <linux/clkdev.h>
 
 #include <mach/csp/mm_addr.h>
 #include <mach/hardware.h>
-#include <asm/clkdev.h>
 #include <linux/io.h>
 #include <asm/irq.h>
 #include <asm/hardware/arm_timer.h>
@@ -294,7 +294,6 @@ static struct clocksource clocksource_bcmring_timer1 = {
 	.rating		= 200,
 	.read		= bcmring_get_cycles_timer1,
 	.mask		= CLOCKSOURCE_MASK(32),
-	.shift		= 20,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -303,7 +302,6 @@ static struct clocksource clocksource_bcmring_timer3 = {
 	.rating		= 100,
 	.read		= bcmring_get_cycles_timer3,
 	.mask		= CLOCKSOURCE_MASK(32),
-	.shift		= 20,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
@@ -316,10 +314,8 @@ static int __init bcmring_clocksource_init(void)
 	writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
 	       TIMER1_VA_BASE + TIMER_CTRL);
 
-	clocksource_bcmring_timer1.mult =
-		clocksource_khz2mult(TIMER1_FREQUENCY_MHZ * 1000,
-				     clocksource_bcmring_timer1.shift);
-	clocksource_register(&clocksource_bcmring_timer1);
+	clocksource_register_khz(&clocksource_bcmring_timer1,
+				 TIMER1_FREQUENCY_MHZ * 1000);
 
 	/* setup timer3 as free-running clocksource */
 	writel(0, TIMER3_VA_BASE + TIMER_CTRL);
@@ -328,10 +324,8 @@ static int __init bcmring_clocksource_init(void)
 	writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
 	       TIMER3_VA_BASE + TIMER_CTRL);
 
-	clocksource_bcmring_timer3.mult =
-		clocksource_khz2mult(TIMER3_FREQUENCY_KHZ,
-				     clocksource_bcmring_timer3.shift);
-	clocksource_register(&clocksource_bcmring_timer3);
+	clocksource_register_khz(&clocksource_bcmring_timer3,
+				 TIMER3_FREQUENCY_KHZ);
 
 	return 0;
 }
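The AT91 and BCMRing timer hunks above (and the DaVinci one further down) are the same mechanical conversion: instead of each platform picking a shift and deriving mult by hand with clocksource_hz2mult(), the core computes both inside clocksource_register_hz()/clocksource_register_khz(). A before/after sketch; the 32768 Hz rate and the read callback are illustrative, not from this diff:

    static cycle_t example_read(struct clocksource *cs)
    {
    	return 0;	/* would read the hardware counter */
    }

    static struct clocksource cs = {
    	.name	= "example",
    	.rating	= 150,
    	.read	= example_read,
    	.mask	= CLOCKSOURCE_MASK(32),
    	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
    };

    /* old style: hand-picked shift, hand-derived mult */
    cs.shift = 10;
    cs.mult = clocksource_hz2mult(32768, cs.shift);
    clocksource_register(&cs);

    /* new style: the core chooses an optimal mult/shift pair for the rate */
    clocksource_register_hz(&cs, 32768);

That is why the .shift initializers disappear from the structures above.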
@@ -3,6 +3,7 @@ menu "CNS3XXX platform type"
 
 config MACH_CNS3420VB
 	bool "Support for CNS3420 Validation Board"
+	select MIGHT_HAVE_PCI
 	help
 	  Include support for the Cavium Networks CNS3420 MPCore Platform
 	  Baseboard.
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/compiler.h>
 #include <linux/io.h>
+#include <linux/dma-mapping.h>
 #include <linux/serial_core.h>
 #include <linux/serial_8250.h>
 #include <linux/platform_device.h>
@@ -107,11 +108,64 @@ static void __init cns3420_early_serial_setup(void)
 #endif
 }
 
+/*
+ * USB
+ */
+static struct resource cns3xxx_usb_ehci_resources[] = {
+	[0] = {
+		.start = CNS3XXX_USB_BASE,
+		.end   = CNS3XXX_USB_BASE + SZ_16M - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_CNS3XXX_USB_EHCI,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static u64 cns3xxx_usb_ehci_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device cns3xxx_usb_ehci_device = {
+	.name          = "cns3xxx-ehci",
+	.num_resources = ARRAY_SIZE(cns3xxx_usb_ehci_resources),
+	.resource      = cns3xxx_usb_ehci_resources,
+	.dev           = {
+		.dma_mask          = &cns3xxx_usb_ehci_dma_mask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+};
+
+static struct resource cns3xxx_usb_ohci_resources[] = {
+	[0] = {
+		.start = CNS3XXX_USB_OHCI_BASE,
+		.end   = CNS3XXX_USB_OHCI_BASE + SZ_16M - 1,
+		.flags = IORESOURCE_MEM,
+	},
+	[1] = {
+		.start = IRQ_CNS3XXX_USB_OHCI,
+		.flags = IORESOURCE_IRQ,
+	},
+};
+
+static u64 cns3xxx_usb_ohci_dma_mask = DMA_BIT_MASK(32);
+
+static struct platform_device cns3xxx_usb_ohci_device = {
+	.name          = "cns3xxx-ohci",
+	.num_resources = ARRAY_SIZE(cns3xxx_usb_ohci_resources),
+	.resource      = cns3xxx_usb_ohci_resources,
+	.dev           = {
+		.dma_mask          = &cns3xxx_usb_ohci_dma_mask,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
+	},
+};
+
 /*
  * Initialization
  */
 static struct platform_device *cns3420_pdevs[] __initdata = {
 	&cns3420_nor_pdev,
+	&cns3xxx_usb_ehci_device,
+	&cns3xxx_usb_ohci_device,
 };
 
 static void __init cns3420_init(void)
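The EHCI/OHCI glue above is the stock static platform-device pattern: the register window and interrupt travel as resources, and the 32-bit dma_mask/coherent_dma_mask tell the driver core which addresses the controller may master. A sketch of the probe-side counterpart; the function name is a placeholder, not from this diff:

    static int example_usb_probe(struct platform_device *pdev)
    {
    	struct resource *mem;
    	int irq;

    	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    	irq = platform_get_irq(pdev, 0);
    	if (!mem || irq < 0)
    		return -ENODEV;

    	/* ioremap mem->start..mem->end, request irq, register the HCD */
    	return 0;
    }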
@@ -69,13 +69,10 @@ void __init cns3xxx_map_io(void)
 }
 
-/* used by entry-macro.S */
-void __iomem *gic_cpu_base_addr;
-
 void __init cns3xxx_init_irq(void)
 {
-	gic_cpu_base_addr = __io(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT);
-	gic_dist_init(0, __io(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT), 29);
-	gic_cpu_init(0, gic_cpu_base_addr);
+	gic_init(0, 29, __io(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT),
+		 __io(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT));
 }
 
 void cns3xxx_power_off(void)

@@ -11,13 +11,10 @@
 #ifndef __CNS3XXX_CORE_H
 #define __CNS3XXX_CORE_H
 
-extern void __iomem *gic_cpu_base_addr;
 extern struct sys_timer cns3xxx_timer;
 
 void __init cns3xxx_map_io(void);
 void __init cns3xxx_init_irq(void);
 void cns3xxx_power_off(void);
-void cns3xxx_pwr_power_up(unsigned int block);
-void cns3xxx_pwr_power_down(unsigned int block);
 
 #endif /* __CNS3XXX_CORE_H */
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 #include <mach/cns3xxx.h>
 #include <mach/irqs.h>
+#include <mach/pm.h>
 #include "core.h"
 #include "devices.h"
 

@@ -165,7 +165,6 @@
 #define CNS3XXX_USBOTG_BASE_VIRT		0xFFF15000
 
 #define CNS3XXX_USB_BASE			0x82000000	/* USB Host Control */
-#define CNS3XXX_USB_BASE_VIRT			0xFFF16000
 
 #define CNS3XXX_SATA2_BASE			0x83000000	/* SATA */
 #define CNS3XXX_SATA2_SIZE			SZ_16M
@@ -184,7 +183,6 @@
 #define CNS3XXX_2DG_BASE_VIRT			0xFFF1B000
 
 #define CNS3XXX_USB_OHCI_BASE			0x88000000	/* USB OHCI */
-#define CNS3XXX_USB_OHCI_BASE_VIRT		0xFFF1C000
 
 #define CNS3XXX_L2C_BASE			0x92000000	/* L2 Cache Control */
 #define CNS3XXX_L2C_BASE_VIRT			0xFFF27000
@@ -9,74 +9,10 @@
  */
 
 #include <mach/hardware.h>
 #include <asm/hardware/gic.h>
+#include <asm/hardware/entry-macro-gic.S>
 
 	.macro	disable_fiq
 	.endm
 
-	.macro	get_irqnr_preamble, base, tmp
-	ldr	\base, =gic_cpu_base_addr
-	ldr	\base, [\base]
-	.endm
-
 	.macro	arch_ret_to_user, tmp1, tmp2
 	.endm
-
-/*
- * The interrupt numbering scheme is defined in the
- * interrupt controller spec.  To wit:
- *
- * Interrupts 0-15 are IPI
- * 16-28 are reserved
- * 29-31 are local.  We allow 30 to be used for the watchdog.
- * 32-1020 are global
- * 1021-1022 are reserved
- * 1023 is "spurious" (no interrupt)
- *
- * For now, we ignore all local interrupts so only return an interrupt if it's
- * between 30 and 1020.  The test_for_ipi routine below will pick up on IPIs.
- *
- * A simple read from the controller will tell us the number of the highest
- * priority enabled interrupt.  We then just need to check whether it is in the
- * valid range for an IRQ (30-1020 inclusive).
- */
-
-	.macro	get_irqnr_and_base, irqnr, irqstat, base, tmp
-
-	ldr	\irqstat, [\base, #GIC_CPU_INTACK] /* bits 12-10 = src CPU, 9-0 = int # */
-
-	ldr	\tmp, =1021
-
-	bic	\irqnr, \irqstat, #0x1c00
-
-	cmp	\irqnr, #29
-	cmpcc	\irqnr, \irqnr
-	cmpne	\irqnr, \tmp
-	cmpcs	\irqnr, \irqnr
-
-	.endm
-
-/* We assume that irqstat (the raw value of the IRQ acknowledge
- * register) is preserved from the macro above.
- * If there is an IPI, we immediately signal end of interrupt on the
- * controller, since this requires the original irqstat value which
- * we won't easily be able to recreate later.
- */
-
-	.macro	test_for_ipi, irqnr, irqstat, base, tmp
-	bic	\irqnr, \irqstat, #0x1c00
-	cmp	\irqnr, #16
-	strcc	\irqstat, [\base, #GIC_CPU_EOI]
-	cmpcs	\irqnr, \irqnr
-	.endm
-
-/* As above, this assumes that irqstat and base are preserved.. */
-
-	.macro	test_for_ltirq, irqnr, irqstat, base, tmp
-	bic	\irqnr, \irqstat, #0x1c00
-	mov	\tmp, #0
-	cmp	\irqnr, #29
-	moveq	\tmp, #1
-	streq	\irqstat, [\base, #GIC_CPU_EOI]
-	cmp	\tmp, #0
-	.endm
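The cns3xxx changes above follow the generic-GIC consolidation elsewhere in this merge: the per-platform acknowledge/EOI macros and the exported gic_cpu_base_addr variable go away in favour of the shared entry-macro-gic.S plus one registration call. As used in the hunk:

    /* irq_start = 29: first IRQ this platform routes through the GIC */
    gic_init(0, 29, __io(CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT),
    	 __io(CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT));

which replaces the separate gic_dist_init()/gic_cpu_init() pair and lets the common entry macros locate the CPU interface themselves.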
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2000 Deep Blue Solutions Ltd
+ * Copyright 2004 ARM Limited
+ * Copyright 2008 Cavium Networks
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CNS3XXX_PM_H
+#define __CNS3XXX_PM_H
+
+#include <asm/atomic.h>
+
+void cns3xxx_pwr_clk_en(unsigned int block);
+void cns3xxx_pwr_clk_dis(unsigned int block);
+void cns3xxx_pwr_power_up(unsigned int block);
+void cns3xxx_pwr_power_down(unsigned int block);
+
+extern atomic_t usb_pwr_ref;
+
+#endif /* __CNS3XXX_PM_H */

@@ -6,10 +6,14 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/init.h>
+#include <linux/module.h>
 #include <linux/io.h>
 #include <linux/delay.h>
+#include <asm/atomic.h>
 #include <mach/system.h>
 #include <mach/cns3xxx.h>
+#include <mach/pm.h>
 
 void cns3xxx_pwr_clk_en(unsigned int block)
 {
@@ -18,6 +22,16 @@ void cns3xxx_pwr_clk_en(unsigned int block)
 	reg |= (block & PM_CLK_GATE_REG_MASK);
 	__raw_writel(reg, PM_CLK_GATE_REG);
 }
+EXPORT_SYMBOL(cns3xxx_pwr_clk_en);
+
+void cns3xxx_pwr_clk_dis(unsigned int block)
+{
+	u32 reg = __raw_readl(PM_CLK_GATE_REG);
+
+	reg &= ~(block & PM_CLK_GATE_REG_MASK);
+	__raw_writel(reg, PM_CLK_GATE_REG);
+}
+EXPORT_SYMBOL(cns3xxx_pwr_clk_dis);
 
 void cns3xxx_pwr_power_up(unsigned int block)
 {
@@ -29,6 +43,7 @@ void cns3xxx_pwr_power_up(unsigned int block)
 	/* Wait for 300us for the PLL output clock locked. */
 	udelay(300);
 };
+EXPORT_SYMBOL(cns3xxx_pwr_power_up);
 
 void cns3xxx_pwr_power_down(unsigned int block)
 {
@@ -38,6 +53,7 @@ void cns3xxx_pwr_power_down(unsigned int block)
 	reg |= (block & CNS3XXX_PWR_PLL_ALL);
 	__raw_writel(reg, PM_PLL_HM_PD_CTRL_REG);
 };
+EXPORT_SYMBOL(cns3xxx_pwr_power_down);
 
 static void cns3xxx_pwr_soft_rst_force(unsigned int block)
 {
@@ -51,11 +67,13 @@ static void cns3xxx_pwr_soft_rst_force(unsigned int block)
 		reg &= ~(block & PM_SOFT_RST_REG_MASK);
 	} else {
 		reg &= ~(block & PM_SOFT_RST_REG_MASK);
 		__raw_writel(reg, PM_SOFT_RST_REG);
 		reg |= (block & PM_SOFT_RST_REG_MASK);
 	}
 
 	__raw_writel(reg, PM_SOFT_RST_REG);
 }
+EXPORT_SYMBOL(cns3xxx_pwr_soft_rst_force);
 
 void cns3xxx_pwr_soft_rst(unsigned int block)
 {
@@ -69,6 +87,7 @@ void cns3xxx_pwr_soft_rst(unsigned int block)
 	}
 	cns3xxx_pwr_soft_rst_force(block);
 }
+EXPORT_SYMBOL(cns3xxx_pwr_soft_rst);
 
 void arch_reset(char mode, const char *cmd)
 {
@@ -99,3 +118,7 @@ int cns3xxx_cpu_clock(void)
 
 	return cpu;
 }
+EXPORT_SYMBOL(cns3xxx_cpu_clock);
+
+atomic_t usb_pwr_ref = ATOMIC_INIT(0);
+EXPORT_SYMBOL(usb_pwr_ref);
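The EXPORT_SYMBOLs and the usb_pwr_ref counter above exist so the (potentially modular) cns3xxx EHCI/OHCI glue can share one USB power domain: the first host to come up powers and clocks the block, the last one down gates it again. A hedged sketch of that pattern; the block bit is a stand-in, not a real register offset:

    #define USB_HOST_BLOCK	(1 << 2)	/* illustrative bit only */

    /* open path: first user powers the shared block up */
    if (atomic_inc_return(&usb_pwr_ref) == 1) {
    	cns3xxx_pwr_power_up(USB_HOST_BLOCK);
    	cns3xxx_pwr_clk_en(USB_HOST_BLOCK);
    }

    /* close path: last user gates the clock again */
    if (atomic_dec_return(&usb_pwr_ref) == 0)
    	cns3xxx_pwr_clk_dis(USB_HOST_BLOCK);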
@@ -61,6 +61,8 @@ config MACH_DAVINCI_EVM
 	bool "TI DM644x EVM"
 	default ARCH_DAVINCI_DM644x
 	depends on ARCH_DAVINCI_DM644x
+	select MISC_DEVICES
+	select EEPROM_AT24
 	help
 	  Configure this option to specify whether the board used
 	  for development is a DM644x EVM
@@ -68,6 +70,8 @@ config MACH_DAVINCI_EVM
 config MACH_SFFSDR
 	bool "Lyrtech SFFSDR"
 	depends on ARCH_DAVINCI_DM644x
+	select MISC_DEVICES
+	select EEPROM_AT24
 	help
 	  Say Y here to select the Lyrtech Small Form Factor
 	  Software Defined Radio (SFFSDR) board.
@@ -99,6 +103,8 @@ config MACH_DAVINCI_DM6467_EVM
 	default ARCH_DAVINCI_DM646x
 	depends on ARCH_DAVINCI_DM646x
 	select MACH_DAVINCI_DM6467TEVM
+	select MISC_DEVICES
+	select EEPROM_AT24
 	help
 	  Configure this option to specify whether the board used
 	  for development is a DM6467 EVM
@@ -110,6 +116,8 @@ config MACH_DAVINCI_DM365_EVM
 	bool "TI DM365 EVM"
 	default ARCH_DAVINCI_DM365
 	depends on ARCH_DAVINCI_DM365
+	select MISC_DEVICES
+	select EEPROM_AT24
 	help
 	  Configure this option to specify whether the board used
 	  for development is a DM365 EVM
@@ -119,6 +127,8 @@ config MACH_DAVINCI_DA830_EVM
 	default ARCH_DAVINCI_DA830
 	depends on ARCH_DAVINCI_DA830
 	select GPIO_PCF857X
+	select MISC_DEVICES
+	select EEPROM_AT24
 	help
 	  Say Y here to select the TI DA830/OMAP-L137/AM17x Evaluation Module.
@@ -148,7 +158,6 @@ config MACH_DAVINCI_DA850_EVM
 	bool "TI DA850/OMAP-L138/AM18x Reference Platform"
 	default ARCH_DAVINCI_DA850
 	depends on ARCH_DAVINCI_DA850
-	select GPIO_PCA953X
 	help
 	  Say Y here to select the TI DA850/OMAP-L138/AM18x Evaluation Module.
@@ -178,6 +187,12 @@ config DA850_UI_RMII
 
 endchoice
 
+config GPIO_PCA953X
+	default MACH_DAVINCI_DA850_EVM
+
+config KEYBOARD_GPIO_POLLED
+	default MACH_DAVINCI_DA850_EVM
+
 config MACH_TNETV107X
 	bool "TI TNETV107X Reference Platform"
 	default ARCH_DAVINCI_TNETV107X
@@ -188,6 +203,8 @@ config MACH_TNETV107X
 config MACH_MITYOMAPL138
 	bool "Critical Link MityDSP-L138/MityARM-1808 SoM"
 	depends on ARCH_DAVINCI_DA850
+	select MISC_DEVICES
+	select EEPROM_AT24
 	help
 	  Say Y here to select the Critical Link MityDSP-L138/MityARM-1808
 	  System on Module.  Information on this SoM may be found at
@@ -90,7 +90,7 @@ int davinci_aemif_setup_timing(struct davinci_aemif_timing *t,
 			       void __iomem *base, unsigned cs)
 {
 	unsigned set, val;
-	unsigned ta, rhold, rstrobe, rsetup, whold, wstrobe, wsetup;
+	int ta, rhold, rstrobe, rsetup, whold, wstrobe, wsetup;
 	unsigned offset = A1CR_OFFSET + cs * 4;
 	struct clk *aemif_clk;
 	unsigned long clkrate;
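The aemif.c one-liner above is a signedness fix: these timing variables receive values from a rounding helper that can return a negative error code, and a check like "ta < 0" can never fire on an unsigned variable. A minimal illustration with a hypothetical helper:

    #include <errno.h>

    static int compute_cycles(int ns)
    {
    	return ns < 0 ? -EINVAL : ns / 10;	/* may return a negative errno */
    }

    static int setup_timing(int ns)
    {
    	int ta = compute_cycles(ns);	/* with 'unsigned ta' the check below breaks */

    	if (ta < 0)
    		return ta;		/* error path is actually reachable */
    	return 0;
    }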
@@ -17,8 +17,10 @@
 #include <linux/i2c.h>
 #include <linux/i2c/at24.h>
+#include <linux/i2c/pca953x.h>
+#include <linux/input.h>
 #include <linux/mfd/tps6507x.h>
 #include <linux/gpio.h>
+#include <linux/gpio_keys.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
@@ -266,34 +268,115 @@ static inline void da850_evm_setup_emac_rmii(int rmii_sel)
 	struct davinci_soc_info *soc_info = &davinci_soc_info;
 
 	soc_info->emac_pdata->rmii_en = 1;
-	gpio_set_value(rmii_sel, 0);
+	gpio_set_value_cansleep(rmii_sel, 0);
 }
 #else
 static inline void da850_evm_setup_emac_rmii(int rmii_sel) { }
 #endif
 
+#define DA850_KEYS_DEBOUNCE_MS	10
+/*
+ * At 200ms polling interval it is possible to miss an
+ * event by tapping very lightly on the push button but most
+ * pushes do result in an event; longer intervals require the
+ * user to hold the button whereas shorter intervals require
+ * more CPU time for polling.
+ */
+#define DA850_GPIO_KEYS_POLL_MS	200
+
+enum da850_evm_ui_exp_pins {
+	DA850_EVM_UI_EXP_SEL_C = 5,
+	DA850_EVM_UI_EXP_SEL_B,
+	DA850_EVM_UI_EXP_SEL_A,
+	DA850_EVM_UI_EXP_PB8,
+	DA850_EVM_UI_EXP_PB7,
+	DA850_EVM_UI_EXP_PB6,
+	DA850_EVM_UI_EXP_PB5,
+	DA850_EVM_UI_EXP_PB4,
+	DA850_EVM_UI_EXP_PB3,
+	DA850_EVM_UI_EXP_PB2,
+	DA850_EVM_UI_EXP_PB1,
+};
+
+static const char const *da850_evm_ui_exp[] = {
+	[DA850_EVM_UI_EXP_SEL_C]	= "sel_c",
+	[DA850_EVM_UI_EXP_SEL_B]	= "sel_b",
+	[DA850_EVM_UI_EXP_SEL_A]	= "sel_a",
+	[DA850_EVM_UI_EXP_PB8]		= "pb8",
+	[DA850_EVM_UI_EXP_PB7]		= "pb7",
+	[DA850_EVM_UI_EXP_PB6]		= "pb6",
+	[DA850_EVM_UI_EXP_PB5]		= "pb5",
+	[DA850_EVM_UI_EXP_PB4]		= "pb4",
+	[DA850_EVM_UI_EXP_PB3]		= "pb3",
+	[DA850_EVM_UI_EXP_PB2]		= "pb2",
+	[DA850_EVM_UI_EXP_PB1]		= "pb1",
+};
+
+#define DA850_N_UI_PB		8
+
+static struct gpio_keys_button da850_evm_ui_keys[] = {
+	[0 ... DA850_N_UI_PB - 1] = {
+		.type			= EV_KEY,
+		.active_low		= 1,
+		.wakeup			= 0,
+		.debounce_interval	= DA850_KEYS_DEBOUNCE_MS,
+		.code			= -1, /* assigned at runtime */
+		.gpio			= -1, /* assigned at runtime */
+		.desc			= NULL, /* assigned at runtime */
+	},
+};
+
+static struct gpio_keys_platform_data da850_evm_ui_keys_pdata = {
+	.buttons = da850_evm_ui_keys,
+	.nbuttons = ARRAY_SIZE(da850_evm_ui_keys),
+	.poll_interval = DA850_GPIO_KEYS_POLL_MS,
+};
+
+static struct platform_device da850_evm_ui_keys_device = {
+	.name = "gpio-keys-polled",
+	.id = 0,
+	.dev = {
+		.platform_data = &da850_evm_ui_keys_pdata
+	},
+};
+
+static void da850_evm_ui_keys_init(unsigned gpio)
+{
+	int i;
+	struct gpio_keys_button *button;
+
+	for (i = 0; i < DA850_N_UI_PB; i++) {
+		button = &da850_evm_ui_keys[i];
+		button->code = KEY_F8 - i;
+		button->desc = (char *)
+				da850_evm_ui_exp[DA850_EVM_UI_EXP_PB8 + i];
+		button->gpio = gpio + DA850_EVM_UI_EXP_PB8 + i;
+	}
+}
+
 static int da850_evm_ui_expander_setup(struct i2c_client *client, unsigned gpio,
 						unsigned ngpio, void *c)
 {
 	int sel_a, sel_b, sel_c, ret;
 
-	sel_a = gpio + 7;
-	sel_b = gpio + 6;
-	sel_c = gpio + 5;
+	sel_a = gpio + DA850_EVM_UI_EXP_SEL_A;
+	sel_b = gpio + DA850_EVM_UI_EXP_SEL_B;
+	sel_c = gpio + DA850_EVM_UI_EXP_SEL_C;
 
-	ret = gpio_request(sel_a, "sel_a");
+	ret = gpio_request(sel_a, da850_evm_ui_exp[DA850_EVM_UI_EXP_SEL_A]);
 	if (ret) {
 		pr_warning("Cannot open UI expander pin %d\n", sel_a);
 		goto exp_setup_sela_fail;
 	}
 
-	ret = gpio_request(sel_b, "sel_b");
+	ret = gpio_request(sel_b, da850_evm_ui_exp[DA850_EVM_UI_EXP_SEL_B]);
 	if (ret) {
 		pr_warning("Cannot open UI expander pin %d\n", sel_b);
 		goto exp_setup_selb_fail;
 	}
 
-	ret = gpio_request(sel_c, "sel_c");
+	ret = gpio_request(sel_c, da850_evm_ui_exp[DA850_EVM_UI_EXP_SEL_C]);
 	if (ret) {
 		pr_warning("Cannot open UI expander pin %d\n", sel_c);
 		goto exp_setup_selc_fail;
@@ -304,6 +387,13 @@ static int da850_evm_ui_expander_setup(struct i2c_client *client, unsigned gpio,
 	gpio_direction_output(sel_b, 1);
 	gpio_direction_output(sel_c, 1);
 
+	da850_evm_ui_keys_init(gpio);
+	ret = platform_device_register(&da850_evm_ui_keys_device);
+	if (ret) {
+		pr_warning("Could not register UI GPIO expander push-buttons");
+		goto exp_setup_keys_fail;
+	}
+
 	ui_card_detected = 1;
 	pr_info("DA850/OMAP-L138 EVM UI card detected\n");
 
@@ -313,6 +403,8 @@ static int da850_evm_ui_expander_setup(struct i2c_client *client, unsigned gpio,
 
 	return 0;
 
+exp_setup_keys_fail:
+	gpio_free(sel_c);
 exp_setup_selc_fail:
 	gpio_free(sel_b);
 exp_setup_selb_fail:
@@ -324,14 +416,192 @@ exp_setup_sela_fail:
 static int da850_evm_ui_expander_teardown(struct i2c_client *client,
 					unsigned gpio, unsigned ngpio, void *c)
 {
-	/* deselect all functionalities */
-	gpio_set_value(gpio + 5, 1);
-	gpio_set_value(gpio + 6, 1);
-	gpio_set_value(gpio + 7, 1);
+	platform_device_unregister(&da850_evm_ui_keys_device);
 
-	gpio_free(gpio + 5);
-	gpio_free(gpio + 6);
-	gpio_free(gpio + 7);
+	/* deselect all functionalities */
+	gpio_set_value_cansleep(gpio + DA850_EVM_UI_EXP_SEL_C, 1);
+	gpio_set_value_cansleep(gpio + DA850_EVM_UI_EXP_SEL_B, 1);
+	gpio_set_value_cansleep(gpio + DA850_EVM_UI_EXP_SEL_A, 1);
+
+	gpio_free(gpio + DA850_EVM_UI_EXP_SEL_C);
+	gpio_free(gpio + DA850_EVM_UI_EXP_SEL_B);
+	gpio_free(gpio + DA850_EVM_UI_EXP_SEL_A);
 
 	return 0;
 }
 
+/* assign the baseboard expander's GPIOs after the UI board's */
+#define DA850_UI_EXPANDER_N_GPIOS ARRAY_SIZE(da850_evm_ui_exp)
+#define DA850_BB_EXPANDER_GPIO_BASE (DAVINCI_N_GPIO + DA850_UI_EXPANDER_N_GPIOS)
+
+enum da850_evm_bb_exp_pins {
+	DA850_EVM_BB_EXP_DEEP_SLEEP_EN = 0,
+	DA850_EVM_BB_EXP_SW_RST,
+	DA850_EVM_BB_EXP_TP_23,
+	DA850_EVM_BB_EXP_TP_22,
+	DA850_EVM_BB_EXP_TP_21,
+	DA850_EVM_BB_EXP_USER_PB1,
+	DA850_EVM_BB_EXP_USER_LED2,
+	DA850_EVM_BB_EXP_USER_LED1,
+	DA850_EVM_BB_EXP_USER_SW1,
+	DA850_EVM_BB_EXP_USER_SW2,
+	DA850_EVM_BB_EXP_USER_SW3,
+	DA850_EVM_BB_EXP_USER_SW4,
+	DA850_EVM_BB_EXP_USER_SW5,
+	DA850_EVM_BB_EXP_USER_SW6,
+	DA850_EVM_BB_EXP_USER_SW7,
+	DA850_EVM_BB_EXP_USER_SW8
+};
+
+static const char const *da850_evm_bb_exp[] = {
+	[DA850_EVM_BB_EXP_DEEP_SLEEP_EN]	= "deep_sleep_en",
+	[DA850_EVM_BB_EXP_SW_RST]		= "sw_rst",
+	[DA850_EVM_BB_EXP_TP_23]		= "tp_23",
+	[DA850_EVM_BB_EXP_TP_22]		= "tp_22",
+	[DA850_EVM_BB_EXP_TP_21]		= "tp_21",
+	[DA850_EVM_BB_EXP_USER_PB1]		= "user_pb1",
+	[DA850_EVM_BB_EXP_USER_LED2]		= "user_led2",
+	[DA850_EVM_BB_EXP_USER_LED1]		= "user_led1",
+	[DA850_EVM_BB_EXP_USER_SW1]		= "user_sw1",
+	[DA850_EVM_BB_EXP_USER_SW2]		= "user_sw2",
+	[DA850_EVM_BB_EXP_USER_SW3]		= "user_sw3",
+	[DA850_EVM_BB_EXP_USER_SW4]		= "user_sw4",
+	[DA850_EVM_BB_EXP_USER_SW5]		= "user_sw5",
+	[DA850_EVM_BB_EXP_USER_SW6]		= "user_sw6",
+	[DA850_EVM_BB_EXP_USER_SW7]		= "user_sw7",
+	[DA850_EVM_BB_EXP_USER_SW8]		= "user_sw8",
+};
+
+#define DA850_N_BB_USER_SW	8
+
+static struct gpio_keys_button da850_evm_bb_keys[] = {
+	[0] = {
+		.type			= EV_KEY,
+		.active_low		= 1,
+		.wakeup			= 0,
+		.debounce_interval	= DA850_KEYS_DEBOUNCE_MS,
+		.code			= KEY_PROG1,
+		.desc			= NULL, /* assigned at runtime */
+		.gpio			= -1, /* assigned at runtime */
+	},
+	[1 ... DA850_N_BB_USER_SW] = {
+		.type			= EV_SW,
+		.active_low		= 1,
+		.wakeup			= 0,
+		.debounce_interval	= DA850_KEYS_DEBOUNCE_MS,
+		.code			= -1, /* assigned at runtime */
+		.desc			= NULL, /* assigned at runtime */
+		.gpio			= -1, /* assigned at runtime */
+	},
+};
+
+static struct gpio_keys_platform_data da850_evm_bb_keys_pdata = {
+	.buttons = da850_evm_bb_keys,
+	.nbuttons = ARRAY_SIZE(da850_evm_bb_keys),
+	.poll_interval = DA850_GPIO_KEYS_POLL_MS,
+};
+
+static struct platform_device da850_evm_bb_keys_device = {
+	.name = "gpio-keys-polled",
+	.id = 1,
+	.dev = {
+		.platform_data = &da850_evm_bb_keys_pdata
+	},
+};
+
+static void da850_evm_bb_keys_init(unsigned gpio)
+{
+	int i;
+	struct gpio_keys_button *button;
+
+	button = &da850_evm_bb_keys[0];
+	button->desc = (char *)
+		da850_evm_bb_exp[DA850_EVM_BB_EXP_USER_PB1];
+	button->gpio = gpio + DA850_EVM_BB_EXP_USER_PB1;
+
+	for (i = 0; i < DA850_N_BB_USER_SW; i++) {
+		button = &da850_evm_bb_keys[i + 1];
+		button->code = SW_LID + i;
+		button->desc = (char *)
+				da850_evm_bb_exp[DA850_EVM_BB_EXP_USER_SW1 + i];
+		button->gpio = gpio + DA850_EVM_BB_EXP_USER_SW1 + i;
+	}
+}
+
+#define DA850_N_BB_USER_LED	2
+
+static struct gpio_led da850_evm_bb_leds[] = {
+	[0 ... DA850_N_BB_USER_LED - 1] = {
+		.active_low = 1,
+		.gpio = -1, /* assigned at runtime */
+		.name = NULL, /* assigned at runtime */
+	},
+};
+
+static struct gpio_led_platform_data da850_evm_bb_leds_pdata = {
+	.leds = da850_evm_bb_leds,
+	.num_leds = ARRAY_SIZE(da850_evm_bb_leds),
+};
+
+static struct platform_device da850_evm_bb_leds_device = {
+	.name		= "leds-gpio",
+	.id		= -1,
+	.dev = {
+		.platform_data = &da850_evm_bb_leds_pdata
+	}
+};
+
+static void da850_evm_bb_leds_init(unsigned gpio)
+{
+	int i;
+	struct gpio_led *led;
+
+	for (i = 0; i < DA850_N_BB_USER_LED; i++) {
+		led = &da850_evm_bb_leds[i];
+
+		led->gpio = gpio + DA850_EVM_BB_EXP_USER_LED2 + i;
+		led->name =
+			da850_evm_bb_exp[DA850_EVM_BB_EXP_USER_LED2 + i];
+	}
+}
+
+static int da850_evm_bb_expander_setup(struct i2c_client *client,
+						unsigned gpio, unsigned ngpio,
+						void *c)
+{
+	int ret;
+
+	/*
+	 * Register the switches and pushbutton on the baseboard as a gpio-keys
+	 * device.
+	 */
+	da850_evm_bb_keys_init(gpio);
+	ret = platform_device_register(&da850_evm_bb_keys_device);
+	if (ret) {
+		pr_warning("Could not register baseboard GPIO expander keys");
+		goto io_exp_setup_sw_fail;
+	}
+
+	da850_evm_bb_leds_init(gpio);
+	ret = platform_device_register(&da850_evm_bb_leds_device);
+	if (ret) {
+		pr_warning("Could not register baseboard GPIO expander LEDS");
+		goto io_exp_setup_leds_fail;
+	}
+
+	return 0;
+
+io_exp_setup_leds_fail:
+	platform_device_unregister(&da850_evm_bb_keys_device);
+io_exp_setup_sw_fail:
+	return ret;
+}
+
+static int da850_evm_bb_expander_teardown(struct i2c_client *client,
+					unsigned gpio, unsigned ngpio, void *c)
+{
+	platform_device_unregister(&da850_evm_bb_leds_device);
+	platform_device_unregister(&da850_evm_bb_keys_device);
+
+	return 0;
+}
@@ -340,6 +610,14 @@ static struct pca953x_platform_data da850_evm_ui_expander_info = {
 	.gpio_base	= DAVINCI_N_GPIO,
 	.setup		= da850_evm_ui_expander_setup,
 	.teardown	= da850_evm_ui_expander_teardown,
+	.names		= da850_evm_ui_exp,
+};
+
+static struct pca953x_platform_data da850_evm_bb_expander_info = {
+	.gpio_base	= DA850_BB_EXPANDER_GPIO_BASE,
+	.setup		= da850_evm_bb_expander_setup,
+	.teardown	= da850_evm_bb_expander_teardown,
+	.names		= da850_evm_bb_exp,
 };
 
 static struct i2c_board_info __initdata da850_evm_i2c_devices[] = {
@@ -350,6 +628,10 @@ static struct i2c_board_info __initdata da850_evm_i2c_devices[] = {
 		I2C_BOARD_INFO("tca6416", 0x20),
 		.platform_data = &da850_evm_ui_expander_info,
 	},
+	{
+		I2C_BOARD_INFO("tca6416", 0x21),
+		.platform_data = &da850_evm_bb_expander_info,
+	},
 };
 
 static struct davinci_i2c_platform_data da850_evm_i2c_0_pdata = {
@@ -540,7 +822,7 @@ static struct regulator_init_data tps65070_regulator_data[] = {
 	{
 		.constraints = {
 			.min_uV = 950000,
-			.max_uV = 1320000,
+			.max_uV = 1350000,
 			.valid_ops_mask = (REGULATOR_CHANGE_VOLTAGE |
 					REGULATOR_CHANGE_STATUS),
 			.boot_on = 1,
@@ -591,7 +873,7 @@ static struct tps6507x_board tps_board = {
 	.tps6507x_ts_init_data = &tps6507x_touchscreen_data,
 };
 
-static struct i2c_board_info __initdata da850evm_tps65070_info[] = {
+static struct i2c_board_info __initdata da850_evm_tps65070_info[] = {
 	{
 		I2C_BOARD_INFO("tps6507x", 0x48),
 		.platform_data = &tps_board,
@@ -600,8 +882,8 @@ static struct i2c_board_info __initdata da850_evm_tps65070_info[] = {
 
 static int __init pmic_tps65070_init(void)
 {
-	return i2c_register_board_info(1, da850evm_tps65070_info,
-					ARRAY_SIZE(da850evm_tps65070_info));
+	return i2c_register_board_info(1, da850_evm_tps65070_info,
+					ARRAY_SIZE(da850_evm_tps65070_info));
 }
 
 static const short da850_evm_lcdc_pins[] = {
@@ -736,6 +1018,27 @@ static struct edma_rsv_info *da850_edma_rsv[2] = {
 	&da850_edma_cc1_rsv,
 };
 
+#ifdef CONFIG_CPU_FREQ
+static __init int da850_evm_init_cpufreq(void)
+{
+	switch (system_rev & 0xF) {
+	case 3:
+		da850_max_speed = 456000;
+		break;
+	case 2:
+		da850_max_speed = 408000;
+		break;
+	case 1:
+		da850_max_speed = 372000;
+		break;
+	}
+
+	return da850_register_cpufreq("pll0_sysclk3");
+}
+#else
+static __init int da850_evm_init_cpufreq(void) { return 0; }
+#endif
+
 static __init void da850_evm_init(void)
 {
 	int ret;
@@ -836,7 +1139,7 @@ static __init void da850_evm_init(void)
 	if (ret)
 		pr_warning("da850_evm_init: rtc setup failed: %d\n", ret);
 
-	ret = da850_register_cpufreq("pll0_sysclk3");
+	ret = da850_evm_init_cpufreq();
 	if (ret)
 		pr_warning("da850_evm_init: cpufreq registration failed: %d\n",
 				ret);
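DA850_BB_EXPANDER_GPIO_BASE above stacks the two TCA6416 I2C expanders into the global GPIO number space behind the on-SoC pins: the UI card claims gpio_base = DAVINCI_N_GPIO, and the baseboard expander starts DA850_UI_EXPANDER_N_GPIOS (16 lines per TCA6416) above that. A sketch of the arithmetic, with an illustrative SoC pin count:

    #define DAVINCI_N_GPIO	144	/* illustrative SoC GPIO count */
    #define UI_N_GPIOS		16	/* one TCA6416 */

    unsigned ui_base = DAVINCI_N_GPIO;			/* UI expander: 144..159 */
    unsigned bb_base = DAVINCI_N_GPIO + UI_N_GPIOS;	/* baseboard: 160..175 */

    /* e.g. "user_led1" then sits at bb_base + DA850_EVM_BB_EXP_USER_LED1 */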
@@ -336,7 +336,7 @@ int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
 		ratio--;
 	}
 
-	if (ratio > PLLDIV_RATIO_MASK)
+	if (ratio > pll->div_ratio_mask)
 		return -EINVAL;
 
 	do {
@@ -344,7 +344,7 @@ int davinci_set_sysclk_rate(struct clk *clk, unsigned long rate)
 	} while (v & PLLSTAT_GOSTAT);
 
 	v = __raw_readl(pll->base + clk->div_reg);
-	v &= ~PLLDIV_RATIO_MASK;
+	v &= ~pll->div_ratio_mask;
 	v |= ratio | PLLDIV_EN;
 	__raw_writel(v, pll->base + clk->div_reg);
@@ -68,7 +68,7 @@
 #ifndef __ASSEMBLER__
 
 #include <linux/list.h>
-#include <asm/clkdev.h>
+#include <linux/clkdev.h>
 
 #define PLLSTAT_GOSTAT	BIT(0)
 #define PLLCMD_GOSET	BIT(0)
@@ -830,8 +830,7 @@ static void da850_set_async3_src(int pllnum)
  * According to the TRM, minimum PLLM results in maximum power savings.
  * The OPP definitions below should keep the PLLM as low as possible.
  *
- * The output of the PLLM must be between 400 to 600 MHz.
- * This rules out prediv of anything but divide-by-one for 24Mhz OSC input.
+ * The output of the PLLM must be between 300 to 600 MHz.
  */
 struct da850_opp {
 	unsigned int	freq;	/* in KHz */
@@ -842,6 +841,33 @@ struct da850_opp {
 	unsigned int	cvdd_max; /* in uV */
 };
 
+static const struct da850_opp da850_opp_456 = {
+	.freq		= 456000,
+	.prediv		= 1,
+	.mult		= 19,
+	.postdiv	= 1,
+	.cvdd_min	= 1300000,
+	.cvdd_max	= 1350000,
+};
+
+static const struct da850_opp da850_opp_408 = {
+	.freq		= 408000,
+	.prediv		= 1,
+	.mult		= 17,
+	.postdiv	= 1,
+	.cvdd_min	= 1300000,
+	.cvdd_max	= 1350000,
+};
+
+static const struct da850_opp da850_opp_372 = {
+	.freq		= 372000,
+	.prediv		= 2,
+	.mult		= 31,
+	.postdiv	= 1,
+	.cvdd_min	= 1200000,
+	.cvdd_max	= 1320000,
+};
+
 static const struct da850_opp da850_opp_300 = {
 	.freq		= 300000,
 	.prediv		= 1,
@@ -876,6 +902,9 @@ static const struct da850_opp da850_opp_96 = {
 	}
 
 static struct cpufreq_frequency_table da850_freq_table[] = {
+	OPP(456),
+	OPP(408),
+	OPP(372),
 	OPP(300),
 	OPP(200),
 	OPP(96),
@@ -885,6 +914,19 @@ static struct cpufreq_frequency_table da850_freq_table[] = {
 	},
 };
 
+#ifdef CONFIG_REGULATOR
+static int da850_set_voltage(unsigned int index);
+static int da850_regulator_init(void);
+#endif
+
+static struct davinci_cpufreq_config cpufreq_info = {
+	.freq_table = da850_freq_table,
+#ifdef CONFIG_REGULATOR
+	.init = da850_regulator_init,
+	.set_voltage = da850_set_voltage,
+#endif
+};
+
 #ifdef CONFIG_REGULATOR
 static struct regulator *cvdd;
 
@@ -895,7 +937,7 @@ static int da850_set_voltage(unsigned int index)
 	if (!cvdd)
 		return -ENODEV;
 
-	opp = (struct da850_opp *) da850_freq_table[index].index;
+	opp = (struct da850_opp *) cpufreq_info.freq_table[index].index;
 
 	return regulator_set_voltage(cvdd, opp->cvdd_min, opp->cvdd_max);
 }
@@ -912,14 +954,6 @@ static int da850_regulator_init(void)
 }
 #endif
 
-static struct davinci_cpufreq_config cpufreq_info = {
-	.freq_table = &da850_freq_table[0],
-#ifdef CONFIG_REGULATOR
-	.init = da850_regulator_init,
-	.set_voltage = da850_set_voltage,
-#endif
-};
-
 static struct platform_device da850_cpufreq_device = {
 	.name			= "cpufreq-davinci",
 	.dev = {
@@ -928,12 +962,22 @@ static struct platform_device da850_cpufreq_device = {
 	.id = -1,
 };
 
+unsigned int da850_max_speed = 300000;
+
 int __init da850_register_cpufreq(char *async_clk)
 {
+	int i;
+
 	/* cpufreq driver can help keep an "async" clock constant */
 	if (async_clk)
 		clk_add_alias("async", da850_cpufreq_device.name,
 							async_clk, NULL);
+	for (i = 0; i < ARRAY_SIZE(da850_freq_table); i++) {
+		if (da850_freq_table[i].frequency <= da850_max_speed) {
+			cpufreq_info.freq_table = &da850_freq_table[i];
+			break;
+		}
+	}
 
 	return platform_device_register(&da850_cpufreq_device);
 }
@@ -942,17 +986,18 @@ static int da850_round_armrate(struct clk *clk, unsigned long rate)
 {
 	int i, ret = 0, diff;
 	unsigned int best = (unsigned int) -1;
+	struct cpufreq_frequency_table *table = cpufreq_info.freq_table;
 
 	rate /= 1000; /* convert to kHz */
 
-	for (i = 0; da850_freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
-		diff = da850_freq_table[i].frequency - rate;
+	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
+		diff = table[i].frequency - rate;
 		if (diff < 0)
 			diff = -diff;
 
 		if (diff < best) {
 			best = diff;
-			ret = da850_freq_table[i].frequency;
+			ret = table[i].frequency;
 		}
 	}
 
@@ -973,7 +1018,7 @@ static int da850_set_pll0rate(struct clk *clk, unsigned long index)
 	struct pll_data *pll = clk->pll_data;
 	int ret;
 
-	opp = (struct da850_opp *) da850_freq_table[index].index;
+	opp = (struct da850_opp *) cpufreq_info.freq_table[index].index;
 	prediv = opp->prediv;
 	mult = opp->mult;
 	postdiv = opp->postdiv;
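Each da850_opp entry above encodes the PLL0 settings that produce the target rate from the board's 24 MHz oscillator, roughly freq = 24 MHz / prediv * mult / postdiv. Checking the three new entries (comment-only sketch):

    /* da850_opp_456: 24 / 1 * 19 / 1 = 456 MHz */
    /* da850_opp_408: 24 / 1 * 17 / 1 = 408 MHz */
    /* da850_opp_372: 24 / 2 * 31 / 1 = 372 MHz */

The 372 MHz point also explains the comment change at the top of the hunk: with prediv = 2 the PLLM output is 12 * 31 = 372 MHz, which is legal once the documented floor is 300 MHz rather than 400 MHz and prediv is no longer restricted to divide-by-one.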
@@ -344,7 +344,20 @@ static struct platform_device tsc_device = {
 
 void __init tnetv107x_devices_init(struct tnetv107x_device_info *info)
 {
-	int i;
+	int i, error;
+	struct clk *tsc_clk;
+
+	/*
+	 * The reset default for the tnetv107x tsc clock divider is set too
+	 * high.  This forces the clock down to a range that allows the ADC
+	 * to complete sample conversion in time.
+	 */
+	tsc_clk = clk_get(NULL, "sys_tsc_clk");
+	if (tsc_clk) {
+		error = clk_set_rate(tsc_clk, 5000000);
+		WARN_ON(error < 0);
+		clk_put(tsc_clk);
+	}
 
 	platform_device_register(&edma_device);
 	platform_device_register(&tnetv107x_wdt_device);
@@ -27,6 +27,13 @@
 extern void __iomem *da8xx_syscfg0_base;
 extern void __iomem *da8xx_syscfg1_base;
 
+/*
+ * If the DA850/OMAP-L138/AM18x SoC on board is of a higher speed grade
+ * (than the regular 300 MHz variant), the board code should set this up
+ * with the supported speed before calling da850_register_cpufreq().
+ */
+extern unsigned int da850_max_speed;
+
 /*
  * The cp_intc interrupt controller for the da8xx isn't in the same
  * chunk of physical memory space as the other registers (like it is
@@ -22,8 +22,8 @@
 #define __mem_isa(a)		(a)
 
 #ifndef __ASSEMBLER__
-#define __arch_ioremap(p, s, t)	davinci_ioremap(p, s, t)
-#define __arch_iounmap(v)	davinci_iounmap(v)
+#define __arch_ioremap		davinci_ioremap
+#define __arch_iounmap		davinci_iounmap
 
 void __iomem *davinci_ioremap(unsigned long phys, size_t size,
 			      unsigned int type);
@@ -83,20 +83,15 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
 		pdctl1 = __raw_readl(psc_base + PDCTL1);
 		pdctl1 |= 0x100;
 		__raw_writel(pdctl1, psc_base + PDCTL1);
-
-		do {
-			ptstat = __raw_readl(psc_base +
-					     PTSTAT);
-		} while (!(((ptstat >> domain) & 1) == 0));
 	} else {
 		ptcmd = 1 << domain;
 		__raw_writel(ptcmd, psc_base + PTCMD);
-
-		do {
-			ptstat = __raw_readl(psc_base + PTSTAT);
-		} while (!(((ptstat >> domain) & 1) == 0));
 	}
 
+	do {
+		ptstat = __raw_readl(psc_base + PTSTAT);
+	} while (!(((ptstat >> domain) & 1) == 0));
+
 	do {
 		mdstat = __raw_readl(psc_base + MDSTAT + 4 * id);
 	} while (!((mdstat & MDSTAT_STATE_MASK) == next_state));
@@ -272,14 +272,34 @@ static cycle_t read_cycles(struct clocksource *cs)
 	return (cycles_t)timer32_read(t);
 }
 
+/*
+ * Kernel assumes that sched_clock can be called early but may not have
+ * things ready yet.
+ */
+static cycle_t read_dummy(struct clocksource *cs)
+{
+	return 0;
+}
+
 static struct clocksource clocksource_davinci = {
 	.rating		= 300,
-	.read		= read_cycles,
+	.read		= read_dummy,
 	.mask		= CLOCKSOURCE_MASK(32),
 	.shift		= 24,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+/*
+ * Overwrite weak default sched_clock with something more precise
+ */
+unsigned long long notrace sched_clock(void)
+{
+	const cycle_t cyc = clocksource_davinci.read(&clocksource_davinci);
+
+	return clocksource_cyc2ns(cyc, clocksource_davinci.mult,
+				  clocksource_davinci.shift);
+}
+
 /*
  * clockevent
  */
@@ -377,11 +397,10 @@ static void __init davinci_timer_init(void)
 	davinci_clock_tick_rate = clk_get_rate(timer_clk);
 
 	/* setup clocksource */
+	clocksource_davinci.read = read_cycles;
 	clocksource_davinci.name = id_to_name[clocksource_id];
-	clocksource_davinci.mult =
-		clocksource_khz2mult(davinci_clock_tick_rate/1000,
-				clocksource_davinci.shift);
-	if (clocksource_register(&clocksource_davinci))
+	if (clocksource_register_hz(&clocksource_davinci,
+				    davinci_clock_tick_rate))
 		printk(err, clocksource_davinci.name);
 
 	/* setup clockevent */
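The read_dummy/read_cycles swap above makes the new sched_clock() safe to call before the timer is set up: until davinci_timer_init() installs read_cycles and registers the clocksource, reads return 0 instead of touching unmapped hardware. Once live, the scaling is the standard ns = (cyc * mult) >> shift. A worked sketch with illustrative numbers for a 24 MHz counter:

    u64 cyc = 2400;			/* 2400 cycles at 24 MHz = 100 us */
    u32 mult = 43690667;		/* ~41.667 ns/cycle, scaled by 2^20 */
    u32 shift = 20;
    u64 ns = (cyc * mult) >> shift;	/* = 100000 ns, i.e. 100 us */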
@@ -131,12 +131,13 @@ define_pll_clk(tdm, 1, 0x0ff, 0x200);
 define_pll_clk(eth, 2, 0x0ff, 0x400);
 
 /* Level 2 - divided outputs from the PLLs */
-#define define_pll_div_clk(pll, cname, div)	   \
-	static struct clk pll##_##cname##_clk = { \
-		.name		= #pll "_" #cname "_clk",\
-		.parent		= &pll_##pll##_clk, \
-		.flags		= CLK_PLL,	    \
-		.div_reg	= PLLDIV##div,	    \
+#define define_pll_div_clk(pll, cname, div)		\
+	static struct clk pll##_##cname##_clk = {	\
+		.name		= #pll "_" #cname "_clk",\
+		.parent		= &pll_##pll##_clk,	\
+		.flags		= CLK_PLL,		\
+		.div_reg	= PLLDIV##div,		\
+		.set_rate	= davinci_set_sysclk_rate, \
 	}
 
 define_pll_div_clk(sys, arm1176, 1);
@@ -192,6 +193,7 @@ lpsc_clk_enabled(system, sys_half_clk, SYSTEM);
 lpsc_clk_enabled(ddr2_vrst, sys_ddr_clk, DDR2_EMIF1_VRST);
 lpsc_clk_enabled(ddr2_vctl_rst, sys_ddr_clk, DDR2_EMIF2_VCTL_RST);
 lpsc_clk_enabled(wdt_arm, sys_half_clk, WDT_ARM);
+lpsc_clk_enabled(timer1, sys_half_clk, TIMER1);
 
 lpsc_clk(mbx_lite, sys_arm1176_clk, MBX_LITE);
 lpsc_clk(ethss, eth_125mhz_clk, ETHSS);
@@ -205,16 +207,15 @@ lpsc_clk(mdio, sys_half_clk, MDIO);
 lpsc_clk(sdio0, sys_half_clk, SDIO0);
 lpsc_clk(sdio1, sys_half_clk, SDIO1);
 lpsc_clk(timer0, sys_half_clk, TIMER0);
-lpsc_clk(timer1, sys_half_clk, TIMER1);
 lpsc_clk(wdt_dsp, sys_half_clk, WDT_DSP);
 lpsc_clk(ssp, sys_half_clk, SSP);
 lpsc_clk(tdm0, tdm_0_clk, TDM0);
 lpsc_clk(tdm1, tdm_1_clk, TDM1);
 lpsc_clk(vlynq, sys_vlynq_ref_clk, VLYNQ);
 lpsc_clk(mcdma, sys_half_clk, MCDMA);
-lpsc_clk(usb0, sys_half_clk, USB0);
-lpsc_clk(usb1, sys_half_clk, USB1);
 lpsc_clk(usbss, sys_half_clk, USBSS);
+lpsc_clk(usb0, clk_usbss, USB0);
+lpsc_clk(usb1, clk_usbss, USB1);
 lpsc_clk(ethss_rgmii, eth_250mhz_clk, ETHSS_RGMII);
 lpsc_clk(imcop, sys_dsp_clk, IMCOP);
 lpsc_clk(spare, sys_half_clk, SPARE);
@@ -281,7 +282,9 @@ static struct clk_lookup clks[] = {
 	CLK(NULL, "clk_tdm0", &clk_tdm0),
 	CLK(NULL, "clk_vlynq", &clk_vlynq),
 	CLK(NULL, "clk_mcdma", &clk_mcdma),
+	CLK(NULL, "clk_usbss", &clk_usbss),
 	CLK(NULL, "clk_usb0", &clk_usb0),
+	CLK(NULL, "clk_usb1", &clk_usb1),
 	CLK(NULL, "clk_tdm1", &clk_tdm1),
 	CLK(NULL, "clk_debugss", &clk_debugss),
 	CLK(NULL, "clk_ethss_rgmii", &clk_ethss_rgmii),
@@ -289,8 +292,6 @@ static struct clk_lookup clks[] = {
 	CLK(NULL, "clk_imcop", &clk_imcop),
 	CLK(NULL, "clk_spare", &clk_spare),
 	CLK("davinci_mmc.1", NULL, &clk_sdio1),
-	CLK(NULL, "clk_usb1", &clk_usb1),
-	CLK(NULL, "clk_usbss", &clk_usbss),
 	CLK(NULL, "clk_ddr2_vrst", &clk_ddr2_vrst),
 	CLK(NULL, "clk_ddr2_vctl_rst", &clk_ddr2_vctl_rst),
 	CLK(NULL, NULL, NULL),
@@ -9,6 +9,12 @@ config MACH_DOVE_DB
 	  Say 'Y' here if you want your kernel to support the
 	  Marvell DB-MV88AP510 Development Board.
 
+config MACH_CM_A510
+	bool "CompuLab CM-A510 Board"
+	help
+	  Say 'Y' here if you want your kernel to support the
+	  CompuLab CM-A510 Board.
+
 endmenu
 
 endif
@@ -1,3 +1,4 @@
-obj-y				+= common.o addr-map.o irq.o pcie.o
+obj-y				+= common.o addr-map.o irq.o pcie.o mpp.o
 
 obj-$(CONFIG_MACH_DOVE_DB)	+= dove-db-setup.o
+obj-$(CONFIG_MACH_CM_A510)	+= cm-a510.o