power management changes for omap and imx
A significant part of the changes for these two platforms went into power
management, so they are split out into a separate branch.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)

iQIVAwUATwtaEmCrR//JCVInAQIUqBAAkqKDGCyKmC2nDfz5ejYNUvugkDxgYv5I
fl9UUfBc2cLDVyOynzjH9SLTphVAI8jZa0KZAlvB8/+4Wcg7XNhUFPDH868zlPzP
mSsPPTnb3WJTqb1PLKi7oTbA7CfsX/srRaAtrEX7Nng7uGTZZq+5RL6mOR/bqHyR
F/VuV5U9HkDjgM7T7NtcNMqP9ysHDSrcNDse62yKh8FLot59rqXEEXZWTIYZphbI
v+BURp4EHs5Wm5AVJbpGmWhk4+NgRCLE0ZKZlfxnJctFz5+bW11TX/85ua+UXtmt
Fnij44jSmAzbQ1o0VLbN760iBsbPN/JElYWXwIqR6v5M+Hd2UDRm3a6Bc1xqUNx0
0C8DEoo78XebhldAsN1TL/V94j1ojuNyWC7qkn9VBZLTiVYPyV/oeIdxtR19u1lB
QctpXeUPCfdDyD+wAWbqid0MExayP3TAwJ5vK8Tw+ssIv3A19RkUI6kdGaW4RqyL
5n5o7Ze4CGOzrthWuyfw5flKbjRUrmtLO6TTgPZKCwxeiQh3G1GJcCL6lKbGbH3M
Z8jNWzEMMExZU+55P8hRrtNgnx6rqn2bWi/3cCSmuKB6KHBUWXfKJw3rmTcWOsLB
aNSXqYoWtTK9hJ0zo1xIAGmnJlfrO9I66abCuHHjDKVh1W5j7zmZwrj4ErUuS/dO
UHOmrQN/GOY=
=P4kO
-----END PGP SIGNATURE-----

Merge tag 'pm' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc

power management changes for omap and imx

A significant part of the changes for these two platforms went into power
management, so they are split out into a separate branch.

* tag 'pm' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (65 commits)
  ARM: imx6: remove __CPUINIT annotation from v7_invalidate_l1
  ARM: imx6: fix v7_invalidate_l1 by adding I-Cache invalidation
  ARM: imx6q: resume PL310 only when CACHE_L2X0 defined
  ARM: imx6q: build pm code only when CONFIG_PM selected
  ARM: mx5: use generic irq chip pm interface for pm functions on
  ARM: omap: pass minimal SoC/board data for UART from dt
  arm/dts: Add minimal device tree support for omap2420 and omap2430
  omap-serial: Add minimal device tree support
  omap-serial: Use default clock speed (48Mhz) if not specified
  omap-serial: Get rid of all pdev->id usage
  ARM: OMAP2+: hwmod: Add a new flag to handle hwmods left enabled at init
  ARM: OMAP4: PRM: use PRCM interrupt handler
  ARM: OMAP3: pm: use prcm chain handler
  ARM: OMAP: hwmod: add support for selecting mpu_irq for each wakeup pad
  ARM: OMAP2+: mux: add support for PAD wakeup interrupts
  ARM: OMAP: PRCM: add suspend prepare / finish support
  ARM: OMAP: PRCM: add support for chain interrupt handler
  ARM: OMAP3/4: PRM: add functions to read pending IRQs, PRM barrier
  ARM: OMAP2+: hwmod: Add API to enable IO ring wakeup
  ARM: OMAP2+: mux: add wakeup-capable hwmod mux entries to dynamic list
  ...
Commit b3c3752292
@@ -0,0 +1,10 @@
OMAP UART controller

Required properties:
- compatible : should be "ti,omap2-uart" for OMAP2 controllers
- compatible : should be "ti,omap3-uart" for OMAP3 controllers
- compatible : should be "ti,omap4-uart" for OMAP4 controllers
- ti,hwmods : Must be "uart<n>", n being the instance number (1-based)

Optional properties:
- clock-frequency : frequency of the clock input to the UART
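For reference, a node using this binding (taken from the omap2.dtsi hunk added
later in this same series) looks like:

        uart1: serial@4806a000 {
                compatible = "ti,omap2-uart";
                ti,hwmods = "uart1";
                clock-frequency = <48000000>;
        };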
@@ -0,0 +1,67 @@
/*
 * Device Tree Source for OMAP2 SoC
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

/include/ "skeleton.dtsi"

/ {
        compatible = "ti,omap2430", "ti,omap2420", "ti,omap2";

        aliases {
                serial0 = &uart1;
                serial1 = &uart2;
                serial2 = &uart3;
        };

        cpus {
                cpu@0 {
                        compatible = "arm,arm1136jf-s";
                };
        };

        soc {
                compatible = "ti,omap-infra";
                mpu {
                        compatible = "ti,omap2-mpu";
                        ti,hwmods = "mpu";
                };
        };

        ocp {
                compatible = "simple-bus";
                #address-cells = <1>;
                #size-cells = <1>;
                ranges;
                ti,hwmods = "l3_main";

                intc: interrupt-controller@1 {
                        compatible = "ti,omap2-intc";
                        interrupt-controller;
                        #interrupt-cells = <1>;
                };

                uart1: serial@4806a000 {
                        compatible = "ti,omap2-uart";
                        ti,hwmods = "uart1";
                        clock-frequency = <48000000>;
                };

                uart2: serial@4806c000 {
                        compatible = "ti,omap2-uart";
                        ti,hwmods = "uart2";
                        clock-frequency = <48000000>;
                };

                uart3: serial@4806e000 {
                        compatible = "ti,omap2-uart";
                        ti,hwmods = "uart3";
                        clock-frequency = <48000000>;
                };
        };
};
@@ -13,6 +13,13 @@
/ {
        compatible = "ti,omap3430", "ti,omap3";

        aliases {
                serial0 = &uart1;
                serial1 = &uart2;
                serial2 = &uart3;
                serial3 = &uart4;
        };

        cpus {
                cpu@0 {
                        compatible = "arm,cortex-a8";

@@ -59,5 +66,29 @@
                        interrupt-controller;
                        #interrupt-cells = <1>;
                };

                uart1: serial@0x4806a000 {
                        compatible = "ti,omap3-uart";
                        ti,hwmods = "uart1";
                        clock-frequency = <48000000>;
                };

                uart2: serial@0x4806c000 {
                        compatible = "ti,omap3-uart";
                        ti,hwmods = "uart2";
                        clock-frequency = <48000000>;
                };

                uart3: serial@0x49020000 {
                        compatible = "ti,omap3-uart";
                        ti,hwmods = "uart3";
                        clock-frequency = <48000000>;
                };

                uart4: serial@0x49042000 {
                        compatible = "ti,omap3-uart";
                        ti,hwmods = "uart4";
                        clock-frequency = <48000000>;
                };
        };
};
@@ -21,6 +21,10 @@
        interrupt-parent = <&gic>;

        aliases {
                serial0 = &uart1;
                serial1 = &uart2;
                serial2 = &uart3;
                serial3 = &uart4;
        };

        cpus {

@@ -99,5 +103,29 @@
                        reg = <0x48241000 0x1000>,
                              <0x48240100 0x0100>;
                };

                uart1: serial@0x4806a000 {
                        compatible = "ti,omap4-uart";
                        ti,hwmods = "uart1";
                        clock-frequency = <48000000>;
                };

                uart2: serial@0x4806c000 {
                        compatible = "ti,omap4-uart";
                        ti,hwmods = "uart2";
                        clock-frequency = <48000000>;
                };

                uart3: serial@0x48020000 {
                        compatible = "ti,omap4-uart";
                        ti,hwmods = "uart3";
                        clock-frequency = <48000000>;
                };

                uart4: serial@0x4806e000 {
                        compatible = "ti,omap4-uart";
                        ti,hwmods = "uart4";
                        clock-frequency = <48000000>;
                };
        };
};
@@ -596,6 +596,7 @@ comment "i.MX6 family:"

config SOC_IMX6Q
        bool "i.MX6 Quad support"
        select ARM_CPU_SUSPEND if PM
        select ARM_GIC
        select CPU_V7
        select HAVE_ARM_SCU
@@ -70,4 +70,8 @@ AFLAGS_head-v7.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_LOCAL_TIMERS) += localtimer.o
obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o pm-imx6q.o
obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o

ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
endif
@@ -16,7 +16,6 @@
#include <asm/hardware/cache-l2x0.h>

        .section ".text.head", "ax"
        __CPUINIT

/*
 * The secondary kernel init calls v7_flush_dcache_all before it enables

@@ -33,6 +32,7 @@
 */
ENTRY(v7_invalidate_l1)
        mov     r0, #0
        mcr     p15, 0, r0, c7, c5, 0   @ invalidate I cache
        mcr     p15, 2, r0, c0, c0, 0
        mrc     p15, 1, r0, c0, c0, 0

@@ -71,6 +71,7 @@ ENTRY(v7_secondary_startup)
ENDPROC(v7_secondary_startup)
#endif

#ifdef CONFIG_PM
/*
 * The following code is located into the .data section. This is to
 * allow phys_l2x0_saved_regs to be accessed with a relative load

@@ -79,6 +80,7 @@ ENDPROC(v7_secondary_startup)
        .data
        .align

#ifdef CONFIG_CACHE_L2X0
        .macro  pl310_resume
        ldr     r2, phys_l2x0_saved_regs
        ldr     r0, [r2, #L2X0_R_PHY_BASE]      @ get physical base of l2x0

@@ -88,12 +90,17 @@ ENDPROC(v7_secondary_startup)
        str     r1, [r0, #L2X0_CTRL]    @ re-enable L2
        .endm

        .globl  phys_l2x0_saved_regs
phys_l2x0_saved_regs:
        .long   0
#else
        .macro  pl310_resume
        .endm
#endif

ENTRY(v7_cpu_resume)
        bl      v7_invalidate_l1
        pl310_resume
        b       cpu_resume
ENDPROC(v7_cpu_resume)

        .globl  phys_l2x0_saved_regs
phys_l2x0_saved_regs:
        .long   0
#endif
@@ -64,7 +64,9 @@ void __init imx6q_pm_init(void)
         * address of the data structure used by l2x0 core to save registers,
         * and later restore the necessary ones in imx6q resume entry.
         */
#ifdef CONFIG_CACHE_L2X0
        phys_l2x0_saved_regs = __pa(&l2x0_saved_regs);
#endif

        suspend_set_ops(&imx6q_pm_ops);
}
@@ -13,6 +13,7 @@

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/clk.h>

#include <asm/mach/map.h>

@@ -21,10 +22,26 @@
#include <mach/devices-common.h>
#include <mach/iomux-v3.h>

static struct clk *gpc_dvfs_clk;

static void imx5_idle(void)
{
        if (!need_resched())
        if (!need_resched()) {
                /* gpc clock is needed for SRPG */
                if (gpc_dvfs_clk == NULL) {
                        gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
                        if (IS_ERR(gpc_dvfs_clk))
                                goto err0;
                }
                clk_enable(gpc_dvfs_clk);
                mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
                if (tzic_enable_wake())
                        goto err1;
                cpu_do_idle();
err1:
                clk_disable(gpc_dvfs_clk);
        }
err0:
        local_irq_enable();
}
@@ -55,9 +55,6 @@ void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
                        stop_mode = 1;
                }
                arm_srpgcr |= MXC_SRPGCR_PCR;

                if (tzic_enable_wake(1) != 0)
                        return;
                break;
        case STOP_POWER_ON:
                ccm_clpcr |= 0x2 << MXC_CCM_CLPCR_LPM_OFFSET;
@@ -365,6 +365,27 @@ config OMAP3_SDRC_AC_TIMING
          wish to say no. Selecting yes without understanding what is
          going on could result in system crashes;

config OMAP4_ERRATA_I688
        bool "OMAP4 errata: Async Bridge Corruption"
        depends on ARCH_OMAP4
        select ARCH_HAS_BARRIERS
        help
          If a data is stalled inside asynchronous bridge because of back
          pressure, it may be accepted multiple times, creating pointer
          misalignment that will corrupt next transfers on that data path
          until next reset of the system (No recovery procedure once the
          issue is hit, the path remains consistently broken). Async bridge
          can be found on path between MPU to EMIF and MPU to L3 interconnect.
          This situation can happen only when the idle is initiated by a
          Master Request Disconnection (which is trigged by software when
          executing WFI on CPU).
          The work-around for this errata needs all the initiators connected
          through async bridge must ensure that data path is properly drained
          before issuing WFI. This condition will be met if one Strongly ordered
          access is performed to the target right before executing the WFI.
          In MPU case, L3 T2ASYNC FIFO and DDR T2ASYNC FIFO needs to be drained.
          IO barrier ensure that there is no synchronisation loss on initiators
          operating on both interconnect port simultaneously.
endmenu

endif
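In practice, the "strongly ordered access before WFI" requirement is what the
new mach/barriers.h and the MT_MEMORY_SO SRAM mapping further down this diff
provide. A rough, illustrative sketch of the idea (not the kernel's actual code
path, which lives in the low-power assembly added by this series):

        /*
         * Illustrative only: drain the async bridges with a strongly-ordered
         * write before idling. 'so_sram' is assumed to be a strongly-ordered
         * (MT_MEMORY_SO) mapping, such as the OMAP4_SRAM_VA page mapped below.
         */
        static inline void omap4_drain_then_idle(void __iomem *so_sram)
        {
                writel_relaxed(0, so_sram);     /* strongly-ordered access drains the path */
                dsb();                          /* wait for outstanding writes to complete */
                cpu_do_idle();                  /* now safe to execute WFI */
        }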
@@ -11,10 +11,11 @@ hwmod-common = omap_hwmod.o \
                        omap_hwmod_common_data.o
clock-common = clock.o clock_common_data.o \
                        clkt_dpll.o clkt_clksel.o
secure-common = omap-smc.o omap-secure.o

obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common)
obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common)
obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
obj-$(CONFIG_ARCH_OMAP4) += prm44xx.o $(hwmod-common) $(secure-common)

obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o

@@ -24,11 +25,13 @@ obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o
obj-$(CONFIG_LOCAL_TIMERS) += timer-mpu.o
obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
obj-$(CONFIG_ARCH_OMAP4) += omap44xx-smc.o omap4-common.o
obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o \
                            sleep44xx.o

plus_sec := $(call as-instr,.arch_extension sec,+sec)
AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec)
AFLAGS_omap44xx-smc.o :=-Wa,-march=armv7-a$(plus_sec)
AFLAGS_omap-smc.o :=-Wa,-march=armv7-a$(plus_sec)
AFLAGS_sleep44xx.o :=-Wa,-march=armv7-a$(plus_sec)

# Functions loaded to SRAM
obj-$(CONFIG_SOC_OMAP2420) += sram242x.o

@@ -62,7 +65,8 @@ obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o
obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o
obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \
                            cpuidle34xx.o
obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o
obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o \
                            cpuidle44xx.o
obj-$(CONFIG_PM_DEBUG) += pm-debug.o
obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o
obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o

@@ -77,6 +81,7 @@ endif
endif

# PRCM
obj-y += prm_common.o
obj-$(CONFIG_ARCH_OMAP2) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
obj-$(CONFIG_ARCH_OMAP3) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o \
                            vc3xxx_data.o vp3xxx_data.o

@@ -86,7 +91,7 @@ obj-$(CONFIG_ARCH_OMAP3) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o \
obj-$(CONFIG_ARCH_OMAP4) += prcm.o cm2xxx_3xxx.o cminst44xx.o \
                            cm44xx.o prcm_mpu44xx.o \
                            prminst44xx.o vc44xx_data.o \
                            vp44xx_data.o
                            vp44xx_data.o prm44xx.o

# OMAP voltage domains
voltagedomain-common := voltage.o vc.o vp.o
|
@ -475,106 +475,8 @@ static const struct usbhs_omap_board_data usbhs_bdata __initconst = {
|
|||
static struct omap_board_mux board_mux[] __initdata = {
|
||||
{ .reg_offset = OMAP_MUX_TERMINATOR },
|
||||
};
|
||||
|
||||
static struct omap_device_pad serial1_pads[] __initdata = {
|
||||
/*
|
||||
* Note that off output enable is an active low
|
||||
* signal. So setting this means pin is a
|
||||
* input enabled in off mode
|
||||
*/
|
||||
OMAP_MUX_STATIC("uart1_cts.uart1_cts",
|
||||
OMAP_PIN_INPUT |
|
||||
OMAP_PIN_OFF_INPUT_PULLDOWN |
|
||||
OMAP_OFFOUT_EN |
|
||||
OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart1_rts.uart1_rts",
|
||||
OMAP_PIN_OUTPUT |
|
||||
OMAP_OFF_EN |
|
||||
OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart1_rx.uart1_rx",
|
||||
OMAP_PIN_INPUT |
|
||||
OMAP_PIN_OFF_INPUT_PULLDOWN |
|
||||
OMAP_OFFOUT_EN |
|
||||
OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart1_tx.uart1_tx",
|
||||
OMAP_PIN_OUTPUT |
|
||||
OMAP_OFF_EN |
|
||||
OMAP_MUX_MODE0),
|
||||
};
|
||||
|
||||
static struct omap_device_pad serial2_pads[] __initdata = {
|
||||
OMAP_MUX_STATIC("uart2_cts.uart2_cts",
|
||||
OMAP_PIN_INPUT_PULLUP |
|
||||
OMAP_PIN_OFF_INPUT_PULLDOWN |
|
||||
OMAP_OFFOUT_EN |
|
||||
OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart2_rts.uart2_rts",
|
||||
OMAP_PIN_OUTPUT |
|
||||
OMAP_OFF_EN |
|
||||
OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart2_rx.uart2_rx",
|
||||
OMAP_PIN_INPUT |
|
||||
OMAP_PIN_OFF_INPUT_PULLDOWN |
|
||||
OMAP_OFFOUT_EN |
|
||||
OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart2_tx.uart2_tx",
|
||||
OMAP_PIN_OUTPUT |
|
||||
OMAP_OFF_EN |
|
||||
OMAP_MUX_MODE0),
|
||||
};
|
||||
|
||||
static struct omap_device_pad serial3_pads[] __initdata = {
|
||||
OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx",
|
||||
OMAP_PIN_INPUT_PULLDOWN |
|
||||
OMAP_PIN_OFF_INPUT_PULLDOWN |
|
||||
OMAP_OFFOUT_EN |
|
||||
OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd",
|
||||
OMAP_PIN_OUTPUT |
|
||||
OMAP_OFF_EN |
|
||||
OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx",
|
||||
OMAP_PIN_INPUT |
|
||||
OMAP_PIN_OFF_INPUT_PULLDOWN |
|
||||
OMAP_OFFOUT_EN |
|
||||
OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx",
|
||||
OMAP_PIN_OUTPUT |
|
||||
OMAP_OFF_EN |
|
||||
OMAP_MUX_MODE0),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial1_data __initdata = {
|
||||
.id = 0,
|
||||
.pads = serial1_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial1_pads),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial2_data __initdata = {
|
||||
.id = 1,
|
||||
.pads = serial2_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial2_pads),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial3_data __initdata = {
|
||||
.id = 2,
|
||||
.pads = serial3_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial3_pads),
|
||||
};
|
||||
|
||||
static inline void board_serial_init(void)
|
||||
{
|
||||
omap_serial_init_port(&serial1_data);
|
||||
omap_serial_init_port(&serial2_data);
|
||||
omap_serial_init_port(&serial3_data);
|
||||
}
|
||||
#else
|
||||
#define board_mux NULL
|
||||
|
||||
static inline void board_serial_init(void)
|
||||
{
|
||||
omap_serial_init();
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
@ -711,7 +613,7 @@ static void __init omap_3430sdp_init(void)
|
|||
else
|
||||
gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV1;
|
||||
omap_ads7846_init(1, gpio_pendown, 310, NULL);
|
||||
board_serial_init();
|
||||
omap_serial_init();
|
||||
omap_sdrc_init(hyb18m512160af6_sdrc_params, NULL);
|
||||
usb_musb_init(NULL);
|
||||
board_smc91x_init();
|
||||
|
|
|
@ -844,74 +844,8 @@ static struct omap_board_mux board_mux[] __initdata = {
|
|||
{ .reg_offset = OMAP_MUX_TERMINATOR },
|
||||
};
|
||||
|
||||
static struct omap_device_pad serial2_pads[] __initdata = {
|
||||
OMAP_MUX_STATIC("uart2_cts.uart2_cts",
|
||||
OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart2_rts.uart2_rts",
|
||||
OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart2_rx.uart2_rx",
|
||||
OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart2_tx.uart2_tx",
|
||||
OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
|
||||
};
|
||||
|
||||
static struct omap_device_pad serial3_pads[] __initdata = {
|
||||
OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx",
|
||||
OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd",
|
||||
OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx",
|
||||
OMAP_PIN_INPUT | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx",
|
||||
OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
|
||||
};
|
||||
|
||||
static struct omap_device_pad serial4_pads[] __initdata = {
|
||||
OMAP_MUX_STATIC("uart4_rx.uart4_rx",
|
||||
OMAP_PIN_INPUT | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart4_tx.uart4_tx",
|
||||
OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial2_data __initdata = {
|
||||
.id = 1,
|
||||
.pads = serial2_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial2_pads),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial3_data __initdata = {
|
||||
.id = 2,
|
||||
.pads = serial3_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial3_pads),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial4_data __initdata = {
|
||||
.id = 3,
|
||||
.pads = serial4_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial4_pads),
|
||||
};
|
||||
|
||||
static inline void board_serial_init(void)
|
||||
{
|
||||
struct omap_board_data bdata;
|
||||
bdata.flags = 0;
|
||||
bdata.pads = NULL;
|
||||
bdata.pads_cnt = 0;
|
||||
bdata.id = 0;
|
||||
/* pass dummy data for UART1 */
|
||||
omap_serial_init_port(&bdata);
|
||||
|
||||
omap_serial_init_port(&serial2_data);
|
||||
omap_serial_init_port(&serial3_data);
|
||||
omap_serial_init_port(&serial4_data);
|
||||
}
|
||||
#else
|
||||
#define board_mux NULL
|
||||
|
||||
static inline void board_serial_init(void)
|
||||
{
|
||||
omap_serial_init();
|
||||
}
|
||||
#endif
|
||||
|
||||
static void omap4_sdp4430_wifi_mux_init(void)
|
||||
|
@ -961,7 +895,7 @@ static void __init omap_4430sdp_init(void)
|
|||
omap4_i2c_init();
|
||||
omap_sfh7741prox_init();
|
||||
platform_add_devices(sdp4430_devices, ARRAY_SIZE(sdp4430_devices));
|
||||
board_serial_init();
|
||||
omap_serial_init();
|
||||
omap_sdrc_init(NULL, NULL);
|
||||
omap4_sdp4430_wifi_init();
|
||||
omap4_twl6030_hsmmc_init(mmc);
|
||||
|
|
|
@@ -69,7 +69,6 @@ static void __init omap_generic_init(void)
        if (node)
                irq_domain_add_simple(node, 0);

        omap_serial_init();
        omap_sdrc_init(NULL, NULL);

        of_platform_populate(NULL, omap_dt_match_table, NULL, NULL);
@@ -644,15 +644,15 @@ static inline void board_serial_init(void)
        bdata.pads_cnt = 0;

        bdata.id = 0;
        omap_serial_init_port(&bdata);
        omap_serial_init_port(&bdata, NULL);

        bdata.id = 1;
        omap_serial_init_port(&bdata);
        omap_serial_init_port(&bdata, NULL);

        bdata.id = 2;
        bdata.pads = serial2_pads;
        bdata.pads_cnt = ARRAY_SIZE(serial2_pads);
        omap_serial_init_port(&bdata);
        omap_serial_init_port(&bdata, NULL);
}

#else
|
@ -364,74 +364,8 @@ static struct omap_board_mux board_mux[] __initdata = {
|
|||
{ .reg_offset = OMAP_MUX_TERMINATOR },
|
||||
};
|
||||
|
||||
static struct omap_device_pad serial2_pads[] __initdata = {
|
||||
OMAP_MUX_STATIC("uart2_cts.uart2_cts",
|
||||
OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart2_rts.uart2_rts",
|
||||
OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart2_rx.uart2_rx",
|
||||
OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart2_tx.uart2_tx",
|
||||
OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
|
||||
};
|
||||
|
||||
static struct omap_device_pad serial3_pads[] __initdata = {
|
||||
OMAP_MUX_STATIC("uart3_cts_rctx.uart3_cts_rctx",
|
||||
OMAP_PIN_INPUT_PULLUP | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart3_rts_sd.uart3_rts_sd",
|
||||
OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart3_rx_irrx.uart3_rx_irrx",
|
||||
OMAP_PIN_INPUT | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart3_tx_irtx.uart3_tx_irtx",
|
||||
OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
|
||||
};
|
||||
|
||||
static struct omap_device_pad serial4_pads[] __initdata = {
|
||||
OMAP_MUX_STATIC("uart4_rx.uart4_rx",
|
||||
OMAP_PIN_INPUT | OMAP_MUX_MODE0),
|
||||
OMAP_MUX_STATIC("uart4_tx.uart4_tx",
|
||||
OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial2_data __initdata = {
|
||||
.id = 1,
|
||||
.pads = serial2_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial2_pads),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial3_data __initdata = {
|
||||
.id = 2,
|
||||
.pads = serial3_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial3_pads),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial4_data __initdata = {
|
||||
.id = 3,
|
||||
.pads = serial4_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial4_pads),
|
||||
};
|
||||
|
||||
static inline void board_serial_init(void)
|
||||
{
|
||||
struct omap_board_data bdata;
|
||||
bdata.flags = 0;
|
||||
bdata.pads = NULL;
|
||||
bdata.pads_cnt = 0;
|
||||
bdata.id = 0;
|
||||
/* pass dummy data for UART1 */
|
||||
omap_serial_init_port(&bdata);
|
||||
|
||||
omap_serial_init_port(&serial2_data);
|
||||
omap_serial_init_port(&serial3_data);
|
||||
omap_serial_init_port(&serial4_data);
|
||||
}
|
||||
#else
|
||||
#define board_mux NULL
|
||||
|
||||
static inline void board_serial_init(void)
|
||||
{
|
||||
omap_serial_init();
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Display DVI */
|
||||
|
@ -562,7 +496,7 @@ static void __init omap4_panda_init(void)
|
|||
omap4_panda_i2c_init();
|
||||
platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
|
||||
platform_device_register(&omap_vwlan_device);
|
||||
board_serial_init();
|
||||
omap_serial_init();
|
||||
omap_sdrc_init(NULL, NULL);
|
||||
omap4_twl6030_hsmmc_init(mmc);
|
||||
omap4_ehci_init();
|
||||
|
|
|
@ -24,9 +24,11 @@
|
|||
|
||||
#ifndef __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
|
||||
#define __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H
|
||||
#ifndef __ASSEMBLER__
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <plat/common.h>
|
||||
#include <asm/proc-fns.h>
|
||||
|
||||
#ifdef CONFIG_SOC_OMAP2420
|
||||
extern void omap242x_map_common_io(void);
|
||||
|
@ -168,23 +170,23 @@ void omap3_intc_resume_idle(void);
|
|||
void omap2_intc_handle_irq(struct pt_regs *regs);
|
||||
void omap3_intc_handle_irq(struct pt_regs *regs);
|
||||
|
||||
/*
|
||||
* wfi used in low power code. Directly opcode is used instead
|
||||
* of instruction to avoid mulit-omap build break
|
||||
*/
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
#define do_wfi() __asm__ __volatile__ ("wfi" : : : "memory")
|
||||
#else
|
||||
#define do_wfi() \
|
||||
__asm__ __volatile__ (".word 0xe320f003" : : : "memory")
|
||||
#ifdef CONFIG_CACHE_L2X0
|
||||
extern void __iomem *omap4_get_l2cache_base(void);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CACHE_L2X0
|
||||
extern void __iomem *l2cache_base;
|
||||
#ifdef CONFIG_SMP
|
||||
extern void __iomem *omap4_get_scu_base(void);
|
||||
#else
|
||||
static inline void __iomem *omap4_get_scu_base(void)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
extern void __init gic_init_irq(void);
|
||||
extern void omap_smc1(u32 fn, u32 arg);
|
||||
extern void __iomem *omap4_get_sar_ram_base(void);
|
||||
extern void omap_do_wfi(void);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/* Needed for secondary core boot */
|
||||
|
@ -194,4 +196,44 @@ extern void omap_auxcoreboot_addr(u32 cpu_addr);
|
|||
extern u32 omap_read_auxcoreboot0(void);
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_SMP) && defined(CONFIG_PM)
|
||||
extern int omap4_mpuss_init(void);
|
||||
extern int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state);
|
||||
extern int omap4_finish_suspend(unsigned long cpu_state);
|
||||
extern void omap4_cpu_resume(void);
|
||||
extern int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state);
|
||||
extern u32 omap4_mpuss_read_prev_context_state(void);
|
||||
#else
|
||||
static inline int omap4_enter_lowpower(unsigned int cpu,
|
||||
unsigned int power_state)
|
||||
{
|
||||
cpu_do_idle();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
|
||||
{
|
||||
cpu_do_idle();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int omap4_mpuss_init(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int omap4_finish_suspend(unsigned long cpu_state)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void omap4_cpu_resume(void)
|
||||
{}
|
||||
|
||||
static inline u32 omap4_mpuss_read_prev_context_state(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
#endif /* __ASSEMBLER__ */
|
||||
#endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
|
||||
|
|
|
@@ -25,12 +25,12 @@
#include <linux/sched.h>
#include <linux/cpuidle.h>
#include <linux/export.h>
#include <linux/cpu_pm.h>

#include <plat/prcm.h>
#include <plat/irqs.h>
#include "powerdomain.h"
#include "clockdomain.h"
#include <plat/serial.h>

#include "pm.h"
#include "control.h"

@@ -124,9 +124,23 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
                pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
        }

        /*
         * Call idle CPU PM enter notifier chain so that
         * VFP context is saved.
         */
        if (mpu_state == PWRDM_POWER_OFF)
                cpu_pm_enter();

        /* Execute ARM wfi */
        omap_sram_idle();

        /*
         * Call idle CPU PM enter notifier chain to restore
         * VFP context.
         */
        if (pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
                cpu_pm_exit();

        /* Re-allow idle for C1 */
        if (index == 0) {
                pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);

@@ -245,11 +259,6 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
        struct omap3_idle_statedata *cx;
        int ret;

        if (!omap3_can_sleep()) {
                new_state_idx = drv->safe_state_index;
                goto select_state;
        }

        /*
         * Prevent idle completely if CAM is active.
         * CAM does not have wakeup capability in OMAP3.
|
@ -0,0 +1,245 @@
|
|||
/*
|
||||
* OMAP4 CPU idle Routines
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments, Inc.
|
||||
* Santosh Shilimkar <santosh.shilimkar@ti.com>
|
||||
* Rajendra Nayak <rnayak@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/cpuidle.h>
|
||||
#include <linux/cpu_pm.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/clockchips.h>
|
||||
|
||||
#include <asm/proc-fns.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "pm.h"
|
||||
#include "prm.h"
|
||||
|
||||
#ifdef CONFIG_CPU_IDLE
|
||||
|
||||
/* Machine specific information to be recorded in the C-state driver_data */
|
||||
struct omap4_idle_statedata {
|
||||
u32 cpu_state;
|
||||
u32 mpu_logic_state;
|
||||
u32 mpu_state;
|
||||
u8 valid;
|
||||
};
|
||||
|
||||
static struct cpuidle_params cpuidle_params_table[] = {
|
||||
/* C1 - CPU0 ON + CPU1 ON + MPU ON */
|
||||
{.exit_latency = 2 + 2 , .target_residency = 5, .valid = 1},
|
||||
/* C2- CPU0 OFF + CPU1 OFF + MPU CSWR */
|
||||
{.exit_latency = 328 + 440 , .target_residency = 960, .valid = 1},
|
||||
/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
|
||||
{.exit_latency = 460 + 518 , .target_residency = 1100, .valid = 1},
|
||||
};
|
||||
|
||||
#define OMAP4_NUM_STATES ARRAY_SIZE(cpuidle_params_table)
|
||||
|
||||
struct omap4_idle_statedata omap4_idle_data[OMAP4_NUM_STATES];
|
||||
static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
|
||||
|
||||
/**
|
||||
* omap4_enter_idle - Programs OMAP4 to enter the specified state
|
||||
* @dev: cpuidle device
|
||||
* @drv: cpuidle driver
|
||||
* @index: the index of state to be entered
|
||||
*
|
||||
* Called from the CPUidle framework to program the device to the
|
||||
* specified low power state selected by the governor.
|
||||
* Returns the amount of time spent in the low power state.
|
||||
*/
|
||||
static int omap4_enter_idle(struct cpuidle_device *dev,
|
||||
struct cpuidle_driver *drv,
|
||||
int index)
|
||||
{
|
||||
struct omap4_idle_statedata *cx =
|
||||
cpuidle_get_statedata(&dev->states_usage[index]);
|
||||
struct timespec ts_preidle, ts_postidle, ts_idle;
|
||||
u32 cpu1_state;
|
||||
int idle_time;
|
||||
int new_state_idx;
|
||||
int cpu_id = smp_processor_id();
|
||||
|
||||
/* Used to keep track of the total time in idle */
|
||||
getnstimeofday(&ts_preidle);
|
||||
|
||||
local_irq_disable();
|
||||
local_fiq_disable();
|
||||
|
||||
/*
|
||||
* CPU0 has to stay ON (i.e in C1) until CPU1 is OFF state.
|
||||
* This is necessary to honour hardware recommondation
|
||||
* of triggeing all the possible low power modes once CPU1 is
|
||||
* out of coherency and in OFF mode.
|
||||
* Update dev->last_state so that governor stats reflects right
|
||||
* data.
|
||||
*/
|
||||
cpu1_state = pwrdm_read_pwrst(cpu1_pd);
|
||||
if (cpu1_state != PWRDM_POWER_OFF) {
|
||||
new_state_idx = drv->safe_state_index;
|
||||
cx = cpuidle_get_statedata(&dev->states_usage[new_state_idx]);
|
||||
}
|
||||
|
||||
if (index > 0)
|
||||
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
|
||||
|
||||
/*
|
||||
* Call idle CPU PM enter notifier chain so that
|
||||
* VFP and per CPU interrupt context is saved.
|
||||
*/
|
||||
if (cx->cpu_state == PWRDM_POWER_OFF)
|
||||
cpu_pm_enter();
|
||||
|
||||
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
|
||||
omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
|
||||
|
||||
/*
|
||||
* Call idle CPU cluster PM enter notifier chain
|
||||
* to save GIC and wakeupgen context.
|
||||
*/
|
||||
if ((cx->mpu_state == PWRDM_POWER_RET) &&
|
||||
(cx->mpu_logic_state == PWRDM_POWER_OFF))
|
||||
cpu_cluster_pm_enter();
|
||||
|
||||
omap4_enter_lowpower(dev->cpu, cx->cpu_state);
|
||||
|
||||
/*
|
||||
* Call idle CPU PM exit notifier chain to restore
|
||||
* VFP and per CPU IRQ context. Only CPU0 state is
|
||||
* considered since CPU1 is managed by CPU hotplug.
|
||||
*/
|
||||
if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF)
|
||||
cpu_pm_exit();
|
||||
|
||||
/*
|
||||
* Call idle CPU cluster PM exit notifier chain
|
||||
* to restore GIC and wakeupgen context.
|
||||
*/
|
||||
if (omap4_mpuss_read_prev_context_state())
|
||||
cpu_cluster_pm_exit();
|
||||
|
||||
if (index > 0)
|
||||
clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
|
||||
|
||||
getnstimeofday(&ts_postidle);
|
||||
ts_idle = timespec_sub(ts_postidle, ts_preidle);
|
||||
|
||||
local_irq_enable();
|
||||
local_fiq_enable();
|
||||
|
||||
idle_time = ts_idle.tv_nsec / NSEC_PER_USEC + ts_idle.tv_sec * \
|
||||
USEC_PER_SEC;
|
||||
|
||||
/* Update cpuidle counters */
|
||||
dev->last_residency = idle_time;
|
||||
|
||||
return index;
|
||||
}
|
||||
|
||||
DEFINE_PER_CPU(struct cpuidle_device, omap4_idle_dev);
|
||||
|
||||
struct cpuidle_driver omap4_idle_driver = {
|
||||
.name = "omap4_idle",
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
static inline void _fill_cstate(struct cpuidle_driver *drv,
|
||||
int idx, const char *descr)
|
||||
{
|
||||
struct cpuidle_state *state = &drv->states[idx];
|
||||
|
||||
state->exit_latency = cpuidle_params_table[idx].exit_latency;
|
||||
state->target_residency = cpuidle_params_table[idx].target_residency;
|
||||
state->flags = CPUIDLE_FLAG_TIME_VALID;
|
||||
state->enter = omap4_enter_idle;
|
||||
sprintf(state->name, "C%d", idx + 1);
|
||||
strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
|
||||
}
|
||||
|
||||
static inline struct omap4_idle_statedata *_fill_cstate_usage(
|
||||
struct cpuidle_device *dev,
|
||||
int idx)
|
||||
{
|
||||
struct omap4_idle_statedata *cx = &omap4_idle_data[idx];
|
||||
struct cpuidle_state_usage *state_usage = &dev->states_usage[idx];
|
||||
|
||||
cx->valid = cpuidle_params_table[idx].valid;
|
||||
cpuidle_set_statedata(state_usage, cx);
|
||||
|
||||
return cx;
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* omap4_idle_init - Init routine for OMAP4 idle
|
||||
*
|
||||
* Registers the OMAP4 specific cpuidle driver to the cpuidle
|
||||
* framework with the valid set of states.
|
||||
*/
|
||||
int __init omap4_idle_init(void)
|
||||
{
|
||||
struct omap4_idle_statedata *cx;
|
||||
struct cpuidle_device *dev;
|
||||
struct cpuidle_driver *drv = &omap4_idle_driver;
|
||||
unsigned int cpu_id = 0;
|
||||
|
||||
mpu_pd = pwrdm_lookup("mpu_pwrdm");
|
||||
cpu0_pd = pwrdm_lookup("cpu0_pwrdm");
|
||||
cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
|
||||
if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd))
|
||||
return -ENODEV;
|
||||
|
||||
|
||||
drv->safe_state_index = -1;
|
||||
dev = &per_cpu(omap4_idle_dev, cpu_id);
|
||||
dev->cpu = cpu_id;
|
||||
|
||||
/* C1 - CPU0 ON + CPU1 ON + MPU ON */
|
||||
_fill_cstate(drv, 0, "MPUSS ON");
|
||||
drv->safe_state_index = 0;
|
||||
cx = _fill_cstate_usage(dev, 0);
|
||||
cx->valid = 1; /* C1 is always valid */
|
||||
cx->cpu_state = PWRDM_POWER_ON;
|
||||
cx->mpu_state = PWRDM_POWER_ON;
|
||||
cx->mpu_logic_state = PWRDM_POWER_RET;
|
||||
|
||||
/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
|
||||
_fill_cstate(drv, 1, "MPUSS CSWR");
|
||||
cx = _fill_cstate_usage(dev, 1);
|
||||
cx->cpu_state = PWRDM_POWER_OFF;
|
||||
cx->mpu_state = PWRDM_POWER_RET;
|
||||
cx->mpu_logic_state = PWRDM_POWER_RET;
|
||||
|
||||
/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
|
||||
_fill_cstate(drv, 2, "MPUSS OSWR");
|
||||
cx = _fill_cstate_usage(dev, 2);
|
||||
cx->cpu_state = PWRDM_POWER_OFF;
|
||||
cx->mpu_state = PWRDM_POWER_RET;
|
||||
cx->mpu_logic_state = PWRDM_POWER_OFF;
|
||||
|
||||
drv->state_count = OMAP4_NUM_STATES;
|
||||
cpuidle_register_driver(&omap4_idle_driver);
|
||||
|
||||
dev->state_count = OMAP4_NUM_STATES;
|
||||
if (cpuidle_register_device(dev)) {
|
||||
pr_err("%s: CPUidle register device failed\n", __func__);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
int __init omap4_idle_init(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_CPU_IDLE */
|
|
@@ -0,0 +1,31 @@
/*
 * OMAP memory barrier header.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *  Santosh Shilimkar <santosh.shilimkar@ti.com>
 *  Richard Woodruff <r-woodruff2@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#ifndef __MACH_BARRIERS_H
#define __MACH_BARRIERS_H

extern void omap_bus_sync(void);

#define rmb()           dsb()
#define wmb()           do { dsb(); outer_sync(); omap_bus_sync(); } while (0)
#define mb()            wmb()

#endif  /* __MACH_BARRIERS_H */
@@ -0,0 +1,57 @@
/*
 * omap-secure.h: OMAP Secure infrastructure header.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *      Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef OMAP_ARCH_OMAP_SECURE_H
#define OMAP_ARCH_OMAP_SECURE_H

/* Monitor error code */
#define  API_HAL_RET_VALUE_NS2S_CONVERSION_ERROR        0xFFFFFFFE
#define  API_HAL_RET_VALUE_SERVICE_UNKNWON              0xFFFFFFFF

/* HAL API error codes */
#define  API_HAL_RET_VALUE_OK           0x00
#define  API_HAL_RET_VALUE_FAIL         0x01

/* Secure HAL API flags */
#define FLAG_START_CRITICAL             0x4
#define FLAG_IRQFIQ_MASK                0x3
#define FLAG_IRQ_ENABLE                 0x2
#define FLAG_FIQ_ENABLE                 0x1
#define NO_FLAG                         0x0

/* Maximum Secure memory storage size */
#define OMAP_SECURE_RAM_STORAGE         (88 * SZ_1K)

/* Secure low power HAL API index */
#define OMAP4_HAL_SAVESECURERAM_INDEX   0x1a
#define OMAP4_HAL_SAVEHW_INDEX          0x1b
#define OMAP4_HAL_SAVEALL_INDEX         0x1c
#define OMAP4_HAL_SAVEGIC_INDEX         0x1d

/* Secure Monitor mode APIs */
#define OMAP4_MON_SCU_PWR_INDEX         0x108
#define OMAP4_MON_L2X0_DBG_CTRL_INDEX   0x100
#define OMAP4_MON_L2X0_CTRL_INDEX       0x102
#define OMAP4_MON_L2X0_AUXCTRL_INDEX    0x109
#define OMAP4_MON_L2X0_PREFETCH_INDEX   0x113

/* Secure PPA(Primary Protected Application) APIs */
#define OMAP4_PPA_L2_POR_INDEX          0x23
#define OMAP4_PPA_CPU_ACTRL_SMP_INDEX   0x25

#ifndef __ASSEMBLER__

extern u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs,
                                u32 arg1, u32 arg2, u32 arg3, u32 arg4);
extern u32 omap_smc2(u32 id, u32 falg, u32 pargs);
extern phys_addr_t omap_secure_ram_mempool_base(void);

#endif /* __ASSEMBLER__ */
#endif /* OMAP_ARCH_OMAP_SECURE_H */
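As an illustration of how these indices and flags are meant to be combined (a
hedged sketch only; the exact call sites live in the omap-secure/omap4-common
code added elsewhere in this series), a secure service would be invoked roughly
like this:

        /*
         * Hypothetical usage sketch, not taken verbatim from this series:
         * ask the secure monitor to save the secure RAM context into the
         * pre-reserved mempool before entering a deep low-power state.
         */
        static void example_save_secure_ram_context(void)
        {
                u32 ret;

                ret = omap_secure_dispatcher(OMAP4_HAL_SAVESECURERAM_INDEX,
                                             FLAG_START_CRITICAL,
                                             1, omap_secure_ram_mempool_base(),
                                             0, 0, 0);
                if (ret != API_HAL_RET_VALUE_OK)
                        pr_err("Saving secure RAM context failed\n");
        }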
@@ -0,0 +1,39 @@
/*
 * OMAP WakeupGen header file
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *      Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef OMAP_ARCH_WAKEUPGEN_H
#define OMAP_ARCH_WAKEUPGEN_H

#define OMAP_WKG_CONTROL_0              0x00
#define OMAP_WKG_ENB_A_0                0x10
#define OMAP_WKG_ENB_B_0                0x14
#define OMAP_WKG_ENB_C_0                0x18
#define OMAP_WKG_ENB_D_0                0x1c
#define OMAP_WKG_ENB_SECURE_A_0         0x20
#define OMAP_WKG_ENB_SECURE_B_0         0x24
#define OMAP_WKG_ENB_SECURE_C_0         0x28
#define OMAP_WKG_ENB_SECURE_D_0         0x2c
#define OMAP_WKG_ENB_A_1                0x410
#define OMAP_WKG_ENB_B_1                0x414
#define OMAP_WKG_ENB_C_1                0x418
#define OMAP_WKG_ENB_D_1                0x41c
#define OMAP_WKG_ENB_SECURE_A_1         0x420
#define OMAP_WKG_ENB_SECURE_B_1         0x424
#define OMAP_WKG_ENB_SECURE_C_1         0x428
#define OMAP_WKG_ENB_SECURE_D_1         0x42c
#define OMAP_AUX_CORE_BOOT_0            0x800
#define OMAP_AUX_CORE_BOOT_1            0x804
#define OMAP_PTMSYNCREQ_MASK            0xc00
#define OMAP_PTMSYNCREQ_EN              0xc04
#define OMAP_TIMESTAMPCYCLELO           0xc08
#define OMAP_TIMESTAMPCYCLEHI           0xc0c

extern int __init omap_wakeupgen_init(void);
#endif
@@ -254,6 +254,15 @@ static struct map_desc omap44xx_io_desc[] __initdata = {
                .length         = L4_EMU_44XX_SIZE,
                .type           = MT_DEVICE,
        },
#ifdef CONFIG_OMAP4_ERRATA_I688
        {
                .virtual        = OMAP4_SRAM_VA,
                .pfn            = __phys_to_pfn(OMAP4_SRAM_PA),
                .length         = PAGE_SIZE,
                .type           = MT_MEMORY_SO,
        },
#endif

};
#endif
|
@ -32,6 +32,8 @@
|
|||
#include <linux/debugfs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
|
||||
|
@ -39,6 +41,7 @@
|
|||
|
||||
#include "control.h"
|
||||
#include "mux.h"
|
||||
#include "prm.h"
|
||||
|
||||
#define OMAP_MUX_BASE_OFFSET 0x30 /* Offset from CTRL_BASE */
|
||||
#define OMAP_MUX_BASE_SZ 0x5ca
|
||||
|
@ -306,7 +309,8 @@ omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads)
|
|||
pad->idle = bpad->idle;
|
||||
pad->off = bpad->off;
|
||||
|
||||
if (pad->flags & OMAP_DEVICE_PAD_REMUX)
|
||||
if (pad->flags &
|
||||
(OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP))
|
||||
nr_pads_dynamic++;
|
||||
|
||||
pr_debug("%s: Initialized %s\n", __func__, pad->name);
|
||||
|
@ -331,7 +335,8 @@ omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads)
|
|||
for (i = 0; i < hmux->nr_pads; i++) {
|
||||
struct omap_device_pad *pad = &hmux->pads[i];
|
||||
|
||||
if (pad->flags & OMAP_DEVICE_PAD_REMUX) {
|
||||
if (pad->flags &
|
||||
(OMAP_DEVICE_PAD_REMUX | OMAP_DEVICE_PAD_WAKEUP)) {
|
||||
pr_debug("%s: pad %s tagged dynamic\n",
|
||||
__func__, pad->name);
|
||||
hmux->pads_dynamic[nr_pads_dynamic] = pad;
|
||||
|
@ -351,6 +356,78 @@ err1:
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* omap_hwmod_mux_scan_wakeups - omap hwmod scan wakeup pads
|
||||
* @hmux: Pads for a hwmod
|
||||
* @mpu_irqs: MPU irq array for a hwmod
|
||||
*
|
||||
* Scans the wakeup status of pads for a single hwmod. If an irq
|
||||
* array is defined for this mux, the parser will call the registered
|
||||
* ISRs for corresponding pads, otherwise the parser will stop at the
|
||||
* first wakeup active pad and return. Returns true if there is a
|
||||
* pending and non-served wakeup event for the mux, otherwise false.
|
||||
*/
|
||||
static bool omap_hwmod_mux_scan_wakeups(struct omap_hwmod_mux_info *hmux,
|
||||
struct omap_hwmod_irq_info *mpu_irqs)
|
||||
{
|
||||
int i, irq;
|
||||
unsigned int val;
|
||||
u32 handled_irqs = 0;
|
||||
|
||||
for (i = 0; i < hmux->nr_pads_dynamic; i++) {
|
||||
struct omap_device_pad *pad = hmux->pads_dynamic[i];
|
||||
|
||||
if (!(pad->flags & OMAP_DEVICE_PAD_WAKEUP) ||
|
||||
!(pad->idle & OMAP_WAKEUP_EN))
|
||||
continue;
|
||||
|
||||
val = omap_mux_read(pad->partition, pad->mux->reg_offset);
|
||||
if (!(val & OMAP_WAKEUP_EVENT))
|
||||
continue;
|
||||
|
||||
if (!hmux->irqs)
|
||||
return true;
|
||||
|
||||
irq = hmux->irqs[i];
|
||||
/* make sure we only handle each irq once */
|
||||
if (handled_irqs & 1 << irq)
|
||||
continue;
|
||||
|
||||
handled_irqs |= 1 << irq;
|
||||
|
||||
generic_handle_irq(mpu_irqs[irq].irq);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* _omap_hwmod_mux_handle_irq - Process wakeup events for a single hwmod
|
||||
*
|
||||
* Checks a single hwmod for every wakeup capable pad to see if there is an
|
||||
* active wakeup event. If this is the case, call the corresponding ISR.
|
||||
*/
|
||||
static int _omap_hwmod_mux_handle_irq(struct omap_hwmod *oh, void *data)
|
||||
{
|
||||
if (!oh->mux || !oh->mux->enabled)
|
||||
return 0;
|
||||
if (omap_hwmod_mux_scan_wakeups(oh->mux, oh->mpu_irqs))
|
||||
generic_handle_irq(oh->mpu_irqs[0].irq);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* omap_hwmod_mux_handle_irq - Process pad wakeup irqs.
|
||||
*
|
||||
* Calls a function for each registered omap_hwmod to check
|
||||
* pad wakeup statuses.
|
||||
*/
|
||||
static irqreturn_t omap_hwmod_mux_handle_irq(int irq, void *unused)
|
||||
{
|
||||
omap_hwmod_for_each(_omap_hwmod_mux_handle_irq, NULL);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
/* Assumes the calling function takes care of locking */
|
||||
void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state)
|
||||
{
|
||||
|
@ -715,6 +792,7 @@ static void __init omap_mux_free_names(struct omap_mux *m)
|
|||
static int __init omap_mux_late_init(void)
|
||||
{
|
||||
struct omap_mux_partition *partition;
|
||||
int ret;
|
||||
|
||||
list_for_each_entry(partition, &mux_partitions, node) {
|
||||
struct omap_mux_entry *e, *tmp;
|
||||
|
@ -735,6 +813,13 @@ static int __init omap_mux_late_init(void)
|
|||
}
|
||||
}
|
||||
|
||||
ret = request_irq(omap_prcm_event_to_irq("io"),
|
||||
omap_hwmod_mux_handle_irq, IRQF_SHARED | IRQF_NO_SUSPEND,
|
||||
"hwmod_io", omap_mux_late_init);
|
||||
|
||||
if (ret)
|
||||
pr_warning("mux: Failed to setup hwmod io irq %d\n", ret);
|
||||
|
||||
omap_mux_dbg_init();
|
||||
|
||||
return 0;
|
||||
|
|
|
@@ -18,11 +18,6 @@
#include <linux/linkage.h>
#include <linux/init.h>

/* Physical address needed since MMU not enabled yet on secondary core */
#define OMAP4_AUX_CORE_BOOT1_PA                 0x48281804

        __INIT

/*
 * OMAP4 specific entry point for secondary CPU to jump from ROM
 * code. This routine also provides a holding flag into which
@@ -22,6 +22,8 @@

#include "common.h"

#include "powerdomain.h"

int platform_cpu_kill(unsigned int cpu)
{
        return 1;

@@ -33,6 +35,8 @@ int platform_cpu_kill(unsigned int cpu)
 */
void platform_cpu_die(unsigned int cpu)
{
        unsigned int this_cpu;

        flush_cache_all();
        dsb();

@@ -40,15 +44,15 @@ void platform_cpu_die(unsigned int cpu)
         * we're ready for shutdown now, so do it
         */
        if (omap_modify_auxcoreboot0(0x0, 0x200) != 0x0)
                printk(KERN_CRIT "Secure clear status failed\n");
                pr_err("Secure clear status failed\n");

        for (;;) {
                /*
                 * Execute WFI
                 * Enter into low power state
                 */
                do_wfi();

                if (omap_read_auxcoreboot0() == cpu) {
                        omap4_hotplug_cpu(cpu, PWRDM_POWER_OFF);
                this_cpu = smp_processor_id();
                if (omap_read_auxcoreboot0() == this_cpu) {
                        /*
                         * OK, proper wakeup, we're done
                         */
|
@ -0,0 +1,398 @@
|
|||
/*
|
||||
* OMAP MPUSS low power code
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments, Inc.
|
||||
* Santosh Shilimkar <santosh.shilimkar@ti.com>
|
||||
*
|
||||
* OMAP4430 MPUSS mainly consists of dual Cortex-A9 with per-CPU
|
||||
* Local timer and Watchdog, GIC, SCU, PL310 L2 cache controller,
|
||||
* CPU0 and CPU1 LPRM modules.
|
||||
* CPU0, CPU1 and MPUSS each have there own power domain and
|
||||
* hence multiple low power combinations of MPUSS are possible.
|
||||
*
|
||||
* The CPU0 and CPU1 can't support Closed switch Retention (CSWR)
|
||||
* because the mode is not supported by hw constraints of dormant
|
||||
* mode. While waking up from the dormant mode, a reset signal
|
||||
* to the Cortex-A9 processor must be asserted by the external
|
||||
* power controller.
|
||||
*
|
||||
* With architectural inputs and hardware recommendations, only
|
||||
* below modes are supported from power gain vs latency point of view.
|
||||
*
|
||||
* CPU0 CPU1 MPUSS
|
||||
* ----------------------------------------------
|
||||
* ON ON ON
|
||||
* ON(Inactive) OFF ON(Inactive)
|
||||
* OFF OFF CSWR
|
||||
* OFF OFF OSWR
|
||||
* OFF OFF OFF(Device OFF *TBD)
|
||||
* ----------------------------------------------
|
||||
*
|
||||
* Note: CPU0 is the master core and it is the last CPU to go down
|
||||
* and first to wake-up when MPUSS low power states are excercised
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <linux/smp.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/smp_scu.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/suspend.h>
|
||||
#include <asm/hardware/cache-l2x0.h>
|
||||
|
||||
#include <plat/omap44xx.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "omap4-sar-layout.h"
|
||||
#include "pm.h"
|
||||
#include "prcm_mpu44xx.h"
|
||||
#include "prminst44xx.h"
|
||||
#include "prcm44xx.h"
|
||||
#include "prm44xx.h"
|
||||
#include "prm-regbits-44xx.h"
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
struct omap4_cpu_pm_info {
|
||||
struct powerdomain *pwrdm;
|
||||
void __iomem *scu_sar_addr;
|
||||
void __iomem *wkup_sar_addr;
|
||||
void __iomem *l2x0_sar_addr;
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
|
||||
static struct powerdomain *mpuss_pd;
|
||||
static void __iomem *sar_base;
|
||||
|
||||
/*
|
||||
* Program the wakeup routine address for the CPU0 and CPU1
|
||||
* used for OFF or DORMANT wakeup.
|
||||
*/
|
||||
static inline void set_cpu_wakeup_addr(unsigned int cpu_id, u32 addr)
|
||||
{
|
||||
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
|
||||
|
||||
__raw_writel(addr, pm_info->wkup_sar_addr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the CPUx powerdomain's previous power state
|
||||
*/
|
||||
static inline void set_cpu_next_pwrst(unsigned int cpu_id,
|
||||
unsigned int power_state)
|
||||
{
|
||||
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
|
||||
|
||||
pwrdm_set_next_pwrst(pm_info->pwrdm, power_state);
|
||||
}
|
||||
|
||||
/*
|
||||
* Read CPU's previous power state
|
||||
*/
|
||||
static inline unsigned int read_cpu_prev_pwrst(unsigned int cpu_id)
|
||||
{
|
||||
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
|
||||
|
||||
return pwrdm_read_prev_pwrst(pm_info->pwrdm);
|
||||
}
|
||||
|
||||
/*
|
||||
* Clear the CPUx powerdomain's previous power state
|
||||
*/
|
||||
static inline void clear_cpu_prev_pwrst(unsigned int cpu_id)
|
||||
{
|
||||
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
|
||||
|
||||
pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
|
||||
}
|
||||
|
||||
/*
|
||||
* Store the SCU power status value to scratchpad memory
|
||||
*/
|
||||
static void scu_pwrst_prepare(unsigned int cpu_id, unsigned int cpu_state)
|
||||
{
|
||||
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
|
||||
u32 scu_pwr_st;
|
||||
|
||||
switch (cpu_state) {
|
||||
case PWRDM_POWER_RET:
|
||||
scu_pwr_st = SCU_PM_DORMANT;
|
||||
break;
|
||||
case PWRDM_POWER_OFF:
|
||||
scu_pwr_st = SCU_PM_POWEROFF;
|
||||
break;
|
||||
case PWRDM_POWER_ON:
|
||||
case PWRDM_POWER_INACTIVE:
|
||||
default:
|
||||
scu_pwr_st = SCU_PM_NORMAL;
|
||||
break;
|
||||
}
|
||||
|
||||
__raw_writel(scu_pwr_st, pm_info->scu_sar_addr);
|
||||
}
|
||||
|
||||
/* Helper functions for MPUSS OSWR */
|
||||
static inline void mpuss_clear_prev_logic_pwrst(void)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
|
||||
OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
|
||||
omap4_prminst_write_inst_reg(reg, OMAP4430_PRM_PARTITION,
|
||||
OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
|
||||
}
|
||||
|
||||
static inline void cpu_clear_prev_logic_pwrst(unsigned int cpu_id)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
if (cpu_id) {
|
||||
reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU1_INST,
|
||||
OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET);
|
||||
omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU1_INST,
|
||||
OMAP4_RM_CPU1_CPU1_CONTEXT_OFFSET);
|
||||
} else {
|
||||
reg = omap4_prcm_mpu_read_inst_reg(OMAP4430_PRCM_MPU_CPU0_INST,
|
||||
OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET);
|
||||
omap4_prcm_mpu_write_inst_reg(reg, OMAP4430_PRCM_MPU_CPU0_INST,
|
||||
OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* omap4_mpuss_read_prev_context_state:
|
||||
* Function returns the MPUSS previous context state
|
||||
*/
|
||||
u32 omap4_mpuss_read_prev_context_state(void)
|
||||
{
|
||||
u32 reg;
|
||||
|
||||
reg = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
|
||||
OMAP4430_PRM_MPU_INST, OMAP4_RM_MPU_MPU_CONTEXT_OFFSET);
|
||||
reg &= OMAP4430_LOSTCONTEXT_DFF_MASK;
|
||||
return reg;
|
||||
}
|
||||
|
||||
/*
|
||||
* Store the CPU cluster state for L2X0 low power operations.
|
||||
*/
|
||||
static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
|
||||
{
|
||||
struct omap4_cpu_pm_info *pm_info = &per_cpu(omap4_pm_info, cpu_id);
|
||||
|
||||
__raw_writel(save_state, pm_info->l2x0_sar_addr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Save the L2X0 AUXCTRL and POR value to SAR memory. Its used to
|
||||
* in every restore MPUSS OFF path.
|
||||
*/
|
||||
#ifdef CONFIG_CACHE_L2X0
|
||||
static void save_l2x0_context(void)
|
||||
{
|
||||
u32 val;
|
||||
void __iomem *l2x0_base = omap4_get_l2cache_base();
|
||||
|
||||
val = __raw_readl(l2x0_base + L2X0_AUX_CTRL);
|
||||
__raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET);
|
||||
val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL);
|
||||
__raw_writel(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET);
|
||||
}
|
||||
#else
|
||||
static void save_l2x0_context(void)
|
||||
{}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* omap4_enter_lowpower: OMAP4 MPUSS Low Power Entry Function
|
||||
* The purpose of this function is to manage low power programming
|
||||
* of OMAP4 MPUSS subsystem
|
||||
* @cpu : CPU ID
|
||||
* @power_state: Low power state.
|
||||
*
|
||||
* MPUSS states for the context save:
|
||||
* save_state =
|
||||
* 0 - Nothing lost and no need to save: MPUSS INACTIVE
|
||||
* 1 - CPUx L1 and logic lost: MPUSS CSWR
|
||||
* 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
|
||||
* 3 - CPUx L1 and logic lost + GIC + L2 lost: DEVICE OFF
|
||||
*/
|
||||
int omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
|
||||
{
|
||||
unsigned int save_state = 0;
|
||||
unsigned int wakeup_cpu;
|
||||
|
||||
if (omap_rev() == OMAP4430_REV_ES1_0)
|
||||
return -ENXIO;
|
||||
|
||||
switch (power_state) {
|
||||
case PWRDM_POWER_ON:
|
||||
case PWRDM_POWER_INACTIVE:
|
||||
save_state = 0;
|
||||
break;
|
||||
case PWRDM_POWER_OFF:
|
||||
save_state = 1;
|
||||
break;
|
||||
case PWRDM_POWER_RET:
|
||||
default:
|
||||
/*
|
||||
* CPUx CSWR is invalid hardware state. Also CPUx OSWR
|
||||
* doesn't make much scense, since logic is lost and $L1
|
||||
* needs to be cleaned because of coherency. This makes
|
||||
* CPUx OSWR equivalent to CPUX OFF and hence not supported
|
||||
*/
|
||||
WARN_ON(1);
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
pwrdm_pre_transition();
|
||||
|
||||
/*
|
||||
* Check MPUSS next state and save interrupt controller if needed.
|
||||
* In MPUSS OSWR or device OFF, the interrupt controller context is lost.
|
||||
*/
|
||||
mpuss_clear_prev_logic_pwrst();
|
||||
pwrdm_clear_all_prev_pwrst(mpuss_pd);
|
||||
if ((pwrdm_read_next_pwrst(mpuss_pd) == PWRDM_POWER_RET) &&
|
||||
(pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF))
|
||||
save_state = 2;
|
||||
|
||||
clear_cpu_prev_pwrst(cpu);
|
||||
cpu_clear_prev_logic_pwrst(cpu);
|
||||
set_cpu_next_pwrst(cpu, power_state);
|
||||
set_cpu_wakeup_addr(cpu, virt_to_phys(omap4_cpu_resume));
|
||||
scu_pwrst_prepare(cpu, power_state);
|
||||
l2x0_pwrst_prepare(cpu, save_state);
|
||||
|
||||
/*
|
||||
* Call low level function with targeted low power state.
|
||||
*/
|
||||
cpu_suspend(save_state, omap4_finish_suspend);
|
||||
|
||||
/*
|
||||
* Restore the CPUx power state to ON, otherwise the CPUx
* power domain can transition to the programmed low power
* state while doing WFI outside the low power code. On
* secure devices, CPUx does WFI which can result in a
* domain transition.
|
||||
*/
|
||||
wakeup_cpu = smp_processor_id();
|
||||
set_cpu_next_pwrst(wakeup_cpu, PWRDM_POWER_ON);
|
||||
|
||||
pwrdm_post_transition();
|
||||
|
||||
return 0;
|
||||
}
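
For reference, the system suspend path added later in this commit (omap4_pm_suspend()) drives this entry point roughly as follows:

	u32 cpu_id = smp_processor_id();

	/* Take the boot CPU and MPUSS down towards OFF for suspend */
	omap4_enter_lowpower(cpu_id, PWRDM_POWER_OFF);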
|
||||
|
||||
/**
|
||||
* omap4_hotplug_cpu: OMAP4 CPU hotplug entry
|
||||
* @cpu : CPU ID
|
||||
* @power_state: CPU low power state.
|
||||
*/
|
||||
int omap4_hotplug_cpu(unsigned int cpu, unsigned int power_state)
|
||||
{
|
||||
unsigned int cpu_state = 0;
|
||||
|
||||
if (omap_rev() == OMAP4430_REV_ES1_0)
|
||||
return -ENXIO;
|
||||
|
||||
if (power_state == PWRDM_POWER_OFF)
|
||||
cpu_state = 1;
|
||||
|
||||
clear_cpu_prev_pwrst(cpu);
|
||||
set_cpu_next_pwrst(cpu, power_state);
|
||||
set_cpu_wakeup_addr(cpu, virt_to_phys(omap_secondary_startup));
|
||||
scu_pwrst_prepare(cpu, power_state);
|
||||
|
||||
/*
|
||||
* The CPU never returns if the targeted power state is OFF mode.
* CPU ONLINE follows the normal CPU ONLINE path via
|
||||
* omap_secondary_startup().
|
||||
*/
|
||||
omap4_finish_suspend(cpu_state);
|
||||
|
||||
set_cpu_next_pwrst(cpu, PWRDM_POWER_ON);
|
||||
return 0;
|
||||
}
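
A minimal sketch of the expected CPU hotplug usage; the surrounding platform_cpu_die() plumbing is assumed and not shown in this hunk:

	/* Dying CPU parks itself in OFF until the next online request */
	omap4_hotplug_cpu(smp_processor_id(), PWRDM_POWER_OFF);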
|
||||
|
||||
|
||||
/*
|
||||
* Initialise OMAP4 MPUSS
|
||||
*/
|
||||
int __init omap4_mpuss_init(void)
|
||||
{
|
||||
struct omap4_cpu_pm_info *pm_info;
|
||||
|
||||
if (omap_rev() == OMAP4430_REV_ES1_0) {
|
||||
WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
sar_base = omap4_get_sar_ram_base();
|
||||
|
||||
/* Initialise per-CPU PM information */
|
||||
pm_info = &per_cpu(omap4_pm_info, 0x0);
|
||||
pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
|
||||
pm_info->wkup_sar_addr = sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
|
||||
pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
|
||||
pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
|
||||
if (!pm_info->pwrdm) {
|
||||
pr_err("Lookup failed for CPU0 pwrdm\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Clear CPU previous power domain state */
|
||||
pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
|
||||
cpu_clear_prev_logic_pwrst(0);
|
||||
|
||||
/* Initialise CPU0 power domain state to ON */
|
||||
pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
|
||||
|
||||
pm_info = &per_cpu(omap4_pm_info, 0x1);
|
||||
pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
|
||||
pm_info->wkup_sar_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
|
||||
pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
|
||||
pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
|
||||
if (!pm_info->pwrdm) {
|
||||
pr_err("Lookup failed for CPU1 pwrdm\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Clear CPU previous power domain state */
|
||||
pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
|
||||
cpu_clear_prev_logic_pwrst(1);
|
||||
|
||||
/* Initialise CPU1 power domain state to ON */
|
||||
pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);
|
||||
|
||||
mpuss_pd = pwrdm_lookup("mpu_pwrdm");
|
||||
if (!mpuss_pd) {
|
||||
pr_err("Failed to lookup MPUSS power domain\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
pwrdm_clear_all_prev_pwrst(mpuss_pd);
|
||||
mpuss_clear_prev_logic_pwrst();
|
||||
|
||||
/* Save device type on scratchpad for low level code to use */
|
||||
if (omap_type() != OMAP2_DEVICE_TYPE_GP)
|
||||
__raw_writel(1, sar_base + OMAP_TYPE_OFFSET);
|
||||
else
|
||||
__raw_writel(0, sar_base + OMAP_TYPE_OFFSET);
|
||||
|
||||
save_l2x0_context();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif
|
|
@ -0,0 +1,81 @@
|
|||
/*
 * OMAP Secure API infrastructure.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/memblock.h>
|
||||
|
||||
#include <asm/cacheflush.h>
|
||||
|
||||
#include <mach/omap-secure.h>
|
||||
|
||||
static phys_addr_t omap_secure_memblock_base;
|
||||
|
||||
/**
|
||||
* omap_secure_dispatcher: Routine to dispatch low power secure
|
||||
* service routines
|
||||
* @idx: The HAL API index
|
||||
* @flag: The flag indicating criticality of operation
|
||||
* @nargs: Number of valid arguments out of four.
|
||||
* @arg1, arg2, arg3, arg4: Parameters passed to the secure API
|
||||
*
|
||||
* Return the non-zero error value on failure.
|
||||
*/
|
||||
u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
|
||||
u32 arg3, u32 arg4)
|
||||
{
|
||||
u32 ret;
|
||||
u32 param[5];
|
||||
|
||||
param[0] = nargs;
|
||||
param[1] = arg1;
|
||||
param[2] = arg2;
|
||||
param[3] = arg3;
|
||||
param[4] = arg4;
|
||||
|
||||
/*
|
||||
* Secure API needs physical address
|
||||
* pointer for the parameters
|
||||
*/
|
||||
flush_cache_all();
|
||||
outer_clean_range(__pa(param), __pa(param + 5));
|
||||
ret = omap_smc2(idx, flag, __pa(param));
|
||||
|
||||
return ret;
|
||||
}
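
Elsewhere in this commit, platform_secondary_init() invokes the dispatcher like this to enable NS SMP bit access on CPU1 of HS devices:

	omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
				4, 0, 0, 0, 0, 0);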
|
||||
|
||||
/* Allocate the memory to save secure ram */
|
||||
int __init omap_secure_ram_reserve_memblock(void)
|
||||
{
|
||||
phys_addr_t paddr;
|
||||
u32 size = OMAP_SECURE_RAM_STORAGE;
|
||||
|
||||
size = ALIGN(size, SZ_1M);
|
||||
paddr = memblock_alloc(size, SZ_1M);
|
||||
if (!paddr) {
|
||||
pr_err("%s: failed to reserve %x bytes\n",
|
||||
__func__, size);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memblock_free(paddr, size);
|
||||
memblock_remove(paddr, size);
|
||||
|
||||
omap_secure_memblock_base = paddr;
|
||||
|
||||
return 0;
|
||||
}
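
This reservation has to run from the machine's early memblock reserve hook, before the page allocator takes over. A hedged sketch follows; the hook name is purely illustrative:

	void __init board_reserve(void)
	{
		/* other early memblock reservations go here */
		omap_secure_ram_reserve_memblock();
	}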
|
||||
|
||||
phys_addr_t omap_secure_ram_mempool_base(void)
|
||||
{
|
||||
return omap_secure_memblock_base;
|
||||
}
|
|
@ -31,6 +31,29 @@ ENTRY(omap_smc1)
|
|||
ldmfd sp!, {r2-r12, pc}
|
||||
ENDPROC(omap_smc1)
|
||||
|
||||
/**
|
||||
* u32 omap_smc2(u32 id, u32 flag, u32 pargs)
|
||||
* Low level common routine for secure HAL and PPA APIs.
|
||||
* @id: Application ID of HAL APIs
|
||||
* @flag: Flag to indicate the criticality of operation
|
||||
* @pargs: Physical address of parameter list starting
|
||||
* with the number of parameters
|
||||
*/
|
||||
ENTRY(omap_smc2)
|
||||
stmfd sp!, {r4-r12, lr}
|
||||
mov r3, r2
|
||||
mov r2, r1
|
||||
mov r1, #0x0 @ Process ID
|
||||
mov r6, #0xff
|
||||
mov r12, #0x00 @ Secure Service ID
|
||||
mov r7, #0
|
||||
mcr p15, 0, r7, c7, c5, 6
|
||||
dsb
|
||||
dmb
|
||||
smc #0
|
||||
ldmfd sp!, {r4-r12, pc}
|
||||
ENDPROC(omap_smc2)
|
||||
|
||||
ENTRY(omap_modify_auxcoreboot0)
|
||||
stmfd sp!, {r1-r12, lr}
|
||||
ldr r12, =0x104
|
|
@ -24,16 +24,36 @@
|
|||
#include <asm/hardware/gic.h>
|
||||
#include <asm/smp_scu.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/omap-secure.h>
|
||||
|
||||
#include "common.h"
|
||||
|
||||
#include "clockdomain.h"
|
||||
|
||||
/* SCU base address */
|
||||
static void __iomem *scu_base;
|
||||
|
||||
static DEFINE_SPINLOCK(boot_lock);
|
||||
|
||||
void __iomem *omap4_get_scu_base(void)
|
||||
{
|
||||
return scu_base;
|
||||
}
|
||||
|
||||
void __cpuinit platform_secondary_init(unsigned int cpu)
|
||||
{
|
||||
/*
|
||||
* Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
|
||||
* OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
|
||||
* init and for CPU1, a secure PPA API is provided. CPU0 must be ON
* while executing the NS_SMP API on CPU1 and the PPA version must be 1.4.0+.
* OMAP443X GP devices - SMP bit isn't accessible.
|
||||
* OMAP446X GP devices - SMP bit access is enabled on both CPUs.
|
||||
*/
|
||||
if (cpu_is_omap443x() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
|
||||
omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
|
||||
4, 0, 0, 0, 0, 0);
|
||||
|
||||
/*
|
||||
* If any interrupts are already enabled for the primary
|
||||
* core (e.g. timer irq), then they will not have been enabled
|
||||
|
@ -50,6 +70,8 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
|
|||
|
||||
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
static struct clockdomain *cpu1_clkdm;
|
||||
static bool booted;
|
||||
/*
|
||||
* Set synchronisation state between this boot processor
|
||||
* and the secondary one
|
||||
|
@ -65,6 +87,29 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
|
|||
omap_modify_auxcoreboot0(0x200, 0xfffffdff);
|
||||
flush_cache_all();
|
||||
smp_wmb();
|
||||
|
||||
if (!cpu1_clkdm)
|
||||
cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
|
||||
|
||||
/*
|
||||
* SGIs (Software Generated Interrupts) are not wakeup capable
* from low power states. This is a known limitation on OMAP4 and
|
||||
* needs to be worked around by using software forced clockdomain
|
||||
* wake-up. To wakeup CPU1, CPU0 forces the CPU1 clockdomain to
|
||||
* software force wakeup. The clockdomain is then put back to
|
||||
* hardware supervised mode.
|
||||
* More details can be found in OMAP4430 TRM - Version J
|
||||
* Section :
|
||||
* 4.3.4.2 Power States of CPU0 and CPU1
|
||||
*/
|
||||
if (booted) {
|
||||
clkdm_wakeup(cpu1_clkdm);
|
||||
clkdm_allow_idle(cpu1_clkdm);
|
||||
} else {
|
||||
dsb_sev();
|
||||
booted = true;
|
||||
}
|
||||
|
||||
gic_raise_softirq(cpumask_of(cpu), 1);
|
||||
|
||||
/*
|
||||
|
|
|
@ -0,0 +1,389 @@
|
|||
/*
|
||||
* OMAP WakeupGen Source file
|
||||
*
|
||||
* OMAP WakeupGen is the interrupt controller extension used along
|
||||
* with ARM GIC to wake the CPU out from low power states on
|
||||
* external interrupts. It is responsible for generating wakeup
|
||||
* event from the incoming interrupts and enable bits. It is
|
||||
* implemented in MPU always ON power domain. During normal operation,
|
||||
* WakeupGen delivers external interrupts directly to the GIC.
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments, Inc.
|
||||
* Santosh Shilimkar <santosh.shilimkar@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/cpu_pm.h>
|
||||
|
||||
#include <asm/hardware/gic.h>
|
||||
|
||||
#include <mach/omap-wakeupgen.h>
|
||||
#include <mach/omap-secure.h>
|
||||
|
||||
#include "omap4-sar-layout.h"
|
||||
#include "common.h"
|
||||
|
||||
#define NR_REG_BANKS		4
#define MAX_IRQS		128
#define WKG_MASK_ALL		0x00000000
#define WKG_UNMASK_ALL		0xffffffff
#define CPU_ENA_OFFSET		0x400
#define CPU0_ID			0x0
#define CPU1_ID			0x1
|
||||
|
||||
static void __iomem *wakeupgen_base;
|
||||
static void __iomem *sar_base;
|
||||
static DEFINE_PER_CPU(u32 [NR_REG_BANKS], irqmasks);
|
||||
static DEFINE_SPINLOCK(wakeupgen_lock);
|
||||
static unsigned int irq_target_cpu[NR_IRQS];
|
||||
|
||||
/*
|
||||
* Static helper functions.
|
||||
*/
|
||||
static inline u32 wakeupgen_readl(u8 idx, u32 cpu)
|
||||
{
|
||||
return __raw_readl(wakeupgen_base + OMAP_WKG_ENB_A_0 +
|
||||
(cpu * CPU_ENA_OFFSET) + (idx * 4));
|
||||
}
|
||||
|
||||
static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
|
||||
{
|
||||
__raw_writel(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
|
||||
(cpu * CPU_ENA_OFFSET) + (idx * 4));
|
||||
}
|
||||
|
||||
static inline void sar_writel(u32 val, u32 offset, u8 idx)
|
||||
{
|
||||
__raw_writel(val, sar_base + offset + (idx * 4));
|
||||
}
|
||||
|
||||
static void _wakeupgen_set_all(unsigned int cpu, unsigned int reg)
|
||||
{
|
||||
u8 i;
|
||||
|
||||
for (i = 0; i < NR_REG_BANKS; i++)
|
||||
wakeupgen_writel(reg, i, cpu);
|
||||
}
|
||||
|
||||
static inline int _wakeupgen_get_irq_info(u32 irq, u32 *bit_posn, u8 *reg_index)
|
||||
{
|
||||
unsigned int spi_irq;
|
||||
|
||||
/*
|
||||
* PPIs and SGIs are not supported.
|
||||
*/
|
||||
if (irq < OMAP44XX_IRQ_GIC_START)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Subtract the GIC offset.
|
||||
*/
|
||||
spi_irq = irq - OMAP44XX_IRQ_GIC_START;
|
||||
if (spi_irq > MAX_IRQS) {
|
||||
pr_err("omap wakeupGen: Invalid IRQ%d\n", irq);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Each WakeupGen register controls 32 interrupts,
* i.e. 1 bit per SPI IRQ.
|
||||
*/
|
||||
*reg_index = spi_irq >> 5;
|
||||
*bit_posn = spi_irq %= 32;
|
||||
|
||||
return 0;
|
||||
}
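
As a worked example of the mapping above, assuming OMAP44XX_IRQ_GIC_START is 32 as on OMAP4: MPU IRQ 73 gives spi_irq = 73 - 32 = 41, which lands in enable bank 41 >> 5 = 1 at bit position 41 % 32 = 9.

	/* e.g. irq = 73 -> spi_irq = 41 -> *reg_index = 1, *bit_posn = 9 */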
|
||||
|
||||
static void _wakeupgen_clear(unsigned int irq, unsigned int cpu)
|
||||
{
|
||||
u32 val, bit_number;
|
||||
u8 i;
|
||||
|
||||
if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
|
||||
return;
|
||||
|
||||
val = wakeupgen_readl(i, cpu);
|
||||
val &= ~BIT(bit_number);
|
||||
wakeupgen_writel(val, i, cpu);
|
||||
}
|
||||
|
||||
static void _wakeupgen_set(unsigned int irq, unsigned int cpu)
|
||||
{
|
||||
u32 val, bit_number;
|
||||
u8 i;
|
||||
|
||||
if (_wakeupgen_get_irq_info(irq, &bit_number, &i))
|
||||
return;
|
||||
|
||||
val = wakeupgen_readl(i, cpu);
|
||||
val |= BIT(bit_number);
|
||||
wakeupgen_writel(val, i, cpu);
|
||||
}
|
||||
|
||||
static void _wakeupgen_save_masks(unsigned int cpu)
|
||||
{
|
||||
u8 i;
|
||||
|
||||
for (i = 0; i < NR_REG_BANKS; i++)
|
||||
per_cpu(irqmasks, cpu)[i] = wakeupgen_readl(i, cpu);
|
||||
}
|
||||
|
||||
static void _wakeupgen_restore_masks(unsigned int cpu)
|
||||
{
|
||||
u8 i;
|
||||
|
||||
for (i = 0; i < NR_REG_BANKS; i++)
|
||||
wakeupgen_writel(per_cpu(irqmasks, cpu)[i], i, cpu);
|
||||
}
|
||||
|
||||
/*
|
||||
* Architecture specific Mask extension
|
||||
*/
|
||||
static void wakeupgen_mask(struct irq_data *d)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&wakeupgen_lock, flags);
|
||||
_wakeupgen_clear(d->irq, irq_target_cpu[d->irq]);
|
||||
spin_unlock_irqrestore(&wakeupgen_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Architecture specific Unmask extension
|
||||
*/
|
||||
static void wakeupgen_unmask(struct irq_data *d)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&wakeupgen_lock, flags);
|
||||
_wakeupgen_set(d->irq, irq_target_cpu[d->irq]);
|
||||
spin_unlock_irqrestore(&wakeupgen_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* Mask or unmask all interrupts on given CPU.
|
||||
* 1 = Mask all interrupts on the 'cpu' (the current masks are saved first)
* 0 = Restore the previously saved interrupt masks on the 'cpu'
|
||||
* Ensure that the initial mask is maintained. This is faster than
|
||||
* iterating through GIC registers to arrive at the correct masks.
|
||||
*/
|
||||
static void wakeupgen_irqmask_all(unsigned int cpu, unsigned int set)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&wakeupgen_lock, flags);
|
||||
if (set) {
|
||||
_wakeupgen_save_masks(cpu);
|
||||
_wakeupgen_set_all(cpu, WKG_MASK_ALL);
|
||||
} else {
|
||||
_wakeupgen_set_all(cpu, WKG_UNMASK_ALL);
|
||||
_wakeupgen_restore_masks(cpu);
|
||||
}
|
||||
spin_unlock_irqrestore(&wakeupgen_lock, flags);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CPU_PM
|
||||
/*
|
||||
* Save WakeupGen interrupt context in SAR BANK3. Restore is done by
|
||||
* ROM code. WakeupGen IP is integrated along with GIC to manage the
|
||||
* interrupt wakeups from CPU low power states. It manages
|
||||
* masking/unmasking of Shared Peripheral Interrupts (SPI). So the
|
||||
* interrupt enable/disable control should be in sync and consistent
|
||||
* at WakeupGen and GIC so that interrupts are not lost.
|
||||
*/
|
||||
static void irq_save_context(void)
|
||||
{
|
||||
u32 i, val;
|
||||
|
||||
if (omap_rev() == OMAP4430_REV_ES1_0)
|
||||
return;
|
||||
|
||||
if (!sar_base)
|
||||
sar_base = omap4_get_sar_ram_base();
|
||||
|
||||
for (i = 0; i < NR_REG_BANKS; i++) {
|
||||
/* Save the CPUx interrupt mask for IRQ 0 to 127 */
|
||||
val = wakeupgen_readl(i, 0);
|
||||
sar_writel(val, WAKEUPGENENB_OFFSET_CPU0, i);
|
||||
val = wakeupgen_readl(i, 1);
|
||||
sar_writel(val, WAKEUPGENENB_OFFSET_CPU1, i);
|
||||
|
||||
/*
|
||||
* Disable the secure interrupts for CPUx. The restore
|
||||
* code blindly restores secure and non-secure interrupt
|
||||
* masks from SAR RAM. Secure interrupts are not supposed
* to be enabled from the HLOS. So overwrite the SAR location
|
||||
* so that the secure interrupt remains disabled.
|
||||
*/
|
||||
sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU0, i);
|
||||
sar_writel(0x0, WAKEUPGENENB_SECURE_OFFSET_CPU1, i);
|
||||
}
|
||||
|
||||
/* Save AuxBoot* registers */
|
||||
val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
|
||||
__raw_writel(val, sar_base + AUXCOREBOOT0_OFFSET);
|
||||
val = __raw_readl(wakeupgen_base + OMAP_AUX_CORE_BOOT_0);
|
||||
__raw_writel(val, sar_base + AUXCOREBOOT1_OFFSET);
|
||||
|
||||
|
||||
/* Save SyncReq generation logic */
|
||||
val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_MASK);
|
||||
__raw_writel(val, sar_base + PTMSYNCREQ_MASK_OFFSET);
|
||||
val = __raw_readl(wakeupgen_base + OMAP_PTMSYNCREQ_EN);
|
||||
__raw_writel(val, sar_base + PTMSYNCREQ_EN_OFFSET);
|
||||
|
||||
/* Set the Backup Bit Mask status */
|
||||
val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
|
||||
val |= SAR_BACKUP_STATUS_WAKEUPGEN;
|
||||
__raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
|
||||
}
|
||||
|
||||
/*
|
||||
* Clear WakeupGen SAR backup status.
|
||||
*/
|
||||
void irq_sar_clear(void)
|
||||
{
|
||||
u32 val;
|
||||
val = __raw_readl(sar_base + SAR_BACKUP_STATUS_OFFSET);
|
||||
val &= ~SAR_BACKUP_STATUS_WAKEUPGEN;
|
||||
__raw_writel(val, sar_base + SAR_BACKUP_STATUS_OFFSET);
|
||||
}
|
||||
|
||||
/*
|
||||
* Save GIC and Wakeupgen interrupt context using secure API
|
||||
* for HS/EMU devices.
|
||||
*/
|
||||
static void irq_save_secure_context(void)
|
||||
{
|
||||
u32 ret;
|
||||
ret = omap_secure_dispatcher(OMAP4_HAL_SAVEGIC_INDEX,
|
||||
FLAG_START_CRITICAL,
|
||||
0, 0, 0, 0, 0);
|
||||
if (ret != API_HAL_RET_VALUE_OK)
|
||||
pr_err("GIC and Wakeupgen context save failed\n");
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
|
||||
unsigned long action, void *hcpu)
|
||||
{
|
||||
unsigned int cpu = (unsigned int)hcpu;
|
||||
|
||||
switch (action) {
|
||||
case CPU_ONLINE:
|
||||
wakeupgen_irqmask_all(cpu, 0);
|
||||
break;
|
||||
case CPU_DEAD:
|
||||
wakeupgen_irqmask_all(cpu, 1);
|
||||
break;
|
||||
}
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block __refdata irq_hotplug_notifier = {
|
||||
.notifier_call = irq_cpu_hotplug_notify,
|
||||
};
|
||||
|
||||
static void __init irq_hotplug_init(void)
|
||||
{
|
||||
register_hotcpu_notifier(&irq_hotplug_notifier);
|
||||
}
|
||||
#else
|
||||
static void __init irq_hotplug_init(void)
|
||||
{}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CPU_PM
|
||||
static int irq_notifier(struct notifier_block *self, unsigned long cmd, void *v)
|
||||
{
|
||||
switch (cmd) {
|
||||
case CPU_CLUSTER_PM_ENTER:
|
||||
if (omap_type() == OMAP2_DEVICE_TYPE_GP)
|
||||
irq_save_context();
|
||||
else
|
||||
irq_save_secure_context();
|
||||
break;
|
||||
case CPU_CLUSTER_PM_EXIT:
|
||||
if (omap_type() == OMAP2_DEVICE_TYPE_GP)
|
||||
irq_sar_clear();
|
||||
break;
|
||||
}
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block irq_notifier_block = {
|
||||
.notifier_call = irq_notifier,
|
||||
};
|
||||
|
||||
static void __init irq_pm_init(void)
|
||||
{
|
||||
cpu_pm_register_notifier(&irq_notifier_block);
|
||||
}
|
||||
#else
|
||||
static void __init irq_pm_init(void)
|
||||
{}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Initialise the wakeupgen module.
|
||||
*/
|
||||
int __init omap_wakeupgen_init(void)
|
||||
{
|
||||
int i;
|
||||
unsigned int boot_cpu = smp_processor_id();
|
||||
|
||||
/* Not supported on OMAP4 ES1.0 silicon */
|
||||
if (omap_rev() == OMAP4430_REV_ES1_0) {
|
||||
WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
/* Static mapping, never released */
|
||||
wakeupgen_base = ioremap(OMAP44XX_WKUPGEN_BASE, SZ_4K);
|
||||
if (WARN_ON(!wakeupgen_base))
|
||||
return -ENOMEM;
|
||||
|
||||
/* Clear all IRQ bitmasks at wakeupGen level */
|
||||
for (i = 0; i < NR_REG_BANKS; i++) {
|
||||
wakeupgen_writel(0, i, CPU0_ID);
|
||||
wakeupgen_writel(0, i, CPU1_ID);
|
||||
}
|
||||
|
||||
/*
|
||||
* Override GIC architecture specific functions to add
|
||||
* OMAP WakeupGen interrupt controller along with GIC
|
||||
*/
|
||||
gic_arch_extn.irq_mask = wakeupgen_mask;
|
||||
gic_arch_extn.irq_unmask = wakeupgen_unmask;
|
||||
gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;
|
||||
|
||||
/*
|
||||
* FIXME: Add support to set_smp_affinity() once the core
|
||||
* GIC code has necessary hooks in place.
|
||||
*/
|
||||
|
||||
/* Associate all the IRQs to boot CPU like GIC init does. */
|
||||
for (i = 0; i < NR_IRQS; i++)
|
||||
irq_target_cpu[i] = boot_cpu;
|
||||
|
||||
irq_hotplug_init();
|
||||
irq_pm_init();
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -15,18 +15,73 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/memblock.h>
|
||||
|
||||
#include <asm/hardware/gic.h>
|
||||
#include <asm/hardware/cache-l2x0.h>
|
||||
#include <asm/mach/map.h>
|
||||
|
||||
#include <plat/irqs.h>
|
||||
#include <plat/sram.h>
|
||||
|
||||
#include <mach/hardware.h>
|
||||
#include <mach/omap-wakeupgen.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "omap4-sar-layout.h"
|
||||
|
||||
#ifdef CONFIG_CACHE_L2X0
|
||||
void __iomem *l2cache_base;
|
||||
static void __iomem *l2cache_base;
|
||||
#endif
|
||||
|
||||
static void __iomem *sar_ram_base;
|
||||
|
||||
#ifdef CONFIG_OMAP4_ERRATA_I688
|
||||
/* Used to implement memory barrier on DRAM path */
|
||||
#define OMAP4_DRAM_BARRIER_VA 0xfe600000
|
||||
|
||||
void __iomem *dram_sync, *sram_sync;
|
||||
|
||||
void omap_bus_sync(void)
|
||||
{
|
||||
if (dram_sync && sram_sync) {
|
||||
writel_relaxed(readl_relaxed(dram_sync), dram_sync);
|
||||
writel_relaxed(readl_relaxed(sram_sync), sram_sync);
|
||||
isb();
|
||||
}
|
||||
}
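
The intent is that, on parts affected by erratum i688, the ARM memory barrier macros end up calling this helper so that buffered writes actually reach the interconnect; the wiring lives in the mach-level barriers header rather than in this hunk, and the register name below is illustrative only:

	/* make prior normal-memory writes visible before kicking DMA */
	wmb();				/* routed through omap_bus_sync() on i688 parts */
	writel_relaxed(1, dma_ctrl_reg);	/* illustrative doorbell write */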
|
||||
|
||||
static int __init omap_barriers_init(void)
|
||||
{
|
||||
struct map_desc dram_io_desc[1];
|
||||
phys_addr_t paddr;
|
||||
u32 size;
|
||||
|
||||
if (!cpu_is_omap44xx())
|
||||
return -ENODEV;
|
||||
|
||||
size = ALIGN(PAGE_SIZE, SZ_1M);
|
||||
paddr = memblock_alloc(size, SZ_1M);
|
||||
if (!paddr) {
|
||||
pr_err("%s: failed to reserve 4 Kbytes\n", __func__);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memblock_free(paddr, size);
|
||||
memblock_remove(paddr, size);
|
||||
dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
|
||||
dram_io_desc[0].pfn = __phys_to_pfn(paddr);
|
||||
dram_io_desc[0].length = size;
|
||||
dram_io_desc[0].type = MT_MEMORY_SO;
|
||||
iotable_init(dram_io_desc, ARRAY_SIZE(dram_io_desc));
|
||||
dram_sync = (void __iomem *) dram_io_desc[0].virtual;
|
||||
sram_sync = (void __iomem *) OMAP4_SRAM_VA;
|
||||
|
||||
pr_info("OMAP4: Map 0x%08llx to 0x%08lx for dram barrier\n",
|
||||
(long long) paddr, dram_io_desc[0].virtual);
|
||||
|
||||
return 0;
|
||||
}
|
||||
core_initcall(omap_barriers_init);
|
||||
#endif
|
||||
|
||||
void __init gic_init_irq(void)
|
||||
|
@ -42,11 +97,18 @@ void __init gic_init_irq(void)
|
|||
omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
|
||||
BUG_ON(!omap_irq_base);
|
||||
|
||||
omap_wakeupgen_init();
|
||||
|
||||
gic_init(0, 29, gic_dist_base_addr, omap_irq_base);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CACHE_L2X0
|
||||
|
||||
void __iomem *omap4_get_l2cache_base(void)
|
||||
{
|
||||
return l2cache_base;
|
||||
}
|
||||
|
||||
static void omap4_l2x0_disable(void)
|
||||
{
|
||||
/* Disable PL310 L2 Cache controller */
|
||||
|
@ -72,7 +134,8 @@ static int __init omap_l2_cache_init(void)
|
|||
|
||||
/* Static mapping, never released */
|
||||
l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
|
||||
BUG_ON(!l2cache_base);
|
||||
if (WARN_ON(!l2cache_base))
|
||||
return -ENOMEM;
|
||||
|
||||
/*
|
||||
* 16-way associativity, parity disabled
|
||||
|
@ -112,3 +175,30 @@ static int __init omap_l2_cache_init(void)
|
|||
}
|
||||
early_initcall(omap_l2_cache_init);
|
||||
#endif
|
||||
|
||||
void __iomem *omap4_get_sar_ram_base(void)
|
||||
{
|
||||
return sar_ram_base;
|
||||
}
|
||||
|
||||
/*
|
||||
* SAR RAM used to save and restore the HW
|
||||
* context in low power modes
|
||||
*/
|
||||
static int __init omap4_sar_ram_init(void)
|
||||
{
|
||||
/*
|
||||
* To avoid code running on other OMAPs in
|
||||
* multi-omap builds
|
||||
*/
|
||||
if (!cpu_is_omap44xx())
|
||||
return -ENOMEM;
|
||||
|
||||
/* Static mapping, never released */
|
||||
sar_ram_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_16K);
|
||||
if (WARN_ON(!sar_ram_base))
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
early_initcall(omap4_sar_ram_init);
|
||||
|
|
|
@ -0,0 +1,50 @@
|
|||
/*
|
||||
* omap4-sar-layout.h: OMAP4 SAR RAM layout header file
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments, Inc.
|
||||
* Santosh Shilimkar <santosh.shilimkar@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#ifndef OMAP_ARCH_OMAP4_SAR_LAYOUT_H
|
||||
#define OMAP_ARCH_OMAP4_SAR_LAYOUT_H
|
||||
|
||||
/*
|
||||
* SAR BANK offsets from base address OMAP44XX_SAR_RAM_BASE
|
||||
*/
|
||||
#define SAR_BANK1_OFFSET 0x0000
|
||||
#define SAR_BANK2_OFFSET 0x1000
|
||||
#define SAR_BANK3_OFFSET 0x2000
|
||||
#define SAR_BANK4_OFFSET 0x3000
|
||||
|
||||
/* Scratch pad memory offsets from SAR_BANK1 */
|
||||
#define SCU_OFFSET0 0xd00
|
||||
#define SCU_OFFSET1 0xd04
|
||||
#define OMAP_TYPE_OFFSET 0xd10
|
||||
#define L2X0_SAVE_OFFSET0 0xd14
|
||||
#define L2X0_SAVE_OFFSET1 0xd18
|
||||
#define L2X0_AUXCTRL_OFFSET 0xd1c
|
||||
#define L2X0_PREFETCH_CTRL_OFFSET 0xd20
|
||||
|
||||
/* CPUx Wakeup Non-Secure Physical Address offsets in SAR_BANK3 */
|
||||
#define CPU0_WAKEUP_NS_PA_ADDR_OFFSET 0xa04
|
||||
#define CPU1_WAKEUP_NS_PA_ADDR_OFFSET 0xa08
|
||||
|
||||
#define SAR_BACKUP_STATUS_OFFSET (SAR_BANK3_OFFSET + 0x500)
|
||||
#define SAR_SECURE_RAM_SIZE_OFFSET (SAR_BANK3_OFFSET + 0x504)
|
||||
#define SAR_SECRAM_SAVED_AT_OFFSET (SAR_BANK3_OFFSET + 0x508)
|
||||
|
||||
/* WakeUpGen save restore offset from OMAP44XX_SAR_RAM_BASE */
|
||||
#define WAKEUPGENENB_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x684)
|
||||
#define WAKEUPGENENB_SECURE_OFFSET_CPU0 (SAR_BANK3_OFFSET + 0x694)
|
||||
#define WAKEUPGENENB_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x6a4)
|
||||
#define WAKEUPGENENB_SECURE_OFFSET_CPU1 (SAR_BANK3_OFFSET + 0x6b4)
|
||||
#define AUXCOREBOOT0_OFFSET (SAR_BANK3_OFFSET + 0x6c4)
|
||||
#define AUXCOREBOOT1_OFFSET (SAR_BANK3_OFFSET + 0x6c8)
|
||||
#define PTMSYNCREQ_MASK_OFFSET (SAR_BANK3_OFFSET + 0x6cc)
|
||||
#define PTMSYNCREQ_EN_OFFSET (SAR_BANK3_OFFSET + 0x6d0)
|
||||
#define SAR_BACKUP_STATUS_WAKEUPGEN 0x10
|
||||
|
||||
#endif
|
|
@ -136,6 +136,7 @@
|
|||
#include <linux/list.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "common.h"
|
||||
#include <plat/cpu.h>
|
||||
|
@ -380,6 +381,51 @@ static int _set_module_autoidle(struct omap_hwmod *oh, u8 autoidle,
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* _set_idle_ioring_wakeup - enable/disable IO pad wakeup on hwmod idle for mux
|
||||
* @oh: struct omap_hwmod *
|
||||
* @set_wake: bool value indicating to set (true) or clear (false) wakeup enable
|
||||
*
|
||||
* Set or clear the I/O pad wakeup flag in the mux entries for the
|
||||
* hwmod @oh. This function changes the @oh->mux->pads_dynamic array
|
||||
* in memory. If the hwmod is currently idled, and the new idle
|
||||
* values don't match the previous ones, this function will also
|
||||
* update the SCM PADCTRL registers. Otherwise, if the hwmod is not
|
||||
* currently idled, this function won't touch the hardware: the new
|
||||
* mux settings are written to the SCM PADCTRL registers when the
|
||||
* hwmod is idled. No return value.
|
||||
*/
|
||||
static void _set_idle_ioring_wakeup(struct omap_hwmod *oh, bool set_wake)
|
||||
{
|
||||
struct omap_device_pad *pad;
|
||||
bool change = false;
|
||||
u16 prev_idle;
|
||||
int j;
|
||||
|
||||
if (!oh->mux || !oh->mux->enabled)
|
||||
return;
|
||||
|
||||
for (j = 0; j < oh->mux->nr_pads_dynamic; j++) {
|
||||
pad = oh->mux->pads_dynamic[j];
|
||||
|
||||
if (!(pad->flags & OMAP_DEVICE_PAD_WAKEUP))
|
||||
continue;
|
||||
|
||||
prev_idle = pad->idle;
|
||||
|
||||
if (set_wake)
|
||||
pad->idle |= OMAP_WAKEUP_EN;
|
||||
else
|
||||
pad->idle &= ~OMAP_WAKEUP_EN;
|
||||
|
||||
if (prev_idle != pad->idle)
|
||||
change = true;
|
||||
}
|
||||
|
||||
if (change && oh->_state == _HWMOD_STATE_IDLE)
|
||||
omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE);
|
||||
}
|
||||
|
||||
/**
|
||||
* _enable_wakeup: set OCP_SYSCONFIG.ENAWAKEUP bit in the hardware
|
||||
* @oh: struct omap_hwmod *
|
||||
|
@ -1449,6 +1495,25 @@ static int _enable(struct omap_hwmod *oh)
|
|||
|
||||
pr_debug("omap_hwmod: %s: enabling\n", oh->name);
|
||||
|
||||
/*
|
||||
* hwmods with HWMOD_INIT_NO_IDLE flag set are left
|
||||
* in enabled state at init.
|
||||
* Now that someone is really trying to enable them,
|
||||
* just ensure that the hwmod mux is set.
|
||||
*/
|
||||
if (oh->_int_flags & _HWMOD_SKIP_ENABLE) {
|
||||
/*
|
||||
* If the caller has mux data populated, do the mux'ing
|
||||
* which wouldn't have been done as part of the _enable()
|
||||
* done during setup.
|
||||
*/
|
||||
if (oh->mux)
|
||||
omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED);
|
||||
|
||||
oh->_int_flags &= ~_HWMOD_SKIP_ENABLE;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (oh->_state != _HWMOD_STATE_INITIALIZED &&
|
||||
oh->_state != _HWMOD_STATE_IDLE &&
|
||||
oh->_state != _HWMOD_STATE_DISABLED) {
|
||||
|
@ -1744,8 +1809,10 @@ static int _setup(struct omap_hwmod *oh, void *data)
|
|||
* it should be set by the core code as a runtime flag during startup
|
||||
*/
|
||||
if ((oh->flags & HWMOD_INIT_NO_IDLE) &&
|
||||
(postsetup_state == _HWMOD_STATE_IDLE))
|
||||
(postsetup_state == _HWMOD_STATE_IDLE)) {
|
||||
oh->_int_flags |= _HWMOD_SKIP_ENABLE;
|
||||
postsetup_state = _HWMOD_STATE_ENABLED;
|
||||
}
|
||||
|
||||
if (postsetup_state == _HWMOD_STATE_IDLE)
|
||||
_idle(oh);
|
||||
|
@ -2416,6 +2483,7 @@ int omap_hwmod_enable_wakeup(struct omap_hwmod *oh)
|
|||
v = oh->_sysc_cache;
|
||||
_enable_wakeup(oh, &v);
|
||||
_write_sysconfig(v, oh);
|
||||
_set_idle_ioring_wakeup(oh, true);
|
||||
spin_unlock_irqrestore(&oh->_lock, flags);
|
||||
|
||||
return 0;
|
||||
|
@ -2446,6 +2514,7 @@ int omap_hwmod_disable_wakeup(struct omap_hwmod *oh)
|
|||
v = oh->_sysc_cache;
|
||||
_disable_wakeup(oh, &v);
|
||||
_write_sysconfig(v, oh);
|
||||
_set_idle_ioring_wakeup(oh, false);
|
||||
spin_unlock_irqrestore(&oh->_lock, flags);
|
||||
|
||||
return 0;
|
||||
|
@ -2662,3 +2731,57 @@ int omap_hwmod_no_setup_reset(struct omap_hwmod *oh)
|
|||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* omap_hwmod_pad_route_irq - route an I/O pad wakeup to a particular MPU IRQ
|
||||
* @oh: struct omap_hwmod * containing hwmod mux entries
|
||||
* @pad_idx: array index in oh->mux of the hwmod mux entry to route wakeup
|
||||
* @irq_idx: the hwmod mpu_irqs array index of the IRQ to trigger on wakeup
|
||||
*
|
||||
* When an I/O pad wakeup arrives for the dynamic or wakeup hwmod mux
|
||||
* entry number @pad_idx for the hwmod @oh, trigger the interrupt
|
||||
* service routine for the hwmod's mpu_irqs array index @irq_idx. If
|
||||
* this function is not called for a given pad_idx, then the ISR
|
||||
* associated with @oh's first MPU IRQ will be triggered when an I/O
|
||||
* pad wakeup occurs on that pad. Note that @pad_idx is the index of
|
||||
* the _dynamic or wakeup_ entry: if there are other entries not
|
||||
* marked with OMAP_DEVICE_PAD_WAKEUP or OMAP_DEVICE_PAD_REMUX, these
|
||||
* entries are NOT COUNTED in the dynamic pad index. This function
|
||||
* must be called separately for each pad that requires its interrupt
|
||||
* to be re-routed this way. Returns -EINVAL if there is an argument
|
||||
* problem or if @oh does not have hwmod mux entries or MPU IRQs;
|
||||
* returns -ENOMEM if memory cannot be allocated; or 0 upon success.
|
||||
*
|
||||
* XXX This function interface is fragile. Rather than using array
|
||||
* indexes, which are subject to unpredictable change, it should be
|
||||
* using hwmod IRQ names, and some other stable key for the hwmod mux
|
||||
* pad records.
|
||||
*/
|
||||
int omap_hwmod_pad_route_irq(struct omap_hwmod *oh, int pad_idx, int irq_idx)
|
||||
{
|
||||
int nr_irqs;
|
||||
|
||||
might_sleep();
|
||||
|
||||
if (!oh || !oh->mux || !oh->mpu_irqs || pad_idx < 0 ||
|
||||
pad_idx >= oh->mux->nr_pads_dynamic)
|
||||
return -EINVAL;
|
||||
|
||||
/* Check the number of available mpu_irqs */
|
||||
for (nr_irqs = 0; oh->mpu_irqs[nr_irqs].irq >= 0; nr_irqs++)
|
||||
;
|
||||
|
||||
if (irq_idx >= nr_irqs)
|
||||
return -EINVAL;
|
||||
|
||||
if (!oh->mux->irqs) {
|
||||
/* XXX What frees this? */
|
||||
oh->mux->irqs = kzalloc(sizeof(int) * oh->mux->nr_pads_dynamic,
|
||||
GFP_KERNEL);
|
||||
if (!oh->mux->irqs)
|
||||
return -ENOMEM;
|
||||
}
|
||||
oh->mux->irqs[pad_idx] = irq_idx;
|
||||
|
||||
return 0;
|
||||
}
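
A hedged usage sketch for board or driver integration code: route the first dynamic wakeup pad of a hwmod to its first MPU IRQ. The hwmod name and both indices are illustrative:

	struct omap_hwmod *oh = omap_hwmod_lookup("uart1");

	if (oh)
		omap_hwmod_pad_route_irq(oh, 0, 0);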
|
||||
|
|
|
@ -21,6 +21,7 @@ extern void omap_sram_idle(void);
|
|||
extern int omap3_can_sleep(void);
|
||||
extern int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state);
|
||||
extern int omap3_idle_init(void);
|
||||
extern int omap4_idle_init(void);
|
||||
|
||||
#if defined(CONFIG_PM_OPP)
|
||||
extern int omap3_opp_init(void);
|
||||
|
|
|
@ -30,7 +30,6 @@
|
|||
#include <linux/irq.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/console.h>
|
||||
|
||||
#include <asm/mach/time.h>
|
||||
#include <asm/mach/irq.h>
|
||||
|
@ -127,27 +126,11 @@ static void omap2_enter_full_retention(void)
|
|||
if (omap_irq_pending())
|
||||
goto no_sleep;
|
||||
|
||||
/* Block console output in case it is on one of the OMAP UARTs */
|
||||
if (!is_suspending())
|
||||
if (!console_trylock())
|
||||
goto no_sleep;
|
||||
|
||||
omap_uart_prepare_idle(0);
|
||||
omap_uart_prepare_idle(1);
|
||||
omap_uart_prepare_idle(2);
|
||||
|
||||
/* Jump to SRAM suspend code */
|
||||
omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL),
|
||||
OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL),
|
||||
OMAP_SDRC_REGADDR(SDRC_POWER));
|
||||
|
||||
omap_uart_resume_idle(2);
|
||||
omap_uart_resume_idle(1);
|
||||
omap_uart_resume_idle(0);
|
||||
|
||||
if (!is_suspending())
|
||||
console_unlock();
|
||||
|
||||
no_sleep:
|
||||
omap2_gpio_resume_after_idle();
|
||||
|
||||
|
@ -239,8 +222,6 @@ static int omap2_can_sleep(void)
|
|||
{
|
||||
if (omap2_fclks_active())
|
||||
return 0;
|
||||
if (!omap_uart_can_sleep())
|
||||
return 0;
|
||||
if (osc_ck->usecount > 1)
|
||||
return 0;
|
||||
if (omap_dma_running())
|
||||
|
@ -291,7 +272,6 @@ static int omap2_pm_suspend(void)
|
|||
mir1 = omap_readl(0x480fe0a4);
|
||||
omap_writel(1 << 5, 0x480fe0ac);
|
||||
|
||||
omap_uart_prepare_suspend();
|
||||
omap2_enter_full_retention();
|
||||
|
||||
omap_writel(mir1, 0x480fe0a4);
|
||||
|
|
|
@ -28,7 +28,6 @@
|
|||
#include <linux/clk.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/console.h>
|
||||
#include <trace/events/power.h>
|
||||
|
||||
#include <asm/suspend.h>
|
||||
|
@ -36,7 +35,6 @@
|
|||
#include <plat/sram.h>
|
||||
#include "clockdomain.h"
|
||||
#include "powerdomain.h"
|
||||
#include <plat/serial.h>
|
||||
#include <plat/sdrc.h>
|
||||
#include <plat/prcm.h>
|
||||
#include <plat/gpmc.h>
|
||||
|
@ -54,15 +52,6 @@
|
|||
|
||||
#ifdef CONFIG_SUSPEND
|
||||
static suspend_state_t suspend_state = PM_SUSPEND_ON;
|
||||
static inline bool is_suspending(void)
|
||||
{
|
||||
return (suspend_state != PM_SUSPEND_ON) && console_suspend_enabled;
|
||||
}
|
||||
#else
|
||||
static inline bool is_suspending(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* pm34xx errata defined in pm.h */
|
||||
|
@ -195,7 +184,7 @@ static void omap3_save_secure_ram_context(void)
|
|||
* that any peripheral wake-up events occurring while attempting to
|
||||
* clear the PM_WKST_x are detected and cleared.
|
||||
*/
|
||||
static int prcm_clear_mod_irqs(s16 module, u8 regs)
|
||||
static int prcm_clear_mod_irqs(s16 module, u8 regs, u32 ignore_bits)
|
||||
{
|
||||
u32 wkst, fclk, iclk, clken;
|
||||
u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
|
||||
|
@ -207,6 +196,7 @@ static int prcm_clear_mod_irqs(s16 module, u8 regs)
|
|||
|
||||
wkst = omap2_prm_read_mod_reg(module, wkst_off);
|
||||
wkst &= omap2_prm_read_mod_reg(module, grpsel_off);
|
||||
wkst &= ~ignore_bits;
|
||||
if (wkst) {
|
||||
iclk = omap2_cm_read_mod_reg(module, iclk_off);
|
||||
fclk = omap2_cm_read_mod_reg(module, fclk_off);
|
||||
|
@ -222,6 +212,7 @@ static int prcm_clear_mod_irqs(s16 module, u8 regs)
|
|||
omap2_cm_set_mod_reg_bits(clken, module, fclk_off);
|
||||
omap2_prm_write_mod_reg(wkst, module, wkst_off);
|
||||
wkst = omap2_prm_read_mod_reg(module, wkst_off);
|
||||
wkst &= ~ignore_bits;
|
||||
c++;
|
||||
}
|
||||
omap2_cm_write_mod_reg(iclk, module, iclk_off);
|
||||
|
@ -231,76 +222,35 @@ static int prcm_clear_mod_irqs(s16 module, u8 regs)
|
|||
return c;
|
||||
}
|
||||
|
||||
static int _prcm_int_handle_wakeup(void)
|
||||
static irqreturn_t _prcm_int_handle_io(int irq, void *unused)
|
||||
{
|
||||
int c;
|
||||
|
||||
c = prcm_clear_mod_irqs(WKUP_MOD, 1);
|
||||
c += prcm_clear_mod_irqs(CORE_MOD, 1);
|
||||
c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
|
||||
if (omap_rev() > OMAP3430_REV_ES1_0) {
|
||||
c += prcm_clear_mod_irqs(CORE_MOD, 3);
|
||||
c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
|
||||
}
|
||||
c = prcm_clear_mod_irqs(WKUP_MOD, 1,
|
||||
~(OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK));
|
||||
|
||||
return c;
|
||||
return c ? IRQ_HANDLED : IRQ_NONE;
|
||||
}
|
||||
|
||||
/*
|
||||
* PRCM Interrupt Handler
|
||||
*
|
||||
* The PRM_IRQSTATUS_MPU register indicates if there are any pending
|
||||
* interrupts from the PRCM for the MPU. These bits must be cleared in
|
||||
* order to clear the PRCM interrupt. The PRCM interrupt handler is
|
||||
* implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
|
||||
* the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
|
||||
* register indicates that a wake-up event is pending for the MPU and
|
||||
* this bit can only be cleared if the all the wake-up events latched
|
||||
* in the various PM_WKST_x registers have been cleared. The interrupt
|
||||
* handler is implemented using a do-while loop so that if a wake-up
|
||||
* event occurred during the processing of the prcm interrupt handler
|
||||
* (setting a bit in the corresponding PM_WKST_x register and thus
|
||||
* preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
|
||||
* this would be handled.
|
||||
*/
|
||||
static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
|
||||
static irqreturn_t _prcm_int_handle_wakeup(int irq, void *unused)
|
||||
{
|
||||
u32 irqenable_mpu, irqstatus_mpu;
|
||||
int c = 0;
|
||||
int c;
|
||||
|
||||
irqenable_mpu = omap2_prm_read_mod_reg(OCP_MOD,
|
||||
OMAP3_PRM_IRQENABLE_MPU_OFFSET);
|
||||
irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
|
||||
OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
|
||||
irqstatus_mpu &= irqenable_mpu;
|
||||
/*
|
||||
* Clear all except ST_IO and ST_IO_CHAIN for the wkup module;
* these are handled in a separate handler to avoid acking
* IO events before they are parsed by the mux code.
|
||||
*/
|
||||
c = prcm_clear_mod_irqs(WKUP_MOD, 1,
|
||||
OMAP3430_ST_IO_MASK | OMAP3430_ST_IO_CHAIN_MASK);
|
||||
c += prcm_clear_mod_irqs(CORE_MOD, 1, 0);
|
||||
c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1, 0);
|
||||
if (omap_rev() > OMAP3430_REV_ES1_0) {
|
||||
c += prcm_clear_mod_irqs(CORE_MOD, 3, 0);
|
||||
c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1, 0);
|
||||
}
|
||||
|
||||
do {
|
||||
if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
|
||||
OMAP3430_IO_ST_MASK)) {
|
||||
c = _prcm_int_handle_wakeup();
|
||||
|
||||
/*
|
||||
* Is the MPU PRCM interrupt handler racing with the
|
||||
* IVA2 PRCM interrupt handler ?
|
||||
*/
|
||||
WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
|
||||
"but no wakeup sources are marked\n");
|
||||
} else {
|
||||
/* XXX we need to expand our PRCM interrupt handler */
|
||||
WARN(1, "prcm: WARNING: PRCM interrupt received, but "
|
||||
"no code to handle it (%08x)\n", irqstatus_mpu);
|
||||
}
|
||||
|
||||
omap2_prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
|
||||
OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
|
||||
|
||||
irqstatus_mpu = omap2_prm_read_mod_reg(OCP_MOD,
|
||||
OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
|
||||
irqstatus_mpu &= irqenable_mpu;
|
||||
|
||||
} while (irqstatus_mpu);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
return c ? IRQ_HANDLED : IRQ_NONE;
|
||||
}
|
||||
|
||||
static void omap34xx_save_context(u32 *save)
|
||||
|
@ -376,20 +326,11 @@ void omap_sram_idle(void)
|
|||
omap3_enable_io_chain();
|
||||
}
|
||||
|
||||
/* Block console output in case it is on one of the OMAP UARTs */
|
||||
if (!is_suspending())
|
||||
if (per_next_state < PWRDM_POWER_ON ||
|
||||
core_next_state < PWRDM_POWER_ON)
|
||||
if (!console_trylock())
|
||||
goto console_still_active;
|
||||
|
||||
pwrdm_pre_transition();
|
||||
|
||||
/* PER */
|
||||
if (per_next_state < PWRDM_POWER_ON) {
|
||||
per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0;
|
||||
omap_uart_prepare_idle(2);
|
||||
omap_uart_prepare_idle(3);
|
||||
omap2_gpio_prepare_for_idle(per_going_off);
|
||||
if (per_next_state == PWRDM_POWER_OFF)
|
||||
omap3_per_save_context();
|
||||
|
@ -397,8 +338,6 @@ void omap_sram_idle(void)
|
|||
|
||||
/* CORE */
|
||||
if (core_next_state < PWRDM_POWER_ON) {
|
||||
omap_uart_prepare_idle(0);
|
||||
omap_uart_prepare_idle(1);
|
||||
if (core_next_state == PWRDM_POWER_OFF) {
|
||||
omap3_core_save_context();
|
||||
omap3_cm_save_context();
|
||||
|
@ -447,8 +386,6 @@ void omap_sram_idle(void)
|
|||
omap3_sram_restore_context();
|
||||
omap2_sms_restore_context();
|
||||
}
|
||||
omap_uart_resume_idle(0);
|
||||
omap_uart_resume_idle(1);
|
||||
if (core_next_state == PWRDM_POWER_OFF)
|
||||
omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
|
||||
OMAP3430_GR_MOD,
|
||||
|
@ -464,14 +401,8 @@ void omap_sram_idle(void)
|
|||
omap2_gpio_resume_after_idle();
|
||||
if (per_prev_state == PWRDM_POWER_OFF)
|
||||
omap3_per_restore_context();
|
||||
omap_uart_resume_idle(2);
|
||||
omap_uart_resume_idle(3);
|
||||
}
|
||||
|
||||
if (!is_suspending())
|
||||
console_unlock();
|
||||
|
||||
console_still_active:
|
||||
/* Disable IO-PAD and IO-CHAIN wakeup */
|
||||
if (omap3_has_io_wakeup() &&
|
||||
(per_next_state < PWRDM_POWER_ON ||
|
||||
|
@ -485,21 +416,11 @@ console_still_active:
|
|||
clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
|
||||
}
|
||||
|
||||
int omap3_can_sleep(void)
|
||||
{
|
||||
if (!omap_uart_can_sleep())
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void omap3_pm_idle(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
local_fiq_disable();
|
||||
|
||||
if (!omap3_can_sleep())
|
||||
goto out;
|
||||
|
||||
if (omap_irq_pending() || need_resched())
|
||||
goto out;
|
||||
|
||||
|
@ -533,7 +454,6 @@ static int omap3_pm_suspend(void)
|
|||
goto restore;
|
||||
}
|
||||
|
||||
omap_uart_prepare_suspend();
|
||||
omap3_intc_suspend();
|
||||
|
||||
omap_sram_idle();
|
||||
|
@ -580,22 +500,27 @@ static int omap3_pm_begin(suspend_state_t state)
|
|||
{
|
||||
disable_hlt();
|
||||
suspend_state = state;
|
||||
omap_uart_enable_irqs(0);
|
||||
omap_prcm_irq_prepare();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void omap3_pm_end(void)
|
||||
{
|
||||
suspend_state = PM_SUSPEND_ON;
|
||||
omap_uart_enable_irqs(1);
|
||||
enable_hlt();
|
||||
return;
|
||||
}
|
||||
|
||||
static void omap3_pm_finish(void)
|
||||
{
|
||||
omap_prcm_irq_complete();
|
||||
}
|
||||
|
||||
static const struct platform_suspend_ops omap_pm_ops = {
|
||||
.begin = omap3_pm_begin,
|
||||
.end = omap3_pm_end,
|
||||
.enter = omap3_pm_enter,
|
||||
.finish = omap3_pm_finish,
|
||||
.valid = suspend_valid_only_mem,
|
||||
};
|
||||
#endif /* CONFIG_SUSPEND */
|
||||
|
@ -701,10 +626,6 @@ static void __init prcm_setup_regs(void)
|
|||
OMAP3430_GRPSEL_GPT1_MASK |
|
||||
OMAP3430_GRPSEL_GPT12_MASK,
|
||||
WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
|
||||
/* For some reason IO doesn't generate wakeup event even if
|
||||
* it is selected to mpu wakeup goup */
|
||||
omap2_prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
|
||||
OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
|
||||
|
||||
/* Enable PM_WKEN to support DSS LPR */
|
||||
omap2_prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
|
||||
|
@ -881,12 +802,21 @@ static int __init omap3_pm_init(void)
|
|||
* supervised mode for powerdomains */
|
||||
prcm_setup_regs();
|
||||
|
||||
ret = request_irq(INT_34XX_PRCM_MPU_IRQ,
|
||||
(irq_handler_t)prcm_interrupt_handler,
|
||||
IRQF_DISABLED, "prcm", NULL);
|
||||
ret = request_irq(omap_prcm_event_to_irq("wkup"),
|
||||
_prcm_int_handle_wakeup, IRQF_NO_SUSPEND, "pm_wkup", NULL);
|
||||
|
||||
if (ret) {
|
||||
printk(KERN_ERR "request_irq failed to register for 0x%x\n",
|
||||
INT_34XX_PRCM_MPU_IRQ);
|
||||
pr_err("pm: Failed to request pm_wkup irq\n");
|
||||
goto err1;
|
||||
}
|
||||
|
||||
/* IO interrupt is shared with mux code */
|
||||
ret = request_irq(omap_prcm_event_to_irq("io"),
|
||||
_prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
|
||||
omap3_pm_init);
|
||||
|
||||
if (ret) {
|
||||
pr_err("pm: Failed to request pm_io irq\n");
|
||||
goto err1;
|
||||
}
|
||||
|
||||
|
|
|
@ -1,8 +1,9 @@
|
|||
/*
|
||||
* OMAP4 Power Management Routines
|
||||
*
|
||||
* Copyright (C) 2010 Texas Instruments, Inc.
|
||||
* Copyright (C) 2010-2011 Texas Instruments, Inc.
|
||||
* Rajendra Nayak <rnayak@ti.com>
|
||||
* Santosh Shilimkar <santosh.shilimkar@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
|
@ -17,13 +18,16 @@
|
|||
#include <linux/slab.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "clockdomain.h"
|
||||
#include "powerdomain.h"
|
||||
#include "pm.h"
|
||||
|
||||
struct power_state {
|
||||
struct powerdomain *pwrdm;
|
||||
u32 next_state;
|
||||
#ifdef CONFIG_SUSPEND
|
||||
u32 saved_state;
|
||||
u32 saved_logic_state;
|
||||
#endif
|
||||
struct list_head node;
|
||||
};
|
||||
|
@ -33,7 +37,50 @@ static LIST_HEAD(pwrst_list);
|
|||
#ifdef CONFIG_SUSPEND
|
||||
static int omap4_pm_suspend(void)
|
||||
{
|
||||
do_wfi();
|
||||
struct power_state *pwrst;
|
||||
int state, ret = 0;
|
||||
u32 cpu_id = smp_processor_id();
|
||||
|
||||
/* Save current powerdomain state */
|
||||
list_for_each_entry(pwrst, &pwrst_list, node) {
|
||||
pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
|
||||
pwrst->saved_logic_state = pwrdm_read_logic_retst(pwrst->pwrdm);
|
||||
}
|
||||
|
||||
/* Set targeted power domain states by suspend */
|
||||
list_for_each_entry(pwrst, &pwrst_list, node) {
|
||||
omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
|
||||
pwrdm_set_logic_retst(pwrst->pwrdm, PWRDM_POWER_OFF);
|
||||
}
|
||||
|
||||
/*
|
||||
* For MPUSS to hit power domain retention (CSWR or OSWR),
* the CPU0 and CPU1 power domains need to be in OFF or DORMANT state,
* since CPU power domain CSWR is not supported by the hardware.
* Only the master CPU follows the suspend path; all other CPUs follow
* the CPU hotplug path in system-wide suspend.
* More details can be found in OMAP4430 TRM section 4.3.4.2.
|
||||
*/
|
||||
omap4_enter_lowpower(cpu_id, PWRDM_POWER_OFF);
|
||||
|
||||
/* Restore next powerdomain state */
|
||||
list_for_each_entry(pwrst, &pwrst_list, node) {
|
||||
state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
|
||||
if (state > pwrst->next_state) {
|
||||
pr_info("Powerdomain (%s) didn't enter "
|
||||
"target state %d\n",
|
||||
pwrst->pwrdm->name, pwrst->next_state);
|
||||
ret = -1;
|
||||
}
|
||||
omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
|
||||
pwrdm_set_logic_retst(pwrst->pwrdm, pwrst->saved_logic_state);
|
||||
}
|
||||
if (ret)
|
||||
pr_crit("Could not enter target state in pm_suspend\n");
|
||||
else
|
||||
pr_info("Successfully put all powerdomains to target state\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -73,6 +120,22 @@ static const struct platform_suspend_ops omap_pm_ops = {
|
|||
};
|
||||
#endif /* CONFIG_SUSPEND */
|
||||
|
||||
/*
|
||||
* Enable hardware supervised mode for all clockdomains if it's
|
||||
* supported. Initiate sleep transition for other clockdomains, if
|
||||
* they are not used
|
||||
*/
|
||||
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
|
||||
{
|
||||
if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
|
||||
clkdm_allow_idle(clkdm);
|
||||
else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
|
||||
atomic_read(&clkdm->usecount) == 0)
|
||||
clkdm_sleep(clkdm);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
|
||||
{
|
||||
struct power_state *pwrst;
|
||||
|
@ -80,14 +143,48 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
|
|||
if (!pwrdm->pwrsts)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Skip CPU0 and CPU1 power domains. CPU1 is programmed
* through the hotplug path and CPU0 is explicitly programmed
* further down in the code path.
|
||||
*/
|
||||
if (!strncmp(pwrdm->name, "cpu", 3))
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* FIXME: Remove this check when core retention is supported
|
||||
* Only MPUSS power domain is added in the list.
|
||||
*/
|
||||
if (strcmp(pwrdm->name, "mpu_pwrdm"))
|
||||
return 0;
|
||||
|
||||
pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
|
||||
if (!pwrst)
|
||||
return -ENOMEM;
|
||||
|
||||
pwrst->pwrdm = pwrdm;
|
||||
pwrst->next_state = PWRDM_POWER_ON;
|
||||
pwrst->next_state = PWRDM_POWER_RET;
|
||||
list_add(&pwrst->node, &pwrst_list);
|
||||
|
||||
return pwrdm_set_next_pwrst(pwrst->pwrdm, pwrst->next_state);
|
||||
return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
|
||||
}
|
||||
|
||||
/**
|
||||
* omap_default_idle - OMAP4 default idle routine
|
||||
*
|
||||
* Implements OMAP4 memory, IO ordering requirements which can't be addressed
|
||||
* with default arch_idle() hook. Used by all CPUs with !CONFIG_CPUIDLE and
|
||||
* by secondary CPU with CONFIG_CPUIDLE.
|
||||
*/
|
||||
static void omap_default_idle(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
local_fiq_disable();
|
||||
|
||||
omap_do_wfi();
|
||||
|
||||
local_fiq_enable();
|
||||
local_irq_enable();
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -99,10 +196,17 @@ static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
|
|||
static int __init omap4_pm_init(void)
|
||||
{
|
||||
int ret;
|
||||
struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm;
|
||||
struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;
|
||||
|
||||
if (!cpu_is_omap44xx())
|
||||
return -ENODEV;
|
||||
|
||||
if (omap_rev() == OMAP4430_REV_ES1_0) {
|
||||
WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
pr_err("Power Management for TI OMAP4.\n");
|
||||
|
||||
ret = pwrdm_for_each(pwrdms_setup, NULL);
|
||||
|
@ -111,10 +215,51 @@ static int __init omap4_pm_init(void)
|
|||
goto err2;
|
||||
}
|
||||
|
||||
/*
|
||||
* The dynamic dependency between MPUSS -> MEMIF and
|
||||
* MPUSS -> L4_PER/L3_* and DUCATI -> L3_* doesn't work as
|
||||
* expected. The hardware recommendation is to enable static
|
||||
* dependencies for these to avoid system lock ups or random crashes.
|
||||
*/
|
||||
mpuss_clkdm = clkdm_lookup("mpuss_clkdm");
|
||||
emif_clkdm = clkdm_lookup("l3_emif_clkdm");
|
||||
l3_1_clkdm = clkdm_lookup("l3_1_clkdm");
|
||||
l3_2_clkdm = clkdm_lookup("l3_2_clkdm");
|
||||
l4_per_clkdm = clkdm_lookup("l4_per_clkdm");
|
||||
ducati_clkdm = clkdm_lookup("ducati_clkdm");
|
||||
if ((!mpuss_clkdm) || (!emif_clkdm) || (!l3_1_clkdm) ||
|
||||
(!l3_2_clkdm) || (!ducati_clkdm) || (!l4_per_clkdm))
|
||||
goto err2;
|
||||
|
||||
ret = clkdm_add_wkdep(mpuss_clkdm, emif_clkdm);
|
||||
ret |= clkdm_add_wkdep(mpuss_clkdm, l3_1_clkdm);
|
||||
ret |= clkdm_add_wkdep(mpuss_clkdm, l3_2_clkdm);
|
||||
ret |= clkdm_add_wkdep(mpuss_clkdm, l4_per_clkdm);
|
||||
ret |= clkdm_add_wkdep(ducati_clkdm, l3_1_clkdm);
|
||||
ret |= clkdm_add_wkdep(ducati_clkdm, l3_2_clkdm);
|
||||
if (ret) {
|
||||
pr_err("Failed to add MPUSS -> L3/EMIF/L4PER, DUCATI -> L3 "
|
||||
"wakeup dependency\n");
|
||||
goto err2;
|
||||
}
|
||||
|
||||
ret = omap4_mpuss_init();
|
||||
if (ret) {
|
||||
pr_err("Failed to initialise OMAP4 MPUSS\n");
|
||||
goto err2;
|
||||
}
|
||||
|
||||
(void) clkdm_for_each(clkdms_setup, NULL);
|
||||
|
||||
#ifdef CONFIG_SUSPEND
|
||||
suspend_set_ops(&omap_pm_ops);
|
||||
#endif /* CONFIG_SUSPEND */
|
||||
|
||||
/* Overwrite the default arch_idle() */
|
||||
pm_idle = omap_default_idle;
|
||||
|
||||
omap4_idle_init();
|
||||
|
||||
err2:
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
/*
|
||||
* OMAP2/3 PRCM base and module definitions
|
||||
*
|
||||
* Copyright (C) 2007-2009 Texas Instruments, Inc.
|
||||
* Copyright (C) 2007-2009, 2011 Texas Instruments, Inc.
|
||||
* Copyright (C) 2007-2009 Nokia Corporation
|
||||
*
|
||||
* Written by Paul Walmsley
|
||||
|
@ -410,6 +410,79 @@
|
|||
extern void __iomem *prm_base;
|
||||
extern void __iomem *cm_base;
|
||||
extern void __iomem *cm2_base;
|
||||
|
||||
/**
|
||||
* struct omap_prcm_irq - describes a PRCM interrupt bit
|
||||
* @name: a short name describing the interrupt type, e.g. "wkup" or "io"
|
||||
* @offset: the bit shift of the interrupt inside the IRQ{ENABLE,STATUS} regs
|
||||
* @priority: should this interrupt be handled before @priority=false IRQs?
|
||||
*
|
||||
* Describes interrupt bits inside the PRM_IRQ{ENABLE,STATUS}_MPU* registers.
|
||||
* On systems with multiple PRM MPU IRQ registers, the bitfields read from
|
||||
* the registers are concatenated, so @offset could be > 31 on these systems -
|
||||
* see omap_prm_irq_handler() for more details. I/O ring interrupts should
|
||||
* have @priority set to true.
|
||||
*/
|
||||
struct omap_prcm_irq {
|
||||
const char *name;
|
||||
unsigned int offset;
|
||||
bool priority;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct omap_prcm_irq_setup - PRCM interrupt controller details
|
||||
* @ack: PRM register offset for the first PRM_IRQSTATUS_MPU register
|
||||
* @mask: PRM register offset for the first PRM_IRQENABLE_MPU register
|
||||
* @nr_regs: number of PRM_IRQ{STATUS,ENABLE}_MPU* registers
|
||||
* @nr_irqs: number of entries in the @irqs array
|
||||
* @irqs: ptr to an array of PRCM interrupt bits (see @nr_irqs)
|
||||
* @irq: MPU IRQ asserted when a PRCM interrupt arrives
|
||||
* @read_pending_irqs: fn ptr to determine if any PRCM IRQs are pending
|
||||
* @ocp_barrier: fn ptr to force buffered PRM writes to complete
|
||||
* @save_and_clear_irqen: fn ptr to save and clear IRQENABLE regs
|
||||
* @restore_irqen: fn ptr to restore the IRQENABLE regs from a saved mask
|
||||
* @saved_mask: IRQENABLE regs are saved here during suspend
|
||||
* @priority_mask: 1 bit per IRQ, set to 1 if omap_prcm_irq.priority = true
|
||||
* @base_irq: base dynamic IRQ number, returned from irq_alloc_descs() in init
|
||||
* @suspended: set to true after Linux suspend code has called our ->prepare()
|
||||
* @suspend_save_flag: set to true after IRQ masks have been saved and disabled
|
||||
*
|
||||
* @saved_mask, @priority_mask, @base_irq, @suspended, and
|
||||
* @suspend_save_flag are populated dynamically, and are not to be
|
||||
* specified in static initializers.
|
||||
*/
|
||||
struct omap_prcm_irq_setup {
|
||||
u16 ack;
|
||||
u16 mask;
|
||||
u8 nr_regs;
|
||||
u8 nr_irqs;
|
||||
const struct omap_prcm_irq *irqs;
|
||||
int irq;
|
||||
void (*read_pending_irqs)(unsigned long *events);
|
||||
void (*ocp_barrier)(void);
|
||||
void (*save_and_clear_irqen)(u32 *saved_mask);
|
||||
void (*restore_irqen)(u32 *saved_mask);
|
||||
u32 *saved_mask;
|
||||
u32 *priority_mask;
|
||||
int base_irq;
|
||||
bool suspended;
|
||||
bool suspend_save_flag;
|
||||
};
|
||||
|
||||
/* OMAP_PRCM_IRQ: convenience macro for creating struct omap_prcm_irq records */
|
||||
#define OMAP_PRCM_IRQ(_name, _offset, _priority) { \
|
||||
.name = _name, \
|
||||
.offset = _offset, \
|
||||
.priority = _priority \
|
||||
}
|
||||
|
||||
extern void omap_prcm_irq_cleanup(void);
|
||||
extern int omap_prcm_register_chain_handler(
|
||||
struct omap_prcm_irq_setup *irq_setup);
|
||||
extern int omap_prcm_event_to_irq(const char *event);
|
||||
extern void omap_prcm_irq_prepare(void);
|
||||
extern void omap_prcm_irq_complete(void);
|
||||
|
||||
# endif
|
||||
|
||||
#endif
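The header declarations above only expose the chain-handler API; a consumer is expected to map a PRCM event name to a Linux IRQ number with omap_prcm_event_to_irq() and then register a handler on it. The following is a minimal sketch of such a consumer, assuming <linux/interrupt.h>; the handler body, the init hook name, and the choice of the "io" event are illustrative assumptions, not part of this patch.

#include <linux/init.h>
#include <linux/interrupt.h>

/* Illustrative consumer of the PRCM chain handler; names are assumptions. */
static irqreturn_t board_prcm_io_handler(int irq, void *unused)
{
        /* Clear the SoC-specific wakeup source here, then return. */
        return IRQ_HANDLED;
}

static int __init board_prcm_io_irq_init(void)
{
        int irq = omap_prcm_event_to_irq("io");

        if (irq < 0)
                return irq;     /* chain handler not registered on this SoC */

        return request_irq(irq, board_prcm_io_handler, 0, "prcm_io", NULL);
}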
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*
|
||||
* OMAP2/3 PRM module functions
|
||||
*
|
||||
* Copyright (C) 2010 Texas Instruments, Inc.
|
||||
* Copyright (C) 2010-2011 Texas Instruments, Inc.
|
||||
* Copyright (C) 2010 Nokia Corporation
|
||||
* Benoît Cousson
|
||||
* Paul Walmsley
|
||||
|
@ -27,6 +27,24 @@
|
|||
#include "prm-regbits-24xx.h"
|
||||
#include "prm-regbits-34xx.h"
|
||||
|
||||
static const struct omap_prcm_irq omap3_prcm_irqs[] = {
|
||||
OMAP_PRCM_IRQ("wkup", 0, 0),
|
||||
OMAP_PRCM_IRQ("io", 9, 1),
|
||||
};
|
||||
|
||||
static struct omap_prcm_irq_setup omap3_prcm_irq_setup = {
|
||||
.ack = OMAP3_PRM_IRQSTATUS_MPU_OFFSET,
|
||||
.mask = OMAP3_PRM_IRQENABLE_MPU_OFFSET,
|
||||
.nr_regs = 1,
|
||||
.irqs = omap3_prcm_irqs,
|
||||
.nr_irqs = ARRAY_SIZE(omap3_prcm_irqs),
|
||||
.irq = INT_34XX_PRCM_MPU_IRQ,
|
||||
.read_pending_irqs = &omap3xxx_prm_read_pending_irqs,
|
||||
.ocp_barrier = &omap3xxx_prm_ocp_barrier,
|
||||
.save_and_clear_irqen = &omap3xxx_prm_save_and_clear_irqen,
|
||||
.restore_irqen = &omap3xxx_prm_restore_irqen,
|
||||
};
|
||||
|
||||
u32 omap2_prm_read_mod_reg(s16 module, u16 idx)
|
||||
{
|
||||
return __raw_readl(prm_base + module + idx);
|
||||
|
@ -212,3 +230,80 @@ u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset)
|
|||
{
|
||||
return omap2_prm_rmw_mod_reg_bits(mask, bits, OMAP3430_GR_MOD, offset);
|
||||
}
|
||||
|
||||
/**
|
||||
* omap3xxx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events
|
||||
* @events: ptr to a u32, preallocated by caller
|
||||
*
|
||||
* Read PRM_IRQSTATUS_MPU bits, AND'ed with the currently-enabled PRM
|
||||
* MPU IRQs, and store the result into the u32 pointed to by @events.
|
||||
* No return value.
|
||||
*/
|
||||
void omap3xxx_prm_read_pending_irqs(unsigned long *events)
|
||||
{
|
||||
u32 mask, st;
|
||||
|
||||
/* XXX Can the mask read be avoided (e.g., can it come from RAM?) */
|
||||
mask = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
|
||||
st = omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
|
||||
|
||||
events[0] = mask & st;
|
||||
}
|
||||
|
||||
/**
|
||||
* omap3xxx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete
|
||||
*
|
||||
* Force any buffered writes to the PRM IP block to complete. Needed
|
||||
* by the PRM IRQ handler, which reads and writes directly to the IP
|
||||
* block, to avoid race conditions after acknowledging or clearing IRQ
|
||||
* bits. No return value.
|
||||
*/
|
||||
void omap3xxx_prm_ocp_barrier(void)
|
||||
{
|
||||
omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET);
|
||||
}
|
||||
|
||||
/**
|
||||
* omap3xxx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU reg
|
||||
* @saved_mask: ptr to a u32 array to save IRQENABLE bits
|
||||
*
|
||||
* Save the PRM_IRQENABLE_MPU register to @saved_mask. @saved_mask
|
||||
* must be allocated by the caller. Intended to be used in the PRM
|
||||
* interrupt handler suspend callback. The OCP barrier is needed to
|
||||
* ensure the write to disable PRM interrupts reaches the PRM before
|
||||
* returning; otherwise, spurious interrupts might occur. No return
|
||||
* value.
|
||||
*/
|
||||
void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask)
|
||||
{
|
||||
saved_mask[0] = omap2_prm_read_mod_reg(OCP_MOD,
|
||||
OMAP3_PRM_IRQENABLE_MPU_OFFSET);
|
||||
omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
|
||||
|
||||
/* OCP barrier */
|
||||
omap2_prm_read_mod_reg(OCP_MOD, OMAP3_PRM_REVISION_OFFSET);
|
||||
}
|
||||
|
||||
/**
|
||||
* omap3xxx_prm_restore_irqen - set PRM_IRQENABLE_MPU register from args
|
||||
* @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously
|
||||
*
|
||||
* Restore the PRM_IRQENABLE_MPU register from @saved_mask. Intended
|
||||
* to be used in the PRM interrupt handler resume callback to restore
|
||||
* values saved by omap3xxx_prm_save_and_clear_irqen(). No OCP
|
||||
* barrier should be needed here; any pending PRM interrupts will fire
|
||||
* once the writes reach the PRM. No return value.
|
||||
*/
|
||||
void omap3xxx_prm_restore_irqen(u32 *saved_mask)
|
||||
{
|
||||
omap2_prm_write_mod_reg(saved_mask[0], OCP_MOD,
|
||||
OMAP3_PRM_IRQENABLE_MPU_OFFSET);
|
||||
}
|
||||
|
||||
static int __init omap3xxx_prcm_init(void)
|
||||
{
|
||||
if (cpu_is_omap34xx())
|
||||
return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
|
||||
return 0;
|
||||
}
|
||||
subsys_initcall(omap3xxx_prcm_init);
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*
|
||||
* OMAP2/3 Power/Reset Management (PRM) register definitions
|
||||
*
|
||||
* Copyright (C) 2007-2009 Texas Instruments, Inc.
|
||||
* Copyright (C) 2007-2009, 2011 Texas Instruments, Inc.
|
||||
* Copyright (C) 2008-2010 Nokia Corporation
|
||||
* Paul Walmsley
|
||||
*
|
||||
|
@ -314,6 +314,13 @@ void omap3_prm_vp_clear_txdone(u8 vp_id);
|
|||
extern u32 omap3_prm_vcvp_read(u8 offset);
|
||||
extern void omap3_prm_vcvp_write(u32 val, u8 offset);
|
||||
extern u32 omap3_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
|
||||
|
||||
/* PRM interrupt-related functions */
|
||||
extern void omap3xxx_prm_read_pending_irqs(unsigned long *events);
|
||||
extern void omap3xxx_prm_ocp_barrier(void);
|
||||
extern void omap3xxx_prm_save_and_clear_irqen(u32 *saved_mask);
|
||||
extern void omap3xxx_prm_restore_irqen(u32 *saved_mask);
|
||||
|
||||
#endif /* CONFIG_ARCH_OMAP4 */
|
||||
|
||||
#endif
|
||||
|
|
|
@ -27,6 +27,24 @@
|
|||
#include "prcm44xx.h"
|
||||
#include "prminst44xx.h"
|
||||
|
||||
static const struct omap_prcm_irq omap4_prcm_irqs[] = {
|
||||
OMAP_PRCM_IRQ("wkup", 0, 0),
|
||||
OMAP_PRCM_IRQ("io", 9, 1),
|
||||
};
|
||||
|
||||
static struct omap_prcm_irq_setup omap4_prcm_irq_setup = {
|
||||
.ack = OMAP4_PRM_IRQSTATUS_MPU_OFFSET,
|
||||
.mask = OMAP4_PRM_IRQENABLE_MPU_OFFSET,
|
||||
.nr_regs = 2,
|
||||
.irqs = omap4_prcm_irqs,
|
||||
.nr_irqs = ARRAY_SIZE(omap4_prcm_irqs),
|
||||
.irq = OMAP44XX_IRQ_PRCM,
|
||||
.read_pending_irqs = &omap44xx_prm_read_pending_irqs,
|
||||
.ocp_barrier = &omap44xx_prm_ocp_barrier,
|
||||
.save_and_clear_irqen = &omap44xx_prm_save_and_clear_irqen,
|
||||
.restore_irqen = &omap44xx_prm_restore_irqen,
|
||||
};
|
||||
|
||||
/* PRM low-level functions */
|
||||
|
||||
/* Read a register in a CM/PRM instance in the PRM module */
|
||||
|
@ -121,3 +139,101 @@ u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset)
|
|||
OMAP4430_PRM_DEVICE_INST,
|
||||
offset);
|
||||
}
|
||||
|
||||
static inline u32 _read_pending_irq_reg(u16 irqen_offs, u16 irqst_offs)
|
||||
{
|
||||
u32 mask, st;
|
||||
|
||||
/* XXX read mask from RAM? */
|
||||
mask = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, irqen_offs);
|
||||
st = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, irqst_offs);
|
||||
|
||||
return mask & st;
|
||||
}
|
||||
|
||||
/**
|
||||
* omap44xx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events
|
||||
* @events: ptr to two consecutive u32s, preallocated by caller
|
||||
*
|
||||
* Read PRM_IRQSTATUS_MPU* bits, AND'ed with the currently-enabled PRM
|
||||
* MPU IRQs, and store the result into the two u32s pointed to by @events.
|
||||
* No return value.
|
||||
*/
|
||||
void omap44xx_prm_read_pending_irqs(unsigned long *events)
|
||||
{
|
||||
events[0] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_OFFSET,
|
||||
OMAP4_PRM_IRQSTATUS_MPU_OFFSET);
|
||||
|
||||
events[1] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_2_OFFSET,
|
||||
OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET);
|
||||
}
|
||||
|
||||
/**
|
||||
* omap44xx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete
|
||||
*
|
||||
* Force any buffered writes to the PRM IP block to complete. Needed
|
||||
* by the PRM IRQ handler, which reads and writes directly to the IP
|
||||
* block, to avoid race conditions after acknowledging or clearing IRQ
|
||||
* bits. No return value.
|
||||
*/
|
||||
void omap44xx_prm_ocp_barrier(void)
|
||||
{
|
||||
omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
|
||||
OMAP4_REVISION_PRM_OFFSET);
|
||||
}
|
||||
|
||||
/**
|
||||
* omap44xx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU* regs
|
||||
* @saved_mask: ptr to a u32 array to save IRQENABLE bits
|
||||
*
|
||||
* Save the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers to
|
||||
* @saved_mask. @saved_mask must be allocated by the caller.
|
||||
* Intended to be used in the PRM interrupt handler suspend callback.
|
||||
* The OCP barrier is needed to ensure the write to disable PRM
|
||||
* interrupts reaches the PRM before returning; otherwise, spurious
|
||||
* interrupts might occur. No return value.
|
||||
*/
|
||||
void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask)
|
||||
{
|
||||
saved_mask[0] =
|
||||
omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
|
||||
OMAP4_PRM_IRQENABLE_MPU_OFFSET);
|
||||
saved_mask[1] =
|
||||
omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
|
||||
OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
|
||||
|
||||
omap4_prm_write_inst_reg(0, OMAP4430_PRM_DEVICE_INST,
|
||||
OMAP4_PRM_IRQENABLE_MPU_OFFSET);
|
||||
omap4_prm_write_inst_reg(0, OMAP4430_PRM_DEVICE_INST,
|
||||
OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
|
||||
|
||||
/* OCP barrier */
|
||||
omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST,
|
||||
OMAP4_REVISION_PRM_OFFSET);
|
||||
}
|
||||
|
||||
/**
|
||||
* omap44xx_prm_restore_irqen - set PRM_IRQENABLE_MPU* registers from args
|
||||
* @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously
|
||||
*
|
||||
* Restore the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers from
|
||||
* @saved_mask. Intended to be used in the PRM interrupt handler resume
|
||||
* callback to restore values saved by omap44xx_prm_save_and_clear_irqen().
|
||||
* No OCP barrier should be needed here; any pending PRM interrupts will fire
|
||||
* once the writes reach the PRM. No return value.
|
||||
*/
|
||||
void omap44xx_prm_restore_irqen(u32 *saved_mask)
|
||||
{
|
||||
omap4_prm_write_inst_reg(saved_mask[0], OMAP4430_PRM_DEVICE_INST,
|
||||
OMAP4_PRM_IRQENABLE_MPU_OFFSET);
|
||||
omap4_prm_write_inst_reg(saved_mask[1], OMAP4430_PRM_DEVICE_INST,
|
||||
OMAP4_PRM_IRQENABLE_MPU_2_OFFSET);
|
||||
}
|
||||
|
||||
static int __init omap4xxx_prcm_init(void)
|
||||
{
|
||||
if (cpu_is_omap44xx())
|
||||
return omap_prcm_register_chain_handler(&omap4_prcm_irq_setup);
|
||||
return 0;
|
||||
}
|
||||
subsys_initcall(omap4xxx_prcm_init);
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
/*
|
||||
* OMAP44xx PRM instance offset macros
|
||||
*
|
||||
* Copyright (C) 2009-2010 Texas Instruments, Inc.
|
||||
* Copyright (C) 2009-2011 Texas Instruments, Inc.
|
||||
* Copyright (C) 2009-2010 Nokia Corporation
|
||||
*
|
||||
* Paul Walmsley (paul@pwsan.com)
|
||||
|
@ -763,6 +763,12 @@ extern u32 omap4_prm_vcvp_read(u8 offset);
|
|||
extern void omap4_prm_vcvp_write(u32 val, u8 offset);
|
||||
extern u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset);
|
||||
|
||||
/* PRM interrupt-related functions */
|
||||
extern void omap44xx_prm_read_pending_irqs(unsigned long *events);
|
||||
extern void omap44xx_prm_ocp_barrier(void);
|
||||
extern void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask);
|
||||
extern void omap44xx_prm_restore_irqen(u32 *saved_mask);
|
||||
|
||||
# endif
|
||||
|
||||
#endif
|
||||
|
|
|
@ -0,0 +1,320 @@
|
|||
/*
|
||||
* OMAP2+ common Power & Reset Management (PRM) IP block functions
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments, Inc.
|
||||
* Tero Kristo <t-kristo@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
*
|
||||
* For historical purposes, the API used to configure the PRM
|
||||
* interrupt handler refers to it as the "PRCM interrupt." The
|
||||
* underlying registers are located in the PRM on OMAP3/4.
|
||||
*
|
||||
* XXX This code should eventually be moved to a PRM driver.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <mach/system.h>
|
||||
#include <plat/common.h>
|
||||
#include <plat/prcm.h>
|
||||
#include <plat/irqs.h>
|
||||
|
||||
#include "prm2xxx_3xxx.h"
|
||||
#include "prm44xx.h"
|
||||
|
||||
/*
|
||||
* OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
|
||||
* XXX this is technically not needed, since
|
||||
* omap_prcm_register_chain_handler() could allocate this based on the
|
||||
* actual amount of memory needed for the SoC
|
||||
*/
|
||||
#define OMAP_PRCM_MAX_NR_PENDING_REG 2
|
||||
|
||||
/*
|
||||
* prcm_irq_chips: an array of all of the "generic IRQ chips" in use
|
||||
* by the PRCM interrupt handler code. There will be one 'chip' per
|
||||
* PRM_{IRQSTATUS,IRQENABLE}_MPU register pair. (So OMAP3 will have
|
||||
* one "chip" and OMAP4 will have two.)
|
||||
*/
|
||||
static struct irq_chip_generic **prcm_irq_chips;
|
||||
|
||||
/*
|
||||
* prcm_irq_setup: the PRCM IRQ parameters for the hardware the code
|
||||
* is currently running on. Defined and passed by initialization code
|
||||
* that calls omap_prcm_register_chain_handler().
|
||||
*/
|
||||
static struct omap_prcm_irq_setup *prcm_irq_setup;
|
||||
|
||||
/* Private functions */
|
||||
|
||||
/*
|
||||
* Move priority events from events to priority_events array
|
||||
*/
|
||||
static void omap_prcm_events_filter_priority(unsigned long *events,
|
||||
unsigned long *priority_events)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
|
||||
priority_events[i] =
|
||||
events[i] & prcm_irq_setup->priority_mask[i];
|
||||
events[i] ^= priority_events[i];
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* PRCM Interrupt Handler
|
||||
*
|
||||
* This is a common handler for the OMAP PRCM interrupts. Pending
|
||||
* interrupts are detected by a call to prcm_pending_events and
|
||||
* dispatched accordingly. Clearing of the wakeup events should be
|
||||
* done by the SoC specific individual handlers.
|
||||
*/
|
||||
static void omap_prcm_irq_handler(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
unsigned long pending[OMAP_PRCM_MAX_NR_PENDING_REG];
|
||||
unsigned long priority_pending[OMAP_PRCM_MAX_NR_PENDING_REG];
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
unsigned int virtirq;
|
||||
int nr_irqs = prcm_irq_setup->nr_regs * 32;
|
||||
|
||||
/*
|
||||
* If we are suspended, mask all interrupts from PRCM level,
|
||||
* this does not ack them, and they will be pending until we
|
||||
* re-enable the interrupts, at which point the
|
||||
* omap_prcm_irq_handler will be executed again. The
|
||||
* _save_and_clear_irqen() function must ensure that the PRM
|
||||
* write to disable all IRQs has reached the PRM before
|
||||
* returning, or spurious PRCM interrupts may occur during
|
||||
* suspend.
|
||||
*/
|
||||
if (prcm_irq_setup->suspended) {
|
||||
prcm_irq_setup->save_and_clear_irqen(prcm_irq_setup->saved_mask);
|
||||
prcm_irq_setup->suspend_save_flag = true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Loop until all pending irqs are handled, since
|
||||
* generic_handle_irq() can cause new irqs to come
|
||||
*/
|
||||
while (!prcm_irq_setup->suspended) {
|
||||
prcm_irq_setup->read_pending_irqs(pending);
|
||||
|
||||
/* No bit set, then all IRQs are handled */
|
||||
if (find_first_bit(pending, nr_irqs) >= nr_irqs)
|
||||
break;
|
||||
|
||||
omap_prcm_events_filter_priority(pending, priority_pending);
|
||||
|
||||
/*
|
||||
* Loop on all currently pending irqs so that new irqs
|
||||
* cannot starve previously pending irqs
|
||||
*/
|
||||
|
||||
/* Serve priority events first */
|
||||
for_each_set_bit(virtirq, priority_pending, nr_irqs)
|
||||
generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
|
||||
|
||||
/* Serve normal events next */
|
||||
for_each_set_bit(virtirq, pending, nr_irqs)
|
||||
generic_handle_irq(prcm_irq_setup->base_irq + virtirq);
|
||||
}
|
||||
if (chip->irq_ack)
|
||||
chip->irq_ack(&desc->irq_data);
|
||||
if (chip->irq_eoi)
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
chip->irq_unmask(&desc->irq_data);
|
||||
|
||||
prcm_irq_setup->ocp_barrier(); /* avoid spurious IRQs */
|
||||
}
|
||||
|
||||
/* Public functions */
|
||||
|
||||
/**
|
||||
* omap_prcm_event_to_irq - given a PRCM event name, returns the
|
||||
* corresponding IRQ on which the handler should be registered
|
||||
* @name: name of the PRCM interrupt bit to look up - see struct omap_prcm_irq
|
||||
*
|
||||
* Returns the Linux internal IRQ ID corresponding to @name upon success,
|
||||
* or -ENOENT upon failure.
|
||||
*/
|
||||
int omap_prcm_event_to_irq(const char *name)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!prcm_irq_setup || !name)
|
||||
return -ENOENT;
|
||||
|
||||
for (i = 0; i < prcm_irq_setup->nr_irqs; i++)
|
||||
if (!strcmp(prcm_irq_setup->irqs[i].name, name))
|
||||
return prcm_irq_setup->base_irq +
|
||||
prcm_irq_setup->irqs[i].offset;
|
||||
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/**
|
||||
* omap_prcm_irq_cleanup - reverses memory allocated and other steps
|
||||
* done by omap_prcm_register_chain_handler()
|
||||
*
|
||||
* No return value.
|
||||
*/
|
||||
void omap_prcm_irq_cleanup(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!prcm_irq_setup) {
|
||||
pr_err("PRCM: IRQ handler not initialized; cannot cleanup\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (prcm_irq_chips) {
|
||||
for (i = 0; i < prcm_irq_setup->nr_regs; i++) {
|
||||
if (prcm_irq_chips[i])
|
||||
irq_remove_generic_chip(prcm_irq_chips[i],
|
||||
0xffffffff, 0, 0);
|
||||
prcm_irq_chips[i] = NULL;
|
||||
}
|
||||
kfree(prcm_irq_chips);
|
||||
prcm_irq_chips = NULL;
|
||||
}
|
||||
|
||||
kfree(prcm_irq_setup->saved_mask);
|
||||
prcm_irq_setup->saved_mask = NULL;
|
||||
|
||||
kfree(prcm_irq_setup->priority_mask);
|
||||
prcm_irq_setup->priority_mask = NULL;
|
||||
|
||||
irq_set_chained_handler(prcm_irq_setup->irq, NULL);
|
||||
|
||||
if (prcm_irq_setup->base_irq > 0)
|
||||
irq_free_descs(prcm_irq_setup->base_irq,
|
||||
prcm_irq_setup->nr_regs * 32);
|
||||
prcm_irq_setup->base_irq = 0;
|
||||
}
|
||||
|
||||
void omap_prcm_irq_prepare(void)
|
||||
{
|
||||
prcm_irq_setup->suspended = true;
|
||||
}
|
||||
|
||||
void omap_prcm_irq_complete(void)
|
||||
{
|
||||
prcm_irq_setup->suspended = false;
|
||||
|
||||
/* If we have not saved the masks, do not attempt to restore */
|
||||
if (!prcm_irq_setup->suspend_save_flag)
|
||||
return;
|
||||
|
||||
prcm_irq_setup->suspend_save_flag = false;
|
||||
|
||||
/*
|
||||
* Re-enable all masked PRCM irq sources, this causes the PRCM
|
||||
* interrupt to fire immediately if the events were masked
|
||||
* previously in the chain handler
|
||||
*/
|
||||
prcm_irq_setup->restore_irqen(prcm_irq_setup->saved_mask);
|
||||
}
|
||||
|
||||
/**
|
||||
* omap_prcm_register_chain_handler - initializes the prcm chained interrupt
|
||||
* handler based on provided parameters
|
||||
* @irq_setup: hardware data about the underlying PRM/PRCM
|
||||
*
|
||||
* Set up the PRCM chained interrupt handler on the PRCM IRQ. Sets up
|
||||
* one generic IRQ chip per PRM interrupt status/enable register pair.
|
||||
* Returns 0 upon success, -EINVAL if called twice or if invalid
|
||||
* arguments are passed, or -ENOMEM on any other error.
|
||||
*/
|
||||
int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
|
||||
{
|
||||
int nr_regs = irq_setup->nr_regs;
|
||||
u32 mask[OMAP_PRCM_MAX_NR_PENDING_REG];
|
||||
int offset, i;
|
||||
struct irq_chip_generic *gc;
|
||||
struct irq_chip_type *ct;
|
||||
|
||||
if (!irq_setup)
|
||||
return -EINVAL;
|
||||
|
||||
if (prcm_irq_setup) {
|
||||
pr_err("PRCM: already initialized; won't reinitialize\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (nr_regs > OMAP_PRCM_MAX_NR_PENDING_REG) {
|
||||
pr_err("PRCM: nr_regs too large\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
prcm_irq_setup = irq_setup;
|
||||
|
||||
prcm_irq_chips = kzalloc(sizeof(void *) * nr_regs, GFP_KERNEL);
|
||||
prcm_irq_setup->saved_mask = kzalloc(sizeof(u32) * nr_regs, GFP_KERNEL);
|
||||
prcm_irq_setup->priority_mask = kzalloc(sizeof(u32) * nr_regs,
|
||||
GFP_KERNEL);
|
||||
|
||||
if (!prcm_irq_chips || !prcm_irq_setup->saved_mask ||
|
||||
!prcm_irq_setup->priority_mask) {
|
||||
pr_err("PRCM: kzalloc failed\n");
|
||||
goto err;
|
||||
}
|
||||
|
||||
memset(mask, 0, sizeof(mask));
|
||||
|
||||
for (i = 0; i < irq_setup->nr_irqs; i++) {
|
||||
offset = irq_setup->irqs[i].offset;
|
||||
mask[offset >> 5] |= 1 << (offset & 0x1f);
|
||||
if (irq_setup->irqs[i].priority)
|
||||
irq_setup->priority_mask[offset >> 5] |=
|
||||
1 << (offset & 0x1f);
|
||||
}
|
||||
|
||||
irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);
|
||||
|
||||
irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
|
||||
0);
|
||||
|
||||
if (irq_setup->base_irq < 0) {
|
||||
pr_err("PRCM: failed to allocate irq descs: %d\n",
|
||||
irq_setup->base_irq);
|
||||
goto err;
|
||||
}
|
||||
|
||||
for (i = 0; i < irq_setup->nr_regs; i++) {
|
||||
gc = irq_alloc_generic_chip("PRCM", 1,
|
||||
irq_setup->base_irq + i * 32, prm_base,
|
||||
handle_level_irq);
|
||||
|
||||
if (!gc) {
|
||||
pr_err("PRCM: failed to allocate generic chip\n");
|
||||
goto err;
|
||||
}
|
||||
ct = gc->chip_types;
|
||||
ct->chip.irq_ack = irq_gc_ack_set_bit;
|
||||
ct->chip.irq_mask = irq_gc_mask_clr_bit;
|
||||
ct->chip.irq_unmask = irq_gc_mask_set_bit;
|
||||
|
||||
ct->regs.ack = irq_setup->ack + i * 4;
|
||||
ct->regs.mask = irq_setup->mask + i * 4;
|
||||
|
||||
irq_setup_generic_chip(gc, mask[i], 0, IRQ_NOREQUEST, 0);
|
||||
prcm_irq_chips[i] = gc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
err:
|
||||
omap_prcm_irq_cleanup();
|
||||
return -ENOMEM;
|
||||
}
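Per the series, the platform suspend code brackets suspend entry and exit with omap_prcm_irq_prepare() and omap_prcm_irq_complete(), so that the chain handler above saves, masks, and later restores the PRM IRQ enables; that wiring is not reproduced in full in this diff. A rough sketch of it, assuming the standard platform_suspend_ops callbacks (the other callbacks and the exact structure in the PM code may differ):

static int omap_pm_prepare(void)
{
        /*
         * Mark the chain handler as suspended; it will then save and
         * mask the PRM IRQ enables the next time it runs.
         */
        omap_prcm_irq_prepare();
        return 0;
}

static void omap_pm_finish(void)
{
        /* Restore the PRM IRQ enables saved while suspended. */
        omap_prcm_irq_complete();
}

static const struct platform_suspend_ops omap_pm_ops = {
        .prepare        = omap_pm_prepare,
        .finish         = omap_pm_finish,
        /* .begin, .end, .enter and .valid as in the existing PM code */
};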
|
The diff for this file is not shown because of its large size.
|
@ -0,0 +1,379 @@
|
|||
/*
|
||||
* OMAP44xx sleep code.
|
||||
*
|
||||
* Copyright (C) 2011 Texas Instruments, Inc.
|
||||
* Santosh Shilimkar <santosh.shilimkar@ti.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/smp_scu.h>
|
||||
#include <asm/memory.h>
|
||||
#include <asm/hardware/cache-l2x0.h>
|
||||
|
||||
#include <plat/omap44xx.h>
|
||||
#include <mach/omap-secure.h>
|
||||
|
||||
#include "common.h"
|
||||
#include "omap4-sar-layout.h"
|
||||
|
||||
#if defined(CONFIG_SMP) && defined(CONFIG_PM)
|
||||
|
||||
.macro DO_SMC
|
||||
dsb
|
||||
smc #0
|
||||
dsb
|
||||
.endm
|
||||
|
||||
ppa_zero_params:
|
||||
.word 0x0
|
||||
|
||||
ppa_por_params:
|
||||
.word 1, 0
|
||||
|
||||
/*
|
||||
* =============================
|
||||
* == CPU suspend finisher ==
|
||||
* =============================
|
||||
*
|
||||
* void omap4_finish_suspend(unsigned long cpu_state)
|
||||
*
|
||||
* This function saves the CPU context and performs the CPU
|
||||
* power down sequence. Calling WFI effectively changes the CPU
|
||||
* power domains states to the desired target power state.
|
||||
*
|
||||
* @cpu_state : contains context save state (r0)
|
||||
* 0 - No context lost
|
||||
* 1 - CPUx L1 and logic lost: MPUSS CSWR
|
||||
* 2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
|
||||
* 3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
|
||||
* @return: This function never returns for CPU OFF and DORMANT power states.
|
||||
* Post WFI, CPU transitions to DORMANT or OFF power state and on wake-up
|
||||
* from this follows a full CPU reset path via ROM code to CPU restore code.
|
||||
* The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
|
||||
* It returns to the caller for CPU INACTIVE and ON power states or in case
|
||||
* CPU failed to transition to targeted OFF/DORMANT state.
|
||||
*/
|
||||
ENTRY(omap4_finish_suspend)
|
||||
stmfd sp!, {lr}
|
||||
cmp r0, #0x0
|
||||
beq do_WFI @ No lowpower state, jump to WFI
|
||||
|
||||
/*
|
||||
* Flush all data from the L1 data cache before disabling
|
||||
* SCTLR.C bit.
|
||||
*/
|
||||
bl omap4_get_sar_ram_base
|
||||
ldr r9, [r0, #OMAP_TYPE_OFFSET]
|
||||
cmp r9, #0x1 @ Check for HS device
|
||||
bne skip_secure_l1_clean
|
||||
mov r0, #SCU_PM_NORMAL
|
||||
mov r1, #0xFF @ clean secure L1
|
||||
stmfd r13!, {r4-r12, r14}
|
||||
ldr r12, =OMAP4_MON_SCU_PWR_INDEX
|
||||
DO_SMC
|
||||
ldmfd r13!, {r4-r12, r14}
|
||||
skip_secure_l1_clean:
|
||||
bl v7_flush_dcache_all
|
||||
|
||||
/*
|
||||
* Clear the SCTLR.C bit to prevent further data cache
|
||||
* allocation. Clearing SCTLR.C would make all the data accesses
|
||||
* strongly ordered and would not hit the cache.
|
||||
*/
|
||||
mrc p15, 0, r0, c1, c0, 0
|
||||
bic r0, r0, #(1 << 2) @ Disable the C bit
|
||||
mcr p15, 0, r0, c1, c0, 0
|
||||
isb
|
||||
|
||||
/*
|
||||
* Invalidate L1 data cache. Even though only invalidate is
|
||||
* necessary exported flush API is used here. Doing clean
|
||||
* on already clean cache would be almost NOP.
|
||||
*/
|
||||
bl v7_flush_dcache_all
|
||||
|
||||
/*
|
||||
* Switch the CPU from Symmetric Multiprocessing (SMP) mode
|
||||
* to Asymmetric Multiprocessing (AMP) mode by programming
|
||||
* the SCU power status to DORMANT or OFF mode.
|
||||
* This enables the CPU to be taken out of coherency by
|
||||
* preventing the CPU from receiving cache, TLB, or BTB
|
||||
* maintenance operations broadcast by other CPUs in the cluster.
|
||||
*/
|
||||
bl omap4_get_sar_ram_base
|
||||
mov r8, r0
|
||||
ldr r9, [r8, #OMAP_TYPE_OFFSET]
|
||||
cmp r9, #0x1 @ Check for HS device
|
||||
bne scu_gp_set
|
||||
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
|
||||
ands r0, r0, #0x0f
|
||||
ldreq r0, [r8, #SCU_OFFSET0]
|
||||
ldrne r0, [r8, #SCU_OFFSET1]
|
||||
mov r1, #0x00
|
||||
stmfd r13!, {r4-r12, r14}
|
||||
ldr r12, =OMAP4_MON_SCU_PWR_INDEX
|
||||
DO_SMC
|
||||
ldmfd r13!, {r4-r12, r14}
|
||||
b skip_scu_gp_set
|
||||
scu_gp_set:
|
||||
mrc p15, 0, r0, c0, c0, 5 @ Read MPIDR
|
||||
ands r0, r0, #0x0f
|
||||
ldreq r1, [r8, #SCU_OFFSET0]
|
||||
ldrne r1, [r8, #SCU_OFFSET1]
|
||||
bl omap4_get_scu_base
|
||||
bl scu_power_mode
|
||||
skip_scu_gp_set:
|
||||
mrc p15, 0, r0, c1, c1, 2 @ Read NSACR data
|
||||
tst r0, #(1 << 18)
|
||||
mrcne p15, 0, r0, c1, c0, 1
|
||||
bicne r0, r0, #(1 << 6) @ Disable SMP bit
|
||||
mcrne p15, 0, r0, c1, c0, 1
|
||||
isb
|
||||
dsb
|
||||
#ifdef CONFIG_CACHE_L2X0
|
||||
/*
|
||||
* Clean and invalidate the L2 cache.
* The common cache-l2x0.c functions can't be used here because they
* take spinlocks: we are out of coherency at this point, with the
* data cache disabled, and the spinlock implementation relies on
* exclusive load/store instructions, which can fail when the data
* cache is off. OMAP4 has no external exclusive monitor to fall back
* on, so taking such a lock here could deadlock the CPU.
*/
|
||||
bl omap4_get_sar_ram_base
|
||||
mov r8, r0
|
||||
mrc p15, 0, r5, c0, c0, 5 @ Read MPIDR
|
||||
ands r5, r5, #0x0f
|
||||
ldreq r0, [r8, #L2X0_SAVE_OFFSET0] @ Retrieve L2 state from SAR
|
||||
ldrne r0, [r8, #L2X0_SAVE_OFFSET1] @ memory.
|
||||
cmp r0, #3
|
||||
bne do_WFI
|
||||
#ifdef CONFIG_PL310_ERRATA_727915
|
||||
mov r0, #0x03
|
||||
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
|
||||
DO_SMC
|
||||
#endif
|
||||
bl omap4_get_l2cache_base
|
||||
mov r2, r0
|
||||
ldr r0, =0xffff
|
||||
str r0, [r2, #L2X0_CLEAN_INV_WAY]
|
||||
wait:
|
||||
ldr r0, [r2, #L2X0_CLEAN_INV_WAY]
|
||||
ldr r1, =0xffff
|
||||
ands r0, r0, r1
|
||||
bne wait
|
||||
#ifdef CONFIG_PL310_ERRATA_727915
|
||||
mov r0, #0x00
|
||||
mov r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
|
||||
DO_SMC
|
||||
#endif
|
||||
l2x_sync:
|
||||
bl omap4_get_l2cache_base
|
||||
mov r2, r0
|
||||
mov r0, #0x0
|
||||
str r0, [r2, #L2X0_CACHE_SYNC]
|
||||
sync:
|
||||
ldr r0, [r2, #L2X0_CACHE_SYNC]
|
||||
ands r0, r0, #0x1
|
||||
bne sync
|
||||
#endif
|
||||
|
||||
do_WFI:
|
||||
bl omap_do_wfi
|
||||
|
||||
/*
|
||||
* CPU is here when it failed to enter OFF/DORMANT or
|
||||
* no low power state was attempted.
|
||||
*/
|
||||
mrc p15, 0, r0, c1, c0, 0
|
||||
tst r0, #(1 << 2) @ Check C bit enabled?
|
||||
orreq r0, r0, #(1 << 2) @ Enable the C bit
|
||||
mcreq p15, 0, r0, c1, c0, 0
|
||||
isb
|
||||
|
||||
/*
|
||||
* Ensure the CPU power state is set to NORMAL in
|
||||
* SCU power state so that CPU is back in coherency.
|
||||
* In non-coherent mode CPU can lock-up and lead to
|
||||
* system deadlock.
|
||||
*/
|
||||
mrc p15, 0, r0, c1, c0, 1
|
||||
tst r0, #(1 << 6) @ Check SMP bit enabled?
|
||||
orreq r0, r0, #(1 << 6)
|
||||
mcreq p15, 0, r0, c1, c0, 1
|
||||
isb
|
||||
bl omap4_get_sar_ram_base
|
||||
mov r8, r0
|
||||
ldr r9, [r8, #OMAP_TYPE_OFFSET]
|
||||
cmp r9, #0x1 @ Check for HS device
|
||||
bne scu_gp_clear
|
||||
mov r0, #SCU_PM_NORMAL
|
||||
mov r1, #0x00
|
||||
stmfd r13!, {r4-r12, r14}
|
||||
ldr r12, =OMAP4_MON_SCU_PWR_INDEX
|
||||
DO_SMC
|
||||
ldmfd r13!, {r4-r12, r14}
|
||||
b skip_scu_gp_clear
|
||||
scu_gp_clear:
|
||||
bl omap4_get_scu_base
|
||||
mov r1, #SCU_PM_NORMAL
|
||||
bl scu_power_mode
|
||||
skip_scu_gp_clear:
|
||||
isb
|
||||
dsb
|
||||
ldmfd sp!, {pc}
|
||||
ENDPROC(omap4_finish_suspend)
|
||||
|
||||
/*
|
||||
* ============================
|
||||
* == CPU resume entry point ==
|
||||
* ============================
|
||||
*
|
||||
* void omap4_cpu_resume(void)
|
||||
*
|
||||
* ROM code jumps to this function while waking up from CPU
|
||||
* OFF or DORMANT state. Physical address of the function is
|
||||
* stored in the SAR RAM while entering to OFF or DORMANT mode.
|
||||
* The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
|
||||
*/
|
||||
ENTRY(omap4_cpu_resume)
|
||||
/*
|
||||
* Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
|
||||
* OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
|
||||
* init and for CPU1, a secure PPA API provided. CPU0 must be ON
|
||||
* while executing NS_SMP API on CPU1 and PPA version must be 1.4.0+.
|
||||
* OMAP443X GP devices- SMP bit isn't accessible.
|
||||
* OMAP446X GP devices - SMP bit access is enabled on both CPUs.
|
||||
*/
|
||||
ldr r8, =OMAP44XX_SAR_RAM_BASE
|
||||
ldr r9, [r8, #OMAP_TYPE_OFFSET]
|
||||
cmp r9, #0x1 @ Skip if GP device
|
||||
bne skip_ns_smp_enable
|
||||
mrc p15, 0, r0, c0, c0, 5
|
||||
ands r0, r0, #0x0f
|
||||
beq skip_ns_smp_enable
|
||||
ppa_actrl_retry:
|
||||
mov r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
|
||||
adr r3, ppa_zero_params @ Pointer to parameters
|
||||
mov r1, #0x0 @ Process ID
|
||||
mov r2, #0x4 @ Flag
|
||||
mov r6, #0xff
|
||||
mov r12, #0x00 @ Secure Service ID
|
||||
DO_SMC
|
||||
cmp r0, #0x0 @ API returns 0 on success.
|
||||
beq enable_smp_bit
|
||||
b ppa_actrl_retry
|
||||
enable_smp_bit:
|
||||
mrc p15, 0, r0, c1, c0, 1
|
||||
tst r0, #(1 << 6) @ Check SMP bit enabled?
|
||||
orreq r0, r0, #(1 << 6)
|
||||
mcreq p15, 0, r0, c1, c0, 1
|
||||
isb
|
||||
skip_ns_smp_enable:
|
||||
#ifdef CONFIG_CACHE_L2X0
|
||||
/*
|
||||
* Restore the L2 AUXCTRL and enable the L2 cache.
|
||||
* OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
|
||||
* OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using L2X0 CTRL
|
||||
* register r0 contains value to be programmed.
|
||||
* The L2 cache is already invalidated by ROM code as part
|
||||
* of MPUSS OFF wakeup path.
|
||||
*/
|
||||
ldr r2, =OMAP44XX_L2CACHE_BASE
|
||||
ldr r0, [r2, #L2X0_CTRL]
|
||||
and r0, #0x0f
|
||||
cmp r0, #1
|
||||
beq skip_l2en @ Skip if already enabled
|
||||
ldr r3, =OMAP44XX_SAR_RAM_BASE
|
||||
ldr r1, [r3, #OMAP_TYPE_OFFSET]
|
||||
cmp r1, #0x1 @ Check for HS device
|
||||
bne set_gp_por
|
||||
ldr r0, =OMAP4_PPA_L2_POR_INDEX
|
||||
ldr r1, =OMAP44XX_SAR_RAM_BASE
|
||||
ldr r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
|
||||
adr r3, ppa_por_params
|
||||
str r4, [r3, #0x04]
|
||||
mov r1, #0x0 @ Process ID
|
||||
mov r2, #0x4 @ Flag
|
||||
mov r6, #0xff
|
||||
mov r12, #0x00 @ Secure Service ID
|
||||
DO_SMC
|
||||
b set_aux_ctrl
|
||||
set_gp_por:
|
||||
ldr r1, =OMAP44XX_SAR_RAM_BASE
|
||||
ldr r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
|
||||
ldr r12, =OMAP4_MON_L2X0_PREFETCH_INDEX @ Setup L2 PREFETCH
|
||||
DO_SMC
|
||||
set_aux_ctrl:
|
||||
ldr r1, =OMAP44XX_SAR_RAM_BASE
|
||||
ldr r0, [r1, #L2X0_AUXCTRL_OFFSET]
|
||||
ldr r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX @ Setup L2 AUXCTRL
|
||||
DO_SMC
|
||||
mov r0, #0x1
|
||||
ldr r12, =OMAP4_MON_L2X0_CTRL_INDEX @ Enable L2 cache
|
||||
DO_SMC
|
||||
skip_l2en:
|
||||
#endif
|
||||
|
||||
b cpu_resume @ Jump to generic resume
|
||||
ENDPROC(omap4_cpu_resume)
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_OMAP4_ERRATA_I688
|
||||
ENTRY(omap_bus_sync)
|
||||
mov pc, lr
|
||||
ENDPROC(omap_bus_sync)
|
||||
#endif
|
||||
|
||||
ENTRY(omap_do_wfi)
|
||||
stmfd sp!, {lr}
|
||||
/* Drain interconnect write buffers. */
|
||||
bl omap_bus_sync
|
||||
|
||||
/*
|
||||
* Execute an ISB instruction to ensure that all of the
|
||||
* CP15 register changes have been committed.
|
||||
*/
|
||||
isb
|
||||
|
||||
/*
|
||||
* Execute a barrier instruction to ensure that all cache,
|
||||
* TLB and branch predictor maintenance operations issued
|
||||
* by any CPU in the cluster have completed.
|
||||
*/
|
||||
dsb
|
||||
dmb
|
||||
|
||||
/*
|
||||
* Execute a WFI instruction and wait until the
|
||||
* STANDBYWFI output is asserted to indicate that the
|
||||
* CPU is in an idle, low-power state. The CPU can speculatively
|
||||
* prefetch the instructions so add NOPs after WFI. Sixteen
|
||||
* NOPs as per Cortex-A9 pipeline.
|
||||
*/
|
||||
wfi @ Wait For Interrupt
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
nop
|
||||
|
||||
ldmfd sp!, {pc}
|
||||
ENDPROC(omap_do_wfi)
|
|
@ -131,6 +131,12 @@ extern void imx53_evk_common_init(void);
|
|||
extern void imx53_qsb_common_init(void);
|
||||
extern void imx53_smd_common_init(void);
|
||||
extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode);
|
||||
extern void imx6q_pm_init(void);
|
||||
extern void imx6q_clock_map_io(void);
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
extern void imx6q_pm_init(void);
|
||||
#else
|
||||
static inline void imx6q_pm_init(void) {}
|
||||
#endif
|
||||
|
||||
#endif
|
||||
|
|
|
@ -168,7 +168,7 @@ struct cpu_op {
|
|||
u32 cpu_rate;
|
||||
};
|
||||
|
||||
int tzic_enable_wake(int is_idle);
|
||||
int tzic_enable_wake(void);
|
||||
|
||||
extern struct cpu_op *(*get_cpu_op)(int *op);
|
||||
#endif
|
||||
|
|
|
@ -73,7 +73,28 @@ static int tzic_set_irq_fiq(unsigned int irq, unsigned int type)
|
|||
#define tzic_set_irq_fiq NULL
|
||||
#endif
|
||||
|
||||
static unsigned int *wakeup_intr[4];
|
||||
#ifdef CONFIG_PM
|
||||
static void tzic_irq_suspend(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
int idx = gc->irq_base >> 5;
|
||||
|
||||
__raw_writel(gc->wake_active, tzic_base + TZIC_WAKEUP0(idx));
|
||||
}
|
||||
|
||||
static void tzic_irq_resume(struct irq_data *d)
|
||||
{
|
||||
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
|
||||
int idx = gc->irq_base >> 5;
|
||||
|
||||
__raw_writel(__raw_readl(tzic_base + TZIC_ENSET0(idx)),
|
||||
tzic_base + TZIC_WAKEUP0(idx));
|
||||
}
|
||||
|
||||
#else
|
||||
#define tzic_irq_suspend NULL
|
||||
#define tzic_irq_resume NULL
|
||||
#endif
|
||||
|
||||
static struct mxc_extra_irq tzic_extra_irq = {
|
||||
#ifdef CONFIG_FIQ
|
||||
|
@ -91,12 +112,13 @@ static __init void tzic_init_gc(unsigned int irq_start)
|
|||
handle_level_irq);
|
||||
gc->private = &tzic_extra_irq;
|
||||
gc->wake_enabled = IRQ_MSK(32);
|
||||
wakeup_intr[idx] = &gc->wake_active;
|
||||
|
||||
ct = gc->chip_types;
|
||||
ct->chip.irq_mask = irq_gc_mask_disable_reg;
|
||||
ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
|
||||
ct->chip.irq_set_wake = irq_gc_set_wake;
|
||||
ct->chip.irq_suspend = tzic_irq_suspend;
|
||||
ct->chip.irq_resume = tzic_irq_resume;
|
||||
ct->regs.disable = TZIC_ENCLEAR0(idx);
|
||||
ct->regs.enable = TZIC_ENSET0(idx);
|
||||
|
||||
|
@ -167,23 +189,19 @@ void __init tzic_init_irq(void __iomem *irqbase)
|
|||
/**
|
||||
* tzic_enable_wake() - enable wakeup interrupt
|
||||
*
|
||||
* @param is_idle 1 if called in idle loop (ENSET0 register);
|
||||
* 0 to be used when called from low power entry
|
||||
* @return 0 if successful; non-zero otherwise
|
||||
*/
|
||||
int tzic_enable_wake(int is_idle)
|
||||
int tzic_enable_wake(void)
|
||||
{
|
||||
unsigned int i, v;
|
||||
unsigned int i;
|
||||
|
||||
__raw_writel(1, tzic_base + TZIC_DSMINT);
|
||||
if (unlikely(__raw_readl(tzic_base + TZIC_DSMINT) == 0))
|
||||
return -EAGAIN;
|
||||
|
||||
for (i = 0; i < 4; i++) {
|
||||
v = is_idle ? __raw_readl(tzic_base + TZIC_ENSET0(i)) :
|
||||
*wakeup_intr[i];
|
||||
__raw_writel(v, tzic_base + TZIC_WAKEUP0(i));
|
||||
}
|
||||
for (i = 0; i < 4; i++)
|
||||
__raw_writel(__raw_readl(tzic_base + TZIC_ENSET0(i)),
|
||||
tzic_base + TZIC_WAKEUP0(i));
|
||||
|
||||
return 0;
|
||||
}
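Since tzic_enable_wake() above no longer takes an is_idle argument, a low-power entry path would simply call it immediately before executing WFI. A hedged sketch of such a caller follows; the function name and the surrounding steps are assumptions, not the actual i.MX suspend path.

/* Illustrative caller only; the real i.MX low-power path may differ. */
static int mx5_suspend_enter_sketch(void)
{
        int ret;

        /* Mirror the currently enabled TZIC interrupts into TZIC_WAKEUP. */
        ret = tzic_enable_wake();
        if (ret)
                return ret;

        cpu_do_idle();          /* WFI; the armed wakeup sources end it */
        return 0;
}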
|
||||
|
|
|
@ -22,6 +22,8 @@
|
|||
#include <plat/vram.h>
|
||||
#include <plat/dsp.h>
|
||||
|
||||
#include <plat/omap-secure.h>
|
||||
|
||||
|
||||
#define NO_LENGTH_CHECK 0xffffffff
|
||||
|
||||
|
@ -66,6 +68,7 @@ void __init omap_reserve(void)
|
|||
omapfb_reserve_sdram_memblock();
|
||||
omap_vram_reserve_sdram_memblock();
|
||||
omap_dsp_reserve_sdram_memblock();
|
||||
omap_secure_ram_reserve_memblock();
|
||||
}
|
||||
|
||||
void __init omap_init_consistent_dma_size(void)
|
||||
|
|
|
@ -0,0 +1,13 @@
|
|||
#ifndef __OMAP_SECURE_H__
|
||||
#define __OMAP_SECURE_H__
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#ifdef CONFIG_ARCH_OMAP2PLUS
|
||||
extern int omap_secure_ram_reserve_memblock(void);
|
||||
#else
|
||||
static inline void omap_secure_ram_reserve_memblock(void)
|
||||
{ }
|
||||
#endif
|
||||
|
||||
#endif /* __OMAP_SECURE_H__ */
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
#include <linux/serial_core.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_qos.h>
|
||||
|
||||
#include <plat/mux.h>
|
||||
|
||||
|
@ -33,6 +34,8 @@
|
|||
|
||||
#define OMAP_MODE13X_SPEED 230400
|
||||
|
||||
#define OMAP_UART_SCR_TX_EMPTY 0x08
|
||||
|
||||
/* WER = 0x7F
|
||||
* Enable module level wakeup in WER reg
|
||||
*/
|
||||
|
@ -51,18 +54,27 @@
|
|||
|
||||
#define OMAP_UART_DMA_CH_FREE -1
|
||||
|
||||
#define RX_TIMEOUT (3 * HZ)
|
||||
#define OMAP_MAX_HSUART_PORTS 4
|
||||
|
||||
#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
|
||||
|
||||
#define UART_ERRATA_i202_MDR1_ACCESS BIT(0)
|
||||
#define UART_ERRATA_i291_DMA_FORCEIDLE BIT(1)
|
||||
|
||||
struct omap_uart_port_info {
|
||||
bool dma_enabled; /* To specify DMA Mode */
|
||||
unsigned int uartclk; /* UART clock rate */
|
||||
void __iomem *membase; /* ioremap cookie or NULL */
|
||||
resource_size_t mapbase; /* resource base */
|
||||
unsigned long irqflags; /* request_irq flags */
|
||||
upf_t flags; /* UPF_* flags */
|
||||
u32 errata;
|
||||
unsigned int dma_rx_buf_size;
|
||||
unsigned int dma_rx_timeout;
|
||||
unsigned int autosuspend_timeout;
|
||||
unsigned int dma_rx_poll_rate;
|
||||
|
||||
int (*get_context_loss_count)(struct device *);
|
||||
void (*set_forceidle)(struct platform_device *);
|
||||
void (*set_noidle)(struct platform_device *);
|
||||
void (*enable_wakeup)(struct platform_device *, bool);
|
||||
};
|
||||
|
||||
struct uart_omap_dma {
|
||||
|
@ -86,8 +98,9 @@ struct uart_omap_dma {
|
|||
spinlock_t rx_lock;
|
||||
/* timer to poll activity on rx dma */
|
||||
struct timer_list rx_timer;
|
||||
int rx_buf_size;
|
||||
int rx_timeout;
|
||||
unsigned int rx_buf_size;
|
||||
unsigned int rx_poll_rate;
|
||||
unsigned int rx_timeout;
|
||||
};
|
||||
|
||||
struct uart_omap_port {
|
||||
|
@ -100,6 +113,10 @@ struct uart_omap_port {
|
|||
unsigned char mcr;
|
||||
unsigned char fcr;
|
||||
unsigned char efr;
|
||||
unsigned char dll;
|
||||
unsigned char dlh;
|
||||
unsigned char mdr1;
|
||||
unsigned char scr;
|
||||
|
||||
int use_dma;
|
||||
/*
|
||||
|
@ -111,6 +128,14 @@ struct uart_omap_port {
|
|||
unsigned char msr_saved_flags;
|
||||
char name[20];
|
||||
unsigned long port_activity;
|
||||
u32 context_loss_cnt;
|
||||
u32 errata;
|
||||
u8 wakeups_enabled;
|
||||
|
||||
struct pm_qos_request pm_qos_request;
|
||||
u32 latency;
|
||||
u32 calc_latency;
|
||||
struct work_struct qos_work;
|
||||
};
|
||||
|
||||
#endif /* __OMAP_SERIAL_H__ */
|
||||
|
|
|
@ -45,6 +45,7 @@
|
|||
#define OMAP44XX_WKUPGEN_BASE 0x48281000
|
||||
#define OMAP44XX_MCPDM_BASE 0x40132000
|
||||
#define OMAP44XX_MCPDM_L3_BASE 0x49032000
|
||||
#define OMAP44XX_SAR_RAM_BASE 0x4a326000
|
||||
|
||||
#define OMAP44XX_MAILBOX_BASE (L4_44XX_BASE + 0xF4000)
|
||||
#define OMAP44XX_HSUSB_OTG_BASE (L4_44XX_BASE + 0xAB000)
|
||||
|
|
|
@ -97,6 +97,7 @@ struct omap_hwmod_mux_info {
|
|||
struct omap_device_pad *pads;
|
||||
int nr_pads_dynamic;
|
||||
struct omap_device_pad **pads_dynamic;
|
||||
int *irqs;
|
||||
bool enabled;
|
||||
};
|
||||
|
||||
|
@ -416,10 +417,13 @@ struct omap_hwmod_omap4_prcm {
|
|||
* _HWMOD_NO_MPU_PORT: no path exists for the MPU to write to this module
|
||||
* _HWMOD_WAKEUP_ENABLED: set when the omap_hwmod code has enabled ENAWAKEUP
|
||||
* _HWMOD_SYSCONFIG_LOADED: set when the OCP_SYSCONFIG value has been cached
|
||||
* _HWMOD_SKIP_ENABLE: set if hwmod enabled during init (HWMOD_INIT_NO_IDLE) -
|
||||
* causes the first call to _enable() to only update the pinmux
|
||||
*/
|
||||
#define _HWMOD_NO_MPU_PORT (1 << 0)
|
||||
#define _HWMOD_WAKEUP_ENABLED (1 << 1)
|
||||
#define _HWMOD_SYSCONFIG_LOADED (1 << 2)
|
||||
#define _HWMOD_SKIP_ENABLE (1 << 3)
|
||||
|
||||
/*
|
||||
* omap_hwmod._state definitions
|
||||
|
@ -604,6 +608,8 @@ int omap_hwmod_get_context_loss_count(struct omap_hwmod *oh);
|
|||
|
||||
int omap_hwmod_no_setup_reset(struct omap_hwmod *oh);
|
||||
|
||||
int omap_hwmod_pad_route_irq(struct omap_hwmod *oh, int pad_idx, int irq_idx);
|
||||
|
||||
/*
|
||||
* Chip variant-specific hwmod init routines - XXX should be converted
|
||||
* to use initcalls once the initial boot ordering is straightened out
|
||||
|
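The new omap_hwmod_pad_route_irq() declared in the hunk above lets platform code steer a wakeup-capable pad to one of a hwmod's MPU IRQ lines. A hedged usage sketch, in which the hwmod name and both indices are purely illustrative:

/*
 * Route wakeups seen on pad index 0 of the "uart1" hwmod to the hwmod's
 * first MPU IRQ. Illustrative only; indices depend on the hwmod data.
 */
static int __init board_route_uart1_wake_irq(void)
{
        struct omap_hwmod *oh = omap_hwmod_lookup("uart1");

        if (!oh)
                return -ENODEV;

        return omap_hwmod_pad_route_irq(oh, 0, 0);
}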
|
|
@ -107,15 +107,13 @@
|
|||
#ifndef __ASSEMBLER__
|
||||
|
||||
struct omap_board_data;
|
||||
struct omap_uart_port_info;
|
||||
|
||||
extern void omap_serial_init(void);
|
||||
extern void omap_serial_init_port(struct omap_board_data *bdata);
|
||||
extern int omap_uart_can_sleep(void);
|
||||
extern void omap_uart_check_wakeup(void);
|
||||
extern void omap_uart_prepare_suspend(void);
|
||||
extern void omap_uart_prepare_idle(int num);
|
||||
extern void omap_uart_resume_idle(int num);
|
||||
extern void omap_uart_enable_irqs(int enable);
|
||||
extern void omap_serial_board_init(struct omap_uart_port_info *platform_data);
|
||||
extern void omap_serial_init_port(struct omap_board_data *bdata,
|
||||
struct omap_uart_port_info *platform_data);
|
||||
#endif
|
||||
|
||||
#endif
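With the reworked prototypes above, board files now hand UART platform data to the serial core instead of relying on pdev->id based defaults. A minimal, hedged sketch of a board doing so; the structure contents and the init function name are assumptions:

/* Illustrative board data; the values shown are not kernel defaults. */
static struct omap_uart_port_info board_uart_info = {
        .dma_enabled            = false,
        .uartclk                = 48000000,     /* 48 MHz functional clock */
        .autosuspend_timeout    = 3000,         /* ms idle before runtime suspend */
};

static void __init board_serial_init(void)
{
        /* Registers every on-chip UART with the platform data above. */
        omap_serial_board_init(&board_uart_info);
}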
|
||||
|
|
|
@ -95,6 +95,10 @@ static inline void omap_push_sram_idle(void) {}
|
|||
*/
|
||||
#define OMAP2_SRAM_PA 0x40200000
|
||||
#define OMAP3_SRAM_PA 0x40200000
|
||||
#ifdef CONFIG_OMAP4_ERRATA_I688
|
||||
#define OMAP4_SRAM_PA 0x40304000
|
||||
#define OMAP4_SRAM_VA 0xfe404000
|
||||
#else
|
||||
#define OMAP4_SRAM_PA 0x40300000
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
|
|
@ -40,7 +40,11 @@
|
|||
#define OMAP1_SRAM_PA 0x20000000
|
||||
#define OMAP2_SRAM_PUB_PA (OMAP2_SRAM_PA + 0xf800)
|
||||
#define OMAP3_SRAM_PUB_PA (OMAP3_SRAM_PA + 0x8000)
|
||||
#ifdef CONFIG_OMAP4_ERRATA_I688
|
||||
#define OMAP4_SRAM_PUB_PA OMAP4_SRAM_PA
|
||||
#else
|
||||
#define OMAP4_SRAM_PUB_PA (OMAP4_SRAM_PA + 0x4000)
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_ARCH_OMAP2PLUS)
|
||||
#define SRAM_BOOTLOADER_SZ 0x00
|
||||
|
@ -161,6 +165,10 @@ static void __init omap_map_sram(void)
|
|||
if (omap_sram_size == 0)
|
||||
return;
|
||||
|
||||
#ifdef CONFIG_OMAP4_ERRATA_I688
|
||||
omap_sram_start += PAGE_SIZE;
|
||||
omap_sram_size -= SZ_16K;
|
||||
#endif
|
||||
if (cpu_is_omap34xx()) {
|
||||
/*
|
||||
* SRAM must be marked as non-cached on OMAP3 since the
|
||||
|
|
|
@ -37,17 +37,24 @@
|
|||
#include <linux/clk.h>
|
||||
#include <linux/serial_core.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/of.h>
|
||||
|
||||
#include <plat/dma.h>
|
||||
#include <plat/dmtimer.h>
|
||||
#include <plat/omap-serial.h>
|
||||
|
||||
#define DEFAULT_CLK_SPEED 48000000 /* 48 MHz */
|
||||
|
||||
static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
|
||||
|
||||
/* Forward declaration of functions */
|
||||
static void uart_tx_dma_callback(int lch, u16 ch_status, void *data);
|
||||
static void serial_omap_rx_timeout(unsigned long uart_no);
|
||||
static void serial_omap_rxdma_poll(unsigned long uart_no);
|
||||
static int serial_omap_start_rxdma(struct uart_omap_port *up);
|
||||
static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1);
|
||||
|
||||
static struct workqueue_struct *serial_omap_uart_wq;
|
||||
|
||||
static inline unsigned int serial_in(struct uart_omap_port *up, int offset)
|
||||
{
|
||||
|
@ -102,6 +109,8 @@ static void serial_omap_stop_rxdma(struct uart_omap_port *up)
|
|||
omap_free_dma(up->uart_dma.rx_dma_channel);
|
||||
up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
|
||||
up->uart_dma.rx_dma_used = false;
|
||||
pm_runtime_mark_last_busy(&up->pdev->dev);
|
||||
pm_runtime_put_autosuspend(&up->pdev->dev);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -109,9 +118,12 @@ static void serial_omap_enable_ms(struct uart_port *port)
|
|||
{
|
||||
struct uart_omap_port *up = (struct uart_omap_port *)port;
|
||||
|
||||
dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->pdev->id);
|
||||
dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->port.line);
|
||||
|
||||
pm_runtime_get_sync(&up->pdev->dev);
|
||||
up->ier |= UART_IER_MSI;
|
||||
serial_out(up, UART_IER, up->ier);
|
||||
pm_runtime_put(&up->pdev->dev);
|
||||
}
|
||||
|
||||
static void serial_omap_stop_tx(struct uart_port *port)
|
||||
|
@ -129,30 +141,40 @@ static void serial_omap_stop_tx(struct uart_port *port)
|
|||
omap_stop_dma(up->uart_dma.tx_dma_channel);
|
||||
omap_free_dma(up->uart_dma.tx_dma_channel);
|
||||
up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
|
||||
pm_runtime_mark_last_busy(&up->pdev->dev);
|
||||
pm_runtime_put_autosuspend(&up->pdev->dev);
|
||||
}
|
||||
|
||||
pm_runtime_get_sync(&up->pdev->dev);
|
||||
if (up->ier & UART_IER_THRI) {
|
||||
up->ier &= ~UART_IER_THRI;
|
||||
serial_out(up, UART_IER, up->ier);
|
||||
}
|
||||
|
||||
pm_runtime_mark_last_busy(&up->pdev->dev);
|
||||
pm_runtime_put_autosuspend(&up->pdev->dev);
|
||||
}
|
||||
|
||||
static void serial_omap_stop_rx(struct uart_port *port)
|
||||
{
|
||||
struct uart_omap_port *up = (struct uart_omap_port *)port;
|
||||
|
||||
pm_runtime_get_sync(&up->pdev->dev);
|
||||
if (up->use_dma)
|
||||
serial_omap_stop_rxdma(up);
|
||||
up->ier &= ~UART_IER_RLSI;
|
||||
up->port.read_status_mask &= ~UART_LSR_DR;
|
||||
serial_out(up, UART_IER, up->ier);
|
||||
pm_runtime_mark_last_busy(&up->pdev->dev);
|
||||
pm_runtime_put_autosuspend(&up->pdev->dev);
|
||||
}
|
||||
|
||||
static inline void receive_chars(struct uart_omap_port *up, int *status)
|
||||
static inline void receive_chars(struct uart_omap_port *up,
|
||||
unsigned int *status)
|
||||
{
|
||||
struct tty_struct *tty = up->port.state->port.tty;
|
||||
unsigned int flag;
|
||||
unsigned char ch, lsr = *status;
|
||||
unsigned int flag, lsr = *status;
|
||||
unsigned char ch = 0;
|
||||
int max_count = 256;
|
||||
|
||||
do {
|
||||
|
@ -262,7 +284,10 @@ static void serial_omap_start_tx(struct uart_port *port)
|
|||
int ret = 0;
|
||||
|
||||
if (!up->use_dma) {
|
||||
pm_runtime_get_sync(&up->pdev->dev);
|
||||
serial_omap_enable_ier_thri(up);
|
||||
pm_runtime_mark_last_busy(&up->pdev->dev);
|
||||
pm_runtime_put_autosuspend(&up->pdev->dev);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -272,6 +297,7 @@ static void serial_omap_start_tx(struct uart_port *port)
|
|||
xmit = &up->port.state->xmit;
|
||||
|
||||
if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
|
||||
pm_runtime_get_sync(&up->pdev->dev);
|
||||
ret = omap_request_dma(up->uart_dma.uart_dma_tx,
|
||||
"UART Tx DMA",
|
||||
(void *)uart_tx_dma_callback, up,
|
||||
|
@ -354,9 +380,13 @@ static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
|
|||
unsigned int iir, lsr;
|
||||
unsigned long flags;
|
||||
|
||||
pm_runtime_get_sync(&up->pdev->dev);
|
||||
iir = serial_in(up, UART_IIR);
|
||||
if (iir & UART_IIR_NO_INT)
|
||||
if (iir & UART_IIR_NO_INT) {
|
||||
pm_runtime_mark_last_busy(&up->pdev->dev);
|
||||
pm_runtime_put_autosuspend(&up->pdev->dev);
|
||||
return IRQ_NONE;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&up->port.lock, flags);
|
||||
lsr = serial_in(up, UART_LSR);
|
||||
|
@ -378,6 +408,9 @@ static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
|
|||
transmit_chars(up);
|
||||
|
||||
spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
pm_runtime_mark_last_busy(&up->pdev->dev);
|
||||
pm_runtime_put_autosuspend(&up->pdev->dev);
|
||||
|
||||
up->port_activity = jiffies;
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
@ -388,11 +421,12 @@ static unsigned int serial_omap_tx_empty(struct uart_port *port)
|
|||
unsigned long flags = 0;
|
||||
unsigned int ret = 0;
|
||||
|
||||
dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->pdev->id);
|
||||
pm_runtime_get_sync(&up->pdev->dev);
|
||||
dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
|
||||
spin_lock_irqsave(&up->port.lock, flags);
|
||||
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
|
||||
spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
|
||||
pm_runtime_put(&up->pdev->dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -402,8 +436,11 @@ static unsigned int serial_omap_get_mctrl(struct uart_port *port)
|
|||
unsigned int status;
|
||||
unsigned int ret = 0;
|
||||
|
||||
pm_runtime_get_sync(&up->pdev->dev);
|
||||
status = check_modem_status(up);
|
||||
dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->pdev->id);
|
||||
pm_runtime_put(&up->pdev->dev);
|
||||
|
||||
dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->port.line);
|
||||
|
||||
if (status & UART_MSR_DCD)
|
||||
ret |= TIOCM_CAR;
|
||||
|
@ -421,7 +458,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
|
|||
struct uart_omap_port *up = (struct uart_omap_port *)port;
|
||||
unsigned char mcr = 0;
|
||||
|
||||
dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->pdev->id);
|
||||
dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->port.line);
|
||||
if (mctrl & TIOCM_RTS)
|
||||
mcr |= UART_MCR_RTS;
|
||||
if (mctrl & TIOCM_DTR)
|
||||
|
@ -433,8 +470,11 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
|
|||
if (mctrl & TIOCM_LOOP)
|
||||
mcr |= UART_MCR_LOOP;
|
||||
|
||||
mcr |= up->mcr;
|
||||
serial_out(up, UART_MCR, mcr);
|
||||
pm_runtime_get_sync(&up->pdev->dev);
|
||||
up->mcr = serial_in(up, UART_MCR);
|
||||
up->mcr |= mcr;
|
||||
serial_out(up, UART_MCR, up->mcr);
|
||||
pm_runtime_put(&up->pdev->dev);
|
||||
}
|
||||
|
||||
static void serial_omap_break_ctl(struct uart_port *port, int break_state)
|
||||
|
@ -442,7 +482,8 @@ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
|
|||
struct uart_omap_port *up = (struct uart_omap_port *)port;
|
||||
unsigned long flags = 0;
|
||||
|
||||
dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->pdev->id);
|
||||
dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
|
||||
pm_runtime_get_sync(&up->pdev->dev);
|
||||
spin_lock_irqsave(&up->port.lock, flags);
|
||||
if (break_state == -1)
|
||||
up->lcr |= UART_LCR_SBC;
|
||||
|
@ -450,6 +491,7 @@ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
|
|||
up->lcr &= ~UART_LCR_SBC;
|
||||
serial_out(up, UART_LCR, up->lcr);
|
||||
spin_unlock_irqrestore(&up->port.lock, flags);
|
||||
pm_runtime_put(&up->pdev->dev);
|
||||
}
|
||||
|
||||
static int serial_omap_startup(struct uart_port *port)
|
||||
|
@ -466,8 +508,9 @@ static int serial_omap_startup(struct uart_port *port)
|
|||
if (retval)
|
||||
return retval;
|
||||
|
||||
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->pdev->id);
|
||||
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
|
||||
|
||||
pm_runtime_get_sync(&up->pdev->dev);
|
||||
/*
|
||||
* Clear the FIFO buffers and disable them.
|
||||
* (they will be reenabled in set_termios())
|
||||
|
@ -505,8 +548,8 @@ static int serial_omap_startup(struct uart_port *port)
|
|||
(dma_addr_t *)&(up->uart_dma.tx_buf_dma_phys),
|
||||
0);
|
||||
init_timer(&(up->uart_dma.rx_timer));
|
||||
up->uart_dma.rx_timer.function = serial_omap_rx_timeout;
|
||||
up->uart_dma.rx_timer.data = up->pdev->id;
|
||||
up->uart_dma.rx_timer.function = serial_omap_rxdma_poll;
|
||||
up->uart_dma.rx_timer.data = up->port.line;
|
||||
/* Currently the buffer size is 4KB. Can increase it */
|
||||
up->uart_dma.rx_buf = dma_alloc_coherent(NULL,
|
||||
up->uart_dma.rx_buf_size,
|
||||
|
@ -523,6 +566,8 @@ static int serial_omap_startup(struct uart_port *port)
|
|||
/* Enable module level wake up */
|
||||
serial_out(up, UART_OMAP_WER, OMAP_UART_WER_MOD_WKUP);
|
||||
|
||||
pm_runtime_mark_last_busy(&up->pdev->dev);
|
||||
pm_runtime_put_autosuspend(&up->pdev->dev);
|
||||
up->port_activity = jiffies;
|
||||
return 0;
|
||||
}
|
||||
|
@ -532,7 +577,9 @@ static void serial_omap_shutdown(struct uart_port *port)
|
|||
struct uart_omap_port *up = (struct uart_omap_port *)port;
|
||||
unsigned long flags = 0;
|
||||
|
||||
dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->pdev->id);
|
||||
dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->port.line);
|
||||
|
||||
pm_runtime_get_sync(&up->pdev->dev);
|
||||
/*
|
||||
* Disable interrupts from this port
|
||||
*/
|
||||
|
@ -566,6 +613,8 @@ static void serial_omap_shutdown(struct uart_port *port)
|
|||
up->uart_dma.rx_buf_dma_phys);
|
||||
up->uart_dma.rx_buf = NULL;
|
||||
}
|
||||
|
||||
pm_runtime_put(&up->pdev->dev);
|
||||
free_irq(up->port.irq, up);
|
||||
}
|
||||
|
||||
|
@@ -573,8 +622,6 @@ static inline void
serial_omap_configure_xonxoff
(struct uart_omap_port *up, struct ktermios *termios)
{
unsigned char efr = 0;

up->lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
@@ -584,8 +631,7 @@ serial_omap_configure_xonxoff
serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]);

/* clear SW control mode bits */
efr = up->efr;
efr &= OMAP_UART_SW_CLR;
up->efr &= OMAP_UART_SW_CLR;

/*
* IXON Flag:
@@ -593,7 +639,7 @@ serial_omap_configure_xonxoff
* Transmit XON1, XOFF1
*/
if (termios->c_iflag & IXON)
efr |= OMAP_UART_SW_TX;
up->efr |= OMAP_UART_SW_TX;

/*
* IXOFF Flag:
@@ -601,7 +647,7 @@ serial_omap_configure_xonxoff
* Receiver compares XON1, XOFF1.
*/
if (termios->c_iflag & IXOFF)
efr |= OMAP_UART_SW_RX;
up->efr |= OMAP_UART_SW_RX;

serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
@@ -624,13 +670,21 @@ serial_omap_configure_xonxoff
* load the new software flow control mode IXON or IXOFF
* and restore the UARTi.EFR_REG[4] ENHANCED_EN value.
*/
serial_out(up, UART_EFR, efr | UART_EFR_SCD);
serial_out(up, UART_EFR, up->efr | UART_EFR_SCD);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);

serial_out(up, UART_MCR, up->mcr & ~UART_MCR_TCRTLR);
serial_out(up, UART_LCR, up->lcr);
}

static void serial_omap_uart_qos_work(struct work_struct *work)
{
struct uart_omap_port *up = container_of(work, struct uart_omap_port,
qos_work);

pm_qos_update_request(&up->pm_qos_request, up->latency);
}

static void
serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
@@ -671,6 +725,16 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
quot = serial_omap_get_divisor(port, baud);

/* calculate wakeup latency constraint */
up->calc_latency = (1000000 * up->port.fifosize) /
(1000 * baud / 8);
up->latency = up->calc_latency;
schedule_work(&up->qos_work);

up->dll = quot & 0xff;
up->dlh = quot >> 8;
up->mdr1 = UART_OMAP_MDR1_DISABLE;

up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
UART_FCR_ENABLE_FIFO;
if (up->use_dma)
@@ -680,6 +744,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
pm_runtime_get_sync(&up->pdev->dev);
spin_lock_irqsave(&up->port.lock, flags);

/*
@@ -723,6 +788,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, cval); /* reset DLAB */
up->lcr = cval;
up->scr = OMAP_UART_SCR_TX_EMPTY;

/* FIFOs and DMA Settings */

@@ -749,17 +816,22 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,

if (up->use_dma) {
serial_out(up, UART_TI752_TLR, 0);
serial_out(up, UART_OMAP_SCR,
(UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8));
up->scr |= (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8);
}

serial_out(up, UART_OMAP_SCR, up->scr);

serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr);

/* Protocol, Baud Rate, and Interrupt Settings */

serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, up->mdr1);
else
serial_out(up, UART_OMAP_MDR1, up->mdr1);

serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);

up->efr = serial_in(up, UART_EFR);
@@ -769,8 +841,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_IER, 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);

serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
serial_out(up, UART_DLL, up->dll); /* LS of divisor */
serial_out(up, UART_DLM, up->dlh); /* MS of divisor */

serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, up->ier);
@@ -780,9 +852,14 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_LCR, cval);

if (baud > 230400 && baud != 3000000)
serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_13X_MODE);
up->mdr1 = UART_OMAP_MDR1_13X_MODE;
else
serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
up->mdr1 = UART_OMAP_MDR1_16X_MODE;

if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, up->mdr1);
else
serial_out(up, UART_OMAP_MDR1, up->mdr1);

/* Hardware Flow Control Configuration */

@@ -809,7 +886,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_omap_configure_xonxoff(up, termios);

spin_unlock_irqrestore(&up->port.lock, flags);
dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
pm_runtime_put(&up->pdev->dev);
dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
}

static void
@@ -819,7 +897,9 @@ serial_omap_pm(struct uart_port *port, unsigned int state,
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned char efr;

dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->pdev->id);
dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->port.line);

pm_runtime_get_sync(&up->pdev->dev);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, efr | UART_EFR_ECB);
@@ -829,6 +909,15 @@ serial_omap_pm(struct uart_port *port, unsigned int state,
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);

if (!device_may_wakeup(&up->pdev->dev)) {
if (!state)
pm_runtime_forbid(&up->pdev->dev);
else
pm_runtime_allow(&up->pdev->dev);
}

pm_runtime_put(&up->pdev->dev);
}

static void serial_omap_release_port(struct uart_port *port)
@@ -847,7 +936,7 @@ static void serial_omap_config_port(struct uart_port *port, int flags)
struct uart_omap_port *up = (struct uart_omap_port *)port;

dev_dbg(up->port.dev, "serial_omap_config_port+%d\n",
up->pdev->id);
up->port.line);
up->port.type = PORT_OMAP;
}

@@ -864,7 +953,7 @@ serial_omap_type(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;

dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->pdev->id);
dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->port.line);
return up->name;
}

@@ -906,19 +995,26 @@ static inline void wait_for_xmitr(struct uart_omap_port *up)
static void serial_omap_poll_put_char(struct uart_port *port, unsigned char ch)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;

pm_runtime_get_sync(&up->pdev->dev);
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
pm_runtime_put(&up->pdev->dev);
}

static int serial_omap_poll_get_char(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned int status = serial_in(up, UART_LSR);
unsigned int status;

pm_runtime_get_sync(&up->pdev->dev);
status = serial_in(up, UART_LSR);
if (!(status & UART_LSR_DR))
return NO_POLL_CHAR;

return serial_in(up, UART_RX);
status = serial_in(up, UART_RX);
pm_runtime_put(&up->pdev->dev);
return status;
}

#endif /* CONFIG_CONSOLE_POLL */
@@ -946,6 +1042,8 @@ serial_omap_console_write(struct console *co, const char *s,
unsigned int ier;
int locked = 1;

pm_runtime_get_sync(&up->pdev->dev);

local_irq_save(flags);
if (up->port.sysrq)
locked = 0;
@@ -978,6 +1076,8 @@ serial_omap_console_write(struct console *co, const char *s,
if (up->msr_saved_flags)
check_modem_status(up);

pm_runtime_mark_last_busy(&up->pdev->dev);
pm_runtime_put_autosuspend(&up->pdev->dev);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
@@ -1014,7 +1114,7 @@ static struct console serial_omap_console = {

static void serial_omap_add_console_port(struct uart_omap_port *up)
{
serial_omap_console_ports[up->pdev->id] = up;
serial_omap_console_ports[up->port.line] = up;
}

#define OMAP_CONSOLE	(&serial_omap_console)
@@ -1060,26 +1160,30 @@ static struct uart_driver serial_omap_reg = {
.cons = OMAP_CONSOLE,
};

static int
serial_omap_suspend(struct platform_device *pdev, pm_message_t state)
#ifdef CONFIG_SUSPEND
static int serial_omap_suspend(struct device *dev)
{
struct uart_omap_port *up = platform_get_drvdata(pdev);
struct uart_omap_port *up = dev_get_drvdata(dev);

if (up)
if (up) {
uart_suspend_port(&serial_omap_reg, &up->port);
flush_work_sync(&up->qos_work);
}

return 0;
}

static int serial_omap_resume(struct platform_device *dev)
static int serial_omap_resume(struct device *dev)
{
struct uart_omap_port *up = platform_get_drvdata(dev);
struct uart_omap_port *up = dev_get_drvdata(dev);

if (up)
uart_resume_port(&serial_omap_reg, &up->port);
return 0;
}
#endif

static void serial_omap_rx_timeout(unsigned long uart_no)
static void serial_omap_rxdma_poll(unsigned long uart_no)
{
struct uart_omap_port *up = ui[uart_no];
unsigned int curr_dma_pos, curr_transmitted_size;
@@ -1089,9 +1193,9 @@ static void serial_omap_rx_timeout(unsigned long uart_no)
if ((curr_dma_pos == up->uart_dma.prev_rx_dma_pos) ||
(curr_dma_pos == 0)) {
if (jiffies_to_msecs(jiffies - up->port_activity) <
RX_TIMEOUT) {
up->uart_dma.rx_timeout) {
mod_timer(&up->uart_dma.rx_timer, jiffies +
usecs_to_jiffies(up->uart_dma.rx_timeout));
usecs_to_jiffies(up->uart_dma.rx_poll_rate));
} else {
serial_omap_stop_rxdma(up);
up->ier |= (UART_IER_RDI | UART_IER_RLSI);
@@ -1120,7 +1224,7 @@ static void serial_omap_rx_timeout(unsigned long uart_no)
}
} else {
mod_timer(&up->uart_dma.rx_timer, jiffies +
usecs_to_jiffies(up->uart_dma.rx_timeout));
usecs_to_jiffies(up->uart_dma.rx_poll_rate));
}
up->port_activity = jiffies;
}
@@ -1135,6 +1239,7 @@ static int serial_omap_start_rxdma(struct uart_omap_port *up)
int ret = 0;

if (up->uart_dma.rx_dma_channel == -1) {
pm_runtime_get_sync(&up->pdev->dev);
ret = omap_request_dma(up->uart_dma.uart_dma_rx,
"UART Rx DMA",
(void *)uart_rx_dma_callback, up,
@@ -1158,7 +1263,7 @@ static int serial_omap_start_rxdma(struct uart_omap_port *up)
/* FIXME: Cache maintenance needed here? */
omap_start_dma(up->uart_dma.rx_dma_channel);
mod_timer(&up->uart_dma.rx_timer, jiffies +
usecs_to_jiffies(up->uart_dma.rx_timeout));
usecs_to_jiffies(up->uart_dma.rx_poll_rate));
up->uart_dma.rx_dma_used = true;
return ret;
}
@@ -1221,6 +1326,19 @@ static void uart_tx_dma_callback(int lch, u16 ch_status, void *data)
return;
}

static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
{
struct omap_uart_port_info *omap_up_info;

omap_up_info = devm_kzalloc(dev, sizeof(*omap_up_info), GFP_KERNEL);
if (!omap_up_info)
return NULL; /* out of memory */

of_property_read_u32(dev->of_node, "clock-frequency",
&omap_up_info->uartclk);
return omap_up_info;
}

static int serial_omap_probe(struct platform_device *pdev)
{
struct uart_omap_port *up;
@@ -1228,6 +1346,9 @@ static int serial_omap_probe(struct platform_device *pdev)
struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
int ret = -ENOSPC;

if (pdev->dev.of_node)
omap_up_info = of_get_uart_port_info(&pdev->dev);

mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no mem resource?\n");
@@ -1263,7 +1384,6 @@ static int serial_omap_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto do_release_region;
}
sprintf(up->name, "OMAP UART%d", pdev->id);
up->pdev = pdev;
up->port.dev = &pdev->dev;
up->port.type = PORT_OMAP;
@@ -1273,34 +1393,74 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.regshift = 2;
up->port.fifosize = 64;
up->port.ops = &serial_omap_pops;
up->port.line = pdev->id;

up->port.membase = omap_up_info->membase;
up->port.mapbase = omap_up_info->mapbase;
if (pdev->dev.of_node)
up->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
else
up->port.line = pdev->id;

if (up->port.line < 0) {
dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
up->port.line);
ret = -ENODEV;
goto err;
}

sprintf(up->name, "OMAP UART%d", up->port.line);
up->port.mapbase = mem->start;
up->port.membase = ioremap(mem->start, resource_size(mem));
if (!up->port.membase) {
dev_err(&pdev->dev, "can't ioremap UART\n");
ret = -ENOMEM;
goto err;
}

up->port.flags = omap_up_info->flags;
up->port.irqflags = omap_up_info->irqflags;
up->port.uartclk = omap_up_info->uartclk;
if (!up->port.uartclk) {
up->port.uartclk = DEFAULT_CLK_SPEED;
dev_warn(&pdev->dev, "No clock speed specified: using default:"
"%d\n", DEFAULT_CLK_SPEED);
}
up->uart_dma.uart_base = mem->start;
up->errata = omap_up_info->errata;

if (omap_up_info->dma_enabled) {
up->uart_dma.uart_dma_tx = dma_tx->start;
up->uart_dma.uart_dma_rx = dma_rx->start;
up->use_dma = 1;
up->uart_dma.rx_buf_size = 4096;
up->uart_dma.rx_timeout = 2;
up->uart_dma.rx_buf_size = omap_up_info->dma_rx_buf_size;
up->uart_dma.rx_timeout = omap_up_info->dma_rx_timeout;
up->uart_dma.rx_poll_rate = omap_up_info->dma_rx_poll_rate;
spin_lock_init(&(up->uart_dma.tx_lock));
spin_lock_init(&(up->uart_dma.rx_lock));
up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
}

ui[pdev->id] = up;
up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
pm_qos_add_request(&up->pm_qos_request,
PM_QOS_CPU_DMA_LATENCY, up->latency);
serial_omap_uart_wq = create_singlethread_workqueue(up->name);
INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);

pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev,
omap_up_info->autosuspend_timeout);

pm_runtime_irq_safe(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);

ui[up->port.line] = up;
serial_omap_add_console_port(up);

ret = uart_add_one_port(&serial_omap_reg, &up->port);
if (ret != 0)
goto do_release_region;

pm_runtime_put(&pdev->dev);
platform_set_drvdata(pdev, up);
return 0;
err:
@@ -1315,22 +1475,168 @@ static int serial_omap_remove(struct platform_device *dev)
{
struct uart_omap_port *up = platform_get_drvdata(dev);

platform_set_drvdata(dev, NULL);
if (up) {
pm_runtime_disable(&up->pdev->dev);
uart_remove_one_port(&serial_omap_reg, &up->port);
pm_qos_remove_request(&up->pm_qos_request);

kfree(up);
}

platform_set_drvdata(dev, NULL);
return 0;
}

/*
* Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460)
* The access to uart register after MDR1 Access
* causes UART to corrupt data.
*
* Need a delay =
* 5 L4 clock cycles + 5 UART functional clock cycle (@48MHz = ~0.2uS)
* give 10 times as much
*/
static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1)
{
u8 timeout = 255;

serial_out(up, UART_OMAP_MDR1, mdr1);
udelay(2);
serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
UART_FCR_CLEAR_RCVR);
/*
* Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
* TX_FIFO_E bit is 1.
*/
while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
(UART_LSR_THRE | UART_LSR_DR))) {
timeout--;
if (!timeout) {
/* Should *never* happen. we warn and carry on */
dev_crit(&up->pdev->dev, "Errata i202: timedout %x\n",
serial_in(up, UART_LSR));
break;
}
udelay(1);
}
}

static void serial_omap_restore_context(struct uart_omap_port *up)
{
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, UART_OMAP_MDR1_DISABLE);
else
serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);

serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
serial_out(up, UART_EFR, UART_EFR_ECB);
serial_out(up, UART_LCR, 0x0); /* Operational mode */
serial_out(up, UART_IER, 0x0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
serial_out(up, UART_DLL, up->dll);
serial_out(up, UART_DLM, up->dlh);
serial_out(up, UART_LCR, 0x0); /* Operational mode */
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_FCR, up->fcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
serial_out(up, UART_OMAP_SCR, up->scr);
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, up->lcr);
if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
serial_omap_mdr1_errataset(up, up->mdr1);
else
serial_out(up, UART_OMAP_MDR1, up->mdr1);
}

#ifdef CONFIG_PM_RUNTIME
static int serial_omap_runtime_suspend(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
struct omap_uart_port_info *pdata = dev->platform_data;

if (!up)
return -EINVAL;

if (!pdata || !pdata->enable_wakeup)
return 0;

if (pdata->get_context_loss_count)
up->context_loss_cnt = pdata->get_context_loss_count(dev);

if (device_may_wakeup(dev)) {
if (!up->wakeups_enabled) {
pdata->enable_wakeup(up->pdev, true);
up->wakeups_enabled = true;
}
} else {
if (up->wakeups_enabled) {
pdata->enable_wakeup(up->pdev, false);
up->wakeups_enabled = false;
}
}

/* Errata i291 */
if (up->use_dma && pdata->set_forceidle &&
(up->errata & UART_ERRATA_i291_DMA_FORCEIDLE))
pdata->set_forceidle(up->pdev);

up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
schedule_work(&up->qos_work);

return 0;
}

static int serial_omap_runtime_resume(struct device *dev)
{
struct uart_omap_port *up = dev_get_drvdata(dev);
struct omap_uart_port_info *pdata = dev->platform_data;

if (up) {
if (pdata->get_context_loss_count) {
u32 loss_cnt = pdata->get_context_loss_count(dev);

if (up->context_loss_cnt != loss_cnt)
serial_omap_restore_context(up);
}

/* Errata i291 */
if (up->use_dma && pdata->set_noidle &&
(up->errata & UART_ERRATA_i291_DMA_FORCEIDLE))
pdata->set_noidle(up->pdev);

up->latency = up->calc_latency;
schedule_work(&up->qos_work);
}

return 0;
}
#endif

static const struct dev_pm_ops serial_omap_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(serial_omap_suspend, serial_omap_resume)
SET_RUNTIME_PM_OPS(serial_omap_runtime_suspend,
serial_omap_runtime_resume, NULL)
};

#if defined(CONFIG_OF)
static const struct of_device_id omap_serial_of_match[] = {
{ .compatible = "ti,omap2-uart" },
{ .compatible = "ti,omap3-uart" },
{ .compatible = "ti,omap4-uart" },
{},
};
MODULE_DEVICE_TABLE(of, omap_serial_of_match);
#endif

static struct platform_driver serial_omap_driver = {
.probe = serial_omap_probe,
.remove = serial_omap_remove,

.suspend = serial_omap_suspend,
.resume = serial_omap_resume,
.driver = {
.name = DRIVER_NAME,
.pm = &serial_omap_dev_pm_ops,
.of_match_table = of_match_ptr(omap_serial_of_match),
},
};
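
For context, the device-tree probe path added above binds on the compatible strings in omap_serial_of_match, takes the port number from a "serial<n>" alias via of_alias_get_id(), and reads an optional clock-frequency property in of_get_uart_port_info(), falling back to DEFAULT_CLK_SPEED when it is absent. A minimal sketch of the kind of node this code could consume is shown below; it is not part of this patch, and the label, unit address and frequency value are placeholders, not taken from any real board file:

/* Hypothetical example only -- values are illustrative placeholders. */
aliases {
	serial0 = &uart1;		/* consumed by of_alias_get_id(np, "serial") */
};

uart1: serial@deadbeef {
	compatible = "ti,omap3-uart";	/* one of the entries in omap_serial_of_match */
	clock-frequency = <48000000>;	/* optional; the driver warns and uses its default if omitted */
};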