WSL2-Linux-Kernel/arch/arm/mach-omap2/pm34xx.c

/*
* OMAP3 Power Management Routines
*
* Copyright (C) 2006-2008 Nokia Corporation
* Tony Lindgren <tony@atomide.com>
* Jouni Hogander
*
* Copyright (C) 2007 Texas Instruments, Inc.
* Rajendra Nayak <rnayak@ti.com>
*
* Copyright (C) 2005 Texas Instruments, Inc.
* Richard Woodruff <r-woodruff2@ti.com>
*
* Based on pm.c for omap1
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <plat/sram.h>
#include <plat/clockdomain.h>
#include <plat/powerdomain.h>
#include <plat/control.h>
#include <plat/serial.h>
#include <plat/sdrc.h>
#include <plat/prcm.h>
#include <plat/gpmc.h>
#include <plat/dma.h>
#include <asm/tlbflush.h>
#include "cm.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"
#include "prm.h"
#include "pm.h"
#include "sdrc.h"
/* Scratchpad offsets */
#define OMAP343X_TABLE_ADDRESS_OFFSET 0x31
#define OMAP343X_TABLE_VALUE_OFFSET 0x30
#define OMAP343X_CONTROL_REG_VALUE_OFFSET 0x32
struct power_state {
struct powerdomain *pwrdm;
u32 next_state;
#ifdef CONFIG_SUSPEND
u32 saved_state;
#endif
struct list_head node;
};
static LIST_HEAD(pwrst_list);
static void (*_omap_sram_idle)(u32 *addr, int save_state);
static int (*_omap_save_secure_sram)(u32 *addr);
static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;
static struct powerdomain *cam_pwrdm;
static inline void omap3_per_save_context(void)
{
omap_gpio_save_context();
}
static inline void omap3_per_restore_context(void)
{
omap_gpio_restore_context();
}
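/*
* Arm the I/O wakeup daisy chain (ES3.1 and later only): set
* EN_IO_CHAIN in PM_WKEN, then poll for ST_IO_CHAIN, giving up
* with an error message after ~1000 retries.
*/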
static void omap3_enable_io_chain(void)
{
int timeout = 0;
if (omap_rev() >= OMAP3430_REV_ES3_1) {
prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
PM_WKEN);
/* Do a readback to ensure the write has completed */
prm_read_mod_reg(WKUP_MOD, PM_WKEN);
while (!(prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
OMAP3430_ST_IO_CHAIN_MASK)) {
timeout++;
if (timeout > 1000) {
printk(KERN_ERR "Wake up daisy chain "
"activation failed.\n");
return;
}
prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
WKUP_MOD, PM_WKEN);
}
}
}
static void omap3_disable_io_chain(void)
{
if (omap_rev() >= OMAP3430_REV_ES3_1)
prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
PM_WKEN);
}
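/*
* Save CORE-domain context before off-mode: trigger the hardware
* padconf save in the control module, then save the interrupt
* controller, GPMC, system control module and DMA contexts.
*/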
static void omap3_core_save_context(void)
{
u32 control_padconf_off;
/* Save the padconf registers */
control_padconf_off = omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_OFF);
control_padconf_off |= START_PADCONF_SAVE;
omap_ctrl_writel(control_padconf_off, OMAP343X_CONTROL_PADCONF_OFF);
/* wait for the save to complete */
while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
& PADCONF_SAVE_DONE))
udelay(1);
/*
* Force write last pad into memory, as this can fail in some
* cases according to errata 1.157 and 1.185
*/
omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
OMAP343X_CONTROL_MEM_WKUP + 0x2a0);
/* Save the Interrupt controller context */
omap_intc_save_context();
/* Save the GPMC context */
omap3_gpmc_save_context();
/* Save the system control module context; padconf already saved above */
omap3_control_save_context();
omap_dma_global_context_save();
}
static void omap3_core_restore_context(void)
{
/* Restore the control module context, padconf restored by h/w */
omap3_control_restore_context();
/* Restore the GPMC context */
omap3_gpmc_restore_context();
/* Restore the interrupt controller context */
omap_intc_restore_context();
omap_dma_global_context_restore();
}
/*
* FIXME: This function should be called before entering off-mode after
* OMAP3 secure services have been accessed. Currently it is only called
* once during boot sequence, but this works as we are not using secure
* services.
*/
static void omap3_save_secure_ram_context(u32 target_mpu_state)
{
u32 ret;
if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
/*
* MPU next state must be set to POWER_ON temporarily,
* otherwise the WFI executed inside the ROM code
* will hang the system.
*/
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
ret = _omap_save_secure_sram((u32 *)
__pa(omap3_secure_ram_storage));
pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
/* Following is for error tracking, it should not happen */
if (ret) {
printk(KERN_ERR "save_secure_sram() returns %08x\n",
ret);
while (1)
;
}
}
}
/*
* PRCM Interrupt Handler Helper Function
*
* The purpose of this function is to clear any wake-up events latched
* in the PRCM PM_WKST_x registers. It is possible that a wake-up event
* may occur whilst attempting to clear a PM_WKST_x register and thus
* set another bit in this register. A while loop is used to ensure
* that any peripheral wake-up events occurring while attempting to
* clear the PM_WKST_x are detected and cleared.
*/
static int prcm_clear_mod_irqs(s16 module, u8 regs)
{
u32 wkst, fclk, iclk, clken;
u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
u16 grpsel_off = (regs == 3) ?
OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
int c = 0;
wkst = prm_read_mod_reg(module, wkst_off);
wkst &= prm_read_mod_reg(module, grpsel_off);
if (wkst) {
iclk = cm_read_mod_reg(module, iclk_off);
fclk = cm_read_mod_reg(module, fclk_off);
while (wkst) {
clken = wkst;
cm_set_mod_reg_bits(clken, module, iclk_off);
/*
* For USBHOST, we don't know whether HOST1 or
* HOST2 woke us up, so enable both f-clocks
*/
if (module == OMAP3430ES2_USBHOST_MOD)
clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
cm_set_mod_reg_bits(clken, module, fclk_off);
prm_write_mod_reg(wkst, module, wkst_off);
wkst = prm_read_mod_reg(module, wkst_off);
c++;
}
cm_write_mod_reg(iclk, module, iclk_off);
cm_write_mod_reg(fclk, module, fclk_off);
}
return c;
}
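/*
* Clear the latched wake-up events of every module that can generate
* them (WKUP, CORE and PER, plus the CORE "3" registers and USBHOST
* on ES2 and later). Returns the number of events cleared so the
* caller can tell whether any wake-up source was actually found.
*/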
static int _prcm_int_handle_wakeup(void)
{
int c;
c = prcm_clear_mod_irqs(WKUP_MOD, 1);
c += prcm_clear_mod_irqs(CORE_MOD, 1);
c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
if (omap_rev() > OMAP3430_REV_ES1_0) {
c += prcm_clear_mod_irqs(CORE_MOD, 3);
c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
}
return c;
}
/*
* PRCM Interrupt Handler
*
* The PRM_IRQSTATUS_MPU register indicates if there are any pending
* interrupts from the PRCM for the MPU. These bits must be cleared in
* order to clear the PRCM interrupt. The PRCM interrupt handler is
* implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
* the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
* register indicates that a wake-up event is pending for the MPU and
* this bit can only be cleared if all the wake-up events latched
* in the various PM_WKST_x registers have been cleared. The interrupt
* handler is implemented using a do-while loop so that any wake-up
* event that occurs while the PRCM interrupt handler is running
* (setting a bit in the corresponding PM_WKST_x register and thus
* preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
* is also handled.
*/
static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
{
u32 irqenable_mpu, irqstatus_mpu;
int c = 0;
irqenable_mpu = prm_read_mod_reg(OCP_MOD,
OMAP3_PRM_IRQENABLE_MPU_OFFSET);
irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
irqstatus_mpu &= irqenable_mpu;
do {
if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
OMAP3430_IO_ST_MASK)) {
c = _prcm_int_handle_wakeup();
/*
* Is the MPU PRCM interrupt handler racing with the
* IVA2 PRCM interrupt handler ?
*/
WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
"but no wakeup sources are marked\n");
} else {
/* XXX we need to expand our PRCM interrupt handler */
WARN(1, "prcm: WARNING: PRCM interrupt received, but "
"no code to handle it (%08x)\n", irqstatus_mpu);
}
prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
irqstatus_mpu &= irqenable_mpu;
} while (irqstatus_mpu);
return IRQ_HANDLED;
}
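/* Restore the ARM control register (CP15 c1, SCTLR) */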
static void restore_control_register(u32 val)
{
__asm__ __volatile__ ("mcr p15, 0, %0, c1, c0, 0" : : "r" (val));
}
/* Function to restore the table entry that was modified for enabling MMU */
static void restore_table_entry(void)
{
u32 *scratchpad_address;
u32 previous_value, control_reg_value;
u32 *address;
scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);
/* Get address of entry that was modified */
address = (u32 *)__raw_readl(scratchpad_address +
OMAP343X_TABLE_ADDRESS_OFFSET);
/* Get the previous value which needs to be restored */
previous_value = __raw_readl(scratchpad_address +
OMAP343X_TABLE_VALUE_OFFSET);
address = __va(address);
*address = previous_value;
flush_tlb_all();
control_reg_value = __raw_readl(scratchpad_address
+ OMAP343X_CONTROL_REG_VALUE_OFFSET);
/* This will enable caches and prediction */
restore_control_register(control_reg_value);
}
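/*
* omap_sram_idle - main low-power entry point. Decide how much
* MPU/CORE/PER context must be saved from the programmed next power
* states, prepare the UARTs, GPIOs and interrupt controller, then
* branch to the suspend code previously copied into SRAM. On wake-up,
* restore whatever context was lost and disable the I/O wake-ups again.
*/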
void omap_sram_idle(void)
{
/*
* save_state tells omap_sram_idle what needs to be saved and restored:
* save_state = 0 => nothing to save and restore
* save_state = 1 => only L1 and logic lost
* save_state = 2 => only L2 lost
* save_state = 3 => L1, L2 and logic lost
*/
int save_state = 0;
int mpu_next_state = PWRDM_POWER_ON;
int per_next_state = PWRDM_POWER_ON;
int core_next_state = PWRDM_POWER_ON;
int core_prev_state, per_prev_state;
u32 sdrc_pwr = 0;
if (!_omap_sram_idle)
return;
pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
pwrdm_clear_all_prev_pwrst(neon_pwrdm);
pwrdm_clear_all_prev_pwrst(core_pwrdm);
pwrdm_clear_all_prev_pwrst(per_pwrdm);
mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
switch (mpu_next_state) {
case PWRDM_POWER_ON:
case PWRDM_POWER_RET:
/* No need to save context */
save_state = 0;
break;
case PWRDM_POWER_OFF:
save_state = 3;
break;
default:
/* Invalid state */
printk(KERN_ERR "Invalid mpu state in sram_idle\n");
return;
}
pwrdm_pre_transition();
/* NEON control */
if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);
/* Enable IO-PAD and IO-CHAIN wakeups */
per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
if (omap3_has_io_wakeup() &&
(per_next_state < PWRDM_POWER_ON ||
core_next_state < PWRDM_POWER_ON)) {
prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
omap3_enable_io_chain();
}
/* PER */
if (per_next_state < PWRDM_POWER_ON) {
omap_uart_prepare_idle(2);
omap2_gpio_prepare_for_idle(per_next_state);
if (per_next_state == PWRDM_POWER_OFF)
omap3_per_save_context();
}
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {
omap_uart_prepare_idle(0);
omap_uart_prepare_idle(1);
if (core_next_state == PWRDM_POWER_OFF) {
omap3_core_save_context();
omap3_prcm_save_context();
}
}
omap3_intc_prepare_idle();
/*
* On EMU/HS devices the ROM code restores an SDRC value
* from scratchpad which has automatic self refresh on timeout
* of AUTO_CNT = 1 enabled. This takes care of errata 1.142.
* Hence store/restore the SDRC_POWER register here.
*/
if (omap_rev() >= OMAP3430_REV_ES3_0 &&
omap_type() != OMAP2_DEVICE_TYPE_GP &&
core_next_state == PWRDM_POWER_OFF)
sdrc_pwr = sdrc_read_reg(SDRC_POWER);
/*
* omap3_arm_context is the location where ARM registers
* get saved. The restore path then reads from this
* location and restores them back.
*/
_omap_sram_idle(omap3_arm_context, save_state);
cpu_init();
/* Restore normal SDRC POWER settings */
if (omap_rev() >= OMAP3430_REV_ES3_0 &&
omap_type() != OMAP2_DEVICE_TYPE_GP &&
core_next_state == PWRDM_POWER_OFF)
sdrc_write_reg(sdrc_pwr, SDRC_POWER);
/* Restore table entry modified during MMU restoration */
if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
restore_table_entry();
/* CORE */
if (core_next_state < PWRDM_POWER_ON) {
core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
if (core_prev_state == PWRDM_POWER_OFF) {
omap3_core_restore_context();
omap3_prcm_restore_context();
omap3_sram_restore_context();
omap2_sms_restore_context();
}
omap_uart_resume_idle(0);
omap_uart_resume_idle(1);
if (core_next_state == PWRDM_POWER_OFF)
prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
OMAP3430_GR_MOD,
OMAP3_PRM_VOLTCTRL_OFFSET);
}
omap3_intc_resume_idle();
/* PER */
if (per_next_state < PWRDM_POWER_ON) {
per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
omap2_gpio_resume_after_idle();
if (per_prev_state == PWRDM_POWER_OFF)
omap3_per_restore_context();
omap_uart_resume_idle(2);
}
/* Disable IO-PAD and IO-CHAIN wakeup */
if (omap3_has_io_wakeup() &&
(per_next_state < PWRDM_POWER_ON ||
core_next_state < PWRDM_POWER_ON)) {
prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
omap3_disable_io_chain();
}
pwrdm_post_transition();
omap2_clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
}
int omap3_can_sleep(void)
{
if (!sleep_while_idle)
return 0;
if (!omap_uart_can_sleep())
return 0;
return 1;
}
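/*
* Idle hook installed as pm_idle: enter omap_sram_idle() only when
* sleeping is allowed and no interrupt or reschedule is pending.
*/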
static void omap3_pm_idle(void)
{
local_irq_disable();
local_fiq_disable();
if (!omap3_can_sleep())
goto out;
if (omap_irq_pending() || need_resched())
goto out;
omap_sram_idle();
out:
local_fiq_enable();
local_irq_enable();
}
#ifdef CONFIG_SUSPEND
static suspend_state_t suspend_state;
static int omap3_pm_prepare(void)
{
disable_hlt();
return 0;
}
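/*
* System suspend path: optionally program a wake-up timer, remember the
* current next-power-states, program the states requested for suspend,
* run omap_sram_idle(), then check that each powerdomain reached its
* target state before restoring the saved settings.
*/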
static int omap3_pm_suspend(void)
{
struct power_state *pwrst;
int state, ret = 0;
if (wakeup_timer_seconds || wakeup_timer_milliseconds)
omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
wakeup_timer_milliseconds);
/* Read current next_pwrsts */
list_for_each_entry(pwrst, &pwrst_list, node)
pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
/* Set ones wanted by suspend */
list_for_each_entry(pwrst, &pwrst_list, node) {
if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
goto restore;
if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
goto restore;
}
omap_uart_prepare_suspend();
omap3_intc_suspend();
omap_sram_idle();
restore:
/* Restore next_pwrsts */
list_for_each_entry(pwrst, &pwrst_list, node) {
state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
if (state > pwrst->next_state) {
printk(KERN_INFO "Powerdomain (%s) didn't enter "
"target state %d\n",
pwrst->pwrdm->name, pwrst->next_state);
ret = -1;
}
omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
}
if (ret)
printk(KERN_ERR "Could not enter target state in pm_suspend\n");
else
printk(KERN_INFO "Successfully put all powerdomains "
"to target state\n");
return ret;
}
static int omap3_pm_enter(suspend_state_t unused)
{
int ret = 0;
switch (suspend_state) {
case PM_SUSPEND_STANDBY:
case PM_SUSPEND_MEM:
ret = omap3_pm_suspend();
break;
default:
ret = -EINVAL;
}
return ret;
}
static void omap3_pm_finish(void)
{
enable_hlt();
}
/* Hooks to enable / disable UART interrupts during suspend */
static int omap3_pm_begin(suspend_state_t state)
{
suspend_state = state;
omap_uart_enable_irqs(0);
return 0;
}
static void omap3_pm_end(void)
{
suspend_state = PM_SUSPEND_ON;
omap_uart_enable_irqs(1);
return;
}
static struct platform_suspend_ops omap_pm_ops = {
.begin = omap3_pm_begin,
.end = omap3_pm_end,
.prepare = omap3_pm_prepare,
.enter = omap3_pm_enter,
.finish = omap3_pm_finish,
.valid = suspend_valid_only_mem,
};
#endif /* CONFIG_SUSPEND */
/**
* omap3_iva_idle(): ensure IVA is in idle so it can be put into
* retention
*
* In cases where IVA2 is activated by bootcode, it may prevent
* full-chip retention or off-mode because it is not idle. This
* function forces the IVA2 into idle state so it can go
* into retention/off and thus allow full-chip retention/off.
*
**/
static void __init omap3_iva_idle(void)
{
/* ensure IVA2 clock is disabled */
cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
/* if no clock activity, nothing else to do */
if (!(cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
OMAP3430_CLKACTIVITY_IVA2_MASK))
return;
/* Reset IVA2 */
prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
OMAP3430_RST2_IVA2_MASK |
OMAP3430_RST3_IVA2_MASK,
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
/* Enable IVA2 clock */
cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
OMAP3430_IVA2_MOD, CM_FCLKEN);
/* Set IVA2 boot mode to 'idle' */
omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
OMAP343X_CONTROL_IVA2_BOOTMOD);
/* Un-reset IVA2 */
prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
/* Disable IVA2 clock */
cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);
/* Reset IVA2 */
prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
OMAP3430_RST2_IVA2_MASK |
OMAP3430_RST3_IVA2_MASK,
OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
}
static void __init omap3_d2d_idle(void)
{
u16 mask, padconf;
/* In a standalone OMAP3430 where there is no stacked modem,
* D2D Idle Ack and D2D MStandby must be pulled high. Set
* CONTROL_PADCONF_SAD2D_IDLEACK and CONTROL_PADCONF_SAD2D_MSTDBY
* to have a pull-up. */
mask = (1 << 4) | (1 << 3); /* pull-up, enabled */
padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY);
padconf |= mask;
omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY);
padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK);
padconf |= mask;
omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);
/* reset modem */
prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
CORE_MOD, OMAP2_RM_RSTCTRL);
prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
}
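/*
* One-time PRCM setup: clear all wake-up dependencies, enable interface
* clock and DPLL autoidle, route the wanted wake-up events to the MPU,
* clear stale reset and interrupt status, and quiesce the IVA2 and the
* D2D/modem interface.
*/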
static void __init prcm_setup_regs(void)
{
u32 omap3630_auto_uart4_mask = cpu_is_omap3630() ?
OMAP3630_AUTO_UART4_MASK : 0;
u32 omap3630_en_uart4_mask = cpu_is_omap3630() ?
OMAP3630_EN_UART4_MASK : 0;
u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ?
OMAP3630_GRPSEL_UART4_MASK : 0;
/* XXX Reset all wkdeps. This should be done when initializing
* powerdomains */
prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
if (omap_rev() > OMAP3430_REV_ES1_0) {
prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
} else
prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);
/*
* Enable interface clock autoidle for all modules.
* Note that in the long run this should be done by clockfw
*/
cm_write_mod_reg(
OMAP3430_AUTO_MODEM_MASK |
OMAP3430ES2_AUTO_MMC3_MASK |
OMAP3430ES2_AUTO_ICR_MASK |
OMAP3430_AUTO_AES2_MASK |
OMAP3430_AUTO_SHA12_MASK |
OMAP3430_AUTO_DES2_MASK |
OMAP3430_AUTO_MMC2_MASK |
OMAP3430_AUTO_MMC1_MASK |
OMAP3430_AUTO_MSPRO_MASK |
OMAP3430_AUTO_HDQ_MASK |
OMAP3430_AUTO_MCSPI4_MASK |
OMAP3430_AUTO_MCSPI3_MASK |
OMAP3430_AUTO_MCSPI2_MASK |
OMAP3430_AUTO_MCSPI1_MASK |
OMAP3430_AUTO_I2C3_MASK |
OMAP3430_AUTO_I2C2_MASK |
OMAP3430_AUTO_I2C1_MASK |
OMAP3430_AUTO_UART2_MASK |
OMAP3430_AUTO_UART1_MASK |
OMAP3430_AUTO_GPT11_MASK |
OMAP3430_AUTO_GPT10_MASK |
OMAP3430_AUTO_MCBSP5_MASK |
OMAP3430_AUTO_MCBSP1_MASK |
OMAP3430ES1_AUTO_FAC_MASK | /* This is es1 only */
OMAP3430_AUTO_MAILBOXES_MASK |
OMAP3430_AUTO_OMAPCTRL_MASK |
OMAP3430ES1_AUTO_FSHOSTUSB_MASK |
OMAP3430_AUTO_HSOTGUSB_MASK |
OMAP3430_AUTO_SAD2D_MASK |
OMAP3430_AUTO_SSI_MASK,
CORE_MOD, CM_AUTOIDLE1);
cm_write_mod_reg(
OMAP3430_AUTO_PKA_MASK |
OMAP3430_AUTO_AES1_MASK |
OMAP3430_AUTO_RNG_MASK |
OMAP3430_AUTO_SHA11_MASK |
OMAP3430_AUTO_DES1_MASK,
CORE_MOD, CM_AUTOIDLE2);
if (omap_rev() > OMAP3430_REV_ES1_0) {
cm_write_mod_reg(
OMAP3430_AUTO_MAD2D_MASK |
OMAP3430ES2_AUTO_USBTLL_MASK,
CORE_MOD, CM_AUTOIDLE3);
}
cm_write_mod_reg(
OMAP3430_AUTO_WDT2_MASK |
OMAP3430_AUTO_WDT1_MASK |
OMAP3430_AUTO_GPIO1_MASK |
OMAP3430_AUTO_32KSYNC_MASK |
OMAP3430_AUTO_GPT12_MASK |
OMAP3430_AUTO_GPT1_MASK,
WKUP_MOD, CM_AUTOIDLE);
cm_write_mod_reg(
OMAP3430_AUTO_DSS_MASK,
OMAP3430_DSS_MOD,
CM_AUTOIDLE);
cm_write_mod_reg(
OMAP3430_AUTO_CAM_MASK,
OMAP3430_CAM_MOD,
CM_AUTOIDLE);
cm_write_mod_reg(
omap3630_auto_uart4_mask |
OMAP3430_AUTO_GPIO6_MASK |
OMAP3430_AUTO_GPIO5_MASK |
OMAP3430_AUTO_GPIO4_MASK |
OMAP3430_AUTO_GPIO3_MASK |
OMAP3430_AUTO_GPIO2_MASK |
OMAP3430_AUTO_WDT3_MASK |
OMAP3430_AUTO_UART3_MASK |
OMAP3430_AUTO_GPT9_MASK |
OMAP3430_AUTO_GPT8_MASK |
OMAP3430_AUTO_GPT7_MASK |
OMAP3430_AUTO_GPT6_MASK |
OMAP3430_AUTO_GPT5_MASK |
OMAP3430_AUTO_GPT4_MASK |
OMAP3430_AUTO_GPT3_MASK |
OMAP3430_AUTO_GPT2_MASK |
OMAP3430_AUTO_MCBSP4_MASK |
OMAP3430_AUTO_MCBSP3_MASK |
OMAP3430_AUTO_MCBSP2_MASK,
OMAP3430_PER_MOD,
CM_AUTOIDLE);
if (omap_rev() > OMAP3430_REV_ES1_0) {
cm_write_mod_reg(
OMAP3430ES2_AUTO_USBHOST_MASK,
OMAP3430ES2_USBHOST_MOD,
CM_AUTOIDLE);
}
omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);
/*
* Set all plls to autoidle. This is needed until autoidle is
* enabled by clockfw
*/
cm_write_mod_reg(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
cm_write_mod_reg(1 << OMAP3430_AUTO_MPU_DPLL_SHIFT,
MPU_MOD,
CM_AUTOIDLE2);
cm_write_mod_reg((1 << OMAP3430_AUTO_PERIPH_DPLL_SHIFT) |
(1 << OMAP3430_AUTO_CORE_DPLL_SHIFT),
PLL_MOD,
CM_AUTOIDLE);
cm_write_mod_reg(1 << OMAP3430ES2_AUTO_PERIPH2_DPLL_SHIFT,
PLL_MOD,
CM_AUTOIDLE2);
/*
* Enable control of the external oscillator through
* sys_clkreq. In the long run clock framework should
* take care of this.
*/
prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
1 << OMAP_AUTOEXTCLKMODE_SHIFT,
OMAP3430_GR_MOD,
OMAP3_PRM_CLKSRC_CTRL_OFFSET);
/* Set up wakeup sources */
prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
WKUP_MOD, PM_WKEN);
/* No need to write EN_IO, that is always enabled */
prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
OMAP3430_GRPSEL_GPT1_MASK |
OMAP3430_GRPSEL_GPT12_MASK,
WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
/* For some reason IO doesn't generate a wakeup event even if
* it is selected for the MPU wakeup group */
prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);
/* Enable PM_WKEN to support DSS LPR */
prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
OMAP3430_DSS_MOD, PM_WKEN);
/* Enable wakeups in PER */
prm_write_mod_reg(omap3630_en_uart4_mask |
OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
OMAP3430_EN_MCBSP4_MASK,
OMAP3430_PER_MOD, PM_WKEN);
/* and allow them to wake up MPU */
prm_write_mod_reg(omap3630_grpsel_uart4_mask |
OMAP3430_GRPSEL_GPIO2_MASK |
OMAP3430_GRPSEL_GPIO3_MASK |
OMAP3430_GRPSEL_GPIO4_MASK |
OMAP3430_GRPSEL_GPIO5_MASK |
OMAP3430_GRPSEL_GPIO6_MASK |
OMAP3430_GRPSEL_UART3_MASK |
OMAP3430_GRPSEL_MCBSP2_MASK |
OMAP3430_GRPSEL_MCBSP3_MASK |
OMAP3430_GRPSEL_MCBSP4_MASK,
OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);
/* Don't attach IVA interrupts */
prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);
/* Clear any pending 'reset' flags */
prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);
/* Clear any pending PRCM interrupts */
prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
omap3_iva_idle();
omap3_d2d_idle();
}
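/*
* Switch the idle target of every registered powerdomain between OFF
* and RET and reprogram its next power state accordingly.
*/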
void omap3_pm_off_mode_enable(int enable)
{
struct power_state *pwrst;
u32 state;
if (enable)
state = PWRDM_POWER_OFF;
else
state = PWRDM_POWER_RET;
#ifdef CONFIG_CPU_IDLE
omap3_cpuidle_update_states();
#endif
list_for_each_entry(pwrst, &pwrst_list, node) {
pwrst->next_state = state;
omap_set_pwrdm_state(pwrst->pwrdm, state);
}
}
int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
{
struct power_state *pwrst;
list_for_each_entry(pwrst, &pwrst_list, node) {
if (pwrst->pwrdm == pwrdm)
return pwrst->next_state;
}
return -EINVAL;
}
int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
{
struct power_state *pwrst;
list_for_each_entry(pwrst, &pwrst_list, node) {
if (pwrst->pwrdm == pwrdm) {
pwrst->next_state = state;
return 0;
}
}
return -EINVAL;
}
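/*
* pwrdm_for_each() callback: allocate a power_state entry for each
* powerdomain that supports power states, default its target to RET
* and enable hardware save-and-restore where available.
*/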
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
struct power_state *pwrst;
if (!pwrdm->pwrsts)
return 0;
pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
if (!pwrst)
return -ENOMEM;
pwrst->pwrdm = pwrdm;
pwrst->next_state = PWRDM_POWER_RET;
list_add(&pwrst->node, &pwrst_list);
if (pwrdm_has_hdwr_sar(pwrdm))
pwrdm_enable_hdwr_sar(pwrdm);
return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}
/*
* Enable hw supervised mode for all clockdomains if it's
* supported. Initiate sleep transition for other clockdomains, if
* they are not used
*/
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
{
if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
omap2_clkdm_allow_idle(clkdm);
else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
atomic_read(&clkdm->usecount) == 0)
omap2_clkdm_sleep(clkdm);
return 0;
}
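/*
* Copy the CPU suspend code (and, on non-GP devices, the secure RAM
* save code) into SRAM and keep the returned entry points.
*/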
void omap_push_sram_idle(void)
{
_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
omap34xx_cpu_suspend_sz);
if (omap_type() != OMAP2_DEVICE_TYPE_GP)
_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
save_secure_ram_context_sz);
}
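/*
* Late initcall: set up the PRCM registers, register the PRCM interrupt
* handler, initialize power and clock domains, push the idle code to
* SRAM, install the suspend ops and idle hook, and save the secure RAM
* and scratchpad contents needed for off-mode.
*/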
static int __init omap3_pm_init(void)
{
struct power_state *pwrst, *tmp;
struct clockdomain *neon_clkdm, *per_clkdm, *mpu_clkdm, *core_clkdm;
int ret;
if (!cpu_is_omap34xx())
return -ENODEV;
printk(KERN_ERR "Power Management for TI OMAP3.\n");
/* XXX prcm_setup_regs needs to be before enabling hw
* supervised mode for powerdomains */
prcm_setup_regs();
ret = request_irq(INT_34XX_PRCM_MPU_IRQ,
(irq_handler_t)prcm_interrupt_handler,
IRQF_DISABLED, "prcm", NULL);
if (ret) {
printk(KERN_ERR "request_irq failed to register for 0x%x\n",
INT_34XX_PRCM_MPU_IRQ);
goto err1;
}
ret = pwrdm_for_each(pwrdms_setup, NULL);
if (ret) {
printk(KERN_ERR "Failed to setup powerdomains\n");
goto err2;
}
(void) clkdm_for_each(clkdms_setup, NULL);
mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
if (mpu_pwrdm == NULL) {
printk(KERN_ERR "Failed to get mpu_pwrdm\n");
goto err2;
}
neon_pwrdm = pwrdm_lookup("neon_pwrdm");
per_pwrdm = pwrdm_lookup("per_pwrdm");
core_pwrdm = pwrdm_lookup("core_pwrdm");
cam_pwrdm = pwrdm_lookup("cam_pwrdm");
neon_clkdm = clkdm_lookup("neon_clkdm");
mpu_clkdm = clkdm_lookup("mpu_clkdm");
per_clkdm = clkdm_lookup("per_clkdm");
core_clkdm = clkdm_lookup("core_clkdm");
omap_push_sram_idle();
#ifdef CONFIG_SUSPEND
suspend_set_ops(&omap_pm_ops);
#endif /* CONFIG_SUSPEND */
pm_idle = omap3_pm_idle;
omap3_idle_init();
clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
omap3_secure_ram_storage =
kmalloc(0x803F, GFP_KERNEL);
if (!omap3_secure_ram_storage)
printk(KERN_ERR "Memory allocation failed when"
"allocating for secure sram context\n");
local_irq_disable();
local_fiq_disable();
omap_dma_global_context_save();
omap3_save_secure_ram_context(PWRDM_POWER_ON);
omap_dma_global_context_restore();
local_irq_enable();
local_fiq_enable();
}
omap3_save_scratchpad_contents();
err1:
return ret;
err2:
free_irq(INT_34XX_PRCM_MPU_IRQ, NULL);
list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
list_del(&pwrst->node);
kfree(pwrst);
}
return ret;
}
late_initcall(omap3_pm_init);