/*
 * OMAP3 Power Management Routines
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 * Jouni Hogander
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>

#include <plat/sram.h>
#include <plat/clockdomain.h>
#include <plat/powerdomain.h>
#include <plat/control.h>
#include <plat/serial.h>
#include <plat/sdrc.h>
#include <plat/prcm.h>
#include <plat/gpmc.h>
#include <plat/dma.h>

#include <asm/tlbflush.h>

#include "cm.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"

#include "prm.h"
#include "pm.h"
#include "sdrc.h"

/* Scratchpad offsets */
#define OMAP343X_TABLE_ADDRESS_OFFSET      0x31
#define OMAP343X_TABLE_VALUE_OFFSET        0x30
#define OMAP343X_CONTROL_REG_VALUE_OFFSET  0x32

struct power_state {
        struct powerdomain *pwrdm;
        u32 next_state;
#ifdef CONFIG_SUSPEND
        u32 saved_state;
#endif
        struct list_head node;
};

static LIST_HEAD(pwrst_list);

static void (*_omap_sram_idle)(u32 *addr, int save_state);

static int (*_omap_save_secure_sram)(u32 *addr);

static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;
static struct powerdomain *cam_pwrdm;

static inline void omap3_per_save_context(void)
{
        omap_gpio_save_context();
}

static inline void omap3_per_restore_context(void)
{
        omap_gpio_restore_context();
}

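/*
 * Enable the I/O wakeup daisy chain on OMAP3430 ES3.1 and later:
 * set EN_IO_CHAIN in the WKUP module's PM_WKEN register and poll
 * ST_IO_CHAIN (with a bounded retry count) until the chain reports
 * completion.  Earlier silicon revisions have no I/O chain control,
 * so this is a no-op there.
 */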
static void omap3_enable_io_chain(void)
{
        int timeout = 0;

        if (omap_rev() >= OMAP3430_REV_ES3_1) {
                prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
                                     PM_WKEN);
                /* Do a readback to assure write has been done */
                prm_read_mod_reg(WKUP_MOD, PM_WKEN);

                while (!(prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
                         OMAP3430_ST_IO_CHAIN_MASK)) {
                        timeout++;
                        if (timeout > 1000) {
                                printk(KERN_ERR "Wake up daisy chain "
                                       "activation failed.\n");
                                return;
                        }
                        prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
                                             WKUP_MOD, PM_WKEN);
                }
        }
}

static void omap3_disable_io_chain(void)
{
        if (omap_rev() >= OMAP3430_REV_ES3_1)
                prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
                                       PM_WKEN);
}

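/*
 * Save the CORE-domain context that is lost when CORE enters off-mode:
 * trigger the hardware padconf save, then save the interrupt
 * controller, GPMC, system control module and DMA state.
 */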
static void omap3_core_save_context(void)
{
        u32 control_padconf_off;

        /* Save the padconf registers */
        control_padconf_off = omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_OFF);
        control_padconf_off |= START_PADCONF_SAVE;
        omap_ctrl_writel(control_padconf_off, OMAP343X_CONTROL_PADCONF_OFF);
        /* wait for the save to complete */
        while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
                        & PADCONF_SAVE_DONE))
                udelay(1);

        /*
         * Force write last pad into memory, as this can fail in some
         * cases according to errata 1.157, 1.185
         */
        omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
                OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

        /* Save the Interrupt controller context */
        omap_intc_save_context();
        /* Save the GPMC context */
        omap3_gpmc_save_context();
        /* Save the system control module context, padconf already saved above */
        omap3_control_save_context();
        omap_dma_global_context_save();
}

static void omap3_core_restore_context(void)
{
        /* Restore the control module context, padconf restored by h/w */
        omap3_control_restore_context();
        /* Restore the GPMC context */
        omap3_gpmc_restore_context();
        /* Restore the interrupt controller context */
        omap_intc_restore_context();
        omap_dma_global_context_restore();
}

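/*
 * Have the SRAM-resident secure service (_omap_save_secure_sram) copy
 * the secure RAM context into the omap3_secure_ram_storage buffer.
 * Only meaningful on non-GP (HS/EMU) devices.
 */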
/*
 * FIXME: This function should be called before entering off-mode after
 * OMAP3 secure services have been accessed. Currently it is only called
 * once during boot sequence, but this works as we are not using secure
 * services.
 */
static void omap3_save_secure_ram_context(u32 target_mpu_state)
{
        u32 ret;

        if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
                /*
                 * MPU next state must be set to POWER_ON temporarily,
                 * otherwise the WFI executed inside the ROM code
                 * will hang the system.
                 */
                pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
                ret = _omap_save_secure_sram((u32 *)
                                __pa(omap3_secure_ram_storage));
                pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
                /* Following is for error tracking, it should not happen */
                if (ret) {
                        printk(KERN_ERR "save_secure_sram() returns %08x\n",
                               ret);
                        while (1)
                                ;
                }
        }
}

/*
 * PRCM Interrupt Handler Helper Function
 *
 * The purpose of this function is to clear any wake-up events latched
 * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
 * may occur whilst attempting to clear a PM_WKST_x register and thus
 * set another bit in this register. A while loop is used to ensure
 * that any peripheral wake-up events occurring while attempting to
 * clear the PM_WKST_x are detected and cleared.
 */
static int prcm_clear_mod_irqs(s16 module, u8 regs)
{
        u32 wkst, fclk, iclk, clken;
        u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
        u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
        u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
        u16 grpsel_off = (regs == 3) ?
                OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
        int c = 0;

        wkst = prm_read_mod_reg(module, wkst_off);
        wkst &= prm_read_mod_reg(module, grpsel_off);
        if (wkst) {
                iclk = cm_read_mod_reg(module, iclk_off);
                fclk = cm_read_mod_reg(module, fclk_off);
                while (wkst) {
                        clken = wkst;
                        cm_set_mod_reg_bits(clken, module, iclk_off);
                        /*
                         * For USBHOST, we don't know whether HOST1 or
                         * HOST2 woke us up, so enable both f-clocks
                         */
                        if (module == OMAP3430ES2_USBHOST_MOD)
                                clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
                        cm_set_mod_reg_bits(clken, module, fclk_off);
                        prm_write_mod_reg(wkst, module, wkst_off);
                        wkst = prm_read_mod_reg(module, wkst_off);
                        c++;
                }
                cm_write_mod_reg(iclk, module, iclk_off);
                cm_write_mod_reg(fclk, module, fclk_off);
        }

        return c;
}

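/*
 * Clear the latched wakeup events for every module that can raise the
 * MPU PRCM wakeup interrupt and return how many events were handled.
 */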
static int _prcm_int_handle_wakeup(void)
{
        int c;

        c = prcm_clear_mod_irqs(WKUP_MOD, 1);
        c += prcm_clear_mod_irqs(CORE_MOD, 1);
        c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
        if (omap_rev() > OMAP3430_REV_ES1_0) {
                c += prcm_clear_mod_irqs(CORE_MOD, 3);
                c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
        }

        return c;
}

/*
 * PRCM Interrupt Handler
 *
 * The PRM_IRQSTATUS_MPU register indicates if there are any pending
 * interrupts from the PRCM for the MPU. These bits must be cleared in
 * order to clear the PRCM interrupt. The PRCM interrupt handler is
 * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
 * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
 * register indicates that a wake-up event is pending for the MPU and
 * this bit can only be cleared if all the wake-up events latched
 * in the various PM_WKST_x registers have been cleared. The interrupt
 * handler is implemented using a do-while loop so that if a wake-up
 * event occurs during the processing of the PRCM interrupt handler
 * (setting a bit in the corresponding PM_WKST_x register and thus
 * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
 * it is still handled.
 */
static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
{
        u32 irqenable_mpu, irqstatus_mpu;
        int c = 0;

        irqenable_mpu = prm_read_mod_reg(OCP_MOD,
                                         OMAP3_PRM_IRQENABLE_MPU_OFFSET);
        irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
                                         OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
        irqstatus_mpu &= irqenable_mpu;

        do {
                if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
                                     OMAP3430_IO_ST_MASK)) {
                        c = _prcm_int_handle_wakeup();

                        /*
                         * Is the MPU PRCM interrupt handler racing with the
                         * IVA2 PRCM interrupt handler ?
                         */
                        WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
                             "but no wakeup sources are marked\n");
                } else {
                        /* XXX we need to expand our PRCM interrupt handler */
                        WARN(1, "prcm: WARNING: PRCM interrupt received, but "
                             "no code to handle it (%08x)\n", irqstatus_mpu);
                }

                prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
                                  OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

                irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
                                                 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
                irqstatus_mpu &= irqenable_mpu;

        } while (irqstatus_mpu);

        return IRQ_HANDLED;
}

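/* Write the saved value back into the CP15 control register (SCTLR) */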
static void restore_control_register(u32 val)
{
        __asm__ __volatile__ ("mcr p15, 0, %0, c1, c0, 0" : : "r" (val));
}

/* Function to restore the table entry that was modified for enabling MMU */
static void restore_table_entry(void)
{
        void __iomem *scratchpad_address;
        u32 previous_value, control_reg_value;
        u32 *address;

        scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);

        /* Get address of entry that was modified */
        address = (u32 *)__raw_readl(scratchpad_address +
                                     OMAP343X_TABLE_ADDRESS_OFFSET);
        /* Get the previous value which needs to be restored */
        previous_value = __raw_readl(scratchpad_address +
                                     OMAP343X_TABLE_VALUE_OFFSET);
        address = __va(address);
        *address = previous_value;
        flush_tlb_all();
        control_reg_value = __raw_readl(scratchpad_address
                                        + OMAP343X_CONTROL_REG_VALUE_OFFSET);
        /* This will enable caches and prediction */
        restore_control_register(control_reg_value);
}

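/*
 * omap_sram_idle() is the main low-power entry point: it decides how
 * much MPU/CORE context must be saved, saves PER/CORE state as needed,
 * jumps to the SRAM-resident suspend code and restores everything on
 * the way back out.
 */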
void omap_sram_idle(void)
{
        /*
         * Variable to tell what needs to be saved and restored
         * in omap_sram_idle:
         * save_state = 0 => Nothing to save and restore
         * save_state = 1 => Only L1 and logic lost
         * save_state = 2 => Only L2 lost
         * save_state = 3 => L1, L2 and logic lost
         */
        int save_state = 0;
        int mpu_next_state = PWRDM_POWER_ON;
        int per_next_state = PWRDM_POWER_ON;
        int core_next_state = PWRDM_POWER_ON;
        int core_prev_state, per_prev_state;
        u32 sdrc_pwr = 0;

        if (!_omap_sram_idle)
                return;

        pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
        pwrdm_clear_all_prev_pwrst(neon_pwrdm);
        pwrdm_clear_all_prev_pwrst(core_pwrdm);
        pwrdm_clear_all_prev_pwrst(per_pwrdm);

        mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
        switch (mpu_next_state) {
        case PWRDM_POWER_ON:
        case PWRDM_POWER_RET:
                /* No need to save context */
                save_state = 0;
                break;
        case PWRDM_POWER_OFF:
                save_state = 3;
                break;
        default:
                /* Invalid state */
                printk(KERN_ERR "Invalid mpu state in sram_idle\n");
                return;
        }
        pwrdm_pre_transition();

        /* NEON control */
        if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
                pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

        /* Enable IO-PAD and IO-CHAIN wakeups */
        per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
        core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
        if (omap3_has_io_wakeup() &&
            (per_next_state < PWRDM_POWER_ON ||
             core_next_state < PWRDM_POWER_ON)) {
                prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
                omap3_enable_io_chain();
        }

        /* PER */
        if (per_next_state < PWRDM_POWER_ON) {
                omap_uart_prepare_idle(2);
                omap_uart_prepare_idle(3);
                omap2_gpio_prepare_for_idle(per_next_state);
                if (per_next_state == PWRDM_POWER_OFF)
                        omap3_per_save_context();
        }

        /* CORE */
        if (core_next_state < PWRDM_POWER_ON) {
                omap_uart_prepare_idle(0);
                omap_uart_prepare_idle(1);
                if (core_next_state == PWRDM_POWER_OFF) {
                        omap3_core_save_context();
                        omap3_prcm_save_context();
                }
        }

        omap3_intc_prepare_idle();

        /*
         * On EMU/HS devices ROM code restores an SDRC value
         * from scratchpad which has automatic self refresh on timeout
         * of AUTO_CNT = 1 enabled. This takes care of erratum 1.142.
         * Hence store/restore the SDRC_POWER register here.
         */
        if (omap_rev() >= OMAP3430_REV_ES3_0 &&
            omap_type() != OMAP2_DEVICE_TYPE_GP &&
            core_next_state == PWRDM_POWER_OFF)
                sdrc_pwr = sdrc_read_reg(SDRC_POWER);

        /*
         * omap3_arm_context is the location where ARM registers
         * get saved. The restore path then reads from this
         * location and restores them back.
         */
        _omap_sram_idle(omap3_arm_context, save_state);
        cpu_init();

        /* Restore normal SDRC POWER settings */
        if (omap_rev() >= OMAP3430_REV_ES3_0 &&
            omap_type() != OMAP2_DEVICE_TYPE_GP &&
            core_next_state == PWRDM_POWER_OFF)
                sdrc_write_reg(sdrc_pwr, SDRC_POWER);

        /* Restore table entry modified during MMU restoration */
        if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
                restore_table_entry();

        /* CORE */
        if (core_next_state < PWRDM_POWER_ON) {
                core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
                if (core_prev_state == PWRDM_POWER_OFF) {
                        omap3_core_restore_context();
                        omap3_prcm_restore_context();
                        omap3_sram_restore_context();
                        omap2_sms_restore_context();
                }
                omap_uart_resume_idle(0);
                omap_uart_resume_idle(1);
                if (core_next_state == PWRDM_POWER_OFF)
                        prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
                                               OMAP3430_GR_MOD,
                                               OMAP3_PRM_VOLTCTRL_OFFSET);
        }
        omap3_intc_resume_idle();

        /* PER */
        if (per_next_state < PWRDM_POWER_ON) {
                per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
                omap2_gpio_resume_after_idle();
                if (per_prev_state == PWRDM_POWER_OFF)
                        omap3_per_restore_context();
                omap_uart_resume_idle(2);
                omap_uart_resume_idle(3);
        }

        /* Disable IO-PAD and IO-CHAIN wakeup */
        if (omap3_has_io_wakeup() &&
            (per_next_state < PWRDM_POWER_ON ||
             core_next_state < PWRDM_POWER_ON)) {
                prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
                omap3_disable_io_chain();
        }

        pwrdm_post_transition();

        omap2_clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
}

int omap3_can_sleep(void)
{
        if (!sleep_while_idle)
                return 0;
        if (!omap_uart_can_sleep())
                return 0;
        return 1;
}

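/* Default pm_idle hook: enter omap_sram_idle() unless sleeping is not
 * currently allowed or there is pending work. */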
static void omap3_pm_idle(void)
{
        local_irq_disable();
        local_fiq_disable();

        if (!omap3_can_sleep())
                goto out;

        if (omap_irq_pending() || need_resched())
                goto out;

        omap_sram_idle();

out:
        local_fiq_enable();
        local_irq_enable();
}

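/*
 * System suspend support: program every registered powerdomain to its
 * requested suspend state, run omap_sram_idle(), then restore the
 * previous next-power-state settings and report any domain that did
 * not reach its target.
 */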
#ifdef CONFIG_SUSPEND
static suspend_state_t suspend_state;

static int omap3_pm_prepare(void)
{
        disable_hlt();
        return 0;
}

static int omap3_pm_suspend(void)
{
        struct power_state *pwrst;
        int state, ret = 0;

        if (wakeup_timer_seconds || wakeup_timer_milliseconds)
                omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
                                         wakeup_timer_milliseconds);

        /* Read current next_pwrsts */
        list_for_each_entry(pwrst, &pwrst_list, node)
                pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
        /* Set ones wanted by suspend */
        list_for_each_entry(pwrst, &pwrst_list, node) {
                if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
                        goto restore;
                if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
                        goto restore;
        }

        omap_uart_prepare_suspend();
        omap3_intc_suspend();

        omap_sram_idle();

restore:
        /* Restore next_pwrsts */
        list_for_each_entry(pwrst, &pwrst_list, node) {
                state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
                if (state > pwrst->next_state) {
                        printk(KERN_INFO "Powerdomain (%s) didn't enter "
                               "target state %d\n",
                               pwrst->pwrdm->name, pwrst->next_state);
                        ret = -1;
                }
                omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
        }
        if (ret)
                printk(KERN_ERR "Could not enter target state in pm_suspend\n");
        else
                printk(KERN_INFO "Successfully put all powerdomains "
                       "to target state\n");

        return ret;
}

static int omap3_pm_enter(suspend_state_t unused)
{
        int ret = 0;

        switch (suspend_state) {
        case PM_SUSPEND_STANDBY:
        case PM_SUSPEND_MEM:
                ret = omap3_pm_suspend();
                break;
        default:
                ret = -EINVAL;
        }

        return ret;
}

static void omap3_pm_finish(void)
{
        enable_hlt();
}

/* Hooks to enable / disable UART interrupts during suspend */
static int omap3_pm_begin(suspend_state_t state)
{
        suspend_state = state;
        omap_uart_enable_irqs(0);
        return 0;
}

static void omap3_pm_end(void)
{
        suspend_state = PM_SUSPEND_ON;
        omap_uart_enable_irqs(1);
        return;
}

static struct platform_suspend_ops omap_pm_ops = {
        .begin          = omap3_pm_begin,
        .end            = omap3_pm_end,
        .prepare        = omap3_pm_prepare,
        .enter          = omap3_pm_enter,
        .finish         = omap3_pm_finish,
        .valid          = suspend_valid_only_mem,
};
#endif /* CONFIG_SUSPEND */


/**
 * omap3_iva_idle(): ensure IVA is in idle so it can be put into
 *                   retention
 *
 * In cases where IVA2 is activated by bootcode, it may prevent
 * full-chip retention or off-mode because it is not idle. This
 * function forces the IVA2 into idle state so it can go
 * into retention/off and thus allow full-chip retention/off.
 *
 **/
static void __init omap3_iva_idle(void)
{
        /* ensure IVA2 clock is disabled */
        cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

        /* if no clock activity, nothing else to do */
        if (!(cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
              OMAP3430_CLKACTIVITY_IVA2_MASK))
                return;

        /* Reset IVA2 */
        prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
                          OMAP3430_RST2_IVA2_MASK |
                          OMAP3430_RST3_IVA2_MASK,
                          OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

        /* Enable IVA2 clock */
        cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
                         OMAP3430_IVA2_MOD, CM_FCLKEN);

        /* Set IVA2 boot mode to 'idle' */
        omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
                         OMAP343X_CONTROL_IVA2_BOOTMOD);

        /* Un-reset IVA2 */
        prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

        /* Disable IVA2 clock */
        cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

        /* Reset IVA2 */
        prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
                          OMAP3430_RST2_IVA2_MASK |
                          OMAP3430_RST3_IVA2_MASK,
                          OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
}

static void __init omap3_d2d_idle(void)
{
        u16 mask, padconf;

        /* In a stand-alone OMAP3430 where there is no stacked modem,
         * the D2D Idle Ack and D2D MStandby pins must be pulled high.
         * Set CONTROL_PADCONF_SAD2D_IDLEACK and
         * CONTROL_PADCONF_SAD2D_MSTDBY to have a pull up. */
        mask = (1 << 4) | (1 << 3); /* pull-up, enabled */
        padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY);
        padconf |= mask;
        omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY);

        padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK);
        padconf |= mask;
        omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);

        /* reset modem */
        prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
                          OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
                          CORE_MOD, OMAP2_RM_RSTCTRL);
        prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
}

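/*
 * One-time PRCM setup at boot: clear wakeup dependencies, enable
 * interface-clock and DPLL autoidle, program the default wakeup
 * sources and MPU group selections, and clear any stale reset or
 * interrupt status.
 */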
static void __init prcm_setup_regs(void)
{
        u32 omap3630_auto_uart4_mask = cpu_is_omap3630() ?
                                        OMAP3630_AUTO_UART4_MASK : 0;
        u32 omap3630_en_uart4_mask = cpu_is_omap3630() ?
                                        OMAP3630_EN_UART4_MASK : 0;
        u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ?
                                        OMAP3630_GRPSEL_UART4_MASK : 0;


        /* XXX Reset all wkdeps. This should be done when initializing
         * powerdomains */
        prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
        prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
        prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
        prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
        prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
        prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
        if (omap_rev() > OMAP3430_REV_ES1_0) {
                prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
                prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
        } else
                prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);

        /*
         * Enable interface clock autoidle for all modules.
         * Note that in the long run this should be done by clockfw
         */
        cm_write_mod_reg(
                OMAP3430_AUTO_MODEM_MASK |
                OMAP3430ES2_AUTO_MMC3_MASK |
                OMAP3430ES2_AUTO_ICR_MASK |
                OMAP3430_AUTO_AES2_MASK |
                OMAP3430_AUTO_SHA12_MASK |
                OMAP3430_AUTO_DES2_MASK |
                OMAP3430_AUTO_MMC2_MASK |
                OMAP3430_AUTO_MMC1_MASK |
                OMAP3430_AUTO_MSPRO_MASK |
                OMAP3430_AUTO_HDQ_MASK |
                OMAP3430_AUTO_MCSPI4_MASK |
                OMAP3430_AUTO_MCSPI3_MASK |
                OMAP3430_AUTO_MCSPI2_MASK |
                OMAP3430_AUTO_MCSPI1_MASK |
                OMAP3430_AUTO_I2C3_MASK |
                OMAP3430_AUTO_I2C2_MASK |
                OMAP3430_AUTO_I2C1_MASK |
                OMAP3430_AUTO_UART2_MASK |
                OMAP3430_AUTO_UART1_MASK |
                OMAP3430_AUTO_GPT11_MASK |
                OMAP3430_AUTO_GPT10_MASK |
                OMAP3430_AUTO_MCBSP5_MASK |
                OMAP3430_AUTO_MCBSP1_MASK |
                OMAP3430ES1_AUTO_FAC_MASK | /* This is es1 only */
                OMAP3430_AUTO_MAILBOXES_MASK |
                OMAP3430_AUTO_OMAPCTRL_MASK |
                OMAP3430ES1_AUTO_FSHOSTUSB_MASK |
                OMAP3430_AUTO_HSOTGUSB_MASK |
                OMAP3430_AUTO_SAD2D_MASK |
                OMAP3430_AUTO_SSI_MASK,
                CORE_MOD, CM_AUTOIDLE1);

        cm_write_mod_reg(
                OMAP3430_AUTO_PKA_MASK |
                OMAP3430_AUTO_AES1_MASK |
                OMAP3430_AUTO_RNG_MASK |
                OMAP3430_AUTO_SHA11_MASK |
                OMAP3430_AUTO_DES1_MASK,
                CORE_MOD, CM_AUTOIDLE2);

        if (omap_rev() > OMAP3430_REV_ES1_0) {
                cm_write_mod_reg(
                        OMAP3430_AUTO_MAD2D_MASK |
                        OMAP3430ES2_AUTO_USBTLL_MASK,
                        CORE_MOD, CM_AUTOIDLE3);
        }

        cm_write_mod_reg(
                OMAP3430_AUTO_WDT2_MASK |
                OMAP3430_AUTO_WDT1_MASK |
                OMAP3430_AUTO_GPIO1_MASK |
                OMAP3430_AUTO_32KSYNC_MASK |
                OMAP3430_AUTO_GPT12_MASK |
                OMAP3430_AUTO_GPT1_MASK,
                WKUP_MOD, CM_AUTOIDLE);

        cm_write_mod_reg(
                OMAP3430_AUTO_DSS_MASK,
                OMAP3430_DSS_MOD,
                CM_AUTOIDLE);

        cm_write_mod_reg(
                OMAP3430_AUTO_CAM_MASK,
                OMAP3430_CAM_MOD,
                CM_AUTOIDLE);

        cm_write_mod_reg(
                omap3630_auto_uart4_mask |
                OMAP3430_AUTO_GPIO6_MASK |
                OMAP3430_AUTO_GPIO5_MASK |
                OMAP3430_AUTO_GPIO4_MASK |
                OMAP3430_AUTO_GPIO3_MASK |
                OMAP3430_AUTO_GPIO2_MASK |
                OMAP3430_AUTO_WDT3_MASK |
                OMAP3430_AUTO_UART3_MASK |
                OMAP3430_AUTO_GPT9_MASK |
                OMAP3430_AUTO_GPT8_MASK |
                OMAP3430_AUTO_GPT7_MASK |
                OMAP3430_AUTO_GPT6_MASK |
                OMAP3430_AUTO_GPT5_MASK |
                OMAP3430_AUTO_GPT4_MASK |
                OMAP3430_AUTO_GPT3_MASK |
                OMAP3430_AUTO_GPT2_MASK |
                OMAP3430_AUTO_MCBSP4_MASK |
                OMAP3430_AUTO_MCBSP3_MASK |
                OMAP3430_AUTO_MCBSP2_MASK,
                OMAP3430_PER_MOD,
                CM_AUTOIDLE);

        if (omap_rev() > OMAP3430_REV_ES1_0) {
                cm_write_mod_reg(
                        OMAP3430ES2_AUTO_USBHOST_MASK,
                        OMAP3430ES2_USBHOST_MOD,
                        CM_AUTOIDLE);
        }

        omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);

        /*
         * Set all plls to autoidle. This is needed until autoidle is
         * enabled by clockfw
         */
        cm_write_mod_reg(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
                         OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
        cm_write_mod_reg(1 << OMAP3430_AUTO_MPU_DPLL_SHIFT,
                         MPU_MOD,
                         CM_AUTOIDLE2);
        cm_write_mod_reg((1 << OMAP3430_AUTO_PERIPH_DPLL_SHIFT) |
                         (1 << OMAP3430_AUTO_CORE_DPLL_SHIFT),
                         PLL_MOD,
                         CM_AUTOIDLE);
        cm_write_mod_reg(1 << OMAP3430ES2_AUTO_PERIPH2_DPLL_SHIFT,
                         PLL_MOD,
                         CM_AUTOIDLE2);

        /*
         * Enable control of external oscillator through
         * sys_clkreq. In the long run clock framework should
         * take care of this.
         */
        prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
                             1 << OMAP_AUTOEXTCLKMODE_SHIFT,
                             OMAP3430_GR_MOD,
                             OMAP3_PRM_CLKSRC_CTRL_OFFSET);

        /* setup wakeup source */
        prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
                          OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
                          WKUP_MOD, PM_WKEN);
        /* No need to write EN_IO, that is always enabled */
        prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
                          OMAP3430_GRPSEL_GPT1_MASK |
                          OMAP3430_GRPSEL_GPT12_MASK,
                          WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
        /* For some reason IO doesn't generate wakeup event even if
         * it is selected to mpu wakeup group */
        prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
                          OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);

        /* Enable PM_WKEN to support DSS LPR */
        prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
                          OMAP3430_DSS_MOD, PM_WKEN);

        /* Enable wakeups in PER */
        prm_write_mod_reg(omap3630_en_uart4_mask |
                          OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
                          OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
                          OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
                          OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
                          OMAP3430_EN_MCBSP4_MASK,
                          OMAP3430_PER_MOD, PM_WKEN);
        /* and allow them to wake up MPU */
        prm_write_mod_reg(omap3630_grpsel_uart4_mask |
                          OMAP3430_GRPSEL_GPIO2_MASK |
                          OMAP3430_GRPSEL_GPIO3_MASK |
                          OMAP3430_GRPSEL_GPIO4_MASK |
                          OMAP3430_GRPSEL_GPIO5_MASK |
                          OMAP3430_GRPSEL_GPIO6_MASK |
                          OMAP3430_GRPSEL_UART3_MASK |
                          OMAP3430_GRPSEL_MCBSP2_MASK |
                          OMAP3430_GRPSEL_MCBSP3_MASK |
                          OMAP3430_GRPSEL_MCBSP4_MASK,
                          OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);

        /* Don't attach IVA interrupts */
        prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
        prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
        prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
        prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);

        /* Clear any pending 'reset' flags */
        prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
        prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
        prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
        prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
        prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
        prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
        prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);

        /* Clear any pending PRCM interrupts */
        prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

        omap3_iva_idle();
        omap3_d2d_idle();
}

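/*
 * Switch the target low-power state of every registered powerdomain
 * between OFF and RETENTION and apply the new state immediately.
 */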
void omap3_pm_off_mode_enable(int enable)
{
        struct power_state *pwrst;
        u32 state;

        if (enable)
                state = PWRDM_POWER_OFF;
        else
                state = PWRDM_POWER_RET;

#ifdef CONFIG_CPU_IDLE
        omap3_cpuidle_update_states();
#endif

        list_for_each_entry(pwrst, &pwrst_list, node) {
                pwrst->next_state = state;
                omap_set_pwrdm_state(pwrst->pwrdm, state);
        }
}

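/* Query or override the suspend target state of a single powerdomain */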
int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
{
        struct power_state *pwrst;

        list_for_each_entry(pwrst, &pwrst_list, node) {
                if (pwrst->pwrdm == pwrdm)
                        return pwrst->next_state;
        }
        return -EINVAL;
}

int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
{
        struct power_state *pwrst;

        list_for_each_entry(pwrst, &pwrst_list, node) {
                if (pwrst->pwrdm == pwrdm) {
                        pwrst->next_state = state;
                        return 0;
                }
        }
        return -EINVAL;
}

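/*
 * pwrdm_for_each() callback: track each powerdomain in pwrst_list with
 * a default target of RETENTION, and enable hardware save-and-restore
 * where the domain supports it.
 */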
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
        struct power_state *pwrst;

        if (!pwrdm->pwrsts)
                return 0;

        pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
        if (!pwrst)
                return -ENOMEM;
        pwrst->pwrdm = pwrdm;
        pwrst->next_state = PWRDM_POWER_RET;
        list_add(&pwrst->node, &pwrst_list);

        if (pwrdm_has_hdwr_sar(pwrdm))
                pwrdm_enable_hdwr_sar(pwrdm);

        return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/*
 * Enable hw supervised mode for all clockdomains if it's
 * supported. Initiate sleep transition for other clockdomains, if
 * they are not used
 */
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
{
        if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
                omap2_clkdm_allow_idle(clkdm);
        else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
                 atomic_read(&clkdm->usecount) == 0)
                omap2_clkdm_sleep(clkdm);
        return 0;
}

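/*
 * Copy the CPU suspend routine (and, on non-GP devices, the secure RAM
 * save stub) into SRAM and keep function pointers to the SRAM copies.
 */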
void omap_push_sram_idle(void)
{
        _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
                                         omap34xx_cpu_suspend_sz);
        if (omap_type() != OMAP2_DEVICE_TYPE_GP)
                _omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
                                save_secure_ram_context_sz);
}

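/*
 * Late-initcall entry point: set up the PRCM, powerdomains and
 * clockdomains, register the PRCM interrupt handler, push the idle
 * code to SRAM and install the suspend ops and idle hook.
 */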
static int __init omap3_pm_init(void)
{
        struct power_state *pwrst, *tmp;
        struct clockdomain *neon_clkdm, *per_clkdm, *mpu_clkdm, *core_clkdm;
        int ret;

        if (!cpu_is_omap34xx())
                return -ENODEV;

        printk(KERN_ERR "Power Management for TI OMAP3.\n");

        /* XXX prcm_setup_regs needs to be before enabling hw
         * supervised mode for powerdomains */
        prcm_setup_regs();

        ret = request_irq(INT_34XX_PRCM_MPU_IRQ,
                          (irq_handler_t)prcm_interrupt_handler,
                          IRQF_DISABLED, "prcm", NULL);
        if (ret) {
                printk(KERN_ERR "request_irq failed to register for 0x%x\n",
                       INT_34XX_PRCM_MPU_IRQ);
                goto err1;
        }

        ret = pwrdm_for_each(pwrdms_setup, NULL);
        if (ret) {
                printk(KERN_ERR "Failed to setup powerdomains\n");
                goto err2;
        }

        (void) clkdm_for_each(clkdms_setup, NULL);

        mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
        if (mpu_pwrdm == NULL) {
                printk(KERN_ERR "Failed to get mpu_pwrdm\n");
                goto err2;
        }

        neon_pwrdm = pwrdm_lookup("neon_pwrdm");
        per_pwrdm = pwrdm_lookup("per_pwrdm");
        core_pwrdm = pwrdm_lookup("core_pwrdm");
        cam_pwrdm = pwrdm_lookup("cam_pwrdm");

        neon_clkdm = clkdm_lookup("neon_clkdm");
        mpu_clkdm = clkdm_lookup("mpu_clkdm");
        per_clkdm = clkdm_lookup("per_clkdm");
        core_clkdm = clkdm_lookup("core_clkdm");

        omap_push_sram_idle();
#ifdef CONFIG_SUSPEND
        suspend_set_ops(&omap_pm_ops);
#endif /* CONFIG_SUSPEND */

        pm_idle = omap3_pm_idle;
        omap3_idle_init();

        clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
        if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
                omap3_secure_ram_storage =
                        kmalloc(0x803F, GFP_KERNEL);
                if (!omap3_secure_ram_storage)
                        printk(KERN_ERR "Memory allocation failed when "
                               "allocating for secure sram context\n");

                local_irq_disable();
                local_fiq_disable();

                omap_dma_global_context_save();
                omap3_save_secure_ram_context(PWRDM_POWER_ON);
                omap_dma_global_context_restore();

                local_irq_enable();
                local_fiq_enable();
        }

        omap3_save_scratchpad_contents();
err1:
        return ret;
err2:
        free_irq(INT_34XX_PRCM_MPU_IRQ, NULL);
        list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
                list_del(&pwrst->node);
                kfree(pwrst);
        }
        return ret;
}

late_initcall(omap3_pm_init);