/*
 * OMAP3 Power Management Routines
 *
 * Copyright (C) 2006-2008 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 * Jouni Hogander
 *
 * Copyright (C) 2007 Texas Instruments, Inc.
 * Rajendra Nayak <rnayak@ti.com>
 *
 * Copyright (C) 2005 Texas Instruments, Inc.
 * Richard Woodruff <r-woodruff2@ti.com>
 *
 * Based on pm.c for omap1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/console.h>

#include <plat/sram.h>
#include <plat/clockdomain.h>
#include <plat/powerdomain.h>
#include <plat/serial.h>
#include <plat/sdrc.h>
#include <plat/prcm.h>
#include <plat/gpmc.h>
#include <plat/dma.h>

#include <asm/tlbflush.h>

#include "cm.h"
#include "cm-regbits-34xx.h"
#include "prm-regbits-34xx.h"

#include "prm.h"
#include "pm.h"
#include "sdrc.h"
#include "control.h"

#ifdef CONFIG_SUSPEND
static suspend_state_t suspend_state = PM_SUSPEND_ON;
static inline bool is_suspending(void)
{
	return (suspend_state != PM_SUSPEND_ON);
}
#else
static inline bool is_suspending(void)
{
	return false;
}
#endif
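
/*
 * Note: is_suspending() is checked in omap_sram_idle() below to decide
 * whether the console semaphore needs to be taken, on the assumption that
 * the suspend core has already silenced the console during a full suspend.
 */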

/* Scratchpad offsets */
#define OMAP343X_TABLE_ADDRESS_OFFSET	   0xc4
#define OMAP343X_TABLE_VALUE_OFFSET	   0xc0
#define OMAP343X_CONTROL_REG_VALUE_OFFSET  0xc8
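
/*
 * restore_table_entry() below uses these scratchpad offsets to recover the
 * saved MMU table entry address/value and the ARM control register when
 * resuming from off-mode.
 */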

/* pm34xx errata defined in pm.h */
u16 pm34xx_errata;

struct power_state {
	struct powerdomain *pwrdm;
	u32 next_state;
#ifdef CONFIG_SUSPEND
	u32 saved_state;
#endif
	struct list_head node;
};

static LIST_HEAD(pwrst_list);

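/*
 * Pointers to the suspend/save routines that omap_push_sram_idle() copies
 * into on-chip SRAM.
 */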
static void (*_omap_sram_idle)(u32 *addr, int save_state);

static int (*_omap_save_secure_sram)(u32 *addr);

static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
static struct powerdomain *core_pwrdm, *per_pwrdm;
static struct powerdomain *cam_pwrdm;

static inline void omap3_per_save_context(void)
{
	omap_gpio_save_context();
}

static inline void omap3_per_restore_context(void)
{
	omap_gpio_restore_context();
}

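/*
 * The I/O wakeup daisy chain (ES3.1 and later) latches pad wakeup events
 * while the I/O power domain is inactive. omap_sram_idle() arms it via
 * omap3_enable_io_chain() before entering retention/off and releases it
 * with omap3_disable_io_chain() on the way back out.
 */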
static void omap3_enable_io_chain(void)
{
	int timeout = 0;

	if (omap_rev() >= OMAP3430_REV_ES3_1) {
		prm_set_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
				     PM_WKEN);
		/* Do a readback to assure write has been done */
		prm_read_mod_reg(WKUP_MOD, PM_WKEN);

		while (!(prm_read_mod_reg(WKUP_MOD, PM_WKEN) &
			 OMAP3430_ST_IO_CHAIN_MASK)) {
			timeout++;
			if (timeout > 1000) {
				printk(KERN_ERR "Wake up daisy chain "
				       "activation failed.\n");
				return;
			}
			prm_set_mod_reg_bits(OMAP3430_ST_IO_CHAIN_MASK,
					     WKUP_MOD, PM_WKEN);
		}
	}
}

static void omap3_disable_io_chain(void)
{
	if (omap_rev() >= OMAP3430_REV_ES3_1)
		prm_clear_mod_reg_bits(OMAP3430_EN_IO_CHAIN_MASK, WKUP_MOD,
				       PM_WKEN);
}

static void omap3_core_save_context(void)
{
	u32 control_padconf_off;

	/* Save the padconf registers */
	control_padconf_off = omap_ctrl_readl(OMAP343X_CONTROL_PADCONF_OFF);
	control_padconf_off |= START_PADCONF_SAVE;
	omap_ctrl_writel(control_padconf_off, OMAP343X_CONTROL_PADCONF_OFF);
	/* wait for the save to complete */
	while (!(omap_ctrl_readl(OMAP343X_CONTROL_GENERAL_PURPOSE_STATUS)
		 & PADCONF_SAVE_DONE))
		udelay(1);

	/*
	 * Force a write of the last pad into memory, as this can fail in
	 * some cases according to errata 1.157, 1.185
	 */
	omap_ctrl_writel(omap_ctrl_readl(OMAP343X_PADCONF_ETK_D14),
		OMAP343X_CONTROL_MEM_WKUP + 0x2a0);

	/* Save the interrupt controller context */
	omap_intc_save_context();
	/* Save the GPMC context */
	omap3_gpmc_save_context();
	/* Save the system control module context; padconf already saved above */
	omap3_control_save_context();
	omap_dma_global_context_save();
}

static void omap3_core_restore_context(void)
{
	/* Restore the control module context, padconf restored by h/w */
	omap3_control_restore_context();
	/* Restore the GPMC context */
	omap3_gpmc_restore_context();
	/* Restore the interrupt controller context */
	omap_intc_restore_context();
	omap_dma_global_context_restore();
}

/*
 * FIXME: This function should be called before entering off-mode after
 * OMAP3 secure services have been accessed. Currently it is only called
 * once during boot sequence, but this works as we are not using secure
 * services.
 */
static void omap3_save_secure_ram_context(u32 target_mpu_state)
{
	u32 ret;

	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		/*
		 * MPU next state must be set to POWER_ON temporarily,
		 * otherwise the WFI executed inside the ROM code
		 * will hang the system.
		 */
		pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
		ret = _omap_save_secure_sram((u32 *)
				__pa(omap3_secure_ram_storage));
		pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
		/* Following is for error tracking, it should not happen */
		if (ret) {
			printk(KERN_ERR "save_secure_sram() returns %08x\n",
			       ret);
			while (1)
				;
		}
	}
}

/*
 * PRCM Interrupt Handler Helper Function
 *
 * The purpose of this function is to clear any wake-up events latched
 * in the PRCM PM_WKST_x registers. It is possible that a wake-up event
 * may occur whilst attempting to clear a PM_WKST_x register and thus
 * set another bit in this register. A while loop is used to ensure
 * that any peripheral wake-up events occurring while attempting to
 * clear the PM_WKST_x are detected and cleared.
 */
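/*
 * Returns the number of loop passes needed to clear the latched wake-up
 * events; 0 means nothing was pending.
 */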
static int prcm_clear_mod_irqs(s16 module, u8 regs)
{
	u32 wkst, fclk, iclk, clken;
	u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1;
	u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1;
	u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1;
	u16 grpsel_off = (regs == 3) ?
		OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL;
	int c = 0;

	wkst = prm_read_mod_reg(module, wkst_off);
	wkst &= prm_read_mod_reg(module, grpsel_off);
	if (wkst) {
		iclk = cm_read_mod_reg(module, iclk_off);
		fclk = cm_read_mod_reg(module, fclk_off);
		while (wkst) {
			clken = wkst;
			cm_set_mod_reg_bits(clken, module, iclk_off);
			/*
			 * For USBHOST, we don't know whether HOST1 or
			 * HOST2 woke us up, so enable both f-clocks
			 */
			if (module == OMAP3430ES2_USBHOST_MOD)
				clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT;
			cm_set_mod_reg_bits(clken, module, fclk_off);
			prm_write_mod_reg(wkst, module, wkst_off);
			wkst = prm_read_mod_reg(module, wkst_off);
			c++;
		}
		cm_write_mod_reg(iclk, module, iclk_off);
		cm_write_mod_reg(fclk, module, fclk_off);
	}

	return c;
}

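/*
 * Clear every wake-up source that is routed to the MPU and return how many
 * were pending; the PRCM interrupt handler treats a zero return as a
 * possible race with the IVA2 PRCM interrupt handler.
 */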
static int _prcm_int_handle_wakeup(void)
{
	int c;

	c = prcm_clear_mod_irqs(WKUP_MOD, 1);
	c += prcm_clear_mod_irqs(CORE_MOD, 1);
	c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		c += prcm_clear_mod_irqs(CORE_MOD, 3);
		c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1);
	}

	return c;
}

/*
 * PRCM Interrupt Handler
 *
 * The PRM_IRQSTATUS_MPU register indicates if there are any pending
 * interrupts from the PRCM for the MPU. These bits must be cleared in
 * order to clear the PRCM interrupt. The PRCM interrupt handler is
 * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear
 * the PRCM interrupt. Please note that bit 0 of the PRM_IRQSTATUS_MPU
 * register indicates that a wake-up event is pending for the MPU and
 * this bit can only be cleared once all the wake-up events latched
 * in the various PM_WKST_x registers have been cleared. The interrupt
 * handler is implemented using a do-while loop so that a wake-up
 * event arriving during the processing of the PRCM interrupt handler
 * (setting a bit in the corresponding PM_WKST_x register and thus
 * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register)
 * is still handled.
 */
static irqreturn_t prcm_interrupt_handler(int irq, void *dev_id)
{
	u32 irqenable_mpu, irqstatus_mpu;
	int c = 0;

	irqenable_mpu = prm_read_mod_reg(OCP_MOD,
					 OMAP3_PRM_IRQENABLE_MPU_OFFSET);
	irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
					 OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
	irqstatus_mpu &= irqenable_mpu;

	do {
		if (irqstatus_mpu & (OMAP3430_WKUP_ST_MASK |
				     OMAP3430_IO_ST_MASK)) {
			c = _prcm_int_handle_wakeup();

			/*
			 * Is the MPU PRCM interrupt handler racing with the
			 * IVA2 PRCM interrupt handler ?
			 */
			WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup "
			     "but no wakeup sources are marked\n");
		} else {
			/* XXX we need to expand our PRCM interrupt handler */
			WARN(1, "prcm: WARNING: PRCM interrupt received, but "
			     "no code to handle it (%08x)\n", irqstatus_mpu);
		}

		prm_write_mod_reg(irqstatus_mpu, OCP_MOD,
				  OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

		irqstatus_mpu = prm_read_mod_reg(OCP_MOD,
					OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
		irqstatus_mpu &= irqenable_mpu;

	} while (irqstatus_mpu);

	return IRQ_HANDLED;
}

static void restore_control_register(u32 val)
{
	__asm__ __volatile__ ("mcr p15, 0, %0, c1, c0, 0" : : "r" (val));
}

/* Function to restore the table entry that was modified for enabling MMU */
static void restore_table_entry(void)
{
	void __iomem *scratchpad_address;
	u32 previous_value, control_reg_value;
	u32 *address;

	scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);

	/* Get address of entry that was modified */
	address = (u32 *)__raw_readl(scratchpad_address +
				     OMAP343X_TABLE_ADDRESS_OFFSET);
	/* Get the previous value which needs to be restored */
	previous_value = __raw_readl(scratchpad_address +
				     OMAP343X_TABLE_VALUE_OFFSET);
	address = __va(address);
	*address = previous_value;
	flush_tlb_all();
	control_reg_value = __raw_readl(scratchpad_address
					+ OMAP343X_CONTROL_REG_VALUE_OFFSET);
	/* This will enable caches and prediction */
	restore_control_register(control_reg_value);
}

void omap_sram_idle(void)
{
	/*
	 * save_state tells the SRAM idle routine what needs to be
	 * saved and restored:
	 *   0 => nothing to save or restore
	 *   1 => only L1 and logic lost
	 *   2 => only L2 lost
	 *   3 => L1, L2 and logic lost
	 */
	int save_state = 0;
	int mpu_next_state = PWRDM_POWER_ON;
	int per_next_state = PWRDM_POWER_ON;
	int core_next_state = PWRDM_POWER_ON;
	int core_prev_state, per_prev_state;
	u32 sdrc_pwr = 0;

	if (!_omap_sram_idle)
		return;

	pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
	pwrdm_clear_all_prev_pwrst(neon_pwrdm);
	pwrdm_clear_all_prev_pwrst(core_pwrdm);
	pwrdm_clear_all_prev_pwrst(per_pwrdm);

	mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
	switch (mpu_next_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_RET:
		/* No need to save context */
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 3;
		break;
	default:
		/* Invalid state */
		printk(KERN_ERR "Invalid mpu state in sram_idle\n");
		return;
	}
	pwrdm_pre_transition();

	/* NEON control */
	if (pwrdm_read_pwrst(neon_pwrdm) == PWRDM_POWER_ON)
		pwrdm_set_next_pwrst(neon_pwrdm, mpu_next_state);

	/* Enable IO-PAD and IO-CHAIN wakeups */
	per_next_state = pwrdm_read_next_pwrst(per_pwrdm);
	core_next_state = pwrdm_read_next_pwrst(core_pwrdm);
	if (omap3_has_io_wakeup() &&
	    (per_next_state < PWRDM_POWER_ON ||
	     core_next_state < PWRDM_POWER_ON)) {
		prm_set_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
		omap3_enable_io_chain();
	}

	/* Block console output in case it is on one of the OMAP UARTs */
	if (!is_suspending())
		if (per_next_state < PWRDM_POWER_ON ||
		    core_next_state < PWRDM_POWER_ON)
			if (try_acquire_console_sem())
				goto console_still_active;

	/* PER */
	if (per_next_state < PWRDM_POWER_ON) {
		omap_uart_prepare_idle(2);
		omap_uart_prepare_idle(3);
		omap2_gpio_prepare_for_idle(per_next_state);
		if (per_next_state == PWRDM_POWER_OFF)
			omap3_per_save_context();
	}

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		omap_uart_prepare_idle(0);
		omap_uart_prepare_idle(1);
		if (core_next_state == PWRDM_POWER_OFF) {
			omap3_core_save_context();
			omap3_prcm_save_context();
		}
	}

	omap3_intc_prepare_idle();

	/*
	 * On EMU/HS devices the ROM code restores an SDRC value
	 * from the scratchpad which has automatic self-refresh on a
	 * timeout of AUTO_CNT = 1 enabled. This takes care of erratum
	 * 1.142. Hence store/restore the SDRC_POWER register here.
	 */
	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
	    omap_type() != OMAP2_DEVICE_TYPE_GP &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_pwr = sdrc_read_reg(SDRC_POWER);

	/*
	 * omap3_arm_context is the location where ARM registers
	 * get saved. The restore path then reads from this
	 * location and restores them back.
	 */
	_omap_sram_idle(omap3_arm_context, save_state);
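	/*
	 * cpu_init() reinitializes the exception-mode stacks, which can be
	 * lost if the MPU power domain actually hit off-mode in the SRAM
	 * idle routine above.
	 */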
	cpu_init();

	/* Restore normal SDRC POWER settings */
	if (omap_rev() >= OMAP3430_REV_ES3_0 &&
	    omap_type() != OMAP2_DEVICE_TYPE_GP &&
	    core_next_state == PWRDM_POWER_OFF)
		sdrc_write_reg(sdrc_pwr, SDRC_POWER);

	/* Restore table entry modified during MMU restoration */
	if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
		restore_table_entry();

	/* CORE */
	if (core_next_state < PWRDM_POWER_ON) {
		core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
		if (core_prev_state == PWRDM_POWER_OFF) {
			omap3_core_restore_context();
			omap3_prcm_restore_context();
			omap3_sram_restore_context();
			omap2_sms_restore_context();
		}
		omap_uart_resume_idle(0);
		omap_uart_resume_idle(1);
		if (core_next_state == PWRDM_POWER_OFF)
			prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK,
					       OMAP3430_GR_MOD,
					       OMAP3_PRM_VOLTCTRL_OFFSET);
	}
	omap3_intc_resume_idle();

	/* PER */
	if (per_next_state < PWRDM_POWER_ON) {
		per_prev_state = pwrdm_read_prev_pwrst(per_pwrdm);
		omap2_gpio_resume_after_idle();
		if (per_prev_state == PWRDM_POWER_OFF)
			omap3_per_restore_context();
		omap_uart_resume_idle(2);
		omap_uart_resume_idle(3);
	}

	if (!is_suspending())
		release_console_sem();

console_still_active:
	/* Disable IO-PAD and IO-CHAIN wakeup */
	if (omap3_has_io_wakeup() &&
	    (per_next_state < PWRDM_POWER_ON ||
	     core_next_state < PWRDM_POWER_ON)) {
		prm_clear_mod_reg_bits(OMAP3430_EN_IO_MASK, WKUP_MOD, PM_WKEN);
		omap3_disable_io_chain();
	}

	pwrdm_post_transition();

	omap2_clkdm_allow_idle(mpu_pwrdm->pwrdm_clkdms[0]);
}

int omap3_can_sleep(void)
{
	if (!sleep_while_idle)
		return 0;
	if (!omap_uart_can_sleep())
		return 0;
	return 1;
}

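/* Default idle loop; installed as pm_idle in omap3_pm_init() below. */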
static void omap3_pm_idle(void)
{
	local_irq_disable();
	local_fiq_disable();

	if (!omap3_can_sleep())
		goto out;

	if (omap_irq_pending() || need_resched())
		goto out;

	omap_sram_idle();

out:
	local_fiq_enable();
	local_irq_enable();
}

#ifdef CONFIG_SUSPEND
static int omap3_pm_suspend(void)
{
	struct power_state *pwrst;
	int state, ret = 0;

	if (wakeup_timer_seconds || wakeup_timer_milliseconds)
		omap2_pm_wakeup_on_timer(wakeup_timer_seconds,
					 wakeup_timer_milliseconds);

	/* Read current next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node)
		pwrst->saved_state = pwrdm_read_next_pwrst(pwrst->pwrdm);
	/* Set ones wanted by suspend */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state))
			goto restore;
		if (pwrdm_clear_all_prev_pwrst(pwrst->pwrdm))
			goto restore;
	}

	omap_uart_prepare_suspend();
	omap3_intc_suspend();

	omap_sram_idle();

restore:
	/* Restore next_pwrsts */
	list_for_each_entry(pwrst, &pwrst_list, node) {
		state = pwrdm_read_prev_pwrst(pwrst->pwrdm);
		if (state > pwrst->next_state) {
			printk(KERN_INFO "Powerdomain (%s) didn't enter "
			       "target state %d\n",
			       pwrst->pwrdm->name, pwrst->next_state);
			ret = -1;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->saved_state);
	}
	if (ret)
		printk(KERN_ERR "Could not enter target state in pm_suspend\n");
	else
		printk(KERN_INFO "Successfully put all powerdomains "
		       "to target state\n");

	return ret;
}

static int omap3_pm_enter(suspend_state_t unused)
{
	int ret = 0;

	switch (suspend_state) {
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		ret = omap3_pm_suspend();
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/* Hooks to enable / disable UART interrupts during suspend */
static int omap3_pm_begin(suspend_state_t state)
{
	disable_hlt();
	suspend_state = state;
	omap_uart_enable_irqs(0);
	return 0;
}

static void omap3_pm_end(void)
{
	suspend_state = PM_SUSPEND_ON;
	omap_uart_enable_irqs(1);
	enable_hlt();
	return;
}

static struct platform_suspend_ops omap_pm_ops = {
	.begin		= omap3_pm_begin,
	.end		= omap3_pm_end,
	.enter		= omap3_pm_enter,
	.valid		= suspend_valid_only_mem,
};
#endif /* CONFIG_SUSPEND */


/**
 * omap3_iva_idle(): ensure IVA is in idle so it can be put into
 *		     retention
 *
 * In cases where IVA2 is activated by bootcode, it may prevent
 * full-chip retention or off-mode because it is not idle. This
 * function forces the IVA2 into idle state so it can go
 * into retention/off and thus allow full-chip retention/off.
 *
 **/
static void __init omap3_iva_idle(void)
{
	/* ensure IVA2 clock is disabled */
	cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* if no clock activity, nothing else to do */
	if (!(cm_read_mod_reg(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSTST) &
	      OMAP3430_CLKACTIVITY_IVA2_MASK))
		return;

	/* Reset IVA2 */
	prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
			  OMAP3430_RST2_IVA2_MASK |
			  OMAP3430_RST3_IVA2_MASK,
			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	/* Enable IVA2 clock */
	cm_write_mod_reg(OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK,
			 OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* Set IVA2 boot mode to 'idle' */
	omap_ctrl_writel(OMAP3_IVA2_BOOTMOD_IDLE,
			 OMAP343X_CONTROL_IVA2_BOOTMOD);

	/* Un-reset IVA2 */
	prm_write_mod_reg(0, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);

	/* Disable IVA2 clock */
	cm_write_mod_reg(0, OMAP3430_IVA2_MOD, CM_FCLKEN);

	/* Reset IVA2 */
	prm_write_mod_reg(OMAP3430_RST1_IVA2_MASK |
			  OMAP3430_RST2_IVA2_MASK |
			  OMAP3430_RST3_IVA2_MASK,
			  OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
}

static void __init omap3_d2d_idle(void)
{
	u16 mask, padconf;

	/* In a stand-alone OMAP3430, where there is no stacked modem,
	 * the D2D Idle Ack and D2D MStandby signals must be pulled
	 * high. Set CONTROL_PADCONF_SAD2D_IDLEACK and
	 * CONTROL_PADCONF_SAD2D_MSTDBY to have a pull-up. */
	mask = (1 << 4) | (1 << 3); /* pull-up, enabled */
	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_MSTANDBY);
	padconf |= mask;
	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_MSTANDBY);

	padconf = omap_ctrl_readw(OMAP3_PADCONF_SAD2D_IDLEACK);
	padconf |= mask;
	omap_ctrl_writew(padconf, OMAP3_PADCONF_SAD2D_IDLEACK);

	/* reset modem */
	prm_write_mod_reg(OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RSTPWRON_MASK |
			  OMAP3430_RM_RSTCTRL_CORE_MODEM_SW_RST_MASK,
			  CORE_MOD, OMAP2_RM_RSTCTRL);
	prm_write_mod_reg(0, CORE_MOD, OMAP2_RM_RSTCTRL);
}

static void __init prcm_setup_regs(void)
{
	u32 omap3630_auto_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_AUTO_UART4_MASK : 0;
	u32 omap3630_en_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_EN_UART4_MASK : 0;
	u32 omap3630_grpsel_uart4_mask = cpu_is_omap3630() ?
					OMAP3630_GRPSEL_UART4_MASK : 0;


	/* XXX Reset all wkdeps. This should be done when initializing
	 * powerdomains */
	prm_write_mod_reg(0, OMAP3430_IVA2_MOD, PM_WKDEP);
	prm_write_mod_reg(0, MPU_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP3430_DSS_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP3430_NEON_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP3430_CAM_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP3430_PER_MOD, PM_WKDEP);
	if (omap_rev() > OMAP3430_REV_ES1_0) {
		prm_write_mod_reg(0, OMAP3430ES2_SGX_MOD, PM_WKDEP);
		prm_write_mod_reg(0, OMAP3430ES2_USBHOST_MOD, PM_WKDEP);
	} else
		prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);

	/*
	 * Enable interface clock autoidle for all modules.
	 * Note that in the long run this should be done by clockfw
	 */
	cm_write_mod_reg(
		OMAP3430_AUTO_MODEM_MASK |
		OMAP3430ES2_AUTO_MMC3_MASK |
		OMAP3430ES2_AUTO_ICR_MASK |
		OMAP3430_AUTO_AES2_MASK |
		OMAP3430_AUTO_SHA12_MASK |
		OMAP3430_AUTO_DES2_MASK |
		OMAP3430_AUTO_MMC2_MASK |
		OMAP3430_AUTO_MMC1_MASK |
		OMAP3430_AUTO_MSPRO_MASK |
		OMAP3430_AUTO_HDQ_MASK |
		OMAP3430_AUTO_MCSPI4_MASK |
		OMAP3430_AUTO_MCSPI3_MASK |
		OMAP3430_AUTO_MCSPI2_MASK |
		OMAP3430_AUTO_MCSPI1_MASK |
		OMAP3430_AUTO_I2C3_MASK |
		OMAP3430_AUTO_I2C2_MASK |
		OMAP3430_AUTO_I2C1_MASK |
		OMAP3430_AUTO_UART2_MASK |
		OMAP3430_AUTO_UART1_MASK |
		OMAP3430_AUTO_GPT11_MASK |
		OMAP3430_AUTO_GPT10_MASK |
		OMAP3430_AUTO_MCBSP5_MASK |
		OMAP3430_AUTO_MCBSP1_MASK |
		OMAP3430ES1_AUTO_FAC_MASK | /* This is es1 only */
		OMAP3430_AUTO_MAILBOXES_MASK |
		OMAP3430_AUTO_OMAPCTRL_MASK |
		OMAP3430ES1_AUTO_FSHOSTUSB_MASK |
		OMAP3430_AUTO_HSOTGUSB_MASK |
		OMAP3430_AUTO_SAD2D_MASK |
		OMAP3430_AUTO_SSI_MASK,
		CORE_MOD, CM_AUTOIDLE1);

	cm_write_mod_reg(
		OMAP3430_AUTO_PKA_MASK |
		OMAP3430_AUTO_AES1_MASK |
		OMAP3430_AUTO_RNG_MASK |
		OMAP3430_AUTO_SHA11_MASK |
		OMAP3430_AUTO_DES1_MASK,
		CORE_MOD, CM_AUTOIDLE2);

	if (omap_rev() > OMAP3430_REV_ES1_0) {
		cm_write_mod_reg(
			OMAP3430_AUTO_MAD2D_MASK |
			OMAP3430ES2_AUTO_USBTLL_MASK,
			CORE_MOD, CM_AUTOIDLE3);
	}

	cm_write_mod_reg(
		OMAP3430_AUTO_WDT2_MASK |
		OMAP3430_AUTO_WDT1_MASK |
		OMAP3430_AUTO_GPIO1_MASK |
		OMAP3430_AUTO_32KSYNC_MASK |
		OMAP3430_AUTO_GPT12_MASK |
		OMAP3430_AUTO_GPT1_MASK,
		WKUP_MOD, CM_AUTOIDLE);

	cm_write_mod_reg(
		OMAP3430_AUTO_DSS_MASK,
		OMAP3430_DSS_MOD,
		CM_AUTOIDLE);

	cm_write_mod_reg(
		OMAP3430_AUTO_CAM_MASK,
		OMAP3430_CAM_MOD,
		CM_AUTOIDLE);

	cm_write_mod_reg(
		omap3630_auto_uart4_mask |
		OMAP3430_AUTO_GPIO6_MASK |
		OMAP3430_AUTO_GPIO5_MASK |
		OMAP3430_AUTO_GPIO4_MASK |
		OMAP3430_AUTO_GPIO3_MASK |
		OMAP3430_AUTO_GPIO2_MASK |
		OMAP3430_AUTO_WDT3_MASK |
		OMAP3430_AUTO_UART3_MASK |
		OMAP3430_AUTO_GPT9_MASK |
		OMAP3430_AUTO_GPT8_MASK |
		OMAP3430_AUTO_GPT7_MASK |
		OMAP3430_AUTO_GPT6_MASK |
		OMAP3430_AUTO_GPT5_MASK |
		OMAP3430_AUTO_GPT4_MASK |
		OMAP3430_AUTO_GPT3_MASK |
		OMAP3430_AUTO_GPT2_MASK |
		OMAP3430_AUTO_MCBSP4_MASK |
		OMAP3430_AUTO_MCBSP3_MASK |
		OMAP3430_AUTO_MCBSP2_MASK,
		OMAP3430_PER_MOD,
		CM_AUTOIDLE);

	if (omap_rev() > OMAP3430_REV_ES1_0) {
		cm_write_mod_reg(
			OMAP3430ES2_AUTO_USBHOST_MASK,
			OMAP3430ES2_USBHOST_MOD,
			CM_AUTOIDLE);
	}

	omap_ctrl_writel(OMAP3430_AUTOIDLE_MASK, OMAP2_CONTROL_SYSCONFIG);

	/*
	 * Set all plls to autoidle. This is needed until autoidle is
	 * enabled by clockfw
	 */
	cm_write_mod_reg(1 << OMAP3430_AUTO_IVA2_DPLL_SHIFT,
			 OMAP3430_IVA2_MOD, CM_AUTOIDLE2);
	cm_write_mod_reg(1 << OMAP3430_AUTO_MPU_DPLL_SHIFT,
			 MPU_MOD,
			 CM_AUTOIDLE2);
	cm_write_mod_reg((1 << OMAP3430_AUTO_PERIPH_DPLL_SHIFT) |
			 (1 << OMAP3430_AUTO_CORE_DPLL_SHIFT),
			 PLL_MOD,
			 CM_AUTOIDLE);
	cm_write_mod_reg(1 << OMAP3430ES2_AUTO_PERIPH2_DPLL_SHIFT,
			 PLL_MOD,
			 CM_AUTOIDLE2);

	/*
	 * Enable control of the external oscillator through
	 * sys_clkreq. In the long run the clock framework should
	 * take care of this.
	 */
	prm_rmw_mod_reg_bits(OMAP_AUTOEXTCLKMODE_MASK,
			     1 << OMAP_AUTOEXTCLKMODE_SHIFT,
			     OMAP3430_GR_MOD,
			     OMAP3_PRM_CLKSRC_CTRL_OFFSET);

	/* Set up the wakeup sources */
	prm_write_mod_reg(OMAP3430_EN_IO_MASK | OMAP3430_EN_GPIO1_MASK |
			  OMAP3430_EN_GPT1_MASK | OMAP3430_EN_GPT12_MASK,
			  WKUP_MOD, PM_WKEN);
	/* No need to write EN_IO, that is always enabled */
	prm_write_mod_reg(OMAP3430_GRPSEL_GPIO1_MASK |
			  OMAP3430_GRPSEL_GPT1_MASK |
			  OMAP3430_GRPSEL_GPT12_MASK,
			  WKUP_MOD, OMAP3430_PM_MPUGRPSEL);
	/* For some reason IO doesn't generate a wakeup event even if
	 * it is selected into the MPU wakeup group */
	prm_write_mod_reg(OMAP3430_IO_EN_MASK | OMAP3430_WKUP_EN_MASK,
			  OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET);

	/* Enable PM_WKEN to support DSS LPR */
	prm_write_mod_reg(OMAP3430_PM_WKEN_DSS_EN_DSS_MASK,
			  OMAP3430_DSS_MOD, PM_WKEN);

	/* Enable wakeups in PER */
	prm_write_mod_reg(omap3630_en_uart4_mask |
			  OMAP3430_EN_GPIO2_MASK | OMAP3430_EN_GPIO3_MASK |
			  OMAP3430_EN_GPIO4_MASK | OMAP3430_EN_GPIO5_MASK |
			  OMAP3430_EN_GPIO6_MASK | OMAP3430_EN_UART3_MASK |
			  OMAP3430_EN_MCBSP2_MASK | OMAP3430_EN_MCBSP3_MASK |
			  OMAP3430_EN_MCBSP4_MASK,
			  OMAP3430_PER_MOD, PM_WKEN);
	/* and allow them to wake up MPU */
	prm_write_mod_reg(omap3630_grpsel_uart4_mask |
			  OMAP3430_GRPSEL_GPIO2_MASK |
			  OMAP3430_GRPSEL_GPIO3_MASK |
			  OMAP3430_GRPSEL_GPIO4_MASK |
			  OMAP3430_GRPSEL_GPIO5_MASK |
			  OMAP3430_GRPSEL_GPIO6_MASK |
			  OMAP3430_GRPSEL_UART3_MASK |
			  OMAP3430_GRPSEL_MCBSP2_MASK |
			  OMAP3430_GRPSEL_MCBSP3_MASK |
			  OMAP3430_GRPSEL_MCBSP4_MASK,
			  OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL);

	/* Don't attach IVA interrupts */
	prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL);
	prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1);
	prm_write_mod_reg(0, CORE_MOD, OMAP3430ES2_PM_IVAGRPSEL3);
	prm_write_mod_reg(0, OMAP3430_PER_MOD, OMAP3430_PM_IVAGRPSEL);

	/* Clear any pending 'reset' flags */
	prm_write_mod_reg(0xffffffff, MPU_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430_PER_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430_EMU_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430_NEON_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430_DSS_MOD, OMAP2_RM_RSTST);
	prm_write_mod_reg(0xffffffff, OMAP3430ES2_USBHOST_MOD, OMAP2_RM_RSTST);

	/* Clear any pending PRCM interrupts */
	prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);

	omap3_iva_idle();
	omap3_d2d_idle();
}

void omap3_pm_off_mode_enable(int enable)
{
	struct power_state *pwrst;
	u32 state;

	if (enable)
		state = PWRDM_POWER_OFF;
	else
		state = PWRDM_POWER_RET;

#ifdef CONFIG_CPU_IDLE
	/*
	 * Erratum i583: implementation for ES rev < ES1.2 on 3630. We cannot
	 * enable OFF mode in a stable form for previous revisions; restrict
	 * to RET instead.
	 */
	if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
		omap3_cpuidle_update_states(state, PWRDM_POWER_RET);
	else
		omap3_cpuidle_update_states(state, state);
#endif

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
				pwrst->pwrdm == core_pwrdm &&
				state == PWRDM_POWER_OFF) {
			pwrst->next_state = PWRDM_POWER_RET;
			WARN_ONCE(1,
				  "%s: Core OFF disabled due to errata i583\n",
				  __func__);
		} else {
			pwrst->next_state = state;
		}
		omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
	}
}

int omap3_pm_get_suspend_state(struct powerdomain *pwrdm)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm)
			return pwrst->next_state;
	}
	return -EINVAL;
}

int omap3_pm_set_suspend_state(struct powerdomain *pwrdm, int state)
{
	struct power_state *pwrst;

	list_for_each_entry(pwrst, &pwrst_list, node) {
		if (pwrst->pwrdm == pwrdm) {
			pwrst->next_state = state;
			return 0;
		}
	}
	return -EINVAL;
}

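/*
 * Called once per registered powerdomain at init: remember it in pwrst_list
 * with a default next state of RET, enable hardware save-and-restore where
 * available, and program that initial state.
 */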
static int __init pwrdms_setup(struct powerdomain *pwrdm, void *unused)
{
	struct power_state *pwrst;

	if (!pwrdm->pwrsts)
		return 0;

	pwrst = kmalloc(sizeof(struct power_state), GFP_ATOMIC);
	if (!pwrst)
		return -ENOMEM;
	pwrst->pwrdm = pwrdm;
	pwrst->next_state = PWRDM_POWER_RET;
	list_add(&pwrst->node, &pwrst_list);

	if (pwrdm_has_hdwr_sar(pwrdm))
		pwrdm_enable_hdwr_sar(pwrdm);

	return omap_set_pwrdm_state(pwrst->pwrdm, pwrst->next_state);
}

/*
 * Enable hw supervised mode for all clockdomains where it is supported;
 * initiate a sleep transition for the other clockdomains if they are
 * not in use.
 */
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
{
	if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
		omap2_clkdm_allow_idle(clkdm);
	else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
		 atomic_read(&clkdm->usecount) == 0)
		omap2_clkdm_sleep(clkdm);
	return 0;
}

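/*
 * Copy the low-level suspend/save routines into on-chip SRAM; they cannot
 * run from SDRAM, which may be in self-refresh while they execute.
 */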
void omap_push_sram_idle(void)
{
	_omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend,
					 omap34xx_cpu_suspend_sz);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		_omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
				save_secure_ram_context_sz);
}

static void __init pm_errata_configure(void)
{
	if (cpu_is_omap3630()) {
		pm34xx_errata |= PM_RTA_ERRATUM_i608;
		/* Enable the L2 cache toggling in sleep logic */
		enable_omap3630_toggle_l2_on_restore();
		if (omap_rev() < OMAP3630_REV_ES1_2)
			pm34xx_errata |= PM_SDRC_WAKEUP_ERRATUM_i583;
	}
}

static int __init omap3_pm_init(void)
{
	struct power_state *pwrst, *tmp;
	struct clockdomain *neon_clkdm, *per_clkdm, *mpu_clkdm, *core_clkdm;
	int ret;

	if (!cpu_is_omap34xx())
		return -ENODEV;

	pm_errata_configure();

	printk(KERN_ERR "Power Management for TI OMAP3.\n");

	/* XXX prcm_setup_regs needs to be before enabling hw
	 * supervised mode for powerdomains */
	prcm_setup_regs();

	ret = request_irq(INT_34XX_PRCM_MPU_IRQ,
			  (irq_handler_t)prcm_interrupt_handler,
			  IRQF_DISABLED, "prcm", NULL);
	if (ret) {
		printk(KERN_ERR "request_irq failed to register for 0x%x\n",
		       INT_34XX_PRCM_MPU_IRQ);
		goto err1;
	}

	ret = pwrdm_for_each(pwrdms_setup, NULL);
	if (ret) {
		printk(KERN_ERR "Failed to setup powerdomains\n");
		goto err2;
	}

	(void) clkdm_for_each(clkdms_setup, NULL);

	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");
	if (mpu_pwrdm == NULL) {
		printk(KERN_ERR "Failed to get mpu_pwrdm\n");
		goto err2;
	}

	neon_pwrdm = pwrdm_lookup("neon_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	core_pwrdm = pwrdm_lookup("core_pwrdm");
	cam_pwrdm = pwrdm_lookup("cam_pwrdm");

	neon_clkdm = clkdm_lookup("neon_clkdm");
	mpu_clkdm = clkdm_lookup("mpu_clkdm");
	per_clkdm = clkdm_lookup("per_clkdm");
	core_clkdm = clkdm_lookup("core_clkdm");

	omap_push_sram_idle();
#ifdef CONFIG_SUSPEND
	suspend_set_ops(&omap_pm_ops);
#endif /* CONFIG_SUSPEND */

	pm_idle = omap3_pm_idle;
	omap3_idle_init();

	/*
	 * RTA is disabled during initialization as per erratum i608;
	 * it is safer to disable RTA in the bootloader, but we would like
	 * to be doubly sure here and prevent any mishaps.
	 */
	if (IS_PM34XX_ERRATUM(PM_RTA_ERRATUM_i608))
		omap3630_ctrl_disable_rta();

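	/*
	 * Keep the NEON clockdomain waking up with the MPU so VFP/NEON
	 * state is usable as soon as the MPU runs again.
	 */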
	clkdm_add_wkdep(neon_clkdm, mpu_clkdm);
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap3_secure_ram_storage =
			kmalloc(0x803F, GFP_KERNEL);
		if (!omap3_secure_ram_storage)
			printk(KERN_ERR "Memory allocation failed when "
			       "allocating for secure sram context\n");

		local_irq_disable();
		local_fiq_disable();

		omap_dma_global_context_save();
		omap3_save_secure_ram_context(PWRDM_POWER_ON);
		omap_dma_global_context_restore();

		local_irq_enable();
		local_fiq_enable();
	}

	omap3_save_scratchpad_contents();
err1:
	return ret;
err2:
	free_irq(INT_34XX_PRCM_MPU_IRQ, NULL);
	list_for_each_entry_safe(pwrst, tmp, &pwrst_list, node) {
		list_del(&pwrst->node);
		kfree(pwrst);
	}
	return ret;
}

late_initcall(omap3_pm_init);