/*
 * Copyright (c) 2012 Linaro : Daniel Lezcano <daniel.lezcano@linaro.org> (IBM)
 *
 * Based on the work of Rickard Andersson <rickard.andersson@stericsson.com>
 * and Jonas Aaberg <jonas.aberg@stericsson.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/cpuidle.h>
#include <linux/clockchips.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/smp.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/platform_data/arm-ux500-pm.h>

#include <asm/cpuidle.h>
#include <asm/proc-fns.h>

static atomic_t master = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(master_lock);
static DEFINE_PER_CPU(struct cpuidle_device, ux500_cpuidle_device);

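/*
 * Enter function for the ApIdle state. The last cpu to go idle (tracked
 * with the 'master' atomic) takes 'master_lock', decouples the gic and
 * asks the prcmu for the AP retention state; if any of these steps fails,
 * or the other cpu is not in WFI, it falls back to a plain WFI.
 */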
static inline int ux500_enter_idle(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	int this_cpu = smp_processor_id();
	bool recouple = false;

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &this_cpu);

	if (atomic_inc_return(&master) == num_online_cpus()) {

		/* With this lock, we prevent the other cpu from exiting and
		 * entering this function again and becoming the master */
		if (!spin_trylock(&master_lock))
			goto wfi;

		/* decouple the gic from the A9 cores */
		if (prcmu_gic_decouple()) {
			spin_unlock(&master_lock);
			goto out;
		}

		/* If an error occurs, we will have to recouple the gic
		 * manually */
		recouple = true;

		/* At this point, as the gic is decoupled, if the other
		 * cpu is in WFI, we have the guarantee it won't be woken
		 * up, so we can safely go to retention */
		if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1))
			goto out;

		/* The prcmu will be in charge of watching the interrupts
		 * and waking up the cpus */
		if (prcmu_copy_gic_settings())
			goto out;

		/* Check that in the meantime an interrupt did
		 * not occur on the gic ... */
		if (prcmu_gic_pending_irq())
			goto out;

		/* ... nor on the prcmu */
		if (prcmu_pending_irq())
			goto out;

		/* Go to the retention state, the prcmu will wait for the
		 * cpu to go to WFI and this is what happens after exiting
		 * this 'master' critical section */
		if (prcmu_set_power_state(PRCMU_AP_IDLE, true, true))
			goto out;

		/* When we switch to retention, the prcmu is in charge
		 * of recoupling the gic automatically */
		recouple = false;

		spin_unlock(&master_lock);
	}
wfi:
	cpu_do_idle();
out:
	atomic_dec(&master);

	if (recouple) {
		prcmu_gic_recouple();
		spin_unlock(&master_lock);
	}

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &this_cpu);

	return index;
}

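/*
 * State 0 is the ARM WFI state (also the safe state); state 1 is the
 * ApIdle retention state entered through ux500_enter_idle() above.
 * exit_latency and target_residency are expressed in microseconds, as
 * for every cpuidle state.
 */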
static struct cpuidle_driver ux500_idle_driver = {
	.name = "ux500_idle",
	.owner = THIS_MODULE,
	.en_core_tk_irqen = 1,
	.states = {
		ARM_CPUIDLE_WFI_STATE,
		{
			.enter		  = ux500_enter_idle,
			.exit_latency	  = 70,
			.target_residency = 260,
			.flags		  = CPUIDLE_FLAG_TIME_VALID,
			.name		  = "ApIdle",
			.desc		  = "ARM Retention",
		},
	},
	.safe_state_index = 0,
	.state_count = 2,
};

/*
 * For each cpu, set up the broadcast timer because we will
 * need to migrate the timers for the states >= ApIdle.
 */
static void ux500_setup_broadcast_timer(void *arg)
{
	int cpu = smp_processor_id();

	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
}

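/*
 * Called at device_initcall time: enable the prcmu wake up sources,
 * switch each cpu to the broadcast timer, then register the driver and
 * one cpuidle device per online cpu.
 */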
int __init ux500_idle_init(void)
{
	int ret, cpu;
	struct cpuidle_device *device;

	/* Configure wake up reasons */
	prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
			     PRCMU_WAKEUP(ABB));

	/*
	 * Configure the timer broadcast for each cpu. This must be
	 * done from the cpu context, so we use an smp cross call
	 * with 'on_each_cpu'.
	 */
	on_each_cpu(ux500_setup_broadcast_timer, NULL, 1);

	ret = cpuidle_register_driver(&ux500_idle_driver);
	if (ret) {
		printk(KERN_ERR "failed to register ux500 idle driver\n");
		return ret;
	}

	for_each_online_cpu(cpu) {
		device = &per_cpu(ux500_cpuidle_device, cpu);
		device->cpu = cpu;
		ret = cpuidle_register_device(device);
		if (ret) {
			printk(KERN_ERR "Failed to register cpuidle device for cpu%d\n",
			       cpu);
			goto out_unregister;
		}
	}
out:
	return ret;

out_unregister:
	for_each_online_cpu(cpu) {
		device = &per_cpu(ux500_cpuidle_device, cpu);
		cpuidle_unregister_device(device);
	}

	cpuidle_unregister_driver(&ux500_idle_driver);
	goto out;
}

device_initcall(ux500_idle_init);