/*
 * Versatile Express Serial Power Controller (SPC) support
 *
 * Copyright (C) 2013 ARM Ltd.
 *
 * Authors: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
 *          Achin Gupta           <achin.gupta@arm.com>
 *          Lorenzo Pieralisi     <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
#include <linux/semaphore.h>

#include <asm/cacheflush.h>

#define SPCLOG "vexpress-spc: "

#define PERF_LVL_A15		0x00
#define PERF_REQ_A15		0x04
#define PERF_LVL_A7		0x08
#define PERF_REQ_A7		0x0c
#define COMMS			0x10
#define COMMS_REQ		0x14
#define PWC_STATUS		0x18
#define PWC_FLAG		0x1c

/* SPC wake-up IRQs status and mask */
#define WAKE_INT_MASK		0x24
#define WAKE_INT_RAW		0x28
#define WAKE_INT_STAT		0x2c
/* SPC power down registers */
#define A15_PWRDN_EN		0x30
#define A7_PWRDN_EN		0x34
/* SPC per-CPU mailboxes */
#define A15_BX_ADDR0		0x68
#define A7_BX_ADDR0		0x78

/* SPC system config interface registers */
#define SYSCFG_WDATA		0x70
#define SYSCFG_RDATA		0x74

/* A15/A7 OPP virtual register base */
#define A15_PERFVAL_BASE	0xC10
#define A7_PERFVAL_BASE		0xC30

/* Config interface control bits */
#define SYSCFG_START		(1 << 31)
#define SYSCFG_SCC		(6 << 20)
#define SYSCFG_STAT		(14 << 20)

/* wake-up interrupt masks */
#define GBL_WAKEUP_INT_MSK	(0x3 << 10)

/* TC2 static dual-cluster configuration */
#define MAX_CLUSTERS		2

/*
 * Even though the SPC takes at most 3-5 ms to complete any OPP/COMMS
 * operation, the operation could start just before the jiffy counter is
 * about to be incremented, so use a timeout of 20 ms (i.e. 2 jiffies
 * at HZ=100).
 */
#define TIMEOUT_US	20000

#define MAX_OPPS	8
#define CA15_DVFS	0
#define CA7_DVFS	1
#define SPC_SYS_CFG	2
#define STAT_COMPLETE(type)	((1 << 0) << (type << 2))
#define STAT_ERR(type)		((1 << 1) << (type << 2))
#define RESPONSE_MASK(type)	(STAT_COMPLETE(type) | STAT_ERR(type))

struct ve_spc_opp {
	unsigned long freq;
	unsigned long u_volt;
};

struct ve_spc_drvdata {
	void __iomem *baseaddr;
	/*
	 * A15 cluster identifier
	 * It corresponds to the A15 processors' MPIDR[15:8] bitfield
	 */
	u32 a15_clusid;
	uint32_t cur_rsp_mask;
	uint32_t cur_rsp_stat;
	struct semaphore sem;
	struct completion done;
	struct ve_spc_opp *opps[MAX_CLUSTERS];
	int num_opps[MAX_CLUSTERS];
};

static struct ve_spc_drvdata *info;

static inline bool cluster_is_a15(u32 cluster)
{
	return cluster == info->a15_clusid;
}

/**
 * ve_spc_global_wakeup_irq()
 *
 * Function to set/clear global wakeup IRQs. Not protected by locking since
 * it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 *
 * @set: if true, global wake-up IRQs are set, if false they are cleared
 */
void ve_spc_global_wakeup_irq(bool set)
{
	u32 reg;

	reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);

	if (set)
		reg |= GBL_WAKEUP_INT_MSK;
	else
		reg &= ~GBL_WAKEUP_INT_MSK;

	writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
}

/**
 * ve_spc_cpu_wakeup_irq()
 *
 * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
 * it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
 * @set: if true, wake-up IRQs are set, if false they are cleared
 */
void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
{
	u32 mask, reg;

	if (cluster >= MAX_CLUSTERS)
		return;

	mask = 1 << cpu;

	if (!cluster_is_a15(cluster))
		mask <<= 4;

	reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);

	if (set)
		reg |= mask;
	else
		reg &= ~mask;

	writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
}

/**
 * ve_spc_set_resume_addr() - set the jump address used for warm boot
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @cpu: mpidr[7:0] bitfield describing cpu affinity level
 * @addr: physical resume address
 */
void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr)
{
	void __iomem *baseaddr;

	if (cluster >= MAX_CLUSTERS)
		return;

	if (cluster_is_a15(cluster))
		baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2);
	else
		baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2);

	writel_relaxed(addr, baseaddr);
}

/**
 * ve_spc_powerdown()
 *
 * Function to enable/disable cluster powerdown. Not protected by locking
 * since it might be used in code paths where normal cacheable locks are not
 * working. Locking must be provided by the caller to ensure atomicity.
 *
 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
 * @enable: if true enables powerdown, if false disables it
 */
void ve_spc_powerdown(u32 cluster, bool enable)
{
	u32 pwdrn_reg;

	if (cluster >= MAX_CLUSTERS)
		return;

	pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN;
	writel_relaxed(enable, info->baseaddr + pwdrn_reg);
}

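/*
 * Read the current performance level for @cluster from the SPC and
 * translate it to a frequency (kHz) using the cached OPP table.
 */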
static int ve_spc_get_performance(int cluster, u32 *freq)
{
	struct ve_spc_opp *opps = info->opps[cluster];
	u32 perf_cfg_reg = 0;
	u32 perf;

	perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7;

	perf = readl_relaxed(info->baseaddr + perf_cfg_reg);
	if (perf >= info->num_opps[cluster])
		return -EINVAL;

	opps += perf;
	*freq = opps->freq;

	return 0;
}

/* find closest match to given frequency in OPP table */
static int ve_spc_round_performance(int cluster, u32 freq)
{
	int idx, max_opp = info->num_opps[cluster];
	struct ve_spc_opp *opps = info->opps[cluster];
	u32 fmin = 0, fmax = ~0, ftmp;

	freq /= 1000; /* OPP entries in kHz */
	for (idx = 0; idx < max_opp; idx++, opps++) {
		ftmp = opps->freq;
		if (ftmp >= freq) {
			if (ftmp <= fmax)
				fmax = ftmp;
		} else {
			if (ftmp >= fmin)
				fmin = ftmp;
		}
	}
	if (fmax != ~0)
		return fmax * 1000;
	else
		return fmin * 1000;
}

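/* look up the exact OPP table index for @freq (kHz), -EINVAL if absent */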
static int ve_spc_find_performance_index(int cluster, u32 freq)
{
	int idx, max_opp = info->num_opps[cluster];
	struct ve_spc_opp *opps = info->opps[cluster];

	for (idx = 0; idx < max_opp; idx++, opps++)
		if (opps->freq == freq)
			break;
	return (idx == max_opp) ? -EINVAL : idx;
}

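/*
 * Wait for the SPC interrupt handler to flag completion of the pending
 * request (or time out) and check the status it latched; returns 0 only
 * on successful completion.
 */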
static int ve_spc_waitforcompletion(int req_type)
{
	int ret = wait_for_completion_interruptible_timeout(
			&info->done, usecs_to_jiffies(TIMEOUT_US));
	if (ret == 0)
		ret = -ETIMEDOUT;
	else if (ret > 0)
		ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO;
	return ret;
}

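/*
 * Request a DVFS transition: map @freq (kHz) to an OPP index, write it to
 * the cluster's performance level register, then wait for the SPC to
 * acknowledge the request. Serialised against other SPC requests by
 * info->sem.
 */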
static int ve_spc_set_performance(int cluster, u32 freq)
{
	u32 perf_cfg_reg, perf_stat_reg;
	int ret, perf, req_type;

	if (cluster_is_a15(cluster)) {
		req_type = CA15_DVFS;
		perf_cfg_reg = PERF_LVL_A15;
		perf_stat_reg = PERF_REQ_A15;
	} else {
		req_type = CA7_DVFS;
		perf_cfg_reg = PERF_LVL_A7;
		perf_stat_reg = PERF_REQ_A7;
	}

	perf = ve_spc_find_performance_index(cluster, freq);

	if (perf < 0)
		return perf;

	if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
		return -ETIME;

	init_completion(&info->done);
	info->cur_rsp_mask = RESPONSE_MASK(req_type);

	writel(perf, info->baseaddr + perf_cfg_reg);
	ret = ve_spc_waitforcompletion(req_type);

	info->cur_rsp_mask = 0;
	up(&info->sem);

	return ret;
}

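/*
 * Issue a system config read through the SPC COMMS interface and return
 * the value latched in SYSCFG_RDATA via @data.
 */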
static int ve_spc_read_sys_cfg(int func, int offset, uint32_t *data)
{
	int ret;

	if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US)))
		return -ETIME;

	init_completion(&info->done);
	info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG);

	/* Set the control value */
	writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS);
	ret = ve_spc_waitforcompletion(SPC_SYS_CFG);

	if (ret == 0)
		*data = readl(info->baseaddr + SYSCFG_RDATA);

	info->cur_rsp_mask = 0;
	up(&info->sem);

	return ret;
}

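/*
 * SPC interrupt handler: latch the PWC status and complete the pending
 * request if the status matches the response currently being waited for.
 */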
static irqreturn_t ve_spc_irq_handler(int irq, void *data)
{
	struct ve_spc_drvdata *drv_data = data;
	uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS);

	if (info->cur_rsp_mask & status) {
		info->cur_rsp_stat = status;
		complete(&drv_data->done);
	}

	return IRQ_HANDLED;
}

/*
 *  +--------------------------+
 *  | 31      20 | 19        0 |
 *  +--------------------------+
 *  |   u_volt   |  freq(kHz)  |
 *  +--------------------------+
 */
#define MULT_FACTOR	20
#define VOLT_SHIFT	20
#define FREQ_MASK	(0xFFFFF)
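/*
 * Read the per-cluster OPP entries from the SCC registers and cache the
 * decoded frequency/voltage pairs; stops at the first failed read.
 */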
static int ve_spc_populate_opps(uint32_t cluster)
{
	uint32_t data = 0, off, ret, idx;
	struct ve_spc_opp *opps;

	opps = kzalloc(sizeof(*opps) * MAX_OPPS, GFP_KERNEL);
	if (!opps)
		return -ENOMEM;

	info->opps[cluster] = opps;

	off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE;
	for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) {
		ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
		if (!ret) {
			opps->freq = (data & FREQ_MASK) * MULT_FACTOR;
			opps->u_volt = data >> VOLT_SHIFT;
		} else {
			break;
		}
	}
	info->num_opps[cluster] = idx;

	return ret;
}

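/* register the cached OPPs for @cpu_dev's cluster with the OPP library */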
static int ve_init_opp_table(struct device *cpu_dev)
{
	int cluster = topology_physical_package_id(cpu_dev->id);
	int idx, ret = 0, max_opp = info->num_opps[cluster];
	struct ve_spc_opp *opps = info->opps[cluster];

	for (idx = 0; idx < max_opp; idx++, opps++) {
		ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt);
		if (ret) {
			dev_warn(cpu_dev, "failed to add opp %lu %lu\n",
				 opps->freq, opps->u_volt);
			return ret;
		}
	}
	return ret;
}

int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
{
	int ret;
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		pr_err(SPCLOG "unable to allocate mem\n");
		return -ENOMEM;
	}

	info->baseaddr = baseaddr;
	info->a15_clusid = a15_clusid;

	if (irq <= 0) {
		pr_err(SPCLOG "Invalid IRQ %d\n", irq);
		kfree(info);
		return -EINVAL;
	}

	init_completion(&info->done);

	readl_relaxed(info->baseaddr + PWC_STATUS);

	ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH
				| IRQF_ONESHOT, "vexpress-spc", info);
	if (ret) {
		pr_err(SPCLOG "IRQ %d request failed\n", irq);
		kfree(info);
		return -ENODEV;
	}

	sema_init(&info->sem, 1);
	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure driver info reaches main
	 * memory.
	 */
	sync_cache_w(info);
	sync_cache_w(&info);

	return 0;
}

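/* clk_hw wrapper that drives a cluster's DVFS through the SPC */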
struct clk_spc {
	struct clk_hw hw;
	int cluster;
};

#define to_clk_spc(spc) container_of(spc, struct clk_spc, hw)
static unsigned long spc_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_spc *spc = to_clk_spc(hw);
	u32 freq;

	if (ve_spc_get_performance(spc->cluster, &freq))
		return -EIO;

	return freq * 1000;
}

static long spc_round_rate(struct clk_hw *hw, unsigned long drate,
		unsigned long *parent_rate)
{
	struct clk_spc *spc = to_clk_spc(hw);

	return ve_spc_round_performance(spc->cluster, drate);
}

static int spc_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_spc *spc = to_clk_spc(hw);

	return ve_spc_set_performance(spc->cluster, rate / 1000);
}

static struct clk_ops clk_spc_ops = {
	.recalc_rate = spc_recalc_rate,
	.round_rate = spc_round_rate,
	.set_rate = spc_set_rate,
};

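/* register a root clock named after @cpu_dev, backed by its cluster's SPC */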
static struct clk *ve_spc_clk_register(struct device *cpu_dev)
{
	struct clk_init_data init;
	struct clk_spc *spc;

	spc = kzalloc(sizeof(*spc), GFP_KERNEL);
	if (!spc) {
		pr_err("could not allocate spc clk\n");
		return ERR_PTR(-ENOMEM);
	}

	spc->hw.init = &init;
	spc->cluster = topology_physical_package_id(cpu_dev->id);

	init.name = dev_name(cpu_dev);
	init.ops = &clk_spc_ops;
	init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE;
	init.num_parents = 0;

	return devm_clk_register(cpu_dev, &spc->hw);
}

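/*
 * Build the OPP tables, register a per-CPU clock and OPP entries, then
 * spawn the vexpress-spc-cpufreq platform device that consumes them.
 */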
static int __init ve_spc_clk_init(void)
{
	int cpu;
	struct clk *clk;

	if (!info)
		return 0; /* Continue only if SPC is initialised */

	if (ve_spc_populate_opps(0) || ve_spc_populate_opps(1)) {
		pr_err("failed to build OPP table\n");
		return -ENODEV;
	}

	for_each_possible_cpu(cpu) {
		struct device *cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			pr_warn("failed to get cpu%d device\n", cpu);
			continue;
		}
		clk = ve_spc_clk_register(cpu_dev);
		if (IS_ERR(clk)) {
			pr_warn("failed to register cpu%d clock\n", cpu);
			continue;
		}
		if (clk_register_clkdev(clk, NULL, dev_name(cpu_dev))) {
			pr_warn("failed to register cpu%d clock lookup\n", cpu);
			continue;
		}

		if (ve_init_opp_table(cpu_dev))
			pr_warn("failed to initialise cpu%d opp table\n", cpu);
	}

	platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0);
	return 0;
}
module_init(ve_spc_clk_init);