blob: c0a8fdf0777f4e5220b6dfd929a2d298f751b485 [file] [log] [blame]
Alex Elderba764c42020-03-05 22:28:19 -06001// SPDX-License-Identifier: GPL-2.0
2
3/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
Alex Elder4c7ccfc2021-02-12 08:33:59 -06004 * Copyright (C) 2018-2021 Linaro Ltd.
Alex Elderba764c42020-03-05 22:28:19 -06005 */
6
Alex Elder0305b702020-09-17 12:39:20 -05007#include <linux/refcount.h>
Alex Elderba764c42020-03-05 22:28:19 -06008#include <linux/mutex.h>
9#include <linux/clk.h>
10#include <linux/device.h>
11#include <linux/interconnect.h>
Alex Elder73ff3162021-08-04 10:36:24 -050012#include <linux/pm.h>
Alex Elder2abb0c72021-08-10 14:27:00 -050013#include <linux/pm_runtime.h>
Alex Elder73ff3162021-08-04 10:36:24 -050014#include <linux/bitops.h>
Alex Elderba764c42020-03-05 22:28:19 -060015
16#include "ipa.h"
17#include "ipa_clock.h"
Alex Elder73ff3162021-08-04 10:36:24 -050018#include "ipa_endpoint.h"
Alex Elderba764c42020-03-05 22:28:19 -060019#include "ipa_modem.h"
Alex Elderdfccb8b2020-11-19 16:40:39 -060020#include "ipa_data.h"
Alex Elderba764c42020-03-05 22:28:19 -060021
22/**
23 * DOC: IPA Clocking
24 *
25 * The "IPA Clock" manages both the IPA core clock and the interconnects
26 * (buses) the IPA depends on as a single logical entity. A reference count
27 * is incremented by "get" operations and decremented by "put" operations.
28 * Transitions of that count from 0 to 1 result in the clock and interconnects
29 * being enabled, and transitions of the count from 1 to 0 cause them to be
30 * disabled. We currently operate the core clock at a fixed clock rate, and
31 * all buses at a fixed average and peak bandwidth. As more advanced IPA
32 * features are enabled, we can make better use of clock and bus scaling.
33 *
34 * An IPA clock reference must be held for any access to IPA hardware.
35 */
36
Alex Elderba764c42020-03-05 22:28:19 -060037/**
Alex Elder5b408102021-01-15 06:50:46 -060038 * struct ipa_interconnect - IPA interconnect information
39 * @path: Interconnect path
Alex Elderdb6cd512021-01-15 06:50:47 -060040 * @average_bandwidth: Average interconnect bandwidth (KB/second)
41 * @peak_bandwidth: Peak interconnect bandwidth (KB/second)
Alex Elder5b408102021-01-15 06:50:46 -060042 */
43struct ipa_interconnect {
44 struct icc_path *path;
Alex Elderdb6cd512021-01-15 06:50:47 -060045 u32 average_bandwidth;
46 u32 peak_bandwidth;
Alex Elder5b408102021-01-15 06:50:46 -060047};
48
49/**
Alex Elderafb08b72021-08-04 10:36:26 -050050 * enum ipa_power_flag - IPA power flags
51 * @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
52 * @IPA_POWER_FLAG_COUNT: Number of defined power flags
53 */
54enum ipa_power_flag {
55 IPA_POWER_FLAG_RESUMED,
56 IPA_POWER_FLAG_COUNT, /* Last; not a flag */
57};
58
59/**
Alex Elderba764c42020-03-05 22:28:19 -060060 * struct ipa_clock - IPA clocking information
61 * @count: Clocking reference count
Alex Eldere3eea082020-07-13 07:24:18 -050062 * @mutex: Protects clock enable/disable
Alex Elderba764c42020-03-05 22:28:19 -060063 * @core: IPA core clock
Alex Elderafb08b72021-08-04 10:36:26 -050064 * @flags: Boolean state flags
Alex Elderea151e12021-01-15 06:50:50 -060065 * @interconnect_count: Number of elements in interconnect[]
Alex Elder5b408102021-01-15 06:50:46 -060066 * @interconnect: Interconnect array
Alex Elderba764c42020-03-05 22:28:19 -060067 */
68struct ipa_clock {
Alex Elder0305b702020-09-17 12:39:20 -050069 refcount_t count;
Alex Elderba764c42020-03-05 22:28:19 -060070 struct mutex mutex; /* protects clock enable/disable */
71 struct clk *core;
Alex Elderafb08b72021-08-04 10:36:26 -050072 DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
Alex Elderea151e12021-01-15 06:50:50 -060073 u32 interconnect_count;
74 struct ipa_interconnect *interconnect;
Alex Elderba764c42020-03-05 22:28:19 -060075};
76
Alex Elder10d0d392021-01-15 06:50:49 -060077static int ipa_interconnect_init_one(struct device *dev,
78 struct ipa_interconnect *interconnect,
79 const struct ipa_interconnect_data *data)
Alex Elderba764c42020-03-05 22:28:19 -060080{
81 struct icc_path *path;
82
Alex Elder10d0d392021-01-15 06:50:49 -060083 path = of_icc_get(dev, data->name);
84 if (IS_ERR(path)) {
85 int ret = PTR_ERR(path);
Alex Elderba764c42020-03-05 22:28:19 -060086
Alex Elder4c7ccfc2021-02-12 08:33:59 -060087 dev_err_probe(dev, ret, "error getting %s interconnect\n",
88 data->name);
Alex Elder10d0d392021-01-15 06:50:49 -060089
90 return ret;
91 }
92
93 interconnect->path = path;
94 interconnect->average_bandwidth = data->average_bandwidth;
95 interconnect->peak_bandwidth = data->peak_bandwidth;
96
97 return 0;
98}
99
/* Inverse of ipa_interconnect_init_one(): release the interconnect path
 * and zero the structure so stale pointers can't be reused.
 */
static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
{
	icc_put(interconnect->path);
	memset(interconnect, 0, sizeof(*interconnect));
}
105
106/* Initialize interconnects required for IPA operation */
Alex Elderdb6cd512021-01-15 06:50:47 -0600107static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev,
108 const struct ipa_interconnect_data *data)
Alex Elderba764c42020-03-05 22:28:19 -0600109{
Alex Elderdb6cd512021-01-15 06:50:47 -0600110 struct ipa_interconnect *interconnect;
Alex Elderea151e12021-01-15 06:50:50 -0600111 u32 count;
Alex Elder10d0d392021-01-15 06:50:49 -0600112 int ret;
Alex Elderba764c42020-03-05 22:28:19 -0600113
Alex Elderea151e12021-01-15 06:50:50 -0600114 count = clock->interconnect_count;
115 interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
116 if (!interconnect)
117 return -ENOMEM;
118 clock->interconnect = interconnect;
Alex Elderba764c42020-03-05 22:28:19 -0600119
Alex Elderea151e12021-01-15 06:50:50 -0600120 while (count--) {
121 ret = ipa_interconnect_init_one(dev, interconnect, data++);
122 if (ret)
123 goto out_unwind;
124 interconnect++;
125 }
Alex Elderba764c42020-03-05 22:28:19 -0600126
127 return 0;
128
Alex Elderea151e12021-01-15 06:50:50 -0600129out_unwind:
130 while (interconnect-- > clock->interconnect)
131 ipa_interconnect_exit_one(interconnect);
132 kfree(clock->interconnect);
133 clock->interconnect = NULL;
Alex Elder10d0d392021-01-15 06:50:49 -0600134
135 return ret;
Alex Elderba764c42020-03-05 22:28:19 -0600136}
137
138/* Inverse of ipa_interconnect_init() */
139static void ipa_interconnect_exit(struct ipa_clock *clock)
140{
Alex Elder10d0d392021-01-15 06:50:49 -0600141 struct ipa_interconnect *interconnect;
142
Alex Elderea151e12021-01-15 06:50:50 -0600143 interconnect = clock->interconnect + clock->interconnect_count;
144 while (interconnect-- > clock->interconnect)
145 ipa_interconnect_exit_one(interconnect);
146 kfree(clock->interconnect);
147 clock->interconnect = NULL;
Alex Elderba764c42020-03-05 22:28:19 -0600148}
149
150/* Currently we only use one bandwidth level, so just "enable" interconnects */
151static int ipa_interconnect_enable(struct ipa *ipa)
152{
Alex Elderdb6cd512021-01-15 06:50:47 -0600153 struct ipa_interconnect *interconnect;
Alex Elderba764c42020-03-05 22:28:19 -0600154 struct ipa_clock *clock = ipa->clock;
155 int ret;
Alex Elderea151e12021-01-15 06:50:50 -0600156 u32 i;
Alex Elderba764c42020-03-05 22:28:19 -0600157
Alex Elderea151e12021-01-15 06:50:50 -0600158 interconnect = clock->interconnect;
159 for (i = 0; i < clock->interconnect_count; i++) {
160 ret = icc_set_bw(interconnect->path,
161 interconnect->average_bandwidth,
162 interconnect->peak_bandwidth);
Alex Elder8ee7c402021-08-04 10:36:23 -0500163 if (ret) {
164 dev_err(&ipa->pdev->dev,
165 "error %d enabling %s interconnect\n",
166 ret, icc_get_name(interconnect->path));
Alex Elderea151e12021-01-15 06:50:50 -0600167 goto out_unwind;
Alex Elder8ee7c402021-08-04 10:36:23 -0500168 }
Alex Elderea151e12021-01-15 06:50:50 -0600169 interconnect++;
170 }
Alex Elderba764c42020-03-05 22:28:19 -0600171
172 return 0;
173
Alex Elderea151e12021-01-15 06:50:50 -0600174out_unwind:
175 while (interconnect-- > clock->interconnect)
176 (void)icc_set_bw(interconnect->path, 0, 0);
Alex Elderba764c42020-03-05 22:28:19 -0600177
178 return ret;
179}
180
181/* To disable an interconnect, we just its bandwidth to 0 */
Alex Elder8ee7c402021-08-04 10:36:23 -0500182static int ipa_interconnect_disable(struct ipa *ipa)
Alex Elderba764c42020-03-05 22:28:19 -0600183{
Alex Elderdb6cd512021-01-15 06:50:47 -0600184 struct ipa_interconnect *interconnect;
Alex Elderba764c42020-03-05 22:28:19 -0600185 struct ipa_clock *clock = ipa->clock;
Alex Elder8ee7c402021-08-04 10:36:23 -0500186 struct device *dev = &ipa->pdev->dev;
Alex Elderec0ef6d2021-01-15 06:50:45 -0600187 int result = 0;
Alex Elderea151e12021-01-15 06:50:50 -0600188 u32 count;
Alex Elderba764c42020-03-05 22:28:19 -0600189 int ret;
190
Alex Elderea151e12021-01-15 06:50:50 -0600191 count = clock->interconnect_count;
192 interconnect = clock->interconnect + count;
193 while (count--) {
194 interconnect--;
195 ret = icc_set_bw(interconnect->path, 0, 0);
Alex Elder8ee7c402021-08-04 10:36:23 -0500196 if (ret) {
197 dev_err(dev, "error %d disabling %s interconnect\n",
198 ret, icc_get_name(interconnect->path));
199 /* Try to disable all; record only the first error */
200 if (!result)
201 result = ret;
202 }
Alex Elderea151e12021-01-15 06:50:50 -0600203 }
Alex Elderba764c42020-03-05 22:28:19 -0600204
Alex Elder8ee7c402021-08-04 10:36:23 -0500205 return result;
Alex Elderba764c42020-03-05 22:28:19 -0600206}
207
208/* Turn on IPA clocks, including interconnects */
209static int ipa_clock_enable(struct ipa *ipa)
210{
211 int ret;
212
213 ret = ipa_interconnect_enable(ipa);
214 if (ret)
215 return ret;
216
217 ret = clk_prepare_enable(ipa->clock->core);
Alex Elder8ee7c402021-08-04 10:36:23 -0500218 if (ret) {
219 dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret);
220 (void)ipa_interconnect_disable(ipa);
221 }
Alex Elderba764c42020-03-05 22:28:19 -0600222
223 return ret;
224}
225
/* Inverse of ipa_clock_enable(): stop the core clock first, then drop
 * interconnect bandwidth.  Returns the interconnect-disable result
 * (0 on success, or the first error encountered).
 */
static int ipa_clock_disable(struct ipa *ipa)
{
	clk_disable_unprepare(ipa->clock->core);

	return ipa_interconnect_disable(ipa);
}
233
/* Runtime-suspend the IPA device: quiesce endpoints and GSI (once setup
 * is complete), then turn off the core clock and interconnects.
 * Returns 0 on success or a negative error code.
 */
static int ipa_runtime_suspend(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);

	/* Endpoints aren't usable until setup is complete */
	if (ipa->setup_complete) {
		/* Forget any wakeup event recorded by the suspend handler */
		__clear_bit(IPA_POWER_FLAG_RESUMED, ipa->clock->flags);
		ipa_endpoint_suspend(ipa);
		gsi_suspend(&ipa->gsi);
	}

	return ipa_clock_disable(ipa);
}
247
/* Runtime-resume the IPA device: turn the clock and interconnects back
 * on, then resume GSI and endpoints (once setup is complete).
 * Returns 0 on success or a negative error code.
 */
static int ipa_runtime_resume(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);
	int ret;

	/* A clock failure here leaves the hardware unusable; warn loudly */
	ret = ipa_clock_enable(ipa);
	if (WARN_ON(ret < 0))
		return ret;

	/* Endpoints aren't usable until setup is complete */
	if (ipa->setup_complete) {
		gsi_resume(&ipa->gsi);
		ipa_endpoint_resume(ipa);
	}

	return 0;
}
265
/* Get an IPA clock reference, but only if the reference count is
 * already non-zero.  Returns true if the additional reference was
 * added successfully, or false otherwise.
 */
bool ipa_clock_get_additional(struct ipa *ipa)
{
	/* Atomic; avoids taking the mutex when the clock is already on */
	return refcount_inc_not_zero(&ipa->clock->count);
}
274
275/* Get an IPA clock reference. If the reference count is non-zero, it is
276 * incremented and return is immediate. Otherwise it is checked again
Alex Elderdc6e6072020-09-17 12:39:22 -0500277 * under protection of the mutex, and if appropriate the IPA clock
278 * is enabled.
Alex Elderba764c42020-03-05 22:28:19 -0600279 *
280 * Incrementing the reference count is intentionally deferred until
281 * after the clock is running and endpoints are resumed.
282 */
Alex Elder7ebd1682021-08-10 14:26:58 -0500283int ipa_clock_get(struct ipa *ipa)
Alex Elderba764c42020-03-05 22:28:19 -0600284{
285 struct ipa_clock *clock = ipa->clock;
286 int ret;
287
288 /* If the clock is running, just bump the reference count */
289 if (ipa_clock_get_additional(ipa))
Alex Elder7ebd1682021-08-10 14:26:58 -0500290 return 1;
Alex Elderba764c42020-03-05 22:28:19 -0600291
292 /* Otherwise get the mutex and check again */
293 mutex_lock(&clock->mutex);
294
295 /* A reference might have been added before we got the mutex. */
Alex Elder7ebd1682021-08-10 14:26:58 -0500296 if (ipa_clock_get_additional(ipa)) {
297 ret = 1;
Alex Elderba764c42020-03-05 22:28:19 -0600298 goto out_mutex_unlock;
Alex Elder7ebd1682021-08-10 14:26:58 -0500299 }
Alex Elderba764c42020-03-05 22:28:19 -0600300
Alex Elder2abb0c72021-08-10 14:27:00 -0500301 ret = ipa_runtime_resume(&ipa->pdev->dev);
Alex Elder7ebd1682021-08-10 14:26:58 -0500302
303 refcount_set(&clock->count, 1);
304
Alex Elderba764c42020-03-05 22:28:19 -0600305out_mutex_unlock:
306 mutex_unlock(&clock->mutex);
Alex Elder7ebd1682021-08-10 14:26:58 -0500307
308 return ret;
Alex Elderba764c42020-03-05 22:28:19 -0600309}
310
/* Attempt to remove an IPA clock reference.  If this represents the
 * last reference, disable the IPA clock under protection of the mutex.
 * Returns 0 if a reference remains (or on success), otherwise the
 * result of suspending the hardware.
 */
int ipa_clock_put(struct ipa *ipa)
{
	struct ipa_clock *clock = ipa->clock;
	int ret;

	/* If this is not the last reference there's nothing more to do */
	if (!refcount_dec_and_mutex_lock(&clock->count, &clock->mutex))
		return 0;

	/* Count hit zero with the mutex held; power down the hardware */
	ret = ipa_runtime_suspend(&ipa->pdev->dev);

	mutex_unlock(&clock->mutex);

	return ret;
}
329
Alex Elder78b348f2020-07-03 16:23:34 -0500330/* Return the current IPA core clock rate */
331u32 ipa_clock_rate(struct ipa *ipa)
332{
333 return ipa->clock ? (u32)clk_get_rate(ipa->clock->core) : 0;
334}
335
/**
 * ipa_suspend_handler() - Handle the suspend IPA interrupt
 * @ipa:	IPA pointer
 * @irq_id:	IPA interrupt type (unused)
 *
 * If an RX endpoint is suspended, and the IPA has a packet destined for
 * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
 * that it should resume the endpoint.  If we get one of these interrupts
 * we just wake up the system.
 */
static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
	/* Just report the event, and let system resume handle the rest.
	 * More than one endpoint could signal this; if so, ignore
	 * all but the first.
	 */
	if (!test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->clock->flags))
		pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);

	/* Acknowledge/clear the suspend interrupt on all endpoints */
	ipa_interrupt_suspend_clear_all(ipa->interrupt);
}
358
/* Register the TX_SUSPEND interrupt handler so a suspended endpoint
 * with pending traffic can wake the system.
 */
void ipa_power_setup(struct ipa *ipa)
{
	ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
			  ipa_suspend_handler);
}
364
/* Inverse of ipa_power_setup(): unregister the TX_SUSPEND handler */
void ipa_power_teardown(struct ipa *ipa)
{
	ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
}
369
Alex Elderba764c42020-03-05 22:28:19 -0600370/* Initialize IPA clocking */
Alex Elderdfccb8b2020-11-19 16:40:39 -0600371struct ipa_clock *
372ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
Alex Elderba764c42020-03-05 22:28:19 -0600373{
374 struct ipa_clock *clock;
375 struct clk *clk;
376 int ret;
377
378 clk = clk_get(dev, "core");
379 if (IS_ERR(clk)) {
Alex Elder4c7ccfc2021-02-12 08:33:59 -0600380 dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");
381
Alex Elderba764c42020-03-05 22:28:19 -0600382 return ERR_CAST(clk);
383 }
384
Alex Elder91d02f92020-11-19 16:40:41 -0600385 ret = clk_set_rate(clk, data->core_clock_rate);
Alex Elderba764c42020-03-05 22:28:19 -0600386 if (ret) {
Alex Elder91d02f92020-11-19 16:40:41 -0600387 dev_err(dev, "error %d setting core clock rate to %u\n",
388 ret, data->core_clock_rate);
Alex Elderba764c42020-03-05 22:28:19 -0600389 goto err_clk_put;
390 }
391
392 clock = kzalloc(sizeof(*clock), GFP_KERNEL);
393 if (!clock) {
394 ret = -ENOMEM;
395 goto err_clk_put;
396 }
397 clock->core = clk;
Alex Elderea151e12021-01-15 06:50:50 -0600398 clock->interconnect_count = data->interconnect_count;
Alex Elderba764c42020-03-05 22:28:19 -0600399
Alex Elderea151e12021-01-15 06:50:50 -0600400 ret = ipa_interconnect_init(clock, dev, data->interconnect_data);
Alex Elderba764c42020-03-05 22:28:19 -0600401 if (ret)
402 goto err_kfree;
403
404 mutex_init(&clock->mutex);
Alex Elder0305b702020-09-17 12:39:20 -0500405 refcount_set(&clock->count, 0);
Alex Elderba764c42020-03-05 22:28:19 -0600406
407 return clock;
408
409err_kfree:
410 kfree(clock);
411err_clk_put:
412 clk_put(clk);
413
414 return ERR_PTR(ret);
415}
416
/* Inverse of ipa_clock_init() */
void ipa_clock_exit(struct ipa_clock *clock)
{
	/* Save the clk pointer; the clock structure is freed below */
	struct clk *clk = clock->core;

	/* No references should remain when we're torn down */
	WARN_ON(refcount_read(&clock->count) != 0);
	mutex_destroy(&clock->mutex);
	ipa_interconnect_exit(clock);
	kfree(clock);
	clk_put(clk);
}
Alex Elder73ff3162021-08-04 10:36:24 -0500428
/**
 * ipa_suspend() - Power management system suspend callback
 * @dev:	IPA device structure
 *
 * Return:	0 on success, or a negative error code
 *
 * Called by the PM framework when a system suspend operation is invoked.
 * Delegates to ipa_runtime_suspend(), which suspends endpoints (once
 * setup is complete) and disables the IPA clock and interconnects.
 */
static int ipa_suspend(struct device *dev)
{
	return ipa_runtime_suspend(dev);
}
443
/**
 * ipa_resume() - Power management system resume callback
 * @dev:	IPA device structure
 *
 * Return:	0 on success, or a negative error code
 *
 * Called by the PM framework when a system resume operation is invoked.
 * Delegates to ipa_runtime_resume(), which re-enables the IPA clock and
 * interconnects and resumes endpoints (once setup is complete).
 */
static int ipa_resume(struct device *dev)
{
	return ipa_runtime_resume(dev);
}
458
/* System power management operations registered for the IPA device */
const struct dev_pm_ops ipa_pm_ops = {
	.suspend = ipa_suspend,
	.resume = ipa_resume,
};