blob: f2989aac47a62f5944560fddb0465e3b3d7e7671 [file] [log] [blame]
Alex Elderba764c42020-03-05 22:28:19 -06001// SPDX-License-Identifier: GPL-2.0
2
3/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
Alex Elder4c7ccfc2021-02-12 08:33:59 -06004 * Copyright (C) 2018-2021 Linaro Ltd.
Alex Elderba764c42020-03-05 22:28:19 -06005 */
6
Alex Elderba764c42020-03-05 22:28:19 -06007#include <linux/clk.h>
8#include <linux/device.h>
9#include <linux/interconnect.h>
Alex Elder73ff3162021-08-04 10:36:24 -050010#include <linux/pm.h>
Alex Elder2abb0c72021-08-10 14:27:00 -050011#include <linux/pm_runtime.h>
Alex Elder73ff3162021-08-04 10:36:24 -050012#include <linux/bitops.h>
Alex Elderba764c42020-03-05 22:28:19 -060013
Alex Elder34a08172022-02-01 09:02:05 -060014#include "linux/soc/qcom/qcom_aoss.h"
15
Alex Elderba764c42020-03-05 22:28:19 -060016#include "ipa.h"
Alex Elder2775cbc2021-08-20 11:01:29 -050017#include "ipa_power.h"
Alex Elder73ff3162021-08-04 10:36:24 -050018#include "ipa_endpoint.h"
Alex Elderba764c42020-03-05 22:28:19 -060019#include "ipa_modem.h"
Alex Elderdfccb8b2020-11-19 16:40:39 -060020#include "ipa_data.h"
Alex Elderba764c42020-03-05 22:28:19 -060021
22/**
Alex Elder7aa0e8b2021-08-20 11:01:28 -050023 * DOC: IPA Power Management
Alex Elderba764c42020-03-05 22:28:19 -060024 *
Alex Elder7aa0e8b2021-08-20 11:01:28 -050025 * The IPA hardware is enabled when the IPA core clock and all the
26 * interconnects (buses) it depends on are enabled. Runtime power
27 * management is used to determine whether the core clock and
28 * interconnects are enabled, and if not in use to be suspended
29 * automatically.
Alex Elderba764c42020-03-05 22:28:19 -060030 *
Alex Elder7aa0e8b2021-08-20 11:01:28 -050031 * The core clock currently runs at a fixed clock rate when enabled,
 * and all interconnects use a fixed average and peak bandwidth.
Alex Elderba764c42020-03-05 22:28:19 -060033 */
34
Alex Elder1aac3092021-08-20 11:01:27 -050035#define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */
36
Alex Elderba764c42020-03-05 22:28:19 -060037/**
Alex Elder5b408102021-01-15 06:50:46 -060038 * struct ipa_interconnect - IPA interconnect information
39 * @path: Interconnect path
Alex Elderdb6cd512021-01-15 06:50:47 -060040 * @average_bandwidth: Average interconnect bandwidth (KB/second)
41 * @peak_bandwidth: Peak interconnect bandwidth (KB/second)
Alex Elder5b408102021-01-15 06:50:46 -060042 */
43struct ipa_interconnect {
44 struct icc_path *path;
Alex Elderdb6cd512021-01-15 06:50:47 -060045 u32 average_bandwidth;
46 u32 peak_bandwidth;
Alex Elder5b408102021-01-15 06:50:46 -060047};
48
/**
 * enum ipa_power_flag - IPA power flags
 * @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
 * @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended
 * @IPA_POWER_FLAG_STOPPED: Modem TX is disabled by ipa_start_xmit()
 * @IPA_POWER_FLAG_STARTED: Modem TX was enabled by ipa_runtime_resume()
 * @IPA_POWER_FLAG_COUNT: Number of defined power flags
 *
 * Each value is a bit position in the ipa_power->flags bitmap.
 */
enum ipa_power_flag {
	IPA_POWER_FLAG_RESUMED,
	IPA_POWER_FLAG_SYSTEM,
	IPA_POWER_FLAG_STOPPED,
	IPA_POWER_FLAG_STARTED,
	IPA_POWER_FLAG_COUNT,		/* Last; not a flag */
};
64
/**
 * struct ipa_power - IPA power management information
 * @dev: IPA device pointer
 * @core: IPA core clock
 * @qmp: QMP handle for AOSS communication (NULL if not used)
 * @spinlock: Protects modem TX queue enable/disable
 * @flags: Boolean state flags (enum ipa_power_flag bit positions)
 * @interconnect_count: Number of elements in interconnect[]
 * @interconnect: Interconnect array (allocated by ipa_interconnect_init())
 *
 * Allocated by ipa_power_init() and freed by ipa_power_exit().
 */
struct ipa_power {
	struct device *dev;
	struct clk *core;
	struct qmp *qmp;
	spinlock_t spinlock;	/* used with STOPPED/STARTED power flags */
	DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
	u32 interconnect_count;
	struct ipa_interconnect *interconnect;
};
84
/* Look up one interconnect path by name and record its bandwidth values.
 * Returns 0 on success, or a negative error code.
 */
static int ipa_interconnect_init_one(struct device *dev,
				     struct ipa_interconnect *interconnect,
				     const struct ipa_interconnect_data *data)
{
	struct icc_path *path;

	path = of_icc_get(dev, data->name);
	if (IS_ERR(path)) {
		int ret = PTR_ERR(path);

		/* dev_err_probe() stays quiet for -EPROBE_DEFER */
		dev_err_probe(dev, ret, "error getting %s interconnect\n",
			      data->name);

		return ret;
	}

	interconnect->path = path;
	interconnect->average_bandwidth = data->average_bandwidth;
	interconnect->peak_bandwidth = data->peak_bandwidth;

	return 0;
}
107
108static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
109{
110 icc_put(interconnect->path);
111 memset(interconnect, 0, sizeof(*interconnect));
Alex Elderba764c42020-03-05 22:28:19 -0600112}
113
/* Initialize interconnects required for IPA operation.  On failure, any
 * interconnects initialized before the error are released again.
 */
static int ipa_interconnect_init(struct ipa_power *power, struct device *dev,
				 const struct ipa_interconnect_data *data)
{
	struct ipa_interconnect *interconnect;
	u32 count;
	int ret;

	/* One array entry per interconnect described by platform data */
	count = power->interconnect_count;
	interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
	if (!interconnect)
		return -ENOMEM;
	power->interconnect = interconnect;

	while (count--) {
		ret = ipa_interconnect_init_one(dev, interconnect, data++);
		if (ret)
			goto out_unwind;
		interconnect++;
	}

	return 0;

out_unwind:
	/* interconnect points at the entry that failed; release the
	 * earlier ones in reverse order, then free the array.
	 */
	while (interconnect-- > power->interconnect)
		ipa_interconnect_exit_one(interconnect);
	kfree(power->interconnect);
	power->interconnect = NULL;

	return ret;
}
145
146/* Inverse of ipa_interconnect_init() */
Alex Elder7aa0e8b2021-08-20 11:01:28 -0500147static void ipa_interconnect_exit(struct ipa_power *power)
Alex Elderba764c42020-03-05 22:28:19 -0600148{
Alex Elder10d0d392021-01-15 06:50:49 -0600149 struct ipa_interconnect *interconnect;
150
Alex Elder7aa0e8b2021-08-20 11:01:28 -0500151 interconnect = power->interconnect + power->interconnect_count;
152 while (interconnect-- > power->interconnect)
Alex Elderea151e12021-01-15 06:50:50 -0600153 ipa_interconnect_exit_one(interconnect);
Alex Elder7aa0e8b2021-08-20 11:01:28 -0500154 kfree(power->interconnect);
155 power->interconnect = NULL;
Alex Elderba764c42020-03-05 22:28:19 -0600156}
157
/* Currently we only use one bandwidth level, so just "enable" interconnects.
 * On any failure, interconnects already enabled are disabled again.
 */
static int ipa_interconnect_enable(struct ipa *ipa)
{
	struct ipa_interconnect *interconnect;
	struct ipa_power *power = ipa->power;
	int ret;
	u32 i;

	interconnect = power->interconnect;
	for (i = 0; i < power->interconnect_count; i++) {
		ret = icc_set_bw(interconnect->path,
				 interconnect->average_bandwidth,
				 interconnect->peak_bandwidth);
		if (ret) {
			dev_err(&ipa->pdev->dev,
				"error %d enabling %s interconnect\n",
				ret, icc_get_name(interconnect->path));
			goto out_unwind;
		}
		interconnect++;
	}

	return 0;

out_unwind:
	/* Drop the bandwidth request on entries enabled before the error */
	while (interconnect-- > power->interconnect)
		(void)icc_set_bw(interconnect->path, 0, 0);

	return ret;
}
188
/* To disable an interconnect, we just set its bandwidth to 0.  All
 * interconnects are disabled (in reverse order) even if one fails;
 * the first error encountered is returned.
 */
static int ipa_interconnect_disable(struct ipa *ipa)
{
	struct ipa_interconnect *interconnect;
	struct ipa_power *power = ipa->power;
	struct device *dev = &ipa->pdev->dev;
	int result = 0;
	u32 count;
	int ret;

	count = power->interconnect_count;
	interconnect = power->interconnect + count;
	while (count--) {
		interconnect--;
		ret = icc_set_bw(interconnect->path, 0, 0);
		if (ret) {
			dev_err(dev, "error %d disabling %s interconnect\n",
				ret, icc_get_name(interconnect->path));
			/* Try to disable all; record only the first error */
			if (!result)
				result = ret;
		}
	}

	return result;
}
215
Alex Elder7aa0e8b2021-08-20 11:01:28 -0500216/* Enable IPA power, enabling interconnects and the core clock */
217static int ipa_power_enable(struct ipa *ipa)
Alex Elderba764c42020-03-05 22:28:19 -0600218{
219 int ret;
220
221 ret = ipa_interconnect_enable(ipa);
222 if (ret)
223 return ret;
224
Alex Elder7aa0e8b2021-08-20 11:01:28 -0500225 ret = clk_prepare_enable(ipa->power->core);
Alex Elder8ee7c402021-08-04 10:36:23 -0500226 if (ret) {
227 dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret);
228 (void)ipa_interconnect_disable(ipa);
229 }
Alex Elderba764c42020-03-05 22:28:19 -0600230
231 return ret;
232}
233
Alex Elder7aa0e8b2021-08-20 11:01:28 -0500234/* Inverse of ipa_power_enable() */
235static int ipa_power_disable(struct ipa *ipa)
Alex Elderba764c42020-03-05 22:28:19 -0600236{
Alex Elder7aa0e8b2021-08-20 11:01:28 -0500237 clk_disable_unprepare(ipa->power->core);
Alex Elder7ebd1682021-08-10 14:26:58 -0500238
239 return ipa_interconnect_disable(ipa);
Alex Elderba764c42020-03-05 22:28:19 -0600240}
241
/* Runtime PM ->runtime_suspend callback: quiesce endpoints and GSI (if
 * set up), then turn off the core clock and interconnects.
 */
static int ipa_runtime_suspend(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);

	/* Endpoints aren't usable until setup is complete */
	if (ipa->setup_complete) {
		/* Forget any earlier SUSPEND-interrupt-driven resume */
		__clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags);
		ipa_endpoint_suspend(ipa);
		gsi_suspend(&ipa->gsi);
	}

	return ipa_power_disable(ipa);
}
255
/* Runtime PM ->runtime_resume callback: inverse of ipa_runtime_suspend() */
static int ipa_runtime_resume(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);
	int ret;

	ret = ipa_power_enable(ipa);
	if (WARN_ON(ret < 0))
		return ret;

	/* Endpoints aren't usable until setup is complete */
	if (ipa->setup_complete) {
		gsi_resume(&ipa->gsi);
		ipa_endpoint_resume(ipa);
	}

	return 0;
}
273
/* System PM ->suspend callback */
static int ipa_suspend(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);

	/* The SYSTEM flag tells ipa_suspend_handler() that a wakeup
	 * event (system resume) is required, not just a runtime resume.
	 */
	__set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);

	return pm_runtime_force_suspend(dev);
}
282
/* System PM ->resume callback; inverse of ipa_suspend() */
static int ipa_resume(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);

	/* Cleared unconditionally; we're no longer system suspended */
	__clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);

	return ret;
}
294
Alex Elder78b348f2020-07-03 16:23:34 -0500295/* Return the current IPA core clock rate */
Alex Elder7aa0e8b2021-08-20 11:01:28 -0500296u32 ipa_core_clock_rate(struct ipa *ipa)
Alex Elder78b348f2020-07-03 16:23:34 -0500297{
Alex Elder7aa0e8b2021-08-20 11:01:28 -0500298 return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0;
Alex Elder78b348f2020-07-03 16:23:34 -0500299}
300
/**
 * ipa_suspend_handler() - Handle the suspend IPA interrupt
 * @ipa: IPA pointer
 * @irq_id: IPA interrupt type (unused)
 *
 * If an RX endpoint is suspended, and the IPA has a packet destined for
 * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP
 * that it should resume the endpoint.  If we get one of these interrupts
 * we just wake up the system.
 */
static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id)
{
	/* To handle an IPA interrupt we will have resumed the hardware
	 * just to handle the interrupt, so we're done.  If we are in a
	 * system suspend, trigger a system resume.
	 *
	 * RESUMED is set here (once per suspend cycle; it is cleared
	 * again by ipa_runtime_suspend()); SYSTEM is set only during a
	 * system suspend, in which case we request a device wakeup.
	 */
	if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags))
		if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags))
			pm_wakeup_dev_event(&ipa->pdev->dev, 0, true);

	/* Acknowledge/clear the suspend interrupt on all endpoints */
	ipa_interrupt_suspend_clear_all(ipa->interrupt);
}
324
Alex Elderb8e36e12021-08-19 16:12:28 -0500325/* The next few functions coordinate stopping and starting the modem
326 * network device transmit queue.
327 *
328 * Transmit can be running concurrent with power resume, and there's a
329 * chance the resume completes before the transmit path stops the queue,
330 * leaving the queue in a stopped state. The next two functions are used
331 * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit()
332 * to conditionally stop the TX queue; and ipa_power_modem_queue_start()
333 * is used by ipa_runtime_resume() to conditionally restart it.
334 *
335 * Two flags and a spinlock are used. If the queue is stopped, the STOPPED
336 * power flag is set. And if the queue is started, the STARTED flag is set.
337 * The queue is only started on resume if the STOPPED flag is set. And the
338 * queue is only started in ipa_start_xmit() if the STARTED flag is *not*
 * set. As a result, the queue remains operational if the two activities
340 * happen concurrently regardless of the order they complete. The spinlock
341 * ensures the flag and TX queue operations are done atomically.
342 *
343 * The first function stops the modem netdev transmit queue, but only if
344 * the STARTED flag is *not* set. That flag is cleared if it was set.
 * If the queue is stopped, the STOPPED flag is set. This is called only
 * from ipa_start_xmit(); the power ->runtime_resume operation instead
 * restarts the queue using ipa_power_modem_queue_wake() below.
347 */
/* Conditionally stop the modem netdev TX queue (see comment above) */
void ipa_power_modem_queue_stop(struct ipa *ipa)
{
	struct ipa_power *power = ipa->power;
	unsigned long flags;

	spin_lock_irqsave(&power->spinlock, flags);

	/* If STARTED was set, a concurrent wake has already restarted
	 * the queue; clearing that flag is all that's needed.  Otherwise
	 * stop the queue and record that fact with the STOPPED flag.
	 */
	if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) {
		netif_stop_queue(ipa->modem_netdev);
		__set_bit(IPA_POWER_FLAG_STOPPED, power->flags);
	}

	spin_unlock_irqrestore(&power->spinlock, flags);
}
362
/* This function starts the modem netdev transmit queue, but only if the
 * STOPPED flag is set.  That flag is cleared if it was set.  If the queue
 * was restarted, the STARTED flag is set; this allows ipa_start_xmit()
 * to skip stopping the queue in the event of a race.
 */
void ipa_power_modem_queue_wake(struct ipa *ipa)
{
	struct ipa_power *power = ipa->power;
	unsigned long flags;

	spin_lock_irqsave(&power->spinlock, flags);

	/* Only wake the queue if we were the ones who stopped it */
	if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) {
		__set_bit(IPA_POWER_FLAG_STARTED, power->flags);
		netif_wake_queue(ipa->modem_netdev);
	}

	spin_unlock_irqrestore(&power->spinlock, flags);
}
382
/* This function clears the STARTED flag once the TX queue is operating.
 * The atomic clear_bit() (not __clear_bit()) is used because the
 * spinlock is not held here.
 */
void ipa_power_modem_queue_active(struct ipa *ipa)
{
	clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
}
388
/* Get the QMP handle used for register retention requests.  A missing
 * QMP node is not an error; power->qmp is left NULL in that case and
 * ipa_power_retention() becomes a no-op.  Only -EPROBE_DEFER is
 * propagated to the caller.
 */
static int ipa_power_retention_init(struct ipa_power *power)
{
	struct qmp *qmp = qmp_get(power->dev);

	if (IS_ERR(qmp)) {
		if (PTR_ERR(qmp) == -EPROBE_DEFER)
			return -EPROBE_DEFER;

		/* We assume any other error means it's not defined/needed */
		qmp = NULL;
	}
	power->qmp = qmp;

	return 0;
}
404
/* Inverse of ipa_power_retention_init().
 * NOTE(review): power->qmp may be NULL here (see retention_init);
 * assumes qmp_put() tolerates a NULL handle -- confirm against the
 * qcom_aoss API.
 */
static void ipa_power_retention_exit(struct ipa_power *power)
{
	qmp_put(power->qmp);
	power->qmp = NULL;
}
410
/* Control register retention on power collapse.  Sends an enable or
 * disable request to the AOSS via QMP; does nothing if no QMP handle
 * was obtained at init time.
 */
void ipa_power_retention(struct ipa *ipa, bool enable)
{
	static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
	struct ipa_power *power = ipa->power;
	char buf[36];	/* Exactly enough for fmt[]; size a multiple of 4 */
	int ret;

	if (!power->qmp)
		return;		/* Not needed on this platform */

	/* The %c is replaced by a single character, so the formatted
	 * string is the same length as fmt[] and cannot be truncated.
	 */
	(void)snprintf(buf, sizeof(buf), fmt, enable ? '1' : '0');

	ret = qmp_send(power->qmp, buf, sizeof(buf));
	if (ret)
		dev_err(power->dev, "error %d sending QMP %sable request\n",
			ret, enable ? "en" : "dis");
}
429
/* Register the SUSPEND interrupt handler and enable device wakeup.
 * The interrupt handler is removed again if wakeup setup fails.
 */
int ipa_power_setup(struct ipa *ipa)
{
	int ret;

	ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
			  ipa_suspend_handler);

	ret = device_init_wakeup(&ipa->pdev->dev, true);
	if (ret)
		ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);

	return ret;
}
443
/* Inverse of ipa_power_setup() */
void ipa_power_teardown(struct ipa *ipa)
{
	/* Failure to disable wakeup is not actionable here; ignore it */
	(void)device_init_wakeup(&ipa->pdev->dev, false);
	ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
}
449
/* Initialize IPA power management.  Returns a pointer to the new
 * ipa_power structure, or an ERR_PTR()-encoded error code on failure.
 */
struct ipa_power *
ipa_power_init(struct device *dev, const struct ipa_power_data *data)
{
	struct ipa_power *power;
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "core");
	if (IS_ERR(clk)) {
		/* dev_err_probe() stays quiet for -EPROBE_DEFER */
		dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");

		return ERR_CAST(clk);
	}

	/* The core clock runs at a single fixed rate while enabled */
	ret = clk_set_rate(clk, data->core_clock_rate);
	if (ret) {
		dev_err(dev, "error %d setting core clock rate to %u\n",
			ret, data->core_clock_rate);
		goto err_clk_put;
	}

	power = kzalloc(sizeof(*power), GFP_KERNEL);
	if (!power) {
		ret = -ENOMEM;
		goto err_clk_put;
	}
	power->dev = dev;
	power->core = clk;
	spin_lock_init(&power->spinlock);
	power->interconnect_count = data->interconnect_count;

	ret = ipa_interconnect_init(power, dev, data->interconnect_data);
	if (ret)
		goto err_kfree;

	ret = ipa_power_retention_init(power);
	if (ret)
		goto err_interconnect_exit;

	/* Let runtime PM suspend the hardware automatically when idle */
	pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	return power;

err_interconnect_exit:
	ipa_interconnect_exit(power);
err_kfree:
	kfree(power);
err_clk_put:
	clk_put(clk);

	return ERR_PTR(ret);
}
505
/* Inverse of ipa_power_init() */
void ipa_power_exit(struct ipa_power *power)
{
	struct device *dev = power->dev;
	/* Save the clock pointer; power is freed before we're done with it */
	struct clk *clk = power->core;

	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	ipa_power_retention_exit(power);
	ipa_interconnect_exit(power);
	kfree(power);
	clk_put(clk);
}
Alex Elder73ff3162021-08-04 10:36:24 -0500519
/* IPA power management operations: system sleep and runtime PM callbacks */
const struct dev_pm_ops ipa_pm_ops = {
	.suspend = ipa_suspend,
	.resume = ipa_resume,
	.runtime_suspend = ipa_runtime_suspend,
	.runtime_resume = ipa_runtime_resume,
};