Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | |
| 3 | /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. |
Alex Elder | 4c7ccfc | 2021-02-12 08:33:59 -0600 | [diff] [blame] | 4 | * Copyright (C) 2018-2021 Linaro Ltd. |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 5 | */ |
| 6 | |
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/interconnect.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/bitops.h>
#include <linux/soc/qcom/qcom_aoss.h>

#include "ipa.h"
#include "ipa_power.h"
#include "ipa_endpoint.h"
#include "ipa_modem.h"
#include "ipa_data.h"
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 21 | |
| 22 | /** |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 23 | * DOC: IPA Power Management |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 24 | * |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 25 | * The IPA hardware is enabled when the IPA core clock and all the |
| 26 | * interconnects (buses) it depends on are enabled. Runtime power |
| 27 | * management is used to determine whether the core clock and |
| 28 | * interconnects are enabled, and if not in use to be suspended |
| 29 | * automatically. |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 30 | * |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 31 | * The core clock currently runs at a fixed clock rate when enabled, |
 * and all interconnects use a fixed average and peak bandwidth.
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 33 | */ |
| 34 | |
Alex Elder | 1aac309 | 2021-08-20 11:01:27 -0500 | [diff] [blame] | 35 | #define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */ |
| 36 | |
/**
 * struct ipa_interconnect - IPA interconnect information
 * @path: Interconnect path
 * @average_bandwidth: Average interconnect bandwidth (KB/second)
 * @peak_bandwidth: Peak interconnect bandwidth (KB/second)
 *
 * One of these records each interconnect (bus) the IPA depends on.
 * The recorded bandwidths are requested when the interconnect is
 * enabled; a zero bandwidth request disables it.
 */
struct ipa_interconnect {
	struct icc_path *path;
	u32 average_bandwidth;
	u32 peak_bandwidth;
};
| 48 | |
/**
 * enum ipa_power_flag - IPA power flags
 * @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
 * @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended
 * @IPA_POWER_FLAG_STOPPED: Modem TX is disabled by ipa_start_xmit()
 * @IPA_POWER_FLAG_STARTED: Modem TX was enabled by ipa_runtime_resume()
 * @IPA_POWER_FLAG_COUNT: Number of defined power flags
 *
 * These flags are recorded in the bitmap in struct ipa_power.  RESUMED
 * and SYSTEM coordinate suspend/resume; STOPPED and STARTED coordinate
 * the modem TX queue (under the power spinlock).
 */
enum ipa_power_flag {
	IPA_POWER_FLAG_RESUMED,
	IPA_POWER_FLAG_SYSTEM,
	IPA_POWER_FLAG_STOPPED,
	IPA_POWER_FLAG_STARTED,
	IPA_POWER_FLAG_COUNT,		/* Last; not a flag */
};
| 64 | |
/**
 * struct ipa_power - IPA power management information
 * @dev: IPA device pointer
 * @core: IPA core clock
 * @qmp: QMP handle for AOSS communication (NULL if not used on platform)
 * @spinlock: Protects modem TX queue enable/disable
 * @flags: Boolean state flags (enum ipa_power_flag)
 * @interconnect_count: Number of elements in interconnect[]
 * @interconnect: Interconnect array
 */
struct ipa_power {
	struct device *dev;
	struct clk *core;
	struct qmp *qmp;
	spinlock_t spinlock;	/* used with STOPPED/STARTED power flags */
	DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
	u32 interconnect_count;
	struct ipa_interconnect *interconnect;
};
| 84 | |
Alex Elder | 10d0d39 | 2021-01-15 06:50:49 -0600 | [diff] [blame] | 85 | static int ipa_interconnect_init_one(struct device *dev, |
| 86 | struct ipa_interconnect *interconnect, |
| 87 | const struct ipa_interconnect_data *data) |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 88 | { |
| 89 | struct icc_path *path; |
| 90 | |
Alex Elder | 10d0d39 | 2021-01-15 06:50:49 -0600 | [diff] [blame] | 91 | path = of_icc_get(dev, data->name); |
| 92 | if (IS_ERR(path)) { |
| 93 | int ret = PTR_ERR(path); |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 94 | |
Alex Elder | 4c7ccfc | 2021-02-12 08:33:59 -0600 | [diff] [blame] | 95 | dev_err_probe(dev, ret, "error getting %s interconnect\n", |
| 96 | data->name); |
Alex Elder | 10d0d39 | 2021-01-15 06:50:49 -0600 | [diff] [blame] | 97 | |
| 98 | return ret; |
| 99 | } |
| 100 | |
| 101 | interconnect->path = path; |
| 102 | interconnect->average_bandwidth = data->average_bandwidth; |
| 103 | interconnect->peak_bandwidth = data->peak_bandwidth; |
| 104 | |
| 105 | return 0; |
| 106 | } |
| 107 | |
/* Inverse of ipa_interconnect_init_one(): release the path, clear state */
static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
{
	icc_put(interconnect->path);
	/* Zero the record so a stale path pointer can't be reused */
	memset(interconnect, 0, sizeof(*interconnect));
}
| 113 | |
| 114 | /* Initialize interconnects required for IPA operation */ |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 115 | static int ipa_interconnect_init(struct ipa_power *power, struct device *dev, |
Alex Elder | db6cd51 | 2021-01-15 06:50:47 -0600 | [diff] [blame] | 116 | const struct ipa_interconnect_data *data) |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 117 | { |
Alex Elder | db6cd51 | 2021-01-15 06:50:47 -0600 | [diff] [blame] | 118 | struct ipa_interconnect *interconnect; |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 119 | u32 count; |
Alex Elder | 10d0d39 | 2021-01-15 06:50:49 -0600 | [diff] [blame] | 120 | int ret; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 121 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 122 | count = power->interconnect_count; |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 123 | interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL); |
| 124 | if (!interconnect) |
| 125 | return -ENOMEM; |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 126 | power->interconnect = interconnect; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 127 | |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 128 | while (count--) { |
| 129 | ret = ipa_interconnect_init_one(dev, interconnect, data++); |
| 130 | if (ret) |
| 131 | goto out_unwind; |
| 132 | interconnect++; |
| 133 | } |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 134 | |
| 135 | return 0; |
| 136 | |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 137 | out_unwind: |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 138 | while (interconnect-- > power->interconnect) |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 139 | ipa_interconnect_exit_one(interconnect); |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 140 | kfree(power->interconnect); |
| 141 | power->interconnect = NULL; |
Alex Elder | 10d0d39 | 2021-01-15 06:50:49 -0600 | [diff] [blame] | 142 | |
| 143 | return ret; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 144 | } |
| 145 | |
| 146 | /* Inverse of ipa_interconnect_init() */ |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 147 | static void ipa_interconnect_exit(struct ipa_power *power) |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 148 | { |
Alex Elder | 10d0d39 | 2021-01-15 06:50:49 -0600 | [diff] [blame] | 149 | struct ipa_interconnect *interconnect; |
| 150 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 151 | interconnect = power->interconnect + power->interconnect_count; |
| 152 | while (interconnect-- > power->interconnect) |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 153 | ipa_interconnect_exit_one(interconnect); |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 154 | kfree(power->interconnect); |
| 155 | power->interconnect = NULL; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 156 | } |
| 157 | |
| 158 | /* Currently we only use one bandwidth level, so just "enable" interconnects */ |
| 159 | static int ipa_interconnect_enable(struct ipa *ipa) |
| 160 | { |
Alex Elder | db6cd51 | 2021-01-15 06:50:47 -0600 | [diff] [blame] | 161 | struct ipa_interconnect *interconnect; |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 162 | struct ipa_power *power = ipa->power; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 163 | int ret; |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 164 | u32 i; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 165 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 166 | interconnect = power->interconnect; |
| 167 | for (i = 0; i < power->interconnect_count; i++) { |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 168 | ret = icc_set_bw(interconnect->path, |
| 169 | interconnect->average_bandwidth, |
| 170 | interconnect->peak_bandwidth); |
Alex Elder | 8ee7c40 | 2021-08-04 10:36:23 -0500 | [diff] [blame] | 171 | if (ret) { |
| 172 | dev_err(&ipa->pdev->dev, |
| 173 | "error %d enabling %s interconnect\n", |
| 174 | ret, icc_get_name(interconnect->path)); |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 175 | goto out_unwind; |
Alex Elder | 8ee7c40 | 2021-08-04 10:36:23 -0500 | [diff] [blame] | 176 | } |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 177 | interconnect++; |
| 178 | } |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 179 | |
| 180 | return 0; |
| 181 | |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 182 | out_unwind: |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 183 | while (interconnect-- > power->interconnect) |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 184 | (void)icc_set_bw(interconnect->path, 0, 0); |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 185 | |
| 186 | return ret; |
| 187 | } |
| 188 | |
/* To disable an interconnect, we just set its bandwidth to 0 */
static int ipa_interconnect_disable(struct ipa *ipa)
{
	struct ipa_interconnect *interconnect;
	struct ipa_power *power = ipa->power;
	struct device *dev = &ipa->pdev->dev;
	int result = 0;
	u32 count;
	int ret;

	/* Walk the array in reverse of the order used when enabling */
	count = power->interconnect_count;
	interconnect = power->interconnect + count;
	while (count--) {
		interconnect--;
		ret = icc_set_bw(interconnect->path, 0, 0);
		if (ret) {
			dev_err(dev, "error %d disabling %s interconnect\n",
				ret, icc_get_name(interconnect->path));
			/* Try to disable all; record only the first error */
			if (!result)
				result = ret;
		}
	}

	return result;
}
| 215 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 216 | /* Enable IPA power, enabling interconnects and the core clock */ |
| 217 | static int ipa_power_enable(struct ipa *ipa) |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 218 | { |
| 219 | int ret; |
| 220 | |
| 221 | ret = ipa_interconnect_enable(ipa); |
| 222 | if (ret) |
| 223 | return ret; |
| 224 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 225 | ret = clk_prepare_enable(ipa->power->core); |
Alex Elder | 8ee7c40 | 2021-08-04 10:36:23 -0500 | [diff] [blame] | 226 | if (ret) { |
| 227 | dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret); |
| 228 | (void)ipa_interconnect_disable(ipa); |
| 229 | } |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 230 | |
| 231 | return ret; |
| 232 | } |
| 233 | |
/* Inverse of ipa_power_enable(): core clock off first, then interconnects.
 * Returns any error reported when disabling the interconnects.
 */
static int ipa_power_disable(struct ipa *ipa)
{
	clk_disable_unprepare(ipa->power->core);

	return ipa_interconnect_disable(ipa);
}
| 241 | |
Alex Elder | 2abb0c7 | 2021-08-10 14:27:00 -0500 | [diff] [blame] | 242 | static int ipa_runtime_suspend(struct device *dev) |
| 243 | { |
| 244 | struct ipa *ipa = dev_get_drvdata(dev); |
| 245 | |
| 246 | /* Endpoints aren't usable until setup is complete */ |
| 247 | if (ipa->setup_complete) { |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 248 | __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags); |
Alex Elder | 2abb0c7 | 2021-08-10 14:27:00 -0500 | [diff] [blame] | 249 | ipa_endpoint_suspend(ipa); |
| 250 | gsi_suspend(&ipa->gsi); |
| 251 | } |
| 252 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 253 | return ipa_power_disable(ipa); |
Alex Elder | 2abb0c7 | 2021-08-10 14:27:00 -0500 | [diff] [blame] | 254 | } |
| 255 | |
| 256 | static int ipa_runtime_resume(struct device *dev) |
| 257 | { |
| 258 | struct ipa *ipa = dev_get_drvdata(dev); |
| 259 | int ret; |
| 260 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 261 | ret = ipa_power_enable(ipa); |
Alex Elder | 2abb0c7 | 2021-08-10 14:27:00 -0500 | [diff] [blame] | 262 | if (WARN_ON(ret < 0)) |
| 263 | return ret; |
| 264 | |
| 265 | /* Endpoints aren't usable until setup is complete */ |
| 266 | if (ipa->setup_complete) { |
| 267 | gsi_resume(&ipa->gsi); |
| 268 | ipa_endpoint_resume(ipa); |
| 269 | } |
| 270 | |
| 271 | return 0; |
| 272 | } |
| 273 | |
/* System suspend callback: record system-suspend state, then suspend */
static int ipa_suspend(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);

	/* The SYSTEM flag lets ipa_suspend_handler() distinguish a system
	 * suspend from a runtime suspend, so it can trigger a wakeup.
	 */
	__set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);

	return pm_runtime_force_suspend(dev);
}
| 282 | |
/* System resume callback: resume the hardware, then clear suspend state */
static int ipa_resume(struct device *dev)
{
	struct ipa *ipa = dev_get_drvdata(dev);
	int ret;

	ret = pm_runtime_force_resume(dev);

	/* Clear SYSTEM even if resume failed; we're no longer suspending */
	__clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);

	return ret;
}
| 294 | |
Alex Elder | 78b348f | 2020-07-03 16:23:34 -0500 | [diff] [blame] | 295 | /* Return the current IPA core clock rate */ |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 296 | u32 ipa_core_clock_rate(struct ipa *ipa) |
Alex Elder | 78b348f | 2020-07-03 16:23:34 -0500 | [diff] [blame] | 297 | { |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 298 | return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0; |
Alex Elder | 78b348f | 2020-07-03 16:23:34 -0500 | [diff] [blame] | 299 | } |
| 300 | |
Alex Elder | afe1baa | 2021-08-04 10:36:25 -0500 | [diff] [blame] | 301 | /** |
| 302 | * ipa_suspend_handler() - Handle the suspend IPA interrupt |
| 303 | * @ipa: IPA pointer |
| 304 | * @irq_id: IPA interrupt type (unused) |
| 305 | * |
| 306 | * If an RX endpoint is suspended, and the IPA has a packet destined for |
| 307 | * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP |
| 308 | * that it should resume the endpoint. If we get one of these interrupts |
| 309 | * we just wake up the system. |
| 310 | */ |
| 311 | static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id) |
| 312 | { |
Alex Elder | b9c532c | 2021-08-12 14:50:31 -0500 | [diff] [blame] | 313 | /* To handle an IPA interrupt we will have resumed the hardware |
| 314 | * just to handle the interrupt, so we're done. If we are in a |
| 315 | * system suspend, trigger a system resume. |
Alex Elder | afe1baa | 2021-08-04 10:36:25 -0500 | [diff] [blame] | 316 | */ |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 317 | if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags)) |
| 318 | if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags)) |
Alex Elder | b9c532c | 2021-08-12 14:50:31 -0500 | [diff] [blame] | 319 | pm_wakeup_dev_event(&ipa->pdev->dev, 0, true); |
Alex Elder | afe1baa | 2021-08-04 10:36:25 -0500 | [diff] [blame] | 320 | |
| 321 | /* Acknowledge/clear the suspend interrupt on all endpoints */ |
| 322 | ipa_interrupt_suspend_clear_all(ipa->interrupt); |
| 323 | } |
| 324 | |
/* The next few functions coordinate stopping and starting the modem
 * network device transmit queue.
 *
 * Transmit can be running concurrent with power resume, and there's a
 * chance the resume completes before the transmit path stops the queue,
 * leaving the queue in a stopped state.  The next two functions are used
 * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit()
 * to conditionally stop the TX queue; and ipa_power_modem_queue_wake()
 * is used by ipa_runtime_resume() to conditionally restart it.
 *
 * Two flags and a spinlock are used.  If the queue is stopped, the STOPPED
 * power flag is set.  And if the queue is started, the STARTED flag is set.
 * The queue is only started on resume if the STOPPED flag is set.  And the
 * queue is only started in ipa_start_xmit() if the STARTED flag is *not*
 * set.  As a result, the queue remains operational if the two activities
 * happen concurrently regardless of the order they complete.  The spinlock
 * ensures the flag and TX queue operations are done atomically.
 *
 * The first function stops the modem netdev transmit queue, but only if
 * the STARTED flag is *not* set.  That flag is cleared if it was set.
 * If the queue is stopped, the STOPPED flag is set.  This is called only
 * from the modem transmit path (ipa_start_xmit()).
 */
void ipa_power_modem_queue_stop(struct ipa *ipa)
{
	struct ipa_power *power = ipa->power;
	unsigned long flags;

	spin_lock_irqsave(&power->spinlock, flags);

	/* Skip the stop if a concurrent resume just restarted the queue */
	if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) {
		netif_stop_queue(ipa->modem_netdev);
		__set_bit(IPA_POWER_FLAG_STOPPED, power->flags);
	}

	spin_unlock_irqrestore(&power->spinlock, flags);
}
| 362 | |
/* This function starts the modem netdev transmit queue, but only if the
 * STOPPED flag is set.  That flag is cleared if it was set.  If the queue
 * was restarted, the STARTED flag is set; this allows ipa_start_xmit()
 * to skip stopping the queue in the event of a race.
 */
void ipa_power_modem_queue_wake(struct ipa *ipa)
{
	struct ipa_power *power = ipa->power;
	unsigned long flags;

	spin_lock_irqsave(&power->spinlock, flags);

	/* Only wake the queue if it was stopped by ipa_start_xmit() */
	if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) {
		__set_bit(IPA_POWER_FLAG_STARTED, power->flags);
		netif_wake_queue(ipa->modem_netdev);
	}

	spin_unlock_irqrestore(&power->spinlock, flags);
}
| 382 | |
/* This function clears the STARTED flag once the TX queue is operating.
 * clear_bit() is atomic, so the power spinlock isn't taken here.
 */
void ipa_power_modem_queue_active(struct ipa *ipa)
{
	clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
}
| 388 | |
Alex Elder | 34a0817 | 2022-02-01 09:02:05 -0600 | [diff] [blame] | 389 | static int ipa_power_retention_init(struct ipa_power *power) |
| 390 | { |
| 391 | struct qmp *qmp = qmp_get(power->dev); |
| 392 | |
| 393 | if (IS_ERR(qmp)) { |
| 394 | if (PTR_ERR(qmp) == -EPROBE_DEFER) |
| 395 | return -EPROBE_DEFER; |
| 396 | |
| 397 | /* We assume any other error means it's not defined/needed */ |
| 398 | qmp = NULL; |
| 399 | } |
| 400 | power->qmp = qmp; |
| 401 | |
| 402 | return 0; |
| 403 | } |
| 404 | |
/* Inverse of ipa_power_retention_init(): release the QMP handle */
static void ipa_power_retention_exit(struct ipa_power *power)
{
	qmp_put(power->qmp);
	power->qmp = NULL;
}
| 410 | |
/* Control register retention on power collapse.  Sends an enable or
 * disable request to the always-on subsystem (AOSS) over QMP; does
 * nothing if the platform doesn't provide a QMP handle.
 */
void ipa_power_retention(struct ipa *ipa, bool enable)
{
	static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
	struct ipa_power *power = ipa->power;
	char buf[36];	/* Exactly enough for fmt[]; size a multiple of 4 */
	int ret;

	if (!power->qmp)
		return;		/* Not needed on this platform */

	(void)snprintf(buf, sizeof(buf), fmt, enable ? '1' : '0');

	/* NOTE(review): the full buffer (sizeof(buf)) is sent, not just
	 * the formatted string length -- presumably AOSS tolerates the
	 * trailing NUL padding; confirm against the QMP protocol.
	 */
	ret = qmp_send(power->qmp, buf, sizeof(buf));
	if (ret)
		dev_err(power->dev, "error %d sending QMP %sable request\n",
			ret, enable ? "en" : "dis");
}
| 429 | |
/* Register the SUSPEND interrupt handler and enable wakeup capability.
 * Returns 0, or a negative error code from device_init_wakeup().
 */
int ipa_power_setup(struct ipa *ipa)
{
	int ret;

	ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND,
			  ipa_suspend_handler);

	ret = device_init_wakeup(&ipa->pdev->dev, true);
	if (ret)
		/* Undo the interrupt registration on failure */
		ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);

	return ret;
}
| 443 | |
/* Inverse of ipa_power_setup() */
void ipa_power_teardown(struct ipa *ipa)
{
	(void)device_init_wakeup(&ipa->pdev->dev, false);
	ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND);
}
| 449 | |
/* Initialize IPA power management: acquire and rate-set the core clock,
 * set up interconnects and register retention, then enable runtime PM
 * with autosuspend.  Returns the ipa_power structure, or an ERR_PTR().
 */
struct ipa_power *
ipa_power_init(struct device *dev, const struct ipa_power_data *data)
{
	struct ipa_power *power;
	struct clk *clk;
	int ret;

	clk = clk_get(dev, "core");
	if (IS_ERR(clk)) {
		dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");

		return ERR_CAST(clk);
	}

	/* The core clock runs at a single fixed rate; set it once here */
	ret = clk_set_rate(clk, data->core_clock_rate);
	if (ret) {
		dev_err(dev, "error %d setting core clock rate to %u\n",
			ret, data->core_clock_rate);
		goto err_clk_put;
	}

	power = kzalloc(sizeof(*power), GFP_KERNEL);
	if (!power) {
		ret = -ENOMEM;
		goto err_clk_put;
	}
	power->dev = dev;
	power->core = clk;
	spin_lock_init(&power->spinlock);
	power->interconnect_count = data->interconnect_count;

	ret = ipa_interconnect_init(power, dev, data->interconnect_data);
	if (ret)
		goto err_kfree;

	ret = ipa_power_retention_init(power);
	if (ret)
		goto err_interconnect_exit;

	/* Let the hardware power down automatically when it's been idle
	 * for IPA_AUTOSUSPEND_DELAY milliseconds.
	 */
	pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	return power;

err_interconnect_exit:
	ipa_interconnect_exit(power);
err_kfree:
	kfree(power);
err_clk_put:
	clk_put(clk);

	return ERR_PTR(ret);
}
| 505 | |
/* Inverse of ipa_power_init().  Runtime PM is disabled first, before the
 * resources its callbacks depend on are released.
 */
void ipa_power_exit(struct ipa_power *power)
{
	struct device *dev = power->dev;
	struct clk *clk = power->core;	/* saved; power is freed below */

	pm_runtime_disable(dev);
	pm_runtime_dont_use_autosuspend(dev);
	ipa_power_retention_exit(power);
	ipa_interconnect_exit(power);
	kfree(power);
	clk_put(clk);
}
Alex Elder | 73ff316 | 2021-08-04 10:36:24 -0500 | [diff] [blame] | 519 | |
/* Power management operations: system suspend/resume plus runtime PM */
const struct dev_pm_ops ipa_pm_ops = {
	.suspend = ipa_suspend,
	.resume = ipa_resume,
	.runtime_suspend = ipa_runtime_suspend,
	.runtime_resume = ipa_runtime_resume,
};