Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | |
| 3 | /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. |
Alex Elder | 4c7ccfc | 2021-02-12 08:33:59 -0600 | [diff] [blame] | 4 | * Copyright (C) 2018-2021 Linaro Ltd. |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 5 | */ |
| 6 | |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 7 | #include <linux/clk.h> |
| 8 | #include <linux/device.h> |
| 9 | #include <linux/interconnect.h> |
Alex Elder | 73ff316 | 2021-08-04 10:36:24 -0500 | [diff] [blame] | 10 | #include <linux/pm.h> |
Alex Elder | 2abb0c7 | 2021-08-10 14:27:00 -0500 | [diff] [blame] | 11 | #include <linux/pm_runtime.h> |
Alex Elder | 73ff316 | 2021-08-04 10:36:24 -0500 | [diff] [blame] | 12 | #include <linux/bitops.h> |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 13 | |
| 14 | #include "ipa.h" |
Alex Elder | 2775cbc | 2021-08-20 11:01:29 -0500 | [diff] [blame^] | 15 | #include "ipa_power.h" |
Alex Elder | 73ff316 | 2021-08-04 10:36:24 -0500 | [diff] [blame] | 16 | #include "ipa_endpoint.h" |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 17 | #include "ipa_modem.h" |
Alex Elder | dfccb8b | 2020-11-19 16:40:39 -0600 | [diff] [blame] | 18 | #include "ipa_data.h" |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 19 | |
| 20 | /** |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 21 | * DOC: IPA Power Management |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 22 | * |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 23 | * The IPA hardware is enabled when the IPA core clock and all the |
| 24 | * interconnects (buses) it depends on are enabled. Runtime power |
| 25 | * management is used to determine whether the core clock and |
| 26 | * interconnects are enabled, and if not in use to be suspended |
| 27 | * automatically. |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 28 | * |
 * The core clock currently runs at a fixed clock rate when enabled,
 * and all interconnects use a fixed average and peak bandwidth.
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 31 | */ |
| 32 | |
Alex Elder | 1aac309 | 2021-08-20 11:01:27 -0500 | [diff] [blame] | 33 | #define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */ |
| 34 | |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 35 | /** |
Alex Elder | 5b40810 | 2021-01-15 06:50:46 -0600 | [diff] [blame] | 36 | * struct ipa_interconnect - IPA interconnect information |
| 37 | * @path: Interconnect path |
Alex Elder | db6cd51 | 2021-01-15 06:50:47 -0600 | [diff] [blame] | 38 | * @average_bandwidth: Average interconnect bandwidth (KB/second) |
| 39 | * @peak_bandwidth: Peak interconnect bandwidth (KB/second) |
Alex Elder | 5b40810 | 2021-01-15 06:50:46 -0600 | [diff] [blame] | 40 | */ |
| 41 | struct ipa_interconnect { |
| 42 | struct icc_path *path; |
Alex Elder | db6cd51 | 2021-01-15 06:50:47 -0600 | [diff] [blame] | 43 | u32 average_bandwidth; |
| 44 | u32 peak_bandwidth; |
Alex Elder | 5b40810 | 2021-01-15 06:50:46 -0600 | [diff] [blame] | 45 | }; |
| 46 | |
/**
 * enum ipa_power_flag - IPA power flags
 * @IPA_POWER_FLAG_RESUMED:	Whether resume from suspend has been signaled
 * @IPA_POWER_FLAG_SYSTEM:	Hardware is system (not runtime) suspended
 * @IPA_POWER_FLAG_STOPPED:	Modem TX is disabled by ipa_start_xmit()
 * @IPA_POWER_FLAG_STARTED:	Modem TX was enabled by ipa_runtime_resume()
 * @IPA_POWER_FLAG_COUNT:	Number of defined power flags
 */
enum ipa_power_flag {
	IPA_POWER_FLAG_RESUMED,
	IPA_POWER_FLAG_SYSTEM,
	IPA_POWER_FLAG_STOPPED,
	IPA_POWER_FLAG_STARTED,
	IPA_POWER_FLAG_COUNT,		/* Last; not a flag */
};
| 62 | |
| 63 | /** |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 64 | * struct ipa_power - IPA power management information |
Alex Elder | 63de79f | 2021-08-10 14:27:01 -0500 | [diff] [blame] | 65 | * @dev: IPA device pointer |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 66 | * @core: IPA core clock |
Alex Elder | b8e36e1 | 2021-08-19 16:12:28 -0500 | [diff] [blame] | 67 | * @spinlock: Protects modem TX queue enable/disable |
Alex Elder | afb08b7 | 2021-08-04 10:36:26 -0500 | [diff] [blame] | 68 | * @flags: Boolean state flags |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 69 | * @interconnect_count: Number of elements in interconnect[] |
Alex Elder | 5b40810 | 2021-01-15 06:50:46 -0600 | [diff] [blame] | 70 | * @interconnect: Interconnect array |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 71 | */ |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 72 | struct ipa_power { |
Alex Elder | 63de79f | 2021-08-10 14:27:01 -0500 | [diff] [blame] | 73 | struct device *dev; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 74 | struct clk *core; |
Alex Elder | b8e36e1 | 2021-08-19 16:12:28 -0500 | [diff] [blame] | 75 | spinlock_t spinlock; /* used with STOPPED/STARTED power flags */ |
Alex Elder | afb08b7 | 2021-08-04 10:36:26 -0500 | [diff] [blame] | 76 | DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT); |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 77 | u32 interconnect_count; |
| 78 | struct ipa_interconnect *interconnect; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 79 | }; |
| 80 | |
Alex Elder | 10d0d39 | 2021-01-15 06:50:49 -0600 | [diff] [blame] | 81 | static int ipa_interconnect_init_one(struct device *dev, |
| 82 | struct ipa_interconnect *interconnect, |
| 83 | const struct ipa_interconnect_data *data) |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 84 | { |
| 85 | struct icc_path *path; |
| 86 | |
Alex Elder | 10d0d39 | 2021-01-15 06:50:49 -0600 | [diff] [blame] | 87 | path = of_icc_get(dev, data->name); |
| 88 | if (IS_ERR(path)) { |
| 89 | int ret = PTR_ERR(path); |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 90 | |
Alex Elder | 4c7ccfc | 2021-02-12 08:33:59 -0600 | [diff] [blame] | 91 | dev_err_probe(dev, ret, "error getting %s interconnect\n", |
| 92 | data->name); |
Alex Elder | 10d0d39 | 2021-01-15 06:50:49 -0600 | [diff] [blame] | 93 | |
| 94 | return ret; |
| 95 | } |
| 96 | |
| 97 | interconnect->path = path; |
| 98 | interconnect->average_bandwidth = data->average_bandwidth; |
| 99 | interconnect->peak_bandwidth = data->peak_bandwidth; |
| 100 | |
| 101 | return 0; |
| 102 | } |
| 103 | |
| 104 | static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect) |
| 105 | { |
| 106 | icc_put(interconnect->path); |
| 107 | memset(interconnect, 0, sizeof(*interconnect)); |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 108 | } |
| 109 | |
| 110 | /* Initialize interconnects required for IPA operation */ |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 111 | static int ipa_interconnect_init(struct ipa_power *power, struct device *dev, |
Alex Elder | db6cd51 | 2021-01-15 06:50:47 -0600 | [diff] [blame] | 112 | const struct ipa_interconnect_data *data) |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 113 | { |
Alex Elder | db6cd51 | 2021-01-15 06:50:47 -0600 | [diff] [blame] | 114 | struct ipa_interconnect *interconnect; |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 115 | u32 count; |
Alex Elder | 10d0d39 | 2021-01-15 06:50:49 -0600 | [diff] [blame] | 116 | int ret; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 117 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 118 | count = power->interconnect_count; |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 119 | interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL); |
| 120 | if (!interconnect) |
| 121 | return -ENOMEM; |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 122 | power->interconnect = interconnect; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 123 | |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 124 | while (count--) { |
| 125 | ret = ipa_interconnect_init_one(dev, interconnect, data++); |
| 126 | if (ret) |
| 127 | goto out_unwind; |
| 128 | interconnect++; |
| 129 | } |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 130 | |
| 131 | return 0; |
| 132 | |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 133 | out_unwind: |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 134 | while (interconnect-- > power->interconnect) |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 135 | ipa_interconnect_exit_one(interconnect); |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 136 | kfree(power->interconnect); |
| 137 | power->interconnect = NULL; |
Alex Elder | 10d0d39 | 2021-01-15 06:50:49 -0600 | [diff] [blame] | 138 | |
| 139 | return ret; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 140 | } |
| 141 | |
| 142 | /* Inverse of ipa_interconnect_init() */ |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 143 | static void ipa_interconnect_exit(struct ipa_power *power) |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 144 | { |
Alex Elder | 10d0d39 | 2021-01-15 06:50:49 -0600 | [diff] [blame] | 145 | struct ipa_interconnect *interconnect; |
| 146 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 147 | interconnect = power->interconnect + power->interconnect_count; |
| 148 | while (interconnect-- > power->interconnect) |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 149 | ipa_interconnect_exit_one(interconnect); |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 150 | kfree(power->interconnect); |
| 151 | power->interconnect = NULL; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 152 | } |
| 153 | |
| 154 | /* Currently we only use one bandwidth level, so just "enable" interconnects */ |
| 155 | static int ipa_interconnect_enable(struct ipa *ipa) |
| 156 | { |
Alex Elder | db6cd51 | 2021-01-15 06:50:47 -0600 | [diff] [blame] | 157 | struct ipa_interconnect *interconnect; |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 158 | struct ipa_power *power = ipa->power; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 159 | int ret; |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 160 | u32 i; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 161 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 162 | interconnect = power->interconnect; |
| 163 | for (i = 0; i < power->interconnect_count; i++) { |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 164 | ret = icc_set_bw(interconnect->path, |
| 165 | interconnect->average_bandwidth, |
| 166 | interconnect->peak_bandwidth); |
Alex Elder | 8ee7c40 | 2021-08-04 10:36:23 -0500 | [diff] [blame] | 167 | if (ret) { |
| 168 | dev_err(&ipa->pdev->dev, |
| 169 | "error %d enabling %s interconnect\n", |
| 170 | ret, icc_get_name(interconnect->path)); |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 171 | goto out_unwind; |
Alex Elder | 8ee7c40 | 2021-08-04 10:36:23 -0500 | [diff] [blame] | 172 | } |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 173 | interconnect++; |
| 174 | } |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 175 | |
| 176 | return 0; |
| 177 | |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 178 | out_unwind: |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 179 | while (interconnect-- > power->interconnect) |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 180 | (void)icc_set_bw(interconnect->path, 0, 0); |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 181 | |
| 182 | return ret; |
| 183 | } |
| 184 | |
| 185 | /* To disable an interconnect, we just its bandwidth to 0 */ |
Alex Elder | 8ee7c40 | 2021-08-04 10:36:23 -0500 | [diff] [blame] | 186 | static int ipa_interconnect_disable(struct ipa *ipa) |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 187 | { |
Alex Elder | db6cd51 | 2021-01-15 06:50:47 -0600 | [diff] [blame] | 188 | struct ipa_interconnect *interconnect; |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 189 | struct ipa_power *power = ipa->power; |
Alex Elder | 8ee7c40 | 2021-08-04 10:36:23 -0500 | [diff] [blame] | 190 | struct device *dev = &ipa->pdev->dev; |
Alex Elder | ec0ef6d | 2021-01-15 06:50:45 -0600 | [diff] [blame] | 191 | int result = 0; |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 192 | u32 count; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 193 | int ret; |
| 194 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 195 | count = power->interconnect_count; |
| 196 | interconnect = power->interconnect + count; |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 197 | while (count--) { |
| 198 | interconnect--; |
| 199 | ret = icc_set_bw(interconnect->path, 0, 0); |
Alex Elder | 8ee7c40 | 2021-08-04 10:36:23 -0500 | [diff] [blame] | 200 | if (ret) { |
| 201 | dev_err(dev, "error %d disabling %s interconnect\n", |
| 202 | ret, icc_get_name(interconnect->path)); |
| 203 | /* Try to disable all; record only the first error */ |
| 204 | if (!result) |
| 205 | result = ret; |
| 206 | } |
Alex Elder | ea151e1 | 2021-01-15 06:50:50 -0600 | [diff] [blame] | 207 | } |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 208 | |
Alex Elder | 8ee7c40 | 2021-08-04 10:36:23 -0500 | [diff] [blame] | 209 | return result; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 210 | } |
| 211 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 212 | /* Enable IPA power, enabling interconnects and the core clock */ |
| 213 | static int ipa_power_enable(struct ipa *ipa) |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 214 | { |
| 215 | int ret; |
| 216 | |
| 217 | ret = ipa_interconnect_enable(ipa); |
| 218 | if (ret) |
| 219 | return ret; |
| 220 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 221 | ret = clk_prepare_enable(ipa->power->core); |
Alex Elder | 8ee7c40 | 2021-08-04 10:36:23 -0500 | [diff] [blame] | 222 | if (ret) { |
| 223 | dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret); |
| 224 | (void)ipa_interconnect_disable(ipa); |
| 225 | } |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 226 | |
| 227 | return ret; |
| 228 | } |
| 229 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 230 | /* Inverse of ipa_power_enable() */ |
| 231 | static int ipa_power_disable(struct ipa *ipa) |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 232 | { |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 233 | clk_disable_unprepare(ipa->power->core); |
Alex Elder | 7ebd168 | 2021-08-10 14:26:58 -0500 | [diff] [blame] | 234 | |
| 235 | return ipa_interconnect_disable(ipa); |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 236 | } |
| 237 | |
Alex Elder | 2abb0c7 | 2021-08-10 14:27:00 -0500 | [diff] [blame] | 238 | static int ipa_runtime_suspend(struct device *dev) |
| 239 | { |
| 240 | struct ipa *ipa = dev_get_drvdata(dev); |
| 241 | |
| 242 | /* Endpoints aren't usable until setup is complete */ |
| 243 | if (ipa->setup_complete) { |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 244 | __clear_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags); |
Alex Elder | 2abb0c7 | 2021-08-10 14:27:00 -0500 | [diff] [blame] | 245 | ipa_endpoint_suspend(ipa); |
| 246 | gsi_suspend(&ipa->gsi); |
| 247 | } |
| 248 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 249 | return ipa_power_disable(ipa); |
Alex Elder | 2abb0c7 | 2021-08-10 14:27:00 -0500 | [diff] [blame] | 250 | } |
| 251 | |
| 252 | static int ipa_runtime_resume(struct device *dev) |
| 253 | { |
| 254 | struct ipa *ipa = dev_get_drvdata(dev); |
| 255 | int ret; |
| 256 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 257 | ret = ipa_power_enable(ipa); |
Alex Elder | 2abb0c7 | 2021-08-10 14:27:00 -0500 | [diff] [blame] | 258 | if (WARN_ON(ret < 0)) |
| 259 | return ret; |
| 260 | |
| 261 | /* Endpoints aren't usable until setup is complete */ |
| 262 | if (ipa->setup_complete) { |
| 263 | gsi_resume(&ipa->gsi); |
| 264 | ipa_endpoint_resume(ipa); |
| 265 | } |
| 266 | |
| 267 | return 0; |
| 268 | } |
| 269 | |
Alex Elder | b9c532c | 2021-08-12 14:50:31 -0500 | [diff] [blame] | 270 | static int ipa_suspend(struct device *dev) |
| 271 | { |
| 272 | struct ipa *ipa = dev_get_drvdata(dev); |
| 273 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 274 | __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); |
Alex Elder | b9c532c | 2021-08-12 14:50:31 -0500 | [diff] [blame] | 275 | |
| 276 | return pm_runtime_force_suspend(dev); |
| 277 | } |
| 278 | |
| 279 | static int ipa_resume(struct device *dev) |
| 280 | { |
| 281 | struct ipa *ipa = dev_get_drvdata(dev); |
| 282 | int ret; |
| 283 | |
| 284 | ret = pm_runtime_force_resume(dev); |
| 285 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 286 | __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags); |
Alex Elder | b9c532c | 2021-08-12 14:50:31 -0500 | [diff] [blame] | 287 | |
| 288 | return ret; |
| 289 | } |
| 290 | |
Alex Elder | 78b348f | 2020-07-03 16:23:34 -0500 | [diff] [blame] | 291 | /* Return the current IPA core clock rate */ |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 292 | u32 ipa_core_clock_rate(struct ipa *ipa) |
Alex Elder | 78b348f | 2020-07-03 16:23:34 -0500 | [diff] [blame] | 293 | { |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 294 | return ipa->power ? (u32)clk_get_rate(ipa->power->core) : 0; |
Alex Elder | 78b348f | 2020-07-03 16:23:34 -0500 | [diff] [blame] | 295 | } |
| 296 | |
Alex Elder | afe1baa | 2021-08-04 10:36:25 -0500 | [diff] [blame] | 297 | /** |
| 298 | * ipa_suspend_handler() - Handle the suspend IPA interrupt |
| 299 | * @ipa: IPA pointer |
| 300 | * @irq_id: IPA interrupt type (unused) |
| 301 | * |
| 302 | * If an RX endpoint is suspended, and the IPA has a packet destined for |
| 303 | * that endpoint, the IPA generates a SUSPEND interrupt to inform the AP |
| 304 | * that it should resume the endpoint. If we get one of these interrupts |
| 305 | * we just wake up the system. |
| 306 | */ |
| 307 | static void ipa_suspend_handler(struct ipa *ipa, enum ipa_irq_id irq_id) |
| 308 | { |
Alex Elder | b9c532c | 2021-08-12 14:50:31 -0500 | [diff] [blame] | 309 | /* To handle an IPA interrupt we will have resumed the hardware |
| 310 | * just to handle the interrupt, so we're done. If we are in a |
| 311 | * system suspend, trigger a system resume. |
Alex Elder | afe1baa | 2021-08-04 10:36:25 -0500 | [diff] [blame] | 312 | */ |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 313 | if (!__test_and_set_bit(IPA_POWER_FLAG_RESUMED, ipa->power->flags)) |
| 314 | if (test_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags)) |
Alex Elder | b9c532c | 2021-08-12 14:50:31 -0500 | [diff] [blame] | 315 | pm_wakeup_dev_event(&ipa->pdev->dev, 0, true); |
Alex Elder | afe1baa | 2021-08-04 10:36:25 -0500 | [diff] [blame] | 316 | |
| 317 | /* Acknowledge/clear the suspend interrupt on all endpoints */ |
| 318 | ipa_interrupt_suspend_clear_all(ipa->interrupt); |
| 319 | } |
| 320 | |
Alex Elder | b8e36e1 | 2021-08-19 16:12:28 -0500 | [diff] [blame] | 321 | /* The next few functions coordinate stopping and starting the modem |
| 322 | * network device transmit queue. |
| 323 | * |
| 324 | * Transmit can be running concurrent with power resume, and there's a |
| 325 | * chance the resume completes before the transmit path stops the queue, |
| 326 | * leaving the queue in a stopped state. The next two functions are used |
| 327 | * to avoid this: ipa_power_modem_queue_stop() is used by ipa_start_xmit() |
| 328 | * to conditionally stop the TX queue; and ipa_power_modem_queue_start() |
| 329 | * is used by ipa_runtime_resume() to conditionally restart it. |
| 330 | * |
 * Two flags and a spinlock are used.  If the queue is stopped, the STOPPED
 * power flag is set.  And if the queue is started, the STARTED flag is set.
 * The queue is only started on resume if the STOPPED flag is set.  And the
 * queue is only stopped in ipa_start_xmit() if the STARTED flag is *not*
 * set.  As a result, the queue remains operational if the two activities
 * happen concurrently regardless of the order they complete.  The spinlock
 * ensures the flag and TX queue operations are done atomically.
 *
 * The first function stops the modem netdev transmit queue, but only if
 * the STARTED flag is *not* set.  That flag is cleared if it was set.
 * If the queue is stopped, the STOPPED flag is set.  This is called only
 * from the transmit path (ipa_start_xmit()).
 */
| 344 | void ipa_power_modem_queue_stop(struct ipa *ipa) |
| 345 | { |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 346 | struct ipa_power *power = ipa->power; |
Alex Elder | b8e36e1 | 2021-08-19 16:12:28 -0500 | [diff] [blame] | 347 | unsigned long flags; |
| 348 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 349 | spin_lock_irqsave(&power->spinlock, flags); |
Alex Elder | b8e36e1 | 2021-08-19 16:12:28 -0500 | [diff] [blame] | 350 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 351 | if (!__test_and_clear_bit(IPA_POWER_FLAG_STARTED, power->flags)) { |
Alex Elder | b8e36e1 | 2021-08-19 16:12:28 -0500 | [diff] [blame] | 352 | netif_stop_queue(ipa->modem_netdev); |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 353 | __set_bit(IPA_POWER_FLAG_STOPPED, power->flags); |
Alex Elder | b8e36e1 | 2021-08-19 16:12:28 -0500 | [diff] [blame] | 354 | } |
| 355 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 356 | spin_unlock_irqrestore(&power->spinlock, flags); |
Alex Elder | b8e36e1 | 2021-08-19 16:12:28 -0500 | [diff] [blame] | 357 | } |
| 358 | |
| 359 | /* This function starts the modem netdev transmit queue, but only if the |
| 360 | * STOPPED flag is set. That flag is cleared if it was set. If the queue |
| 361 | * was restarted, the STARTED flag is set; this allows ipa_start_xmit() |
| 362 | * to skip stopping the queue in the event of a race. |
| 363 | */ |
| 364 | void ipa_power_modem_queue_wake(struct ipa *ipa) |
| 365 | { |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 366 | struct ipa_power *power = ipa->power; |
Alex Elder | b8e36e1 | 2021-08-19 16:12:28 -0500 | [diff] [blame] | 367 | unsigned long flags; |
| 368 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 369 | spin_lock_irqsave(&power->spinlock, flags); |
Alex Elder | b8e36e1 | 2021-08-19 16:12:28 -0500 | [diff] [blame] | 370 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 371 | if (__test_and_clear_bit(IPA_POWER_FLAG_STOPPED, power->flags)) { |
| 372 | __set_bit(IPA_POWER_FLAG_STARTED, power->flags); |
Alex Elder | b8e36e1 | 2021-08-19 16:12:28 -0500 | [diff] [blame] | 373 | netif_wake_queue(ipa->modem_netdev); |
| 374 | } |
| 375 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 376 | spin_unlock_irqrestore(&power->spinlock, flags); |
Alex Elder | b8e36e1 | 2021-08-19 16:12:28 -0500 | [diff] [blame] | 377 | } |
| 378 | |
| 379 | /* This function clears the STARTED flag once the TX queue is operating */ |
| 380 | void ipa_power_modem_queue_active(struct ipa *ipa) |
| 381 | { |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 382 | clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags); |
Alex Elder | b8e36e1 | 2021-08-19 16:12:28 -0500 | [diff] [blame] | 383 | } |
| 384 | |
Alex Elder | d430fe4 | 2021-08-12 14:50:30 -0500 | [diff] [blame] | 385 | int ipa_power_setup(struct ipa *ipa) |
Alex Elder | afe1baa | 2021-08-04 10:36:25 -0500 | [diff] [blame] | 386 | { |
Alex Elder | d430fe4 | 2021-08-12 14:50:30 -0500 | [diff] [blame] | 387 | int ret; |
| 388 | |
Alex Elder | afe1baa | 2021-08-04 10:36:25 -0500 | [diff] [blame] | 389 | ipa_interrupt_add(ipa->interrupt, IPA_IRQ_TX_SUSPEND, |
| 390 | ipa_suspend_handler); |
Alex Elder | d430fe4 | 2021-08-12 14:50:30 -0500 | [diff] [blame] | 391 | |
| 392 | ret = device_init_wakeup(&ipa->pdev->dev, true); |
| 393 | if (ret) |
| 394 | ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); |
| 395 | |
| 396 | return ret; |
Alex Elder | afe1baa | 2021-08-04 10:36:25 -0500 | [diff] [blame] | 397 | } |
| 398 | |
| 399 | void ipa_power_teardown(struct ipa *ipa) |
| 400 | { |
Alex Elder | d430fe4 | 2021-08-12 14:50:30 -0500 | [diff] [blame] | 401 | (void)device_init_wakeup(&ipa->pdev->dev, false); |
Alex Elder | afe1baa | 2021-08-04 10:36:25 -0500 | [diff] [blame] | 402 | ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_TX_SUSPEND); |
| 403 | } |
| 404 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 405 | /* Initialize IPA power management */ |
| 406 | struct ipa_power * |
| 407 | ipa_power_init(struct device *dev, const struct ipa_power_data *data) |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 408 | { |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 409 | struct ipa_power *power; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 410 | struct clk *clk; |
| 411 | int ret; |
| 412 | |
| 413 | clk = clk_get(dev, "core"); |
| 414 | if (IS_ERR(clk)) { |
Alex Elder | 4c7ccfc | 2021-02-12 08:33:59 -0600 | [diff] [blame] | 415 | dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n"); |
| 416 | |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 417 | return ERR_CAST(clk); |
| 418 | } |
| 419 | |
Alex Elder | 91d02f9 | 2020-11-19 16:40:41 -0600 | [diff] [blame] | 420 | ret = clk_set_rate(clk, data->core_clock_rate); |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 421 | if (ret) { |
Alex Elder | 91d02f9 | 2020-11-19 16:40:41 -0600 | [diff] [blame] | 422 | dev_err(dev, "error %d setting core clock rate to %u\n", |
| 423 | ret, data->core_clock_rate); |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 424 | goto err_clk_put; |
| 425 | } |
| 426 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 427 | power = kzalloc(sizeof(*power), GFP_KERNEL); |
| 428 | if (!power) { |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 429 | ret = -ENOMEM; |
| 430 | goto err_clk_put; |
| 431 | } |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 432 | power->dev = dev; |
| 433 | power->core = clk; |
| 434 | spin_lock_init(&power->spinlock); |
| 435 | power->interconnect_count = data->interconnect_count; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 436 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 437 | ret = ipa_interconnect_init(power, dev, data->interconnect_data); |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 438 | if (ret) |
| 439 | goto err_kfree; |
| 440 | |
Alex Elder | 1aac309 | 2021-08-20 11:01:27 -0500 | [diff] [blame] | 441 | pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY); |
| 442 | pm_runtime_use_autosuspend(dev); |
Alex Elder | 63de79f | 2021-08-10 14:27:01 -0500 | [diff] [blame] | 443 | pm_runtime_enable(dev); |
| 444 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 445 | return power; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 446 | |
| 447 | err_kfree: |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 448 | kfree(power); |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 449 | err_clk_put: |
| 450 | clk_put(clk); |
| 451 | |
| 452 | return ERR_PTR(ret); |
| 453 | } |
| 454 | |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 455 | /* Inverse of ipa_power_init() */ |
| 456 | void ipa_power_exit(struct ipa_power *power) |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 457 | { |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 458 | struct device *dev = power->dev; |
| 459 | struct clk *clk = power->core; |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 460 | |
Alex Elder | 1aac309 | 2021-08-20 11:01:27 -0500 | [diff] [blame] | 461 | pm_runtime_disable(dev); |
| 462 | pm_runtime_dont_use_autosuspend(dev); |
Alex Elder | 7aa0e8b | 2021-08-20 11:01:28 -0500 | [diff] [blame] | 463 | ipa_interconnect_exit(power); |
| 464 | kfree(power); |
Alex Elder | ba764c4 | 2020-03-05 22:28:19 -0600 | [diff] [blame] | 465 | clk_put(clk); |
| 466 | } |
Alex Elder | 73ff316 | 2021-08-04 10:36:24 -0500 | [diff] [blame] | 467 | |
Alex Elder | 73ff316 | 2021-08-04 10:36:24 -0500 | [diff] [blame] | 468 | const struct dev_pm_ops ipa_pm_ops = { |
Alex Elder | b9c532c | 2021-08-12 14:50:31 -0500 | [diff] [blame] | 469 | .suspend = ipa_suspend, |
| 470 | .resume = ipa_resume, |
Alex Elder | 63de79f | 2021-08-10 14:27:01 -0500 | [diff] [blame] | 471 | .runtime_suspend = ipa_runtime_suspend, |
| 472 | .runtime_resume = ipa_runtime_resume, |
Alex Elder | 73ff316 | 2021-08-04 10:36:24 -0500 | [diff] [blame] | 473 | }; |