// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2021 ROHM Semiconductors
// regulator IRQ based event notification helpers
//
// Logic has been partially adapted from qcom-labibb driver.
//
// Author: Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>

#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/regulator/driver.h>

#include "internal.h"

#define REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS 10000

struct regulator_irq {
	struct regulator_irq_data rdata;
	struct regulator_irq_desc desc;
	int irq;
	int retry_cnt;
	struct delayed_work isr_work;
};

/*
 * Should only be called from threaded handler to prevent potential deadlock
 */
static void rdev_flag_err(struct regulator_dev *rdev, int err)
{
	spin_lock(&rdev->err_lock);
	rdev->cached_err |= err;
	spin_unlock(&rdev->err_lock);
}

static void rdev_clear_err(struct regulator_dev *rdev, int err)
{
	spin_lock(&rdev->err_lock);
	rdev->cached_err &= ~err;
	spin_unlock(&rdev->err_lock);
}

static void regulator_notifier_isr_work(struct work_struct *work)
{
	struct regulator_irq *h;
	struct regulator_irq_desc *d;
	struct regulator_irq_data *rid;
	int ret = 0;
	int tmo, i;
	int num_rdevs;

	h = container_of(work, struct regulator_irq,
			 isr_work.work);
	d = &h->desc;
	rid = &h->rdata;
	num_rdevs = rid->num_states;

reread:
	if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
		if (!d->die)
			return hw_protection_shutdown("Regulator HW failure? - no IC recovery",
						      REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		ret = d->die(rid);
		/*
		 * If the 'last resort' IC recovery failed we will have
		 * nothing else left to do...
		 */
		if (ret)
			return hw_protection_shutdown("Regulator HW failure. IC recovery failed",
						      REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);

		/*
		 * If h->die() was implemented we assume recovery has been
		 * attempted (probably regulator was shut down) and we
		 * just enable IRQ and bail-out.
		 */
		goto enable_out;
	}
	if (d->renable) {
		ret = d->renable(rid);

		if (ret == REGULATOR_FAILED_RETRY) {
			/* Driver could not get current status */
			h->retry_cnt++;
			if (!d->reread_ms)
				goto reread;

			tmo = d->reread_ms;
			goto reschedule;
		}

		if (ret) {
			/*
			 * IC status reading succeeded. Update the error info
			 * in case renable() changed it.
			 */
			for (i = 0; i < num_rdevs; i++) {
				struct regulator_err_state *stat;
				struct regulator_dev *rdev;

				stat = &rid->states[i];
				rdev = stat->rdev;
				rdev_clear_err(rdev, (~stat->errors) &
					       stat->possible_errs);
			}
			h->retry_cnt++;
			/*
			 * The IC indicated problem is still ON - no point in
			 * re-enabling the IRQ. Retry later.
			 */
			tmo = d->irq_off_ms;
			goto reschedule;
		}
	}

	/*
	 * Either the IC reported the problem cleared or no status checker was
	 * provided. If problems are gone - good. If not - then the IRQ will
	 * fire again and we'll have a new nice loop. In any case we should
	 * clear the error flags here and re-enable IRQs.
	 */
	for (i = 0; i < num_rdevs; i++) {
		struct regulator_err_state *stat;
		struct regulator_dev *rdev;

		stat = &rid->states[i];
		rdev = stat->rdev;
		rdev_clear_err(rdev, stat->possible_errs);
	}

	/*
	 * Things seem to have been successful => reset the retry counter.
	 */
	h->retry_cnt = 0;

enable_out:
	enable_irq(h->irq);

	return;

reschedule:
	if (!d->high_prio)
		mod_delayed_work(system_wq, &h->isr_work,
				 msecs_to_jiffies(tmo));
	else
		mod_delayed_work(system_highpri_wq, &h->isr_work,
				 msecs_to_jiffies(tmo));
}

static irqreturn_t regulator_notifier_isr(int irq, void *data)
{
	struct regulator_irq *h = data;
	struct regulator_irq_desc *d;
	struct regulator_irq_data *rid;
	unsigned long rdev_map = 0;
	int num_rdevs;
	int ret, i;

	d = &h->desc;
	rid = &h->rdata;
	num_rdevs = rid->num_states;

	if (d->fatal_cnt)
		h->retry_cnt++;

	/*
	 * We spare a few cycles by not clearing the statuses prior to this
	 * call. The IC driver must initialize the status buffers for those
	 * rdevs it flags as having active events via rdev_map. (Maybe we
	 * should clear them anyway, just to be on the safe side?) See the
	 * illustrative map_event() sketch after this function.
	 */
	ret = d->map_event(irq, rid, &rdev_map);

	/*
	 * If status reading fails (which is unlikely) we don't ack/disable
	 * the IRQ but just increase the fail count and retry when the IRQ
	 * fires again. If retry_cnt exceeds the given safety limit we call
	 * the IC specific die handler which can try disabling regulator(s).
	 *
	 * If no die handler is given we will just power-off as a last resort.
	 *
	 * We could try disabling all associated rdevs - but we might shoot
	 * ourselves in the foot and leave the problematic regulator enabled.
	 * So if the IC has no die handler populated we just assume the
	 * regulator can't be disabled.
	 */
	if (unlikely(ret == REGULATOR_FAILED_RETRY))
		goto fail_out;

	h->retry_cnt = 0;
	/*
	 * Let's not disable the IRQ if there were no status bits for us. We'd
	 * better leave spurious IRQ handling to genirq.
	 */
	if (ret || !rdev_map)
		return IRQ_NONE;

	/*
	 * Some events are bogus if the regulator is disabled. Skip such
	 * events if all relevant regulators are disabled.
	 */
	if (d->skip_off) {
		for_each_set_bit(i, &rdev_map, num_rdevs) {
			struct regulator_dev *rdev;
			const struct regulator_ops *ops;

			rdev = rid->states[i].rdev;
			ops = rdev->desc->ops;

			/*
			 * If any of the flagged regulators is enabled we do
			 * handle this IRQ.
			 */
			if (ops->is_enabled(rdev))
				break;
		}
		if (i == num_rdevs)
			return IRQ_NONE;
	}

	/* Disable IRQ if HW keeps line asserted */
	if (d->irq_off_ms)
		disable_irq_nosync(irq);

	/*
	 * IRQ seems to be for us. Let's fire correct notifiers / store error
	 * flags
	 */
	for_each_set_bit(i, &rdev_map, num_rdevs) {
		struct regulator_err_state *stat;
		struct regulator_dev *rdev;

		stat = &rid->states[i];
		rdev = stat->rdev;

		rdev_dbg(rdev, "Sending regulator notification EVT 0x%lx\n",
			 stat->notifs);

		regulator_notifier_call_chain(rdev, stat->notifs, NULL);
		rdev_flag_err(rdev, stat->errors);
	}

	if (d->irq_off_ms) {
		if (!d->high_prio)
			schedule_delayed_work(&h->isr_work,
					      msecs_to_jiffies(d->irq_off_ms));
		else
			mod_delayed_work(system_highpri_wq,
					 &h->isr_work,
					 msecs_to_jiffies(d->irq_off_ms));
	}

	return IRQ_HANDLED;

fail_out:
	if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
		/* If we have no recovery, just try to shut down straight away */
		if (!d->die) {
			hw_protection_shutdown("Regulator failure. Retry count exceeded",
					       REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		} else {
			ret = d->die(rid);
			/* If die() failed, shut down as a last attempt to save the HW */
			if (ret)
				hw_protection_shutdown("Regulator failure. Recovery failed",
						       REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		}
	}

	return IRQ_NONE;
}
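
/*
 * For illustration only: a minimal map_event() sketch, assuming a regmap
 * based PMIC with a single over-current status bit. The names my_chip,
 * MY_IRQ_STATUS_REG and MY_OCP_BIT are hypothetical and not part of this
 * file. The callback reads the IC status, fills in the notification/error
 * bits of the affected rdev states and flags those rdevs in *dev_mask:
 *
 *	static int my_map_event(int irq, struct regulator_irq_data *rid,
 *				unsigned long *dev_mask)
 *	{
 *		struct my_chip *chip = rid->data;
 *		unsigned int status;
 *
 *		// Ask the helper to retry if the status can't be read
 *		if (regmap_read(chip->regmap, MY_IRQ_STATUS_REG, &status))
 *			return REGULATOR_FAILED_RETRY;
 *
 *		*dev_mask = 0;
 *		if (status & MY_OCP_BIT) {
 *			*dev_mask |= BIT(0);
 *			rid->states[0].notifs = REGULATOR_EVENT_OVER_CURRENT;
 *			rid->states[0].errors = REGULATOR_ERROR_OVER_CURRENT;
 *		}
 *
 *		// Returning 0 with an empty *dev_mask makes the ISR above
 *		// return IRQ_NONE
 *		return 0;
 *	}
 */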

static int init_rdev_state(struct device *dev, struct regulator_irq *h,
			   struct regulator_dev **rdev, int common_err,
			   int *rdev_err, int rdev_amount)
{
	int i;

	h->rdata.states = devm_kcalloc(dev, rdev_amount,
				       sizeof(*h->rdata.states), GFP_KERNEL);
	if (!h->rdata.states)
		return -ENOMEM;

	h->rdata.num_states = rdev_amount;
	h->rdata.data = h->desc.data;

	for (i = 0; i < rdev_amount; i++) {
		h->rdata.states[i].possible_errs = common_err;
		if (rdev_err)
			h->rdata.states[i].possible_errs |= *rdev_err++;
		h->rdata.states[i].rdev = *rdev++;
	}

	return 0;
}

static void init_rdev_errors(struct regulator_irq *h)
{
	int i;

	for (i = 0; i < h->rdata.num_states; i++)
		if (h->rdata.states[i].possible_errs)
			h->rdata.states[i].rdev->use_cached_err = true;
}

/**
 * regulator_irq_helper - register IRQ based regulator event/error notifier
 *
 * @dev:	device providing the IRQs
 * @d:		IRQ helper descriptor.
 * @irq:	IRQ used to inform events/errors to be notified.
 * @irq_flags:	Extra IRQ flags to be OR'ed with the default
 *		IRQF_ONESHOT when requesting the (threaded) irq.
 * @common_errs: Errors which can be flagged by this IRQ for all rdevs.
 *		When the IRQ is re-enabled these errors will be cleared
 *		from all associated regulators.
 * @per_rdev_errs: Optional array of error flags describing errors specific
 *		to only some of the regulators. These errors will be OR'ed
 *		with the common errors. If given, the array should contain
 *		rdev_amount flags. Can be set to NULL if there are no
 *		regulator-specific error flags for this IRQ.
 * @rdev:	Array of pointers to the regulators associated with this
 *		IRQ.
 * @rdev_amount: Number of regulators associated with this IRQ.
 *
 * Return: handle to irq_helper or an ERR_PTR() encoded error code.
 */
void *regulator_irq_helper(struct device *dev,
			   const struct regulator_irq_desc *d, int irq,
			   int irq_flags, int common_errs, int *per_rdev_errs,
			   struct regulator_dev **rdev, int rdev_amount)
{
	struct regulator_irq *h;
	int ret;

	if (!rdev_amount || !d || !d->map_event || !d->name)
		return ERR_PTR(-EINVAL);

	h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
	if (!h)
		return ERR_PTR(-ENOMEM);

	h->irq = irq;
	h->desc = *d;

	ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
			      rdev_amount);
	if (ret)
		return ERR_PTR(ret);

	init_rdev_errors(h);

	if (h->desc.irq_off_ms)
		INIT_DELAYED_WORK(&h->isr_work, regulator_notifier_isr_work);

	ret = request_threaded_irq(h->irq, NULL, regulator_notifier_isr,
				   IRQF_ONESHOT | irq_flags, h->desc.name, h);
	if (ret) {
		dev_err(dev, "Failed to request IRQ %d\n", irq);

		return ERR_PTR(ret);
	}

	return h;
}
EXPORT_SYMBOL_GPL(regulator_irq_helper);
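
/*
 * A minimal registration sketch, assuming the hypothetical my_map_event()
 * and my_chip from the sketch above, plus a driver-provided array my_rdevs[]
 * of already registered regulators. The descriptor fields are real; the
 * values and names are illustrative:
 *
 *	struct regulator_irq_desc my_ocp_desc = {
 *		.name = "my-pmic-ocp",
 *		.irq_off_ms = 1000,	// keep IRQ disabled 1 s after an event
 *		.map_event = my_map_event,
 *		.data = chip,		// passed to my_map_event() as rid->data
 *	};
 *	void *helper;
 *
 *	helper = regulator_irq_helper(&pdev->dev, &my_ocp_desc, irq, 0,
 *				      REGULATOR_ERROR_OVER_CURRENT, NULL,
 *				      my_rdevs, ARRAY_SIZE(my_rdevs));
 *	if (IS_ERR(helper))
 *		return PTR_ERR(helper);
 *
 * Note that the descriptor is copied into the helper, so a stack-local
 * struct like the one above is fine.
 */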

/**
 * regulator_irq_helper_cancel - drop IRQ based regulator event/error notifier
 *
 * @handle:	Pointer to handle returned by a successful call to
 *		regulator_irq_helper(). Will be NULLed upon return.
 *
 * The associated IRQ is released and work is cancelled when the function
 * returns.
 */
void regulator_irq_helper_cancel(void **handle)
{
	if (handle && *handle) {
		struct regulator_irq *h = *handle;

		free_irq(h->irq, h);
		if (h->desc.irq_off_ms)
			cancel_delayed_work_sync(&h->isr_work);

		*handle = NULL;
	}
}
EXPORT_SYMBOL_GPL(regulator_irq_helper_cancel);
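
/*
 * A matching teardown sketch for the example above, with chip->ocp_helper
 * being a hypothetical void * field where the driver stored the handle:
 *
 *	// e.g. in the driver's remove() path
 *	regulator_irq_helper_cancel(&chip->ocp_helper);
 *
 * The handle is NULLed so a repeated call becomes a harmless no-op.
 */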