  1. // SPDX-License-Identifier: GPL-2.0
  2. //
  3. // Copyright (C) 2021 ROHM Semiconductors
  4. // regulator IRQ based event notification helpers
  5. //
  6. // Logic has been partially adapted from qcom-labibb driver.
  7. //
  8. // Author: Matti Vaittinen <[email protected]>
  9. #include <linux/device.h>
  10. #include <linux/err.h>
  11. #include <linux/interrupt.h>
  12. #include <linux/kernel.h>
  13. #include <linux/reboot.h>
  14. #include <linux/regmap.h>
  15. #include <linux/slab.h>
  16. #include <linux/spinlock.h>
  17. #include <linux/regulator/driver.h>
  18. #include "internal.h"
  19. #define REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS 10000
/* Per-IRQ state for one registered regulator notification helper */
struct regulator_irq {
	struct regulator_irq_data rdata;	/* error state passed to map_event/renable/die */
	struct regulator_irq_desc desc;		/* copy of caller-provided descriptor */
	int irq;				/* IRQ line requested by the helper */
	int retry_cnt;				/* consecutive status-read failures, compared to desc.fatal_cnt */
	struct delayed_work isr_work;		/* deferred status re-check / IRQ re-enable */
};
/*
 * Should only be called from threaded handler to prevent potential deadlock
 */
static void rdev_flag_err(struct regulator_dev *rdev, int err)
{
	/* OR the new error bits into the regulator's cached error flags */
	spin_lock(&rdev->err_lock);
	rdev->cached_err |= err;
	spin_unlock(&rdev->err_lock);
}
/* Clear the given error bits from the regulator's cached error flags */
static void rdev_clear_err(struct regulator_dev *rdev, int err)
{
	spin_lock(&rdev->err_lock);
	rdev->cached_err &= ~err;
	spin_unlock(&rdev->err_lock);
}
/*
 * Delayed work run after the threaded ISR disabled the IRQ (irq_off_ms != 0)
 * or after a status-read failure. Re-checks the error condition via the
 * driver's renable() callback, clears cached error flags for regulators
 * whose problems are gone, and re-enables the IRQ - or reschedules itself /
 * escalates to die()/hw_protection_shutdown() when the problem persists.
 */
static void regulator_notifier_isr_work(struct work_struct *work)
{
	struct regulator_irq *h;
	struct regulator_irq_desc *d;
	struct regulator_irq_data *rid;
	int ret = 0;
	int tmo, i;
	int num_rdevs;

	h = container_of(work, struct regulator_irq,
			    isr_work.work);
	d = &h->desc;
	rid = &h->rdata;
	num_rdevs = rid->num_states;

reread:
	/* Retry budget exhausted: attempt IC-specific recovery or power off */
	if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
		if (!d->die)
			return hw_protection_shutdown("Regulator HW failure? - no IC recovery",
						      REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		ret = d->die(rid);
		/*
		 * If the 'last resort' IC recovery failed we will have
		 * nothing else left to do...
		 */
		if (ret)
			return hw_protection_shutdown("Regulator HW failure. IC recovery failed",
						      REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);

		/*
		 * If h->die() was implemented we assume recovery has been
		 * attempted (probably regulator was shut down) and we
		 * just enable IRQ and bail-out.
		 */
		goto enable_out;
	}
	if (d->renable) {
		ret = d->renable(rid);

		if (ret == REGULATOR_FAILED_RETRY) {
			/* Driver could not get current status */
			h->retry_cnt++;
			/*
			 * With no reread delay configured we retry
			 * immediately (tight loop bounded by fatal_cnt,
			 * checked above at 'reread').
			 */
			if (!d->reread_ms)
				goto reread;

			tmo = d->reread_ms;
			goto reschedule;
		}

		if (ret) {
			/*
			 * IC status reading succeeded. update error info
			 * just in case the renable changed it.
			 */
			for (i = 0; i < num_rdevs; i++) {
				struct regulator_err_state *stat;
				struct regulator_dev *rdev;

				stat = &rid->states[i];
				rdev = stat->rdev;
				/* Clear only bits no longer flagged in stat->errors */
				rdev_clear_err(rdev, (~stat->errors) &
						      stat->possible_errs);
			}
			h->retry_cnt++;
			/*
			 * The IC indicated problem is still ON - no point in
			 * re-enabling the IRQ. Retry later.
			 */
			tmo = d->irq_off_ms;
			goto reschedule;
		}
	}

	/*
	 * Either IC reported problem cleared or no status checker was provided.
	 * If problems are gone - good. If not - then the IRQ will fire again
	 * and we'll have a new nice loop. In any case we should clear error
	 * flags here and re-enable IRQs.
	 */
	for (i = 0; i < num_rdevs; i++) {
		struct regulator_err_state *stat;
		struct regulator_dev *rdev;

		stat = &rid->states[i];
		rdev = stat->rdev;
		rdev_clear_err(rdev, stat->possible_errs);
	}

	/*
	 * Things have been seemingly successful => zero retry-counter.
	 */
	h->retry_cnt = 0;

enable_out:
	enable_irq(h->irq);

	return;

reschedule:
	/* Re-run this work after tmo ms on the configured workqueue */
	if (!d->high_prio)
		mod_delayed_work(system_wq, &h->isr_work,
				 msecs_to_jiffies(tmo));
	else
		mod_delayed_work(system_highpri_wq, &h->isr_work,
				 msecs_to_jiffies(tmo));
}
/*
 * Threaded IRQ handler. Asks the IC driver (via map_event) which regulators
 * flagged which errors, fires the regulator notifier chains, caches error
 * flags, and - when the HW keeps the line asserted (irq_off_ms) - disables
 * the IRQ and schedules regulator_notifier_isr_work() to re-check later.
 */
static irqreturn_t regulator_notifier_isr(int irq, void *data)
{
	struct regulator_irq *h = data;
	struct regulator_irq_desc *d;
	struct regulator_irq_data *rid;
	unsigned long rdev_map = 0;
	int num_rdevs;
	int ret, i;

	d = &h->desc;
	rid = &h->rdata;
	num_rdevs = rid->num_states;

	/* Count attempts only when a fatal limit is configured */
	if (d->fatal_cnt)
		h->retry_cnt++;

	/*
	 * we spare a few cycles by not clearing statuses prior to this call.
	 * The IC driver must initialize the status buffers for rdevs
	 * which it indicates having active events via rdev_map.
	 *
	 * Maybe we should just to be on a safer side(?)
	 */
	ret = d->map_event(irq, rid, &rdev_map);

	/*
	 * If status reading fails (which is unlikely) we don't ack/disable
	 * IRQ but just increase fail count and retry when IRQ fires again.
	 * If retry_count exceeds the given safety limit we call IC specific die
	 * handler which can try disabling regulator(s).
	 *
	 * If no die handler is given we will just power-off as a last resort.
	 *
	 * We could try disabling all associated rdevs - but we might shoot
	 * ourselves in the head and leave the problematic regulator enabled. So
	 * if IC has no die-handler populated we just assume the regulator
	 * can't be disabled.
	 */
	if (unlikely(ret == REGULATOR_FAILED_RETRY))
		goto fail_out;

	h->retry_cnt = 0;
	/*
	 * Let's not disable IRQ if there were no status bits for us. We'd
	 * better leave spurious IRQ handling to genirq
	 */
	if (ret || !rdev_map)
		return IRQ_NONE;

	/*
	 * Some events are bogus if the regulator is disabled. Skip such events
	 * if all relevant regulators are disabled
	 */
	if (d->skip_off) {
		for_each_set_bit(i, &rdev_map, num_rdevs) {
			struct regulator_dev *rdev;
			const struct regulator_ops *ops;

			rdev = rid->states[i].rdev;
			ops = rdev->desc->ops;

			/*
			 * If any of the flagged regulators is enabled we do
			 * handle this
			 */
			if (ops->is_enabled(rdev))
				break;
		}
		/* Loop ran to completion => none of the flagged rdevs enabled */
		if (i == num_rdevs)
			return IRQ_NONE;
	}

	/* Disable IRQ if HW keeps line asserted */
	if (d->irq_off_ms)
		disable_irq_nosync(irq);

	/*
	 * IRQ seems to be for us. Let's fire correct notifiers / store error
	 * flags
	 */
	for_each_set_bit(i, &rdev_map, num_rdevs) {
		struct regulator_err_state *stat;
		struct regulator_dev *rdev;

		stat = &rid->states[i];
		rdev = stat->rdev;

		rdev_dbg(rdev, "Sending regulator notification EVT 0x%lx\n",
			 stat->notifs);

		regulator_notifier_call_chain(rdev, stat->notifs, NULL);
		rdev_flag_err(rdev, stat->errors);
	}

	/* Schedule the re-check that will eventually re-enable the IRQ */
	if (d->irq_off_ms) {
		if (!d->high_prio)
			schedule_delayed_work(&h->isr_work,
					      msecs_to_jiffies(d->irq_off_ms));
		else
			mod_delayed_work(system_highpri_wq,
					 &h->isr_work,
					 msecs_to_jiffies(d->irq_off_ms));
	}

	return IRQ_HANDLED;

fail_out:
	/* Status reading kept failing; escalate once the retry budget is spent */
	if (d->fatal_cnt && h->retry_cnt > d->fatal_cnt) {
		/* If we have no recovery, just try shut down straight away */
		if (!d->die) {
			hw_protection_shutdown("Regulator failure. Retry count exceeded",
					       REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		} else {
			ret = d->die(rid);
			/* If die() failed shut down as a last attempt to save the HW */
			if (ret)
				hw_protection_shutdown("Regulator failure. Recovery failed",
						       REGULATOR_FORCED_SAFETY_SHUTDOWN_WAIT_MS);
		}
	}

	return IRQ_NONE;
}
  241. static int init_rdev_state(struct device *dev, struct regulator_irq *h,
  242. struct regulator_dev **rdev, int common_err,
  243. int *rdev_err, int rdev_amount)
  244. {
  245. int i;
  246. h->rdata.states = devm_kzalloc(dev, sizeof(*h->rdata.states) *
  247. rdev_amount, GFP_KERNEL);
  248. if (!h->rdata.states)
  249. return -ENOMEM;
  250. h->rdata.num_states = rdev_amount;
  251. h->rdata.data = h->desc.data;
  252. for (i = 0; i < rdev_amount; i++) {
  253. h->rdata.states[i].possible_errs = common_err;
  254. if (rdev_err)
  255. h->rdata.states[i].possible_errs |= *rdev_err++;
  256. h->rdata.states[i].rdev = *rdev++;
  257. }
  258. return 0;
  259. }
  260. static void init_rdev_errors(struct regulator_irq *h)
  261. {
  262. int i;
  263. for (i = 0; i < h->rdata.num_states; i++)
  264. if (h->rdata.states[i].possible_errs)
  265. h->rdata.states[i].rdev->use_cached_err = true;
  266. }
  267. /**
  268. * regulator_irq_helper - register IRQ based regulator event/error notifier
  269. *
  270. * @dev: device providing the IRQs
  271. * @d: IRQ helper descriptor.
  272. * @irq: IRQ used to inform events/errors to be notified.
  273. * @irq_flags: Extra IRQ flags to be OR'ed with the default
  274. * IRQF_ONESHOT when requesting the (threaded) irq.
  275. * @common_errs: Errors which can be flagged by this IRQ for all rdevs.
  276. * When IRQ is re-enabled these errors will be cleared
  277. * from all associated regulators. Use this instead of the
  278. * per_rdev_errs if you use
  279. * regulator_irq_map_event_simple() for event mapping.
  280. * @per_rdev_errs: Optional error flag array describing errors specific
  281. * for only some of the regulators. These errors will be
  282. * or'ed with common errors. If this is given the array
  283. * should contain rdev_amount flags. Can be set to NULL
  284. * if there is no regulator specific error flags for this
  285. * IRQ.
  286. * @rdev: Array of pointers to regulators associated with this
  287. * IRQ.
  288. * @rdev_amount: Amount of regulators associated with this IRQ.
  289. *
  290. * Return: handle to irq_helper or an ERR_PTR() encoded error code.
  291. */
  292. void *regulator_irq_helper(struct device *dev,
  293. const struct regulator_irq_desc *d, int irq,
  294. int irq_flags, int common_errs, int *per_rdev_errs,
  295. struct regulator_dev **rdev, int rdev_amount)
  296. {
  297. struct regulator_irq *h;
  298. int ret;
  299. if (!rdev_amount || !d || !d->map_event || !d->name)
  300. return ERR_PTR(-EINVAL);
  301. h = devm_kzalloc(dev, sizeof(*h), GFP_KERNEL);
  302. if (!h)
  303. return ERR_PTR(-ENOMEM);
  304. h->irq = irq;
  305. h->desc = *d;
  306. ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
  307. rdev_amount);
  308. if (ret)
  309. return ERR_PTR(ret);
  310. init_rdev_errors(h);
  311. if (h->desc.irq_off_ms)
  312. INIT_DELAYED_WORK(&h->isr_work, regulator_notifier_isr_work);
  313. ret = request_threaded_irq(h->irq, NULL, regulator_notifier_isr,
  314. IRQF_ONESHOT | irq_flags, h->desc.name, h);
  315. if (ret) {
  316. dev_err(dev, "Failed to request IRQ %d\n", irq);
  317. return ERR_PTR(ret);
  318. }
  319. return h;
  320. }
  321. EXPORT_SYMBOL_GPL(regulator_irq_helper);
  322. /**
  323. * regulator_irq_helper_cancel - drop IRQ based regulator event/error notifier
  324. *
  325. * @handle: Pointer to handle returned by a successful call to
  326. * regulator_irq_helper(). Will be NULLed upon return.
  327. *
  328. * The associated IRQ is released and work is cancelled when the function
  329. * returns.
  330. */
  331. void regulator_irq_helper_cancel(void **handle)
  332. {
  333. if (handle && *handle) {
  334. struct regulator_irq *h = *handle;
  335. free_irq(h->irq, h);
  336. if (h->desc.irq_off_ms)
  337. cancel_delayed_work_sync(&h->isr_work);
  338. h = NULL;
  339. }
  340. }
  341. EXPORT_SYMBOL_GPL(regulator_irq_helper_cancel);
/**
 * regulator_irq_map_event_simple - regulator IRQ notification for trivial IRQs
 *
 * @irq:	Number of IRQ that occurred
 * @rid:	Information about the event IRQ indicates
 * @dev_mask:	mask indicating the regulator originating the IRQ
 *
 * Regulators whose IRQ has single, well defined purpose (always indicate
 * exactly one event, and are relevant to exactly one regulator device) can
 * use this function as their map_event callback for their regulator IRQ
 * notification helper. Exactly one rdev and exactly one error (in
 * "common_errs"-field) can be given at IRQ helper registration for
 * regulator_irq_map_event_simple() to be viable.
 *
 * Return: 0.
 */
int regulator_irq_map_event_simple(int irq, struct regulator_irq_data *rid,
				   unsigned long *dev_mask)
{
	int err = rid->states[0].possible_errs;

	*dev_mask = 1;
	/*
	 * This helper should only be used in a situation where the IRQ
	 * can indicate only one type of problem for one specific rdev.
	 * Something fishy is going on if we are having multiple rdevs or ERROR
	 * flags here.
	 */
	if (WARN_ON(rid->num_states != 1 || hweight32(err) != 1))
		return 0;

	rid->states[0].errors = err;
	rid->states[0].notifs = regulator_err2notif(err);

	return 0;
}
EXPORT_SYMBOL_GPL(regulator_irq_map_event_simple);