wcd9xxx-irq.c

/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <soc/qcom/pm.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include "core.h"
#include "wcd9xxx-irq.h"

#define BYTE_BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_BYTE))
#define BIT_BYTE(nr)		((nr) / BITS_PER_BYTE)

#define WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS 100

#ifndef NO_IRQ
#define NO_IRQ	(-1)
#endif

#ifdef CONFIG_OF
struct wcd9xxx_irq_drv_data {
        struct irq_domain *domain;
        int irq;
};
#endif

static int virq_to_phyirq(
        struct wcd9xxx_core_resource *wcd9xxx_res, int virq);
static int phyirq_to_virq(
        struct wcd9xxx_core_resource *wcd9xxx_res, int irq);
static unsigned int wcd9xxx_irq_get_upstream_irq(
        struct wcd9xxx_core_resource *wcd9xxx_res);
static void wcd9xxx_irq_put_downstream_irq(
        struct wcd9xxx_core_resource *wcd9xxx_res);
static void wcd9xxx_irq_put_upstream_irq(
        struct wcd9xxx_core_resource *wcd9xxx_res);
static int wcd9xxx_map_irq(
        struct wcd9xxx_core_resource *wcd9xxx_res, int irq);

static void wcd9xxx_irq_lock(struct irq_data *data)
{
        struct wcd9xxx_core_resource *wcd9xxx_res =
                        irq_data_get_irq_chip_data(data);

        mutex_lock(&wcd9xxx_res->irq_lock);
}

static void wcd9xxx_irq_sync_unlock(struct irq_data *data)
{
        struct wcd9xxx_core_resource *wcd9xxx_res =
                        irq_data_get_irq_chip_data(data);
        int i;

        /*
         * The mutex was taken in wcd9xxx_irq_lock; even the error paths
         * below must release it, otherwise the bus lock is leaked.
         */
        if ((ARRAY_SIZE(wcd9xxx_res->irq_masks_cur) >
                        WCD9XXX_MAX_IRQ_REGS) ||
            (ARRAY_SIZE(wcd9xxx_res->irq_masks_cache) >
                        WCD9XXX_MAX_IRQ_REGS)) {
                pr_err("%s: Array size out of bound\n", __func__);
                goto unlock;
        }
        if (!wcd9xxx_res->wcd_core_regmap) {
                pr_err("%s: Codec core regmap not defined\n", __func__);
                goto unlock;
        }

        for (i = 0; i < ARRAY_SIZE(wcd9xxx_res->irq_masks_cur); i++) {
                /*
                 * If there's been a change in the mask write it back
                 * to the hardware.
                 */
                if (wcd9xxx_res->irq_masks_cur[i] !=
                    wcd9xxx_res->irq_masks_cache[i]) {
                        wcd9xxx_res->irq_masks_cache[i] =
                                wcd9xxx_res->irq_masks_cur[i];
                        regmap_write(wcd9xxx_res->wcd_core_regmap,
                                wcd9xxx_res->intr_reg[WCD9XXX_INTR_MASK_BASE] + i,
                                wcd9xxx_res->irq_masks_cur[i]);
                }
        }

unlock:
        mutex_unlock(&wcd9xxx_res->irq_lock);
}

static void wcd9xxx_irq_enable(struct irq_data *data)
{
        struct wcd9xxx_core_resource *wcd9xxx_res =
                        irq_data_get_irq_chip_data(data);
        int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
        int byte = BIT_BYTE(wcd9xxx_irq);
        int size = ARRAY_SIZE(wcd9xxx_res->irq_masks_cur);

        if ((byte < size) && (byte >= 0)) {
                wcd9xxx_res->irq_masks_cur[byte] &=
                        ~(BYTE_BIT_MASK(wcd9xxx_irq));
        } else {
                pr_err("%s: Array size is %d but index is %d: Out of range\n",
                        __func__, size, byte);
        }
}

static void wcd9xxx_irq_disable(struct irq_data *data)
{
        struct wcd9xxx_core_resource *wcd9xxx_res =
                        irq_data_get_irq_chip_data(data);
        int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
        int byte = BIT_BYTE(wcd9xxx_irq);
        int size = ARRAY_SIZE(wcd9xxx_res->irq_masks_cur);

        if ((byte < size) && (byte >= 0)) {
                wcd9xxx_res->irq_masks_cur[byte] |=
                        BYTE_BIT_MASK(wcd9xxx_irq);
        } else {
                pr_err("%s: Array size is %d but index is %d: Out of range\n",
                        __func__, size, byte);
        }
}

static void wcd9xxx_irq_ack(struct irq_data *data)
{
        int wcd9xxx_irq = 0;
        struct wcd9xxx_core_resource *wcd9xxx_res =
                        irq_data_get_irq_chip_data(data);

        if (wcd9xxx_res == NULL) {
                pr_err("%s: wcd9xxx_res is NULL\n", __func__);
                return;
        }
        wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
        pr_debug("%s: IRQ_ACK called for WCD9XXX IRQ: %d\n",
                 __func__, wcd9xxx_irq);
}

static void wcd9xxx_irq_mask(struct irq_data *d)
{
        /*
         * Do nothing. The callback is still required because the irq
         * core calls irq_mask without a NULL check.
         */
}

static struct irq_chip wcd9xxx_irq_chip = {
        .name = "wcd9xxx",
        .irq_bus_lock = wcd9xxx_irq_lock,
        .irq_bus_sync_unlock = wcd9xxx_irq_sync_unlock,
        .irq_disable = wcd9xxx_irq_disable,
        .irq_enable = wcd9xxx_irq_enable,
        .irq_mask = wcd9xxx_irq_mask,
        .irq_ack = wcd9xxx_irq_ack,
};
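
/**
 * wcd9xxx_lock_sleep - keep the system awake while a codec IRQ is handled
 *
 * @wcd9xxx_res: pointer to core resource
 *
 * Takes a wakeup reference (pm_stay_awake() plus a pm_qos latency request)
 * and waits up to WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS for the system to
 * resume. Returns false if the system did not resume in time; the caller
 * should then bail out and let the still-asserted interrupt line
 * retrigger. Pair every successful call with wcd9xxx_unlock_sleep().
 */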
bool wcd9xxx_lock_sleep(struct wcd9xxx_core_resource *wcd9xxx_res)
{
        enum wcd9xxx_pm_state os;

        /*
         * wcd9xxx_{lock,unlock}_sleep are called mostly by
         * wcd9xxx_irq_thread and its subroutines, but btn0_lpress_fn is
         * not a subroutine of wcd9xxx_irq_thread and can race with it,
         * so wlock_holders must be protected by a mutex.
         *
         * If the system didn't resume, simply return false so the codec
         * driver's IRQ handler can return without handling the IRQ. As
         * the interrupt line is still active, the codec will raise
         * another IRQ to retry shortly.
         */
        mutex_lock(&wcd9xxx_res->pm_lock);
        if (wcd9xxx_res->wlock_holders++ == 0) {
                pr_debug("%s: holding wake lock\n", __func__);
                pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
                                      msm_cpuidle_get_deep_idle_latency());
                pm_stay_awake(wcd9xxx_res->dev);
        }
        mutex_unlock(&wcd9xxx_res->pm_lock);

        if (!wait_event_timeout(wcd9xxx_res->pm_wq,
                                ((os = wcd9xxx_pm_cmpxchg(wcd9xxx_res,
                                                WCD9XXX_PM_SLEEPABLE,
                                                WCD9XXX_PM_AWAKE)) ==
                                                WCD9XXX_PM_SLEEPABLE ||
                                 (os == WCD9XXX_PM_AWAKE)),
                                msecs_to_jiffies(
                                        WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
                pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
                        __func__,
                        WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, wcd9xxx_res->pm_state,
                        wcd9xxx_res->wlock_holders);
                wcd9xxx_unlock_sleep(wcd9xxx_res);
                return false;
        }
        wake_up_all(&wcd9xxx_res->pm_wq);

        return true;
}
EXPORT_SYMBOL(wcd9xxx_lock_sleep);
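
/**
 * wcd9xxx_unlock_sleep - drop the wakeup reference taken by
 *			  wcd9xxx_lock_sleep
 *
 * @wcd9xxx_res: pointer to core resource
 *
 * When the last holder drops its reference, the pm_qos request is
 * restored to PM_QOS_DEFAULT_VALUE, pm_relax() is called and the PM
 * state returns to WCD9XXX_PM_SLEEPABLE (unless a failed lock left it
 * in WCD9XXX_PM_ASLEEP).
 */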
void wcd9xxx_unlock_sleep(struct wcd9xxx_core_resource *wcd9xxx_res)
{
        mutex_lock(&wcd9xxx_res->pm_lock);
        if (--wcd9xxx_res->wlock_holders == 0) {
                pr_debug("%s: releasing wake lock pm_state %d -> %d\n",
                         __func__, wcd9xxx_res->pm_state, WCD9XXX_PM_SLEEPABLE);
                /*
                 * If wcd9xxx_lock_sleep failed, pm_state would still be
                 * WCD9XXX_PM_ASLEEP; don't overwrite it.
                 */
                if (likely(wcd9xxx_res->pm_state == WCD9XXX_PM_AWAKE))
                        wcd9xxx_res->pm_state = WCD9XXX_PM_SLEEPABLE;
                pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
                                      PM_QOS_DEFAULT_VALUE);
                pm_relax(wcd9xxx_res->dev);
        }
        mutex_unlock(&wcd9xxx_res->pm_lock);
        wake_up_all(&wcd9xxx_res->pm_wq);
}
EXPORT_SYMBOL(wcd9xxx_unlock_sleep);
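
/*
 * wcd9xxx_nested_irq_{lock,unlock} serialize dispatch of the nested
 * (codec-internal) interrupt handlers against each other.
 */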
void wcd9xxx_nested_irq_lock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
        mutex_lock(&wcd9xxx_res->nested_irq_lock);
}

void wcd9xxx_nested_irq_unlock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
        mutex_unlock(&wcd9xxx_res->nested_irq_lock);
}
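
/*
 * Dispatch one codec interrupt to its nested handler. irqdata->clear_first
 * selects whether the status bit is cleared in hardware before or after
 * the nested handler runs; on I2C an extra write to the clear-commit
 * register is needed to latch the clear.
 */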
static void wcd9xxx_irq_dispatch(struct wcd9xxx_core_resource *wcd9xxx_res,
                                 struct intr_data *irqdata)
{
        int irqbit = irqdata->intr_num;

        if (!wcd9xxx_res->wcd_core_regmap) {
                pr_err("%s: codec core regmap not defined\n", __func__);
                return;
        }

        if (irqdata->clear_first) {
                wcd9xxx_nested_irq_lock(wcd9xxx_res);
                regmap_write(wcd9xxx_res->wcd_core_regmap,
                        wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE] +
                                BIT_BYTE(irqbit),
                        BYTE_BIT_MASK(irqbit));
                if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
                        regmap_write(wcd9xxx_res->wcd_core_regmap,
                                wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
                                0x02);
                handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
                wcd9xxx_nested_irq_unlock(wcd9xxx_res);
        } else {
                wcd9xxx_nested_irq_lock(wcd9xxx_res);
                handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
                regmap_write(wcd9xxx_res->wcd_core_regmap,
                        wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE] +
                                BIT_BYTE(irqbit),
                        BYTE_BIT_MASK(irqbit));
                if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
                        regmap_write(wcd9xxx_res->wcd_core_regmap,
                                wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
                                0x02);
                wcd9xxx_nested_irq_unlock(wcd9xxx_res);
        }
}
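
/*
 * Threaded handler for the summary interrupt line: read the raw status
 * registers, mask off disabled interrupts, dispatch the remaining ones
 * in intr_table order, and as a failsafe clear any interrupt that no
 * nested handler consumed.
 */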
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
        int ret;
        int i;
        struct intr_data irqdata;
        char linebuf[128];
        static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
        struct wcd9xxx_core_resource *wcd9xxx_res = data;
        int num_irq_regs = wcd9xxx_res->num_irq_regs;
        struct wcd9xxx *wcd9xxx;
        u8 status[4], status1[4] = {0}, unmask_status[4] = {0};

        if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
                dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
                return IRQ_NONE;
        }

        if (!wcd9xxx_res->wcd_core_regmap) {
                dev_err(wcd9xxx_res->dev,
                        "%s: Codec core regmap not supplied\n", __func__);
                goto err_disable_irq;
        }

        wcd9xxx = (struct wcd9xxx *)wcd9xxx_res->parent;
        if (!wcd9xxx) {
                dev_err(wcd9xxx_res->dev,
                        "%s: Codec core not supplied\n", __func__);
                goto err_disable_irq;
        }

        if (!wcd9xxx->dev_up) {
                dev_info_ratelimited(wcd9xxx_res->dev, "wcd9xxx dev not up\n");
                /*
                 * Sleep so we do not block the core while the device is
                 * not up (slimbus will not be available) to process
                 * interrupts.
                 */
                msleep(10);
        }

        memset(status, 0, sizeof(status));
        ret = regmap_bulk_read(wcd9xxx_res->wcd_core_regmap,
                        wcd9xxx_res->intr_reg[WCD9XXX_INTR_STATUS_BASE],
                        status, num_irq_regs);
        if (ret < 0) {
                dev_err(wcd9xxx_res->dev,
                        "Failed to read interrupt status: %d\n", ret);
                goto err_disable_irq;
        }

        /*
         * If status is 0, return without clearing.
         * status contains:        HW status minus masked interrupts
         * status1 contains:       unhandled interrupts minus masked interrupts
         * unmask_status contains: unhandled interrupts
         */
        if (unlikely(!memcmp(status, status1, sizeof(status)))) {
                pr_debug("%s: status is 0\n", __func__);
                wcd9xxx_unlock_sleep(wcd9xxx_res);
                return IRQ_HANDLED;
        }

        /*
         * Copy status to unmask_status before masking; otherwise SW may
         * fail to clear a masked interrupt in a corner case.
         */
        memcpy(unmask_status, status, sizeof(unmask_status));

        /* Apply masking */
        for (i = 0; i < num_irq_regs; i++)
                status[i] &= ~wcd9xxx_res->irq_masks_cur[i];
        memcpy(status1, status, sizeof(status1));

        /*
         * Find out which interrupts were triggered and call their
         * handler functions.
         *
         * The codec has only one hardware irq line, shared by all of its
         * internal interrupts, so the master irq handler may have to
         * dispatch several nested irq handlers for one assertion.
         * Dispatch the interrupts in the order maintained by the
         * interrupt table.
         */
        for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
                irqdata = wcd9xxx_res->intr_table[i];
                if (status[BIT_BYTE(irqdata.intr_num)] &
                    BYTE_BIT_MASK(irqdata.intr_num)) {
                        wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
                        status1[BIT_BYTE(irqdata.intr_num)] &=
                                        ~BYTE_BIT_MASK(irqdata.intr_num);
                        unmask_status[BIT_BYTE(irqdata.intr_num)] &=
                                        ~BYTE_BIT_MASK(irqdata.intr_num);
                }
        }

        /*
         * As a failsafe, if an unhandled irq is found, clear it to
         * prevent an interrupt storm.
         * Note that we can declare an irq unhandled only when no irq was
         * handled by any nested handler, since Taiko can route some irqs
         * to the qdsp as their destination; the driver must not clear
         * pending irqs when some were handled and others were not.
         */
        if (unlikely(!memcmp(status, status1, sizeof(status)))) {
                if (__ratelimit(&ratelimit)) {
                        pr_warn("%s: Unhandled irq found\n", __func__);
                        hex_dump_to_buffer(status, sizeof(status), 16, 1,
                                           linebuf, sizeof(linebuf), false);
                        pr_warn("%s: status0 : %s\n", __func__, linebuf);
                        hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
                                           linebuf, sizeof(linebuf), false);
                        pr_warn("%s: status1 : %s\n", __func__, linebuf);
                }
                /*
                 * unmask_status contains the unhandled interrupts; clear
                 * them all.
                 */
                ret = regmap_bulk_write(wcd9xxx_res->wcd_core_regmap,
                        wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE],
                        unmask_status, num_irq_regs);
                if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
                        regmap_write(wcd9xxx_res->wcd_core_regmap,
                                wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
                                0x02);
        }
        wcd9xxx_unlock_sleep(wcd9xxx_res);

        return IRQ_HANDLED;

err_disable_irq:
        dev_err(wcd9xxx_res->dev, "Disable irq %d\n", wcd9xxx_res->irq);

        disable_irq_wake(wcd9xxx_res->irq);
        disable_irq_nosync(wcd9xxx_res->irq);
        wcd9xxx_unlock_sleep(wcd9xxx_res);
        return IRQ_NONE;
}

/**
 * wcd9xxx_free_irq - free a codec interrupt previously requested with
 *		      wcd9xxx_request_irq
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 * @data: data pointer
 */
void wcd9xxx_free_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
                      int irq, void *data)
{
        free_irq(phyirq_to_virq(wcd9xxx_res, irq), data);
}
EXPORT_SYMBOL(wcd9xxx_free_irq);

/**
 * wcd9xxx_enable_irq - enable a codec interrupt
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 */
void wcd9xxx_enable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
        if (wcd9xxx_res->irq)
                enable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_enable_irq);

/**
 * wcd9xxx_disable_irq - disable a codec interrupt without waiting for a
 *			 running handler to finish
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 */
void wcd9xxx_disable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
        if (wcd9xxx_res->irq)
                disable_irq_nosync(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_disable_irq);

/**
 * wcd9xxx_disable_irq_sync - disable a codec interrupt and wait for any
 *			      running handler to complete
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 */
void wcd9xxx_disable_irq_sync(
                        struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
        if (wcd9xxx_res->irq)
                disable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_disable_irq_sync);

static int wcd9xxx_irq_setup_downstream_irq(
                        struct wcd9xxx_core_resource *wcd9xxx_res)
{
        int irq, virq, ret;

        pr_debug("%s: enter\n", __func__);

        for (irq = 0; irq < wcd9xxx_res->num_irqs; irq++) {
                /* Map OF irq */
                virq = wcd9xxx_map_irq(wcd9xxx_res, irq);
                pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
                if (virq == NO_IRQ) {
                        pr_err("%s, No interrupt specifier for irq %d\n",
                               __func__, irq);
                        return NO_IRQ;
                }

                ret = irq_set_chip_data(virq, wcd9xxx_res);
                if (ret) {
                        pr_err("%s: Failed to configure irq %d (%d)\n",
                               __func__, irq, ret);
                        return ret;
                }

                if (wcd9xxx_res->irq_level_high[irq])
                        irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
                                                 handle_level_irq);
                else
                        irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
                                                 handle_edge_irq);
                irq_set_nested_thread(virq, 1);
        }

        pr_debug("%s: leave\n", __func__);

        return 0;
}

/**
 * wcd9xxx_irq_init - initialize the codec interrupt controller
 *
 * @wcd9xxx_res: pointer to core resource
 *
 * Returns 0 on success, appropriate error code otherwise
 */
int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res)
{
        int i, ret;
        u8 irq_level[wcd9xxx_res->num_irq_regs];
        struct irq_domain *domain;
        struct device_node *pnode;

        mutex_init(&wcd9xxx_res->irq_lock);
        mutex_init(&wcd9xxx_res->nested_irq_lock);

        pnode = of_irq_find_parent(wcd9xxx_res->dev->of_node);
        if (unlikely(!pnode))
                return -EINVAL;

        domain = irq_find_host(pnode);
        if (unlikely(!domain))
                return -EINVAL;

        wcd9xxx_res->domain = domain;

        wcd9xxx_res->irq = wcd9xxx_irq_get_upstream_irq(wcd9xxx_res);
        if (!wcd9xxx_res->irq) {
                pr_warn("%s: irq driver is not yet initialized\n", __func__);
                mutex_destroy(&wcd9xxx_res->irq_lock);
                mutex_destroy(&wcd9xxx_res->nested_irq_lock);
                return -EPROBE_DEFER;
        }
        pr_debug("%s: probed irq %d\n", __func__, wcd9xxx_res->irq);

        /* Setup downstream IRQs */
        ret = wcd9xxx_irq_setup_downstream_irq(wcd9xxx_res);
        if (ret) {
                pr_err("%s: Failed to setup downstream IRQ\n", __func__);
                wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
                mutex_destroy(&wcd9xxx_res->irq_lock);
                mutex_destroy(&wcd9xxx_res->nested_irq_lock);
                return ret;
        }

        /*
         * IRQ 0 is level triggered; all other wcd9xxx interrupts are
         * edge triggered.
         */
        wcd9xxx_res->irq_level_high[0] = true;

        /* Mask all the interrupts */
        memset(irq_level, 0, wcd9xxx_res->num_irq_regs);
        for (i = 0; i < wcd9xxx_res->num_irqs; i++) {
                wcd9xxx_res->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
                wcd9xxx_res->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
                irq_level[BIT_BYTE(i)] |=
                        wcd9xxx_res->irq_level_high[i] << (i % BITS_PER_BYTE);
        }

        if (!wcd9xxx_res->wcd_core_regmap) {
                dev_err(wcd9xxx_res->dev,
                        "%s: Codec core regmap not defined\n", __func__);
                ret = -EINVAL;
                goto fail_irq_init;
        }

        for (i = 0; i < wcd9xxx_res->num_irq_regs; i++) {
                /* Initialize interrupt mask and level registers */
                regmap_write(wcd9xxx_res->wcd_core_regmap,
                        wcd9xxx_res->intr_reg[WCD9XXX_INTR_LEVEL_BASE] + i,
                        irq_level[i]);
                regmap_write(wcd9xxx_res->wcd_core_regmap,
                        wcd9xxx_res->intr_reg[WCD9XXX_INTR_MASK_BASE] + i,
                        wcd9xxx_res->irq_masks_cur[i]);
        }

        ret = request_threaded_irq(wcd9xxx_res->irq, NULL, wcd9xxx_irq_thread,
                                   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
                                   "wcd9xxx", wcd9xxx_res);
        if (ret != 0) {
                dev_err(wcd9xxx_res->dev, "Failed to request IRQ %d: %d\n",
                        wcd9xxx_res->irq, ret);
        } else {
                ret = enable_irq_wake(wcd9xxx_res->irq);
                if (ret) {
                        dev_err(wcd9xxx_res->dev,
                                "Failed to set wake interrupt on IRQ %d: %d\n",
                                wcd9xxx_res->irq, ret);
                        free_irq(wcd9xxx_res->irq, wcd9xxx_res);
                }
        }

        if (ret)
                goto fail_irq_init;

        return ret;

fail_irq_init:
        dev_err(wcd9xxx_res->dev,
                "%s: Failed to init wcd9xxx irq\n", __func__);
        wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
        mutex_destroy(&wcd9xxx_res->irq_lock);
        mutex_destroy(&wcd9xxx_res->nested_irq_lock);
        return ret;
}
EXPORT_SYMBOL(wcd9xxx_irq_init);
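
/**
 * wcd9xxx_request_irq - request a handler for a codec interrupt
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: codec (physical) irq number
 * @handler: threaded handler; runs as a nested thread of the summary IRQ
 * @name: name shown in /proc/interrupts
 * @data: cookie passed back to @handler
 *
 * Returns 0 on success, negative error code otherwise.
 */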
int wcd9xxx_request_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
                        int irq, irq_handler_t handler,
                        const char *name, void *data)
{
        int virq;

        virq = phyirq_to_virq(wcd9xxx_res, irq);

        return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING,
                                    name, data);
}
EXPORT_SYMBOL(wcd9xxx_request_irq);
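
/**
 * wcd9xxx_irq_exit - tear down what wcd9xxx_irq_init set up
 *
 * @wcd9xxx_res: pointer to core resource
 *
 * Frees the summary interrupt, detaches the downstream virqs from the
 * chip, releases the upstream domain reference and destroys the mutexes.
 */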
void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res)
{
        dev_dbg(wcd9xxx_res->dev, "%s: Cleaning up irq %d\n", __func__,
                wcd9xxx_res->irq);

        if (wcd9xxx_res->irq) {
                disable_irq_wake(wcd9xxx_res->irq);
                free_irq(wcd9xxx_res->irq, wcd9xxx_res);
                wcd9xxx_res->irq = 0;
                wcd9xxx_irq_put_downstream_irq(wcd9xxx_res);
                wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
        }
        mutex_destroy(&wcd9xxx_res->irq_lock);
        mutex_destroy(&wcd9xxx_res->nested_irq_lock);
}
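
/*
 * Physical-to-virtual irq mapping: without CONFIG_OF the codec irqs are
 * a flat range starting at irq_base; with CONFIG_OF the mapping goes
 * through the linear irq_domain registered by the wcd9xxx_intc platform
 * driver below.
 */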
#ifndef CONFIG_OF
static int phyirq_to_virq(
        struct wcd9xxx_core_resource *wcd9xxx_res,
        int offset)
{
        return wcd9xxx_res->irq_base + offset;
}

static int virq_to_phyirq(
        struct wcd9xxx_core_resource *wcd9xxx_res,
        int virq)
{
        return virq - wcd9xxx_res->irq_base;
}

static unsigned int wcd9xxx_irq_get_upstream_irq(
        struct wcd9xxx_core_resource *wcd9xxx_res)
{
        return wcd9xxx_res->irq;
}

/*
 * Stub so that non-OF builds link: wcd9xxx_irq_exit calls this
 * unconditionally, but there is nothing to undo without an irq domain.
 */
static void wcd9xxx_irq_put_downstream_irq(
        struct wcd9xxx_core_resource *wcd9xxx_res)
{
        /* Do nothing */
}

static void wcd9xxx_irq_put_upstream_irq(
        struct wcd9xxx_core_resource *wcd9xxx_res)
{
        /* Do nothing */
}

static int wcd9xxx_map_irq(
        struct wcd9xxx_core_resource *wcd9xxx_core_res, int irq)
{
        return phyirq_to_virq(wcd9xxx_core_res, irq);
}
#else
static struct wcd9xxx_irq_drv_data *
wcd9xxx_irq_add_domain(struct device_node *node,
                       struct device_node *parent)
{
        struct wcd9xxx_irq_drv_data *data = NULL;

        pr_debug("%s: node %s, node parent %s\n", __func__,
                 node->name, node->parent->name);

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return NULL;

        /*
         * The wcd9xxx_intc interrupt controller supports N-to-N irq
         * mapping with a single-cell binding carrying irq numbers
         * (offsets) only. Use irq_domain_simple_ops, which provides
         * irq_domain_simple_map and irq_domain_xlate_onetwocell.
         */
        data->domain = irq_domain_add_linear(node, WCD9XXX_MAX_NUM_IRQS,
                                             &irq_domain_simple_ops, data);
        if (!data->domain) {
                kfree(data);
                return NULL;
        }

        return data;
}

static struct wcd9xxx_irq_drv_data *
wcd9xxx_get_irq_drv_d(const struct wcd9xxx_core_resource *wcd9xxx_res)
{
        struct irq_domain *domain;

        domain = wcd9xxx_res->domain;

        if (domain)
                return domain->host_data;
        else
                return NULL;
}

static int phyirq_to_virq(struct wcd9xxx_core_resource *wcd9xxx_res, int offset)
{
        struct wcd9xxx_irq_drv_data *data;

        data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
        if (!data) {
                pr_warn("%s: not registered to interrupt controller\n",
                        __func__);
                return -EINVAL;
        }
        return irq_linear_revmap(data->domain, offset);
}

static int virq_to_phyirq(struct wcd9xxx_core_resource *wcd9xxx_res, int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);

        if (unlikely(!irq_data)) {
                pr_err("%s: irq_data is NULL\n", __func__);
                return -EINVAL;
        }
        return irq_data->hwirq;
}

static unsigned int wcd9xxx_irq_get_upstream_irq(
                        struct wcd9xxx_core_resource *wcd9xxx_res)
{
        struct wcd9xxx_irq_drv_data *data;

        data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
        if (!data) {
                pr_err("%s: interrupt controller is not registered\n",
                       __func__);
                return 0;
        }

        /* Make sure data is updated before return. */
        rmb();
        return data->irq;
}

static void wcd9xxx_irq_put_downstream_irq(
                        struct wcd9xxx_core_resource *wcd9xxx_res)
{
        int irq, virq, ret;

        /*
         * IRQ migration hits an error if the chip data and handlers are
         * not set to NULL, so clear the associated data and handlers
         * here at irq_exit time.
         */
        for (irq = 0; irq < wcd9xxx_res->num_irqs; irq++) {
                virq = wcd9xxx_map_irq(wcd9xxx_res, irq);
                pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);

                ret = irq_set_chip_data(virq, NULL);
                if (ret) {
                        pr_err("%s: Failed to configure irq %d (%d)\n",
                               __func__, irq, ret);
                        return;
                }
                irq_set_chip_and_handler(virq, NULL, NULL);
        }
}

static void wcd9xxx_irq_put_upstream_irq(
                        struct wcd9xxx_core_resource *wcd9xxx_res)
{
        wcd9xxx_res->domain = NULL;
}

static int wcd9xxx_map_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
        return of_irq_to_resource(wcd9xxx_res->dev->of_node, irq, NULL);
}
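
/*
 * Probe the wcd9xxx_intc node: resolve the upstream (summary) interrupt
 * either from the "qcom,gpio-connect" GPIO or from the "wcd_irq" named
 * interrupt resource, then register the linear irq domain that the
 * codec driver maps its interrupts through.
 */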
static int wcd9xxx_irq_probe(struct platform_device *pdev)
{
        int irq, dir_apps_irq = -EINVAL;
        struct wcd9xxx_irq_drv_data *data;
        struct device_node *node = pdev->dev.of_node;

        irq = of_get_named_gpio(node, "qcom,gpio-connect", 0);
        if (!gpio_is_valid(irq))
                dir_apps_irq = platform_get_irq_byname(pdev, "wcd_irq");

        if (!gpio_is_valid(irq) && dir_apps_irq < 0) {
                dev_err(&pdev->dev, "TLMM connect gpio not found\n");
                return -EPROBE_DEFER;
        }
        if (dir_apps_irq > 0) {
                irq = dir_apps_irq;
        } else {
                irq = gpio_to_irq(irq);
                if (irq < 0) {
                        dev_err(&pdev->dev, "Unable to configure irq\n");
                        return irq;
                }
        }
        dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq);

        data = wcd9xxx_irq_add_domain(node, node->parent);
        if (!data) {
                pr_err("%s: irq_add_domain failed\n", __func__);
                return -EINVAL;
        }
        data->irq = irq;

        /* Make sure irq is saved before return. */
        wmb();

        return 0;
}

static int wcd9xxx_irq_remove(struct platform_device *pdev)
{
        struct irq_domain *domain;
        struct wcd9xxx_irq_drv_data *data;

        domain = irq_find_host(pdev->dev.of_node);
        if (unlikely(!domain)) {
                pr_err("%s: domain is NULL\n", __func__);
                return -EINVAL;
        }
        data = (struct wcd9xxx_irq_drv_data *)domain->host_data;
        data->irq = 0;

        /* Make sure the irq variable is updated in data before irq removal. */
        wmb();
        irq_domain_remove(data->domain);
        kfree(data);

        return 0;
}

static const struct of_device_id of_match[] = {
        { .compatible = "qcom,wcd9xxx-irq" },
        { }
};

static struct platform_driver wcd9xxx_irq_driver = {
        .probe = wcd9xxx_irq_probe,
        .remove = wcd9xxx_irq_remove,
        .driver = {
                .name = "wcd9xxx_intc",
                .owner = THIS_MODULE,
                .of_match_table = of_match_ptr(of_match),
        },
};

int wcd9xxx_irq_drv_init(void)
{
        return platform_driver_register(&wcd9xxx_irq_driver);
}

void wcd9xxx_irq_drv_exit(void)
{
        platform_driver_unregister(&wcd9xxx_irq_driver);
}
#endif /* CONFIG_OF */