wcd9xxx-irq.c

/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <soc/qcom/pm.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include "core.h"
#include "wcd9xxx-irq.h"

#define BYTE_BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_BYTE))
#define BIT_BYTE(nr)		((nr) / BITS_PER_BYTE)

#define WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS 100

#ifndef NO_IRQ
#define NO_IRQ	(-1)
#endif

#ifdef CONFIG_OF
struct wcd9xxx_irq_drv_data {
	struct irq_domain *domain;
	int irq;
};
#endif

static int virq_to_phyirq(
	struct wcd9xxx_core_resource *wcd9xxx_res, int virq);
static int phyirq_to_virq(
	struct wcd9xxx_core_resource *wcd9xxx_res, int irq);
static unsigned int wcd9xxx_irq_get_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res);
static void wcd9xxx_irq_put_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res);
static int wcd9xxx_map_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res, int irq);
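
/*
 * Descriptive comment (added): wcd9xxx_irq_lock/wcd9xxx_irq_sync_unlock
 * implement a regmap-irq-style bus lock. Callers batch mask changes in
 * irq_masks_cur under irq_lock; sync_unlock then writes back only the
 * mask bytes that actually changed.
 */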
static void wcd9xxx_irq_lock(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);

	mutex_lock(&wcd9xxx_res->irq_lock);
}

static void wcd9xxx_irq_sync_unlock(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);
	int i;

	if ((ARRAY_SIZE(wcd9xxx_res->irq_masks_cur) >
			WCD9XXX_MAX_IRQ_REGS) ||
	    (ARRAY_SIZE(wcd9xxx_res->irq_masks_cache) >
			WCD9XXX_MAX_IRQ_REGS)) {
		pr_err("%s: Array size out of bounds\n", __func__);
		return;
	}
	if (!wcd9xxx_res->wcd_core_regmap) {
		pr_err("%s: Codec core regmap not defined\n",
		       __func__);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(wcd9xxx_res->irq_masks_cur); i++) {
		/*
		 * If there's been a change in the mask, write it back
		 * to the hardware.
		 */
		if (wcd9xxx_res->irq_masks_cur[i] !=
				wcd9xxx_res->irq_masks_cache[i]) {
			wcd9xxx_res->irq_masks_cache[i] =
				wcd9xxx_res->irq_masks_cur[i];
			regmap_write(wcd9xxx_res->wcd_core_regmap,
				wcd9xxx_res->intr_reg[WCD9XXX_INTR_MASK_BASE] + i,
				wcd9xxx_res->irq_masks_cur[i]);
		}
	}
	mutex_unlock(&wcd9xxx_res->irq_lock);
}
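
/*
 * Descriptive comment (added): enable/disable only update the cached
 * mask bytes; the register write itself is deferred to
 * wcd9xxx_irq_sync_unlock() when the bus lock is released.
 */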
static void wcd9xxx_irq_enable(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
	int byte = BIT_BYTE(wcd9xxx_irq);
	int size = ARRAY_SIZE(wcd9xxx_res->irq_masks_cur);

	if ((byte < size) && (byte >= 0)) {
		wcd9xxx_res->irq_masks_cur[byte] &=
			~(BYTE_BIT_MASK(wcd9xxx_irq));
	} else {
		pr_err("%s: Array size is %d but index is %d: Out of range\n",
		       __func__, size, byte);
	}
}

static void wcd9xxx_irq_disable(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
	int byte = BIT_BYTE(wcd9xxx_irq);
	int size = ARRAY_SIZE(wcd9xxx_res->irq_masks_cur);

	if ((byte < size) && (byte >= 0)) {
		wcd9xxx_res->irq_masks_cur[byte] |=
			BYTE_BIT_MASK(wcd9xxx_irq);
	} else {
		pr_err("%s: Array size is %d but index is %d: Out of range\n",
		       __func__, size, byte);
	}
}

static void wcd9xxx_irq_ack(struct irq_data *data)
{
	int wcd9xxx_irq = 0;
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);

	if (wcd9xxx_res == NULL) {
		pr_err("%s: wcd9xxx_res is NULL\n", __func__);
		return;
	}
	wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
	pr_debug("%s: IRQ_ACK called for WCD9XXX IRQ: %d\n",
		 __func__, wcd9xxx_irq);
}

static void wcd9xxx_irq_mask(struct irq_data *d)
{
	/* Deliberately empty: the irq core calls irq_mask without a
	 * NULL check, so a stub is required.
	 */
}

static struct irq_chip wcd9xxx_irq_chip = {
	.name = "wcd9xxx",
	.irq_bus_lock = wcd9xxx_irq_lock,
	.irq_bus_sync_unlock = wcd9xxx_irq_sync_unlock,
	.irq_disable = wcd9xxx_irq_disable,
	.irq_enable = wcd9xxx_irq_enable,
	.irq_mask = wcd9xxx_irq_mask,
	.irq_ack = wcd9xxx_irq_ack,
};
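
/**
 * wcd9xxx_lock_sleep - hold the codec awake while an IRQ is serviced
 * (kernel-doc added for clarity)
 *
 * @wcd9xxx_res: pointer to core resource
 *
 * Returns true if the system is awake and the wake lock is held,
 * false if the system failed to resume in time.
 */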
bool wcd9xxx_lock_sleep(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	enum wcd9xxx_pm_state os;

	/*
	 * wcd9xxx_{lock,unlock}_sleep are mostly called from
	 * wcd9xxx_irq_thread and its subroutines, but btn0_lpress_fn is
	 * not a subroutine of wcd9xxx_irq_thread and can race with it.
	 * wlock_holders therefore needs to be protected by a mutex.
	 *
	 * If the system did not resume, simply return false so the codec
	 * driver's IRQ handler can return without handling the IRQ.
	 * Since the interrupt line is still active, the codec will raise
	 * another IRQ to retry shortly.
	 */
	mutex_lock(&wcd9xxx_res->pm_lock);
	if (wcd9xxx_res->wlock_holders++ == 0) {
		pr_debug("%s: holding wake lock\n", __func__);
		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
				      msm_cpuidle_get_deep_idle_latency());
		pm_stay_awake(wcd9xxx_res->dev);
	}
	mutex_unlock(&wcd9xxx_res->pm_lock);

	if (!wait_event_timeout(wcd9xxx_res->pm_wq,
				((os = wcd9xxx_pm_cmpxchg(wcd9xxx_res,
						WCD9XXX_PM_SLEEPABLE,
						WCD9XXX_PM_AWAKE)) ==
						WCD9XXX_PM_SLEEPABLE ||
				 (os == WCD9XXX_PM_AWAKE)),
				msecs_to_jiffies(
					WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
		pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
			__func__,
			WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, wcd9xxx_res->pm_state,
			wcd9xxx_res->wlock_holders);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return false;
	}
	wake_up_all(&wcd9xxx_res->pm_wq);

	return true;
}
EXPORT_SYMBOL(wcd9xxx_lock_sleep);
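
/**
 * wcd9xxx_unlock_sleep - drop the reference taken by wcd9xxx_lock_sleep
 * (kernel-doc added for clarity)
 *
 * @wcd9xxx_res: pointer to core resource
 *
 * When the last holder releases, the PM QoS request is restored to its
 * default and the device is allowed to sleep again.
 */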
void wcd9xxx_unlock_sleep(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_lock(&wcd9xxx_res->pm_lock);
	if (--wcd9xxx_res->wlock_holders == 0) {
		pr_debug("%s: releasing wake lock pm_state %d -> %d\n",
			 __func__, wcd9xxx_res->pm_state, WCD9XXX_PM_SLEEPABLE);
		/*
		 * If wcd9xxx_lock_sleep failed, pm_state would still be
		 * WCD9XXX_PM_ASLEEP; don't overwrite it.
		 */
		if (likely(wcd9xxx_res->pm_state == WCD9XXX_PM_AWAKE))
			wcd9xxx_res->pm_state = WCD9XXX_PM_SLEEPABLE;
		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
				      PM_QOS_DEFAULT_VALUE);
		pm_relax(wcd9xxx_res->dev);
	}
	mutex_unlock(&wcd9xxx_res->pm_lock);
	wake_up_all(&wcd9xxx_res->pm_wq);
}
EXPORT_SYMBOL(wcd9xxx_unlock_sleep);

void wcd9xxx_nested_irq_lock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_lock(&wcd9xxx_res->nested_irq_lock);
}

void wcd9xxx_nested_irq_unlock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_unlock(&wcd9xxx_res->nested_irq_lock);
}
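
/*
 * Descriptive comment (added): dispatch one codec interrupt to its
 * nested handler. Depending on irqdata->clear_first, the status bit is
 * cleared either before or after the nested handler runs; I2C parts
 * additionally need a commit write to latch the clear.
 */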
static void wcd9xxx_irq_dispatch(struct wcd9xxx_core_resource *wcd9xxx_res,
				 struct intr_data *irqdata)
{
	int irqbit = irqdata->intr_num;

	if (!wcd9xxx_res->wcd_core_regmap) {
		pr_err("%s: codec core regmap not defined\n",
		       __func__);
		return;
	}

	if (irqdata->clear_first) {
		wcd9xxx_nested_irq_lock(wcd9xxx_res);
		regmap_write(wcd9xxx_res->wcd_core_regmap,
			wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE] +
			BIT_BYTE(irqbit),
			BYTE_BIT_MASK(irqbit));

		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			regmap_write(wcd9xxx_res->wcd_core_regmap,
				wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
				0x02);
		handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
		wcd9xxx_nested_irq_unlock(wcd9xxx_res);
	} else {
		wcd9xxx_nested_irq_lock(wcd9xxx_res);
		handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
		regmap_write(wcd9xxx_res->wcd_core_regmap,
			wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE] +
			BIT_BYTE(irqbit),
			BYTE_BIT_MASK(irqbit));

		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			regmap_write(wcd9xxx_res->wcd_core_regmap,
				wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
				0x02);
		wcd9xxx_nested_irq_unlock(wcd9xxx_res);
	}
}
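
/*
 * Descriptive comment (added): threaded handler for the upstream
 * (summary) interrupt. It reads the per-byte status registers, masks
 * out disabled sources, dispatches each pending source in
 * interrupt-table order, and clears any leftover unhandled bits as a
 * failsafe against an interrupt storm.
 */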
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	int i;
	struct intr_data irqdata;
	char linebuf[128];
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
	struct wcd9xxx_core_resource *wcd9xxx_res = data;
	int num_irq_regs = wcd9xxx_res->num_irq_regs;
	u8 status[4], status1[4] = {0}, unmask_status[4] = {0};

	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
		dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}

	if (!wcd9xxx_res->wcd_core_regmap) {
		dev_err(wcd9xxx_res->dev,
			"%s: Codec core regmap not supplied\n",
			__func__);
		goto err_disable_irq;
	}

	memset(status, 0, sizeof(status));

	ret = regmap_bulk_read(wcd9xxx_res->wcd_core_regmap,
			wcd9xxx_res->intr_reg[WCD9XXX_INTR_STATUS_BASE],
			status, num_irq_regs);
	if (ret < 0) {
		dev_err(wcd9xxx_res->dev,
			"Failed to read interrupt status: %d\n", ret);
		goto err_disable_irq;
	}

	/*
	 * If status is 0, return without clearing.
	 * status contains: HW status - masked interrupts
	 * status1 contains: unhandled interrupts - masked interrupts
	 * unmask_status contains: unhandled interrupts
	 */
	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
		pr_debug("%s: status is 0\n", __func__);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return IRQ_HANDLED;
	}

	/*
	 * Copy status to unmask_status before masking; otherwise SW may
	 * fail to clear a masked interrupt in a corner case.
	 */
	memcpy(unmask_status, status, sizeof(unmask_status));

	/* Apply masking */
	for (i = 0; i < num_irq_regs; i++)
		status[i] &= ~wcd9xxx_res->irq_masks_cur[i];
	memcpy(status1, status, sizeof(status1));

	/*
	 * Find out which interrupts were triggered and call their handler
	 * functions.
	 *
	 * Since the codec has only one hardware irq line, shared by all of
	 * its internal interrupts, the master irq handler may dispatch
	 * multiple nested irq handlers in a single pass. Dispatch
	 * interrupts in the order maintained by the interrupt table.
	 */
	for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
		irqdata = wcd9xxx_res->intr_table[i];
		if (status[BIT_BYTE(irqdata.intr_num)] &
			BYTE_BIT_MASK(irqdata.intr_num)) {
			wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
			status1[BIT_BYTE(irqdata.intr_num)] &=
					~BYTE_BIT_MASK(irqdata.intr_num);
			unmask_status[BIT_BYTE(irqdata.intr_num)] &=
					~BYTE_BIT_MASK(irqdata.intr_num);
		}
	}

	/*
	 * As a failsafe, if an unhandled irq is found, clear it to prevent
	 * an interrupt storm.
	 * Note that an irq can be declared unhandled only when no irq at
	 * all was handled by a nested irq handler, since Taiko supports the
	 * QDSP as the destination for a few irqs. The driver therefore must
	 * not clear pending irqs when some were handled and others were not.
	 */
	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
		if (__ratelimit(&ratelimit)) {
			pr_warn("%s: Unhandled irq found\n", __func__);
			hex_dump_to_buffer(status, sizeof(status), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status0 : %s\n", __func__, linebuf);
			hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status1 : %s\n", __func__, linebuf);
		}
		/*
		 * unmask_status contains unhandled interrupts, hence clear
		 * all unhandled interrupts.
		 */
		ret = regmap_bulk_write(wcd9xxx_res->wcd_core_regmap,
			wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE],
			unmask_status, num_irq_regs);
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			regmap_write(wcd9xxx_res->wcd_core_regmap,
				wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
				0x02);
	}
	wcd9xxx_unlock_sleep(wcd9xxx_res);

	return IRQ_HANDLED;

err_disable_irq:
	dev_err(wcd9xxx_res->dev,
		"Disable irq %d\n", wcd9xxx_res->irq);

	disable_irq_wake(wcd9xxx_res->irq);
	disable_irq_nosync(wcd9xxx_res->irq);
	wcd9xxx_unlock_sleep(wcd9xxx_res);
	return IRQ_NONE;
}
/**
 * wcd9xxx_free_irq - free a codec interrupt handler
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 * @data: data pointer
 */
void wcd9xxx_free_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
		      int irq, void *data)
{
	free_irq(phyirq_to_virq(wcd9xxx_res, irq), data);
}
EXPORT_SYMBOL(wcd9xxx_free_irq);

/**
 * wcd9xxx_enable_irq - enable a codec interrupt
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 */
void wcd9xxx_enable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	if (wcd9xxx_res->irq)
		enable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_enable_irq);

/**
 * wcd9xxx_disable_irq - disable a codec interrupt without waiting
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 */
void wcd9xxx_disable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	if (wcd9xxx_res->irq)
		disable_irq_nosync(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_disable_irq);

/**
 * wcd9xxx_disable_irq_sync - disable a codec interrupt and wait for any
 * running handler to complete
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 */
void wcd9xxx_disable_irq_sync(
			struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	if (wcd9xxx_res->irq)
		disable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_disable_irq_sync);
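
/*
 * Descriptive comment (added): map each physical codec interrupt to a
 * virq in the domain and attach the wcd9xxx irq_chip with a level or
 * edge flow handler, marked as a nested thread since the parent handler
 * is itself threaded.
 */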
static int wcd9xxx_irq_setup_downstream_irq(
			struct wcd9xxx_core_resource *wcd9xxx_res)
{
	int irq, virq, ret;

	pr_debug("%s: enter\n", __func__);

	for (irq = 0; irq < wcd9xxx_res->num_irqs; irq++) {
		/* Map OF irq */
		virq = wcd9xxx_map_irq(wcd9xxx_res, irq);
		pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
		if (virq == NO_IRQ) {
			pr_err("%s, No interrupt specifier for irq %d\n",
			       __func__, irq);
			return NO_IRQ;
		}

		ret = irq_set_chip_data(virq, wcd9xxx_res);
		if (ret) {
			pr_err("%s: Failed to configure irq %d (%d)\n",
			       __func__, irq, ret);
			return ret;
		}

		if (wcd9xxx_res->irq_level_high[irq])
			irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
						 handle_level_irq);
		else
			irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
						 handle_edge_irq);

		irq_set_nested_thread(virq, 1);
	}

	pr_debug("%s: leave\n", __func__);

	return 0;
}
/**
 * wcd9xxx_irq_init
 *
 * @wcd9xxx_res: pointer to core resource
 *
 * Returns 0 on success, appropriate error code otherwise
 */
int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	int i, ret;
	u8 irq_level[wcd9xxx_res->num_irq_regs];
	struct irq_domain *domain;
	struct device_node *pnode;

	mutex_init(&wcd9xxx_res->irq_lock);
	mutex_init(&wcd9xxx_res->nested_irq_lock);

	pnode = of_irq_find_parent(wcd9xxx_res->dev->of_node);
	if (unlikely(!pnode))
		return -EINVAL;

	domain = irq_find_host(pnode);
	if (unlikely(!domain))
		return -EINVAL;

	wcd9xxx_res->domain = domain;

	wcd9xxx_res->irq = wcd9xxx_irq_get_upstream_irq(wcd9xxx_res);
	if (!wcd9xxx_res->irq) {
		pr_warn("%s: irq driver is not yet initialized\n", __func__);
		mutex_destroy(&wcd9xxx_res->irq_lock);
		mutex_destroy(&wcd9xxx_res->nested_irq_lock);
		return -EPROBE_DEFER;
	}
	pr_debug("%s: probed irq %d\n", __func__, wcd9xxx_res->irq);

	/* Setup downstream IRQs */
	ret = wcd9xxx_irq_setup_downstream_irq(wcd9xxx_res);
	if (ret) {
		pr_err("%s: Failed to setup downstream IRQ\n", __func__);
		wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
		mutex_destroy(&wcd9xxx_res->irq_lock);
		mutex_destroy(&wcd9xxx_res->nested_irq_lock);
		return ret;
	}

	/* IRQ 0 is level triggered; all other wcd9xxx interrupts are
	 * edge triggered.
	 */
	wcd9xxx_res->irq_level_high[0] = true;

	/* Mask all the interrupts */
	memset(irq_level, 0, wcd9xxx_res->num_irq_regs);
	for (i = 0; i < wcd9xxx_res->num_irqs; i++) {
		wcd9xxx_res->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		wcd9xxx_res->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		irq_level[BIT_BYTE(i)] |=
			wcd9xxx_res->irq_level_high[i] << (i % BITS_PER_BYTE);
	}

	if (!wcd9xxx_res->wcd_core_regmap) {
		dev_err(wcd9xxx_res->dev,
			"%s: Codec core regmap not defined\n",
			__func__);
		ret = -EINVAL;
		goto fail_irq_init;
	}

	for (i = 0; i < wcd9xxx_res->num_irq_regs; i++) {
		/* Initialize interrupt mask and level registers */
		regmap_write(wcd9xxx_res->wcd_core_regmap,
			wcd9xxx_res->intr_reg[WCD9XXX_INTR_LEVEL_BASE] + i,
			irq_level[i]);
		regmap_write(wcd9xxx_res->wcd_core_regmap,
			wcd9xxx_res->intr_reg[WCD9XXX_INTR_MASK_BASE] + i,
			wcd9xxx_res->irq_masks_cur[i]);
	}

	ret = request_threaded_irq(wcd9xxx_res->irq, NULL, wcd9xxx_irq_thread,
				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				   "wcd9xxx", wcd9xxx_res);
	if (ret != 0) {
		dev_err(wcd9xxx_res->dev, "Failed to request IRQ %d: %d\n",
			wcd9xxx_res->irq, ret);
	} else {
		ret = enable_irq_wake(wcd9xxx_res->irq);
		if (ret) {
			dev_err(wcd9xxx_res->dev,
				"Failed to set wake interrupt on IRQ %d: %d\n",
				wcd9xxx_res->irq, ret);
			free_irq(wcd9xxx_res->irq, wcd9xxx_res);
		}
	}

	if (ret)
		goto fail_irq_init;

	return ret;

fail_irq_init:
	dev_err(wcd9xxx_res->dev,
		"%s: Failed to init wcd9xxx irq\n", __func__);
	wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
	mutex_destroy(&wcd9xxx_res->irq_lock);
	mutex_destroy(&wcd9xxx_res->nested_irq_lock);
	return ret;
}
EXPORT_SYMBOL(wcd9xxx_irq_init);
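
/**
 * wcd9xxx_request_irq - request a threaded handler for a codec interrupt
 * (kernel-doc added for clarity)
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: physical irq number within the codec
 * @handler: threaded handler to invoke
 * @name: name for the interrupt
 * @data: cookie passed back to @handler
 */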
int wcd9xxx_request_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
			int irq, irq_handler_t handler,
			const char *name, void *data)
{
	int virq;

	virq = phyirq_to_virq(wcd9xxx_res, irq);

	return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING,
				    name, data);
}
EXPORT_SYMBOL(wcd9xxx_request_irq);
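
/*
 * Descriptive comment (added): tear down the summary interrupt and the
 * locks created by wcd9xxx_irq_init().
 */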
void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	dev_dbg(wcd9xxx_res->dev, "%s: Cleaning up irq %d\n", __func__,
		wcd9xxx_res->irq);

	if (wcd9xxx_res->irq) {
		disable_irq_wake(wcd9xxx_res->irq);
		free_irq(wcd9xxx_res->irq, wcd9xxx_res);
		wcd9xxx_res->irq = 0;
		wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
	}
	mutex_destroy(&wcd9xxx_res->irq_lock);
	mutex_destroy(&wcd9xxx_res->nested_irq_lock);
}
#ifndef CONFIG_OF
static int phyirq_to_virq(
	struct wcd9xxx_core_resource *wcd9xxx_res,
	int offset)
{
	return wcd9xxx_res->irq_base + offset;
}

static int virq_to_phyirq(
	struct wcd9xxx_core_resource *wcd9xxx_res,
	int virq)
{
	return virq - wcd9xxx_res->irq_base;
}

static unsigned int wcd9xxx_irq_get_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	return wcd9xxx_res->irq;
}

static void wcd9xxx_irq_put_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	/* Do nothing */
}

static int wcd9xxx_map_irq(
	struct wcd9xxx_core_resource *wcd9xxx_core_res, int irq)
{
	return phyirq_to_virq(wcd9xxx_core_res, irq);
}
#else
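/*
 * Descriptive comment (added): in the CONFIG_OF build, physical<->virtual
 * irq translation goes through a linear irq_domain owned by the
 * wcd9xxx_intc platform device registered below.
 */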
static struct wcd9xxx_irq_drv_data *
wcd9xxx_irq_add_domain(struct device_node *node,
		       struct device_node *parent)
{
	struct wcd9xxx_irq_drv_data *data = NULL;

	pr_debug("%s: node %s, node parent %s\n", __func__,
		 node->name, node->parent->name);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	/*
	 * The wcd9xxx_intc interrupt controller supports N-to-N irq
	 * mapping with a single-cell binding that carries only the irq
	 * number (offset). Use irq_domain_simple_ops, which provides
	 * irq_domain_simple_map and irq_domain_xlate_onetwocell.
	 */
	data->domain = irq_domain_add_linear(node, WCD9XXX_MAX_NUM_IRQS,
					     &irq_domain_simple_ops, data);
	if (!data->domain) {
		kfree(data);
		return NULL;
	}

	return data;
}

static struct wcd9xxx_irq_drv_data *
wcd9xxx_get_irq_drv_d(const struct wcd9xxx_core_resource *wcd9xxx_res)
{
	struct irq_domain *domain;

	domain = wcd9xxx_res->domain;

	if (domain)
		return domain->host_data;
	else
		return NULL;
}

static int phyirq_to_virq(struct wcd9xxx_core_resource *wcd9xxx_res, int offset)
{
	struct wcd9xxx_irq_drv_data *data;

	data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
	if (!data) {
		pr_warn("%s: not registered to interrupt controller\n",
			__func__);
		return -EINVAL;
	}
	return irq_linear_revmap(data->domain, offset);
}

static int virq_to_phyirq(struct wcd9xxx_core_resource *wcd9xxx_res, int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	if (unlikely(!irq_data)) {
		pr_err("%s: irq_data is NULL\n", __func__);
		return -EINVAL;
	}
	return irq_data->hwirq;
}

static unsigned int wcd9xxx_irq_get_upstream_irq(
			struct wcd9xxx_core_resource *wcd9xxx_res)
{
	struct wcd9xxx_irq_drv_data *data;

	data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
	if (!data) {
		pr_err("%s: interrupt controller is not registered\n",
		       __func__);
		return 0;
	}

	/* Make sure data is updated before return. */
	rmb();
	return data->irq;
}

static void wcd9xxx_irq_put_upstream_irq(
			struct wcd9xxx_core_resource *wcd9xxx_res)
{
	wcd9xxx_res->domain = NULL;
}

static int wcd9xxx_map_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	return of_irq_to_resource(wcd9xxx_res->dev->of_node, irq, NULL);
}
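
/*
 * Descriptive comment (added): probe the interrupt controller node.
 * Resolve the upstream interrupt either from the "qcom,gpio-connect"
 * GPIO or from the "wcd_irq" named resource, then register the irq
 * domain used for downstream mapping.
 */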
static int wcd9xxx_irq_probe(struct platform_device *pdev)
{
	int irq, dir_apps_irq = -EINVAL;
	struct wcd9xxx_irq_drv_data *data;
	struct device_node *node = pdev->dev.of_node;

	irq = of_get_named_gpio(node, "qcom,gpio-connect", 0);
	if (!gpio_is_valid(irq))
		dir_apps_irq = platform_get_irq_byname(pdev, "wcd_irq");

	if (!gpio_is_valid(irq) && dir_apps_irq < 0) {
		dev_err(&pdev->dev, "TLMM connect gpio not found\n");
		return -EPROBE_DEFER;
	}
	if (dir_apps_irq > 0) {
		irq = dir_apps_irq;
	} else {
		irq = gpio_to_irq(irq);
		if (irq < 0) {
			dev_err(&pdev->dev, "Unable to configure irq\n");
			return irq;
		}
	}
	dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq);

	data = wcd9xxx_irq_add_domain(node, node->parent);
	if (!data) {
		pr_err("%s: irq_add_domain failed\n", __func__);
		return -EINVAL;
	}
	data->irq = irq;

	/* Make sure irq is saved before return. */
	wmb();

	return 0;
}

static int wcd9xxx_irq_remove(struct platform_device *pdev)
{
	struct irq_domain *domain;
	struct wcd9xxx_irq_drv_data *data;

	domain = irq_find_host(pdev->dev.of_node);
	if (unlikely(!domain)) {
		pr_err("%s: domain is NULL\n", __func__);
		return -EINVAL;
	}
	data = (struct wcd9xxx_irq_drv_data *)domain->host_data;
	data->irq = 0;

	/* Make sure the irq variable is updated in data before irq removal. */
	wmb();
	irq_domain_remove(data->domain);
	kfree(data);
	domain->host_data = NULL;

	return 0;
}

static const struct of_device_id of_match[] = {
	{ .compatible = "qcom,wcd9xxx-irq" },
	{ }
};

static struct platform_driver wcd9xxx_irq_driver = {
	.probe = wcd9xxx_irq_probe,
	.remove = wcd9xxx_irq_remove,
	.driver = {
		.name = "wcd9xxx_intc",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(of_match),
	},
};

int wcd9xxx_irq_drv_init(void)
{
	return platform_driver_register(&wcd9xxx_irq_driver);
}

void wcd9xxx_irq_drv_exit(void)
{
	platform_driver_unregister(&wcd9xxx_irq_driver);
}
#endif /* CONFIG_OF */