// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <soc/qcom/pm.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <asoc/core.h>
#include <asoc/wcd9xxx-irq.h>
#include <asoc/wcd9xxx_registers.h>

#define BYTE_BIT_MASK(nr) (1UL << ((nr) % BITS_PER_BYTE))
#define BIT_BYTE(nr) ((nr) / BITS_PER_BYTE)

#define WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS 100

#ifndef NO_IRQ
#define NO_IRQ (-1)
#endif

#ifdef CONFIG_OF
struct wcd9xxx_irq_drv_data {
	struct irq_domain *domain;
	int irq;
};
#endif

static int virq_to_phyirq(
	struct wcd9xxx_core_resource *wcd9xxx_res, int virq);
static int phyirq_to_virq(
	struct wcd9xxx_core_resource *wcd9xxx_res, int irq);
static unsigned int wcd9xxx_irq_get_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res);
static void wcd9xxx_irq_put_downstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res);
static void wcd9xxx_irq_put_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res);
static int wcd9xxx_map_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res, int irq);
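
/*
 * wcd9xxx_irq_lock/wcd9xxx_irq_sync_unlock implement the irq_bus_lock and
 * irq_bus_sync_unlock callbacks: mask changes made while the bus lock is
 * held are cached and only written back to the codec registers on unlock.
 */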
static void wcd9xxx_irq_lock(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);

	mutex_lock(&wcd9xxx_res->irq_lock);
}

static void wcd9xxx_irq_sync_unlock(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);
	int i;

	if ((ARRAY_SIZE(wcd9xxx_res->irq_masks_cur) >
			WCD9XXX_MAX_IRQ_REGS) ||
	    (ARRAY_SIZE(wcd9xxx_res->irq_masks_cache) >
			WCD9XXX_MAX_IRQ_REGS)) {
		pr_err("%s: Array Size out of bound\n", __func__);
		goto unlock;
	}
	if (!wcd9xxx_res->wcd_core_regmap) {
		pr_err("%s: Codec core regmap not defined\n",
		       __func__);
		goto unlock;
	}

	for (i = 0; i < ARRAY_SIZE(wcd9xxx_res->irq_masks_cur); i++) {
		/*
		 * If there's been a change in the mask, write it back
		 * to the hardware.
		 */
		if (wcd9xxx_res->irq_masks_cur[i] !=
					wcd9xxx_res->irq_masks_cache[i]) {
			wcd9xxx_res->irq_masks_cache[i] =
					wcd9xxx_res->irq_masks_cur[i];
			regmap_write(wcd9xxx_res->wcd_core_regmap,
				wcd9xxx_res->intr_reg[WCD9XXX_INTR_MASK_BASE] + i,
				wcd9xxx_res->irq_masks_cur[i]);
		}
	}

unlock:
	mutex_unlock(&wcd9xxx_res->irq_lock);
}
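
/*
 * irq_enable/irq_disable only update the cached mask bits; the new mask is
 * flushed to the codec by wcd9xxx_irq_sync_unlock() when the bus lock is
 * released.
 */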
static void wcd9xxx_irq_enable(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
	int byte = BIT_BYTE(wcd9xxx_irq);
	int size = ARRAY_SIZE(wcd9xxx_res->irq_masks_cur);

	if ((byte < size) && (byte >= 0)) {
		wcd9xxx_res->irq_masks_cur[byte] &=
			~(BYTE_BIT_MASK(wcd9xxx_irq));
	} else {
		pr_err("%s: Array size is %d but index is %d: Out of range\n",
		       __func__, size, byte);
	}
}

static void wcd9xxx_irq_disable(struct irq_data *data)
{
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);
	int wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
	int byte = BIT_BYTE(wcd9xxx_irq);
	int size = ARRAY_SIZE(wcd9xxx_res->irq_masks_cur);

	if ((byte < size) && (byte >= 0)) {
		wcd9xxx_res->irq_masks_cur[byte]
			|= BYTE_BIT_MASK(wcd9xxx_irq);
	} else {
		pr_err("%s: Array size is %d but index is %d: Out of range\n",
		       __func__, size, byte);
	}
}

static void wcd9xxx_irq_ack(struct irq_data *data)
{
	int wcd9xxx_irq = 0;
	struct wcd9xxx_core_resource *wcd9xxx_res =
			irq_data_get_irq_chip_data(data);

	if (wcd9xxx_res == NULL) {
		pr_err("%s: wcd9xxx_res is NULL\n", __func__);
		return;
	}
	wcd9xxx_irq = virq_to_phyirq(wcd9xxx_res, data->irq);
	pr_debug("%s: IRQ_ACK called for WCD9XXX IRQ: %d\n",
		 __func__, wcd9xxx_irq);
}
static void wcd9xxx_irq_mask(struct irq_data *d)
{
	/*
	 * Do nothing; this callback is required because the IRQ core calls
	 * irq_mask without a NULL check.
	 */
}

static struct irq_chip wcd9xxx_irq_chip = {
	.name = "wcd9xxx",
	.irq_bus_lock = wcd9xxx_irq_lock,
	.irq_bus_sync_unlock = wcd9xxx_irq_sync_unlock,
	.irq_disable = wcd9xxx_irq_disable,
	.irq_enable = wcd9xxx_irq_enable,
	.irq_mask = wcd9xxx_irq_mask,
	.irq_ack = wcd9xxx_irq_ack,
};
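
/**
 * wcd9xxx_lock_sleep
 *
 * @wcd9xxx_res: pointer to core resource
 *
 * Keeps the system awake while codec interrupt handling is in progress.
 * Returns true on success, false if the system did not resume within
 * WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS.
 */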
bool wcd9xxx_lock_sleep(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	enum wcd9xxx_pm_state os;

	/*
	 * wcd9xxx_{lock/unlock}_sleep are called mostly by wcd9xxx_irq_thread
	 * and its subroutines. However, btn0_lpress_fn is not a subroutine of
	 * wcd9xxx_irq_thread and can race with it, so wlock_holders must be
	 * protected by a mutex.
	 *
	 * If the system has not resumed, simply return false so the codec
	 * driver's IRQ handler can return without handling the IRQ. As the
	 * interrupt line is still active, the codec will raise another IRQ to
	 * retry shortly.
	 */
	mutex_lock(&wcd9xxx_res->pm_lock);
	if (wcd9xxx_res->wlock_holders++ == 0) {
		pr_debug("%s: holding wake lock\n", __func__);
		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
				      msm_cpuidle_get_deep_idle_latency());
		pm_stay_awake(wcd9xxx_res->dev);
	}
	mutex_unlock(&wcd9xxx_res->pm_lock);

	if (!wait_event_timeout(wcd9xxx_res->pm_wq,
				((os = wcd9xxx_pm_cmpxchg(wcd9xxx_res,
						WCD9XXX_PM_SLEEPABLE,
						WCD9XXX_PM_AWAKE)) ==
						WCD9XXX_PM_SLEEPABLE ||
				 (os == WCD9XXX_PM_AWAKE)),
				msecs_to_jiffies(
					WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
		pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
			__func__,
			WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, wcd9xxx_res->pm_state,
			wcd9xxx_res->wlock_holders);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return false;
	}
	wake_up_all(&wcd9xxx_res->pm_wq);

	return true;
}
EXPORT_SYMBOL(wcd9xxx_lock_sleep);
void wcd9xxx_unlock_sleep(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_lock(&wcd9xxx_res->pm_lock);
	if (--wcd9xxx_res->wlock_holders == 0) {
		pr_debug("%s: releasing wake lock pm_state %d -> %d\n",
			 __func__, wcd9xxx_res->pm_state, WCD9XXX_PM_SLEEPABLE);
		/*
		 * If wcd9xxx_lock_sleep failed, pm_state would still be
		 * WCD9XXX_PM_ASLEEP; don't overwrite it.
		 */
		if (likely(wcd9xxx_res->pm_state == WCD9XXX_PM_AWAKE))
			wcd9xxx_res->pm_state = WCD9XXX_PM_SLEEPABLE;
		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
				      PM_QOS_DEFAULT_VALUE);
		pm_relax(wcd9xxx_res->dev);
	}
	mutex_unlock(&wcd9xxx_res->pm_lock);
	wake_up_all(&wcd9xxx_res->pm_wq);
}
EXPORT_SYMBOL(wcd9xxx_unlock_sleep);

void wcd9xxx_nested_irq_lock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_lock(&wcd9xxx_res->nested_irq_lock);
}

void wcd9xxx_nested_irq_unlock(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	mutex_unlock(&wcd9xxx_res->nested_irq_lock);
}
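
/*
 * Dispatch one codec interrupt to its nested handler. Depending on
 * irqdata->clear_first, the interrupt status bit is cleared either before
 * or after the nested handler runs; on I2C the clear is committed with an
 * explicit write to the commit register.
 */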
static void wcd9xxx_irq_dispatch(struct wcd9xxx_core_resource *wcd9xxx_res,
				 struct intr_data *irqdata)
{
	int irqbit = irqdata->intr_num;

	if (!wcd9xxx_res->wcd_core_regmap) {
		pr_err("%s: codec core regmap not defined\n",
		       __func__);
		return;
	}

	if (irqdata->clear_first) {
		wcd9xxx_nested_irq_lock(wcd9xxx_res);
		regmap_write(wcd9xxx_res->wcd_core_regmap,
			     wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE] +
			     BIT_BYTE(irqbit),
			     BYTE_BIT_MASK(irqbit));
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			regmap_write(wcd9xxx_res->wcd_core_regmap,
				     wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
				     0x02);
		handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
		wcd9xxx_nested_irq_unlock(wcd9xxx_res);
	} else {
		wcd9xxx_nested_irq_lock(wcd9xxx_res);
		handle_nested_irq(phyirq_to_virq(wcd9xxx_res, irqbit));
		regmap_write(wcd9xxx_res->wcd_core_regmap,
			     wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE] +
			     BIT_BYTE(irqbit),
			     BYTE_BIT_MASK(irqbit));
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			regmap_write(wcd9xxx_res->wcd_core_regmap,
				     wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
				     0x02);
		wcd9xxx_nested_irq_unlock(wcd9xxx_res);
	}
}
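
/*
 * Primary threaded handler for the codec's single upstream interrupt line.
 * It reads the interrupt status registers, applies the current mask, walks
 * the interrupt table to dispatch each pending interrupt to its nested
 * handler, and clears any interrupts left unhandled to avoid an interrupt
 * storm.
 */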
static irqreturn_t wcd9xxx_irq_thread(int irq, void *data)
{
	int ret;
	int i;
	struct intr_data irqdata;
	char linebuf[128];
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 1);
	struct wcd9xxx_core_resource *wcd9xxx_res = data;
	int num_irq_regs = wcd9xxx_res->num_irq_regs;
	struct wcd9xxx *wcd9xxx;
	u8 status[4], status1[4] = {0}, unmask_status[4] = {0};

	if (unlikely(wcd9xxx_lock_sleep(wcd9xxx_res) == false)) {
		dev_err(wcd9xxx_res->dev, "Failed to hold suspend\n");
		return IRQ_NONE;
	}

	if (!wcd9xxx_res->wcd_core_regmap) {
		dev_err(wcd9xxx_res->dev,
			"%s: Codec core regmap not supplied\n",
			__func__);
		goto err_disable_irq;
	}

	wcd9xxx = (struct wcd9xxx *)wcd9xxx_res->parent;
	if (!wcd9xxx) {
		dev_err(wcd9xxx_res->dev,
			"%s: Codec core not supplied\n", __func__);
		goto err_disable_irq;
	}

	if (!wcd9xxx->dev_up) {
		dev_info_ratelimited(wcd9xxx_res->dev, "wcd9xxx dev not up\n");
		/*
		 * Sleep so we do not block the core while the device is not
		 * up (slimbus will not be available) to process interrupts.
		 */
		msleep(10);
	}

	memset(status, 0, sizeof(status));
	ret = regmap_bulk_read(wcd9xxx_res->wcd_core_regmap,
			       wcd9xxx_res->intr_reg[WCD9XXX_INTR_STATUS_BASE],
			       status, num_irq_regs);
	if (ret < 0) {
		dev_err(wcd9xxx_res->dev,
			"Failed to read interrupt status: %d\n", ret);
		goto err_disable_irq;
	}

	/*
	 * If status is 0, return without clearing.
	 * status contains: HW status - masked interrupts
	 * status1 contains: unhandled interrupts - masked interrupts
	 * unmask_status contains: unhandled interrupts
	 */
	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
		pr_debug("%s: status is 0\n", __func__);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return IRQ_HANDLED;
	}

	/*
	 * Copy status to unmask_status before masking, otherwise SW may miss
	 * clearing a masked interrupt in a corner case.
	 */
	memcpy(unmask_status, status, sizeof(unmask_status));

	/* Apply masking */
	for (i = 0; i < num_irq_regs; i++)
		status[i] &= ~wcd9xxx_res->irq_masks_cur[i];
	memcpy(status1, status, sizeof(status1));

	/*
	 * Find out which interrupt was triggered and call that interrupt's
	 * handler function.
	 *
	 * Since the codec has only one hardware irq line, shared by the
	 * codec's different internal interrupts, the master irq handler may
	 * dispatch multiple nested irq handlers. Dispatch interrupts in the
	 * order maintained by the interrupt table.
	 */
	for (i = 0; i < wcd9xxx_res->intr_table_size; i++) {
		irqdata = wcd9xxx_res->intr_table[i];
		if (status[BIT_BYTE(irqdata.intr_num)] &
		    BYTE_BIT_MASK(irqdata.intr_num)) {
			wcd9xxx_irq_dispatch(wcd9xxx_res, &irqdata);
			status1[BIT_BYTE(irqdata.intr_num)] &=
					~BYTE_BIT_MASK(irqdata.intr_num);
			unmask_status[BIT_BYTE(irqdata.intr_num)] &=
					~BYTE_BIT_MASK(irqdata.intr_num);
		}
	}

	/*
	 * As a failsafe, if an unhandled irq is found, clear it to prevent an
	 * interrupt storm.
	 * Note that an irq counts as unhandled only when no irq was handled
	 * by a nested irq handler, since Taiko supports qdsp as the
	 * destination for a few irqs. Therefore the driver must not clear
	 * pending irqs when some were handled while others were not.
	 */
	if (unlikely(!memcmp(status, status1, sizeof(status)))) {
		if (__ratelimit(&ratelimit)) {
			pr_warn("%s: Unhandled irq found\n", __func__);
			hex_dump_to_buffer(status, sizeof(status), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status0 : %s\n", __func__, linebuf);
			hex_dump_to_buffer(status1, sizeof(status1), 16, 1,
					   linebuf, sizeof(linebuf), false);
			pr_warn("%s: status1 : %s\n", __func__, linebuf);
		}
		/*
		 * unmask_status contains unhandled interrupts, hence clear all
		 * unhandled interrupts.
		 */
		ret = regmap_bulk_write(wcd9xxx_res->wcd_core_regmap,
			wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLEAR_BASE],
			unmask_status, num_irq_regs);
		if (wcd9xxx_get_intf_type() == WCD9XXX_INTERFACE_TYPE_I2C)
			regmap_write(wcd9xxx_res->wcd_core_regmap,
				wcd9xxx_res->intr_reg[WCD9XXX_INTR_CLR_COMMIT],
				0x02);
	}
	wcd9xxx_unlock_sleep(wcd9xxx_res);

	return IRQ_HANDLED;

err_disable_irq:
	dev_err(wcd9xxx_res->dev,
		"Disable irq %d\n", wcd9xxx_res->irq);

	disable_irq_wake(wcd9xxx_res->irq);
	disable_irq_nosync(wcd9xxx_res->irq);
	wcd9xxx_unlock_sleep(wcd9xxx_res);
	return IRQ_NONE;
}

/**
 * wcd9xxx_free_irq
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 * @data: data pointer
 *
 */
void wcd9xxx_free_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
		      int irq, void *data)
{
	free_irq(phyirq_to_virq(wcd9xxx_res, irq), data);
}
EXPORT_SYMBOL(wcd9xxx_free_irq);

/**
 * wcd9xxx_enable_irq
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 *
 */
void wcd9xxx_enable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	if (wcd9xxx_res->irq)
		enable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_enable_irq);

/**
 * wcd9xxx_disable_irq
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 *
 */
void wcd9xxx_disable_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	if (wcd9xxx_res->irq)
		disable_irq_nosync(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_disable_irq);

/**
 * wcd9xxx_disable_irq_sync
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 *
 */
void wcd9xxx_disable_irq_sync(
	struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	if (wcd9xxx_res->irq)
		disable_irq(phyirq_to_virq(wcd9xxx_res, irq));
}
EXPORT_SYMBOL(wcd9xxx_disable_irq_sync);
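
/*
 * Map every codec interrupt to a virtual irq in the parent domain and
 * register the wcd9xxx irq_chip with a level or edge handler for each,
 * marking them as nested threaded interrupts.
 */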
static int wcd9xxx_irq_setup_downstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	int irq, virq, ret;

	pr_debug("%s: enter\n", __func__);

	for (irq = 0; irq < wcd9xxx_res->num_irqs; irq++) {
		/* Map OF irq */
		virq = wcd9xxx_map_irq(wcd9xxx_res, irq);
		pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);
		if (virq == NO_IRQ) {
			pr_err("%s, No interrupt specifier for irq %d\n",
			       __func__, irq);
			return NO_IRQ;
		}

		ret = irq_set_chip_data(virq, wcd9xxx_res);
		if (ret) {
			pr_err("%s: Failed to configure irq %d (%d)\n",
			       __func__, irq, ret);
			return ret;
		}

		if (wcd9xxx_res->irq_level_high[irq])
			irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
						 handle_level_irq);
		else
			irq_set_chip_and_handler(virq, &wcd9xxx_irq_chip,
						 handle_edge_irq);

		irq_set_nested_thread(virq, 1);
	}

	pr_debug("%s: leave\n", __func__);

	return 0;
}

/**
 * wcd9xxx_irq_init
 *
 * @wcd9xxx_res: pointer to core resource
 *
 * Returns 0 on success, appropriate error code otherwise
 */
int wcd9xxx_irq_init(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	int i, ret;
	u8 *irq_level = NULL;
	struct irq_domain *domain;
	struct device_node *pnode;

	mutex_init(&wcd9xxx_res->irq_lock);
	mutex_init(&wcd9xxx_res->nested_irq_lock);

	pnode = of_irq_find_parent(wcd9xxx_res->dev->of_node);
	if (unlikely(!pnode))
		return -EINVAL;

	domain = irq_find_host(pnode);
	if (unlikely(!domain))
		return -EINVAL;

	wcd9xxx_res->domain = domain;

	wcd9xxx_res->irq = wcd9xxx_irq_get_upstream_irq(wcd9xxx_res);
	if (!wcd9xxx_res->irq) {
		pr_warn("%s: irq driver is not yet initialized\n", __func__);
		mutex_destroy(&wcd9xxx_res->irq_lock);
		mutex_destroy(&wcd9xxx_res->nested_irq_lock);
		return -EPROBE_DEFER;
	}
	pr_debug("%s: probed irq %d\n", __func__, wcd9xxx_res->irq);

	/* Setup downstream IRQs */
	ret = wcd9xxx_irq_setup_downstream_irq(wcd9xxx_res);
	if (ret) {
		pr_err("%s: Failed to setup downstream IRQ\n", __func__);
		goto fail_irq_level;
	}

	/*
	 * IRQ 0 is level triggered; all other wcd9xxx interrupts are
	 * edge triggered.
	 */
	wcd9xxx_res->irq_level_high[0] = true;

	/* Mask all the interrupts */
	irq_level = kzalloc(wcd9xxx_res->num_irq_regs, GFP_KERNEL);
	if (!irq_level) {
		ret = -ENOMEM;
		goto fail_irq_level;
	}
	for (i = 0; i < wcd9xxx_res->num_irqs; i++) {
		wcd9xxx_res->irq_masks_cur[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		wcd9xxx_res->irq_masks_cache[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
		irq_level[BIT_BYTE(i)] |=
			wcd9xxx_res->irq_level_high[i] << (i % BITS_PER_BYTE);
	}

	if (!wcd9xxx_res->wcd_core_regmap) {
		dev_err(wcd9xxx_res->dev,
			"%s: Codec core regmap not defined\n",
			__func__);
		ret = -EINVAL;
		goto fail_irq_init;
	}

	for (i = 0; i < wcd9xxx_res->num_irq_regs; i++) {
		/* Initialize interrupt mask and level registers */
		regmap_write(wcd9xxx_res->wcd_core_regmap,
			     wcd9xxx_res->intr_reg[WCD9XXX_INTR_LEVEL_BASE] + i,
			     irq_level[i]);
		regmap_write(wcd9xxx_res->wcd_core_regmap,
			     wcd9xxx_res->intr_reg[WCD9XXX_INTR_MASK_BASE] + i,
			     wcd9xxx_res->irq_masks_cur[i]);
	}

	ret = request_threaded_irq(wcd9xxx_res->irq, NULL, wcd9xxx_irq_thread,
				   IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
				   "wcd9xxx", wcd9xxx_res);
	if (ret != 0)
		dev_err(wcd9xxx_res->dev, "Failed to request IRQ %d: %d\n",
			wcd9xxx_res->irq, ret);
	else {
		ret = enable_irq_wake(wcd9xxx_res->irq);
		if (ret)
			dev_err(wcd9xxx_res->dev,
				"Failed to set wake interrupt on IRQ %d: %d\n",
				wcd9xxx_res->irq, ret);
		if (ret)
			free_irq(wcd9xxx_res->irq, wcd9xxx_res);
	}

	if (ret)
		goto fail_irq_init;

	kfree(irq_level);

	return ret;

fail_irq_init:
	dev_err(wcd9xxx_res->dev,
		"%s: Failed to init wcd9xxx irq\n", __func__);
	kfree(irq_level);
fail_irq_level:
	wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
	mutex_destroy(&wcd9xxx_res->irq_lock);
	mutex_destroy(&wcd9xxx_res->nested_irq_lock);
	return ret;
}
EXPORT_SYMBOL(wcd9xxx_irq_init);
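
/**
 * wcd9xxx_request_irq
 *
 * @wcd9xxx_res: pointer to core resource
 * @irq: irq number
 * @handler: interrupt handler
 * @name: interrupt name
 * @data: data pointer
 *
 * Requests a threaded handler for the given codec interrupt on its mapped
 * virtual irq.
 */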
int wcd9xxx_request_irq(struct wcd9xxx_core_resource *wcd9xxx_res,
			int irq, irq_handler_t handler,
			const char *name, void *data)
{
	int virq;

	virq = phyirq_to_virq(wcd9xxx_res, irq);

	return request_threaded_irq(virq, NULL, handler, IRQF_TRIGGER_RISING,
				    name, data);
}
EXPORT_SYMBOL(wcd9xxx_request_irq);
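
/*
 * Tear down interrupt handling: release the upstream irq and drop the chip
 * data and handlers installed for the downstream virtual irqs.
 */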
void wcd9xxx_irq_exit(struct wcd9xxx_core_resource *wcd9xxx_res)
{
	dev_dbg(wcd9xxx_res->dev, "%s: Cleaning up irq %d\n", __func__,
		wcd9xxx_res->irq);

	if (wcd9xxx_res->irq) {
		disable_irq_wake(wcd9xxx_res->irq);
		free_irq(wcd9xxx_res->irq, wcd9xxx_res);
		wcd9xxx_res->irq = 0;
		wcd9xxx_irq_put_downstream_irq(wcd9xxx_res);
		wcd9xxx_irq_put_upstream_irq(wcd9xxx_res);
	}
	mutex_destroy(&wcd9xxx_res->irq_lock);
	mutex_destroy(&wcd9xxx_res->nested_irq_lock);
}
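
/*
 * Without CONFIG_OF, codec interrupts map linearly onto a fixed irq_base;
 * with CONFIG_OF, they are resolved through the wcd9xxx_intc irq domain
 * registered by the platform driver below.
 */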
#ifndef CONFIG_OF
static int phyirq_to_virq(
	struct wcd9xxx_core_resource *wcd9xxx_res,
	int offset)
{
	return wcd9xxx_res->irq_base + offset;
}

static int virq_to_phyirq(
	struct wcd9xxx_core_resource *wcd9xxx_res,
	int virq)
{
	return virq - wcd9xxx_res->irq_base;
}

static unsigned int wcd9xxx_irq_get_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	return wcd9xxx_res->irq;
}

static void wcd9xxx_irq_put_downstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	/* Do nothing; there is no irq domain to unwind in the non-OF case */
}

static void wcd9xxx_irq_put_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	/* Do nothing */
}

static int wcd9xxx_map_irq(
	struct wcd9xxx_core_resource *wcd9xxx_core_res, int irq)
{
	return phyirq_to_virq(wcd9xxx_core_res, irq);
}
#else
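
/*
 * Allocate the driver data and register a linear irq domain for the
 * wcd9xxx_intc node so each codec interrupt offset can be mapped to a
 * Linux virtual irq.
 */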
static struct wcd9xxx_irq_drv_data *
wcd9xxx_irq_add_domain(struct device_node *node,
		       struct device_node *parent)
{
	struct wcd9xxx_irq_drv_data *data = NULL;

	pr_debug("%s: node %s, node parent %s\n", __func__,
		 node->name, node->parent->name);

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	/*
	 * wcd9xxx_intc interrupt controller supports N to N irq mapping with
	 * single cell binding with irq numbers(offsets) only.
	 * Use irq_domain_simple_ops that has irq_domain_simple_map and
	 * irq_domain_xlate_onetwocell.
	 */
	data->domain = irq_domain_add_linear(node, WCD9XXX_MAX_NUM_IRQS,
					     &irq_domain_simple_ops, data);
	if (!data->domain) {
		kfree(data);
		return NULL;
	}

	return data;
}

static struct wcd9xxx_irq_drv_data *
wcd9xxx_get_irq_drv_d(const struct wcd9xxx_core_resource *wcd9xxx_res)
{
	struct irq_domain *domain;

	domain = wcd9xxx_res->domain;

	if (domain)
		return domain->host_data;
	else
		return NULL;
}

static int phyirq_to_virq(struct wcd9xxx_core_resource *wcd9xxx_res, int offset)
{
	struct wcd9xxx_irq_drv_data *data;

	data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
	if (!data) {
		pr_warn("%s: not registered to interrupt controller\n",
			__func__);
		return -EINVAL;
	}
	return irq_linear_revmap(data->domain, offset);
}

static int virq_to_phyirq(struct wcd9xxx_core_resource *wcd9xxx_res, int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	if (unlikely(!irq_data)) {
		pr_err("%s: irq_data is NULL", __func__);
		return -EINVAL;
	}
	return irq_data->hwirq;
}
static unsigned int wcd9xxx_irq_get_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	struct wcd9xxx_irq_drv_data *data;

	data = wcd9xxx_get_irq_drv_d(wcd9xxx_res);
	if (!data) {
		pr_err("%s: interrupt controller is not registered\n",
		       __func__);
		return 0;
	}

	/* Make sure data is updated before return. */
	rmb();
	return data->irq;
}

static void wcd9xxx_irq_put_downstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	int irq, virq, ret;

	/*
	 * IRQ migration hits an error if the chip data and handlers are not
	 * cleared. Set the associated data and handlers to NULL at irq_exit.
	 */
	for (irq = 0; irq < wcd9xxx_res->num_irqs; irq++) {
		virq = wcd9xxx_map_irq(wcd9xxx_res, irq);
		pr_debug("%s: irq %d -> %d\n", __func__, irq, virq);

		ret = irq_set_chip_data(virq, NULL);
		if (ret) {
			pr_err("%s: Failed to configure irq %d (%d)\n",
			       __func__, irq, ret);
			return;
		}
		irq_set_chip_and_handler(virq, NULL, NULL);
	}
}

static void wcd9xxx_irq_put_upstream_irq(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	wcd9xxx_res->domain = NULL;
}

static int wcd9xxx_map_irq(struct wcd9xxx_core_resource *wcd9xxx_res, int irq)
{
	return of_irq_to_resource(wcd9xxx_res->dev->of_node, irq, NULL);
}
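
/*
 * Platform driver probe for the wcd9xxx_intc node: resolve the upstream
 * interrupt either from the "qcom,gpio-connect" GPIO or from the "wcd_irq"
 * named resource, register the irq domain, and stash the upstream irq in
 * the domain's host data.
 */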
static int wcd9xxx_irq_probe(struct platform_device *pdev)
{
	int irq, dir_apps_irq = -EINVAL;
	struct wcd9xxx_irq_drv_data *data;
	struct device_node *node = pdev->dev.of_node;
	int ret = -EINVAL;

	irq = of_get_named_gpio(node, "qcom,gpio-connect", 0);
	if (!gpio_is_valid(irq))
		dir_apps_irq = platform_get_irq_byname(pdev, "wcd_irq");

	if (!gpio_is_valid(irq) && dir_apps_irq < 0) {
		dev_err(&pdev->dev, "TLMM connect gpio not found\n");
		return -EPROBE_DEFER;
	}
	if (dir_apps_irq > 0) {
		irq = dir_apps_irq;
	} else {
		irq = gpio_to_irq(irq);
		if (irq < 0) {
			dev_err(&pdev->dev, "Unable to configure irq\n");
			return irq;
		}
	}
	dev_dbg(&pdev->dev, "%s: virq = %d\n", __func__, irq);
	data = wcd9xxx_irq_add_domain(node, node->parent);
	if (!data) {
		pr_err("%s: irq_add_domain failed\n", __func__);
		return -EINVAL;
	}
	data->irq = irq;

	/* Make sure irq is saved before return. */
	wmb();
	ret = 0;

	return ret;
}

static int wcd9xxx_irq_remove(struct platform_device *pdev)
{
	struct irq_domain *domain;
	struct wcd9xxx_irq_drv_data *data;

	domain = irq_find_host(pdev->dev.of_node);
	if (unlikely(!domain)) {
		pr_err("%s: domain is NULL", __func__);
		return -EINVAL;
	}
	data = (struct wcd9xxx_irq_drv_data *)domain->host_data;
	data->irq = 0;

	/* Make sure irq variable is updated in data, before irq removal. */
	wmb();
	irq_domain_remove(data->domain);
	kfree(data);

	return 0;
}
static const struct of_device_id of_match[] = {
	{ .compatible = "qcom,wcd9xxx-irq" },
	{ }
};

static struct platform_driver wcd9xxx_irq_driver = {
	.probe = wcd9xxx_irq_probe,
	.remove = wcd9xxx_irq_remove,
	.driver = {
		.name = "wcd9xxx_intc",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(of_match),
		.suppress_bind_attrs = true,
	},
};

int wcd9xxx_irq_drv_init(void)
{
	return platform_driver_register(&wcd9xxx_irq_driver);
}

void wcd9xxx_irq_drv_exit(void)
{
	platform_driver_unregister(&wcd9xxx_irq_driver);
}
#endif /* CONFIG_OF */