sdm660-cdc-irq.c

/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/of_irq.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spmi.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/interrupt.h>
#include <linux/pm_qos.h>
#include <soc/qcom/pm.h>
#include <sound/soc.h>
#include "msm-analog-cdc.h"
#include "sdm660-cdc-irq.h"
#include "sdm660-cdc-registers.h"

#define MAX_NUM_IRQS 14
#define NUM_IRQ_REGS 2
#define WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS 700
#define BYTE_BIT_MASK(nr)       (1UL << ((nr) % BITS_PER_BYTE))
#define BIT_BYTE(nr)            ((nr) / BITS_PER_BYTE)
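/*
 * The 14 codec interrupts are packed into two 8-bit PMIC interrupt
 * registers: BIT_BYTE() selects the register index (0 or 1) for a given
 * interrupt number and BYTE_BIT_MASK() gives its bit within that register.
 * The mask[] and handled[] bitmaps below use the same layout.
 */
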
static irqreturn_t wcd9xxx_spmi_irq_handler(int linux_irq, void *data);

char *irq_names[MAX_NUM_IRQS] = {
        "spk_cnp_int",
        "spk_clip_int",
        "spk_ocp_int",
        "ins_rem_det1",
        "but_rel_det",
        "but_press_det",
        "ins_rem_det",
        "mbhc_int",
        "ear_ocp_int",
        "hphr_ocp_int",
        "hphl_ocp_det",
        "ear_cnp_int",
        "hphr_cnp_int",
        "hphl_cnp_int"
};

int order[MAX_NUM_IRQS] = {
        MSM89XX_IRQ_SPKR_CNP,
        MSM89XX_IRQ_SPKR_CLIP,
        MSM89XX_IRQ_SPKR_OCP,
        MSM89XX_IRQ_MBHC_INSREM_DET1,
        MSM89XX_IRQ_MBHC_RELEASE,
        MSM89XX_IRQ_MBHC_PRESS,
        MSM89XX_IRQ_MBHC_INSREM_DET,
        MSM89XX_IRQ_MBHC_HS_DET,
        MSM89XX_IRQ_EAR_OCP,
        MSM89XX_IRQ_HPHR_OCP,
        MSM89XX_IRQ_HPHL_OCP,
        MSM89XX_IRQ_EAR_CNP,
        MSM89XX_IRQ_HPHR_CNP,
        MSM89XX_IRQ_HPHL_CNP,
};

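/*
 * Power-management state of the codec interrupt layer:
 * SLEEPABLE - nothing holds the sleep lock, the system may suspend;
 * AWAKE     - an interrupt thread holds the sleep lock (wlock_holders > 0);
 * ASLEEP    - the suspend path has run wcd9xxx_spmi_suspend().
 * Transitions are serialized by pm_lock and announced on pm_wq.
 */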
enum wcd9xxx_spmi_pm_state {
        WCD9XXX_PM_SLEEPABLE,
        WCD9XXX_PM_AWAKE,
        WCD9XXX_PM_ASLEEP,
};

struct wcd9xxx_spmi_map {
        uint8_t handled[NUM_IRQ_REGS];
        uint8_t mask[NUM_IRQ_REGS];
        int linuxirq[MAX_NUM_IRQS];
        irq_handler_t handler[MAX_NUM_IRQS];
        struct platform_device *spmi[NUM_IRQ_REGS];
        struct snd_soc_codec *codec;
        enum wcd9xxx_spmi_pm_state pm_state;
        struct mutex pm_lock;
        /* pm_wq notifies change of pm_state */
        wait_queue_head_t pm_wq;
        struct pm_qos_request pm_qos_req;
        int wlock_holders;
};

struct wcd9xxx_spmi_map map;

void wcd9xxx_spmi_enable_irq(int irq)
{
        pr_debug("%s: irqno =%d\n", __func__, irq);
        if (!(map.mask[BIT_BYTE(irq)] & (BYTE_BIT_MASK(irq))))
                return;
        map.mask[BIT_BYTE(irq)] &=
                ~(BYTE_BIT_MASK(irq));
        enable_irq(map.linuxirq[irq]);
}

void wcd9xxx_spmi_disable_irq(int irq)
{
        pr_debug("%s: irqno =%d\n", __func__, irq);
        if (map.mask[BIT_BYTE(irq)] & (BYTE_BIT_MASK(irq)))
                return;
        map.mask[BIT_BYTE(irq)] |=
                (BYTE_BIT_MASK(irq));
        disable_irq_nosync(map.linuxirq[irq]);
}

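/*
 * Map a codec interrupt index to its Linux IRQ (looked up by name on the
 * SPMI platform device registered via wcd9xxx_spmi_set_dev()), install
 * wcd9xxx_spmi_irq_handler() as the threaded handler, record the caller's
 * handler for later dispatch, unmask the interrupt and mark it wake-capable.
 * Only the "mbhc sw intr" line additionally gets IRQF_NO_SUSPEND.
 */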
int wcd9xxx_spmi_request_irq(int irq, irq_handler_t handler,
                             const char *name, void *priv)
{
        int rc;
        unsigned long irq_flags;

        map.linuxirq[irq] =
                platform_get_irq_byname(map.spmi[BIT_BYTE(irq)],
                                        irq_names[irq]);

        if (strcmp(name, "mbhc sw intr"))
                irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
                            IRQF_ONESHOT;
        else
                irq_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
                            IRQF_ONESHOT | IRQF_NO_SUSPEND;
        pr_debug("%s: name:%s irq_flags = %lx\n", __func__, name, irq_flags);

        rc = devm_request_threaded_irq(&map.spmi[BIT_BYTE(irq)]->dev,
                                       map.linuxirq[irq], NULL,
                                       wcd9xxx_spmi_irq_handler,
                                       irq_flags,
                                       name, priv);
        if (rc < 0) {
                dev_err(&map.spmi[BIT_BYTE(irq)]->dev,
                        "Can't request %d IRQ\n", irq);
                return rc;
        }

        dev_dbg(&map.spmi[BIT_BYTE(irq)]->dev,
                "irq %d linuxIRQ: %d\n", irq, map.linuxirq[irq]);
        map.mask[BIT_BYTE(irq)] &= ~BYTE_BIT_MASK(irq);
        map.handler[irq] = handler;
        enable_irq_wake(map.linuxirq[irq]);
        return 0;
}

int wcd9xxx_spmi_free_irq(int irq, void *priv)
{
        devm_free_irq(&map.spmi[BIT_BYTE(irq)]->dev, map.linuxirq[irq],
                      priv);
        map.mask[BIT_BYTE(irq)] |= BYTE_BIT_MASK(irq);
        return 0;
}

static int get_irq_bit(int linux_irq)
{
        int i = 0;

        for (; i < MAX_NUM_IRQS; i++)
                if (map.linuxirq[i] == linux_irq)
                        return i;

        return i;
}

static int get_order_irq(int i)
{
        return order[i];
}

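/*
 * Threaded handler shared by all requested codec interrupts: hold the
 * sleep lock, read the latched interrupt status from the PMIC digital
 * block, drop masked bits, then dispatch the registered handlers in the
 * priority given by order[], skipping any interrupt already marked as
 * handled in this pass.
 */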
static irqreturn_t wcd9xxx_spmi_irq_handler(int linux_irq, void *data)
{
        int irq, i, j;
        unsigned long status[NUM_IRQ_REGS] = {0};

        if (unlikely(wcd9xxx_spmi_lock_sleep() == false)) {
                pr_err("Failed to hold suspend\n");
                return IRQ_NONE;
        }

        irq = get_irq_bit(linux_irq);
        if (irq == MAX_NUM_IRQS) {
                /* unknown Linux IRQ: release the sleep lock taken above */
                wcd9xxx_spmi_unlock_sleep();
                return IRQ_HANDLED;
        }

        status[BIT_BYTE(irq)] |= BYTE_BIT_MASK(irq);
        for (i = 0; i < NUM_IRQ_REGS; i++) {
                status[i] |= snd_soc_read(map.codec,
                                BIT_BYTE(irq) * 0x100 +
                                MSM89XX_PMIC_DIGITAL_INT_LATCHED_STS);
                status[i] &= ~map.mask[i];
        }

        for (i = 0; i < MAX_NUM_IRQS; i++) {
                j = get_order_irq(i);
                if ((status[BIT_BYTE(j)] & BYTE_BIT_MASK(j)) &&
                    ((map.handled[BIT_BYTE(j)] &
                      BYTE_BIT_MASK(j)) == 0)) {
                        map.handler[j](irq, data);
                        map.handled[BIT_BYTE(j)] |=
                                        BYTE_BIT_MASK(j);
                }
        }
        map.handled[BIT_BYTE(irq)] &= ~BYTE_BIT_MASK(irq);

        wcd9xxx_spmi_unlock_sleep();
        return IRQ_HANDLED;
}

enum wcd9xxx_spmi_pm_state wcd9xxx_spmi_pm_cmpxchg(
                        enum wcd9xxx_spmi_pm_state o,
                        enum wcd9xxx_spmi_pm_state n)
{
        enum wcd9xxx_spmi_pm_state old;

        mutex_lock(&map.pm_lock);
        old = map.pm_state;
        if (old == o)
                map.pm_state = n;
        pr_debug("%s: map.pm_state = %d\n", __func__, map.pm_state);
        mutex_unlock(&map.pm_lock);

        return old;
}
EXPORT_SYMBOL(wcd9xxx_spmi_pm_cmpxchg);

int wcd9xxx_spmi_suspend(pm_message_t pmesg)
{
        int ret = 0;

        pr_debug("%s: enter\n", __func__);
        /*
         * pm_qos_update_request() can be called after this suspend chain
         * has started, so suspend can be called while the lock is held.
         */
        mutex_lock(&map.pm_lock);
        if (map.pm_state == WCD9XXX_PM_SLEEPABLE) {
                pr_debug("%s: suspending system, state %d, wlock %d\n",
                         __func__, map.pm_state,
                         map.wlock_holders);
                map.pm_state = WCD9XXX_PM_ASLEEP;
        } else if (map.pm_state == WCD9XXX_PM_AWAKE) {
                /*
                 * unlock to wait for pm_state == WCD9XXX_PM_SLEEPABLE,
                 * then set it to WCD9XXX_PM_ASLEEP
                 */
                pr_debug("%s: waiting to suspend system, state %d, wlock %d\n",
                         __func__, map.pm_state,
                         map.wlock_holders);
                mutex_unlock(&map.pm_lock);
                if (!(wait_event_timeout(map.pm_wq,
                                         wcd9xxx_spmi_pm_cmpxchg(
                                                WCD9XXX_PM_SLEEPABLE,
                                                WCD9XXX_PM_ASLEEP) ==
                                                WCD9XXX_PM_SLEEPABLE,
                                         HZ))) {
                        pr_debug("%s: suspend failed state %d, wlock %d\n",
                                 __func__, map.pm_state,
                                 map.wlock_holders);
                        ret = -EBUSY;
                } else {
                        pr_debug("%s: done, state %d, wlock %d\n", __func__,
                                 map.pm_state,
                                 map.wlock_holders);
                }
                mutex_lock(&map.pm_lock);
        } else if (map.pm_state == WCD9XXX_PM_ASLEEP) {
                pr_warn("%s: system is already suspended, state %d, wlock %d\n",
                        __func__, map.pm_state,
                        map.wlock_holders);
        }
        mutex_unlock(&map.pm_lock);

        return ret;
}
EXPORT_SYMBOL(wcd9xxx_spmi_suspend);

int wcd9xxx_spmi_resume(void)
{
        int ret = 0;

        pr_debug("%s: enter\n", __func__);
        mutex_lock(&map.pm_lock);
        if (map.pm_state == WCD9XXX_PM_ASLEEP) {
                pr_debug("%s: resuming system, state %d, wlock %d\n", __func__,
                         map.pm_state,
                         map.wlock_holders);
                map.pm_state = WCD9XXX_PM_SLEEPABLE;
        } else {
                pr_warn("%s: system is already awake, state %d wlock %d\n",
                        __func__, map.pm_state,
                        map.wlock_holders);
        }
        mutex_unlock(&map.pm_lock);
        wake_up_all(&map.pm_wq);

        return ret;
}
EXPORT_SYMBOL(wcd9xxx_spmi_resume);

bool wcd9xxx_spmi_lock_sleep(void)
{
        /*
         * wcd9xxx_spmi_{lock,unlock}_sleep are mostly called from
         * wcd9xxx_spmi_irq_thread and its subroutines, but btn0_lpress_fn
         * is not one of those subroutines and can race with
         * wcd9xxx_spmi_irq_thread, so wlock_holders must be protected by
         * the mutex.
         */
        mutex_lock(&map.pm_lock);
        if (map.wlock_holders++ == 0) {
                pr_debug("%s: holding wake lock\n", __func__);
                pm_qos_update_request(&map.pm_qos_req,
                                      msm_cpuidle_get_deep_idle_latency());
                pm_stay_awake(&map.spmi[0]->dev);
        }
        mutex_unlock(&map.pm_lock);
        pr_debug("%s: wake lock counter %d\n", __func__,
                 map.wlock_holders);
        pr_debug("%s: map.pm_state = %d\n", __func__, map.pm_state);

        if (!wait_event_timeout(map.pm_wq,
                                ((wcd9xxx_spmi_pm_cmpxchg(
                                        WCD9XXX_PM_SLEEPABLE,
                                        WCD9XXX_PM_AWAKE)) ==
                                        WCD9XXX_PM_SLEEPABLE ||
                                 (wcd9xxx_spmi_pm_cmpxchg(
                                        WCD9XXX_PM_SLEEPABLE,
                                        WCD9XXX_PM_AWAKE) ==
                                        WCD9XXX_PM_AWAKE)),
                                msecs_to_jiffies(
                                        WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
                pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
                        __func__,
                        WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, map.pm_state,
                        map.wlock_holders);
                wcd9xxx_spmi_unlock_sleep();
                return false;
        }
        wake_up_all(&map.pm_wq);
        pr_debug("%s: leaving pm_state = %d\n", __func__, map.pm_state);

        return true;
}
EXPORT_SYMBOL(wcd9xxx_spmi_lock_sleep);

void wcd9xxx_spmi_unlock_sleep(void)
{
        mutex_lock(&map.pm_lock);
        if (--map.wlock_holders == 0) {
                pr_debug("%s: releasing wake lock pm_state %d -> %d\n",
                         __func__, map.pm_state, WCD9XXX_PM_SLEEPABLE);
                /*
                 * if wcd9xxx_spmi_lock_sleep failed, pm_state would still
                 * be WCD9XXX_PM_ASLEEP, so don't overwrite it
                 */
                if (likely(map.pm_state == WCD9XXX_PM_AWAKE))
                        map.pm_state = WCD9XXX_PM_SLEEPABLE;
                pm_qos_update_request(&map.pm_qos_req,
                                      PM_QOS_DEFAULT_VALUE);
                pm_relax(&map.spmi[0]->dev);
        }
        mutex_unlock(&map.pm_lock);
        pr_debug("%s: wake lock counter %d\n", __func__,
                 map.wlock_holders);
        pr_debug("%s: map.pm_state = %d\n", __func__, map.pm_state);
        wake_up_all(&map.pm_wq);
}
EXPORT_SYMBOL(wcd9xxx_spmi_unlock_sleep);

void wcd9xxx_spmi_set_codec(struct snd_soc_codec *codec)
{
        map.codec = codec;
}

void wcd9xxx_spmi_set_dev(struct platform_device *spmi, int i)
{
        if (i < NUM_IRQ_REGS)
                map.spmi[i] = spmi;
}

int wcd9xxx_spmi_irq_init(void)
{
        int i = 0;

        for (; i < MAX_NUM_IRQS; i++)
                map.mask[BIT_BYTE(i)] |= BYTE_BIT_MASK(i);
        mutex_init(&map.pm_lock);
        map.wlock_holders = 0;
        map.pm_state = WCD9XXX_PM_SLEEPABLE;
        init_waitqueue_head(&map.pm_wq);
        pm_qos_add_request(&map.pm_qos_req,
                           PM_QOS_CPU_DMA_LATENCY,
                           PM_QOS_DEFAULT_VALUE);

        return 0;
}

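/*
 * Expected bring-up order for a codec driver using this layer (inferred
 * from the dependencies above, not mandated anywhere in this file):
 * wcd9xxx_spmi_set_dev() for each PMIC interrupt device, then
 * wcd9xxx_spmi_irq_init(), wcd9xxx_spmi_set_codec(), and finally
 * wcd9xxx_spmi_request_irq() for each interrupt of interest.
 */
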
MODULE_DESCRIPTION("MSM8x16 SPMI IRQ driver");
MODULE_LICENSE("GPL v2");