ipa_interrupts.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include "ipa_i.h"

#define INTERRUPT_WORKQUEUE_NAME "ipa_interrupt_wq"
#define DIS_SUSPEND_INTERRUPT_TIMEOUT 5
#define IPA_IRQ_NUM_MAX 32
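
/*
 * Note: DIS_SUSPEND_INTERRUPT_TIMEOUT is in milliseconds (it is converted
 * with msecs_to_jiffies() before the delayed work is queued), and
 * IPA_IRQ_NUM_MAX bounds the per-EE HW IRQ bit positions handled below.
 */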

struct ipa3_interrupt_info {
        ipa_irq_handler_t handler;
        enum ipa_irq_type interrupt;
        void *private_data;
        bool deferred_flag;
};

struct ipa3_interrupt_work_wrap {
        struct work_struct interrupt_work;
        ipa_irq_handler_t handler;
        enum ipa_irq_type interrupt;
        void *private_data;
        void *interrupt_data;
};
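
/*
 * Registration table, indexed by HW IRQ bit position (see ipa3_irq_mapping[]
 * below), holding the client handler, its private cookie and whether its
 * processing must be deferred to the interrupt workqueue.
 */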
static struct ipa3_interrupt_info ipa_interrupt_to_cb[IPA_IRQ_NUM_MAX];
static struct workqueue_struct *ipa_interrupt_wq;
static u32 ipa_ee;

static void ipa3_tx_suspend_interrupt_wa(void);
static void ipa3_enable_tx_suspend_wa(struct work_struct *work);
static DECLARE_DELAYED_WORK(dwork_en_suspend_int,
        ipa3_enable_tx_suspend_wa);
static spinlock_t suspend_wa_lock;
static void ipa3_process_interrupts(bool isr_context);
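
/*
 * Maps the abstract ipa_irq_type values exposed to clients onto the HW IRQ
 * bit positions used by this IPA version (bit 1 is not used by any entry in
 * this table).
 */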
static int ipa3_irq_mapping[IPA_IRQ_MAX] = {
        [IPA_BAD_SNOC_ACCESS_IRQ] = 0,
        [IPA_UC_IRQ_0] = 2,
        [IPA_UC_IRQ_1] = 3,
        [IPA_UC_IRQ_2] = 4,
        [IPA_UC_IRQ_3] = 5,
        [IPA_UC_IN_Q_NOT_EMPTY_IRQ] = 6,
        [IPA_UC_RX_CMD_Q_NOT_FULL_IRQ] = 7,
        [IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ] = 8,
        [IPA_RX_ERR_IRQ] = 9,
        [IPA_DEAGGR_ERR_IRQ] = 10,
        [IPA_TX_ERR_IRQ] = 11,
        [IPA_STEP_MODE_IRQ] = 12,
        [IPA_PROC_ERR_IRQ] = 13,
        [IPA_TX_SUSPEND_IRQ] = 14,
        [IPA_TX_HOLB_DROP_IRQ] = 15,
        [IPA_BAM_GSI_IDLE_IRQ] = 16,
        [IPA_PIPE_YELLOW_MARKER_BELOW_IRQ] = 17,
        [IPA_PIPE_RED_MARKER_BELOW_IRQ] = 18,
        [IPA_PIPE_YELLOW_MARKER_ABOVE_IRQ] = 19,
        [IPA_PIPE_RED_MARKER_ABOVE_IRQ] = 20,
        [IPA_UCP_IRQ] = 21,
        [IPA_DCMP_IRQ] = 22,
        [IPA_GSI_EE_IRQ] = 23,
        [IPA_GSI_IPA_IF_TLV_RCVD_IRQ] = 24,
        [IPA_GSI_UC_IRQ] = 25,
        [IPA_TLV_LEN_MIN_DSM_IRQ] = 26,
        [IPA_DRBIP_PKT_EXCEED_MAX_SIZE_IRQ] = 27,
        [IPA_DRBIP_DATA_SCTR_CFG_ERROR_IRQ] = 28,
        [IPA_DRBIP_IMM_CMD_NO_FLSH_HZRD_IRQ] = 29,
};

static void ipa3_interrupt_defer(struct work_struct *work);
static DECLARE_WORK(ipa3_interrupt_defer_work, ipa3_interrupt_defer);

static void ipa3_deferred_interrupt_work(struct work_struct *work)
{
        struct ipa3_interrupt_work_wrap *work_data =
                container_of(work,
                        struct ipa3_interrupt_work_wrap,
                        interrupt_work);

        IPADBG("call handler from workq for interrupt %d...\n",
                work_data->interrupt);
        work_data->handler(work_data->interrupt, work_data->private_data,
                work_data->interrupt_data);
        kfree(work_data->interrupt_data);
        kfree(work_data);
}
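
/*
 * Return true if at least one endpoint flagged in the suspend status bitmap
 * is a valid (configured) IPA pipe.
 */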
static bool ipa3_is_valid_ep(u32 ep_suspend_data)
{
        u32 bmsk = 1;
        u32 i = 0;

        for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
                if ((ep_suspend_data & bmsk) && (ipa3_ctx->ep[i].valid))
                        return true;
                bmsk = bmsk << 1;
        }
        return false;
}
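
/*
 * Dispatch a single asserted HW IRQ bit to its registered handler.
 *
 * For IPA_TX_SUSPEND_IRQ the suspend work-around is applied first, the L2
 * suspend status is read (and cleared on IPA v3.1+), and the endpoint bitmap
 * is passed to the handler as interrupt_data. Handlers registered as
 * deferred, and any handler reached from ISR context, are queued to the
 * interrupt workqueue instead of being called directly.
 */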
static int ipa3_handle_interrupt(int irq_num, bool isr_context)
{
        struct ipa3_interrupt_info interrupt_info;
        struct ipa3_interrupt_work_wrap *work_data;
        u32 suspend_data;
        void *interrupt_data = NULL;
        struct ipa_tx_suspend_irq_data *suspend_interrupt_data = NULL;
        int res;

        interrupt_info = ipa_interrupt_to_cb[irq_num];
        if (interrupt_info.handler == NULL) {
                IPAERR("A callback function wasn't set for interrupt num %d\n",
                        irq_num);
                return -EINVAL;
        }

        switch (interrupt_info.interrupt) {
        case IPA_TX_SUSPEND_IRQ:
                IPADBG_LOW("processing TX_SUSPEND interrupt\n");
                ipa3_tx_suspend_interrupt_wa();
                suspend_data = ipahal_read_reg_n(IPA_SUSPEND_IRQ_INFO_EE_n,
                        ipa_ee);
                IPADBG_LOW("get interrupt %d\n", suspend_data);

                if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
                        /* Clearing L2 interrupts status */
                        ipahal_write_reg_n(IPA_SUSPEND_IRQ_CLR_EE_n,
                                ipa_ee, suspend_data);
                }
                if (!ipa3_is_valid_ep(suspend_data))
                        return 0;

                suspend_interrupt_data =
                        kzalloc(sizeof(*suspend_interrupt_data), GFP_ATOMIC);
                if (!suspend_interrupt_data) {
                        IPAERR("failed allocating suspend_interrupt_data\n");
                        return -ENOMEM;
                }
                suspend_interrupt_data->endpoints = suspend_data;
                interrupt_data = suspend_interrupt_data;
                break;
        default:
                break;
        }

        /* Force defer processing if in ISR context. */
        if (interrupt_info.deferred_flag || isr_context) {
                IPADBG_LOW("Defer handling interrupt %d\n",
                        interrupt_info.interrupt);
                work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
                        GFP_ATOMIC);
                if (!work_data) {
                        IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
                        res = -ENOMEM;
                        goto fail_alloc_work;
                }
                INIT_WORK(&work_data->interrupt_work,
                        ipa3_deferred_interrupt_work);
                work_data->handler = interrupt_info.handler;
                work_data->interrupt = interrupt_info.interrupt;
                work_data->private_data = interrupt_info.private_data;
                work_data->interrupt_data = interrupt_data;
                queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
        } else {
                IPADBG_LOW("Handle interrupt %d\n", interrupt_info.interrupt);
                interrupt_info.handler(interrupt_info.interrupt,
                        interrupt_info.private_data,
                        interrupt_data);
                kfree(interrupt_data);
        }

        return 0;

fail_alloc_work:
        kfree(interrupt_data);
        return res;
}
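
/*
 * TX_SUSPEND work-around pair: ipa3_tx_suspend_interrupt_wa() masks the
 * TX_SUSPEND interrupt as soon as it fires and schedules
 * ipa3_enable_tx_suspend_wa() to unmask it again after
 * DIS_SUSPEND_INTERRUPT_TIMEOUT msec (scaled up on virtual/emulation
 * targets) and re-scan any interrupts that arrived while it was masked,
 * effectively rate-limiting the suspend interrupt instead of servicing
 * every assertion back to back.
 */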
static void ipa3_enable_tx_suspend_wa(struct work_struct *work)
{
        u32 en;
        u32 suspend_bmask;
        int irq_num;

        IPADBG_LOW("Enter\n");

        irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
        if (irq_num == -1) {
                WARN_ON(1);
                return;
        }

        /* make sure ipa hw is clocked on */
        IPA_ACTIVE_CLIENTS_INC_SIMPLE();

        en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
        suspend_bmask = 1 << irq_num;
        /* enable TX_SUSPEND_IRQ */
        en |= suspend_bmask;
        IPADBG("enable TX_SUSPEND_IRQ, IPA_IRQ_EN_EE reg, write val = %u\n",
                en);
        ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, en);
        ipa3_process_interrupts(false);
        IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

        IPADBG_LOW("Exit\n");
}

static void ipa3_tx_suspend_interrupt_wa(void)
{
        u32 val;
        u32 suspend_bmask;
        int irq_num;
        int wa_delay;

        IPADBG_LOW("Enter\n");

        irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
        if (irq_num == -1) {
                WARN_ON(1);
                return;
        }

        /* disable TX_SUSPEND_IRQ */
        val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
        suspend_bmask = 1 << irq_num;
        val &= ~suspend_bmask;
        IPADBG("Disabling TX_SUSPEND_IRQ, write val: %u to IPA_IRQ_EN_EE reg\n",
                val);
        ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, val);

        IPADBG_LOW(" processing suspend interrupt work-around, delayed work\n");
        wa_delay = DIS_SUSPEND_INTERRUPT_TIMEOUT;
        if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
                ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
                wa_delay *= 400;
        }
        IPADBG_LOW("Delay period %d msec\n", wa_delay);

        queue_delayed_work(ipa_interrupt_wq, &dwork_en_suspend_int,
                msecs_to_jiffies(wa_delay));

        IPADBG_LOW("Exit\n");
}

static inline bool is_uc_irq(int irq_num)
{
        if (ipa_interrupt_to_cb[irq_num].interrupt >= IPA_UC_IRQ_0 &&
                ipa_interrupt_to_cb[irq_num].interrupt <= IPA_UC_IRQ_3)
                return true;
        else
                return false;
}
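
/*
 * Core dispatch loop: under suspend_wa_lock, read the enabled and asserted
 * IRQ bits and hand each asserted, enabled bit to ipa3_handle_interrupt()
 * with the lock dropped, so client handlers are not called in atomic
 * context. uC interrupts are cleared before handling, all others after, and
 * both the status and enable registers are re-read on every pass because the
 * suspend work-around may have changed the enable mask in the meantime.
 */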
static void ipa3_process_interrupts(bool isr_context)
{
        u32 reg;
        u32 bmsk;
        u32 i = 0;
        u32 en;
        unsigned long flags;
        bool uc_irq;

        IPADBG_LOW("Enter isr_context=%d\n", isr_context);

        spin_lock_irqsave(&suspend_wa_lock, flags);
        en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
        reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
        while (en & reg) {
                IPADBG_LOW("en=0x%x reg=0x%x\n", en, reg);
                bmsk = 1;
                for (i = 0; i < IPA_IRQ_NUM_MAX; i++) {
                        IPADBG_LOW("Check irq number %d\n", i);
                        if (en & reg & bmsk) {
                                IPADBG_LOW("Irq number %d asserted\n", i);
                                uc_irq = is_uc_irq(i);

                                /*
                                 * Clear uC interrupt before processing to
                                 * avoid clearing unhandled interrupts
                                 */
                                if (uc_irq)
                                        ipahal_write_reg_n(IPA_IRQ_CLR_EE_n,
                                                ipa_ee, bmsk);

                                /*
                                 * Handle the interrupt with spin_lock
                                 * unlocked to avoid calling client in atomic
                                 * context. Mutual exclusion is still
                                 * preserved as the read/clr is done with
                                 * spin_lock locked.
                                 */
                                spin_unlock_irqrestore(&suspend_wa_lock, flags);
                                ipa3_handle_interrupt(i, isr_context);
                                spin_lock_irqsave(&suspend_wa_lock, flags);

                                /*
                                 * Clear non-uC interrupt after processing
                                 * to avoid clearing interrupt data
                                 */
                                if (!uc_irq)
                                        ipahal_write_reg_n(IPA_IRQ_CLR_EE_n,
                                                ipa_ee, bmsk);
                        }
                        bmsk = bmsk << 1;
                }

                reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
                /*
                 * Due to the suspend interrupt HW bug we must also re-read
                 * the EN register; otherwise the while loop never terminates.
                 */
                en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
        }
        spin_unlock_irqrestore(&suspend_wa_lock, flags);
        IPADBG_LOW("Exit\n");
}

static void ipa3_interrupt_defer(struct work_struct *work)
{
        IPADBG("processing interrupts in wq\n");
        IPA_ACTIVE_CLIENTS_INC_SIMPLE();
        ipa3_process_interrupts(false);
        IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
        IPADBG("Done\n");
}

static irqreturn_t ipa3_isr(int irq, void *ctxt)
{
        struct ipa_active_client_logging_info log_info;

        IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
        IPADBG_LOW("Enter\n");

        /* defer interrupt handling in case IPA is not clocked on */
        if (ipa3_inc_client_enable_clks_no_block(&log_info)) {
                IPADBG("defer interrupt processing\n");
                queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
                return IRQ_HANDLED;
        }

        ipa3_process_interrupts(true);
        IPADBG_LOW("Exit\n");

        ipa3_dec_client_disable_clks_no_block(&log_info);
        return IRQ_HANDLED;
}

irq_handler_t ipa3_get_isr(void)
{
        return ipa3_isr;
}

/**
 * ipa3_add_interrupt_handler() - Adds a handler for an interrupt type
 * @interrupt: Interrupt type
 * @handler: The handler to be added
 * @deferred_flag: whether the handler processing should be deferred in
 * a workqueue
 * @private_data: the client's private data
 *
 * Adds a handler for an interrupt type and enables the corresponding bit in
 * the IRQ_EN register; the associated interrupt will then be reported in the
 * IRQ_STTS register.
 */
int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
        ipa_irq_handler_t handler,
        bool deferred_flag,
        void *private_data)
{
        u32 val;
        u32 bmsk;
        int irq_num;
        int client_idx, ep_idx;

        IPADBG("interrupt_enum(%d)\n", interrupt);
        if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
                interrupt >= IPA_IRQ_MAX) {
                IPAERR("invalid interrupt number %d\n", interrupt);
                return -EINVAL;
        }

        irq_num = ipa3_irq_mapping[interrupt];
        if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
                IPAERR("interrupt %d not supported\n", interrupt);
                WARN_ON(1);
                return -EFAULT;
        }
        IPADBG("ipa_interrupt_to_cb irq_num(%d)\n", irq_num);

        ipa_interrupt_to_cb[irq_num].deferred_flag = deferred_flag;
        ipa_interrupt_to_cb[irq_num].handler = handler;
        ipa_interrupt_to_cb[irq_num].private_data = private_data;
        ipa_interrupt_to_cb[irq_num].interrupt = interrupt;

        val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
        IPADBG("read IPA_IRQ_EN_EE_n register. reg = %d\n", val);

        bmsk = 1 << irq_num;
        val |= bmsk;
        ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, val);
        IPADBG("wrote IPA_IRQ_EN_EE_n register. reg = %d\n", val);

        /* register SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */
        if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
                (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
                val = ~0;
                for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
                        if (IPA_CLIENT_IS_Q6_CONS(client_idx) ||
                                IPA_CLIENT_IS_Q6_PROD(client_idx)) {
                                ep_idx = ipa3_get_ep_mapping(client_idx);
                                IPADBG("modem ep_idx(%d) client_idx = %d\n",
                                        ep_idx, client_idx);
                                if (ep_idx == -1)
                                        IPADBG("Invalid IPA client\n");
                                else
                                        val &= ~(1 << ep_idx);
                        }

                ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, val);
                IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", val);
        }
        return 0;
}
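
/*
 * Illustrative usage sketch (the callback and cookie names below are
 * hypothetical, not part of this driver): a client registers a deferred
 * TX_SUSPEND handler and receives the suspended endpoint bitmap through
 * interrupt_data.
 *
 *	static void client_suspend_cb(enum ipa_irq_type interrupt,
 *		void *private_data, void *interrupt_data)
 *	{
 *		struct ipa_tx_suspend_irq_data *susp = interrupt_data;
 *
 *		IPADBG("suspended endpoints bitmap 0x%x\n", susp->endpoints);
 *	}
 *
 *	ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, client_suspend_cb,
 *		true, client_cookie);
 */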

/**
 * ipa3_remove_interrupt_handler() - Removes the handler for an interrupt type
 * @interrupt: Interrupt type
 *
 * Removes the handler and disables the corresponding bit in the IRQ_EN
 * register
 */
int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt)
{
        u32 val;
        u32 bmsk;
        int irq_num;

        if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
                interrupt >= IPA_IRQ_MAX) {
                IPAERR("invalid interrupt number %d\n", interrupt);
                return -EINVAL;
        }

        irq_num = ipa3_irq_mapping[interrupt];
        if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
                IPAERR("interrupt %d not supported\n", interrupt);
                WARN_ON(1);
                return -EFAULT;
        }

        kfree(ipa_interrupt_to_cb[irq_num].private_data);
        ipa_interrupt_to_cb[irq_num].deferred_flag = false;
        ipa_interrupt_to_cb[irq_num].handler = NULL;
        ipa_interrupt_to_cb[irq_num].private_data = NULL;
        ipa_interrupt_to_cb[irq_num].interrupt = -1;

        /* clean SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */
        if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
                (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
                ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, 0);
                IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", 0);
        }

        val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
        bmsk = 1 << irq_num;
        val &= ~bmsk;
        ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, val);

        return 0;
}

/**
 * ipa3_interrupts_init() - Initialize the IPA interrupts framework
 * @ipa_irq: The interrupt number to allocate
 * @ee: Execution environment
 * @ipa_dev: The basic device structure representing the IPA driver
 *
 * - Initialize the ipa_interrupt_to_cb array
 * - Clear interrupts status
 * - Register the ipa interrupt handler - ipa3_isr
 * - Enable apps processor wakeup by IPA interrupts
 */
int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev)
{
        int idx;
        int res = 0;

        ipa_ee = ee;
        for (idx = 0; idx < IPA_IRQ_NUM_MAX; idx++) {
                ipa_interrupt_to_cb[idx].deferred_flag = false;
                ipa_interrupt_to_cb[idx].handler = NULL;
                ipa_interrupt_to_cb[idx].private_data = NULL;
                ipa_interrupt_to_cb[idx].interrupt = -1;
        }

        ipa_interrupt_wq = create_singlethread_workqueue(
                        INTERRUPT_WORKQUEUE_NAME);
        if (!ipa_interrupt_wq) {
                IPAERR("workqueue creation failed\n");
                return -ENOMEM;
        }

        /*
         * NOTE:
         *
         * We only register an isr on non-emulator (i.e. real UE) systems.
         *
         * On the emulator, emulator_soft_irq_isr() will be calling
         * ipa3_isr, hence no isr registration here; instead, we pass the
         * address of ipa3_isr to the gsi layer, where emulator interrupts
         * are handled...
         */
        if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
                res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
                        IRQF_TRIGGER_RISING, "ipa", ipa_dev);
                if (res) {
                        IPAERR("fail to register IPA IRQ handler irq=%d\n",
                                ipa_irq);
                        destroy_workqueue(ipa_interrupt_wq);
                        ipa_interrupt_wq = NULL;
                        return -ENODEV;
                }
                IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);

                res = enable_irq_wake(ipa_irq);
                if (res)
                        IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
                                ipa_irq, res);
                else
                        IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
        }
        spin_lock_init(&suspend_wa_lock);
        return 0;
}

/**
 * ipa3_interrupts_destroy() - Destroy the IPA interrupts framework
 * @ipa_irq: The interrupt number to free
 * @ipa_dev: The basic device structure representing the IPA driver
 *
 * - Disable apps processor wakeup by IPA interrupts
 * - Unregister the ipa interrupt handler - ipa3_isr
 * - Destroy the interrupt workqueue
 */
void ipa3_interrupts_destroy(u32 ipa_irq, struct device *ipa_dev)
{
        if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
                disable_irq_wake(ipa_irq);
                free_irq(ipa_irq, ipa_dev);
        }
        destroy_workqueue(ipa_interrupt_wq);
        ipa_interrupt_wq = NULL;
}

/**
 * ipa3_suspend_active_aggr_wa() - Emulate suspend IRQ
 * @clnt_hdl: suspended client handle, IRQ is emulated for this pipe
 *
 * Emulate a suspend IRQ in order to unsuspend a client that was suspended
 * with an open aggregation frame, bypassing the HW bug where no IRQ is
 * generated when an endpoint is suspended during open aggregation.
 */
void ipa3_suspend_active_aggr_wa(u32 clnt_hdl)
{
        struct ipa3_interrupt_info interrupt_info;
        struct ipa3_interrupt_work_wrap *work_data;
        struct ipa_tx_suspend_irq_data *suspend_interrupt_data;
        int irq_num;
        int aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);

        if (aggr_active_bitmap & (1 << clnt_hdl)) {
                /* force close aggregation */
                ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));

                /* simulate suspend IRQ */
                irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
                interrupt_info = ipa_interrupt_to_cb[irq_num];
                if (interrupt_info.handler == NULL) {
                        IPAERR("no CB function for IPA_TX_SUSPEND_IRQ\n");
                        return;
                }
                suspend_interrupt_data = kzalloc(
                                sizeof(*suspend_interrupt_data),
                                GFP_ATOMIC);
                if (!suspend_interrupt_data) {
                        IPAERR("failed allocating suspend_interrupt_data\n");
                        return;
                }
                suspend_interrupt_data->endpoints = 1 << clnt_hdl;

                work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
                                GFP_ATOMIC);
                if (!work_data) {
                        IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
                        goto fail_alloc_work;
                }
                INIT_WORK(&work_data->interrupt_work,
                        ipa3_deferred_interrupt_work);
                work_data->handler = interrupt_info.handler;
                work_data->interrupt = IPA_TX_SUSPEND_IRQ;
                work_data->private_data = interrupt_info.private_data;
                work_data->interrupt_data = (void *)suspend_interrupt_data;
                queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
                return;
fail_alloc_work:
                kfree(suspend_interrupt_data);
        }
}