ipa_interrupts.c 17 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/interrupt.h>
  6. #include "ipa_i.h"
  7. #define INTERRUPT_WORKQUEUE_NAME "ipa_interrupt_wq"
  8. #define DIS_SUSPEND_INTERRUPT_TIMEOUT 5
  9. #define IPA_IRQ_NUM_MAX 32
/*
 * Registration record for one HW IRQ bit.
 * @handler:	client callback invoked when the interrupt fires
 * @interrupt:	logical interrupt type the slot was registered for
 * @private_data: opaque client cookie passed back to @handler
 * @deferred_flag: when true, @handler always runs from the interrupt
 *		workqueue rather than the interrupt-processing context
 */
struct ipa3_interrupt_info {
	ipa_irq_handler_t handler;
	enum ipa_irq_type interrupt;
	void *private_data;
	bool deferred_flag;
};
/*
 * Per-dispatch wrapper queued to the interrupt workqueue; carries the
 * client callback and its arguments. Both the wrapper itself and
 * @interrupt_data are kfree'd by ipa3_deferred_interrupt_work() after
 * the handler returns.
 */
struct ipa3_interrupt_work_wrap {
	struct work_struct interrupt_work;
	ipa_irq_handler_t handler;
	enum ipa_irq_type interrupt;
	void *private_data;
	void *interrupt_data;
};
  23. static struct ipa3_interrupt_info ipa_interrupt_to_cb[IPA_IRQ_NUM_MAX];
  24. static struct workqueue_struct *ipa_interrupt_wq;
  25. static u32 ipa_ee;
  26. static void ipa3_tx_suspend_interrupt_wa(void);
  27. static void ipa3_enable_tx_suspend_wa(struct work_struct *work);
  28. static DECLARE_DELAYED_WORK(dwork_en_suspend_int,
  29. ipa3_enable_tx_suspend_wa);
  30. static spinlock_t suspend_wa_lock;
  31. static void ipa3_process_interrupts(bool isr_context);
/*
 * Maps each logical ipa_irq_type to its HW bit position in the
 * IPA_IRQ_* registers (bit 1 is intentionally unused on this HW).
 *
 * NOTE(review): designated-initializer gaps default to 0, which is also
 * the slot of IPA_BAD_SNOC_ACCESS_IRQ, so any enum value missing from
 * this table silently aliases bit 0 instead of yielding the -1 that the
 * "irq_num == -1" guards elsewhere in this file expect — confirm every
 * supported enum value is listed explicitly.
 */
static int ipa3_irq_mapping[IPA_IRQ_MAX] = {
	[IPA_BAD_SNOC_ACCESS_IRQ]		= 0,
	[IPA_UC_IRQ_0]				= 2,
	[IPA_UC_IRQ_1]				= 3,
	[IPA_UC_IRQ_2]				= 4,
	[IPA_UC_IRQ_3]				= 5,
	[IPA_UC_IN_Q_NOT_EMPTY_IRQ]		= 6,
	[IPA_UC_RX_CMD_Q_NOT_FULL_IRQ]		= 7,
	[IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ]	= 8,
	[IPA_RX_ERR_IRQ]			= 9,
	[IPA_DEAGGR_ERR_IRQ]			= 10,
	[IPA_TX_ERR_IRQ]			= 11,
	[IPA_STEP_MODE_IRQ]			= 12,
	[IPA_PROC_ERR_IRQ]			= 13,
	[IPA_TX_SUSPEND_IRQ]			= 14,
	[IPA_TX_HOLB_DROP_IRQ]			= 15,
	[IPA_BAM_GSI_IDLE_IRQ]			= 16,
	[IPA_PIPE_YELLOW_MARKER_BELOW_IRQ]	= 17,
	[IPA_PIPE_RED_MARKER_BELOW_IRQ]		= 18,
	[IPA_PIPE_YELLOW_MARKER_ABOVE_IRQ]	= 19,
	[IPA_PIPE_RED_MARKER_ABOVE_IRQ]		= 20,
	[IPA_UCP_IRQ]				= 21,
	[IPA_DCMP_IRQ]				= 22,
	[IPA_GSI_EE_IRQ]			= 23,
	[IPA_GSI_IPA_IF_TLV_RCVD_IRQ]		= 24,
	[IPA_GSI_UC_IRQ]			= 25,
	[IPA_TLV_LEN_MIN_DSM_IRQ]		= 26,
};
  60. static void ipa3_interrupt_defer(struct work_struct *work);
  61. static DECLARE_WORK(ipa3_interrupt_defer_work, ipa3_interrupt_defer);
  62. static void ipa3_deferred_interrupt_work(struct work_struct *work)
  63. {
  64. struct ipa3_interrupt_work_wrap *work_data =
  65. container_of(work,
  66. struct ipa3_interrupt_work_wrap,
  67. interrupt_work);
  68. IPADBG("call handler from workq for interrupt %d...\n",
  69. work_data->interrupt);
  70. work_data->handler(work_data->interrupt, work_data->private_data,
  71. work_data->interrupt_data);
  72. kfree(work_data->interrupt_data);
  73. kfree(work_data);
  74. }
  75. static bool ipa3_is_valid_ep(u32 ep_suspend_data)
  76. {
  77. u32 bmsk = 1;
  78. u32 i = 0;
  79. for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
  80. if ((ep_suspend_data & bmsk) && (ipa3_ctx->ep[i].valid))
  81. return true;
  82. bmsk = bmsk << 1;
  83. }
  84. return false;
  85. }
/*
 * ipa3_handle_interrupt() - dispatch one asserted IPA interrupt to its
 * registered callback.
 * @irq_num:	HW bit position of the interrupt (index into
 *		ipa_interrupt_to_cb)
 * @isr_context: true when called from hard-IRQ context; forces the
 *		client callback to be deferred to the workqueue
 *
 * For IPA_TX_SUSPEND_IRQ the per-EP suspend status is read (and, on HW
 * >= v3.1, the L2 status register is cleared) and packed into a
 * heap-allocated ipa_tx_suspend_irq_data handed to the client. On the
 * deferred path that allocation is freed by
 * ipa3_deferred_interrupt_work(); on the direct path it is freed here
 * after the handler returns.
 *
 * Returns 0 on success or a negative errno.
 */
static int ipa3_handle_interrupt(int irq_num, bool isr_context)
{
	struct ipa3_interrupt_info interrupt_info;
	struct ipa3_interrupt_work_wrap *work_data;
	u32 suspend_data;
	void *interrupt_data = NULL;
	struct ipa_tx_suspend_irq_data *suspend_interrupt_data = NULL;
	int res;

	interrupt_info = ipa_interrupt_to_cb[irq_num];
	if (interrupt_info.handler == NULL) {
		IPAERR("A callback function wasn't set for interrupt num %d\n",
			irq_num);
		return -EINVAL;
	}

	switch (interrupt_info.interrupt) {
	case IPA_TX_SUSPEND_IRQ:
		IPADBG_LOW("processing TX_SUSPEND interrupt\n");
		/* HW WA: mask TX_SUSPEND now; delayed work re-enables it */
		ipa3_tx_suspend_interrupt_wa();
		suspend_data = ipahal_read_reg_n(IPA_SUSPEND_IRQ_INFO_EE_n,
			ipa_ee);
		IPADBG_LOW("get interrupt %d\n", suspend_data);
		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
			/* Clearing L2 interrupts status */
			ipahal_write_reg_n(IPA_SUSPEND_IRQ_CLR_EE_n,
				ipa_ee, suspend_data);
		}
		/* ignore suspend bits that map to no valid endpoint */
		if (!ipa3_is_valid_ep(suspend_data))
			return 0;

		suspend_interrupt_data =
			kzalloc(sizeof(*suspend_interrupt_data), GFP_ATOMIC);
		if (!suspend_interrupt_data) {
			IPAERR("failed allocating suspend_interrupt_data\n");
			return -ENOMEM;
		}
		suspend_interrupt_data->endpoints = suspend_data;
		interrupt_data = suspend_interrupt_data;
		break;
	default:
		break;
	}

	/* Force defer processing if in ISR context. */
	if (interrupt_info.deferred_flag || isr_context) {
		IPADBG_LOW("Defer handling interrupt %d\n",
			interrupt_info.interrupt);
		work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
				GFP_ATOMIC);
		if (!work_data) {
			IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
			res = -ENOMEM;
			goto fail_alloc_work;
		}
		INIT_WORK(&work_data->interrupt_work,
			ipa3_deferred_interrupt_work);
		work_data->handler = interrupt_info.handler;
		work_data->interrupt = interrupt_info.interrupt;
		work_data->private_data = interrupt_info.private_data;
		/* ownership of interrupt_data passes to the work item */
		work_data->interrupt_data = interrupt_data;
		queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
	} else {
		IPADBG_LOW("Handle interrupt %d\n", interrupt_info.interrupt);
		interrupt_info.handler(interrupt_info.interrupt,
			interrupt_info.private_data,
			interrupt_data);
		kfree(interrupt_data);
	}

	return 0;

fail_alloc_work:
	kfree(interrupt_data);
	return res;
}
/*
 * ipa3_enable_tx_suspend_wa() - delayed-work half of the TX_SUSPEND
 * interrupt HW work-around: re-enables TX_SUSPEND_IRQ after the hold-off
 * period set by ipa3_tx_suspend_interrupt_wa() and drains anything that
 * asserted while the interrupt was masked.
 */
static void ipa3_enable_tx_suspend_wa(struct work_struct *work)
{
	u32 en;
	u32 suspend_bmask;
	int irq_num;

	IPADBG_LOW("Enter\n");

	irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
	if (irq_num == -1) {
		WARN_ON(1);
		return;
	}

	/* make sure ipa hw is clocked on*/
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();

	en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
	suspend_bmask = 1 << irq_num;
	/*enable TX_SUSPEND_IRQ*/
	en |= suspend_bmask;
	IPADBG("enable TX_SUSPEND_IRQ, IPA_IRQ_EN_EE reg, write val = %u\n"
		, en);
	ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, en);
	/* dispatch anything that became pending while the IRQ was masked */
	ipa3_process_interrupts(false);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	IPADBG_LOW("Exit\n");
}
/*
 * ipa3_tx_suspend_interrupt_wa() - first half of the TX_SUSPEND HW
 * work-around: masks TX_SUSPEND_IRQ in IRQ_EN and schedules the delayed
 * work (ipa3_enable_tx_suspend_wa) to unmask it again after
 * DIS_SUSPEND_INTERRUPT_TIMEOUT msec (scaled x400 on virtual/emulation
 * platforms).
 */
static void ipa3_tx_suspend_interrupt_wa(void)
{
	u32 val;
	u32 suspend_bmask;
	int irq_num;
	int wa_delay;

	IPADBG_LOW("Enter\n");
	irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
	if (irq_num == -1) {
		WARN_ON(1);
		return;
	}

	/*disable TX_SUSPEND_IRQ*/
	val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
	suspend_bmask = 1 << irq_num;
	val &= ~suspend_bmask;
	IPADBG("Disabling TX_SUSPEND_IRQ, write val: %u to IPA_IRQ_EN_EE reg\n",
		val);
	ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, val);

	IPADBG_LOW(" processing suspend interrupt work-around, delayed work\n");
	wa_delay = DIS_SUSPEND_INTERRUPT_TIMEOUT;
	if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
		ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
		/* much slower platforms need a longer hold-off */
		wa_delay *= 400;
	}

	IPADBG_LOW("Delay period %d msec\n", wa_delay);

	queue_delayed_work(ipa_interrupt_wq, &dwork_en_suspend_int,
			msecs_to_jiffies(wa_delay));

	IPADBG_LOW("Exit\n");
}
  210. static inline bool is_uc_irq(int irq_num)
  211. {
  212. if (ipa_interrupt_to_cb[irq_num].interrupt >= IPA_UC_IRQ_0 &&
  213. ipa_interrupt_to_cb[irq_num].interrupt <= IPA_UC_IRQ_3)
  214. return true;
  215. else
  216. return false;
  217. }
/*
 * ipa3_process_interrupts() - read IRQ_EN/IRQ_STTS and dispatch every
 * enabled-and-asserted interrupt bit to ipa3_handle_interrupt().
 * @isr_context: true when running in hard-IRQ context; propagated so
 *		client callbacks are deferred to the workqueue.
 *
 * Runs under suspend_wa_lock so the status read/clear is mutually
 * exclusive with the TX_SUSPEND work-around; the lock is dropped around
 * each dispatch so clients are not called in atomic context. Both EN and
 * STTS are re-read each pass because the work-around may mask
 * TX_SUSPEND_IRQ mid-loop.
 */
static void ipa3_process_interrupts(bool isr_context)
{
	u32 reg;
	u32 bmsk;
	u32 i = 0;
	u32 en;
	unsigned long flags;
	bool uc_irq;

	IPADBG_LOW("Enter isr_context=%d\n", isr_context);

	spin_lock_irqsave(&suspend_wa_lock, flags);
	en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
	reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
	while (en & reg) {
		IPADBG_LOW("en=0x%x reg=0x%x\n", en, reg);
		bmsk = 1;
		for (i = 0; i < IPA_IRQ_NUM_MAX; i++) {
			IPADBG_LOW("Check irq number %d\n", i);
			if (en & reg & bmsk) {
				IPADBG_LOW("Irq number %d asserted\n", i);
				uc_irq = is_uc_irq(i);

				/*
				 * Clear uC interrupt before processing to avoid
				 * clearing unhandled interrupts
				 */
				if (uc_irq)
					ipahal_write_reg_n(IPA_IRQ_CLR_EE_n,
							ipa_ee, bmsk);

				/*
				 * handle the interrupt with spin_lock
				 * unlocked to avoid calling client in atomic
				 * context. mutual exclusion still preserved
				 * as the read/clr is done with spin_lock
				 * locked.
				 */
				spin_unlock_irqrestore(&suspend_wa_lock, flags);
				ipa3_handle_interrupt(i, isr_context);
				spin_lock_irqsave(&suspend_wa_lock, flags);

				/*
				 * Clear non uC interrupt after processing
				 * to avoid clearing interrupt data
				 */
				if (!uc_irq)
					ipahal_write_reg_n(IPA_IRQ_CLR_EE_n,
							ipa_ee, bmsk);
			}
			bmsk = bmsk << 1;
		}

		reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
		/* since the suspend interrupt HW bug we must
		 * read again the EN register, otherwise the while is endless
		 */
		en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
	}
	spin_unlock_irqrestore(&suspend_wa_lock, flags);
	IPADBG_LOW("Exit\n");
}
/*
 * ipa3_interrupt_defer() - workqueue entry used when the ISR found the
 * IPA clocks off; votes the clocks on, processes pending interrupts in
 * process context, then drops the vote.
 */
static void ipa3_interrupt_defer(struct work_struct *work)
{
	IPADBG("processing interrupts in wq\n");
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	ipa3_process_interrupts(false);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
	IPADBG("Done\n");
}
/*
 * ipa3_isr() - top-level IPA hard-IRQ handler.
 *
 * If the IPA clocks cannot be voted on without blocking, processing is
 * handed off to the power-management workqueue; otherwise interrupts
 * are processed inline (with isr_context = true so client callbacks
 * still defer to the interrupt workqueue).
 */
static irqreturn_t ipa3_isr(int irq, void *ctxt)
{
	struct ipa_active_client_logging_info log_info;

	IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
	IPADBG_LOW("Enter\n");
	/* defer interrupt handling in case IPA is not clocked on */
	if (ipa3_inc_client_enable_clks_no_block(&log_info)) {
		IPADBG("defer interrupt processing\n");
		queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
		return IRQ_HANDLED;
	}

	ipa3_process_interrupts(true);
	IPADBG_LOW("Exit\n");

	ipa3_dec_client_disable_clks_no_block(&log_info);
	return IRQ_HANDLED;
}
/*
 * ipa3_get_isr() - expose the IPA ISR so the GSI/emulator layer can call
 * it directly instead of going through request_irq() (see the note in
 * ipa3_interrupts_init()).
 */
irq_handler_t ipa3_get_isr(void)
{
	return ipa3_isr;
}
/**
 * ipa3_add_interrupt_handler() - Adds handler to an interrupt type
 * @interrupt:		Interrupt type
 * @handler:		The handler to be added
 * @deferred_flag:	whether the handler processing should be deferred in
 *			a workqueue
 * @private_data:	the client's private data
 *
 * Adds handler to an interrupt type and enable the specific bit
 * in IRQ_EN register, associated interrupt in IRQ_STTS register will be enabled
 *
 * Return: 0 on success, -EINVAL for an out-of-range type, -EFAULT for a
 * type with no HW mapping
 */
int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
		ipa_irq_handler_t handler,
		bool deferred_flag,
		void *private_data)
{
	u32 val;
	u32 bmsk;
	int irq_num;
	int client_idx, ep_idx;

	IPADBG("interrupt_enum(%d)\n", interrupt);
	if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
		interrupt >= IPA_IRQ_MAX) {
		IPAERR("invalid interrupt number %d\n", interrupt);
		return -EINVAL;
	}

	irq_num = ipa3_irq_mapping[interrupt];
	if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
		IPAERR("interrupt %d not supported\n", interrupt);
		WARN_ON(1);
		return -EFAULT;
	}
	IPADBG("ipa_interrupt_to_cb irq_num(%d)\n", irq_num);

	/* record the callback before unmasking the interrupt in HW */
	ipa_interrupt_to_cb[irq_num].deferred_flag = deferred_flag;
	ipa_interrupt_to_cb[irq_num].handler = handler;
	ipa_interrupt_to_cb[irq_num].private_data = private_data;
	ipa_interrupt_to_cb[irq_num].interrupt = interrupt;

	val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
	IPADBG("read IPA_IRQ_EN_EE_n register. reg = %d\n", val);
	bmsk = 1 << irq_num;
	val |= bmsk;
	ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, val);
	IPADBG("wrote IPA_IRQ_EN_EE_n register. reg = %d\n", val);

	/* register SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt*/
	if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
		(ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
		/*
		 * Enable the L2 suspend interrupt for every endpoint
		 * except those mapped to the Q6 (modem): start from an
		 * all-ones mask and clear each modem EP bit.
		 */
		val = ~0;
		for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
			if (IPA_CLIENT_IS_Q6_CONS(client_idx) ||
				IPA_CLIENT_IS_Q6_PROD(client_idx)) {
				ep_idx = ipa3_get_ep_mapping(client_idx);
				IPADBG("modem ep_idx(%d) client_idx = %d\n",
					ep_idx, client_idx);
				if (ep_idx == -1)
					IPADBG("Invalid IPA client\n");
				else
					val &= ~(1 << ep_idx);
			}

		ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, val);
		IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", val);
	}
	return 0;
}
  365. /**
  366. * ipa3_remove_interrupt_handler() - Removes handler to an interrupt type
  367. * @interrupt: Interrupt type
  368. *
  369. * Removes the handler and disable the specific bit in IRQ_EN register
  370. */
  371. int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt)
  372. {
  373. u32 val;
  374. u32 bmsk;
  375. int irq_num;
  376. if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
  377. interrupt >= IPA_IRQ_MAX) {
  378. IPAERR("invalid interrupt number %d\n", interrupt);
  379. return -EINVAL;
  380. }
  381. irq_num = ipa3_irq_mapping[interrupt];
  382. if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
  383. IPAERR("interrupt %d not supported\n", interrupt);
  384. WARN_ON(1);
  385. return -EFAULT;
  386. }
  387. kfree(ipa_interrupt_to_cb[irq_num].private_data);
  388. ipa_interrupt_to_cb[irq_num].deferred_flag = false;
  389. ipa_interrupt_to_cb[irq_num].handler = NULL;
  390. ipa_interrupt_to_cb[irq_num].private_data = NULL;
  391. ipa_interrupt_to_cb[irq_num].interrupt = -1;
  392. /* clean SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */
  393. if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
  394. (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
  395. ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, 0);
  396. IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", 0);
  397. }
  398. val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
  399. bmsk = 1 << irq_num;
  400. val &= ~bmsk;
  401. ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, val);
  402. return 0;
  403. }
  404. /**
  405. * ipa3_interrupts_init() - Initialize the IPA interrupts framework
  406. * @ipa_irq: The interrupt number to allocate
  407. * @ee: Execution environment
  408. * @ipa_dev: The basic device structure representing the IPA driver
  409. *
  410. * - Initialize the ipa_interrupt_to_cb array
  411. * - Clear interrupts status
  412. * - Register the ipa interrupt handler - ipa3_isr
  413. * - Enable apps processor wakeup by IPA interrupts
  414. */
  415. int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev)
  416. {
  417. int idx;
  418. int res = 0;
  419. ipa_ee = ee;
  420. for (idx = 0; idx < IPA_IRQ_NUM_MAX; idx++) {
  421. ipa_interrupt_to_cb[idx].deferred_flag = false;
  422. ipa_interrupt_to_cb[idx].handler = NULL;
  423. ipa_interrupt_to_cb[idx].private_data = NULL;
  424. ipa_interrupt_to_cb[idx].interrupt = -1;
  425. }
  426. ipa_interrupt_wq = create_singlethread_workqueue(
  427. INTERRUPT_WORKQUEUE_NAME);
  428. if (!ipa_interrupt_wq) {
  429. IPAERR("workqueue creation failed\n");
  430. return -ENOMEM;
  431. }
  432. /*
  433. * NOTE:
  434. *
  435. * We'll only register an isr on non-emulator (ie. real UE)
  436. * systems.
  437. *
  438. * On the emulator, emulator_soft_irq_isr() will be calling
  439. * ipa3_isr, so hence, no isr registration here, and instead,
  440. * we'll pass the address of ipa3_isr to the gsi layer where
  441. * emulator interrupts are handled...
  442. */
  443. if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
  444. res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
  445. IRQF_TRIGGER_RISING, "ipa", ipa_dev);
  446. if (res) {
  447. IPAERR(
  448. "fail to register IPA IRQ handler irq=%d\n",
  449. ipa_irq);
  450. destroy_workqueue(ipa_interrupt_wq);
  451. ipa_interrupt_wq = NULL;
  452. return -ENODEV;
  453. }
  454. IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
  455. res = enable_irq_wake(ipa_irq);
  456. if (res)
  457. IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
  458. ipa_irq, res);
  459. else
  460. IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
  461. }
  462. spin_lock_init(&suspend_wa_lock);
  463. return 0;
  464. }
  465. /**
  466. * ipa3_interrupts_destroy() - Destroy the IPA interrupts framework
  467. * @ipa_irq: The interrupt number to allocate
  468. * @ee: Execution environment
  469. * @ipa_dev: The basic device structure representing the IPA driver
  470. *
  471. * - Disable apps processor wakeup by IPA interrupts
  472. * - Unregister the ipa interrupt handler - ipa3_isr
  473. * - Destroy the interrupt workqueue
  474. */
  475. void ipa3_interrupts_destroy(u32 ipa_irq, struct device *ipa_dev)
  476. {
  477. if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
  478. disable_irq_wake(ipa_irq);
  479. free_irq(ipa_irq, ipa_dev);
  480. }
  481. destroy_workqueue(ipa_interrupt_wq);
  482. ipa_interrupt_wq = NULL;
  483. }
/**
 * ipa3_suspend_active_aggr_wa() - Emulate suspend IRQ
 * @clnt_hdl:	suspended client handle, IRQ is emulated for this pipe
 *
 * Emulate suspend IRQ to unsuspend client which was suspended with an open
 * aggregation frame in order to bypass HW bug of IRQ not generated when
 * endpoint is suspended during an open aggregation.
 */
void ipa3_suspend_active_aggr_wa(u32 clnt_hdl)
{
	struct ipa3_interrupt_info interrupt_info;
	struct ipa3_interrupt_work_wrap *work_data;
	struct ipa_tx_suspend_irq_data *suspend_interrupt_data;
	int irq_num;
	int aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);

	/* nothing to do unless this pipe has an open aggregation frame */
	if (aggr_active_bitmap & (1 << clnt_hdl)) {
		/* force close aggregation */
		ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));

		/* simulate suspend IRQ */
		irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
		interrupt_info = ipa_interrupt_to_cb[irq_num];
		if (interrupt_info.handler == NULL) {
			IPAERR("no CB function for IPA_TX_SUSPEND_IRQ\n");
			return;
		}
		suspend_interrupt_data = kzalloc(
				sizeof(*suspend_interrupt_data),
				GFP_ATOMIC);
		if (!suspend_interrupt_data) {
			IPAERR("failed allocating suspend_interrupt_data\n");
			return;
		}
		/* report only the force-closed pipe as suspended */
		suspend_interrupt_data->endpoints = 1 << clnt_hdl;

		work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
				GFP_ATOMIC);
		if (!work_data) {
			IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
			goto fail_alloc_work;
		}
		INIT_WORK(&work_data->interrupt_work,
				ipa3_deferred_interrupt_work);
		work_data->handler = interrupt_info.handler;
		work_data->interrupt = IPA_TX_SUSPEND_IRQ;
		work_data->private_data = interrupt_info.private_data;
		/* ownership of suspend_interrupt_data passes to work item */
		work_data->interrupt_data = (void *)suspend_interrupt_data;
		queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
		return;
fail_alloc_work:
		kfree(suspend_interrupt_data);
	}
}