/*
 * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

/**
 * DOC: if_ahb.c
 *
 * C file for AHB-specific implementations.
 */

#include "hif.h"
#include "hif_main.h"
#include "hif_debug.h"
#include "hif_io32.h"
#include "ce_main.h"
#include "ce_tasklet.h"
#include "if_ahb.h"
#include "if_pci.h"
#include "ahb_api.h"
#include "pci_api.h"
#include "hif_napi.h"
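
/*
 * IRQF_DISABLED was removed from the kernel in v4.1, but the legacy
 * interrupt path below still passes it to request_irq().  Define it
 * locally to its historical value so the code keeps compiling; on
 * modern kernels the flag has no effect.
 */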
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0)
#define IRQF_DISABLED 0x00000020
#endif

#define HIF_IC_CE0_IRQ_OFFSET 4
#define HIF_IC_MAX_IRQ 54

static uint8_t ic_irqnum[HIF_IC_MAX_IRQ];

/* integrated chip irq names */
const char *ic_irqname[HIF_IC_MAX_IRQ] = {
        "misc-pulse1",
        "misc-latch",
        "sw-exception",
        "watchdog",
        "ce0",
        "ce1",
        "ce2",
        "ce3",
        "ce4",
        "ce5",
        "ce6",
        "ce7",
        "ce8",
        "ce9",
        "ce10",
        "ce11",
        "ce12",
        "ce13",
        "host2wbm-desc-feed",
        "host2reo-re-injection",
        "host2reo-command",
        "host2rxdma-monitor-ring3",
        "host2rxdma-monitor-ring2",
        "host2rxdma-monitor-ring1",
        "reo2ost-exception",
        "wbm2host-rx-release",
        "reo2host-status",
        "reo2host-destination-ring4",
        "reo2host-destination-ring3",
        "reo2host-destination-ring2",
        "reo2host-destination-ring1",
        "rxdma2host-monitor-destination-mac3",
        "rxdma2host-monitor-destination-mac2",
        "rxdma2host-monitor-destination-mac1",
        "ppdu-end-interrupts-mac3",
        "ppdu-end-interrupts-mac2",
        "ppdu-end-interrupts-mac1",
        "rxdma2host-monitor-status-ring-mac3",
        "rxdma2host-monitor-status-ring-mac2",
        "rxdma2host-monitor-status-ring-mac1",
        "host2rxdma-host-buf-ring-mac3",
        "host2rxdma-host-buf-ring-mac2",
        "host2rxdma-host-buf-ring-mac1",
        "rxdma2host-destination-ring-mac3",
        "rxdma2host-destination-ring-mac2",
        "rxdma2host-destination-ring-mac1",
        "host2tcl-input-ring4",
        "host2tcl-input-ring3",
        "host2tcl-input-ring2",
        "host2tcl-input-ring1",
        "wbm2host-tx-completions-ring3",
        "wbm2host-tx-completions-ring2",
        "wbm2host-tx-completions-ring1",
        "tcl2host-status-ring",
};
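
/*
 * Note: ic_irqname[] is indexed by the logical interrupt id used in the
 * host/target interrupt map.  Copy-engine interrupts start at
 * HIF_IC_CE0_IRQ_OFFSET, so CE "n" resolves to
 * ic_irqname[HIF_IC_CE0_IRQ_OFFSET + n] (e.g. CE3 -> "ce3").  The names
 * are looked up with platform_get_irq_byname(), so each entry must match
 * the interrupt-names given in the device tree verbatim (including
 * odd-looking entries such as "reo2ost-exception"), and the resolved
 * Linux irq numbers are cached in ic_irqnum[] by hif_ahb_configure_irq().
 */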

/**
 * hif_ahb_disable_isr() - disable isr
 * @scn: struct hif_softc
 *
 * This function disables the isr and kills the tasklets.
 *
 * Return: void
 */
void hif_ahb_disable_isr(struct hif_softc *scn)
{
        struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

        hif_nointrs(scn);
        ce_tasklet_kill(scn);
        hif_grp_tasklet_kill(scn);
        tasklet_kill(&sc->intr_tq);
        qdf_atomic_set(&scn->active_tasklet_cnt, 0);
        qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}

/**
 * hif_ahb_dump_registers() - dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps the hif bus debug registers.
 *
 * Return: 0 (failures are logged)
 */
int hif_ahb_dump_registers(struct hif_softc *hif_ctx)
{
        int status;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

        status = hif_dump_ce_registers(scn);
        if (status)
                HIF_ERROR("%s: Dump CE Registers Failed status %d", __func__,
                          status);

        return 0;
}

/**
 * hif_ahb_close() - hif_bus_close
 * @scn: pointer to the hif context.
 *
 * This is a callback function for hif_bus_close.
 *
 * Return: n/a
 */
void hif_ahb_close(struct hif_softc *scn)
{
        hif_ce_close(scn);
}

/**
 * hif_ahb_open() - hif_bus_open
 * @hif_ctx: hif context
 * @bus_type: bus type
 *
 * This is a callback function for hif_bus_open.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_ahb_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{
        struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

        qdf_spinlock_create(&sc->irq_lock);

        return hif_ce_open(hif_ctx);
}

/**
 * hif_ahb_bus_configure() - Configure the bus
 * @scn: pointer to the hif context.
 *
 * This function configures the AHB bus.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_bus_configure(struct hif_softc *scn)
{
        return hif_pci_bus_configure(scn);
}

/**
 * hif_configure_msi_ahb() - Configure MSI interrupts
 * @sc: pointer to the hif context
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_configure_msi_ahb(struct hif_pci_softc *sc)
{
        return 0;
}

/**
 * hif_ahb_configure_legacy_irq() - Configure Legacy IRQ
 * @sc: pointer to the hif context.
 *
 * This function registers the irq handler and enables legacy interrupts.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
{
        int ret = 0;
        struct hif_softc *scn = HIF_GET_SOFTC(sc);
        struct platform_device *pdev = (struct platform_device *)sc->pdev;
        int irq = 0;

        /* do not support MSI or MSI IRQ failed */
        tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
        irq = platform_get_irq_byname(pdev, "legacy");
        if (irq < 0) {
                dev_err(&pdev->dev, "Unable to get irq\n");
                ret = -1;
                goto end;
        }
        ret = request_irq(irq, hif_pci_interrupt_handler,
                          IRQF_DISABLED, "wlan_ahb", sc);
        if (ret) {
                dev_err(&pdev->dev, "ath_request_irq failed\n");
                ret = -1;
                goto end;
        }
        sc->irq = irq;

        /* Use Legacy PCI Interrupts */
        hif_write32_mb(sc->mem + (SOC_CORE_BASE_ADDRESS |
                       PCIE_INTR_ENABLE_ADDRESS),
                       PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
        /* read once to flush */
        hif_read32_mb(sc->mem + (SOC_CORE_BASE_ADDRESS |
                      PCIE_INTR_ENABLE_ADDRESS));

end:
        return ret;
}
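
/**
 * hif_ahb_configure_irq() - per-source interrupt setup for integrated chips
 * @sc: pointer to the hif context.
 *
 * Requests one Linux irq per copy engine (looked up by name from
 * ic_irqname[], handled by hif_ahb_interrupt_handler()) and one irq per
 * entry of every configured external interrupt group (handled by
 * hif_ext_group_ahb_interrupt_handler()).  The resolved irq numbers are
 * cached in ic_irqnum[] so they can later be enabled, disabled and freed.
 *
 * Return: 0 for success. nonzero for failure.
 */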
int hif_ahb_configure_irq(struct hif_pci_softc *sc)
{
        int ret = 0;
        struct hif_softc *scn = HIF_GET_SOFTC(sc);
        struct platform_device *pdev = (struct platform_device *)sc->pdev;
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
        struct hif_ext_group_entry *hif_ext_group;
        int irq = 0;
        int i, j;

        /* configure per CE interrupts */
        for (i = 0; i < scn->ce_count; i++) {
                irq = platform_get_irq_byname(pdev,
                                ic_irqname[HIF_IC_CE0_IRQ_OFFSET + i]);
                ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i] = irq;
                ret = request_irq(irq, hif_ahb_interrupt_handler,
                                  IRQF_TRIGGER_RISING,
                                  ic_irqname[HIF_IC_CE0_IRQ_OFFSET + i],
                                  &hif_state->tasklets[i]);
                if (ret) {
                        dev_err(&pdev->dev, "ath_request_irq failed\n");
                        ret = -1;
                        goto end;
                }
                hif_ahb_irq_enable(scn, i);
        }

        /* configure external interrupts */
        for (i = 0; i < hif_state->hif_num_extgroup; i++) {
                hif_ext_group = &hif_state->hif_ext_group[i];
                if (hif_ext_group->configured) {
                        tasklet_init(&hif_ext_group->intr_tq,
                                     hif_ext_grp_tasklet,
                                     (unsigned long)hif_ext_group);
                        hif_ext_group->inited = true;
                        for (j = 0; j < hif_ext_group->numirq; j++) {
                                irq = platform_get_irq_byname(pdev,
                                        ic_irqname[hif_ext_group->irq[j]]);
                                ic_irqnum[hif_ext_group->irq[j]] = irq;
                                ret = request_irq(irq,
                                        hif_ext_group_ahb_interrupt_handler,
                                        IRQF_TRIGGER_RISING, "wlan_ahb",
                                        hif_ext_group);
                                if (ret) {
                                        dev_err(&pdev->dev,
                                                "ath_request_irq failed\n");
                                        ret = -1;
                                        goto end;
                                }
                        }
                }
        }

end:
        return ret;
}
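
/**
 * hif_ahb_interrupt_handler() - per-CE irq handler
 * @irq: Linux irq number
 * @context: the ce_tasklet_entry registered for this CE in
 *           hif_ahb_configure_irq()
 *
 * Return: irqreturn_t from ce_dispatch_interrupt()
 */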
irqreturn_t hif_ahb_interrupt_handler(int irq, void *context)
{
        struct ce_tasklet_entry *tasklet_entry = context;

        return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}
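
/**
 * hif_ext_group_ahb_interrupt_handler() - irq handler for external
 * (non-CE) interrupt groups
 * @irq: Linux irq number
 * @context: the hif_ext_group_entry registered in hif_ahb_configure_irq()
 *
 * Masks the whole group, accounts for the pending bottom half, then
 * defers the work either to the group's NAPI instance or to its tasklet.
 *
 * Return: IRQ_HANDLED
 */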
irqreturn_t hif_ext_group_ahb_interrupt_handler(int irq, void *context)
{
        struct hif_ext_group_entry *hif_ext_group = context;
        struct HIF_CE_state *hif_state = hif_ext_group->hif_state;
        struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
        struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
        uint32_t grp_id = hif_ext_group->grp_id;

        hif_grp_irq_disable(scn, grp_id);
        qdf_atomic_inc(&scn->active_grp_tasklet_cnt);

        if (hif_ext_napi_enabled(hif_hdl, grp_id))
                hif_napi_schedule_grp(hif_hdl, grp_id);
        else
                tasklet_schedule(&hif_ext_group->intr_tq);

        return IRQ_HANDLED;
}

/**
 * hif_target_sync_ahb() - ensure the target is ready
 * @scn: hif control structure
 *
 * Informs the fw that we plan to use legacy interrupts so that it can
 * begin booting, and waits for the fw to finish booting before
 * continuing.  Should be called before trying to write to the target's
 * other registers for the first time.
 *
 * Return: 0 on success, -EIO if the fw does not signal readiness in time.
 */
int hif_target_sync_ahb(struct hif_softc *scn)
{
        hif_write32_mb(scn->mem + FW_INDICATOR_ADDRESS, FW_IND_HOST_READY);
        if (HAS_FW_INDICATOR) {
                /* poll for up to ~5 seconds (500 iterations x 10 ms) */
                int wait_limit = 500;
                int fw_ind = 0;

                while (1) {
                        fw_ind = hif_read32_mb(scn->mem +
                                               FW_INDICATOR_ADDRESS);
                        if (fw_ind & FW_IND_INITIALIZED)
                                break;
                        if (wait_limit-- < 0)
                                break;
                        hif_write32_mb(scn->mem + (SOC_CORE_BASE_ADDRESS |
                                       PCIE_INTR_ENABLE_ADDRESS),
                                       PCIE_INTR_FIRMWARE_MASK);
                        qdf_mdelay(10);
                }
                if (wait_limit < 0) {
                        HIF_TRACE("%s: FW signal timed out", __func__);
                        return -EIO;
                }
                HIF_TRACE("%s: Got FW signal, retries = %x", __func__,
                          500 - wait_limit);
        }
        return 0;
}

/**
 * hif_ahb_disable_bus() - Disable the bus
 * @scn: pointer to the hif context
 *
 * This function disables the bus and holds the target in reset state.
 *
 * Return: none
 */
void hif_ahb_disable_bus(struct hif_softc *scn)
{
        struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
        void __iomem *mem;
        struct platform_device *pdev = (struct platform_device *)sc->pdev;
        struct resource *memres = NULL;
        int mem_pa_size = 0;

        /* Disable WIFI clock input */
        if (sc->mem) {
                memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                if (!memres) {
                        HIF_INFO("%s: Failed to get IORESOURCE_MEM\n",
                                 __func__);
                        return;
                }
                mem_pa_size = memres->end - memres->start + 1;

                hif_ahb_clk_enable_disable(&pdev->dev, 0);
                hif_ahb_device_reset(scn);
                mem = (void __iomem *)sc->mem;
                if (mem) {
                        devm_iounmap(&pdev->dev, mem);
                        devm_release_mem_region(&pdev->dev, scn->mem_pa,
                                                mem_pa_size);
                        sc->mem = NULL;
                }
        }
        scn->mem = NULL;
}

/**
 * hif_ahb_enable_bus() - Enable the bus
 * @ol_sc: pointer to the hif context
 * @dev: dev
 * @bdev: bus dev
 * @bid: bus id
 * @type: bus type
 *
 * This function enables the radio bus by enabling the necessary clocks
 * and waits for the target to get ready to proceed further.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_ahb_enable_bus(struct hif_softc *ol_sc,
                              struct device *dev, void *bdev,
                              const hif_bus_id *bid,
                              enum hif_enable_type type)
{
        int ret = 0;
        int hif_type;
        int target_type;
        const struct platform_device_id *id =
                                        (struct platform_device_id *)bid;
        struct platform_device *pdev = bdev;
        struct hif_target_info *tgt_info = NULL;
        struct resource *memres = NULL;
        void __iomem *mem = NULL;
        uint32_t revision_id = 0;
        struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);

        sc->pdev = (struct pci_dev *)pdev;
        sc->dev = &pdev->dev;
        sc->devid = id->driver_data;

        ret = hif_get_device_type(id->driver_data, revision_id,
                                  &hif_type, &target_type);
        if (ret < 0) {
                HIF_ERROR("%s: invalid device ret %d id %d revision_id %d",
                          __func__, ret, (int)id->driver_data, revision_id);
                return QDF_STATUS_E_FAILURE;
        }

        memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!memres) {
                HIF_INFO("%s: Failed to get IORESOURCE_MEM\n", __func__);
                return -EIO;
        }

        ret = dma_set_mask(dev, DMA_BIT_MASK(32));
        if (ret) {
                HIF_INFO("ath: 32-bit DMA not available\n");
                goto err_cleanup1;
        }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
#else
        ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
#endif
        if (ret) {
                HIF_ERROR("%s: failed to set dma mask error = %d",
                          __func__, ret);
                return ret;
        }

        /* Arrange for access to Target SoC registers. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
        mem = devm_ioremap_resource(&pdev->dev, memres);
#else
        mem = devm_request_and_ioremap(&pdev->dev, memres);
#endif
        if (IS_ERR(mem)) {
                HIF_INFO("ath: ioremap error\n");
                ret = PTR_ERR(mem);
                goto err_cleanup1;
        }

        sc->mem = mem;
        ol_sc->mem = mem;
        ol_sc->mem_pa = memres->start;

        tgt_info = hif_get_target_info_handle((struct hif_opaque_softc *)ol_sc);
        tgt_info->target_type = target_type;
        hif_register_tbl_attach(ol_sc, hif_type);
        hif_target_register_tbl_attach(ol_sc, target_type);

        /* QCA_WIFI_QCA8074_VP: Should not be executed on 8074 VP platform */
        if (tgt_info->target_type != TARGET_TYPE_QCA8074) {
                if (hif_ahb_enable_radio(sc, pdev, id) != 0) {
                        HIF_INFO("error in enabling soc\n");
                        return -EIO;
                }

                if (hif_target_sync_ahb(ol_sc) < 0) {
                        ret = -EIO;
                        goto err_target_sync;
                }
        }
        HIF_TRACE("%s: X - hif_type = 0x%x, target_type = 0x%x",
                  __func__, hif_type, target_type);

        return QDF_STATUS_SUCCESS;

err_target_sync:
        /* QCA_WIFI_QCA8074_VP: Should not be executed on 8074 VP platform */
        if (tgt_info->target_type != TARGET_TYPE_QCA8074) {
                HIF_INFO("Error: Disabling target\n");
                hif_ahb_disable_bus(ol_sc);
        }
err_cleanup1:
        return ret;
}

/**
 * hif_ahb_reset_soc() - reset the SoC
 * @hif_ctx: HIF context
 *
 * This function resets the SoC and holds the target in reset state.
 *
 * Return: void
 */
void hif_ahb_reset_soc(struct hif_softc *hif_ctx)
{
        hif_ahb_device_reset(hif_ctx);
}

/**
 * hif_ahb_nointrs() - disable IRQ
 * @scn: struct hif_softc
 *
 * This function stops interrupt(s).
 *
 * Return: none
 */
void hif_ahb_nointrs(struct hif_softc *scn)
{
        int i;
        struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

        ce_unregister_irq(hif_state, CE_ALL_BITMAP);

        if (scn->request_irq_done == false)
                return;

        if (sc->num_msi_intrs > 0) {
                /* MSI interrupt(s) */
                for (i = 0; i < sc->num_msi_intrs; i++)
                        free_irq(sc->irq + i, sc);
                sc->num_msi_intrs = 0;
        } else {
                if (!scn->per_ce_irq) {
                        free_irq(sc->irq, sc);
                } else {
                        for (i = 0; i < scn->ce_count; i++)
                                free_irq(ic_irqnum[HIF_IC_CE0_IRQ_OFFSET + i],
                                         sc);
                }
        }
        scn->request_irq_done = false;
}
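
/*
 * Per-CE interrupt masking on per_ce_irq (integrated) targets is done
 * through two host interrupt-enable registers: bit ce_id in
 * HOST_IE_ADDRESS gates the interrupt for a CE whose pipe direction
 * includes PIPEDIR_OUT, and bit ce_id in HOST_IE_ADDRESS_2 gates it for
 * PIPEDIR_IN.  The read-modify-write sequences below are serialized with
 * irq_reg_lock.  In hif_ahb_irq_enable(), non per-CE targets fall back
 * to hif_pci_irq_enable().
 */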

/**
 * hif_ahb_irq_enable() - enable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * This function enables the interrupt for the radio.
 *
 * Return: N/A
 */
void hif_ahb_irq_enable(struct hif_softc *scn, int ce_id)
{
        uint32_t regval;
        uint32_t reg_offset = 0;
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
        struct CE_pipe_config *target_ce_conf =
                                        &hif_state->target_ce_config[ce_id];

        if (scn->per_ce_irq) {
                if (target_ce_conf->pipedir & PIPEDIR_OUT) {
                        reg_offset = HOST_IE_ADDRESS;
                        qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
                        regval = hif_read32_mb(scn->mem + reg_offset);
                        regval |= (1 << ce_id);
                        hif_write32_mb(scn->mem + reg_offset, regval);
                        qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
                }
                if (target_ce_conf->pipedir & PIPEDIR_IN) {
                        reg_offset = HOST_IE_ADDRESS_2;
                        qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
                        regval = hif_read32_mb(scn->mem + reg_offset);
                        regval |= (1 << ce_id);
                        hif_write32_mb(scn->mem + reg_offset, regval);
                        qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
                }
        } else {
                hif_pci_irq_enable(scn, ce_id);
        }
}

/**
 * hif_ahb_irq_disable() - disable copy engine IRQ
 * @scn: struct hif_softc
 * @ce_id: ce_id
 *
 * Return: N/A
 */
void hif_ahb_irq_disable(struct hif_softc *scn, int ce_id)
{
        uint32_t regval;
        uint32_t reg_offset = 0;
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
        struct CE_pipe_config *target_ce_conf =
                                        &hif_state->target_ce_config[ce_id];

        if (scn->per_ce_irq) {
                if (target_ce_conf->pipedir & PIPEDIR_OUT) {
                        reg_offset = HOST_IE_ADDRESS;
                        qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
                        regval = hif_read32_mb(scn->mem + reg_offset);
                        regval &= ~(1 << ce_id);
                        hif_write32_mb(scn->mem + reg_offset, regval);
                        qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
                }
                if (target_ce_conf->pipedir & PIPEDIR_IN) {
                        reg_offset = HOST_IE_ADDRESS_2;
                        qdf_spin_lock_irqsave(&hif_state->irq_reg_lock);
                        regval = hif_read32_mb(scn->mem + reg_offset);
                        regval &= ~(1 << ce_id);
                        hif_write32_mb(scn->mem + reg_offset, regval);
                        qdf_spin_unlock_irqrestore(&hif_state->irq_reg_lock);
                }
        }
}
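
/*
 * hif_ahb_grp_irq_disable()/hif_ahb_grp_irq_enable() - mask/unmask an
 * external interrupt group
 * @scn: struct hif_softc
 * @grp_id: index into hif_state->hif_ext_group[]
 *
 * Both helpers walk the group's irq list and act on the Linux irq
 * numbers cached in ic_irqnum[] by hif_ahb_configure_irq().  The disable
 * path uses disable_irq_nosync() so the mask can be applied from
 * interrupt context without waiting for a running handler to finish.
 */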
void hif_ahb_grp_irq_disable(struct hif_softc *scn, uint32_t grp_id)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
        struct hif_ext_group_entry *hif_ext_group;
        uint32_t i;

        hif_ext_group = &hif_state->hif_ext_group[grp_id];
        for (i = 0; i < hif_ext_group->numirq; i++)
                disable_irq_nosync(ic_irqnum[hif_ext_group->irq[i]]);
}

void hif_ahb_grp_irq_enable(struct hif_softc *scn, uint32_t grp_id)
{
        struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
        struct hif_ext_group_entry *hif_ext_group;
        uint32_t i;

        hif_ext_group = &hif_state->hif_ext_group[grp_id];
        for (i = 0; i < hif_ext_group->numirq; i++)
                enable_irq(ic_irqnum[hif_ext_group->irq[i]]);
}