/* if_ipci.c */
  1. /*
  2. * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for any
  6. * purpose with or without fee is hereby granted, provided that the above
  7. * copyright notice and this permission notice appear in all copies.
  8. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  9. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  10. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  11. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  12. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  13. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  14. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  15. */
  16. #include <linux/slab.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/if_arp.h>
  19. #include "hif_io32.h"
  20. #include "if_ipci.h"
  21. #include "hif.h"
  22. #include "target_type.h"
  23. #include "hif_main.h"
  24. #include "ce_main.h"
  25. #include "ce_api.h"
  26. #include "ce_internal.h"
  27. #include "ce_reg.h"
  28. #include "ce_bmi.h"
  29. #include "regtable.h"
  30. #include "hif_hw_version.h"
  31. #include <linux/debugfs.h>
  32. #include <linux/seq_file.h>
  33. #include "qdf_status.h"
  34. #include "qdf_atomic.h"
  35. #include "pld_common.h"
  36. #include "mp_dev.h"
  37. #include "hif_debug.h"
  38. #include "ce_tasklet.h"
  39. #include "targaddrs.h"
  40. #include "hif_exec.h"
  41. #include "ipci_api.h"
/**
 * hif_ipci_enable_power_management() - enable HIF power management
 * @hif_sc: hif context
 * @is_packet_log_enabled: unused on the ipci bus; kept for API symmetry
 *                         with the other bus layers
 *
 * Starts the runtime-PM machinery for this hif instance.
 */
void hif_ipci_enable_power_management(struct hif_softc *hif_sc,
				      bool is_packet_log_enabled)
{
	hif_rtpm_start(hif_sc);
}
/**
 * hif_ipci_disable_power_management() - disable HIF power management
 * @hif_ctx: hif context
 *
 * Stops the runtime-PM machinery started at enable time.
 */
void hif_ipci_disable_power_management(struct hif_softc *hif_ctx)
{
	hif_rtpm_stop(hif_ctx);
}
/**
 * hif_ipci_display_stats() - print copy-engine statistics
 * @hif_ctx: hif context
 */
void hif_ipci_display_stats(struct hif_softc *hif_ctx)
{
	hif_display_ce_stats(hif_ctx);
}
  55. void hif_ipci_clear_stats(struct hif_softc *hif_ctx)
  56. {
  57. struct hif_ipci_softc *ipci_ctx = HIF_GET_IPCI_SOFTC(hif_ctx);
  58. if (!ipci_ctx) {
  59. hif_err("hif_ctx null");
  60. return;
  61. }
  62. hif_clear_ce_stats(&ipci_ctx->ce_sc);
  63. }
/**
 * hif_ipci_open() - hif bus open for the ipci bus
 * @hif_ctx: hif context
 * @bus_type: bus type recorded on the context
 *
 * Records the bus type, brings up runtime-PM, creates the irq lock and
 * opens the copy-engine layer.
 *
 * Return: QDF_STATUS from hif_ce_open()
 */
QDF_STATUS hif_ipci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(hif_ctx);

	hif_ctx->bus_type = bus_type;
	hif_rtpm_open(hif_ctx);

	qdf_spinlock_create(&sc->irq_lock);

	return hif_ce_open(hif_ctx);
}
/**
 * hif_ce_msi_map_ce_to_irq() - map CE to IRQ
 * @scn: hif context
 * @ce_id: CE Id
 *
 * Return: IRQ number cached for this CE at msi configure time
 */
static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
{
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);

	return ipci_scn->ce_msi_irq_num[ce_id];
}
/**
 * hif_ipci_bus_configure() - configure the ipci bus
 * @hif_sc: hif context
 *
 * Enables the WLAN subsystem, configures the copy engines and IRQs and
 * records which CE/irq pair is expected to wake the host. On failure
 * the CE config and WLAN enable are rolled back in reverse order.
 *
 * Return: 0 for success, non-zero error code otherwise
 */
int hif_ipci_bus_configure(struct hif_softc *hif_sc)
{
	int status = 0;
	uint8_t wake_ce_id;

	hif_ce_prepare_config(hif_sc);

	status = hif_wlan_enable(hif_sc);
	if (status) {
		hif_err("hif_wlan_enable error = %d", status);
		return status;
	}

	A_TARGET_ACCESS_LIKELY(hif_sc);

	status = hif_config_ce(hif_sc);
	if (status)
		goto disable_wlan;

	status = hif_get_wake_ce_id(hif_sc, &wake_ce_id);
	if (status)
		goto unconfig_ce;

	status = hif_configure_irq(hif_sc);
	if (status < 0)
		goto unconfig_ce;

	/* remember the wake irq so suspend can keep it armed */
	hif_sc->wake_irq = hif_ce_msi_map_ce_to_irq(hif_sc, wake_ce_id);
	hif_sc->wake_irq_type = HIF_PM_CE_WAKE;

	hif_info("expecting wake from ce %d, irq %d",
		 wake_ce_id, hif_sc->wake_irq);

	A_TARGET_ACCESS_UNLIKELY(hif_sc);

	return status;

unconfig_ce:
	hif_unconfig_ce(hif_sc);
disable_wlan:
	A_TARGET_ACCESS_UNLIKELY(hif_sc);
	hif_wlan_disable(hif_sc);

	hif_err("Failed, status = %d", status);
	return status;
}
/**
 * hif_ipci_close() - hif bus close for the ipci bus
 * @hif_sc: hif context
 *
 * Shuts down runtime-PM and the copy-engine layer.
 */
void hif_ipci_close(struct hif_softc *hif_sc)
{
	hif_rtpm_close(hif_sc);
	hif_ce_close(hif_sc);
}
/**
 * hif_ce_srng_msi_free_irq() - free CE msi IRQs
 * @scn: struct hif_softc
 *
 * Looks up the CE MSI assignment, then removes the affinity hint and
 * releases the irq for every CE whose tasklet was initialized.
 *
 * Return: ErrorNo
 */
static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
{
	int ret;
	int ce_id, irq;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return ret;

	/* needs to match the ce_id -> irq data mapping
	 * used in the srng parameter configuration
	 */
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		unsigned int msi_data;

		/* a CE without an inited tasklet never requested an irq */
		if (!ce_sc->tasklets[ce_id].inited)
			continue;

		msi_data = (ce_id % msi_data_count) + msi_irq_start;
		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);

		hif_ce_irq_remove_affinity_hint(irq);

		hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
			  ce_id, msi_data, irq);

		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
	}

	return ret;
}
  158. /**
  159. * hif_ipci_deconfigure_grp_irq(): deconfigure HW block IRQ
  160. * @scn: struct hif_softc
  161. *
  162. * Return: none
  163. */
  164. void hif_ipci_deconfigure_grp_irq(struct hif_softc *scn)
  165. {
  166. int i, j, irq;
  167. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  168. struct hif_exec_context *hif_ext_group;
  169. for (i = 0; i < hif_state->hif_num_extgroup; i++) {
  170. hif_ext_group = hif_state->hif_ext_group[i];
  171. if (hif_ext_group->irq_requested) {
  172. hif_ext_group->irq_requested = false;
  173. for (j = 0; j < hif_ext_group->numirq; j++) {
  174. irq = hif_ext_group->os_irq[j];
  175. pfrm_free_irq(scn->qdf_dev->dev,
  176. irq, hif_ext_group);
  177. }
  178. hif_ext_group->numirq = 0;
  179. }
  180. }
  181. }
  182. void hif_ipci_nointrs(struct hif_softc *scn)
  183. {
  184. int ret;
  185. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  186. scn->free_irq_done = true;
  187. ce_unregister_irq(hif_state, CE_ALL_BITMAP);
  188. if (scn->request_irq_done == false)
  189. return;
  190. hif_ipci_deconfigure_grp_irq(scn);
  191. ret = hif_ce_srng_msi_free_irq(scn);
  192. scn->request_irq_done = false;
  193. }
/**
 * hif_ipci_disable_bus() - hif disable bus for ipci
 * @scn: hif context
 *
 * Tears down the diag procfs entry and forgets the device memory
 * mapping recorded on the hif context.
 */
void hif_ipci_disable_bus(struct hif_softc *scn)
{
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn);
	void __iomem *mem;

	/* Attach did not succeed, all resources have been
	 * freed in error handler
	 */
	if (!sc)
		return;

	mem = (void __iomem *)sc->mem;
	if (mem) {
		hif_dump_pipe_debug_count(scn);
		if (scn->athdiag_procfs_inited) {
			athdiag_procfs_remove();
			scn->athdiag_procfs_inited = false;
		}
		/* NOTE(review): only scn->mem is cleared here; sc->mem keeps
		 * its value — confirm that is intentional.
		 */
		scn->mem = NULL;
	}
	hif_info("X");
}
#ifdef CONFIG_PLD_PCIE_CNSS
/**
 * hif_ipci_prevent_linkdown() - enable/disable PCIe power collapse
 * @scn: hif context
 * @flag: true to prevent link-down (disable power collapse)
 *
 * Forwards the vote to the runtime-PM layer and the platform driver.
 */
void hif_ipci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	int errno;

	hif_info("wlan: %s pcie power collapse", flag ? "disable" : "enable");
	hif_runtime_prevent_linkdown(scn, flag);

	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
	if (errno)
		hif_err("Failed pld_wlan_pm_control; errno %d", errno);
}
#else
/* Stub when the platform driver does not provide PM control */
void hif_ipci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
}
#endif
/**
 * hif_ipci_bus_suspend() - suspend the bus
 * @scn: hif context
 *
 * Disables all IRQs except the wake irq, arms the wake irq, then makes
 * sure outstanding tasklets and the fw diag CE are drained. Any failure
 * unwinds the irq state in reverse order before returning.
 *
 * Return: 0 for success and non-zero error code for failure
 */
int hif_ipci_bus_suspend(struct hif_softc *scn)
{
	int ret;

	ret = hif_apps_disable_irqs_except_wake_irq(GET_HIF_OPAQUE_HDL(scn));
	if (ret) {
		hif_err("Failed to disable IRQs");
		goto disable_irq_fail;
	}

	ret = hif_apps_enable_irq_wake(GET_HIF_OPAQUE_HDL(scn));
	if (ret) {
		hif_err("Failed to enable Wake-IRQ");
		goto enable_wake_irq_fail;
	}

	if (QDF_IS_STATUS_ERROR(hif_try_complete_tasks(scn))) {
		hif_err("hif_try_complete_tasks timed-out, so abort suspend");
		ret = -EBUSY;
		goto drain_tasks_fail;
	}

	/*
	 * In an unlikely case, if draining becomes infinite loop,
	 * it returns an error, shall abort the bus suspend.
	 */
	ret = hif_drain_fw_diag_ce(scn);
	if (ret) {
		hif_err("draining fw_diag_ce goes infinite, so abort suspend");
		goto drain_tasks_fail;
	}

	scn->bus_suspended = true;

	return 0;

drain_tasks_fail:
	hif_apps_disable_irq_wake(GET_HIF_OPAQUE_HDL(scn));

enable_wake_irq_fail:
	hif_apps_enable_irqs_except_wake_irq(GET_HIF_OPAQUE_HDL(scn));

disable_irq_fail:
	return ret;
}
  265. int hif_ipci_bus_resume(struct hif_softc *scn)
  266. {
  267. int ret = 0;
  268. ret = hif_apps_disable_irq_wake(GET_HIF_OPAQUE_HDL(scn));
  269. if (ret) {
  270. hif_err("Failed to disable Wake-IRQ");
  271. goto fail;
  272. }
  273. ret = hif_apps_enable_irqs_except_wake_irq(GET_HIF_OPAQUE_HDL(scn));
  274. if (ret)
  275. hif_err("Failed to enable IRQs");
  276. scn->bus_suspended = false;
  277. fail:
  278. return ret;
  279. }
/**
 * hif_ipci_bus_suspend_noirq() - noirq-phase suspend hook
 * @scn: hif context
 *
 * Return: 0 if suspend may proceed, -EBUSY if tasklets are still pending
 */
int hif_ipci_bus_suspend_noirq(struct hif_softc *scn)
{
	/*
	 * If it is system suspend case and wake-IRQ received
	 * just before Kernel issuing suspend_noirq, that must
	 * have scheduled CE2 tasklet, so suspend activity can
	 * be aborted.
	 * Similar scenario for runtime suspend case, would be
	 * handled by hif_rtpm_check_and_request_resume
	 * in hif_ce_interrupt_handler.
	 *
	 */
	if (!hif_rtpm_get_monitor_wake_intr() &&
	    hif_get_num_active_tasklets(scn)) {
		hif_err("Tasklets are pending, abort sys suspend_noirq");
		return -EBUSY;
	}

	return 0;
}
/**
 * hif_ipci_bus_resume_noirq() - noirq-phase resume hook
 * @scn: hif context
 *
 * Nothing to do for the ipci bus.
 *
 * Return: 0 always
 */
int hif_ipci_bus_resume_noirq(struct hif_softc *scn)
{
	return 0;
}
/**
 * hif_ipci_disable_isr() - disable and quiesce all interrupt handling
 * @scn: hif context
 *
 * Kills the exec contexts, unhooks irqs, cancels the pending CE and
 * bus tasklets and resets the active tasklet counters.
 */
void hif_ipci_disable_isr(struct hif_softc *scn)
{
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(scn);

	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	/* Cancel the pending tasklet */
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}
  314. int hif_ipci_dump_registers(struct hif_softc *hif_ctx)
  315. {
  316. int status;
  317. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  318. status = hif_dump_ce_registers(scn);
  319. if (status)
  320. hif_err("Dump CE Registers Failed");
  321. return 0;
  322. }
/**
 * hif_ce_interrupt_handler() - interrupt handler for copy engine
 * @irq: irq number
 * @context: tasklet context
 *
 * Kicks a runtime resume if one is pending, then dispatches the CE
 * interrupt to its tasklet entry.
 *
 * Return: irqreturn_t
 */
static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;

	hif_rtpm_check_and_request_resume(false);

	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}
  336. extern const char *ce_name[];
/**
 * hif_ce_srng_msi_irq_disable() - disable the irq for msi
 * @hif_sc: hif context
 * @ce_id: which ce to disable copy complete interrupts for
 *
 * Return: none
 */
static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
{
	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}
/**
 * hif_ce_srng_msi_irq_enable() - enable the irq for msi
 * @hif_sc: hif context
 * @ce_id: which ce to enable copy complete interrupts for
 *
 * Return: none
 */
static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
{
	pfrm_enable_irq(hif_sc->qdf_dev->dev,
			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}
/**
 * hif_ce_msi_configure_irq() - request an MSI irq per initialized CE
 * @scn: hif context
 *
 * Hooks the per-CE irq enable/disable ops, resolves the MSI assignment
 * and requests one irq per CE with an inited tasklet. On a request
 * failure, all previously requested CE irqs are released.
 *
 * Return: 0 on success, error number otherwise
 */
static int hif_ce_msi_configure_irq(struct hif_softc *scn)
{
	int ret;
	int ce_id, irq;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
	struct hif_ipci_softc *ipci_sc = HIF_GET_IPCI_SOFTC(scn);
	uint8_t wake_ce_id;

	ret = hif_get_wake_ce_id(scn, &wake_ce_id);
	if (ret)
		return ret;

	/* do ce irq assignments */
	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return ret;

	scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
	scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;

	/* needs to match the ce_id -> irq data mapping
	 * used in the srng parameter configuration
	 */
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		unsigned long irqflags = IRQF_SHARED;
		unsigned int msi_data = (ce_id % msi_data_count) +
			msi_irq_start;
		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
		hif_debug("(ce_id %d, msi_data %d, irq %d tasklet %pK)",
			  ce_id, msi_data, irq,
			  &ce_sc->tasklets[ce_id]);

		/* implies the ce is also initialized */
		if (!ce_sc->tasklets[ce_id].inited)
			continue;

		ipci_sc->ce_msi_irq_num[ce_id] = irq;
		ret = pfrm_request_irq(scn->qdf_dev->dev,
				       irq, hif_ce_interrupt_handler,
				       irqflags,
				       ce_name[ce_id],
				       &ce_sc->tasklets[ce_id]);
		if (ret)
			goto free_irq;
	}

	return ret;

free_irq:
	/* the request_irq for the last ce_id failed so skip it. */
	while (ce_id > 0 && ce_id < scn->ce_count) {
		unsigned int msi_data;

		ce_id--;
		msi_data = (ce_id % msi_data_count) + msi_irq_start;
		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
	}

	return ret;
}
/**
 * hif_exec_grp_irq_disable() - disable the irq for group
 * @hif_ext_group: hif exec context
 *
 * Return: none
 */
static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
{
	int i;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	for (i = 0; i < hif_ext_group->numirq; i++)
		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
					hif_ext_group->os_irq[i]);
}
/**
 * hif_exec_grp_irq_enable() - enable the irq for group
 * @hif_ext_group: hif exec context
 *
 * Return: none
 */
static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
{
	int i;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	for (i = 0; i < hif_ext_group->numirq; i++)
		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
}
/**
 * hif_ipci_get_irq_name() - get irq name for logging
 * @irq_no: irq number (unused; every irq gets the same label)
 *
 * Return: a static placeholder name
 */
const char *hif_ipci_get_irq_name(int irq_no)
{
	return "pci-dummy";
}
  452. #ifdef FEATURE_IRQ_AFFINITY
/**
 * hif_ipci_irq_set_affinity_hint() - affine group IRQs to a cpu cluster
 * @hif_ext_group: hif exec context
 * @perf: true to target the perf cluster, false for the little cluster
 *
 * Builds a cpumask per irq from the online CPUs belonging to the
 * selected cluster and applies it through the affinity manager. If no
 * matching CPU is online, only an error is logged per irq.
 */
static
void hif_ipci_irq_set_affinity_hint(struct hif_exec_context *hif_ext_group,
				    bool perf)
{
	int i, ret;
	unsigned int cpus;
	bool mask_set = false;
	int package_id;
	int cpu_cluster = perf ? hif_get_perf_cluster_bitmap() :
				 BIT(CPU_CLUSTER_TYPE_LITTLE);

	for (i = 0; i < hif_ext_group->numirq; i++)
		qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);

	for (i = 0; i < hif_ext_group->numirq; i++) {
		qdf_for_each_online_cpu(cpus) {
			package_id = qdf_topology_physical_package_id(cpus);
			if (package_id >= 0 && BIT(package_id) & cpu_cluster) {
				qdf_cpumask_set_cpu(cpus,
						    &hif_ext_group->
						    new_cpu_mask[i]);
				mask_set = true;
			}
		}
	}
	for (i = 0; i < hif_ext_group->numirq && i < HIF_MAX_GRP_IRQ; i++) {
		if (mask_set) {
			ret = hif_affinity_mgr_set_qrg_irq_affinity((struct hif_softc *)hif_ext_group->hif,
								    hif_ext_group->os_irq[i],
								    hif_ext_group->grp_id, i,
								    &hif_ext_group->new_cpu_mask[i]);
			if (ret)
				qdf_debug("Set affinity %*pbl fails for IRQ %d ",
					  qdf_cpumask_pr_args(&hif_ext_group->
							      new_cpu_mask[i]),
					  hif_ext_group->os_irq[i]);
		} else {
			qdf_err("Offline CPU: Set affinity fails for IRQ: %d",
				hif_ext_group->os_irq[i]);
		}
	}
}
/**
 * hif_ipci_set_grp_intr_affinity() - set affinity for selected ext groups
 * @scn: hif context
 * @grp_intr_bitmask: bitmask of ext group ids to update
 * @perf: true to affine to the perf cluster
 *
 * Also forces a NAPI-complete on every updated group.
 */
void hif_ipci_set_grp_intr_affinity(struct hif_softc *scn,
				    uint32_t grp_intr_bitmask, bool perf)
{
	int i;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		if (!(grp_intr_bitmask & BIT(i)))
			continue;

		hif_ext_group = hif_state->hif_ext_group[i];
		hif_ipci_irq_set_affinity_hint(hif_ext_group, perf);
		qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
	}
}
  507. #endif
  508. #ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * hif_ipci_ce_irq_set_affinity_hint() - affine CE irqs to the perf cluster
 * @scn: hif context
 *
 * Builds a cpumask out of the online CPUs in the perf cluster and
 * applies it to every interrupt-enabled CE irq; the mask applied is
 * cached per CE in the ipci context.
 */
static void hif_ipci_ce_irq_set_affinity_hint(struct hif_softc *scn)
{
	int ret;
	unsigned int cpus;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
	struct hif_ipci_softc *ipci_sc = HIF_GET_IPCI_SOFTC(scn);
	struct CE_attr *host_ce_conf;
	int ce_id;
	qdf_cpu_mask ce_cpu_mask, updated_mask;
	int perf_cpu_cluster = hif_get_perf_cluster_bitmap();
	int package_id;

	host_ce_conf = ce_sc->host_ce_config;
	qdf_cpumask_clear(&ce_cpu_mask);

	qdf_for_each_online_cpu(cpus) {
		package_id = qdf_topology_physical_package_id(cpus);
		if (package_id >= 0 && BIT(package_id) & perf_cpu_cluster) {
			qdf_cpumask_set_cpu(cpus,
					    &ce_cpu_mask);
		}
	}
	if (qdf_cpumask_empty(&ce_cpu_mask)) {
		hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity");
		return;
	}
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		/* skip CEs that run with interrupts disabled (polled) */
		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
			continue;
		qdf_cpumask_copy(&updated_mask, &ce_cpu_mask);
		ret = hif_affinity_mgr_set_ce_irq_affinity(scn, ipci_sc->ce_msi_irq_num[ce_id],
							   ce_id,
							   &updated_mask);
		qdf_cpumask_clear(&ipci_sc->ce_irq_cpu_mask[ce_id]);
		qdf_cpumask_copy(&ipci_sc->ce_irq_cpu_mask[ce_id],
				 &updated_mask);
		if (ret)
			hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
				   qdf_cpumask_pr_args(
					&ipci_sc->ce_irq_cpu_mask[ce_id]),
				   ipci_sc->ce_msi_irq_num[ce_id]);
		else
			hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
				     qdf_cpumask_pr_args(
					&ipci_sc->ce_irq_cpu_mask[ce_id]),
				     ipci_sc->ce_msi_irq_num[ce_id]);
	}
}
/**
 * hif_ipci_config_irq_affinity() - boost cores and affine the CE irqs
 * @scn: hif context
 */
void hif_ipci_config_irq_affinity(struct hif_softc *scn)
{
	hif_core_ctl_set_boost(true);
	/* Set IRQ affinity for CE interrupts*/
	hif_ipci_ce_irq_set_affinity_hint(scn);
}
  561. #endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
  562. #ifdef HIF_CPU_CLEAR_AFFINITY
  563. void hif_ipci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
  564. int intr_ctxt_id, int cpu)
  565. {
  566. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  567. struct hif_exec_context *hif_ext_group;
  568. int i, ret;
  569. if (intr_ctxt_id < hif_state->hif_num_extgroup) {
  570. hif_ext_group = hif_state->hif_ext_group[intr_ctxt_id];
  571. for (i = 0; i < hif_ext_group->numirq; i++) {
  572. qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]);
  573. qdf_cpumask_clear_cpu(cpu,
  574. &hif_ext_group->new_cpu_mask[i]);
  575. ret = hif_affinity_mgr_set_qrg_irq_affinity((struct hif_softc *)hif_ext_group->hif,
  576. hif_ext_group->os_irq[i],
  577. hif_ext_group->grp_id, i,
  578. &hif_ext_group->new_cpu_mask[i]);
  579. if (ret)
  580. hif_err("Set affinity %*pbl fails for IRQ %d ",
  581. qdf_cpumask_pr_args(&hif_ext_group->
  582. new_cpu_mask[i]),
  583. hif_ext_group->os_irq[i]);
  584. else
  585. hif_debug("Set affinity %*pbl for IRQ: %d",
  586. qdf_cpumask_pr_args(&hif_ext_group->
  587. new_cpu_mask[i]),
  588. hif_ext_group->os_irq[0]);
  589. }
  590. }
  591. }
  592. #endif
/**
 * hif_ipci_configure_grp_irq() - configure HW block (group) irqs
 * @scn: hif context
 * @hif_ext_group: hif exec context
 *
 * Hooks the group enable/disable/name/done ops and requests every irq
 * listed in the group.
 *
 * Return: 0 on success, -EFAULT if any request_irq fails
 */
int hif_ipci_configure_grp_irq(struct hif_softc *scn,
			       struct hif_exec_context *hif_ext_group)
{
	int ret = 0;
	int irq = 0;
	int j;

	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
	hif_ext_group->irq_name = &hif_ipci_get_irq_name;
	hif_ext_group->work_complete = &hif_dummy_grp_done;

	for (j = 0; j < hif_ext_group->numirq; j++) {
		irq = hif_ext_group->irq[j];

		hif_info("request_irq = %d for grp %d",
			 irq, hif_ext_group->grp_id);
		ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
				       hif_ext_group_interrupt_handler,
				       IRQF_SHARED | IRQF_NO_SUSPEND,
				       "wlan_EXT_GRP",
				       hif_ext_group);
		if (ret) {
			/* NOTE(review): irqs requested earlier in this loop
			 * are not released here (irq_requested stays false,
			 * so deconfigure skips them) — confirm acceptable.
			 */
			hif_err("request_irq failed ret = %d", ret);
			return -EFAULT;
		}
		hif_ext_group->os_irq[j] = irq;
	}
	hif_ext_group->irq_requested = true;
	return 0;
}
  621. int hif_configure_irq(struct hif_softc *scn)
  622. {
  623. int ret = 0;
  624. if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
  625. scn->request_irq_done = false;
  626. return 0;
  627. }
  628. ret = hif_ce_msi_configure_irq(scn);
  629. if (ret == 0)
  630. goto end;
  631. if (ret < 0) {
  632. hif_err("hif_ipci_configure_irq error = %d", ret);
  633. return ret;
  634. }
  635. end:
  636. scn->request_irq_done = true;
  637. return 0;
  638. }
/**
 * hif_ipci_get_soc_info_pld() - get soc info for ipcie bus from pld target
 * @sc: ipci context
 * @dev: device structure
 *
 * Fills the memory mapping and target version fields from the platform
 * driver's soc info.
 *
 * Return: none
 */
static void hif_ipci_get_soc_info_pld(struct hif_ipci_softc *sc,
				      struct device *dev)
{
	struct pld_soc_info info;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	pld_get_soc_info(dev, &info);
	sc->mem = info.v_addr;
	sc->ce_sc.ol_sc.mem = info.v_addr;
	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
	/* soc_id doubles as the target version for pld targets */
	scn->target_info.target_version = info.soc_id;
	scn->target_info.target_revision = 0;
}
/**
 * hif_ipci_get_soc_info_nopld() - get soc info for ipcie bus for non pld target
 * @sc: ipci context
 * @dev: device structure
 *
 * No-op: non-pld targets have no soc info to fetch here.
 *
 * Return: none
 */
static void hif_ipci_get_soc_info_nopld(struct hif_ipci_softc *sc,
					struct device *dev)
{}
/**
 * hif_is_pld_based_target() - verify if the target is pld based
 * @sc: ipci context
 * @device_id: device id
 *
 * Return: true when platform driver support is present and @device_id
 *	   is a known pld-based target, false otherwise
 */
static bool hif_is_pld_based_target(struct hif_ipci_softc *sc,
				    int device_id)
{
	if (!pld_have_platform_driver_support(sc->dev))
		return false;

	switch (device_id) {
#ifdef QCA_WIFI_QCA6750
	case QCA6750_DEVICE_ID:
#endif
	case WCN6450_DEVICE_ID:
		return true;
	}
	return false;
}
  689. /**
  690. * hif_ipci_init_deinit_ops_attach() - attach ops for ipci
  691. * @sc: ipci context
  692. * @device_id: device id
  693. *
  694. * Return: none
  695. */
  696. static void hif_ipci_init_deinit_ops_attach(struct hif_ipci_softc *sc,
  697. int device_id)
  698. {
  699. if (hif_is_pld_based_target(sc, device_id))
  700. sc->hif_ipci_get_soc_info = hif_ipci_get_soc_info_pld;
  701. else
  702. sc->hif_ipci_get_soc_info = hif_ipci_get_soc_info_nopld;
  703. }
/**
 * hif_ipci_enable_bus() - enable the ipci bus
 * @ol_sc: hif context
 * @dev: device structure
 * @bdev: bus specific structure (unused for ipci)
 * @bid: bus id (unused for ipci)
 * @type: enable type
 *
 * Sets the DMA mask, attaches the soc-info ops, reads the soc info and
 * attaches the register tables for the detected hif/target type.
 *
 * Return: QDF_STATUS_SUCCESS on success, error status otherwise
 */
QDF_STATUS hif_ipci_enable_bus(struct hif_softc *ol_sc,
			       struct device *dev, void *bdev,
			       const struct hif_bus_id *bid,
			       enum hif_enable_type type)
{
	int ret = 0;
	uint32_t hif_type, target_type;
	struct hif_ipci_softc *sc = HIF_GET_IPCI_SOFTC(ol_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
	uint16_t revision_id = 0;
	struct hif_target_info *tgt_info;
	int device_id = HIF_IPCI_DEVICE_ID;

	/* NOTE(review): ol_sc is fed to the cast macros above before this
	 * NULL check; safe only if those macros do not dereference — confirm.
	 */
	if (!ol_sc) {
		hif_err("hif_ctx is NULL");
		return QDF_STATUS_E_NOMEM;
	}

	ret = qdf_set_dma_coherent_mask(dev,
					DMA_COHERENT_MASK_DEFAULT);
	if (ret) {
		hif_err("Failed to set dma mask error = %d", ret);
		return qdf_status_from_os_return(ret);
	}

	sc->dev = dev;
	tgt_info = hif_get_target_info_handle(hif_hdl);
	hif_ipci_init_deinit_ops_attach(sc, device_id);
	sc->hif_ipci_get_soc_info(sc, dev);
	hif_debug("hif_enable_pci done");

	ret = hif_get_device_type(device_id, revision_id,
				  &hif_type, &target_type);
	if (ret < 0) {
		hif_err("Invalid device id/revision_id");
		return QDF_STATUS_E_ABORTED;
	}
	hif_debug("hif_type = 0x%x, target_type = 0x%x",
		  hif_type, target_type);

	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);

	sc->use_register_windowing = false;
	tgt_info->target_type = target_type;

	if (!ol_sc->mem_pa) {
		hif_err("BAR0 uninitialized");
		return QDF_STATUS_E_ABORTED;
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * hif_ipci_needs_bmi() - whether the attached target needs BMI
 * @scn: hif context
 *
 * Return: true for legacy (non-srng) copy-engine targets
 */
bool hif_ipci_needs_bmi(struct hif_softc *scn)
{
	return !ce_srng_based(scn);
}
  753. #ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - vote to keep the device awake
 * @hif_handle: hif opaque handle
 *
 * Sends a force-wake vote to the platform driver and polls, up to
 * FORCE_WAKE_DELAY_TIMEOUT_MS, until the device reports awake. On
 * timeout the vote is released again.
 *
 * Return: 0 on success, -EINVAL on failure
 */
int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
{
	uint32_t timeout = 0;
	struct hif_softc *scn = (struct hif_softc *)hif_handle;
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);

	if (pld_force_wake_request(scn->qdf_dev->dev)) {
		hif_err_rl("force wake request send failed");
		return -EINVAL;
	}

	HIF_STATS_INC(ipci_scn, mhi_force_wake_request_vote, 1);
	while (!pld_is_device_awake(scn->qdf_dev->dev) &&
	       timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) {
		/* busy-wait in atomic context, sleep otherwise */
		if (qdf_in_interrupt())
			qdf_mdelay(FORCE_WAKE_DELAY_MS);
		else
			qdf_sleep(FORCE_WAKE_DELAY_MS);

		timeout += FORCE_WAKE_DELAY_MS;
	}

	if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) {
		hif_err("Unable to wake up mhi");
		HIF_STATS_INC(ipci_scn, mhi_force_wake_failure, 1);
		hif_force_wake_release(hif_handle);
		return -EINVAL;
	}
	HIF_STATS_INC(ipci_scn, mhi_force_wake_success, 1);

	HIF_STATS_INC(ipci_scn, soc_force_wake_success, 1);

	return 0;
}
/**
 * hif_force_wake_release() - release a force-wake vote
 * @hif_handle: hif opaque handle
 *
 * Return: 0 on success, error from the platform driver otherwise
 */
int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
{
	int ret;
	struct hif_softc *scn = (struct hif_softc *)hif_handle;
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);

	ret = pld_force_wake_release(scn->qdf_dev->dev);
	if (ret) {
		hif_err("force wake release failure");
		HIF_STATS_INC(ipci_scn, mhi_force_wake_release_failure, 1);
		return ret;
	}

	HIF_STATS_INC(ipci_scn, mhi_force_wake_release_success, 1);
	HIF_STATS_INC(ipci_scn, soc_force_wake_release_success, 1);
	return 0;
}
  797. void hif_print_ipci_stats(struct hif_ipci_softc *ipci_handle)
  798. {
  799. hif_debug("mhi_force_wake_request_vote: %d",
  800. ipci_handle->stats.mhi_force_wake_request_vote);
  801. hif_debug("mhi_force_wake_failure: %d",
  802. ipci_handle->stats.mhi_force_wake_failure);
  803. hif_debug("mhi_force_wake_success: %d",
  804. ipci_handle->stats.mhi_force_wake_success);
  805. hif_debug("soc_force_wake_register_write_success: %d",
  806. ipci_handle->stats.soc_force_wake_register_write_success);
  807. hif_debug("soc_force_wake_failure: %d",
  808. ipci_handle->stats.soc_force_wake_failure);
  809. hif_debug("soc_force_wake_success: %d",
  810. ipci_handle->stats.soc_force_wake_success);
  811. hif_debug("mhi_force_wake_release_failure: %d",
  812. ipci_handle->stats.mhi_force_wake_release_failure);
  813. hif_debug("mhi_force_wake_release_success: %d",
  814. ipci_handle->stats.mhi_force_wake_release_success);
  815. hif_debug("oc_force_wake_release_success: %d",
  816. ipci_handle->stats.soc_force_wake_release_success);
  817. }
  818. #endif /* FORCE_WAKE */
  819. #if defined(FEATURE_HAL_DELAYED_REG_WRITE) || \
  820. defined(FEATURE_HIF_DELAYED_REG_WRITE)
/**
 * hif_prevent_link_low_power_states() - vote against PCIe L1/L1ss
 * @hif: hif opaque handle
 *
 * Waits for the EP awake state to reset, votes to prevent L1 and then
 * waits for the EP to come awake. Failures bump per-ipci counters but
 * the function still returns 0 in every path; only the prevent_l1
 * state changes.
 *
 * Return: 0 always
 */
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif);
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
	uint32_t start_time = 0, curr_time = 0;
	uint32_t count = 0;

	/* platform without EP-awake support: nothing to do */
	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
		return 0;

	if (hif_is_ep_vote_access_disabled(scn)) {
		hif_info_high("EP access disabled in flight skip vote");
		return 0;
	}

	start_time = curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	while (pld_is_pci_ep_awake(scn->qdf_dev->dev) &&
	       curr_time <= start_time + EP_WAKE_RESET_DELAY_TIMEOUT_MS) {
		/* poll busily at first, then back off to sleeping */
		if (count < EP_VOTE_POLL_TIME_CNT) {
			qdf_udelay(EP_VOTE_POLL_TIME_US);
			count++;
		} else {
			qdf_sleep_us(EP_WAKE_RESET_DELAY_US);
		}
		curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	}

	if (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
		hif_err_rl(" EP state reset is not done to prevent l1");
		ipci_scn->ep_awake_reset_fail++;
		return 0;
	}

	if (pld_prevent_l1(scn->qdf_dev->dev)) {
		hif_err_rl("pld prevent l1 failed");
		ipci_scn->prevent_l1_fail++;
		return 0;
	}

	count = 0;
	ipci_scn->prevent_l1 = true;
	start_time = curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	while (!pld_is_pci_ep_awake(scn->qdf_dev->dev) &&
	       curr_time <= start_time + EP_WAKE_DELAY_TIMEOUT_MS) {
		if (count < EP_VOTE_POLL_TIME_CNT) {
			qdf_udelay(EP_WAKE_RESET_DELAY_US);
			count++;
		} else {
			qdf_sleep_us(EP_WAKE_DELAY_US);
		}
		curr_time = qdf_system_ticks_to_msecs(qdf_system_ticks());
	}

	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) <= 0) {
		hif_err_rl("Unable to wakeup pci ep");
		ipci_scn->ep_awake_set_fail++;
		return 0;
	}

	return 0;
}
  874. void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
  875. {
  876. struct hif_softc *scn = HIF_GET_SOFTC(hif);
  877. struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
  878. if (qdf_likely(ipci_scn->prevent_l1)) {
  879. pld_allow_l1(scn->qdf_dev->dev);
  880. ipci_scn->prevent_l1 = false;
  881. }
  882. }
  883. #endif
  884. #ifndef QCA_WIFI_WCN6450
/**
 * hif_ipci_enable_grp_irqs() - enable all group irqs
 * @scn: hif context
 *
 * Asserts on an unbalanced enable (irqs not currently disabled).
 *
 * Return: status from hif_apps_grp_irqs_enable()
 */
int hif_ipci_enable_grp_irqs(struct hif_softc *scn)
{
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
	int status;

	if (!ipci_scn->grp_irqs_disabled) {
		hif_err("Unbalanced group IRQs Enable called");
		qdf_assert_always(0);
	}

	status = hif_apps_grp_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
	if (!status)
		ipci_scn->grp_irqs_disabled = false;

	return status;
}
/**
 * hif_ipci_disable_grp_irqs() - disable all group irqs
 * @scn: hif context
 *
 * Asserts on an unbalanced disable (irqs already disabled).
 *
 * Return: status from hif_apps_grp_irqs_disable()
 */
int hif_ipci_disable_grp_irqs(struct hif_softc *scn)
{
	struct hif_ipci_softc *ipci_scn = HIF_GET_IPCI_SOFTC(scn);
	int status;

	if (ipci_scn->grp_irqs_disabled) {
		hif_err("Unbalanced group IRQs disable called");
		qdf_assert_always(0);
	}

	status = hif_apps_grp_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
	if (!status)
		ipci_scn->grp_irqs_disabled = true;

	return status;
}
  911. #endif