hif_main.c 55 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381
  1. /*
  2. * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #include "targcfg.h"
  20. #include "qdf_lock.h"
  21. #include "qdf_status.h"
  22. #include "qdf_status.h"
  23. #include <qdf_atomic.h> /* qdf_atomic_read */
  24. #include <targaddrs.h>
  25. #include "hif_io32.h"
  26. #include <hif.h>
  27. #include <target_type.h>
  28. #include "regtable.h"
  29. #define ATH_MODULE_NAME hif
  30. #include <a_debug.h>
  31. #include "hif_main.h"
  32. #include "hif_hw_version.h"
  33. #if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
  34. defined(HIF_IPCI))
  35. #include "ce_tasklet.h"
  36. #include "ce_api.h"
  37. #endif
  38. #include "qdf_trace.h"
  39. #include "qdf_status.h"
  40. #include "hif_debug.h"
  41. #include "mp_dev.h"
  42. #if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
  43. defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
  44. defined(QCA_WIFI_QCA5332)
  45. #include "hal_api.h"
  46. #endif
  47. #include "hif_napi.h"
  48. #include "hif_unit_test_suspend_i.h"
  49. #include "qdf_module.h"
  50. #ifdef HIF_CE_LOG_INFO
  51. #include <qdf_notifier.h>
  52. #include <qdf_hang_event_notifier.h>
  53. #endif
  54. #include <linux/cpumask.h>
  55. #include <pld_common.h>
  56. void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
  57. {
  58. hif_trigger_dump(hif_ctx, cmd_id, start);
  59. }
  60. /**
  61. * hif_get_target_id(): hif_get_target_id
  62. *
  63. * Return the virtual memory base address to the caller
  64. *
  65. * @scn: hif_softc
  66. *
  67. * Return: A_target_id_t
  68. */
  69. A_target_id_t hif_get_target_id(struct hif_softc *scn)
  70. {
  71. return scn->mem;
  72. }
  73. /**
  74. * hif_get_targetdef(): hif_get_targetdef
  75. * @scn: scn
  76. *
  77. * Return: void *
  78. */
  79. void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
  80. {
  81. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  82. return scn->targetdef;
  83. }
  84. #ifdef FORCE_WAKE
  85. void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
  86. bool init_phase)
  87. {
  88. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  89. if (ce_srng_based(scn))
  90. hal_set_init_phase(scn->hal_soc, init_phase);
  91. }
  92. #endif /* FORCE_WAKE */
  93. #ifdef HIF_IPCI
  94. void hif_shutdown_notifier_cb(void *hif_ctx)
  95. {
  96. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  97. scn->recovery = true;
  98. }
  99. #endif
  100. /**
  101. * hif_vote_link_down(): unvote for link up
  102. *
  103. * Call hif_vote_link_down to release a previous request made using
  104. * hif_vote_link_up. A hif_vote_link_down call should only be made
  105. * after a corresponding hif_vote_link_up, otherwise you could be
  106. * negating a vote from another source. When no votes are present
  107. * hif will not guarantee the linkstate after hif_bus_suspend.
  108. *
  109. * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
  110. * and initialization deinitialization sequencences.
  111. *
  112. * Return: n/a
  113. */
  114. void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
  115. {
  116. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  117. QDF_BUG(scn);
  118. if (scn->linkstate_vote == 0)
  119. QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
  120. scn->linkstate_vote);
  121. scn->linkstate_vote--;
  122. hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
  123. if (scn->linkstate_vote == 0)
  124. hif_bus_prevent_linkdown(scn, false);
  125. }
  126. /**
  127. * hif_vote_link_up(): vote to prevent bus from suspending
  128. *
  129. * Makes hif guarantee that fw can message the host normally
  130. * during suspend.
  131. *
  132. * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
  133. * and initialization deinitialization sequencences.
  134. *
  135. * Return: n/a
  136. */
  137. void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
  138. {
  139. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  140. QDF_BUG(scn);
  141. scn->linkstate_vote++;
  142. hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
  143. if (scn->linkstate_vote == 1)
  144. hif_bus_prevent_linkdown(scn, true);
  145. }
  146. /**
  147. * hif_can_suspend_link(): query if hif is permitted to suspend the link
  148. *
  149. * Hif will ensure that the link won't be suspended if the upperlayers
  150. * don't want it to.
  151. *
  152. * SYNCHRONIZATION: MC thread is stopped before bus suspend thus
  153. * we don't need extra locking to ensure votes dont change while
  154. * we are in the process of suspending or resuming.
  155. *
  156. * Return: false if hif will guarantee link up during suspend.
  157. */
  158. bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
  159. {
  160. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  161. QDF_BUG(scn);
  162. return scn->linkstate_vote == 0;
  163. }
/**
 * hif_hia_item_address() - map a host-interest item offset to a target
 *                          address
 * @target_type: TARGET_TYPE_* identifier of the attached chip
 * @item_offset: byte offset of the item within the host-interest area
 *
 * Return: absolute host-interest address for @item_offset, or 0 (after
 *         asserting) for target types without a host-interest area
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
	default:
		/* unknown target type: caller passed a bad id */
		ASSERT(0);
		return 0;
	}
}
  202. /**
  203. * hif_max_num_receives_reached() - check max receive is reached
  204. * @scn: HIF Context
  205. * @count: unsigned int.
  206. *
  207. * Output check status as bool
  208. *
  209. * Return: bool
  210. */
  211. bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
  212. {
  213. if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
  214. return count > 120;
  215. else
  216. return count > MAX_NUM_OF_RECEIVES;
  217. }
  218. /**
  219. * init_buffer_count() - initial buffer count
  220. * @maxSize: qdf_size_t
  221. *
  222. * routine to modify the initial buffer count to be allocated on an os
  223. * platform basis. Platform owner will need to modify this as needed
  224. *
  225. * Return: qdf_size_t
  226. */
  227. qdf_size_t init_buffer_count(qdf_size_t maxSize)
  228. {
  229. return maxSize;
  230. }
  231. /**
  232. * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
  233. * @hif_ctx: hif context
  234. * @htc_htt_tx_endpoint: htt_tx_endpoint
  235. *
  236. * Return: void
  237. */
  238. void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
  239. int htc_htt_tx_endpoint)
  240. {
  241. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  242. if (!scn) {
  243. hif_err("scn or scn->hif_sc is NULL!");
  244. return;
  245. }
  246. scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
  247. }
  248. qdf_export_symbol(hif_save_htc_htt_config_endpoint);
/*
 * qwlan_hw_list - table mapping (target version, subid) pairs to human
 * readable hardware names; consumed by hif_get_hw_name(). Entries with
 * the same .id but different .subid distinguish chip revisions.
 */
static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = QCA6390_V1,
		.subid = 0x0,
		.name = "QCA6390_V1",
	},
	{
		.id = QCA6490_V1,
		.subid = 0x0,
		.name = "QCA6490_V1",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	},
	{
		.id = MANGO_V1,
		.subid = 0xF,
		.name = "MANGO_V1",
	},
	{
		.id = KIWI_V1,
		.subid = 0,
		.name = "KIWI_V1",
	},
	{
		.id = KIWI_V2,
		.subid = 0,
		.name = "KIWI_V2",
	},
	{
		.id = WCN6750_V1,
		.subid = 0,
		.name = "WCN6750_V1",
	},
	{
		.id = QCA6490_v2_1,
		.subid = 0,
		.name = "QCA6490",
	},
	{
		.id = QCA6490_v2,
		.subid = 0,
		.name = "QCA6490",
	},
	{
		.id = WCN3990_v2_2,
		.subid = 0,
		.name = "WCN3990_v2_2",
	},
	{
		.id = WCN3990_TALOS,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_MOOREA,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_SAIPAN,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_RENNELL,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_BITRA,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_DIVAR,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_ATHERTON,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_STRAIT,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_NETRANI,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_CLARENCE,
		.subid = 0,
		.name = "WCN3990",
	}
};
  421. /**
  422. * hif_get_hw_name(): get a human readable name for the hardware
  423. * @info: Target Info
  424. *
  425. * Return: human readable name for the underlying wifi hardware.
  426. */
  427. static const char *hif_get_hw_name(struct hif_target_info *info)
  428. {
  429. int i;
  430. hif_debug("target version = %d, target revision = %d",
  431. info->target_version,
  432. info->target_revision);
  433. if (info->hw_name)
  434. return info->hw_name;
  435. for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
  436. if (info->target_version == qwlan_hw_list[i].id &&
  437. info->target_revision == qwlan_hw_list[i].subid) {
  438. return qwlan_hw_list[i].name;
  439. }
  440. }
  441. info->hw_name = qdf_mem_malloc(64);
  442. if (!info->hw_name)
  443. return "Unknown Device (nomem)";
  444. i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
  445. info->target_version);
  446. if (i < 0)
  447. return "Unknown Device (snprintf failure)";
  448. else
  449. return info->hw_name;
  450. }
  451. /**
  452. * hif_get_hw_info(): hif_get_hw_info
  453. * @scn: scn
  454. * @version: version
  455. * @revision: revision
  456. *
  457. * Return: n/a
  458. */
  459. void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
  460. const char **target_name)
  461. {
  462. struct hif_target_info *info = hif_get_target_info_handle(scn);
  463. struct hif_softc *sc = HIF_GET_SOFTC(scn);
  464. if (sc->bus_type == QDF_BUS_TYPE_USB)
  465. hif_usb_get_hw_info(sc);
  466. *version = info->target_version;
  467. *revision = info->target_revision;
  468. *target_name = hif_get_hw_name(info);
  469. }
  470. /**
  471. * hif_get_dev_ba(): API to get device base address.
  472. * @scn: scn
  473. * @version: version
  474. * @revision: revision
  475. *
  476. * Return: n/a
  477. */
  478. void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
  479. {
  480. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  481. return scn->mem;
  482. }
  483. qdf_export_symbol(hif_get_dev_ba);
  484. /**
  485. * hif_get_dev_ba_ce(): API to get device ce base address.
  486. * @scn: scn
  487. *
  488. * Return: dev mem base address for CE
  489. */
  490. void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
  491. {
  492. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  493. return scn->mem_ce;
  494. }
  495. qdf_export_symbol(hif_get_dev_ba_ce);
  496. uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
  497. {
  498. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  499. return scn->target_info.soc_version;
  500. }
  501. qdf_export_symbol(hif_get_soc_version);
  502. /**
  503. * hif_get_dev_ba_cmem(): API to get device ce base address.
  504. * @scn: scn
  505. *
  506. * Return: dev mem base address for CMEM
  507. */
  508. void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle)
  509. {
  510. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  511. return scn->mem_cmem;
  512. }
  513. qdf_export_symbol(hif_get_dev_ba_cmem);
  514. #ifdef FEATURE_RUNTIME_PM
  515. void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
  516. {
  517. if (is_get)
  518. qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
  519. else
  520. qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
  521. }
  522. static inline
  523. void hif_rtpm_lock_init(struct hif_softc *scn)
  524. {
  525. qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
  526. }
  527. static inline
  528. void hif_rtpm_lock_deinit(struct hif_softc *scn)
  529. {
  530. qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
  531. }
  532. #else
  533. static inline
  534. void hif_rtpm_lock_init(struct hif_softc *scn)
  535. {
  536. }
  537. static inline
  538. void hif_rtpm_lock_deinit(struct hif_softc *scn)
  539. {
  540. }
  541. #endif
  542. #ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
  543. /**
  544. * hif_get_interrupt_threshold_cfg_from_psoc() - Retrieve ini cfg from psoc
  545. * @scn: hif context
  546. * @psoc: psoc objmgr handle
  547. *
  548. * Return: None
  549. */
  550. static inline
  551. void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
  552. struct wlan_objmgr_psoc *psoc)
  553. {
  554. if (psoc) {
  555. scn->ini_cfg.ce_status_ring_timer_threshold =
  556. cfg_get(psoc,
  557. CFG_CE_STATUS_RING_TIMER_THRESHOLD);
  558. scn->ini_cfg.ce_status_ring_batch_count_threshold =
  559. cfg_get(psoc,
  560. CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
  561. }
  562. }
  563. #else
  564. static inline
  565. void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
  566. struct wlan_objmgr_psoc *psoc)
  567. {
  568. }
  569. #endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
  570. /**
  571. * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
  572. * @scn: hif context
  573. * @psoc: psoc objmgr handle
  574. *
  575. * Return: None
  576. */
  577. static inline
  578. void hif_get_cfg_from_psoc(struct hif_softc *scn,
  579. struct wlan_objmgr_psoc *psoc)
  580. {
  581. if (psoc) {
  582. scn->ini_cfg.disable_wake_irq =
  583. cfg_get(psoc, CFG_DISABLE_WAKE_IRQ);
  584. /**
  585. * Wake IRQ can't share the same IRQ with the copy engines
  586. * In one MSI mode, we don't know whether wake IRQ is triggered
  587. * or not in wake IRQ handler. known issue CR 2055359
  588. * If you want to support Wake IRQ. Please allocate at least
  589. * 2 MSI vector. The first is for wake IRQ while the others
  590. * share the second vector
  591. */
  592. if (pld_is_one_msi(scn->qdf_dev->dev)) {
  593. hif_debug("Disable wake IRQ once it is one MSI mode");
  594. scn->ini_cfg.disable_wake_irq = true;
  595. }
  596. hif_get_interrupt_threshold_cfg_from_psoc(scn, psoc);
  597. }
  598. }
  599. #if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
  600. /**
  601. * hif_recovery_notifier_cb - Recovery notifier callback to log
  602. * hang event data
  603. * @block: notifier block
  604. * @state: state
  605. * @data: notifier data
  606. *
  607. * Return: status
  608. */
  609. static
  610. int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
  611. void *data)
  612. {
  613. struct qdf_notifer_data *notif_data = data;
  614. qdf_notif_block *notif_block;
  615. struct hif_softc *hif_handle;
  616. bool bus_id_invalid;
  617. if (!data || !block)
  618. return -EINVAL;
  619. notif_block = qdf_container_of(block, qdf_notif_block, notif_block);
  620. hif_handle = notif_block->priv_data;
  621. if (!hif_handle)
  622. return -EINVAL;
  623. bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
  624. &notif_data->offset);
  625. if (bus_id_invalid)
  626. return NOTIFY_STOP_MASK;
  627. hif_log_ce_info(hif_handle, notif_data->hang_data,
  628. &notif_data->offset);
  629. return 0;
  630. }
  631. /**
  632. * hif_register_recovery_notifier - Register hif recovery notifier
  633. * @hif_handle: hif handle
  634. *
  635. * Return: status
  636. */
  637. static
  638. QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
  639. {
  640. qdf_notif_block *hif_notifier;
  641. if (!hif_handle)
  642. return QDF_STATUS_E_FAILURE;
  643. hif_notifier = &hif_handle->hif_recovery_notifier;
  644. hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
  645. hif_notifier->priv_data = hif_handle;
  646. return qdf_hang_event_register_notifier(hif_notifier);
  647. }
  648. /**
  649. * hif_unregister_recovery_notifier - Un-register hif recovery notifier
  650. * @hif_handle: hif handle
  651. *
  652. * Return: status
  653. */
  654. static
  655. QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
  656. {
  657. qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;
  658. return qdf_hang_event_unregister_notifier(hif_notifier);
  659. }
#else
/* Hang-event logging disabled: registration is a successful no-op */
static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif
  672. #ifdef HIF_CPU_PERF_AFFINE_MASK
  673. /**
  674. * __hif_cpu_hotplug_notify() - CPU hotplug event handler
  675. * @cpu: CPU Id of the CPU generating the event
  676. * @cpu_up: true if the CPU is online
  677. *
  678. * Return: None
  679. */
  680. static void __hif_cpu_hotplug_notify(void *context,
  681. uint32_t cpu, bool cpu_up)
  682. {
  683. struct hif_softc *scn = context;
  684. if (!scn)
  685. return;
  686. if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
  687. return;
  688. if (cpu_up) {
  689. hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
  690. hif_debug("Setting affinity for online CPU: %d", cpu);
  691. } else {
  692. hif_debug("Skip setting affinity for offline CPU: %d", cpu);
  693. }
  694. }
  695. /**
  696. * hif_cpu_hotplug_notify - cpu core up/down notification
  697. * handler
  698. * @cpu: CPU generating the event
  699. * @cpu_up: true if the CPU is online
  700. *
  701. * Return: None
  702. */
  703. static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
  704. {
  705. struct qdf_op_sync *op_sync;
  706. if (qdf_op_protect(&op_sync))
  707. return;
  708. __hif_cpu_hotplug_notify(context, cpu, cpu_up);
  709. qdf_op_unprotect(op_sync);
  710. }
  711. static void hif_cpu_online_cb(void *context, uint32_t cpu)
  712. {
  713. hif_cpu_hotplug_notify(context, cpu, true);
  714. }
  715. static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
  716. {
  717. hif_cpu_hotplug_notify(context, cpu, false);
  718. }
  719. static void hif_cpuhp_register(struct hif_softc *scn)
  720. {
  721. if (!scn) {
  722. hif_info_high("cannot register hotplug notifiers");
  723. return;
  724. }
  725. qdf_cpuhp_register(&scn->cpuhp_event_handle,
  726. scn,
  727. hif_cpu_online_cb,
  728. hif_cpu_before_offline_cb);
  729. }
  730. static void hif_cpuhp_unregister(struct hif_softc *scn)
  731. {
  732. if (!scn) {
  733. hif_info_high("cannot unregister hotplug notifiers");
  734. return;
  735. }
  736. qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
  737. }
#else
/* Perf-affinity feature disabled: hotplug hooks are no-ops */
static void hif_cpuhp_register(struct hif_softc *scn)
{
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
}
#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
#ifdef HIF_DETECTION_LATENCY_ENABLE
/**
 * hif_tasklet_latency() - detect a stalled or late CE2 tasklet
 * @scn: hif context
 * @from_timer: true when invoked from the watchdog timer, false when
 *              invoked from the tasklet itself
 *
 * Compares the CE2 tasklet schedule/execute timestamps against the
 * configured latency threshold and triggers self-recovery when the
 * tasklet appears stuck (timer path) or ran too late (tasklet path).
 *
 * Return: none (may trigger self recovery)
 */
void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t ce2_tasklet_sched_time =
		scn->latency_detect.ce2_tasklet_sched_time;
	qdf_time_t ce2_tasklet_exec_time =
		scn->latency_detect.ce2_tasklet_exec_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t detect_latency_threshold =
		scn->latency_detect.detect_latency_threshold;
	int cpu_id = qdf_get_cpu();

	/* 2 kinds of check here.
	 * from_timer==true: check if tasklet stall
	 * from_timer==false: check tasklet execute comes late
	 */
	/* latency fires only when (a) sched/exec ordering matches the check
	 * direction AND (b) the threshold has elapsed since scheduling
	 */
	if ((from_timer ?
	     qdf_system_time_after(ce2_tasklet_sched_time,
				   ce2_tasklet_exec_time) :
	     qdf_system_time_after(ce2_tasklet_exec_time,
				   ce2_tasklet_sched_time)) &&
	    qdf_system_time_after(
		curr_jiffies,
		ce2_tasklet_sched_time +
		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
		hif_err("tasklet ce2 latency: from_timer %d, curr_jiffies %lu, ce2_tasklet_sched_time %lu,ce2_tasklet_exec_time %lu, detect_latency_threshold %ums detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, ce2_tasklet_sched_time,
			ce2_tasklet_exec_time, detect_latency_threshold,
			scn->latency_detect.detect_latency_timer_timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}
/**
 * hif_credit_latency() - detect a stalled or late firmware credit report
 * @scn: hif context
 * @from_timer: true when invoked from the watchdog timer (stall check),
 *	false when invoked from the credit-report path (lateness check)
 *
 * If the request/report ordering holds and more than
 * detect_latency_threshold ms have elapsed since the credit request,
 * log the details and trigger self recovery.
 *
 * Return: none
 */
void hif_credit_latency(struct hif_softc *scn, bool from_timer)
{
	/* snapshot the timestamps once for a consistent comparison */
	qdf_time_t credit_request_time =
		scn->latency_detect.credit_request_time;
	qdf_time_t credit_report_time =
		scn->latency_detect.credit_report_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t detect_latency_threshold =
		scn->latency_detect.detect_latency_threshold;
	int cpu_id = qdf_get_cpu();

	/* 2 kinds of check here.
	 * from_timer==true: check if credit report stall
	 * from_timer==false: check credit report comes late
	 */
	if ((from_timer ?
	    qdf_system_time_after(credit_request_time,
				  credit_report_time) :
	    qdf_system_time_after(credit_report_time,
				  credit_request_time)) &&
	    qdf_system_time_after(
		curr_jiffies,
		credit_request_time +
		qdf_system_msecs_to_ticks(detect_latency_threshold))) {
		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu,credit_report_time %lu, detect_latency_threshold %ums, detect_latency_timer_timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, credit_request_time,
			credit_report_time, detect_latency_threshold,
			scn->latency_detect.detect_latency_timer_timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}
  815. /**
  816. * hif_check_detection_latency(): to check if latency for tasklet/credit
  817. *
  818. * @scn: hif context
  819. * @from_timer: if called from timer handler
  820. * @bitmap_type: indicate if check tasklet or credit
  821. *
  822. * Return: none
  823. */
  824. void hif_check_detection_latency(struct hif_softc *scn,
  825. bool from_timer,
  826. uint32_t bitmap_type)
  827. {
  828. if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
  829. return;
  830. if (!scn->latency_detect.enable_detection)
  831. return;
  832. if (bitmap_type & BIT(HIF_DETECT_TASKLET))
  833. hif_tasklet_latency(scn, from_timer);
  834. if (bitmap_type & BIT(HIF_DETECT_CREDIT))
  835. hif_credit_latency(scn, from_timer);
  836. }
/**
 * hif_latency_detect_timeout_handler() - watchdog timer callback
 * @arg: hif context (struct hif_softc *)
 *
 * Runs both latency checks, then re-arms the timer, preferably on a CPU
 * other than the one the CE2 tasklet was scheduled on.
 *
 * Return: none
 */
static void hif_latency_detect_timeout_handler(void *arg)
{
	struct hif_softc *scn = (struct hif_softc *)arg;
	int next_cpu;

	hif_check_detection_latency(scn, true,
				    BIT(HIF_DETECT_TASKLET) |
				    BIT(HIF_DETECT_CREDIT));

	/* The timer needs to restart on a different cpu so it can detect a
	 * tasklet schedule stall. There is still a chance that, after the
	 * timer has been started, an irq/tasklet lands on the same cpu and
	 * the tasklet executes before the softirq timer; if that tasklet
	 * stalls, this timer can't detect it. Accept this as a limitation:
	 * a stalled tasklet will be detected elsewhere, just a little later.
	 */
	next_cpu = cpumask_any_but(
			cpu_active_mask,
			scn->latency_detect.ce2_tasklet_sched_cpuid);

	/* cpumask_any_but() returns >= nr_cpu_ids when no other cpu exists */
	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
		hif_debug("start timer on local");
		/* it doesn't found a available cpu, start on local cpu*/
		qdf_timer_mod(
			&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout);
	} else {
		qdf_timer_start_on(
			&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout,
			next_cpu);
	}
}
/**
 * hif_latency_detect_timer_init() - set up the latency-detect watchdog timer
 * @scn: hif context
 *
 * Mission mode only. Initializes thresholds and the SW-spin timer but does
 * not start it; hif_latency_detect_timer_start() does that.
 *
 * Return: none
 */
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	scn->latency_detect.detect_latency_timer_timeout =
		DETECTION_TIMER_TIMEOUT;
	scn->latency_detect.detect_latency_threshold =
		DETECTION_LATENCY_THRESHOLD;

	hif_info("timer timeout %u, latency threshold %u",
		 scn->latency_detect.detect_latency_timer_timeout,
		 scn->latency_detect.detect_latency_threshold);

	scn->latency_detect.is_timer_started = false;

	/* NOTE(review): NULL qdf device handle — presumably unused for
	 * SW_SPIN timers; confirm against qdf_timer_init()
	 */
	qdf_timer_init(NULL,
		       &scn->latency_detect.detect_latency_timer,
		       &hif_latency_detect_timeout_handler,
		       scn,
		       QDF_TIMER_TYPE_SW_SPIN);
}
/**
 * hif_latency_detect_timer_deinit() - free the latency-detect timer
 * @scn: hif context
 *
 * Mission mode only; mirrors hif_latency_detect_timer_init().
 *
 * Return: none
 */
static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_info("deinit timer");
	qdf_timer_free(&scn->latency_detect.detect_latency_timer);
}
/**
 * hif_latency_detect_timer_start() - arm the latency-detect watchdog timer
 * @hif_ctx: hif opaque context
 *
 * Mission mode only. No-op if the timer is already marked started.
 *
 * Return: none
 */
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("start timer");
	if (scn->latency_detect.is_timer_started) {
		hif_info("timer has been started");
		return;
	}

	qdf_timer_start(&scn->latency_detect.detect_latency_timer,
			scn->latency_detect.detect_latency_timer_timeout);
	scn->latency_detect.is_timer_started = true;
}
/**
 * hif_latency_detect_timer_stop() - cancel the latency-detect watchdog timer
 * @hif_ctx: hif opaque context
 *
 * Mission mode only. Synchronously cancels the timer so the handler is not
 * running after this returns.
 *
 * Return: none
 */
void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("stop timer");

	qdf_timer_sync_cancel(&scn->latency_detect.detect_latency_timer);
	scn->latency_detect.is_timer_started = false;
}
  921. void hif_latency_detect_credit_record_time(
  922. enum hif_credit_exchange_type type,
  923. struct hif_opaque_softc *hif_ctx)
  924. {
  925. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  926. if (!scn) {
  927. hif_err("Could not do runtime put, scn is null");
  928. return;
  929. }
  930. if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
  931. return;
  932. if (HIF_REQUEST_CREDIT == type)
  933. scn->latency_detect.credit_request_time = qdf_system_ticks();
  934. else if (HIF_PROCESS_CREDIT_REPORT == type)
  935. scn->latency_detect.credit_report_time = qdf_system_ticks();
  936. hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
  937. }
  938. void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
  939. {
  940. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  941. if (!scn) {
  942. hif_err("Could not do runtime put, scn is null");
  943. return;
  944. }
  945. if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
  946. return;
  947. scn->latency_detect.enable_detection = value;
  948. }
  949. #else
/* HIF_DETECTION_LATENCY_ENABLE disabled: latency-detect timer is a no-op */
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{}

static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{}
  954. #endif
/**
 * hif_open() - allocate and initialize a hif context for the given bus
 * @qdf_ctx: qdf device context
 * @mode: driver mode of operation (con_mode)
 * @bus_type: bus type being attached
 * @cbk: driver state callbacks, copied into the context
 * @psoc: psoc object used to read config parameters
 *
 * Return: opaque hif handle, or NULL on allocation/bus-open failure
 */
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc)
{
	struct hif_softc *scn;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	/* the bus-specific softc embeds struct hif_softc; size per bus */
	int bus_context_size = hif_bus_get_context_size(bus_type);

	if (bus_context_size == 0) {
		hif_err("context size 0 not allowed");
		return NULL;
	}

	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
	if (!scn)
		/* scn is NULL here, so this returns NULL */
		return GET_HIF_OPAQUE_HDL(scn);

	scn->qdf_dev = qdf_ctx;
	scn->hif_con_param = mode;
	qdf_atomic_init(&scn->active_tasklet_cnt);

	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
	qdf_atomic_init(&scn->link_suspended);
	qdf_atomic_init(&scn->tasklet_from_intr);
	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
	qdf_mem_copy(&scn->callbacks, cbk,
		     sizeof(struct hif_driver_state_callbacks));
	scn->bus_type  = bus_type;

	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
	hif_get_cfg_from_psoc(scn, psoc);

	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
	status = hif_bus_open(scn, bus_type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_bus_open error = %d, bus_type = %d",
			status, bus_type);
		qdf_mem_free(scn);
		scn = NULL;
		goto out;
	}

	hif_rtpm_lock_init(scn);

	hif_cpuhp_register(scn);
	hif_latency_detect_timer_init(scn);

out:
	return GET_HIF_OPAQUE_HDL(scn);
}
  998. #ifdef ADRASTEA_RRI_ON_DDR
  999. /**
  1000. * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
  1001. * @scn: hif context
  1002. *
  1003. * Return: none
  1004. */
  1005. void hif_uninit_rri_on_ddr(struct hif_softc *scn)
  1006. {
  1007. if (scn->vaddr_rri_on_ddr)
  1008. qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
  1009. (CE_COUNT * sizeof(uint32_t)),
  1010. scn->vaddr_rri_on_ddr,
  1011. scn->paddr_rri_on_ddr, 0);
  1012. scn->vaddr_rri_on_ddr = NULL;
  1013. }
  1014. #endif
  1015. /**
  1016. * hif_close(): hif_close
  1017. * @hif_ctx: hif_ctx
  1018. *
  1019. * Return: n/a
  1020. */
  1021. void hif_close(struct hif_opaque_softc *hif_ctx)
  1022. {
  1023. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1024. if (!scn) {
  1025. hif_err("hif_opaque_softc is NULL");
  1026. return;
  1027. }
  1028. hif_latency_detect_timer_deinit(scn);
  1029. if (scn->athdiag_procfs_inited) {
  1030. athdiag_procfs_remove();
  1031. scn->athdiag_procfs_inited = false;
  1032. }
  1033. if (scn->target_info.hw_name) {
  1034. char *hw_name = scn->target_info.hw_name;
  1035. scn->target_info.hw_name = "ErrUnloading";
  1036. qdf_mem_free(hw_name);
  1037. }
  1038. hif_uninit_rri_on_ddr(scn);
  1039. hif_cleanup_static_buf_to_target(scn);
  1040. hif_cpuhp_unregister(scn);
  1041. hif_rtpm_lock_deinit(scn);
  1042. hif_bus_close(scn);
  1043. qdf_mem_free(scn);
  1044. }
  1045. /**
  1046. * hif_get_num_active_grp_tasklets() - get the number of active
  1047. * datapath group tasklets pending to be completed.
  1048. * @scn: HIF context
  1049. *
  1050. * Returns: the number of datapath group tasklets which are active
  1051. */
  1052. static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
  1053. {
  1054. return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
  1055. }
  1056. #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
  1057. defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
  1058. defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
  1059. defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
  1060. defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
  1061. defined(QCA_WIFI_QCA9574)) || defined(QCA_WIFI_QCA5332)
  1062. /**
  1063. * hif_get_num_pending_work() - get the number of entries in
  1064. * the workqueue pending to be completed.
  1065. * @scn: HIF context
  1066. *
  1067. * Returns: the number of tasklets which are active
  1068. */
  1069. static inline int hif_get_num_pending_work(struct hif_softc *scn)
  1070. {
  1071. return hal_get_reg_write_pending_work(scn->hal_soc);
  1072. }
  1073. #else
/* No delayed register-write workqueue on other targets */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return 0;
}
  1078. #endif
/**
 * hif_try_complete_tasks() - wait for tasklets, group tasklets and
 *	pending register-write work to drain
 * @scn: HIF context
 *
 * Polls every 10 ms up to HIF_TASK_DRAIN_WAIT_CNT attempts.
 * Note: the loop condition short-circuits, so grp_tasklet/work in the
 * messages may be stale values from an earlier iteration.
 *
 * Return: QDF_STATUS_SUCCESS when drained, QDF_STATUS_E_FAULT on timeout
 */
QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
{
	uint32_t task_drain_wait_cnt = 0;
	int tasklet = 0, grp_tasklet = 0, work = 0;

	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending tasklets %d grp tasklets %d work %d",
				tasklet, grp_tasklet, work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for tasklets %d grp tasklets %d work %d",
			 tasklet, grp_tasklet, work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}
  1097. #if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
/**
 * hif_try_prevent_ep_vote_access() - disable EP vote access and wait for
 *	pending register-write work to drain and the PCI EP to sleep
 * @hif_ctx: hif opaque context
 *
 * On drain timeout the vote access is re-enabled before returning failure.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_FAULT on timeout
 */
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint32_t work_drain_wait_cnt = 0;
	uint32_t wait_cnt = 0;
	int work = 0;

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);

	/* wait for already-queued delayed register writes to finish */
	while ((work = hif_get_num_pending_work(scn))) {
		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
			/* roll back so normal operation can continue */
			qdf_atomic_set(&scn->dp_ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			qdf_atomic_set(&scn->ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			hif_err("timeout wait for pending work %d ", work);
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(10);
	}

	/* platform does not support the EP-awake query: nothing more to do */
	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
		return QDF_STATUS_SUCCESS;

	/* wait for firmware to release the EP vote and let the EP sleep */
	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
			hif_err("Release EP vote is not proceed by Fw");
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(5);
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * hif_set_ep_intermediate_vote_access() - move EP vote access to the
 *	intermediate state
 * @hif_ctx: hif opaque context
 *
 * Asserts (QDF_BUG) if pending work/EP wake cannot be drained first.
 *
 * Return: none
 */
void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint8_t vote_access;

	vote_access = qdf_atomic_read(&scn->ep_vote_access);

	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
		hif_info("EP vote changed from:%u to intermediate state",
			 vote_access);

	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
		QDF_BUG(0);

	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
}
/**
 * hif_allow_ep_vote_access() - re-enable both DP and generic EP vote access
 * @hif_ctx: hif opaque context
 *
 * Return: none
 */
void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
}
  1151. void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
  1152. uint8_t type, uint8_t access)
  1153. {
  1154. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1155. if (type == HIF_EP_VOTE_DP_ACCESS)
  1156. qdf_atomic_set(&scn->dp_ep_vote_access, access);
  1157. else
  1158. qdf_atomic_set(&scn->ep_vote_access, access);
  1159. }
  1160. uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
  1161. uint8_t type)
  1162. {
  1163. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1164. if (type == HIF_EP_VOTE_DP_ACCESS)
  1165. return qdf_atomic_read(&scn->dp_ep_vote_access);
  1166. else
  1167. return qdf_atomic_read(&scn->ep_vote_access);
  1168. }
  1169. #endif
  1170. #if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
  1171. defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
  1172. defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
  1173. defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
  1174. defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
  1175. defined(QCA_WIFI_QCA9574)) || defined(QCA_WIFI_QCA5332)
/**
 * hif_hal_attach() - attach a HAL SOC for SRNG-based copy engines
 * @scn: hif context
 *
 * No-op (success) for targets whose CEs are not SRNG based.
 *
 * Return: QDF_STATUS_SUCCESS or QDF_STATUS_E_FAILURE if hal_attach fails
 */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		scn->hal_soc = hal_attach(
					hif_softc_to_hif_opaque_softc(scn),
					scn->qdf_dev);
		if (!scn->hal_soc)
			return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}
/**
 * hif_hal_detach() - detach the HAL SOC attached by hif_hal_attach()
 * @scn: hif context
 *
 * Return: QDF_STATUS_SUCCESS always
 */
static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		hal_detach(scn->hal_soc);
		scn->hal_soc = NULL;
	}

	return QDF_STATUS_SUCCESS;
}
  1195. #else
/* Non-SRNG targets: there is no HAL SOC to attach or detach */
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
  1204. #endif
  1205. int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
  1206. {
  1207. int ret;
  1208. switch (bus_type) {
  1209. case QDF_BUS_TYPE_IPCI:
  1210. ret = qdf_set_dma_coherent_mask(dev,
  1211. DMA_COHERENT_MASK_DEFAULT);
  1212. if (ret) {
  1213. hif_err("Failed to set dma mask error = %d", ret);
  1214. return ret;
  1215. }
  1216. break;
  1217. default:
  1218. /* Follow the existing sequence for other targets */
  1219. break;
  1220. }
  1221. return 0;
  1222. }
  1223. /**
  1224. * hif_enable(): hif_enable
  1225. * @hif_ctx: hif_ctx
  1226. * @dev: dev
  1227. * @bdev: bus dev
  1228. * @bid: bus ID
  1229. * @bus_type: bus type
  1230. * @type: enable type
  1231. *
  1232. * Return: QDF_STATUS
  1233. */
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
		      void *bdev,
		      const struct hif_bus_id *bid,
		      enum qdf_bus_type bus_type,
		      enum hif_enable_type type)
{
	QDF_STATUS status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_ctx = NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = hif_enable_bus(scn, dev, bdev, bid, type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_enable_bus error = %d", status);
		return status;
	}

	status = hif_hal_attach(scn);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hal attach failed");
		goto disable_bus;
	}

	if (hif_bus_configure(scn)) {
		hif_err("Target probe failed");
		status = QDF_STATUS_E_FAILURE;
		goto hal_detach;
	}

	hif_ut_suspend_init(scn);
	hif_register_recovery_notifier(scn);
	hif_latency_detect_timer_start(hif_ctx);

	/*
	 * Flag to avoid potential unallocated memory access from MSI
	 * interrupt handler which could get scheduled as soon as MSI
	 * is enabled, i.e to take care of the race due to the order
	 * in where MSI is enabled before the memory, that will be
	 * in interrupt handlers, is allocated.
	 */
	scn->hif_init_done = true;

	hif_debug("OK");

	return QDF_STATUS_SUCCESS;

	/* goto-based unwind: undo in reverse order of acquisition */
hal_detach:
	hif_hal_detach(scn);
disable_bus:
	hif_disable_bus(scn);
	return status;
}
/**
 * hif_disable() - tear down everything hif_enable() set up
 * @hif_ctx: hif opaque context
 * @type: disable type (currently unused in this path)
 *
 * Return: none
 */
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	hif_set_enable_detection(hif_ctx, false);
	hif_latency_detect_timer_stop(hif_ctx);

	hif_unregister_recovery_notifier(scn);

	hif_nointrs(scn);
	/* hif_init_done is only set once hif_enable() fully succeeded */
	if (scn->hif_init_done == false)
		hif_shutdown_device(hif_ctx);
	else
		hif_stop(hif_ctx);

	hif_hal_detach(scn);

	hif_disable_bus(scn);

	hif_wlan_disable(scn);

	scn->notice_send = false;

	hif_debug("X");
}
  1299. #ifdef CE_TASKLET_DEBUG_ENABLE
  1300. void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
  1301. {
  1302. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1303. if (!scn)
  1304. return;
  1305. scn->ce_latency_stats = val;
  1306. }
  1307. #endif
/* hif_display_stats() - dump bus-level statistics to the log */
void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}
  1312. qdf_export_symbol(hif_display_stats);
/* hif_clear_stats() - reset bus-level statistics */
void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}
  1317. /**
  1318. * hif_crash_shutdown_dump_bus_register() - dump bus registers
  1319. * @hif_ctx: hif_ctx
  1320. *
  1321. * Return: n/a
  1322. */
  1323. #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	/* skip the dump if the SOC is not in a readable state */
	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		hif_err("Failed to dump bus registers!");
}
  1332. /**
  1333. * hif_crash_shutdown(): hif_crash_shutdown
  1334. *
  1335. * This function is called by the platform driver to dump CE registers
  1336. *
  1337. * @hif_ctx: hif_ctx
  1338. *
  1339. * Return: n/a
  1340. */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_ctx)
		return;

	/* RAM dump over SNOC is not supported */
	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
		return;
	}

	if (TARGET_STATUS_RESET == scn->target_status) {
		hif_warn("Target is already asserted, ignore!");
		return;
	}

	if (hif_is_load_or_unload_in_progress(scn)) {
		hif_err("Load/unload is in progress, ignore!");
		return;
	}

	hif_crash_shutdown_dump_bus_register(hif_ctx);
	/* mark the target asserted so re-entry takes the early return above */
	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);

	if (ol_copy_ramdump(hif_ctx))
		goto out;

	hif_info("RAM dump collecting completed!");

out:
	return;
}
  1366. #else
/* RAM-dump collection not compiled in: log and return */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	hif_debug("Collecting target RAM dump disabled");
}
  1371. #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
  1372. #ifdef QCA_WIFI_3_0
  1373. /**
  1374. * hif_check_fw_reg(): hif_check_fw_reg
  1375. * @scn: scn
  1376. * @state:
  1377. *
  1378. * Return: int
  1379. */
  1380. int hif_check_fw_reg(struct hif_opaque_softc *scn)
  1381. {
  1382. return 0;
  1383. }
  1384. #endif
  1385. /**
  1386. * hif_read_phy_mem_base(): hif_read_phy_mem_base
  1387. * @scn: scn
  1388. * @phy_mem_base: physical mem base
  1389. *
  1390. * Return: n/a
  1391. */
  1392. void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
  1393. {
  1394. *phy_mem_base = scn->mem_pa;
  1395. }
  1396. qdf_export_symbol(hif_read_phy_mem_base);
  1397. /**
  1398. * hif_get_device_type(): hif_get_device_type
  1399. * @device_id: device_id
  1400. * @revision_id: revision_id
  1401. * @hif_type: returned hif_type
  1402. * @target_type: returned target_type
  1403. *
  1404. * Return: int
  1405. */
  1406. int hif_get_device_type(uint32_t device_id,
  1407. uint32_t revision_id,
  1408. uint32_t *hif_type, uint32_t *target_type)
  1409. {
  1410. int ret = 0;
  1411. switch (device_id) {
  1412. case ADRASTEA_DEVICE_ID_P2_E12:
  1413. *hif_type = HIF_TYPE_ADRASTEA;
  1414. *target_type = TARGET_TYPE_ADRASTEA;
  1415. break;
  1416. case AR9888_DEVICE_ID:
  1417. *hif_type = HIF_TYPE_AR9888;
  1418. *target_type = TARGET_TYPE_AR9888;
  1419. break;
  1420. case AR6320_DEVICE_ID:
  1421. switch (revision_id) {
  1422. case AR6320_FW_1_1:
  1423. case AR6320_FW_1_3:
  1424. *hif_type = HIF_TYPE_AR6320;
  1425. *target_type = TARGET_TYPE_AR6320;
  1426. break;
  1427. case AR6320_FW_2_0:
  1428. case AR6320_FW_3_0:
  1429. case AR6320_FW_3_2:
  1430. *hif_type = HIF_TYPE_AR6320V2;
  1431. *target_type = TARGET_TYPE_AR6320V2;
  1432. break;
  1433. default:
  1434. hif_err("dev_id = 0x%x, rev_id = 0x%x",
  1435. device_id, revision_id);
  1436. ret = -ENODEV;
  1437. goto end;
  1438. }
  1439. break;
  1440. case AR9887_DEVICE_ID:
  1441. *hif_type = HIF_TYPE_AR9888;
  1442. *target_type = TARGET_TYPE_AR9888;
  1443. hif_info(" *********** AR9887 **************");
  1444. break;
  1445. case QCA9984_DEVICE_ID:
  1446. *hif_type = HIF_TYPE_QCA9984;
  1447. *target_type = TARGET_TYPE_QCA9984;
  1448. hif_info(" *********** QCA9984 *************");
  1449. break;
  1450. case QCA9888_DEVICE_ID:
  1451. *hif_type = HIF_TYPE_QCA9888;
  1452. *target_type = TARGET_TYPE_QCA9888;
  1453. hif_info(" *********** QCA9888 *************");
  1454. break;
  1455. case AR900B_DEVICE_ID:
  1456. *hif_type = HIF_TYPE_AR900B;
  1457. *target_type = TARGET_TYPE_AR900B;
  1458. hif_info(" *********** AR900B *************");
  1459. break;
  1460. case QCA8074_DEVICE_ID:
  1461. *hif_type = HIF_TYPE_QCA8074;
  1462. *target_type = TARGET_TYPE_QCA8074;
  1463. hif_info(" *********** QCA8074 *************");
  1464. break;
  1465. case QCA6290_EMULATION_DEVICE_ID:
  1466. case QCA6290_DEVICE_ID:
  1467. *hif_type = HIF_TYPE_QCA6290;
  1468. *target_type = TARGET_TYPE_QCA6290;
  1469. hif_info(" *********** QCA6290EMU *************");
  1470. break;
  1471. case QCN9000_DEVICE_ID:
  1472. *hif_type = HIF_TYPE_QCN9000;
  1473. *target_type = TARGET_TYPE_QCN9000;
  1474. hif_info(" *********** QCN9000 *************");
  1475. break;
  1476. case QCN9224_DEVICE_ID:
  1477. *hif_type = HIF_TYPE_QCN9224;
  1478. *target_type = TARGET_TYPE_QCN9224;
  1479. hif_info(" *********** QCN9224 *************");
  1480. break;
  1481. case QCN6122_DEVICE_ID:
  1482. *hif_type = HIF_TYPE_QCN6122;
  1483. *target_type = TARGET_TYPE_QCN6122;
  1484. hif_info(" *********** QCN6122 *************");
  1485. break;
  1486. case QCN9160_DEVICE_ID:
  1487. *hif_type = HIF_TYPE_QCN9160;
  1488. *target_type = TARGET_TYPE_QCN9160;
  1489. hif_info(" *********** QCN9160 *************");
  1490. break;
  1491. case QCN7605_DEVICE_ID:
  1492. case QCN7605_COMPOSITE:
  1493. case QCN7605_STANDALONE:
  1494. case QCN7605_STANDALONE_V2:
  1495. case QCN7605_COMPOSITE_V2:
  1496. *hif_type = HIF_TYPE_QCN7605;
  1497. *target_type = TARGET_TYPE_QCN7605;
  1498. hif_info(" *********** QCN7605 *************");
  1499. break;
  1500. case QCA6390_DEVICE_ID:
  1501. case QCA6390_EMULATION_DEVICE_ID:
  1502. *hif_type = HIF_TYPE_QCA6390;
  1503. *target_type = TARGET_TYPE_QCA6390;
  1504. hif_info(" *********** QCA6390 *************");
  1505. break;
  1506. case QCA6490_DEVICE_ID:
  1507. case QCA6490_EMULATION_DEVICE_ID:
  1508. *hif_type = HIF_TYPE_QCA6490;
  1509. *target_type = TARGET_TYPE_QCA6490;
  1510. hif_info(" *********** QCA6490 *************");
  1511. break;
  1512. case QCA6750_DEVICE_ID:
  1513. case QCA6750_EMULATION_DEVICE_ID:
  1514. *hif_type = HIF_TYPE_QCA6750;
  1515. *target_type = TARGET_TYPE_QCA6750;
  1516. hif_info(" *********** QCA6750 *************");
  1517. break;
  1518. case KIWI_DEVICE_ID:
  1519. *hif_type = HIF_TYPE_KIWI;
  1520. *target_type = TARGET_TYPE_KIWI;
  1521. hif_info(" *********** KIWI *************");
  1522. break;
  1523. case MANGO_DEVICE_ID:
  1524. *hif_type = HIF_TYPE_MANGO;
  1525. *target_type = TARGET_TYPE_MANGO;
  1526. hif_info(" *********** MANGO *************");
  1527. break;
  1528. case QCA8074V2_DEVICE_ID:
  1529. *hif_type = HIF_TYPE_QCA8074V2;
  1530. *target_type = TARGET_TYPE_QCA8074V2;
  1531. hif_info(" *********** QCA8074V2 *************");
  1532. break;
  1533. case QCA6018_DEVICE_ID:
  1534. case RUMIM2M_DEVICE_ID_NODE0:
  1535. case RUMIM2M_DEVICE_ID_NODE1:
  1536. case RUMIM2M_DEVICE_ID_NODE2:
  1537. case RUMIM2M_DEVICE_ID_NODE3:
  1538. case RUMIM2M_DEVICE_ID_NODE4:
  1539. case RUMIM2M_DEVICE_ID_NODE5:
  1540. *hif_type = HIF_TYPE_QCA6018;
  1541. *target_type = TARGET_TYPE_QCA6018;
  1542. hif_info(" *********** QCA6018 *************");
  1543. break;
  1544. case QCA5018_DEVICE_ID:
  1545. *hif_type = HIF_TYPE_QCA5018;
  1546. *target_type = TARGET_TYPE_QCA5018;
  1547. hif_info(" *********** qca5018 *************");
  1548. break;
  1549. case QCA5332_DEVICE_ID:
  1550. *hif_type = HIF_TYPE_QCA5332;
  1551. *target_type = TARGET_TYPE_QCA5332;
  1552. hif_info(" *********** QCA5332 *************");
  1553. break;
  1554. case QCA9574_DEVICE_ID:
  1555. *hif_type = HIF_TYPE_QCA9574;
  1556. *target_type = TARGET_TYPE_QCA9574;
  1557. hif_info(" *********** QCA9574 *************");
  1558. break;
  1559. default:
  1560. hif_err("Unsupported device ID = 0x%x!", device_id);
  1561. ret = -ENODEV;
  1562. break;
  1563. }
  1564. if (*target_type == TARGET_TYPE_UNKNOWN) {
  1565. hif_err("Unsupported target_type!");
  1566. ret = -ENODEV;
  1567. }
  1568. end:
  1569. return ret;
  1570. }
  1571. /**
  1572. * hif_get_bus_type() - return the bus type
  1573. *
  1574. * Return: enum qdf_bus_type
  1575. */
  1576. enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
  1577. {
  1578. struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
  1579. return scn->bus_type;
  1580. }
  1581. /**
  1582. * Target info and ini parameters are global to the driver
  1583. * Hence these structures are exposed to all the modules in
  1584. * the driver and they don't need to maintains multiple copies
  1585. * of the same info, instead get the handle from hif and
  1586. * modify them in hif
  1587. */
  1588. /**
  1589. * hif_get_ini_handle() - API to get hif_config_param handle
  1590. * @hif_ctx: HIF Context
  1591. *
  1592. * Return: pointer to hif_config_info
  1593. */
  1594. struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
  1595. {
  1596. struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
  1597. return &sc->hif_config;
  1598. }
  1599. /**
  1600. * hif_get_target_info_handle() - API to get hif_target_info handle
  1601. * @hif_ctx: HIF context
  1602. *
  1603. * Return: Pointer to hif_target_info
  1604. */
  1605. struct hif_target_info *hif_get_target_info_handle(
  1606. struct hif_opaque_softc *hif_ctx)
  1607. {
  1608. struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
  1609. return &sc->target_info;
  1610. }
  1611. qdf_export_symbol(hif_get_target_info_handle);
  1612. #ifdef RECEIVE_OFFLOAD
/**
 * hif_offld_flush_cb_register() - register an rx offload flush callback
 * @scn: hif opaque context
 * @offld_flush_handler: flush callback to register with NAPI
 *
 * Only effective when NAPI is enabled; logs an error otherwise.
 *
 * Return: none
 */
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
	else
		hif_err("NAPI not enabled");
}
  1621. qdf_export_symbol(hif_offld_flush_cb_register);
/**
 * hif_offld_flush_cb_deregister() - deregister the rx offload flush callback
 * @scn: hif opaque context
 *
 * Only effective when NAPI is enabled; logs an error otherwise.
 *
 * Return: none
 */
void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_deregister(scn);
	else
		hif_err("NAPI not enabled");
}
  1629. qdf_export_symbol(hif_offld_flush_cb_deregister);
  1630. int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
  1631. {
  1632. if (hif_napi_enabled(hif_hdl, -1))
  1633. return NAPI_PIPE2ID(ctx_id);
  1634. else
  1635. return ctx_id;
  1636. }
  1637. #else /* RECEIVE_OFFLOAD */
/* Without RECEIVE_OFFLOAD there is a single rx context: always id 0 */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
  1642. qdf_export_symbol(hif_get_rx_ctx_id);
  1643. #endif /* RECEIVE_OFFLOAD */
  1644. #if defined(FEATURE_LRO)
  1645. /**
  1646. * hif_get_lro_info - Returns LRO instance for instance ID
  1647. * @ctx_id: LRO instance ID
  1648. * @hif_hdl: HIF Context
  1649. *
  1650. * Return: Pointer to LRO instance.
  1651. */
  1652. void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
  1653. {
  1654. void *data;
  1655. if (hif_napi_enabled(hif_hdl, -1))
  1656. data = hif_napi_get_lro_info(hif_hdl, ctx_id);
  1657. else
  1658. data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
  1659. return data;
  1660. }
  1661. #endif
  1662. /**
  1663. * hif_get_target_status - API to get target status
  1664. * @hif_ctx: HIF Context
  1665. *
  1666. * Return: enum hif_target_status
  1667. */
  1668. enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
  1669. {
  1670. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1671. return scn->target_status;
  1672. }
  1673. qdf_export_symbol(hif_get_target_status);
  1674. /**
  1675. * hif_set_target_status() - API to set target status
  1676. * @hif_ctx: HIF Context
  1677. * @status: Target Status
  1678. *
  1679. * Return: void
  1680. */
  1681. void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
  1682. hif_target_status status)
  1683. {
  1684. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1685. scn->target_status = status;
  1686. }
  1687. /**
  1688. * hif_init_ini_config() - API to initialize HIF configuration parameters
  1689. * @hif_ctx: HIF Context
  1690. * @cfg: HIF Configuration
  1691. *
  1692. * Return: void
  1693. */
  1694. void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
  1695. struct hif_config_info *cfg)
  1696. {
  1697. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1698. qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
  1699. }
  1700. /**
  1701. * hif_get_conparam() - API to get driver mode in HIF
  1702. * @scn: HIF Context
  1703. *
  1704. * Return: driver mode of operation
  1705. */
  1706. uint32_t hif_get_conparam(struct hif_softc *scn)
  1707. {
  1708. if (!scn)
  1709. return 0;
  1710. return scn->hif_con_param;
  1711. }
  1712. /**
  1713. * hif_get_callbacks_handle() - API to get callbacks Handle
  1714. * @scn: HIF Context
  1715. *
  1716. * Return: pointer to HIF Callbacks
  1717. */
  1718. struct hif_driver_state_callbacks *hif_get_callbacks_handle(
  1719. struct hif_softc *scn)
  1720. {
  1721. return &scn->callbacks;
  1722. }
  1723. /**
  1724. * hif_is_driver_unloading() - API to query upper layers if driver is unloading
  1725. * @scn: HIF Context
  1726. *
  1727. * Return: True/False
  1728. */
  1729. bool hif_is_driver_unloading(struct hif_softc *scn)
  1730. {
  1731. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  1732. if (cbk && cbk->is_driver_unloading)
  1733. return cbk->is_driver_unloading(cbk->context);
  1734. return false;
  1735. }
  1736. /**
  1737. * hif_is_load_or_unload_in_progress() - API to query upper layers if
  1738. * load/unload in progress
  1739. * @scn: HIF Context
  1740. *
  1741. * Return: True/False
  1742. */
  1743. bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
  1744. {
  1745. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  1746. if (cbk && cbk->is_load_unload_in_progress)
  1747. return cbk->is_load_unload_in_progress(cbk->context);
  1748. return false;
  1749. }
  1750. /**
  1751. * hif_is_recovery_in_progress() - API to query upper layers if recovery in
  1752. * progress
  1753. * @scn: HIF Context
  1754. *
  1755. * Return: True/False
  1756. */
  1757. bool hif_is_recovery_in_progress(struct hif_softc *scn)
  1758. {
  1759. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  1760. if (cbk && cbk->is_recovery_in_progress)
  1761. return cbk->is_recovery_in_progress(cbk->context);
  1762. return false;
  1763. }
  1764. #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
  1765. defined(HIF_IPCI)
  1766. /**
  1767. * hif_update_pipe_callback() - API to register pipe specific callbacks
  1768. * @osc: Opaque softc
  1769. * @pipeid: pipe id
  1770. * @callbacks: callbacks to register
  1771. *
  1772. * Return: void
  1773. */
  1774. void hif_update_pipe_callback(struct hif_opaque_softc *osc,
  1775. u_int8_t pipeid,
  1776. struct hif_msg_callbacks *callbacks)
  1777. {
  1778. struct hif_softc *scn = HIF_GET_SOFTC(osc);
  1779. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  1780. struct HIF_CE_pipe_info *pipe_info;
  1781. QDF_BUG(pipeid < CE_COUNT_MAX);
  1782. hif_debug("pipeid: %d", pipeid);
  1783. pipe_info = &hif_state->pipe_info[pipeid];
  1784. qdf_mem_copy(&pipe_info->pipe_callbacks,
  1785. callbacks, sizeof(pipe_info->pipe_callbacks));
  1786. }
  1787. qdf_export_symbol(hif_update_pipe_callback);
  1788. /**
  1789. * hif_is_target_ready() - API to query if target is in ready state
  1790. * progress
  1791. * @scn: HIF Context
  1792. *
  1793. * Return: True/False
  1794. */
  1795. bool hif_is_target_ready(struct hif_softc *scn)
  1796. {
  1797. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  1798. if (cbk && cbk->is_target_ready)
  1799. return cbk->is_target_ready(cbk->context);
  1800. /*
  1801. * if callback is not registered then there is no way to determine
  1802. * if target is ready. In-such case return true to indicate that
  1803. * target is ready.
  1804. */
  1805. return true;
  1806. }
  1807. qdf_export_symbol(hif_is_target_ready);
  1808. int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
  1809. {
  1810. struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
  1811. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  1812. if (cbk && cbk->get_bandwidth_level)
  1813. return cbk->get_bandwidth_level(cbk->context);
  1814. return 0;
  1815. }
  1816. qdf_export_symbol(hif_get_bandwidth_level);
  1817. #ifdef DP_MEM_PRE_ALLOC
  1818. void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
  1819. qdf_size_t size,
  1820. qdf_dma_addr_t *paddr,
  1821. uint32_t ring_type,
  1822. uint8_t *is_mem_prealloc)
  1823. {
  1824. void *vaddr = NULL;
  1825. struct hif_driver_state_callbacks *cbk =
  1826. hif_get_callbacks_handle(scn);
  1827. *is_mem_prealloc = false;
  1828. if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
  1829. vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
  1830. paddr,
  1831. ring_type);
  1832. if (vaddr) {
  1833. *is_mem_prealloc = true;
  1834. goto end;
  1835. }
  1836. }
  1837. vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
  1838. scn->qdf_dev->dev,
  1839. size,
  1840. paddr);
  1841. end:
  1842. dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
  1843. *is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
  1844. (void *)*paddr, (int)size, ring_type);
  1845. return vaddr;
  1846. }
  1847. void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
  1848. qdf_size_t size,
  1849. void *vaddr,
  1850. qdf_dma_addr_t paddr,
  1851. qdf_dma_context_t memctx,
  1852. uint8_t is_mem_prealloc)
  1853. {
  1854. struct hif_driver_state_callbacks *cbk =
  1855. hif_get_callbacks_handle(scn);
  1856. if (is_mem_prealloc) {
  1857. if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
  1858. cbk->prealloc_put_consistent_mem_unaligned(vaddr);
  1859. } else {
  1860. dp_warn("dp_prealloc_put_consistent_unligned NULL");
  1861. QDF_BUG(0);
  1862. }
  1863. } else {
  1864. qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
  1865. size, vaddr, paddr, memctx);
  1866. }
  1867. }
  1868. #endif
  1869. /**
  1870. * hif_batch_send() - API to access hif specific function
  1871. * ce_batch_send.
  1872. * @osc: HIF Context
  1873. * @msdu : list of msdus to be sent
  1874. * @transfer_id : transfer id
  1875. * @len : downloaded length
  1876. *
  1877. * Return: list of msds not sent
  1878. */
  1879. qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
  1880. uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
  1881. {
  1882. void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
  1883. if (!ce_tx_hdl)
  1884. return NULL;
  1885. return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
  1886. len, sendhead);
  1887. }
  1888. qdf_export_symbol(hif_batch_send);
  1889. /**
  1890. * hif_update_tx_ring() - API to access hif specific function
  1891. * ce_update_tx_ring.
  1892. * @osc: HIF Context
  1893. * @num_htt_cmpls : number of htt compl received.
  1894. *
  1895. * Return: void
  1896. */
  1897. void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
  1898. {
  1899. void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
  1900. ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
  1901. }
  1902. qdf_export_symbol(hif_update_tx_ring);
  1903. /**
  1904. * hif_send_single() - API to access hif specific function
  1905. * ce_send_single.
  1906. * @osc: HIF Context
  1907. * @msdu : msdu to be sent
  1908. * @transfer_id: transfer id
  1909. * @len : downloaded length
  1910. *
  1911. * Return: msdu sent status
  1912. */
  1913. QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
  1914. uint32_t transfer_id, u_int32_t len)
  1915. {
  1916. void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
  1917. if (!ce_tx_hdl)
  1918. return QDF_STATUS_E_NULL_VALUE;
  1919. return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
  1920. len);
  1921. }
  1922. qdf_export_symbol(hif_send_single);
  1923. #endif
  1924. /**
  1925. * hif_reg_write() - API to access hif specific function
  1926. * hif_write32_mb.
  1927. * @hif_ctx : HIF Context
  1928. * @offset : offset on which value has to be written
  1929. * @value : value to be written
  1930. *
  1931. * Return: None
  1932. */
  1933. void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
  1934. uint32_t value)
  1935. {
  1936. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1937. hif_write32_mb(scn, scn->mem + offset, value);
  1938. }
  1939. qdf_export_symbol(hif_reg_write);
  1940. /**
  1941. * hif_reg_read() - API to access hif specific function
  1942. * hif_read32_mb.
  1943. * @hif_ctx : HIF Context
  1944. * @offset : offset from which value has to be read
  1945. *
  1946. * Return: Read value
  1947. */
  1948. uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
  1949. {
  1950. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1951. return hif_read32_mb(scn, scn->mem + offset);
  1952. }
  1953. qdf_export_symbol(hif_reg_read);
  1954. /**
  1955. * hif_ramdump_handler(): generic ramdump handler
  1956. * @scn: struct hif_opaque_softc
  1957. *
  1958. * Return: None
  1959. */
  1960. void hif_ramdump_handler(struct hif_opaque_softc *scn)
  1961. {
  1962. if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
  1963. hif_usb_ramdump_handler(scn);
  1964. }
  1965. hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
  1966. {
  1967. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1968. return scn->wake_irq_type;
  1969. }
  1970. irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
  1971. {
  1972. struct hif_softc *scn = context;
  1973. hif_info("wake interrupt received on irq %d", irq);
  1974. hif_rtpm_set_monitor_wake_intr(0);
  1975. hif_rtpm_request_resume();
  1976. if (scn->initial_wakeup_cb)
  1977. scn->initial_wakeup_cb(scn->initial_wakeup_priv);
  1978. if (hif_is_ut_suspended(scn))
  1979. hif_ut_fw_resume(scn);
  1980. qdf_pm_system_wakeup();
  1981. return IRQ_HANDLED;
  1982. }
  1983. void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
  1984. void (*callback)(void *),
  1985. void *priv)
  1986. {
  1987. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1988. scn->initial_wakeup_cb = callback;
  1989. scn->initial_wakeup_priv = priv;
  1990. }
  1991. void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
  1992. uint32_t ce_service_max_yield_time)
  1993. {
  1994. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  1995. hif_ctx->ce_service_max_yield_time =
  1996. ce_service_max_yield_time * 1000;
  1997. }
  1998. unsigned long long
  1999. hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
  2000. {
  2001. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  2002. return hif_ctx->ce_service_max_yield_time;
  2003. }
  2004. void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
  2005. uint8_t ce_service_max_rx_ind_flush)
  2006. {
  2007. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  2008. if (ce_service_max_rx_ind_flush == 0 ||
  2009. ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
  2010. hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
  2011. else
  2012. hif_ctx->ce_service_max_rx_ind_flush =
  2013. ce_service_max_rx_ind_flush;
  2014. }
  2015. #ifdef SYSTEM_PM_CHECK
  2016. void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
  2017. enum hif_system_pm_state state)
  2018. {
  2019. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  2020. qdf_atomic_set(&hif_ctx->sys_pm_state, state);
  2021. }
  2022. int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
  2023. {
  2024. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  2025. return qdf_atomic_read(&hif_ctx->sys_pm_state);
  2026. }
  2027. int hif_system_pm_state_check(struct hif_opaque_softc *hif)
  2028. {
  2029. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  2030. int32_t sys_pm_state;
  2031. if (!hif_ctx) {
  2032. hif_err("hif context is null");
  2033. return -EFAULT;
  2034. }
  2035. sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
  2036. if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
  2037. sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
  2038. hif_info("Triggering system wakeup");
  2039. qdf_pm_system_wakeup();
  2040. return -EAGAIN;
  2041. }
  2042. return 0;
  2043. }
  2044. #endif