hif_main.c

/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>		/* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "hif_hw_version.h"
#if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
	defined(HIF_IPCI))
#include "ce_tasklet.h"
#include "ce_api.h"
#endif
#include "qdf_trace.h"
#include "hif_debug.h"
#include "mp_dev.h"
#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
	defined(QCA_WIFI_QCA5332)
#include "hal_api.h"
#endif
#include "hif_napi.h"
#include "hif_unit_test_suspend_i.h"
#include "qdf_module.h"
#ifdef HIF_CE_LOG_INFO
#include <qdf_notifier.h>
#include <qdf_hang_event_notifier.h>
#endif
#include <linux/cpumask.h>
#include <pld_common.h>
#include "ce_internal.h"
#include <qdf_tracepoint.h>
#include "qdf_ssr_driver_dump.h"
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}
/**
 * hif_get_target_id(): get the target id
 * @scn: hif context
 *
 * Return the virtual memory base address to the caller.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}
/**
 * hif_get_targetdef(): get the target definition
 * @hif_ctx: hif context
 *
 * Return: void *
 */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->targetdef;
}
#ifdef FORCE_WAKE
#ifndef QCA_WIFI_WCN6450
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn))
		hal_set_init_phase(scn->hal_soc, init_phase);
}
#else
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hal_set_init_phase(scn->hal_soc, init_phase);
}
#endif
#endif /* FORCE_WAKE */

#ifdef HIF_IPCI
void hif_shutdown_notifier_cb(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->recovery = true;
}
#endif
/**
 * hif_vote_link_down(): unvote for link up
 * @hif_ctx: hif context
 *
 * Call hif_vote_link_down to release a previous request made using
 * hif_vote_link_up. A hif_vote_link_down call should only be made
 * after a corresponding hif_vote_link_up, otherwise you could be
 * negating a vote from another source. When no votes are present
 * hif will not guarantee the linkstate after hif_bus_suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	if (scn->linkstate_vote == 0)
		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
				scn->linkstate_vote);

	scn->linkstate_vote--;
	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
	if (scn->linkstate_vote == 0)
		hif_bus_prevent_linkdown(scn, false);
}
/**
 * hif_vote_link_up(): vote to prevent bus from suspending
 * @hif_ctx: hif context
 *
 * Makes hif guarantee that fw can message the host normally
 * during suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_down by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	scn->linkstate_vote++;
	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
	if (scn->linkstate_vote == 1)
		hif_bus_prevent_linkdown(scn, true);
}
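
/*
 * Example (illustrative sketch, not part of this file): a caller that
 * needs firmware messaging to keep working across a bus suspend would
 * bracket that window with a vote pair, assuming hif_ctx is a valid
 * handle obtained from hif_open():
 *
 *	hif_vote_link_up(hif_ctx);
 *	... exchange messages that must survive suspend ...
 *	hif_vote_link_down(hif_ctx);
 *
 * The votes are reference counted via linkstate_vote, so nested callers
 * are safe as long as every up-vote is paired with exactly one
 * down-vote.
 */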
/**
 * hif_can_suspend_link(): query if hif is permitted to suspend the link
 * @hif_ctx: hif context
 *
 * Hif will ensure that the link won't be suspended if the upper
 * layers don't want it to.
 *
 * SYNCHRONIZATION: MC thread is stopped before bus suspend, thus
 * we don't need extra locking to ensure votes don't change while
 * we are in the process of suspending or resuming.
 *
 * Return: false if hif will guarantee link up during suspend.
 */
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	return scn->linkstate_vote == 0;
}
/**
 * hif_hia_item_address(): map a host interest item offset to its address
 * @target_type: target type
 * @item_offset: offset of the item within the host interest area
 *
 * Return: the absolute address of the host interest item, or 0 for
 * targets that have no host interest area.
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
	default:
		ASSERT(0);
		return 0;
	}
}
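
/*
 * Example (illustrative, not part of this file): resolving the address
 * of a host interest item for an AR6003-class target. Item offsets come
 * from targaddrs.h; "some_hi_item_offset" below is only a placeholder
 * for whichever item the caller actually needs:
 *
 *	uint32_t addr = hif_hia_item_address(TARGET_TYPE_AR6003,
 *					     some_hi_item_offset);
 *	if (addr == 0)
 *		... target has no host interest area, bail out ...
 */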
/**
 * hif_max_num_receives_reached() - check whether the max receive count is reached
 * @scn: HIF Context
 * @count: number of packets received so far
 *
 * Return: true if @count exceeds the receive budget for the current mode
 */
bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
{
	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
		return count > 120;
	else
		return count > MAX_NUM_OF_RECEIVES;
}
/**
 * init_buffer_count() - initial buffer count
 * @maxSize: maximum buffer count requested
 *
 * Routine to modify the initial buffer count to be allocated on an OS
 * platform basis. Platform owners will need to modify this as needed.
 *
 * Return: qdf_size_t
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	return maxSize;
}
/**
 * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
 * @hif_ctx: hif context
 * @htc_htt_tx_endpoint: htt_tx_endpoint
 *
 * Return: void
 */
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
				      int htc_htt_tx_endpoint)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is NULL");
		return;
	}

	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
}
qdf_export_symbol(hif_save_htc_htt_config_endpoint);
static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = QCA6390_V1,
		.subid = 0x0,
		.name = "QCA6390_V1",
	},
	{
		.id = QCA6490_V1,
		.subid = 0x0,
		.name = "QCA6490_V1",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	},
	{
		.id = MANGO_V1,
		.subid = 0xF,
		.name = "MANGO_V1",
	},
	{
		.id = PEACH_V1,
		.subid = 0,
		.name = "PEACH_V1",
	},
	{
		.id = KIWI_V1,
		.subid = 0,
		.name = "KIWI_V1",
	},
	{
		.id = KIWI_V2,
		.subid = 0,
		.name = "KIWI_V2",
	},
	{
		.id = WCN6750_V1,
		.subid = 0,
		.name = "WCN6750_V1",
	},
	{
		.id = WCN6750_V2,
		.subid = 0,
		.name = "WCN6750_V2",
	},
	{
		.id = WCN6450_V1,
		.subid = 0,
		.name = "WCN6450_V1",
	},
	{
		.id = QCA6490_v2_1,
		.subid = 0,
		.name = "QCA6490",
	},
	{
		.id = QCA6490_v2,
		.subid = 0,
		.name = "QCA6490",
	},
	{
		.id = WCN3990_TALOS,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_MOOREA,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_SAIPAN,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_RENNELL,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_BITRA,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_DIVAR,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_ATHERTON,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_STRAIT,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_NETRANI,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_CLARENCE,
		.subid = 0,
		.name = "WCN3990",
	}
};
/**
 * hif_get_hw_name(): get a human readable name for the hardware
 * @info: Target Info
 *
 * Return: human readable name for the underlying wifi hardware.
 */
static const char *hif_get_hw_name(struct hif_target_info *info)
{
	int i;

	hif_debug("target version = %d, target revision = %d",
		  info->target_version,
		  info->target_revision);

	if (info->hw_name)
		return info->hw_name;

	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
		if (info->target_version == qwlan_hw_list[i].id &&
		    info->target_revision == qwlan_hw_list[i].subid) {
			return qwlan_hw_list[i].name;
		}
	}

	info->hw_name = qdf_mem_malloc(64);
	if (!info->hw_name)
		return "Unknown Device (nomem)";

	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
			info->target_version);
	if (i < 0)
		return "Unknown Device (snprintf failure)";
	else
		return info->hw_name;
}
/**
 * hif_get_hw_info(): query the hardware version and name
 * @scn: hif context
 * @version: target version
 * @revision: target revision
 * @target_name: target name
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
		     const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}
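
/*
 * Example (illustrative, not part of this file): a caller querying the
 * hardware identity after hif_open(), assuming hif_ctx is a valid handle:
 *
 *	u32 version, revision;
 *	const char *name;
 *
 *	hif_get_hw_info(hif_ctx, &version, &revision, &name);
 *	hif_info("running on %s (version 0x%x rev 0x%x)",
 *		 name, version, revision);
 */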
/**
 * hif_get_dev_ba(): API to get device base address.
 * @hif_handle: hif handle
 *
 * Return: device base address
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}
qdf_export_symbol(hif_get_dev_ba);

/**
 * hif_get_dev_ba_ce(): API to get device CE base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for CE
 */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_ce;
}
qdf_export_symbol(hif_get_dev_ba_ce);

/**
 * hif_get_dev_ba_pmm(): API to get device PMM base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for PMM
 */
void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_pmm_base;
}
qdf_export_symbol(hif_get_dev_ba_pmm);

uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->target_info.soc_version;
}
qdf_export_symbol(hif_get_soc_version);

/**
 * hif_get_dev_ba_cmem(): API to get device CMEM base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for CMEM
 */
void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_cmem;
}
qdf_export_symbol(hif_get_dev_ba_cmem);
#ifdef FEATURE_RUNTIME_PM
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
{
	if (is_get)
		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
	else
		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
}

static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
}

static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
}
#else
static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
}

static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
}
#endif
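
/*
 * Example (illustrative, not part of this file):
 * hif_runtime_prevent_linkdown() is a get/put style interface; each
 * is_get=true call should be balanced by a matching is_get=false call
 * once the link-sensitive work is done:
 *
 *	hif_runtime_prevent_linkdown(scn, true);
 *	... work that requires the link to stay up ...
 *	hif_runtime_prevent_linkdown(scn, false);
 */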
#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * hif_get_interrupt_threshold_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle
 *
 * Return: None
 */
static inline
void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
					       struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.ce_status_ring_timer_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
		scn->ini_cfg.ce_status_ring_batch_count_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
	}
}
#else
static inline
void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
					       struct wlan_objmgr_psoc *psoc)
{
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */
/**
 * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle
 *
 * Return: None
 */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.disable_wake_irq =
			cfg_get(psoc, CFG_DISABLE_WAKE_IRQ);
		/*
		 * The wake IRQ can't share an IRQ line with the copy
		 * engines. In one-MSI mode we can't tell inside the wake
		 * IRQ handler whether the wake IRQ actually fired (known
		 * issue, CR 2055359). To support the wake IRQ, allocate
		 * at least 2 MSI vectors: the first for the wake IRQ,
		 * with everything else sharing the second.
		 */
		if (pld_is_one_msi(scn->qdf_dev->dev)) {
			hif_debug("Disabling wake IRQ in one-MSI mode");
			scn->ini_cfg.disable_wake_irq = true;
		}
		hif_get_interrupt_threshold_cfg_from_psoc(scn, psoc);
	}
}
#if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
/**
 * hif_recovery_notifier_cb - Recovery notifier callback to log
 *  hang event data
 * @block: notifier block
 * @state: state
 * @data: notifier data
 *
 * Return: status
 */
static
int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
			     void *data)
{
	struct qdf_notifer_data *notif_data = data;
	qdf_notif_block *notif_block;
	struct hif_softc *hif_handle;
	bool bus_id_invalid;

	if (!data || !block)
		return -EINVAL;

	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);

	hif_handle = notif_block->priv_data;
	if (!hif_handle)
		return -EINVAL;

	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
					  &notif_data->offset);
	if (bus_id_invalid)
		return NOTIFY_STOP_MASK;

	hif_log_ce_info(hif_handle, notif_data->hang_data,
			&notif_data->offset);

	return 0;
}

/**
 * hif_register_recovery_notifier - Register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier;

	if (!hif_handle)
		return QDF_STATUS_E_FAILURE;

	hif_notifier = &hif_handle->hif_recovery_notifier;

	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
	hif_notifier->priv_data = hif_handle;
	return qdf_hang_event_register_notifier(hif_notifier);
}

/**
 * hif_unregister_recovery_notifier - Un-register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;

	return qdf_hang_event_unregister_notifier(hif_notifier);
}
#else
static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif
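
/*
 * Example (illustrative, not part of this file): the notifier pair above
 * is meant to bracket the driver's active lifetime, so a typical caller
 * registers once hif is up and unregisters before teardown:
 *
 *	if (QDF_IS_STATUS_ERROR(hif_register_recovery_notifier(scn)))
 *		hif_err("hang event notifier registration failed");
 *	...
 *	hif_unregister_recovery_notifier(scn);
 *
 * When a hang event fires, qdf walks its notifier chain and
 * hif_recovery_notifier_cb() appends bus and CE state into the hang
 * data buffer at the running offset.
 */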
#if defined(HIF_CPU_PERF_AFFINE_MASK) || \
	defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)
/**
 * __hif_cpu_hotplug_notify() - CPU hotplug event handler
 * @context: HIF context
 * @cpu: CPU Id of the CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void __hif_cpu_hotplug_notify(void *context,
				     uint32_t cpu, bool cpu_up)
{
	struct hif_softc *scn = context;

	if (!scn)
		return;
	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
		return;

	if (cpu_up) {
		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
		hif_debug("Setting affinity for online CPU: %d", cpu);
	} else {
		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
	}
}

/**
 * hif_cpu_hotplug_notify - cpu core up/down notification
 *  handler
 * @context: HIF context
 * @cpu: CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
{
	struct qdf_op_sync *op_sync;

	if (qdf_op_protect(&op_sync))
		return;

	__hif_cpu_hotplug_notify(context, cpu, cpu_up);

	qdf_op_unprotect(op_sync);
}

static void hif_cpu_online_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, true);
}

static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, false);
}

static void hif_cpuhp_register(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot register hotplug notifiers");
		return;
	}

	qdf_cpuhp_register(&scn->cpuhp_event_handle,
			   scn,
			   hif_cpu_online_cb,
			   hif_cpu_before_offline_cb);
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot unregister hotplug notifiers");
		return;
	}

	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
}
#else
static void hif_cpuhp_register(struct hif_softc *scn)
{
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
}
#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
#ifdef HIF_DETECTION_LATENCY_ENABLE
/*
 * Bitmask controlling which tasklets have latency detection enabled:
 * bit X covers the tasklet of WLAN_CE_X.
 */
#ifndef DETECTION_LATENCY_TASKLET_MASK
#define DETECTION_LATENCY_TASKLET_MASK (BIT(2) | BIT(7))
#endif
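
/*
 * For example, the default mask above monitors the CE2 and CE7 tasklets;
 * since the define is guarded by #ifndef, a build could widen coverage
 * to CE0 as well by defining DETECTION_LATENCY_TASKLET_MASK as
 * (BIT(0) | BIT(2) | BIT(7)) ahead of this block (illustrative, not a
 * shipped configuration).
 */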
static inline int
__hif_tasklet_latency(struct hif_softc *scn, bool from_timer, int idx)
{
	qdf_time_t sched_time =
		scn->latency_detect.tasklet_info[idx].sched_time;
	qdf_time_t exec_time =
		scn->latency_detect.tasklet_info[idx].exec_time;
	qdf_time_t curr_time = qdf_system_ticks();
	uint32_t threshold = scn->latency_detect.threshold;
	qdf_time_t expect_exec_time =
		sched_time + qdf_system_msecs_to_ticks(threshold);

	/* Two kinds of check here.
	 * from_timer==true: check whether the tasklet has stalled
	 * from_timer==false: check whether the tasklet executed late
	 */
	if (from_timer ?
	    (qdf_system_time_after(sched_time, exec_time) &&
	     qdf_system_time_after(curr_time, expect_exec_time)) :
	    qdf_system_time_after(exec_time, expect_exec_time)) {
		hif_err("tasklet[%d] latency detected: from_timer %d, curr_time %lu, sched_time %lu, exec_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
			idx, from_timer, curr_time, sched_time,
			exec_time, threshold,
			scn->latency_detect.timeout,
			qdf_get_cpu(), (void *)_RET_IP_);
		qdf_trigger_self_recovery(NULL,
					  QDF_TASKLET_CREDIT_LATENCY_DETECT);
		return -ETIMEDOUT;
	}

	return 0;
}
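
/*
 * Worked example (illustrative numbers, assuming one tick per ms): with
 * a 100 ms threshold, suppose a tasklet was scheduled at sched_time =
 * 1000 but never ran, so exec_time still holds an older value of 900 and
 * expect_exec_time = 1100. When the detection timer fires at curr_time =
 * 1200 (from_timer == true), sched_time is after exec_time and curr_time
 * is past expect_exec_time, so the stall is reported. On the execution
 * path (from_timer == false), the same threshold catches a tasklet that
 * did run but only at exec_time > expect_exec_time.
 */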
/**
 * hif_tasklet_latency_detect_enabled() - check whether latency detect
 *  is enabled for the tasklet which is specified by idx
 * @scn: HIF context
 * @idx: CE id
 *
 * Return: true if latency detect is enabled for the specified tasklet,
 *  false otherwise.
 */
static inline bool
hif_tasklet_latency_detect_enabled(struct hif_softc *scn, int idx)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return false;

	if (!scn->latency_detect.enable_detection)
		return false;

	if (idx < 0 || idx >= HIF_TASKLET_IN_MONITOR ||
	    !qdf_test_bit(idx, scn->latency_detect.tasklet_bmap))
		return false;

	return true;
}

void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx)
{
	if (!hif_tasklet_latency_detect_enabled(scn, idx))
		return;

	/*
	 * hif_set_enable_detection(true) might come between
	 * hif_tasklet_latency_record_sched() and
	 * hif_tasklet_latency_record_exec() during wlan startup; then
	 * sched_time is 0 but exec_time is not, which hits the timeout
	 * case in __hif_tasklet_latency().
	 * To avoid that, skip exec_time recording if sched_time has not
	 * been recorded.
	 */
	if (!scn->latency_detect.tasklet_info[idx].sched_time)
		return;

	scn->latency_detect.tasklet_info[idx].exec_time = qdf_system_ticks();
	__hif_tasklet_latency(scn, false, idx);
}

void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx)
{
	if (!hif_tasklet_latency_detect_enabled(scn, idx))
		return;

	scn->latency_detect.tasklet_info[idx].sched_cpuid = qdf_get_cpu();
	scn->latency_detect.tasklet_info[idx].sched_time = qdf_system_ticks();
}
static inline void hif_credit_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t credit_request_time =
		scn->latency_detect.credit_request_time;
	qdf_time_t credit_report_time = scn->latency_detect.credit_report_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t threshold = scn->latency_detect.threshold;
	int cpu_id = qdf_get_cpu();

	/* Two kinds of check here.
	 * from_timer==true: check whether the credit report has stalled
	 * from_timer==false: check whether the credit report arrived late
	 */
	if ((from_timer ?
	     qdf_system_time_after(credit_request_time, credit_report_time) :
	     qdf_system_time_after(credit_report_time, credit_request_time)) &&
	    qdf_system_time_after(curr_jiffies,
				  credit_request_time +
				  qdf_system_msecs_to_ticks(threshold))) {
		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu, credit_report_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, credit_request_time,
			credit_report_time, threshold,
			scn->latency_detect.timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}

static inline void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
{
	int i, ret;

	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
		if (!qdf_test_bit(i, scn->latency_detect.tasklet_bmap))
			continue;

		ret = __hif_tasklet_latency(scn, from_timer, i);
		if (ret)
			return;
	}
}
/**
 * hif_check_detection_latency(): check for tasklet/credit latency
 * @scn: hif context
 * @from_timer: true if called from the detection timer handler
 * @bitmap_type: indicates whether to check tasklets, credit, or both
 *
 * Return: none
 */
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	if (!scn->latency_detect.enable_detection)
		return;

	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
		hif_tasklet_latency(scn, from_timer);

	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
		hif_credit_latency(scn, from_timer);
}
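
/*
 * Example (grounded in this file): a caller on the credit path that only
 * wants the credit check invokes
 *
 *	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
 *
 * while the detection timer handler below asks for both checks at once
 * by passing BIT(HIF_DETECT_TASKLET) | BIT(HIF_DETECT_CREDIT).
 */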
static void hif_latency_detect_timeout_handler(void *arg)
{
	struct hif_softc *scn = (struct hif_softc *)arg;
	int next_cpu, i;
	qdf_cpu_mask cpu_mask = {0};
	struct hif_latency_detect *detect = &scn->latency_detect;

	hif_check_detection_latency(scn, true,
				    BIT(HIF_DETECT_TASKLET) |
				    BIT(HIF_DETECT_CREDIT));

	/* The timer needs to be restarted on a different CPU so that it
	 * can detect a tasklet schedule stall. There is still a chance
	 * that, after the timer has been started, the irq/tasklet lands
	 * on that same CPU; the tasklet then runs before the softirq
	 * timer, and if that tasklet stalls, this timer can't detect it.
	 * We accept this as a limitation: a stalled tasklet will still
	 * be detected elsewhere, just a little later.
	 */
	qdf_cpumask_copy(&cpu_mask, (const qdf_cpu_mask *)cpu_active_mask);
	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
		if (!qdf_test_bit(i, detect->tasklet_bmap))
			continue;

		qdf_cpumask_clear_cpu(detect->tasklet_info[i].sched_cpuid,
				      &cpu_mask);
	}

	next_cpu = cpumask_first(&cpu_mask);
	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
		hif_debug("start timer on local");
		/* no available CPU found; restart on the local CPU */
		qdf_timer_mod(&detect->timer, detect->timeout);
	} else {
		qdf_timer_start_on(&detect->timer, detect->timeout, next_cpu);
	}
}
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{
	scn->latency_detect.timeout =
		DETECTION_TIMER_TIMEOUT;
	scn->latency_detect.threshold =
		DETECTION_LATENCY_THRESHOLD;

	hif_info("timer timeout %u, latency threshold %u",
		 scn->latency_detect.timeout,
		 scn->latency_detect.threshold);

	scn->latency_detect.is_timer_started = false;

	qdf_timer_init(NULL,
		       &scn->latency_detect.timer,
		       &hif_latency_detect_timeout_handler,
		       scn,
		       QDF_TIMER_TYPE_SW_SPIN);
}

static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{
	hif_info("deinit timer");
	qdf_timer_free(&scn->latency_detect.timer);
}

static void hif_latency_detect_init(struct hif_softc *scn)
{
	uint32_t tasklet_mask;
	int i;

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	tasklet_mask = DETECTION_LATENCY_TASKLET_MASK;
	hif_info("tasklet mask is 0x%x", tasklet_mask);
	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
		if (BIT(i) & tasklet_mask)
			qdf_set_bit(i, scn->latency_detect.tasklet_bmap);
	}

	hif_latency_detect_timer_init(scn);
}

static void hif_latency_detect_deinit(struct hif_softc *scn)
{
	int i;

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_latency_detect_timer_deinit(scn);
	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++)
		qdf_clear_bit(i, scn->latency_detect.tasklet_bmap);
}
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("start timer");
	if (scn->latency_detect.is_timer_started) {
		hif_info("timer has been started");
		return;
	}

	qdf_timer_start(&scn->latency_detect.timer,
			scn->latency_detect.timeout);
	scn->latency_detect.is_timer_started = true;
}

void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("stop timer");

	qdf_timer_sync_cancel(&scn->latency_detect.timer);
	scn->latency_detect.is_timer_started = false;
}
void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is NULL");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	if (HIF_REQUEST_CREDIT == type)
		scn->latency_detect.credit_request_time = qdf_system_ticks();
	else if (HIF_PROCESS_CREDIT_REPORT == type)
		scn->latency_detect.credit_report_time = qdf_system_ticks();

	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
}

void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is NULL");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	scn->latency_detect.enable_detection = value;
}
#else
static inline void hif_latency_detect_init(struct hif_softc *scn)
{}

static inline void hif_latency_detect_deinit(struct hif_softc *scn)
{}
#endif
#ifdef WLAN_FEATURE_AFFINITY_MGR
#define AFFINITY_THRESHOLD 5000000
static inline void
hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
{
	unsigned int cpus;
	qdf_cpu_mask allowed_mask = {0};

	scn->affinity_mgr_supported =
		(cfg_get(psoc, CFG_IRQ_AFFINE_AUDIO_USE_CASE) &&
		 qdf_walt_get_cpus_taken_supported());

	hif_info("Affinity Manager supported: %d", scn->affinity_mgr_supported);

	if (!scn->affinity_mgr_supported)
		return;

	scn->time_threshold = AFFINITY_THRESHOLD;
	qdf_for_each_possible_cpu(cpus)
		if (qdf_topology_physical_package_id(cpus) ==
		    CPU_CLUSTER_TYPE_LITTLE)
			qdf_cpumask_set_cpu(cpus, &allowed_mask);
	qdf_cpumask_copy(&scn->allowed_mask, &allowed_mask);
}
#else
static inline void
hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
{
}
#endif
#ifdef FEATURE_DIRECT_LINK
/**
 * hif_init_direct_link_rcv_pipe_num(): Initialize the direct link receive
 *  pipe number
 * @scn: hif context
 *
 * Return: None
 */
static inline
void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
{
	scn->dl_recv_pipe_num = INVALID_PIPE_NO;
}
#else
static inline
void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
{
}
#endif
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc)
{
	struct hif_softc *scn;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int bus_context_size = hif_bus_get_context_size(bus_type);

	if (bus_context_size == 0) {
		hif_err("context size 0 not allowed");
		return NULL;
	}

	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
	if (!scn)
		return GET_HIF_OPAQUE_HDL(scn);

	scn->qdf_dev = qdf_ctx;
	scn->hif_con_param = mode;
	qdf_atomic_init(&scn->active_tasklet_cnt);
	qdf_atomic_init(&scn->active_oom_work_cnt);

	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
	qdf_atomic_init(&scn->link_suspended);
	qdf_atomic_init(&scn->tasklet_from_intr);
	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
	qdf_mem_copy(&scn->callbacks, cbk,
		     sizeof(struct hif_driver_state_callbacks));
	scn->bus_type = bus_type;

	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
	hif_get_cfg_from_psoc(scn, psoc);

	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
	status = hif_bus_open(scn, bus_type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_bus_open error = %d, bus_type = %d",
			status, bus_type);
		qdf_mem_free(scn);
		scn = NULL;
		goto out;
	}

	hif_rtpm_lock_init(scn);

	hif_cpuhp_register(scn);
	hif_latency_detect_init(scn);
	hif_affinity_mgr_init(scn, psoc);
	hif_init_direct_link_rcv_pipe_num(scn);
	hif_ce_desc_history_log_register(scn);
	hif_desc_history_log_register();
	qdf_ssr_driver_dump_register_region("hif", scn, sizeof(*scn));

out:
	return GET_HIF_OPAQUE_HDL(scn);
}
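
/*
 * Example (illustrative sketch, not part of this file): a bus attach
 * path that creates and later destroys the hif layer. The callback
 * struct contents are caller-specific and elided here:
 *
 *	struct hif_driver_state_callbacks cbk = { ... };
 *	struct hif_opaque_softc *hif_ctx;
 *
 *	hif_ctx = hif_open(qdf_dev, QDF_GLOBAL_MISSION_MODE,
 *			   QDF_BUS_TYPE_PCI, &cbk, psoc);
 *	if (!hif_ctx)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	hif_close(hif_ctx);
 */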
#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
 * @scn: hif context
 *
 * Return: none
 */
void hif_uninit_rri_on_ddr(struct hif_softc *scn)
{
	if (scn->vaddr_rri_on_ddr)
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					RRI_ON_DDR_MEM_SIZE,
					scn->vaddr_rri_on_ddr,
					scn->paddr_rri_on_ddr, 0);

	scn->vaddr_rri_on_ddr = NULL;
}
#endif
/**
 * hif_close(): close and free the hif context
 * @hif_ctx: hif context
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_opaque_softc is NULL");
		return;
	}

	qdf_ssr_driver_dump_unregister_region("hif");
	hif_desc_history_log_unregister();
	hif_ce_desc_history_log_unregister();
	hif_latency_detect_deinit(scn);

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;

		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);
	hif_cleanup_static_buf_to_target(scn);
	hif_cpuhp_unregister(scn);
	hif_rtpm_lock_deinit(scn);

	hif_bus_close(scn);

	qdf_mem_free(scn);
}
/**
 * hif_get_num_active_grp_tasklets() - get the number of active
 *  datapath group tasklets pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of datapath group tasklets which are active
 */
static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
}

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
	defined(QCA_WIFI_QCN6432) || \
	defined(QCA_WIFI_QCA9574)) || defined(QCA_WIFI_QCA5332)
/**
 * hif_get_num_pending_work() - get the number of entries in
 *  the workqueue pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of register write work items which are pending
 */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return hal_get_reg_write_pending_work(scn->hal_soc);
}
#elif defined(FEATURE_HIF_DELAYED_REG_WRITE)
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_work_cnt);
}
#else
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return 0;
}
#endif
QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
{
	uint32_t task_drain_wait_cnt = 0;
	int tasklet = 0, grp_tasklet = 0, work = 0, oom_work = 0;

	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn)) ||
	       (oom_work = hif_get_num_active_oom_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending tasklets %d grp tasklets %d work %d oom work %d",
				tasklet, grp_tasklet, work, oom_work);
			/*
			 * There is a chance of the OOM thread getting
			 * scheduled continuously, or its execution getting
			 * delayed, in a low-memory state. So avoid the
			 * panic and just prevent suspend if the OOM thread
			 * is unable to complete its pending work.
			 */
			if (oom_work)
				hif_err("OOM thread is still pending %d tasklets %d grp tasklets %d work %d",
					oom_work, tasklet, grp_tasklet, work);
			else
				QDF_DEBUG_PANIC("Complete tasks takes more than %u ms: tasklets %d grp tasklets %d work %d oom_work %d",
						HIF_TASK_DRAIN_WAIT_CNT * 10,
						tasklet, grp_tasklet, work,
						oom_work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for tasklets %d grp tasklets %d work %d oom_work %d",
			 tasklet, grp_tasklet, work, oom_work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}
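
/*
 * Note on the drain loop above: each iteration sleeps 10 ms, so the
 * total wait budget is HIF_TASK_DRAIN_WAIT_CNT * 10 ms before the
 * function either panics (normal case) or, when OOM work is the
 * holdout, merely fails with QDF_STATUS_E_FAULT so the caller can
 * abort the suspend and retry later.
 */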
QDF_STATUS hif_try_complete_dp_tasks(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint32_t task_drain_wait_cnt = 0;
	int grp_tasklet = 0, work = 0;

	while ((grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending grp tasklets %d work %d",
				grp_tasklet, work);
			QDF_DEBUG_PANIC("Complete tasks takes more than %u ms: grp tasklets %d work %d",
					HIF_TASK_DRAIN_WAIT_CNT * 10,
					grp_tasklet, work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for grp tasklets %d work %d",
			 grp_tasklet, work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}
#ifdef HIF_HAL_REG_ACCESS_SUPPORT
void hif_reg_window_write(struct hif_softc *scn, uint32_t offset,
			  uint32_t value)
{
	hal_write32_mb(scn->hal_soc, offset, value);
}

uint32_t hif_reg_window_read(struct hif_softc *scn, uint32_t offset)
{
	return hal_read32_mb(scn->hal_soc, offset);
}
#endif
#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint32_t work_drain_wait_cnt = 0;
	uint32_t wait_cnt = 0;
	int work = 0;

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);

	while ((work = hif_get_num_pending_work(scn))) {
		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
			qdf_atomic_set(&scn->dp_ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			qdf_atomic_set(&scn->ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			hif_err("timeout waiting for pending work %d", work);
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(10);
	}

	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
		return QDF_STATUS_SUCCESS;

	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
			hif_err("EP vote release not processed by FW");
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(5);
	}

	return QDF_STATUS_SUCCESS;
}
void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint8_t vote_access;

	vote_access = qdf_atomic_read(&scn->ep_vote_access);

	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
		hif_info("EP vote changed from:%u to intermediate state",
			 vote_access);

	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
		QDF_BUG(0);

	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
}

void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
}

void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			    uint8_t type, uint8_t access)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (type == HIF_EP_VOTE_DP_ACCESS)
		qdf_atomic_set(&scn->dp_ep_vote_access, access);
	else
		qdf_atomic_set(&scn->ep_vote_access, access);
}

uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			       uint8_t type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (type == HIF_EP_VOTE_DP_ACCESS)
		return qdf_atomic_read(&scn->dp_ep_vote_access);
	else
		return qdf_atomic_read(&scn->ep_vote_access);
}
#endif
#ifdef FEATURE_HIF_DELAYED_REG_WRITE
#ifdef MEMORY_DEBUG
#define HIF_REG_WRITE_QUEUE_LEN 128
#else
#define HIF_REG_WRITE_QUEUE_LEN 32
#endif
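/*
 * Note (editorial sketch): HIF_REG_WRITE_QUEUE_LEN must stay a power of
 * two, because the producer and consumer wrap their ring indices by
 * masking rather than by modulo, e.g.:
 *
 *	write_idx = qdf_atomic_inc_return(&scn->write_idx);
 *	write_idx &= (HIF_REG_WRITE_QUEUE_LEN - 1);
 *
 * With a length of 32, the running counter value 33 maps to slot
 * 33 & 31 = 1, so the ring wraps cleanly; a non-power-of-two length
 * would break this masking arithmetic.
 */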
/**
 * hif_print_reg_write_stats() - Print hif delayed reg write stats
 * @hif_ctx: hif opaque handle
 *
 * Return: None
 */
void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;
	uint32_t *hist;
	int i;

	hist = scn->wstats.sched_delay;
	hif_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
		  qdf_atomic_read(&scn->wstats.enqueues),
		  scn->wstats.dequeues,
		  qdf_atomic_read(&scn->wstats.coalesces),
		  qdf_atomic_read(&scn->wstats.direct),
		  qdf_atomic_read(&scn->wstats.q_depth),
		  scn->wstats.max_q_depth,
		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us],
		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us],
		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us],
		  hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]);

	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (!ce_state)
			continue;

		hif_debug("ce%d: enq %u deq %u coal %u direct %u",
			  i, ce_state->wstats.enqueues,
			  ce_state->wstats.dequeues,
			  ce_state->wstats.coalesces,
			  ce_state->wstats.direct);
	}
}
/**
 * hif_is_reg_write_tput_level_high() - throughput level for delayed reg writes
 * @scn: hif_softc pointer
 *
 * Return: true if throughput is high, else false.
 */
static inline bool hif_is_reg_write_tput_level_high(struct hif_softc *scn)
{
	int bw_level = hif_get_bandwidth_level(GET_HIF_OPAQUE_HDL(scn));

	return bw_level >= PLD_BUS_WIDTH_MEDIUM;
}
/**
 * hif_reg_write_fill_sched_delay_hist() - fill reg write delay histogram
 * @scn: hif_softc pointer
 * @delay_us: delay in us
 *
 * Return: None
 */
static inline void hif_reg_write_fill_sched_delay_hist(struct hif_softc *scn,
						       uint64_t delay_us)
{
	uint32_t *hist;

	hist = scn->wstats.sched_delay;

	if (delay_us < 100)
		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us]++;
	else if (delay_us < 1000)
		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us]++;
	else if (delay_us < 5000)
		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us]++;
	else
		hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]++;
}
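/*
 * Bucket boundaries, for reference (editorial note): [0, 100) us,
 * [100, 1000) us, [1000, 5000) us and [5000, inf) us. For example, a
 * scheduling delay of 750 us increments the SUB_1000us bucket, while
 * exactly 5000 us lands in GT_5000us.
 */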
/**
 * hif_process_reg_write_q_elem() - process a register write queue element
 * @scn: hif_softc pointer
 * @q_elem: pointer to hal register write queue element
 *
 * Return: The value which was written to the address
 */
static int32_t
hif_process_reg_write_q_elem(struct hif_softc *scn,
			     struct hif_reg_write_q_elem *q_elem)
{
	struct CE_state *ce_state = q_elem->ce_state;
	uint32_t write_val = -1;

	qdf_spin_lock_bh(&ce_state->ce_index_lock);

	ce_state->reg_write_in_progress = false;
	ce_state->wstats.dequeues++;

	if (ce_state->src_ring) {
		q_elem->dequeue_val = ce_state->src_ring->write_index;
		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
			       ce_state->src_ring->write_index);
		write_val = ce_state->src_ring->write_index;
	} else if (ce_state->dest_ring) {
		q_elem->dequeue_val = ce_state->dest_ring->write_index;
		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
			       ce_state->dest_ring->write_index);
		write_val = ce_state->dest_ring->write_index;
	} else {
		hif_debug("invalid reg write received");
		qdf_assert(0);
	}

	q_elem->valid = 0;
	ce_state->last_dequeue_time = q_elem->dequeue_time;

	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	return write_val;
}
/**
 * hif_reg_write_work() - Worker to process delayed writes
 * @arg: hif_softc pointer
 *
 * Return: None
 */
static void hif_reg_write_work(void *arg)
{
	struct hif_softc *scn = arg;
	struct hif_reg_write_q_elem *q_elem;
	uint32_t offset;
	uint64_t delta_us;
	int32_t q_depth, write_val;
	uint32_t num_processed = 0;
	int32_t ring_id;

	q_elem = &scn->reg_write_queue[scn->read_idx];
	q_elem->work_scheduled_time = qdf_get_log_timestamp();
	q_elem->cpu_id = qdf_get_cpu();

	/* Make sure q_elem is read back consistently across cores */
	qdf_rmb();
	if (!q_elem->valid)
		return;

	q_depth = qdf_atomic_read(&scn->wstats.q_depth);
	if (q_depth > scn->wstats.max_q_depth)
		scn->wstats.max_q_depth = q_depth;

	if (hif_prevent_link_low_power_states(GET_HIF_OPAQUE_HDL(scn))) {
		scn->wstats.prevent_l1_fails++;
		return;
	}

	while (true) {
		qdf_rmb();
		if (!q_elem->valid)
			break;

		qdf_rmb();
		q_elem->dequeue_time = qdf_get_log_timestamp();
		ring_id = q_elem->ce_state->id;
		offset = q_elem->offset;
		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
						      q_elem->enqueue_time);
		hif_reg_write_fill_sched_delay_hist(scn, delta_us);

		scn->wstats.dequeues++;
		qdf_atomic_dec(&scn->wstats.q_depth);

		write_val = hif_process_reg_write_q_elem(scn, q_elem);
		hif_debug("read_idx %u ce_id %d offset 0x%x dequeue_val %d",
			  scn->read_idx, ring_id, offset, write_val);

		qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val,
					   q_elem->dequeue_val,
					   q_elem->enqueue_time,
					   q_elem->dequeue_time);

		num_processed++;
		scn->read_idx = (scn->read_idx + 1) &
				(HIF_REG_WRITE_QUEUE_LEN - 1);
		q_elem = &scn->reg_write_queue[scn->read_idx];
	}

	hif_allow_link_low_power_states(GET_HIF_OPAQUE_HDL(scn));

	/*
	 * Decrement active_work_cnt by the number of elements dequeued only
	 * after hif_allow_link_low_power_states. This makes sure that
	 * hif_try_complete_tasks will wait until we make the bus access in
	 * hif_allow_link_low_power_states, which avoids a race between the
	 * delayed register write worker and bus suspend (system suspend or
	 * runtime suspend).
	 *
	 * The following decrement must therefore be done last!
	 */
	qdf_atomic_sub(num_processed, &scn->active_work_cnt);
}
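/*
 * Why the late decrement matters, as a sketch (hypothetical interleaving,
 * editorial note): if active_work_cnt were decremented inside the loop, a
 * concurrent suspend could observe it as zero between the last ring write
 * and hif_allow_link_low_power_states(), and power the link down while the
 * worker still has one bus access left:
 *
 *	worker CPU			suspend CPU
 *	----------			-----------
 *	dequeue, write ring
 *	atomic_sub (too early!)
 *					hif_try_complete_tasks() -> 0 pending
 *					link enters low-power state
 *	hif_allow_link_low_power_states()  <-- bus access after suspend
 *
 * Deferring the subtraction until after the final bus access closes this
 * window.
 */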
/**
 * hif_delayed_reg_write_deinit() - De-initialize delayed reg write processing
 * @scn: hif_softc pointer
 *
 * De-initialize the main data structures used to process register writes in
 * a delayed workqueue.
 *
 * Return: None
 */
static void hif_delayed_reg_write_deinit(struct hif_softc *scn)
{
	qdf_flush_work(&scn->reg_write_work);
	qdf_disable_work(&scn->reg_write_work);
	qdf_flush_workqueue(0, scn->reg_write_wq);
	qdf_destroy_workqueue(0, scn->reg_write_wq);
	qdf_mem_free(scn->reg_write_queue);
}
/**
 * hif_delayed_reg_write_init() - Initialization function for delayed reg writes
 * @scn: hif_softc pointer
 *
 * Initialize the main data structures used to process register writes in a
 * delayed workqueue.
 *
 * Return: QDF_STATUS
 */
static QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
{
	qdf_atomic_init(&scn->active_work_cnt);
	scn->reg_write_wq =
		qdf_alloc_high_prior_ordered_workqueue("hif_register_write_wq");
	qdf_create_work(0, &scn->reg_write_work, hif_reg_write_work, scn);
	scn->reg_write_queue = qdf_mem_malloc(HIF_REG_WRITE_QUEUE_LEN *
					      sizeof(*scn->reg_write_queue));
	if (!scn->reg_write_queue) {
		hif_err("unable to allocate memory for delayed reg write");
		QDF_BUG(0);
		return QDF_STATUS_E_NOMEM;
	}

	/* Initial value of indices */
	scn->read_idx = 0;
	qdf_atomic_set(&scn->write_idx, -1);

	return QDF_STATUS_SUCCESS;
}
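/*
 * Note on the initial indices (editorial): write_idx starts at -1 because
 * the producer uses qdf_atomic_inc_return(), i.e. increment-then-read. The
 * first enqueue therefore yields 0 and, after masking, lands in slot 0,
 * matching read_idx which starts at 0:
 *
 *	write_idx = qdf_atomic_inc_return(&scn->write_idx);  // -1 -> 0
 *	write_idx &= (HIF_REG_WRITE_QUEUE_LEN - 1);          // slot 0
 */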
static void hif_reg_write_enqueue(struct hif_softc *scn,
				  struct CE_state *ce_state,
				  uint32_t value)
{
	struct hif_reg_write_q_elem *q_elem;
	uint32_t write_idx;

	if (ce_state->reg_write_in_progress) {
		hif_debug("Already in progress ce_id %d offset 0x%x value %u",
			  ce_state->id, ce_state->ce_wrt_idx_offset, value);
		qdf_atomic_inc(&scn->wstats.coalesces);
		ce_state->wstats.coalesces++;
		return;
	}

	write_idx = qdf_atomic_inc_return(&scn->write_idx);
	write_idx = write_idx & (HIF_REG_WRITE_QUEUE_LEN - 1);

	q_elem = &scn->reg_write_queue[write_idx];

	if (q_elem->valid) {
		hif_err("queue full");
		QDF_BUG(0);
		return;
	}

	qdf_atomic_inc(&scn->wstats.enqueues);
	ce_state->wstats.enqueues++;

	qdf_atomic_inc(&scn->wstats.q_depth);

	q_elem->ce_state = ce_state;
	q_elem->offset = ce_state->ce_wrt_idx_offset;
	q_elem->enqueue_val = value;
	q_elem->enqueue_time = qdf_get_log_timestamp();

	/*
	 * Before the valid flag is set to true, all the other fields in the
	 * q_elem need to be updated in memory. Otherwise the dequeuing worker
	 * thread might read stale entries and process the wrong srng.
	 */
	qdf_wmb();
	q_elem->valid = true;

	/*
	 * After all the other fields in the q_elem have been updated in
	 * memory, the valid flag itself needs to reach memory promptly as
	 * well. Otherwise the dequeuing worker thread might read a stale
	 * valid flag and skip this element for the current round; if no
	 * further work is scheduled later, this hal register write would
	 * never be performed.
	 */
	qdf_wmb();
	ce_state->reg_write_in_progress = true;
	qdf_atomic_inc(&scn->active_work_cnt);

	hif_debug("write_idx %u ce_id %d offset 0x%x value %u",
		  write_idx, ce_state->id, ce_state->ce_wrt_idx_offset, value);

	qdf_queue_work(scn->qdf_dev, scn->reg_write_wq,
		       &scn->reg_write_work);
}
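/*
 * Barrier pairing, sketched (editorial note): the producer's qdf_wmb()
 * before setting q_elem->valid pairs with the consumer's qdf_rmb() before
 * it reads the payload in hif_reg_write_work():
 *
 *	producer (enqueue)		consumer (worker)
 *	------------------		-----------------
 *	fill q_elem fields
 *	qdf_wmb()
 *	q_elem->valid = true	->	qdf_rmb()
 *					if (!q_elem->valid) break;
 *					qdf_rmb()
 *					read q_elem fields
 *
 * This is the classic publish/consume ordering for a ring that avoids
 * per-element locks: validity is the publication point, and the barriers
 * guarantee the payload is visible before (and only after) that point.
 */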
void hif_delayed_reg_write(struct hif_softc *scn, uint32_t ctrl_addr,
			   uint32_t val)
{
	struct CE_state *ce_state;
	int ce_id = COPY_ENGINE_ID(ctrl_addr);

	ce_state = scn->ce_id_to_state[ce_id];

	if (!ce_state->htt_tx_data && !ce_state->htt_rx_data) {
		hif_reg_write_enqueue(scn, ce_state, val);
		return;
	}

	if (hif_is_reg_write_tput_level_high(scn) ||
	    (PLD_MHI_STATE_L0 == pld_get_mhi_state(scn->qdf_dev->dev))) {
		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset, val);
		qdf_atomic_inc(&scn->wstats.direct);
		ce_state->wstats.direct++;
	} else {
		hif_reg_write_enqueue(scn, ce_state, val);
	}
}
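/*
 * Decision summary (editorial note): non-HTT CEs are always deferred to
 * the workqueue. HTT tx/rx CEs are written directly when throughput is
 * already high or the MHI link is in L0 (awake), since a direct write then
 * carries no extra wake-up cost; otherwise they are deferred so the write
 * does not force the link out of a low-power state.
 */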
#else
static inline QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}

static inline void hif_delayed_reg_write_deinit(struct hif_softc *scn)
{
}
#endif
#if defined(QCA_WIFI_WCN6450)
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	scn->hal_soc = hal_attach(hif_softc_to_hif_opaque_softc(scn),
				  scn->qdf_dev);
	if (!scn->hal_soc)
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	hal_detach(scn->hal_soc);
	scn->hal_soc = NULL;

	return QDF_STATUS_SUCCESS;
}
#elif (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
	defined(QCA_WIFI_QCA9574)) || defined(QCA_WIFI_QCA5332)
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		scn->hal_soc = hal_attach(
					hif_softc_to_hif_opaque_softc(scn),
					scn->qdf_dev);
		if (!scn->hal_soc)
			return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		hal_detach(scn->hal_soc);
		scn->hal_soc = NULL;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
#endif
int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
{
	int ret;

	switch (bus_type) {
	case QDF_BUS_TYPE_IPCI:
		ret = qdf_set_dma_coherent_mask(dev,
						DMA_COHERENT_MASK_DEFAULT);
		if (ret) {
			hif_err("Failed to set dma mask error = %d", ret);
			return ret;
		}
		break;
	default:
		/* Follow the existing sequence for other targets */
		break;
	}

	return 0;
}
/**
 * hif_enable(): hif_enable
 * @hif_ctx: hif_ctx
 * @dev: dev
 * @bdev: bus dev
 * @bid: bus ID
 * @bus_type: bus type
 * @type: enable type
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
		      void *bdev,
		      const struct hif_bus_id *bid,
		      enum qdf_bus_type bus_type,
		      enum hif_enable_type type)
{
	QDF_STATUS status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_ctx = NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = hif_enable_bus(scn, dev, bdev, bid, type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_enable_bus error = %d", status);
		return status;
	}

	status = hif_hal_attach(scn);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hal attach failed");
		goto disable_bus;
	}

	if (hif_delayed_reg_write_init(scn) != QDF_STATUS_SUCCESS) {
		hif_err("unable to initialize delayed reg write");
		goto hal_detach;
	}

	if (hif_bus_configure(scn)) {
		hif_err("Target probe failed");
		status = QDF_STATUS_E_FAILURE;
		goto hal_detach;
	}

	hif_ut_suspend_init(scn);
	hif_register_recovery_notifier(scn);
	hif_latency_detect_timer_start(hif_ctx);

	/*
	 * Flag to avoid a potential unallocated-memory access from the MSI
	 * interrupt handler, which could get scheduled as soon as MSI is
	 * enabled. This takes care of the race arising from MSI being
	 * enabled before the memory used by the interrupt handlers is
	 * allocated.
	 */
	scn->hif_init_done = true;

	hif_debug("OK");

	return QDF_STATUS_SUCCESS;

hal_detach:
	hif_hal_detach(scn);
disable_bus:
	hif_disable_bus(scn);
	return status;
}
void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	hif_delayed_reg_write_deinit(scn);
	hif_set_enable_detection(hif_ctx, false);
	hif_latency_detect_timer_stop(hif_ctx);

	hif_unregister_recovery_notifier(scn);

	hif_nointrs(scn);
	if (scn->hif_init_done == false)
		hif_shutdown_device(hif_ctx);
	else
		hif_stop(hif_ctx);

	hif_hal_detach(scn);

	hif_disable_bus(scn);

	hif_wlan_disable(scn);

	scn->notice_send = false;

	hif_debug("X");
}
#ifdef CE_TASKLET_DEBUG_ENABLE
void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	scn->ce_latency_stats = val;
}
#endif

void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}
qdf_export_symbol(hif_display_stats);

void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}
/**
 * hif_crash_shutdown_dump_bus_register() - dump bus registers
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
#if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		hif_err("Failed to dump bus registers!");
}

/**
 * hif_crash_shutdown(): hif_crash_shutdown
 *
 * This function is called by the platform driver to dump CE registers
 *
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_ctx)
		return;

	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
		return;
	}

	if (TARGET_STATUS_RESET == scn->target_status) {
		hif_warn("Target is already asserted, ignore!");
		return;
	}

	if (hif_is_load_or_unload_in_progress(scn)) {
		hif_err("Load/unload is in progress, ignore!");
		return;
	}

	hif_crash_shutdown_dump_bus_register(hif_ctx);
	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);

	if (ol_copy_ramdump(hif_ctx))
		goto out;

	hif_info("RAM dump collection completed!");

out:
	return;
}
#else
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	hif_debug("Collecting target RAM dump disabled");
}
#endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
#ifdef QCA_WIFI_3_0
/**
 * hif_check_fw_reg(): hif_check_fw_reg
 * @scn: scn
 *
 * Return: int
 */
int hif_check_fw_reg(struct hif_opaque_softc *scn)
{
	return 0;
}
#endif

/**
 * hif_read_phy_mem_base(): hif_read_phy_mem_base
 * @scn: scn
 * @phy_mem_base: physical mem base
 *
 * Return: n/a
 */
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
{
	*phy_mem_base = scn->mem_pa;
}
qdf_export_symbol(hif_read_phy_mem_base);
/**
 * hif_get_device_type(): hif_get_device_type
 * @device_id: device_id
 * @revision_id: revision_id
 * @hif_type: returned hif_type
 * @target_type: returned target_type
 *
 * Return: int
 */
int hif_get_device_type(uint32_t device_id,
			uint32_t revision_id,
			uint32_t *hif_type, uint32_t *target_type)
{
	int ret = 0;

	switch (device_id) {
	case ADRASTEA_DEVICE_ID_P2_E12:
		*hif_type = HIF_TYPE_ADRASTEA;
		*target_type = TARGET_TYPE_ADRASTEA;
		break;

	case AR9888_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		break;

	case AR6320_DEVICE_ID:
		switch (revision_id) {
		case AR6320_FW_1_1:
		case AR6320_FW_1_3:
			*hif_type = HIF_TYPE_AR6320;
			*target_type = TARGET_TYPE_AR6320;
			break;

		case AR6320_FW_2_0:
		case AR6320_FW_3_0:
		case AR6320_FW_3_2:
			*hif_type = HIF_TYPE_AR6320V2;
			*target_type = TARGET_TYPE_AR6320V2;
			break;

		default:
			hif_err("dev_id = 0x%x, rev_id = 0x%x",
				device_id, revision_id);
			ret = -ENODEV;
			goto end;
		}
		break;

	case AR9887_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		hif_info(" *********** AR9887 **************");
		break;

	case QCA9984_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9984;
		*target_type = TARGET_TYPE_QCA9984;
		hif_info(" *********** QCA9984 *************");
		break;

	case QCA9888_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9888;
		*target_type = TARGET_TYPE_QCA9888;
		hif_info(" *********** QCA9888 *************");
		break;

	case AR900B_DEVICE_ID:
		*hif_type = HIF_TYPE_AR900B;
		*target_type = TARGET_TYPE_AR900B;
		hif_info(" *********** AR900B *************");
		break;

	case QCA8074_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074;
		*target_type = TARGET_TYPE_QCA8074;
		hif_info(" *********** QCA8074 *************");
		break;

	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6290_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6290;
		*target_type = TARGET_TYPE_QCA6290;
		hif_info(" *********** QCA6290EMU *************");
		break;

	case QCN9000_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9000;
		*target_type = TARGET_TYPE_QCN9000;
		hif_info(" *********** QCN9000 *************");
		break;

	case QCN9224_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9224;
		*target_type = TARGET_TYPE_QCN9224;
		hif_info(" *********** QCN9224 *************");
		break;

	case QCN6122_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN6122;
		*target_type = TARGET_TYPE_QCN6122;
		hif_info(" *********** QCN6122 *************");
		break;

	case QCN9160_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9160;
		*target_type = TARGET_TYPE_QCN9160;
		hif_info(" *********** QCN9160 *************");
		break;

	case QCN6432_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN6432;
		*target_type = TARGET_TYPE_QCN6432;
		hif_info(" *********** QCN6432 *************");
		break;

	case QCN7605_DEVICE_ID:
	case QCN7605_COMPOSITE:
	case QCN7605_STANDALONE:
	case QCN7605_STANDALONE_V2:
	case QCN7605_COMPOSITE_V2:
		*hif_type = HIF_TYPE_QCN7605;
		*target_type = TARGET_TYPE_QCN7605;
		hif_info(" *********** QCN7605 *************");
		break;

	case QCA6390_DEVICE_ID:
	case QCA6390_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6390;
		*target_type = TARGET_TYPE_QCA6390;
		hif_info(" *********** QCA6390 *************");
		break;

	case QCA6490_DEVICE_ID:
	case QCA6490_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6490;
		*target_type = TARGET_TYPE_QCA6490;
		hif_info(" *********** QCA6490 *************");
		break;

	case QCA6750_DEVICE_ID:
	case QCA6750_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6750;
		*target_type = TARGET_TYPE_QCA6750;
		hif_info(" *********** QCA6750 *************");
		break;

	case KIWI_DEVICE_ID:
		*hif_type = HIF_TYPE_KIWI;
		*target_type = TARGET_TYPE_KIWI;
		hif_info(" *********** KIWI *************");
		break;

	case MANGO_DEVICE_ID:
		*hif_type = HIF_TYPE_MANGO;
		*target_type = TARGET_TYPE_MANGO;
		hif_info(" *********** MANGO *************");
		break;

	case PEACH_DEVICE_ID:
		*hif_type = HIF_TYPE_PEACH;
		*target_type = TARGET_TYPE_PEACH;
		hif_info(" *********** PEACH *************");
		break;

	case QCA8074V2_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074V2;
		*target_type = TARGET_TYPE_QCA8074V2;
		hif_info(" *********** QCA8074V2 *************");
		break;

	case QCA6018_DEVICE_ID:
	case RUMIM2M_DEVICE_ID_NODE0:
	case RUMIM2M_DEVICE_ID_NODE1:
	case RUMIM2M_DEVICE_ID_NODE2:
	case RUMIM2M_DEVICE_ID_NODE3:
	case RUMIM2M_DEVICE_ID_NODE4:
	case RUMIM2M_DEVICE_ID_NODE5:
		*hif_type = HIF_TYPE_QCA6018;
		*target_type = TARGET_TYPE_QCA6018;
		hif_info(" *********** QCA6018 *************");
		break;

	case QCA5018_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA5018;
		*target_type = TARGET_TYPE_QCA5018;
		hif_info(" *********** qca5018 *************");
		break;

	case QCA5332_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA5332;
		*target_type = TARGET_TYPE_QCA5332;
		hif_info(" *********** QCA5332 *************");
		break;

	case QCA9574_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9574;
		*target_type = TARGET_TYPE_QCA9574;
		hif_info(" *********** QCA9574 *************");
		break;

	case WCN6450_DEVICE_ID:
		*hif_type = HIF_TYPE_WCN6450;
		*target_type = TARGET_TYPE_WCN6450;
		hif_info(" *********** WCN6450 *************");
		break;

	default:
		hif_err("Unsupported device ID = 0x%x!", device_id);
		ret = -ENODEV;
		break;
	}

	if (*target_type == TARGET_TYPE_UNKNOWN) {
		hif_err("Unsupported target_type!");
		ret = -ENODEV;
	}
end:
	return ret;
}
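/*
 * Usage sketch (illustrative; the probe snippet below is hypothetical,
 * not a driver API): a bus probe path maps the PCI device/revision ids to
 * the HIF and target types before attaching target-specific callbacks.
 *
 *	uint32_t hif_type, target_type;
 *
 *	if (hif_get_device_type(device_id, revision_id,
 *				&hif_type, &target_type))
 *		return -ENODEV;	// unknown device/revision id
 *	// hif_type/target_type now select the target-specific handling
 */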
/**
 * hif_get_bus_type() - return the bus type
 * @hif_hdl: HIF Context
 *
 * Return: enum qdf_bus_type
 */
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	return scn->bus_type;
}

/*
 * Target info and ini parameters are global to the driver.
 * Hence these structures are exposed to all the modules in the driver;
 * the modules don't need to maintain multiple copies of the same info.
 * Instead, they get the handle from hif and modify it through hif.
 */
/**
 * hif_get_ini_handle() - API to get hif_config_param handle
 * @hif_ctx: HIF Context
 *
 * Return: pointer to hif_config_info
 */
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);

	return &sc->hif_config;
}

/**
 * hif_get_target_info_handle() - API to get hif_target_info handle
 * @hif_ctx: HIF context
 *
 * Return: Pointer to hif_target_info
 */
struct hif_target_info *hif_get_target_info_handle(
					struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);

	return &sc->target_info;
}
qdf_export_symbol(hif_get_target_info_handle);

#ifdef RECEIVE_OFFLOAD
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
	else
		hif_err("NAPI not enabled");
}
qdf_export_symbol(hif_offld_flush_cb_register);

void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_deregister(scn);
	else
		hif_err("NAPI not enabled");
}
qdf_export_symbol(hif_offld_flush_cb_deregister);

int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	if (hif_napi_enabled(hif_hdl, -1))
		return NAPI_PIPE2ID(ctx_id);
	else
		return ctx_id;
}
#else /* RECEIVE_OFFLOAD */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}
qdf_export_symbol(hif_get_rx_ctx_id);
#endif /* RECEIVE_OFFLOAD */
#if defined(FEATURE_LRO)
/**
 * hif_get_lro_info() - Returns the LRO instance for an instance ID
 * @ctx_id: LRO instance ID
 * @hif_hdl: HIF Context
 *
 * Return: Pointer to LRO instance.
 */
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	void *data;

	if (hif_napi_enabled(hif_hdl, -1))
		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
	else
		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);

	return data;
}
#endif

/**
 * hif_get_target_status() - API to get target status
 * @hif_ctx: HIF Context
 *
 * Return: enum hif_target_status
 */
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->target_status;
}
qdf_export_symbol(hif_get_target_status);
/**
 * hif_set_target_status() - API to set target status
 * @hif_ctx: HIF Context
 * @status: Target Status
 *
 * Return: void
 */
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status status)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->target_status = status;
}

/**
 * hif_init_ini_config() - API to initialize HIF configuration parameters
 * @hif_ctx: HIF Context
 * @cfg: HIF Configuration
 *
 * Return: void
 */
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
}

/**
 * hif_get_conparam() - API to get driver mode in HIF
 * @scn: HIF Context
 *
 * Return: driver mode of operation
 */
uint32_t hif_get_conparam(struct hif_softc *scn)
{
	if (!scn)
		return 0;

	return scn->hif_con_param;
}

/**
 * hif_get_callbacks_handle() - API to get callbacks Handle
 * @scn: HIF Context
 *
 * Return: pointer to HIF Callbacks
 */
struct hif_driver_state_callbacks *hif_get_callbacks_handle(
							struct hif_softc *scn)
{
	return &scn->callbacks;
}

/**
 * hif_is_driver_unloading() - API to query upper layers if driver is unloading
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_driver_unloading(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_driver_unloading)
		return cbk->is_driver_unloading(cbk->context);

	return false;
}

/**
 * hif_is_load_or_unload_in_progress() - API to query upper layers if
 * load/unload is in progress
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_load_unload_in_progress)
		return cbk->is_load_unload_in_progress(cbk->context);

	return false;
}

/**
 * hif_is_recovery_in_progress() - API to query upper layers if recovery is in
 * progress
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_recovery_in_progress(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_recovery_in_progress)
		return cbk->is_recovery_in_progress(cbk->context);

	return false;
}
#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
    defined(HIF_IPCI)

/**
 * hif_update_pipe_callback() - API to register pipe specific callbacks
 * @osc: Opaque softc
 * @pipeid: pipe id
 * @callbacks: callbacks to register
 *
 * Return: void
 */
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
			      u_int8_t pipeid,
			      struct hif_msg_callbacks *callbacks)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	QDF_BUG(pipeid < CE_COUNT_MAX);

	hif_debug("pipeid: %d", pipeid);

	pipe_info = &hif_state->pipe_info[pipeid];

	qdf_mem_copy(&pipe_info->pipe_callbacks,
		     callbacks, sizeof(pipe_info->pipe_callbacks));
}
qdf_export_symbol(hif_update_pipe_callback);

/**
 * hif_is_target_ready() - API to query if target is in ready state
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_target_ready(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_target_ready)
		return cbk->is_target_ready(cbk->context);
	/*
	 * If the callback is not registered then there is no way to determine
	 * whether the target is ready. In such a case return true to indicate
	 * that the target is ready.
	 */
	return true;
}
qdf_export_symbol(hif_is_target_ready);
int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->get_bandwidth_level)
		return cbk->get_bandwidth_level(cbk->context);

	return 0;
}
qdf_export_symbol(hif_get_bandwidth_level);
#ifdef DP_MEM_PRE_ALLOC
void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
					 qdf_size_t size,
					 qdf_dma_addr_t *paddr,
					 uint32_t ring_type,
					 uint8_t *is_mem_prealloc)
{
	void *vaddr = NULL;
	struct hif_driver_state_callbacks *cbk =
				hif_get_callbacks_handle(scn);

	*is_mem_prealloc = false;
	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
								   paddr,
								   ring_type);
		if (vaddr) {
			*is_mem_prealloc = true;
			goto end;
		}
	}

	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
					 scn->qdf_dev->dev,
					 size,
					 paddr);
end:
	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
		(void *)*paddr, (int)size, ring_type);

	return vaddr;
}

void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
				       qdf_size_t size,
				       void *vaddr,
				       qdf_dma_addr_t paddr,
				       qdf_dma_context_t memctx,
				       uint8_t is_mem_prealloc)
{
	struct hif_driver_state_callbacks *cbk =
				hif_get_callbacks_handle(scn);

	if (is_mem_prealloc) {
		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
		} else {
			dp_warn("prealloc_put_consistent_mem_unaligned cb is NULL");
			QDF_BUG(0);
		}
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					size, vaddr, paddr, memctx);
	}
}

void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
				  qdf_size_t elem_size, uint16_t elem_num,
				  struct qdf_mem_multi_page_t *pages,
				  bool cacheable)
{
	struct hif_driver_state_callbacks *cbk =
			hif_get_callbacks_handle(scn);

	if (cbk && cbk->prealloc_get_multi_pages)
		cbk->prealloc_get_multi_pages(desc_type, elem_size, elem_num,
					      pages, cacheable);

	if (!pages->num_pages)
		qdf_mem_multi_pages_alloc(scn->qdf_dev, pages,
					  elem_size, elem_num, 0, cacheable);
}

void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  bool cacheable)
{
	struct hif_driver_state_callbacks *cbk =
			hif_get_callbacks_handle(scn);

	if (cbk && cbk->prealloc_put_multi_pages &&
	    pages->is_mem_prealloc)
		cbk->prealloc_put_multi_pages(desc_type, pages);

	if (!pages->is_mem_prealloc)
		qdf_mem_multi_pages_free(scn->qdf_dev, pages, 0,
					 cacheable);
}
#endif
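/*
 * Pattern note (editorial): each allocator above first tries the
 * upper-layer pre-allocation callback and falls back to a dynamic QDF
 * allocation only when the callback is absent or returns nothing. The
 * matching free routines mirror that choice via is_mem_prealloc /
 * pages->is_mem_prealloc, so a buffer is always returned to the pool it
 * came from and never double-freed across the two allocators.
 */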
/**
 * hif_batch_send() - API to access hif specific function
 * ce_batch_send.
 * @osc: HIF Context
 * @msdu: list of msdus to be sent
 * @transfer_id: transfer id
 * @len: downloaded length
 * @sendhead:
 *
 * Return: list of msdus not sent
 */
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			  uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	if (!ce_tx_hdl)
		return NULL;

	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
			     len, sendhead);
}
qdf_export_symbol(hif_batch_send);

/**
 * hif_update_tx_ring() - API to access hif specific function
 * ce_update_tx_ring.
 * @osc: HIF Context
 * @num_htt_cmpls: number of htt completions received.
 *
 * Return: void
 */
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
}
qdf_export_symbol(hif_update_tx_ring);

/**
 * hif_send_single() - API to access hif specific function
 * ce_send_single.
 * @osc: HIF Context
 * @msdu: msdu to be sent
 * @transfer_id: transfer id
 * @len: downloaded length
 *
 * Return: msdu sent status
 */
QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			   uint32_t transfer_id, u_int32_t len)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	if (!ce_tx_hdl)
		return QDF_STATUS_E_NULL_VALUE;

	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
			      len);
}
qdf_export_symbol(hif_send_single);
#endif
/**
 * hif_reg_write() - API to access hif specific function
 * hif_write32_mb.
 * @hif_ctx: HIF Context
 * @offset: offset at which the value has to be written
 * @value: value to be written
 *
 * Return: None
 */
void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		   uint32_t value)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_write32_mb(scn, scn->mem + offset, value);
}
qdf_export_symbol(hif_reg_write);

/**
 * hif_reg_read() - API to access hif specific function
 * hif_read32_mb.
 * @hif_ctx: HIF Context
 * @offset: offset from which the value has to be read
 *
 * Return: Read value
 */
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return hif_read32_mb(scn, scn->mem + offset);
}
qdf_export_symbol(hif_reg_read);

/**
 * hif_ramdump_handler(): generic ramdump handler
 * @scn: struct hif_opaque_softc
 *
 * Return: None
 */
void hif_ramdump_handler(struct hif_opaque_softc *scn)
{
	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
		hif_usb_ramdump_handler(scn);
}
hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->wake_irq_type;
}

irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
{
	struct hif_softc *scn = context;

	hif_info("wake interrupt received on irq %d", irq);

	hif_rtpm_set_monitor_wake_intr(0);
	hif_rtpm_request_resume();

	if (scn->initial_wakeup_cb)
		scn->initial_wakeup_cb(scn->initial_wakeup_priv);

	if (hif_is_ut_suspended(scn))
		hif_ut_fw_resume(scn);

	qdf_pm_system_wakeup();

	return IRQ_HANDLED;
}

void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->initial_wakeup_cb = callback;
	scn->initial_wakeup_priv = priv;
}

void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	hif_ctx->ce_service_max_yield_time =
		ce_service_max_yield_time * 1000;
}

unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	return hif_ctx->ce_service_max_yield_time;
}

void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
					 uint8_t ce_service_max_rx_ind_flush)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	if (ce_service_max_rx_ind_flush == 0 ||
	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
	else
		hif_ctx->ce_service_max_rx_ind_flush =
			ce_service_max_rx_ind_flush;
}
#ifdef SYSTEM_PM_CHECK
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
}

int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	return qdf_atomic_read(&hif_ctx->sys_pm_state);
}

int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
	int32_t sys_pm_state;

	if (!hif_ctx) {
		hif_err("hif context is null");
		return -EFAULT;
	}

	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
		hif_info("Triggering system wakeup");
		qdf_pm_system_wakeup();
		return -EAGAIN;
	}

	return 0;
}
#endif
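/*
 * Usage sketch (hypothetical caller, for illustration only): a data-path
 * entry point can gate bus access on the system PM state and back off
 * while a suspend is in flight.
 *
 *	if (hif_system_pm_state_check(hif) == -EAGAIN)
 *		return QDF_STATUS_E_AGAIN;	// retry after resume
 *	// safe to access the bus here
 */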
#ifdef WLAN_FEATURE_AFFINITY_MGR
/*
 * hif_audio_cpu_affinity_allowed() - Check if audio cpu affinity is allowed
 *
 * @scn: hif handle
 * @cfg: hif affinity manager configuration for IRQ
 * @audio_taken_cpu: Current CPUs which are taken by audio.
 * @current_time: Current system time.
 *
 * This API checks for 2 conditions:
 * 1) The last audio taken mask and the current taken mask are different.
 * 2) The last time the IRQ was affined away due to audio taken CPUs is
 *    more than the time threshold ago (5 seconds in the current case).
 * Return true only if both conditions are satisfied.
 *
 * Return: bool: true if it is allowed to affine away audio taken cpus.
 */
static inline bool
hif_audio_cpu_affinity_allowed(struct hif_softc *scn,
			       struct hif_cpu_affinity *cfg,
			       qdf_cpu_mask audio_taken_cpu,
			       uint64_t current_time)
{
	if (!qdf_cpumask_equal(&audio_taken_cpu, &cfg->walt_taken_mask) &&
	    (qdf_log_timestamp_to_usecs(current_time -
					cfg->last_affined_away)
	     < scn->time_threshold))
		return false;

	return true;
}
/*
 * hif_affinity_mgr_check_update_mask() - Check if the cpu mask needs updating
 *
 * @scn: hif handle
 * @cfg: hif affinity manager configuration for IRQ
 * @audio_taken_cpu: Current CPUs which are taken by audio.
 * @cpu_mask: CPU mask which needs to be updated.
 * @current_time: Current system time.
 *
 * This API checks if the Pro audio use case is running and if cpu_mask needs
 * to be updated.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
hif_affinity_mgr_check_update_mask(struct hif_softc *scn,
				   struct hif_cpu_affinity *cfg,
				   qdf_cpu_mask audio_taken_cpu,
				   qdf_cpu_mask *cpu_mask,
				   uint64_t current_time)
{
	qdf_cpu_mask allowed_mask;

	/*
	 * Case 1: audio_taken_mask is empty
	 *   Check whether the passed cpu_mask and wlan_requested_mask are
	 *   the same. If the two masks differ, copy wlan_requested_mask (the
	 *   IRQ affinity mask requested by WLAN) into cpu_mask.
	 *
	 * Case 2: audio_taken_mask is not empty
	 *   1. Only allow the update if the last time the IRQ was affined
	 *      away due to audio taken CPUs is more than 5 seconds ago, or
	 *      the update is requested by WLAN.
	 *   2. Only allow silver cores to be affined away.
	 *   3. Check if any of the CPUs allowed for the audio use case are
	 *      set in cpu_mask:
	 *      i.  If any such CPU is set, mask it out of cpu_mask.
	 *      ii. If, after masking out the audio taken CPUs (silver
	 *          cores), cpu_mask is empty, set the mask to all CPUs
	 *          except those taken by audio.
	 *
	 * Example:
	 * | Audio mask | mask allowed | cpu_mask | WLAN req mask | new cpu_mask |
	 * |    0x00    |     0x00     |   0x0C   |      0x0C     |     0x0C     |
	 * |    0x00    |     0x00     |   0x03   |      0x03     |     0x03     |
	 * |    0x00    |     0x00     |   0xFC   |      0x03     |     0x03     |
	 * |    0x00    |     0x00     |   0x03   |      0x0C     |     0x0C     |
	 * |    0x0F    |     0x03     |   0x0C   |      0x0C     |     0x0C     |
	 * |    0x0F    |     0x03     |   0x03   |      0x03     |     0xFC     |
	 * |    0x03    |     0x03     |   0x0C   |      0x0C     |     0x0C     |
	 * |    0x03    |     0x03     |   0x03   |      0x03     |     0xFC     |
	 * |    0x03    |     0x03     |   0xFC   |      0x03     |     0xFC     |
	 * |    0xF0    |     0x00     |   0x0C   |      0x0C     |     0x0C     |
	 * |    0xF0    |     0x00     |   0x03   |      0x03     |     0x03     |
	 */

	/* Check if the audio taken mask is empty */
	if (qdf_likely(qdf_cpumask_empty(&audio_taken_cpu))) {
		/*
		 * If the CPU mask requested by WLAN for the IRQ and the
		 * cpu_mask currently set for the IRQ differ, copy the
		 * requested mask into cpu_mask and return.
		 */
		if (qdf_unlikely(!qdf_cpumask_equal(cpu_mask,
						    &cfg->wlan_requested_mask))) {
			qdf_cpumask_copy(cpu_mask, &cfg->wlan_requested_mask);
			return QDF_STATUS_SUCCESS;
		}
		return QDF_STATUS_E_ALREADY;
	}

	if (!(hif_audio_cpu_affinity_allowed(scn, cfg, audio_taken_cpu,
					     current_time) ||
	      cfg->update_requested))
		return QDF_STATUS_E_AGAIN;

	/* Only allow silver cores to be affined away */
	qdf_cpumask_and(&allowed_mask, &scn->allowed_mask, &audio_taken_cpu);
	if (qdf_cpumask_intersects(cpu_mask, &allowed_mask)) {
		/*
		 * If any of the taken CPUs (silver cores) is set in cpu_mask,
		 * mask the audio taken CPUs out of cpu_mask.
		 */
		qdf_cpumask_andnot(cpu_mask, &cfg->wlan_requested_mask,
				   &allowed_mask);
		/*
		 * If cpu_mask is now empty, set it to all CPUs except the
		 * ones taken by audio (silver cores).
		 */
		if (qdf_unlikely(qdf_cpumask_empty(cpu_mask)))
			qdf_cpumask_complement(cpu_mask, &allowed_mask);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_ALREADY;
}
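/*
 * Worked row from the table above (editorial note, assuming the silver-core
 * mask scn->allowed_mask == 0x03): with audio taking 0x0F, allowed_mask =
 * 0x03 & 0x0F = 0x03. For cpu_mask == WLAN req mask == 0x03 the masks
 * intersect, so cpu_mask = 0x03 & ~0x03 = 0x00; since that is empty,
 * cpu_mask = ~0x03 = 0xFC, matching the "new cpu_mask" column.
 */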
static inline QDF_STATUS
hif_check_and_affine_irq(struct hif_softc *scn, struct hif_cpu_affinity *cfg,
			 qdf_cpu_mask audio_taken_cpu, qdf_cpu_mask cpu_mask,
			 uint64_t current_time)
{
	QDF_STATUS status;

	status = hif_affinity_mgr_check_update_mask(scn, cfg,
						    audio_taken_cpu,
						    &cpu_mask,
						    current_time);
	/* Set IRQ affinity if the CPU mask was updated */
	if (QDF_IS_STATUS_SUCCESS(status)) {
		status = hif_irq_set_affinity_hint(cfg->irq,
						   &cpu_mask);
		if (QDF_IS_STATUS_SUCCESS(status)) {
			/* Store the audio taken CPU mask */
			qdf_cpumask_copy(&cfg->walt_taken_mask,
					 &audio_taken_cpu);
			/* Store the CPU mask which was set for the IRQ */
			qdf_cpumask_copy(&cfg->current_irq_mask,
					 &cpu_mask);
			/* Record the time when the IRQ affinity was updated */
			cfg->last_updated = current_time;
			if (hif_audio_cpu_affinity_allowed(scn, cfg,
							   audio_taken_cpu,
							   current_time))
				/*
				 * If the CPU mask was updated because CPUs
				 * were taken by audio, update the
				 * last_affined_away time.
				 */
				cfg->last_affined_away = current_time;
		}
	}

	return status;
}
void hif_affinity_mgr_affine_irq(struct hif_softc *scn)
{
	bool audio_affinity_allowed = false;
	int i, j, ce_id;
	uint64_t current_time;
	char cpu_str[10];
	QDF_STATUS status;
	qdf_cpu_mask cpu_mask, audio_taken_cpu;
	struct HIF_CE_state *hif_state;
	struct hif_exec_context *hif_ext_group;
	struct CE_attr *host_ce_conf;
	struct HIF_CE_state *ce_sc;
	struct hif_cpu_affinity *cfg;

	if (!scn->affinity_mgr_supported)
		return;

	current_time = hif_get_log_timestamp();
	/* Get the CPU mask for audio taken CPUs */
	audio_taken_cpu = qdf_walt_get_cpus_taken();

	ce_sc = HIF_GET_CE_STATE(scn);
	host_ce_conf = ce_sc->host_ce_config;
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
			continue;
		cfg = &scn->ce_irq_cpu_mask[ce_id];
		qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
		status =
			hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
						 cpu_mask, current_time);
		if (QDF_IS_STATUS_SUCCESS(status))
			audio_affinity_allowed = true;
	}

	hif_state = HIF_GET_CE_STATE(scn);
	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		for (j = 0; j < hif_ext_group->numirq; j++) {
			cfg = &scn->irq_cpu_mask[hif_ext_group->grp_id][j];
			qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
			status =
				hif_check_and_affine_irq(scn, cfg,
							 audio_taken_cpu,
							 cpu_mask,
							 current_time);
			if (QDF_IS_STATUS_SUCCESS(status)) {
				qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
				audio_affinity_allowed = true;
			}
		}
	}

	if (audio_affinity_allowed) {
		qdf_thread_cpumap_print_to_pagebuf(false, cpu_str,
						   &audio_taken_cpu);
		hif_info("Audio taken CPU mask: %s", cpu_str);
	}
}
static inline QDF_STATUS
hif_affinity_mgr_set_irq_affinity(struct hif_softc *scn, uint32_t irq,
				  struct hif_cpu_affinity *cfg,
				  qdf_cpu_mask *cpu_mask)
{
	uint64_t current_time;
	char cpu_str[10];
	QDF_STATUS status, mask_updated;
	qdf_cpu_mask audio_taken_cpu = qdf_walt_get_cpus_taken();

	current_time = hif_get_log_timestamp();
	qdf_cpumask_copy(&cfg->wlan_requested_mask, cpu_mask);
	cfg->update_requested = true;
	mask_updated = hif_affinity_mgr_check_update_mask(scn, cfg,
							  audio_taken_cpu,
							  cpu_mask,
							  current_time);
	status = hif_irq_set_affinity_hint(irq, cpu_mask);
	if (QDF_IS_STATUS_SUCCESS(status)) {
		qdf_cpumask_copy(&cfg->walt_taken_mask, &audio_taken_cpu);
		qdf_cpumask_copy(&cfg->current_irq_mask, cpu_mask);
		if (QDF_IS_STATUS_SUCCESS(mask_updated)) {
			cfg->last_updated = current_time;
			if (hif_audio_cpu_affinity_allowed(scn, cfg,
							   audio_taken_cpu,
							   current_time)) {
				cfg->last_affined_away = current_time;
				qdf_thread_cpumap_print_to_pagebuf(false,
								   cpu_str,
								   &audio_taken_cpu);
				hif_info_rl("Audio taken CPU mask: %s",
					    cpu_str);
			}
		}
	}
	cfg->update_requested = false;

	return status;
}
QDF_STATUS
hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
				      uint32_t grp_id, uint32_t irq_index,
				      qdf_cpu_mask *cpu_mask)
{
	struct hif_cpu_affinity *cfg;

	if (!scn->affinity_mgr_supported)
		return hif_irq_set_affinity_hint(irq, cpu_mask);

	cfg = &scn->irq_cpu_mask[grp_id][irq_index];
	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
}

QDF_STATUS
hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
				     uint32_t ce_id, qdf_cpu_mask *cpu_mask)
{
	struct hif_cpu_affinity *cfg;

	if (!scn->affinity_mgr_supported)
		return hif_irq_set_affinity_hint(irq, cpu_mask);

	cfg = &scn->ce_irq_cpu_mask[ce_id];
	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
}

void
hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq)
{
	unsigned int cpus;
	qdf_cpu_mask cpu_mask = {0};
	struct hif_cpu_affinity *cfg = NULL;

	if (!scn->affinity_mgr_supported)
		return;

	/* Set the CPU mask to the silver cores */
	qdf_for_each_possible_cpu(cpus)
		if (qdf_topology_physical_package_id(cpus) ==
		    CPU_CLUSTER_TYPE_LITTLE)
			qdf_cpumask_set_cpu(cpus, &cpu_mask);

	cfg = &scn->ce_irq_cpu_mask[id];
	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
	cfg->irq = irq;
	cfg->last_updated = 0;
	cfg->last_affined_away = 0;
	cfg->update_requested = false;
}

void
hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id,
			      int irq_num, int irq)
{
	unsigned int cpus;
	qdf_cpu_mask cpu_mask = {0};
	struct hif_cpu_affinity *cfg = NULL;

	if (!scn->affinity_mgr_supported)
		return;

	/* Set the CPU mask to the silver cores */
	qdf_for_each_possible_cpu(cpus)
		if (qdf_topology_physical_package_id(cpus) ==
		    CPU_CLUSTER_TYPE_LITTLE)
			qdf_cpumask_set_cpu(cpus, &cpu_mask);

	cfg = &scn->irq_cpu_mask[grp_id][irq_num];
	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
	cfg->irq = irq;
	cfg->last_updated = 0;
	cfg->last_affined_away = 0;
	cfg->update_requested = false;
}
#endif
#if defined(HIF_CPU_PERF_AFFINE_MASK) || \
	defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)
void hif_config_irq_set_perf_affinity_hint(
	struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_config_irq_affinity(scn);
}

qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
#endif