/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */
#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>         /* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "hif_hw_version.h"
#if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
     defined(HIF_IPCI))
#include "ce_tasklet.h"
#include "ce_api.h"
#endif
#include "qdf_trace.h"
#include "hif_debug.h"
#include "mp_dev.h"
#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
	defined(QCA_WIFI_QCA5332)
#include "hal_api.h"
#endif
#include "hif_napi.h"
#include "hif_unit_test_suspend_i.h"
#include "qdf_module.h"
#ifdef HIF_CE_LOG_INFO
#include <qdf_notifier.h>
#include <qdf_hang_event_notifier.h>
#endif
#include <linux/cpumask.h>
#include <pld_common.h>
#include "ce_internal.h"
#include <qdf_tracepoint.h>
void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}
/**
 * hif_get_target_id(): get target id
 * @scn: hif context
 *
 * Return the virtual memory base address to the caller.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}
/**
 * hif_get_targetdef(): hif_get_targetdef
 * @hif_ctx: hif context
 *
 * Return: void *
 */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->targetdef;
}

#ifdef FORCE_WAKE
#ifndef QCA_WIFI_WCN6450
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn))
		hal_set_init_phase(scn->hal_soc, init_phase);
}
#else
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hal_set_init_phase(scn->hal_soc, init_phase);
}
#endif
#endif /* FORCE_WAKE */

#ifdef HIF_IPCI
void hif_shutdown_notifier_cb(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->recovery = true;
}
#endif
/**
 * hif_vote_link_down(): unvote for link up
 * @hif_ctx: hif context
 *
 * Call hif_vote_link_down to release a previous request made using
 * hif_vote_link_up. A hif_vote_link_down call should only be made
 * after a corresponding hif_vote_link_up, otherwise you could be
 * negating a vote from another source. When no votes are present
 * hif will not guarantee the link state after hif_bus_suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_up by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	if (scn->linkstate_vote == 0)
		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
				scn->linkstate_vote);

	scn->linkstate_vote--;
	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
	if (scn->linkstate_vote == 0)
		hif_bus_prevent_linkdown(scn, false);
}

/**
 * hif_vote_link_up(): vote to prevent bus from suspending
 * @hif_ctx: hif context
 *
 * Makes hif guarantee that fw can message the host normally
 * during suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_down by only calling in MC thread
 * and initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	scn->linkstate_vote++;
	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
	if (scn->linkstate_vote == 1)
		hif_bus_prevent_linkdown(scn, true);
}
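
/*
 * Usage sketch (illustrative, not part of this file): callers pair the
 * votes around any window in which the firmware must be able to
 * message the host, e.g.
 *
 *	hif_vote_link_up(hif_ctx);
 *	... exchange messages that must survive a bus suspend ...
 *	hif_vote_link_down(hif_ctx);
 *
 * Votes are reference counted, so nested up/down pairs made from the
 * MC thread compose safely.
 */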
/**
 * hif_can_suspend_link(): query if hif is permitted to suspend the link
 * @hif_ctx: hif context
 *
 * Hif will ensure that the link won't be suspended if the upper
 * layers don't want it to.
 *
 * SYNCHRONIZATION: MC thread is stopped before bus suspend, thus
 * we don't need extra locking to ensure votes don't change while
 * we are in the process of suspending or resuming.
 *
 * Return: false if hif will guarantee link up during suspend.
 */
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	return scn->linkstate_vote == 0;
}
/**
 * hif_hia_item_address(): get host interest item address
 * @target_type: target_type
 * @item_offset: item_offset
 *
 * Return: host interest item address for the given target, or 0 for
 * targets without a host interest area.
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
	default:
		ASSERT(0);
		return 0;
	}
}
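
/*
 * Example (sketch): a caller computes the absolute address of a
 * host-interest item by passing the item's offset within the host
 * interest area; the 0x58 offset below is purely illustrative.
 *
 *	uint32_t addr = hif_hia_item_address(TARGET_TYPE_AR9888, 0x58);
 *
 * The result is the per-target HOST_INTEREST base plus the offset.
 */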
/**
 * hif_max_num_receives_reached() - check max receive is reached
 * @scn: HIF Context
 * @count: current receive count
 *
 * Check whether the receive count has reached the maximum allowed.
 *
 * Return: bool
 */
bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
{
	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
		return count > 120;
	else
		return count > MAX_NUM_OF_RECEIVES;
}

/**
 * init_buffer_count() - initial buffer count
 * @maxSize: maximum number of buffers
 *
 * Routine to modify the initial buffer count to be allocated on an OS
 * platform basis. Platform owners will need to modify this as needed.
 *
 * Return: qdf_size_t
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	return maxSize;
}
/**
 * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
 * @hif_ctx: hif context
 * @htc_htt_tx_endpoint: htt_tx_endpoint
 *
 * Return: void
 */
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
				      int htc_htt_tx_endpoint)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is NULL!");
		return;
	}

	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
}

qdf_export_symbol(hif_save_htc_htt_config_endpoint);
static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = QCA6390_V1,
		.subid = 0x0,
		.name = "QCA6390_V1",
	},
	{
		.id = QCA6490_V1,
		.subid = 0x0,
		.name = "QCA6490_V1",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	},
	{
		.id = MANGO_V1,
		.subid = 0xF,
		.name = "MANGO_V1",
	},
	{
		.id = PEACH_V1,
		.subid = 0,
		.name = "PEACH_V1",
	},
	{
		.id = KIWI_V1,
		.subid = 0,
		.name = "KIWI_V1",
	},
	{
		.id = KIWI_V2,
		.subid = 0,
		.name = "KIWI_V2",
	},
	{
		.id = WCN6750_V1,
		.subid = 0,
		.name = "WCN6750_V1",
	},
	{
		.id = WCN6750_V2,
		.subid = 0,
		.name = "WCN6750_V2",
	},
	{
		.id = WCN6450_V1,
		.subid = 0,
		.name = "WCN6450_V1",
	},
	{
		.id = QCA6490_v2_1,
		.subid = 0,
		.name = "QCA6490",
	},
	{
		.id = QCA6490_v2,
		.subid = 0,
		.name = "QCA6490",
	},
	{
		.id = WCN3990_TALOS,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_MOOREA,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_SAIPAN,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_RENNELL,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_BITRA,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_DIVAR,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_ATHERTON,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_STRAIT,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_NETRANI,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_CLARENCE,
		.subid = 0,
		.name = "WCN3990",
	}
};
/**
 * hif_get_hw_name(): get a human readable name for the hardware
 * @info: Target Info
 *
 * Return: human readable name for the underlying wifi hardware.
 */
static const char *hif_get_hw_name(struct hif_target_info *info)
{
	int i;

	hif_debug("target version = %d, target revision = %d",
		  info->target_version,
		  info->target_revision);

	if (info->hw_name)
		return info->hw_name;

	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
		if (info->target_version == qwlan_hw_list[i].id &&
		    info->target_revision == qwlan_hw_list[i].subid) {
			return qwlan_hw_list[i].name;
		}
	}

	info->hw_name = qdf_mem_malloc(64);
	if (!info->hw_name)
		return "Unknown Device (nomem)";

	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
			info->target_version);
	if (i < 0)
		return "Unknown Device (snprintf failure)";
	else
		return info->hw_name;
}

/**
 * hif_get_hw_info(): hif_get_hw_info
 * @scn: scn
 * @version: version
 * @revision: revision
 * @target_name: target name
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
		     const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}
/**
 * hif_get_dev_ba(): API to get device base address.
 * @hif_handle: hif handle
 *
 * Return: device base address
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}

qdf_export_symbol(hif_get_dev_ba);

/**
 * hif_get_dev_ba_ce(): API to get device CE base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for CE
 */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_ce;
}

qdf_export_symbol(hif_get_dev_ba_ce);

/**
 * hif_get_dev_ba_pmm(): API to get device PMM base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for PMM
 */
void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_pmm_base;
}

qdf_export_symbol(hif_get_dev_ba_pmm);

uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->target_info.soc_version;
}

qdf_export_symbol(hif_get_soc_version);
/**
 * hif_get_dev_ba_cmem(): API to get device CMEM base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for CMEM
 */
void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_cmem;
}

qdf_export_symbol(hif_get_dev_ba_cmem);
#ifdef FEATURE_RUNTIME_PM
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
{
	if (is_get)
		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
	else
		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
}

static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
}

static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
}
#else
static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
}

static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
}
#endif
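
/*
 * Usage sketch (illustrative): hif_runtime_prevent_linkdown() behaves
 * like a get/put pair on the runtime-PM lock initialized above, so a
 * caller holding the link up across an operation would do:
 *
 *	hif_runtime_prevent_linkdown(scn, true);   // "get": block suspend
 *	... critical section that must keep the link up ...
 *	hif_runtime_prevent_linkdown(scn, false);  // "put": allow suspend
 */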
#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * hif_get_interrupt_threshold_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle
 *
 * Return: None
 */
static inline
void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
					       struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.ce_status_ring_timer_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
		scn->ini_cfg.ce_status_ring_batch_count_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
	}
}
#else
static inline
void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
					       struct wlan_objmgr_psoc *psoc)
{
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */

/**
 * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle
 *
 * Return: None
 */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.disable_wake_irq =
			cfg_get(psoc, CFG_DISABLE_WAKE_IRQ);
		/*
		 * The wake IRQ can't share the same IRQ as the copy
		 * engines. In one-MSI mode we don't know in the wake IRQ
		 * handler whether the wake IRQ fired or not (known issue
		 * CR 2055359). To support the wake IRQ, allocate at least
		 * 2 MSI vectors: the first is for the wake IRQ while the
		 * others share the second vector.
		 */
		if (pld_is_one_msi(scn->qdf_dev->dev)) {
			hif_debug("Disable wake IRQ in one-MSI mode");
			scn->ini_cfg.disable_wake_irq = true;
		}
		hif_get_interrupt_threshold_cfg_from_psoc(scn, psoc);
	}
}
#if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
/**
 * hif_recovery_notifier_cb - Recovery notifier callback to log
 *  hang event data
 * @block: notifier block
 * @state: state
 * @data: notifier data
 *
 * Return: status
 */
static
int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
			     void *data)
{
	struct qdf_notifer_data *notif_data = data;
	qdf_notif_block *notif_block;
	struct hif_softc *hif_handle;
	bool bus_id_invalid;

	if (!data || !block)
		return -EINVAL;

	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);

	hif_handle = notif_block->priv_data;
	if (!hif_handle)
		return -EINVAL;

	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
					  &notif_data->offset);
	if (bus_id_invalid)
		return NOTIFY_STOP_MASK;

	hif_log_ce_info(hif_handle, notif_data->hang_data,
			&notif_data->offset);

	return 0;
}

/**
 * hif_register_recovery_notifier - Register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier;

	if (!hif_handle)
		return QDF_STATUS_E_FAILURE;

	hif_notifier = &hif_handle->hif_recovery_notifier;

	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
	hif_notifier->priv_data = hif_handle;
	return qdf_hang_event_register_notifier(hif_notifier);
}

/**
 * hif_unregister_recovery_notifier - Un-register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;

	return qdf_hang_event_unregister_notifier(hif_notifier);
}
#else
static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#ifdef HIF_CPU_PERF_AFFINE_MASK
/**
 * __hif_cpu_hotplug_notify() - CPU hotplug event handler
 * @context: HIF context
 * @cpu: CPU Id of the CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void __hif_cpu_hotplug_notify(void *context,
				     uint32_t cpu, bool cpu_up)
{
	struct hif_softc *scn = context;

	if (!scn)
		return;
	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
		return;

	if (cpu_up) {
		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
		hif_debug("Setting affinity for online CPU: %d", cpu);
	} else {
		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
	}
}

/**
 * hif_cpu_hotplug_notify - cpu core up/down notification
 *  handler
 * @context: HIF context
 * @cpu: CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
{
	struct qdf_op_sync *op_sync;

	if (qdf_op_protect(&op_sync))
		return;

	__hif_cpu_hotplug_notify(context, cpu, cpu_up);

	qdf_op_unprotect(op_sync);
}

static void hif_cpu_online_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, true);
}

static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, false);
}

static void hif_cpuhp_register(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot register hotplug notifiers");
		return;
	}

	qdf_cpuhp_register(&scn->cpuhp_event_handle,
			   scn,
			   hif_cpu_online_cb,
			   hif_cpu_before_offline_cb);
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot unregister hotplug notifiers");
		return;
	}

	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
}
#else
static void hif_cpuhp_register(struct hif_softc *scn)
{
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
}
#endif /* ifdef HIF_CPU_PERF_AFFINE_MASK */
#ifdef HIF_DETECTION_LATENCY_ENABLE
/*
 * Bitmask to control enablement of latency detection for the tasklets;
 * bit X represents the tasklet of WLAN_CE_X.
 */
#ifndef DETECTION_LATENCY_TASKLET_MASK
#define DETECTION_LATENCY_TASKLET_MASK (BIT(2) | BIT(7))
#endif

static inline int
__hif_tasklet_latency(struct hif_softc *scn, bool from_timer, int idx)
{
	qdf_time_t sched_time =
		scn->latency_detect.tasklet_info[idx].sched_time;
	qdf_time_t exec_time =
		scn->latency_detect.tasklet_info[idx].exec_time;
	qdf_time_t curr_time = qdf_system_ticks();
	uint32_t threshold = scn->latency_detect.threshold;
	qdf_time_t expect_exec_time =
		sched_time + qdf_system_msecs_to_ticks(threshold);

	/* Two kinds of check here.
	 * from_timer==true: check if the tasklet has stalled
	 * from_timer==false: check if tasklet execution came late
	 */
	if (from_timer ?
	    (qdf_system_time_after(sched_time, exec_time) &&
	     qdf_system_time_after(curr_time, expect_exec_time)) :
	    qdf_system_time_after(exec_time, expect_exec_time)) {
		hif_err("tasklet[%d] latency detected: from_timer %d, curr_time %lu, sched_time %lu, exec_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
			idx, from_timer, curr_time, sched_time,
			exec_time, threshold,
			scn->latency_detect.timeout,
			qdf_get_cpu(), (void *)_RET_IP_);
		qdf_trigger_self_recovery(NULL,
					  QDF_TASKLET_CREDIT_LATENCY_DETECT);
		return -ETIMEDOUT;
	}

	return 0;
}
/**
 * hif_tasklet_latency_detect_enabled() - check whether latency detect
 *  is enabled for the tasklet which is specified by idx
 * @scn: HIF opaque context
 * @idx: CE id
 *
 * Return: true if latency detect is enabled for the specified tasklet,
 *  false otherwise.
 */
static inline bool
hif_tasklet_latency_detect_enabled(struct hif_softc *scn, int idx)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return false;

	if (!scn->latency_detect.enable_detection)
		return false;

	if (idx < 0 || idx >= HIF_TASKLET_IN_MONITOR ||
	    !qdf_test_bit(idx, scn->latency_detect.tasklet_bmap))
		return false;

	return true;
}

void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx)
{
	if (!hif_tasklet_latency_detect_enabled(scn, idx))
		return;

	/*
	 * hif_set_enable_detection(true) might come between
	 * hif_tasklet_latency_record_sched() and
	 * hif_tasklet_latency_record_exec() during wlan startup; then
	 * sched_time is 0 while exec_time is not, and we hit the timeout
	 * case in __hif_tasklet_latency().
	 * To avoid such an issue, skip exec_time recording if sched_time
	 * has not been recorded.
	 */
	if (!scn->latency_detect.tasklet_info[idx].sched_time)
		return;

	scn->latency_detect.tasklet_info[idx].exec_time = qdf_system_ticks();
	__hif_tasklet_latency(scn, false, idx);
}

void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx)
{
	if (!hif_tasklet_latency_detect_enabled(scn, idx))
		return;

	scn->latency_detect.tasklet_info[idx].sched_cpuid = qdf_get_cpu();
	scn->latency_detect.tasklet_info[idx].sched_time = qdf_system_ticks();
}
static inline void hif_credit_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t credit_request_time =
		scn->latency_detect.credit_request_time;
	qdf_time_t credit_report_time = scn->latency_detect.credit_report_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t threshold = scn->latency_detect.threshold;
	int cpu_id = qdf_get_cpu();

	/* Two kinds of check here.
	 * from_timer==true: check if the credit report has stalled
	 * from_timer==false: check if the credit report came late
	 */
	if ((from_timer ?
	     qdf_system_time_after(credit_request_time, credit_report_time) :
	     qdf_system_time_after(credit_report_time, credit_request_time)) &&
	    qdf_system_time_after(curr_jiffies,
				  credit_request_time +
				  qdf_system_msecs_to_ticks(threshold))) {
		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu, credit_report_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, credit_request_time,
			credit_report_time, threshold,
			scn->latency_detect.timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}

static inline void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
{
	int i, ret;

	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
		if (!qdf_test_bit(i, scn->latency_detect.tasklet_bmap))
			continue;
		ret = __hif_tasklet_latency(scn, from_timer, i);
		if (ret)
			return;
	}
}

/**
 * hif_check_detection_latency(): check for tasklet/credit latency
 * @scn: hif context
 * @from_timer: true if called from the timer handler
 * @bitmap_type: bitmap indicating whether to check tasklet, credit,
 *  or both
 *
 * Return: none
 */
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	if (!scn->latency_detect.enable_detection)
		return;

	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
		hif_tasklet_latency(scn, from_timer);

	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
		hif_credit_latency(scn, from_timer);
}
static void hif_latency_detect_timeout_handler(void *arg)
{
	struct hif_softc *scn = (struct hif_softc *)arg;
	int next_cpu, i;
	qdf_cpu_mask cpu_mask = {0};
	struct hif_latency_detect *detect = &scn->latency_detect;

	hif_check_detection_latency(scn, true,
				    BIT(HIF_DETECT_TASKLET) |
				    BIT(HIF_DETECT_CREDIT));

	/* The timer needs to start on a different CPU so it can detect a
	 * tasklet schedule stall. There is still a chance that, after the
	 * timer has started, the irq/tasklet lands on the same CPU; the
	 * tasklet then executes before the softirq timer, and if that
	 * tasklet stalls, this timer can't detect it. We accept this as a
	 * limitation: if a tasklet stalls, some other path will detect it,
	 * just a little later.
	 */
	qdf_cpumask_copy(&cpu_mask, (const qdf_cpu_mask *)cpu_active_mask);
	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
		if (!qdf_test_bit(i, detect->tasklet_bmap))
			continue;
		qdf_cpumask_clear_cpu(detect->tasklet_info[i].sched_cpuid,
				      &cpu_mask);
	}

	next_cpu = cpumask_first(&cpu_mask);
	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
		hif_debug("start timer on local");
		/* no available CPU was found, start on the local CPU */
		qdf_timer_mod(&detect->timer, detect->timeout);
	} else {
		qdf_timer_start_on(&detect->timer, detect->timeout, next_cpu);
	}
}
static void hif_latency_detect_timer_init(struct hif_softc *scn)
{
	scn->latency_detect.timeout =
		DETECTION_TIMER_TIMEOUT;
	scn->latency_detect.threshold =
		DETECTION_LATENCY_THRESHOLD;

	hif_info("timer timeout %u, latency threshold %u",
		 scn->latency_detect.timeout,
		 scn->latency_detect.threshold);

	scn->latency_detect.is_timer_started = false;

	qdf_timer_init(NULL,
		       &scn->latency_detect.timer,
		       &hif_latency_detect_timeout_handler,
		       scn,
		       QDF_TIMER_TYPE_SW_SPIN);
}

static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{
	hif_info("deinit timer");
	qdf_timer_free(&scn->latency_detect.timer);
}

static void hif_latency_detect_init(struct hif_softc *scn)
{
	uint32_t tasklet_mask;
	int i;

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	tasklet_mask = DETECTION_LATENCY_TASKLET_MASK;
	hif_info("tasklet mask is 0x%x", tasklet_mask);
	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
		if (BIT(i) & tasklet_mask)
			qdf_set_bit(i, scn->latency_detect.tasklet_bmap);
	}

	hif_latency_detect_timer_init(scn);
}

static void hif_latency_detect_deinit(struct hif_softc *scn)
{
	int i;

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_latency_detect_timer_deinit(scn);
	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++)
		qdf_clear_bit(i, scn->latency_detect.tasklet_bmap);
}
void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("start timer");
	if (scn->latency_detect.is_timer_started) {
		hif_info("timer has been started");
		return;
	}

	qdf_timer_start(&scn->latency_detect.timer,
			scn->latency_detect.timeout);
	scn->latency_detect.is_timer_started = true;
}

void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("stop timer");

	qdf_timer_sync_cancel(&scn->latency_detect.timer);
	scn->latency_detect.is_timer_started = false;
}

void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	if (HIF_REQUEST_CREDIT == type)
		scn->latency_detect.credit_request_time = qdf_system_ticks();
	else if (HIF_PROCESS_CREDIT_REPORT == type)
		scn->latency_detect.credit_report_time = qdf_system_ticks();

	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
}

void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	scn->latency_detect.enable_detection = value;
}
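
/*
 * Usage sketch (illustrative): a typical mission-mode flow wires the
 * pieces above together roughly as follows; record_sched()/record_exec()
 * are invoked from the CE tasklet schedule/execute paths.
 *
 *	hif_set_enable_detection(hif_ctx, true);
 *	hif_latency_detect_timer_start(hif_ctx);
 *	...
 *	hif_latency_detect_timer_stop(hif_ctx);
 *	hif_set_enable_detection(hif_ctx, false);
 */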
#else
static inline void hif_latency_detect_init(struct hif_softc *scn)
{}

static inline void hif_latency_detect_deinit(struct hif_softc *scn)
{}
#endif

#ifdef WLAN_FEATURE_AFFINITY_MGR
#define AFFINITY_THRESHOLD 5000000
static inline void
hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
{
	unsigned int cpus;
	qdf_cpu_mask allowed_mask = {0};

	scn->affinity_mgr_supported =
		(cfg_get(psoc, CFG_IRQ_AFFINE_AUDIO_USE_CASE) &&
		 qdf_walt_get_cpus_taken_supported());

	hif_info("Affinity Manager supported: %d", scn->affinity_mgr_supported);

	if (!scn->affinity_mgr_supported)
		return;

	scn->time_threshold = AFFINITY_THRESHOLD;
	qdf_for_each_possible_cpu(cpus)
		if (qdf_topology_physical_package_id(cpus) ==
		    CPU_CLUSTER_TYPE_LITTLE)
			qdf_cpumask_set_cpu(cpus, &allowed_mask);
	qdf_cpumask_copy(&scn->allowed_mask, &allowed_mask);
}
#else
static inline void
hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
{
}
#endif
#ifdef FEATURE_DIRECT_LINK
/**
 * hif_init_direct_link_rcv_pipe_num(): Initialize the direct link receive
 *  pipe number
 * @scn: hif context
 *
 * Return: None
 */
static inline
void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
{
	scn->dl_recv_pipe_num = INVALID_PIPE_NO;
}
#else
static inline
void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
{
}
#endif

struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc)
{
	struct hif_softc *scn;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int bus_context_size = hif_bus_get_context_size(bus_type);

	if (bus_context_size == 0) {
		hif_err("context size 0 not allowed");
		return NULL;
	}

	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
	if (!scn)
		return GET_HIF_OPAQUE_HDL(scn);

	scn->qdf_dev = qdf_ctx;
	scn->hif_con_param = mode;
	qdf_atomic_init(&scn->active_tasklet_cnt);
	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
	qdf_atomic_init(&scn->link_suspended);
	qdf_atomic_init(&scn->tasklet_from_intr);
	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
	qdf_mem_copy(&scn->callbacks, cbk,
		     sizeof(struct hif_driver_state_callbacks));
	scn->bus_type = bus_type;

	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
	hif_get_cfg_from_psoc(scn, psoc);

	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
	status = hif_bus_open(scn, bus_type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_bus_open error = %d, bus_type = %d",
			status, bus_type);
		qdf_mem_free(scn);
		scn = NULL;
		goto out;
	}

	hif_rtpm_lock_init(scn);

	hif_cpuhp_register(scn);
	hif_latency_detect_init(scn);
	hif_affinity_mgr_init(scn, psoc);
	hif_init_direct_link_rcv_pipe_num(scn);

out:
	return GET_HIF_OPAQUE_HDL(scn);
}
#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
 * @scn: hif context
 *
 * Return: none
 */
void hif_uninit_rri_on_ddr(struct hif_softc *scn)
{
	if (scn->vaddr_rri_on_ddr)
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					RRI_ON_DDR_MEM_SIZE,
					scn->vaddr_rri_on_ddr,
					scn->paddr_rri_on_ddr, 0);
	scn->vaddr_rri_on_ddr = NULL;
}
#endif

/**
 * hif_close(): hif_close
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_opaque_softc is NULL");
		return;
	}

	hif_latency_detect_deinit(scn);

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;

		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);
	hif_cleanup_static_buf_to_target(scn);
	hif_cpuhp_unregister(scn);
	hif_rtpm_lock_deinit(scn);

	hif_bus_close(scn);

	qdf_mem_free(scn);
}
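
/*
 * Lifecycle sketch (illustrative): hif_open() and hif_close() bracket
 * the bus session; everything initialized in hif_open() (rtpm lock,
 * cpuhp notifiers, latency detection, affinity manager) is torn down
 * in hif_close() in roughly the reverse order.
 *
 *	struct hif_opaque_softc *hif_ctx =
 *		hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, &cbk, psoc);
 *	if (hif_ctx) {
 *		... attach/enable the bus, run traffic ...
 *		hif_close(hif_ctx);
 *	}
 */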
/**
 * hif_get_num_active_grp_tasklets() - get the number of active
 *  datapath group tasklets pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of datapath group tasklets which are active
 */
static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
}

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
	defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
	defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
	defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
	defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
	defined(QCA_WIFI_QCN6432) || \
	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
/**
 * hif_get_num_pending_work() - get the number of entries in
 *  the workqueue pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of pending register write work entries
 */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return hal_get_reg_write_pending_work(scn->hal_soc);
}
#elif defined(FEATURE_HIF_DELAYED_REG_WRITE)
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_work_cnt);
}
#else
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return 0;
}
#endif
QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
{
	uint32_t task_drain_wait_cnt = 0;
	int tasklet = 0, grp_tasklet = 0, work = 0;

	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending tasklets %d grp tasklets %d work %d",
				tasklet, grp_tasklet, work);
			QDF_DEBUG_PANIC("Complete tasks takes more than %u ms: tasklets %d grp tasklets %d work %d",
					HIF_TASK_DRAIN_WAIT_CNT * 10,
					tasklet, grp_tasklet, work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for tasklets %d grp tasklets %d work %d",
			 tasklet, grp_tasklet, work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}
#ifdef HIF_HAL_REG_ACCESS_SUPPORT
void hif_reg_window_write(struct hif_softc *scn, uint32_t offset,
			  uint32_t value)
{
	hal_write32_mb(scn->hal_soc, offset, value);
}

uint32_t hif_reg_window_read(struct hif_softc *scn, uint32_t offset)
{
	return hal_read32_mb(scn->hal_soc, offset);
}
#endif

#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint32_t work_drain_wait_cnt = 0;
	uint32_t wait_cnt = 0;
	int work = 0;

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);

	while ((work = hif_get_num_pending_work(scn))) {
		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
			qdf_atomic_set(&scn->dp_ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			qdf_atomic_set(&scn->ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			hif_err("timeout waiting for pending work %d", work);
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(10);
	}

	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
		return QDF_STATUS_SUCCESS;

	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
			hif_err("EP vote release not processed by FW");
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(5);
	}

	return QDF_STATUS_SUCCESS;
}
void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint8_t vote_access;

	vote_access = qdf_atomic_read(&scn->ep_vote_access);

	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
		hif_info("EP vote changed from:%u to intermediate state",
			 vote_access);

	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
		QDF_BUG(0);

	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
}

void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
}

void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			    uint8_t type, uint8_t access)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (type == HIF_EP_VOTE_DP_ACCESS)
		qdf_atomic_set(&scn->dp_ep_vote_access, access);
	else
		qdf_atomic_set(&scn->ep_vote_access, access);
}

uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			       uint8_t type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (type == HIF_EP_VOTE_DP_ACCESS)
		return qdf_atomic_read(&scn->dp_ep_vote_access);
	else
		return qdf_atomic_read(&scn->ep_vote_access);
}
  1346. #endif

#ifdef FEATURE_HIF_DELAYED_REG_WRITE
#ifdef MEMORY_DEBUG
#define HIF_REG_WRITE_QUEUE_LEN 128
#else
#define HIF_REG_WRITE_QUEUE_LEN 32
#endif

/**
 * hif_print_reg_write_stats() - Print hif delayed reg write stats
 * @hif_ctx: hif opaque handle
 *
 * Return: None
 */
void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	struct CE_state *ce_state;
	uint32_t *hist;
	int i;

	hist = scn->wstats.sched_delay;
	hif_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
		  qdf_atomic_read(&scn->wstats.enqueues),
		  scn->wstats.dequeues,
		  qdf_atomic_read(&scn->wstats.coalesces),
		  qdf_atomic_read(&scn->wstats.direct),
		  qdf_atomic_read(&scn->wstats.q_depth),
		  scn->wstats.max_q_depth,
		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us],
		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us],
		  hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us],
		  hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]);

	for (i = 0; i < scn->ce_count; i++) {
		ce_state = scn->ce_id_to_state[i];
		if (!ce_state)
			continue;
		hif_debug("ce%d: enq %u deq %u coal %u direct %u",
			  i, ce_state->wstats.enqueues,
			  ce_state->wstats.dequeues,
			  ce_state->wstats.coalesces,
			  ce_state->wstats.direct);
	}
}

/**
 * hif_is_reg_write_tput_level_high() - throughput level for delayed reg writes
 * @scn: hif_softc pointer
 *
 * Return: true if throughput is high, else false.
 */
static inline bool hif_is_reg_write_tput_level_high(struct hif_softc *scn)
{
	int bw_level = hif_get_bandwidth_level(GET_HIF_OPAQUE_HDL(scn));

	return bw_level >= PLD_BUS_WIDTH_MEDIUM;
}

/**
 * hif_reg_write_fill_sched_delay_hist() - fill reg write delay histogram
 * @scn: hif_softc pointer
 * @delay_us: delay in us
 *
 * Return: None
 */
static inline void hif_reg_write_fill_sched_delay_hist(struct hif_softc *scn,
						       uint64_t delay_us)
{
	uint32_t *hist;

	hist = scn->wstats.sched_delay;

	if (delay_us < 100)
		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us]++;
	else if (delay_us < 1000)
		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us]++;
	else if (delay_us < 5000)
		hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us]++;
	else
		hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]++;
}
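
/*
 * Worked example (for illustration): a scheduling delay of 750 us falls
 * into the second bucket, so hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us]
 * is incremented; anything at or above 5000 us lands in
 * hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us].
 */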

/**
 * hif_process_reg_write_q_elem() - process a register write queue element
 * @scn: hif_softc pointer
 * @q_elem: pointer to hal register write queue element
 *
 * Return: The value which was written to the address
 */
static int32_t
hif_process_reg_write_q_elem(struct hif_softc *scn,
			     struct hif_reg_write_q_elem *q_elem)
{
	struct CE_state *ce_state = q_elem->ce_state;
	int32_t write_val = -1;

	qdf_spin_lock_bh(&ce_state->ce_index_lock);

	ce_state->reg_write_in_progress = false;
	ce_state->wstats.dequeues++;

	if (ce_state->src_ring) {
		q_elem->dequeue_val = ce_state->src_ring->write_index;
		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
			       ce_state->src_ring->write_index);
		write_val = ce_state->src_ring->write_index;
	} else if (ce_state->dest_ring) {
		q_elem->dequeue_val = ce_state->dest_ring->write_index;
		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
			       ce_state->dest_ring->write_index);
		write_val = ce_state->dest_ring->write_index;
	} else {
		hif_debug("invalid reg write received");
		qdf_assert(0);
	}

	q_elem->valid = 0;
	ce_state->last_dequeue_time = q_elem->dequeue_time;

	qdf_spin_unlock_bh(&ce_state->ce_index_lock);

	return write_val;
}

/**
 * hif_reg_write_work() - Worker to process delayed writes
 * @arg: hif_softc pointer
 *
 * Return: None
 */
static void hif_reg_write_work(void *arg)
{
	struct hif_softc *scn = arg;
	struct hif_reg_write_q_elem *q_elem;
	uint32_t offset;
	uint64_t delta_us;
	int32_t q_depth, write_val;
	uint32_t num_processed = 0;
	int32_t ring_id;

	q_elem = &scn->reg_write_queue[scn->read_idx];
	q_elem->work_scheduled_time = qdf_get_log_timestamp();
	q_elem->cpu_id = qdf_get_cpu();

	/* Make sure q_elem is consistent in memory across cores */
	qdf_rmb();
	if (!q_elem->valid)
		return;

	q_depth = qdf_atomic_read(&scn->wstats.q_depth);
	if (q_depth > scn->wstats.max_q_depth)
		scn->wstats.max_q_depth = q_depth;

	if (hif_prevent_link_low_power_states(GET_HIF_OPAQUE_HDL(scn))) {
		scn->wstats.prevent_l1_fails++;
		return;
	}

	while (true) {
		qdf_rmb();
		if (!q_elem->valid)
			break;

		q_elem->dequeue_time = qdf_get_log_timestamp();
		ring_id = q_elem->ce_state->id;
		offset = q_elem->offset;
		delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
						      q_elem->enqueue_time);
		hif_reg_write_fill_sched_delay_hist(scn, delta_us);

		scn->wstats.dequeues++;
		qdf_atomic_dec(&scn->wstats.q_depth);

		write_val = hif_process_reg_write_q_elem(scn, q_elem);
		hif_debug("read_idx %u ce_id %d offset 0x%x dequeue_val %d",
			  scn->read_idx, ring_id, offset, write_val);

		qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val,
					   q_elem->dequeue_val,
					   q_elem->enqueue_time,
					   q_elem->dequeue_time);

		num_processed++;
		scn->read_idx = (scn->read_idx + 1) &
				(HIF_REG_WRITE_QUEUE_LEN - 1);
		q_elem = &scn->reg_write_queue[scn->read_idx];
	}

	hif_allow_link_low_power_states(GET_HIF_OPAQUE_HDL(scn));

	/*
	 * Decrement active_work_cnt by the number of elements dequeued after
	 * hif_allow_link_low_power_states.
	 * This makes sure that hif_try_complete_tasks will wait till we make
	 * the bus access in hif_allow_link_low_power_states. This avoids a
	 * race condition between the delayed register worker and bus suspend
	 * (system suspend or runtime suspend).
	 *
	 * The following decrement must be done at the end!
	 */
	qdf_atomic_sub(num_processed, &scn->active_work_cnt);
}
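
/*
 * Illustrative pairing of the producer and this worker (a sketch of the
 * ordering, not additional code):
 *
 *	hif_reg_write_enqueue()           hif_reg_write_work()
 *	  fill q_elem fields                qdf_rmb()
 *	  qdf_wmb(); valid = true           observe q_elem->valid
 *	  active_work_cnt++                 hal_write32_mb(...)
 *	  qdf_queue_work(...)               active_work_cnt -= num_processed
 *
 * Because the decrement happens only after the bus access in
 * hif_allow_link_low_power_states(), hif_try_complete_tasks() cannot
 * observe active_work_cnt == 0 while a bus access is still outstanding.
 */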

/**
 * hif_delayed_reg_write_deinit() - De-initialize delayed reg write processing
 * @scn: hif_softc pointer
 *
 * De-initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: None
 */
static void hif_delayed_reg_write_deinit(struct hif_softc *scn)
{
	qdf_flush_work(&scn->reg_write_work);
	qdf_disable_work(&scn->reg_write_work);
	qdf_flush_workqueue(0, scn->reg_write_wq);
	qdf_destroy_workqueue(0, scn->reg_write_wq);
	qdf_mem_free(scn->reg_write_queue);
}

/**
 * hif_delayed_reg_write_init() - Initialization function for delayed reg
 * writes
 * @scn: hif_softc pointer
 *
 * Initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on allocation
 * failure.
 */
static QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
{
	qdf_atomic_init(&scn->active_work_cnt);
	scn->reg_write_wq =
		qdf_alloc_high_prior_ordered_workqueue("hif_register_write_wq");
	qdf_create_work(0, &scn->reg_write_work, hif_reg_write_work, scn);
	scn->reg_write_queue = qdf_mem_malloc(HIF_REG_WRITE_QUEUE_LEN *
					      sizeof(*scn->reg_write_queue));
	if (!scn->reg_write_queue) {
		hif_err("unable to allocate memory for delayed reg write");
		QDF_BUG(0);
		return QDF_STATUS_E_NOMEM;
	}

	/* Initial value of indices */
	scn->read_idx = 0;
	qdf_atomic_set(&scn->write_idx, -1);

	return QDF_STATUS_SUCCESS;
}
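
/*
 * Sizing note: both indices are advanced with
 * "idx & (HIF_REG_WRITE_QUEUE_LEN - 1)", so HIF_REG_WRITE_QUEUE_LEN
 * must remain a power of two. For example, with a queue length of 32
 * the 33rd enqueue (write_idx == 32) wraps to slot 0, since 32 & 31 == 0.
 */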

static void hif_reg_write_enqueue(struct hif_softc *scn,
				  struct CE_state *ce_state,
				  uint32_t value)
{
	struct hif_reg_write_q_elem *q_elem;
	uint32_t write_idx;

	if (ce_state->reg_write_in_progress) {
		hif_debug("Already in progress ce_id %d offset 0x%x value %u",
			  ce_state->id, ce_state->ce_wrt_idx_offset, value);
		qdf_atomic_inc(&scn->wstats.coalesces);
		ce_state->wstats.coalesces++;
		return;
	}

	write_idx = qdf_atomic_inc_return(&scn->write_idx);
	write_idx = write_idx & (HIF_REG_WRITE_QUEUE_LEN - 1);

	q_elem = &scn->reg_write_queue[write_idx];
	if (q_elem->valid) {
		hif_err("queue full");
		QDF_BUG(0);
		return;
	}

	qdf_atomic_inc(&scn->wstats.enqueues);
	ce_state->wstats.enqueues++;

	qdf_atomic_inc(&scn->wstats.q_depth);

	q_elem->ce_state = ce_state;
	q_elem->offset = ce_state->ce_wrt_idx_offset;
	q_elem->enqueue_val = value;
	q_elem->enqueue_time = qdf_get_log_timestamp();

	/*
	 * Before the valid flag is set to true, all the other fields
	 * in the q_elem need to be updated in memory. Otherwise the
	 * dequeuing worker thread might read stale entries and process
	 * the wrong SRNG.
	 */
	qdf_wmb();
	q_elem->valid = true;

	/*
	 * After all the other fields in the q_elem have been updated
	 * in memory, the valid flag needs to reach memory in time too.
	 * Otherwise the dequeuing worker thread might read a stale
	 * valid flag and skip the work for this round; if no other work
	 * is scheduled later, this hal register write would never be
	 * issued.
	 */
	qdf_wmb();

	ce_state->reg_write_in_progress = true;
	qdf_atomic_inc(&scn->active_work_cnt);

	hif_debug("write_idx %u ce_id %d offset 0x%x value %u",
		  write_idx, ce_state->id, ce_state->ce_wrt_idx_offset, value);

	qdf_queue_work(scn->qdf_dev, scn->reg_write_wq,
		       &scn->reg_write_work);
}
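
/*
 * Coalescing example (illustrative values): two back-to-back writes for
 * the same copy engine before the worker has run result in a single bus
 * access:
 *
 *	hif_reg_write_enqueue(scn, ce_state, 5);  enqueued, valid = true
 *	hif_reg_write_enqueue(scn, ce_state, 7);  reg_write_in_progress is
 *						  still true, so only the
 *						  coalesce counters bump
 *
 * No update is lost: hif_process_reg_write_q_elem() writes the ring's
 * current write_index, not the enqueued value.
 */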

void hif_delayed_reg_write(struct hif_softc *scn, uint32_t ctrl_addr,
			   uint32_t val)
{
	struct CE_state *ce_state;
	int ce_id = COPY_ENGINE_ID(ctrl_addr);

	ce_state = scn->ce_id_to_state[ce_id];

	if (!ce_state->htt_tx_data && !ce_state->htt_rx_data) {
		hif_reg_write_enqueue(scn, ce_state, val);
		return;
	}

	if (hif_is_reg_write_tput_level_high(scn) ||
	    (PLD_MHI_STATE_L0 == pld_get_mhi_state(scn->qdf_dev->dev))) {
		hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset, val);
		qdf_atomic_inc(&scn->wstats.direct);
		ce_state->wstats.direct++;
	} else {
		hif_reg_write_enqueue(scn, ce_state, val);
	}
}
#else
static inline QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}

static inline void hif_delayed_reg_write_deinit(struct hif_softc *scn)
{
}
#endif

#if defined(QCA_WIFI_WCN6450)
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	scn->hal_soc = hal_attach(hif_softc_to_hif_opaque_softc(scn),
				  scn->qdf_dev);
	if (!scn->hal_soc)
		return QDF_STATUS_E_FAILURE;

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	hal_detach(scn->hal_soc);
	scn->hal_soc = NULL;

	return QDF_STATUS_SUCCESS;
}
#elif (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
       defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
       defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
       defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
       defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
       defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		scn->hal_soc = hal_attach(
					hif_softc_to_hif_opaque_softc(scn),
					scn->qdf_dev);
		if (!scn->hal_soc)
			return QDF_STATUS_E_FAILURE;
	}

	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	if (ce_srng_based(scn)) {
		hal_detach(scn->hal_soc);
		scn->hal_soc = NULL;
	}

	return QDF_STATUS_SUCCESS;
}
#else
static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}

static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
{
	return QDF_STATUS_SUCCESS;
}
#endif

int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
{
	int ret;

	switch (bus_type) {
	case QDF_BUS_TYPE_IPCI:
		ret = qdf_set_dma_coherent_mask(dev,
						DMA_COHERENT_MASK_DEFAULT);
		if (ret) {
			hif_err("Failed to set dma mask error = %d", ret);
			return ret;
		}
		break;
	default:
		/* Follow the existing sequence for other targets */
		break;
	}

	return 0;
}
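
/*
 * Usage sketch (hypothetical caller): a bus probe path would set the
 * DMA mask before allocating any descriptor memory, e.g.:
 *
 *	ret = hif_init_dma_mask(dev, QDF_BUS_TYPE_IPCI);
 *	if (ret)
 *		return ret;
 *
 * For bus types other than QDF_BUS_TYPE_IPCI the call is a no-op and
 * returns 0.
 */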

/**
 * hif_enable(): hif_enable
 * @hif_ctx: hif context
 * @dev: device structure
 * @bdev: bus device structure
 * @bid: bus ID
 * @bus_type: bus type
 * @type: enable type
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
		      void *bdev,
		      const struct hif_bus_id *bid,
		      enum qdf_bus_type bus_type,
		      enum hif_enable_type type)
{
	QDF_STATUS status;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_ctx = NULL");
		return QDF_STATUS_E_NULL_VALUE;
	}

	status = hif_enable_bus(scn, dev, bdev, bid, type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_enable_bus error = %d", status);
		return status;
	}

	status = hif_hal_attach(scn);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hal attach failed");
		goto disable_bus;
	}

	if (hif_delayed_reg_write_init(scn) != QDF_STATUS_SUCCESS) {
		hif_err("unable to initialize delayed reg write");
		goto hal_detach;
	}

	if (hif_bus_configure(scn)) {
		hif_err("Target probe failed");
		status = QDF_STATUS_E_FAILURE;
		goto hal_detach;
	}

	hif_ut_suspend_init(scn);
	hif_register_recovery_notifier(scn);
	hif_latency_detect_timer_start(hif_ctx);

	/*
	 * Flag to avoid potential unallocated memory access from the MSI
	 * interrupt handler, which could get scheduled as soon as MSI is
	 * enabled. This handles the race that arises because MSI is enabled
	 * before the memory used by the interrupt handlers is allocated.
	 */
	scn->hif_init_done = true;

	hif_debug("OK");

	return QDF_STATUS_SUCCESS;

hal_detach:
	hif_hal_detach(scn);
disable_bus:
	hif_disable_bus(scn);
	return status;
}

void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	hif_delayed_reg_write_deinit(scn);
	hif_set_enable_detection(hif_ctx, false);
	hif_latency_detect_timer_stop(hif_ctx);

	hif_unregister_recovery_notifier(scn);

	hif_nointrs(scn);
	if (scn->hif_init_done == false)
		hif_shutdown_device(hif_ctx);
	else
		hif_stop(hif_ctx);

	hif_hal_detach(scn);

	hif_disable_bus(scn);

	hif_wlan_disable(scn);

	scn->notice_send = false;

	hif_debug("X");
}

#ifdef CE_TASKLET_DEBUG_ENABLE
void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn)
		return;

	scn->ce_latency_stats = val;
}
#endif

void hif_display_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_display_bus_stats(hif_ctx);
}

qdf_export_symbol(hif_display_stats);

void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
{
	hif_clear_bus_stats(hif_ctx);
}

/**
 * hif_crash_shutdown_dump_bus_register() - dump bus registers
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
#if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
{
	struct hif_opaque_softc *scn = hif_ctx;

	if (hif_check_soc_status(scn))
		return;

	if (hif_dump_registers(scn))
		hif_err("Failed to dump bus registers!");
}

/**
 * hif_crash_shutdown(): hif_crash_shutdown
 *
 * This function is called by the platform driver to dump CE registers
 *
 * @hif_ctx: hif_ctx
 *
 * Return: n/a
 */
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!hif_ctx)
		return;

	if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
		hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
		return;
	}

	if (TARGET_STATUS_RESET == scn->target_status) {
		hif_warn("Target is already asserted, ignore!");
		return;
	}

	if (hif_is_load_or_unload_in_progress(scn)) {
		hif_err("Load/unload is in progress, ignore!");
		return;
	}

	hif_crash_shutdown_dump_bus_register(hif_ctx);
	hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);

	if (ol_copy_ramdump(hif_ctx))
		goto out;

	hif_info("RAM dump collection completed!");

out:
	return;
}
#else
void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
{
	hif_debug("Collecting target RAM dump disabled");
}
#endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */

#ifdef QCA_WIFI_3_0
/**
 * hif_check_fw_reg(): hif_check_fw_reg
 * @scn: scn
 *
 * Return: int
 */
int hif_check_fw_reg(struct hif_opaque_softc *scn)
{
	return 0;
}
#endif

/**
 * hif_read_phy_mem_base(): hif_read_phy_mem_base
 * @scn: scn
 * @phy_mem_base: physical mem base
 *
 * Return: n/a
 */
void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
{
	*phy_mem_base = scn->mem_pa;
}

qdf_export_symbol(hif_read_phy_mem_base);

/**
 * hif_get_device_type(): hif_get_device_type
 * @device_id: device_id
 * @revision_id: revision_id
 * @hif_type: returned hif_type
 * @target_type: returned target_type
 *
 * Return: int
 */
int hif_get_device_type(uint32_t device_id,
			uint32_t revision_id,
			uint32_t *hif_type, uint32_t *target_type)
{
	int ret = 0;

	switch (device_id) {
	case ADRASTEA_DEVICE_ID_P2_E12:
		*hif_type = HIF_TYPE_ADRASTEA;
		*target_type = TARGET_TYPE_ADRASTEA;
		break;

	case AR9888_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		break;

	case AR6320_DEVICE_ID:
		switch (revision_id) {
		case AR6320_FW_1_1:
		case AR6320_FW_1_3:
			*hif_type = HIF_TYPE_AR6320;
			*target_type = TARGET_TYPE_AR6320;
			break;

		case AR6320_FW_2_0:
		case AR6320_FW_3_0:
		case AR6320_FW_3_2:
			*hif_type = HIF_TYPE_AR6320V2;
			*target_type = TARGET_TYPE_AR6320V2;
			break;

		default:
			hif_err("dev_id = 0x%x, rev_id = 0x%x",
				device_id, revision_id);
			ret = -ENODEV;
			goto end;
		}
		break;

	case AR9887_DEVICE_ID:
		*hif_type = HIF_TYPE_AR9888;
		*target_type = TARGET_TYPE_AR9888;
		hif_info(" *********** AR9887 **************");
		break;

	case QCA9984_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9984;
		*target_type = TARGET_TYPE_QCA9984;
		hif_info(" *********** QCA9984 *************");
		break;

	case QCA9888_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9888;
		*target_type = TARGET_TYPE_QCA9888;
		hif_info(" *********** QCA9888 *************");
		break;

	case AR900B_DEVICE_ID:
		*hif_type = HIF_TYPE_AR900B;
		*target_type = TARGET_TYPE_AR900B;
		hif_info(" *********** AR900B *************");
		break;

	case QCA8074_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074;
		*target_type = TARGET_TYPE_QCA8074;
		hif_info(" *********** QCA8074 *************");
		break;

	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6290_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6290;
		*target_type = TARGET_TYPE_QCA6290;
		hif_info(" *********** QCA6290EMU *************");
		break;

	case QCN9000_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9000;
		*target_type = TARGET_TYPE_QCN9000;
		hif_info(" *********** QCN9000 *************");
		break;

	case QCN9224_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9224;
		*target_type = TARGET_TYPE_QCN9224;
		hif_info(" *********** QCN9224 *************");
		break;

	case QCN6122_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN6122;
		*target_type = TARGET_TYPE_QCN6122;
		hif_info(" *********** QCN6122 *************");
		break;

	case QCN9160_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN9160;
		*target_type = TARGET_TYPE_QCN9160;
		hif_info(" *********** QCN9160 *************");
		break;

	case QCN6432_DEVICE_ID:
		*hif_type = HIF_TYPE_QCN6432;
		*target_type = TARGET_TYPE_QCN6432;
		hif_info(" *********** QCN6432 *************");
		break;

	case QCN7605_DEVICE_ID:
	case QCN7605_COMPOSITE:
	case QCN7605_STANDALONE:
	case QCN7605_STANDALONE_V2:
	case QCN7605_COMPOSITE_V2:
		*hif_type = HIF_TYPE_QCN7605;
		*target_type = TARGET_TYPE_QCN7605;
		hif_info(" *********** QCN7605 *************");
		break;

	case QCA6390_DEVICE_ID:
	case QCA6390_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6390;
		*target_type = TARGET_TYPE_QCA6390;
		hif_info(" *********** QCA6390 *************");
		break;

	case QCA6490_DEVICE_ID:
	case QCA6490_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6490;
		*target_type = TARGET_TYPE_QCA6490;
		hif_info(" *********** QCA6490 *************");
		break;

	case QCA6750_DEVICE_ID:
	case QCA6750_EMULATION_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA6750;
		*target_type = TARGET_TYPE_QCA6750;
		hif_info(" *********** QCA6750 *************");
		break;

	case KIWI_DEVICE_ID:
		*hif_type = HIF_TYPE_KIWI;
		*target_type = TARGET_TYPE_KIWI;
		hif_info(" *********** KIWI *************");
		break;

	case MANGO_DEVICE_ID:
		*hif_type = HIF_TYPE_MANGO;
		*target_type = TARGET_TYPE_MANGO;
		hif_info(" *********** MANGO *************");
		break;

	case PEACH_DEVICE_ID:
		*hif_type = HIF_TYPE_PEACH;
		*target_type = TARGET_TYPE_PEACH;
		hif_info(" *********** PEACH *************");
		break;

	case QCA8074V2_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA8074V2;
		*target_type = TARGET_TYPE_QCA8074V2;
		hif_info(" *********** QCA8074V2 *************");
		break;

	case QCA6018_DEVICE_ID:
	case RUMIM2M_DEVICE_ID_NODE0:
	case RUMIM2M_DEVICE_ID_NODE1:
	case RUMIM2M_DEVICE_ID_NODE2:
	case RUMIM2M_DEVICE_ID_NODE3:
	case RUMIM2M_DEVICE_ID_NODE4:
	case RUMIM2M_DEVICE_ID_NODE5:
		*hif_type = HIF_TYPE_QCA6018;
		*target_type = TARGET_TYPE_QCA6018;
		hif_info(" *********** QCA6018 *************");
		break;

	case QCA5018_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA5018;
		*target_type = TARGET_TYPE_QCA5018;
		hif_info(" *********** qca5018 *************");
		break;

	case QCA5332_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA5332;
		*target_type = TARGET_TYPE_QCA5332;
		hif_info(" *********** QCA5332 *************");
		break;

	case QCA9574_DEVICE_ID:
		*hif_type = HIF_TYPE_QCA9574;
		*target_type = TARGET_TYPE_QCA9574;
		hif_info(" *********** QCA9574 *************");
		break;

	case WCN6450_DEVICE_ID:
		*hif_type = HIF_TYPE_WCN6450;
		*target_type = TARGET_TYPE_WCN6450;
		hif_info(" *********** WCN6450 *************");
		break;

	default:
		hif_err("Unsupported device ID = 0x%x!", device_id);
		ret = -ENODEV;
		break;
	}

	if (*target_type == TARGET_TYPE_UNKNOWN) {
		hif_err("Unsupported target_type!");
		ret = -ENODEV;
	}
end:
	return ret;
}
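
/*
 * Usage sketch (illustrative values): callers pass the device and
 * revision IDs read at probe time and must treat a non-zero return as
 * fatal, e.g.:
 *
 *	uint32_t hif_type = 0, target_type = TARGET_TYPE_UNKNOWN;
 *
 *	if (hif_get_device_type(KIWI_DEVICE_ID, 0, &hif_type,
 *				&target_type))
 *		return -ENODEV;
 *
 * On some error paths the outputs are left untouched, which is why
 * *target_type is checked against TARGET_TYPE_UNKNOWN above;
 * initializing it before the call keeps that check meaningful.
 */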

/**
 * hif_get_bus_type() - return the bus type
 * @hif_hdl: HIF Context
 *
 * Return: enum qdf_bus_type
 */
enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);

	return scn->bus_type;
}

/*
 * Target info and ini parameters are global to the driver.
 * Hence these structures are exposed to all the modules in
 * the driver; the modules don't need to maintain multiple
 * copies of the same info. Instead, get the handle from hif
 * and modify the structures through hif.
 */

/**
 * hif_get_ini_handle() - API to get hif_config_param handle
 * @hif_ctx: HIF Context
 *
 * Return: pointer to hif_config_info
 */
struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);

	return &sc->hif_config;
}

/**
 * hif_get_target_info_handle() - API to get hif_target_info handle
 * @hif_ctx: HIF context
 *
 * Return: Pointer to hif_target_info
 */
struct hif_target_info *hif_get_target_info_handle(
					struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);

	return &sc->target_info;
}

qdf_export_symbol(hif_get_target_info_handle);

#ifdef RECEIVE_OFFLOAD
void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
				 void (offld_flush_handler)(void *))
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
	else
		hif_err("NAPI not enabled");
}

qdf_export_symbol(hif_offld_flush_cb_register);

void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
{
	if (hif_napi_enabled(scn, -1))
		hif_napi_rx_offld_flush_cb_deregister(scn);
	else
		hif_err("NAPI not enabled");
}

qdf_export_symbol(hif_offld_flush_cb_deregister);

int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	if (hif_napi_enabled(hif_hdl, -1))
		return NAPI_PIPE2ID(ctx_id);
	else
		return ctx_id;
}
#else /* RECEIVE_OFFLOAD */
int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	return 0;
}

qdf_export_symbol(hif_get_rx_ctx_id);
#endif /* RECEIVE_OFFLOAD */

#if defined(FEATURE_LRO)
/**
 * hif_get_lro_info - Returns LRO instance for instance ID
 * @ctx_id: LRO instance ID
 * @hif_hdl: HIF Context
 *
 * Return: Pointer to LRO instance.
 */
void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
{
	void *data;

	if (hif_napi_enabled(hif_hdl, -1))
		data = hif_napi_get_lro_info(hif_hdl, ctx_id);
	else
		data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);

	return data;
}
#endif

/**
 * hif_get_target_status - API to get target status
 * @hif_ctx: HIF Context
 *
 * Return: enum hif_target_status
 */
enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->target_status;
}

qdf_export_symbol(hif_get_target_status);

/**
 * hif_set_target_status() - API to set target status
 * @hif_ctx: HIF Context
 * @status: Target Status
 *
 * Return: void
 */
void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
			   hif_target_status status)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->target_status = status;
}

/**
 * hif_init_ini_config() - API to initialize HIF configuration parameters
 * @hif_ctx: HIF Context
 * @cfg: HIF Configuration
 *
 * Return: void
 */
void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
			 struct hif_config_info *cfg)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
}

/**
 * hif_get_conparam() - API to get driver mode in HIF
 * @scn: HIF Context
 *
 * Return: driver mode of operation
 */
uint32_t hif_get_conparam(struct hif_softc *scn)
{
	if (!scn)
		return 0;

	return scn->hif_con_param;
}

/**
 * hif_get_callbacks_handle() - API to get callbacks Handle
 * @scn: HIF Context
 *
 * Return: pointer to HIF Callbacks
 */
struct hif_driver_state_callbacks *hif_get_callbacks_handle(
							struct hif_softc *scn)
{
	return &scn->callbacks;
}

/**
 * hif_is_driver_unloading() - API to query upper layers if driver is unloading
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_driver_unloading(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_driver_unloading)
		return cbk->is_driver_unloading(cbk->context);

	return false;
}

/**
 * hif_is_load_or_unload_in_progress() - API to query upper layers if
 * load/unload is in progress
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_load_unload_in_progress)
		return cbk->is_load_unload_in_progress(cbk->context);

	return false;
}

/**
 * hif_is_recovery_in_progress() - API to query upper layers if recovery is in
 * progress
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_recovery_in_progress(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_recovery_in_progress)
		return cbk->is_recovery_in_progress(cbk->context);

	return false;
}

#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
    defined(HIF_IPCI)

/**
 * hif_update_pipe_callback() - API to register pipe specific callbacks
 * @osc: Opaque softc
 * @pipeid: pipe id
 * @callbacks: callbacks to register
 *
 * Return: void
 */
void hif_update_pipe_callback(struct hif_opaque_softc *osc,
			      u_int8_t pipeid,
			      struct hif_msg_callbacks *callbacks)
{
	struct hif_softc *scn = HIF_GET_SOFTC(osc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct HIF_CE_pipe_info *pipe_info;

	QDF_BUG(pipeid < CE_COUNT_MAX);

	hif_debug("pipeid: %d", pipeid);

	pipe_info = &hif_state->pipe_info[pipeid];

	qdf_mem_copy(&pipe_info->pipe_callbacks,
		     callbacks, sizeof(pipe_info->pipe_callbacks));
}

qdf_export_symbol(hif_update_pipe_callback);

/**
 * hif_is_target_ready() - API to query if target is in ready state
 * @scn: HIF Context
 *
 * Return: True/False
 */
bool hif_is_target_ready(struct hif_softc *scn)
{
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->is_target_ready)
		return cbk->is_target_ready(cbk->context);
	/*
	 * If the callback is not registered then there is no way to
	 * determine if the target is ready. In such a case, return
	 * true to indicate that the target is ready.
	 */
	return true;
}

qdf_export_symbol(hif_is_target_ready);

int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (cbk && cbk->get_bandwidth_level)
		return cbk->get_bandwidth_level(cbk->context);

	return 0;
}

qdf_export_symbol(hif_get_bandwidth_level);

#ifdef DP_MEM_PRE_ALLOC
void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
					 qdf_size_t size,
					 qdf_dma_addr_t *paddr,
					 uint32_t ring_type,
					 uint8_t *is_mem_prealloc)
{
	void *vaddr = NULL;
	struct hif_driver_state_callbacks *cbk =
				hif_get_callbacks_handle(scn);

	*is_mem_prealloc = false;
	if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
		vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
								   paddr,
								   ring_type);
		if (vaddr) {
			*is_mem_prealloc = true;
			goto end;
		}
	}

	vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
					 scn->qdf_dev->dev,
					 size,
					 paddr);

end:
	dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
		*is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
		(void *)*paddr, (int)size, ring_type);

	return vaddr;
}

void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
				       qdf_size_t size,
				       void *vaddr,
				       qdf_dma_addr_t paddr,
				       qdf_dma_context_t memctx,
				       uint8_t is_mem_prealloc)
{
	struct hif_driver_state_callbacks *cbk =
				hif_get_callbacks_handle(scn);

	if (is_mem_prealloc) {
		if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
			cbk->prealloc_put_consistent_mem_unaligned(vaddr);
		} else {
			dp_warn("prealloc_put_consistent_mem_unaligned NULL");
			QDF_BUG(0);
		}
	} else {
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					size, vaddr, paddr, memctx);
	}
}

void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
				  qdf_size_t elem_size, uint16_t elem_num,
				  struct qdf_mem_multi_page_t *pages,
				  bool cacheable)
{
	struct hif_driver_state_callbacks *cbk =
			hif_get_callbacks_handle(scn);

	if (cbk && cbk->prealloc_get_multi_pages)
		cbk->prealloc_get_multi_pages(desc_type, elem_size, elem_num,
					      pages, cacheable);

	if (!pages->num_pages)
		qdf_mem_multi_pages_alloc(scn->qdf_dev, pages,
					  elem_size, elem_num, 0, cacheable);
}

void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
				  struct qdf_mem_multi_page_t *pages,
				  bool cacheable)
{
	struct hif_driver_state_callbacks *cbk =
			hif_get_callbacks_handle(scn);

	if (cbk && cbk->prealloc_put_multi_pages &&
	    pages->is_mem_prealloc)
		cbk->prealloc_put_multi_pages(desc_type, pages);

	if (!pages->is_mem_prealloc)
		qdf_mem_multi_pages_free(scn->qdf_dev, pages, 0,
					 cacheable);
}
#endif

/**
 * hif_batch_send() - API to access hif specific function
 * ce_batch_send.
 * @osc: HIF Context
 * @msdu: list of msdus to be sent
 * @transfer_id: transfer id
 * @len: downloaded length
 * @sendhead:
 *
 * Return: list of msdus not sent
 */
qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			  uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	if (!ce_tx_hdl)
		return NULL;

	return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
			     len, sendhead);
}

qdf_export_symbol(hif_batch_send);

/**
 * hif_update_tx_ring() - API to access hif specific function
 * ce_update_tx_ring.
 * @osc: HIF Context
 * @num_htt_cmpls: number of htt completions received.
 *
 * Return: void
 */
void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
}

qdf_export_symbol(hif_update_tx_ring);

/**
 * hif_send_single() - API to access hif specific function
 * ce_send_single.
 * @osc: HIF Context
 * @msdu: msdu to be sent
 * @transfer_id: transfer id
 * @len: downloaded length
 *
 * Return: msdu sent status
 */
QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
			   uint32_t transfer_id, u_int32_t len)
{
	void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);

	if (!ce_tx_hdl)
		return QDF_STATUS_E_NULL_VALUE;

	return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
			      len);
}

qdf_export_symbol(hif_send_single);
#endif
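
/*
 * Usage sketch (hypothetical caller, error handling elided): a caller
 * such as an HTT download path might hand one msdu to the HTT tx copy
 * engine like this:
 *
 *	QDF_STATUS status;
 *
 *	status = hif_send_single(osc, msdu, transfer_id, len);
 *	if (QDF_IS_STATUS_ERROR(status))
 *		qdf_nbuf_free(msdu);
 *
 * QDF_STATUS_E_NULL_VALUE indicates the CE_HTT_TX_CE handle was not
 * available.
 */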

/**
 * hif_reg_write() - API to access hif specific function
 * hif_write32_mb.
 * @hif_ctx: HIF Context
 * @offset: offset at which the value has to be written
 * @value: value to be written
 *
 * Return: None
 */
void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
		   uint32_t value)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hif_write32_mb(scn, scn->mem + offset, value);
}

qdf_export_symbol(hif_reg_write);

/**
 * hif_reg_read() - API to access hif specific function
 * hif_read32_mb.
 * @hif_ctx: HIF Context
 * @offset: offset from which the value has to be read
 *
 * Return: Read value
 */
uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return hif_read32_mb(scn, scn->mem + offset);
}

qdf_export_symbol(hif_reg_read);

/**
 * hif_ramdump_handler(): generic ramdump handler
 * @scn: struct hif_opaque_softc
 *
 * Return: None
 */
void hif_ramdump_handler(struct hif_opaque_softc *scn)
{
	if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
		hif_usb_ramdump_handler(scn);
}

hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->wake_irq_type;
}

irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
{
	struct hif_softc *scn = context;

	hif_info("wake interrupt received on irq %d", irq);

	hif_rtpm_set_monitor_wake_intr(0);
	hif_rtpm_request_resume();

	if (scn->initial_wakeup_cb)
		scn->initial_wakeup_cb(scn->initial_wakeup_priv);

	if (hif_is_ut_suspended(scn))
		hif_ut_fw_resume(scn);

	qdf_pm_system_wakeup();

	return IRQ_HANDLED;
}

void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
			       void (*callback)(void *),
			       void *priv)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->initial_wakeup_cb = callback;
	scn->initial_wakeup_priv = priv;
}

void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
				       uint32_t ce_service_max_yield_time)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	hif_ctx->ce_service_max_yield_time =
		ce_service_max_yield_time * 1000;
}

unsigned long long
hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	return hif_ctx->ce_service_max_yield_time;
}

void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
					 uint8_t ce_service_max_rx_ind_flush)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	if (ce_service_max_rx_ind_flush == 0 ||
	    ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
		hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
	else
		hif_ctx->ce_service_max_rx_ind_flush =
			ce_service_max_rx_ind_flush;
}

#ifdef SYSTEM_PM_CHECK
void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
			       enum hif_system_pm_state state)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	qdf_atomic_set(&hif_ctx->sys_pm_state, state);
}

int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);

	return qdf_atomic_read(&hif_ctx->sys_pm_state);
}

int hif_system_pm_state_check(struct hif_opaque_softc *hif)
{
	struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
	int32_t sys_pm_state;

	if (!hif_ctx) {
		hif_err("hif context is null");
		return -EFAULT;
	}

	sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
	if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
	    sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
		hif_info("Triggering system wakeup");
		qdf_pm_system_wakeup();
		return -EAGAIN;
	}

	return 0;
}
#endif
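
/*
 * Usage sketch (hypothetical caller): register access paths can gate
 * bus writes on the system PM state and retry later on -EAGAIN:
 *
 *	if (hif_system_pm_state_check(hif_ctx) == -EAGAIN)
 *		return;
 *
 * -EAGAIN means the bus is suspending or suspended and a system wakeup
 * was triggered; any other state passes through with a return of 0.
 */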

#ifdef WLAN_FEATURE_AFFINITY_MGR
/*
 * hif_audio_cpu_affinity_allowed() - Check if audio cpu affinity is allowed
 *
 * @scn: hif handle
 * @cfg: hif affinity manager configuration for IRQ
 * @audio_taken_cpu: Current CPUs which are taken by audio.
 * @current_time: Current system time.
 *
 * This API checks two conditions:
 * 1) The last audio taken mask and the current taken mask differ.
 * 2) The last time the IRQ was affined away due to audio taken CPUs
 *    is more than the time threshold ago (5 seconds in the current case).
 * Return true only if both conditions are satisfied.
 *
 * Return: bool: true if it is allowed to affine away audio taken cpus.
 */
static inline bool
hif_audio_cpu_affinity_allowed(struct hif_softc *scn,
			       struct hif_cpu_affinity *cfg,
			       qdf_cpu_mask audio_taken_cpu,
			       uint64_t current_time)
{
	if (!qdf_cpumask_equal(&audio_taken_cpu, &cfg->walt_taken_mask) &&
	    (qdf_log_timestamp_to_usecs(current_time -
					cfg->last_affined_away)
	     < scn->time_threshold))
		return false;
	return true;
}

/*
 * hif_affinity_mgr_check_update_mask() - Check if the cpu mask needs updating
 *
 * @scn: hif handle
 * @cfg: hif affinity manager configuration for IRQ
 * @audio_taken_cpu: Current CPUs which are taken by audio.
 * @cpu_mask: CPU mask which needs to be updated.
 * @current_time: Current system time.
 *
 * This API checks if the Pro audio use case is running and whether
 * cpu_mask needs to be updated.
 *
 * Return: QDF_STATUS
 */
static inline QDF_STATUS
hif_affinity_mgr_check_update_mask(struct hif_softc *scn,
				   struct hif_cpu_affinity *cfg,
				   qdf_cpu_mask audio_taken_cpu,
				   qdf_cpu_mask *cpu_mask,
				   uint64_t current_time)
{
	qdf_cpu_mask allowed_mask;

	/*
	 * Case 1: audio_taken_mask is empty
	 * Check whether the passed cpu_mask and wlan_requested_mask are the
	 * same. If the masks differ, copy wlan_requested_mask (the IRQ
	 * affinity mask requested by WLAN) to cpu_mask.
	 *
	 * Case 2: audio_taken_mask is not empty
	 * 1. Only allow an update if the last time the IRQ was affined away
	 *    due to audio taken CPUs is more than 5 seconds ago, or the
	 *    update is requested by WLAN.
	 * 2. Only allow silver cores to be affined away.
	 * 3. Check if any CPU allowed for the audio use case is set in
	 *    cpu_mask.
	 *    i.  If any such CPU is set, mask it out of cpu_mask.
	 *    ii. If, after masking out the audio taken CPUs (silver cores),
	 *        cpu_mask is empty, set the mask to all CPUs except those
	 *        taken by audio.
	 * Example:
	 * | Audio mask | mask allowed | cpu_mask | WLAN req mask | new cpu_mask |
	 * |    0x00    |     0x00     |   0x0C   |      0x0C     |     0x0C     |
	 * |    0x00    |     0x00     |   0x03   |      0x03     |     0x03     |
	 * |    0x00    |     0x00     |   0xFC   |      0x03     |     0x03     |
	 * |    0x00    |     0x00     |   0x03   |      0x0C     |     0x0C     |
	 * |    0x0F    |     0x03     |   0x0C   |      0x0C     |     0x0C     |
	 * |    0x0F    |     0x03     |   0x03   |      0x03     |     0xFC     |
	 * |    0x03    |     0x03     |   0x0C   |      0x0C     |     0x0C     |
	 * |    0x03    |     0x03     |   0x03   |      0x03     |     0xFC     |
	 * |    0x03    |     0x03     |   0xFC   |      0x03     |     0xFC     |
	 * |    0xF0    |     0x00     |   0x0C   |      0x0C     |     0x0C     |
	 * |    0xF0    |     0x00     |   0x03   |      0x03     |     0x03     |
	 */

	/* Check if audio taken mask is empty */
	if (qdf_likely(qdf_cpumask_empty(&audio_taken_cpu))) {
		/*
		 * If the CPU mask requested by WLAN for the IRQ and the
		 * passed cpu_mask (the CPU mask currently set for the IRQ)
		 * differ, copy the requested mask into cpu_mask and return.
		 */
		if (qdf_unlikely(!qdf_cpumask_equal(cpu_mask,
						    &cfg->wlan_requested_mask))) {
			qdf_cpumask_copy(cpu_mask, &cfg->wlan_requested_mask);
			return QDF_STATUS_SUCCESS;
		}
		return QDF_STATUS_E_ALREADY;
	}

	if (!(hif_audio_cpu_affinity_allowed(scn, cfg, audio_taken_cpu,
					     current_time) ||
	      cfg->update_requested))
		return QDF_STATUS_E_AGAIN;

	/* Only allow silver cores to be affined away */
	qdf_cpumask_and(&allowed_mask, &scn->allowed_mask, &audio_taken_cpu);

	if (qdf_cpumask_intersects(cpu_mask, &allowed_mask)) {
		/*
		 * If any of the taken CPUs (silver cores) is set in cpu_mask,
		 * mask the audio taken CPUs out of cpu_mask.
		 */
		qdf_cpumask_andnot(cpu_mask, &cfg->wlan_requested_mask,
				   &allowed_mask);
		/*
		 * If cpu_mask is empty, set it to all CPUs except those
		 * taken by audio (silver cores).
		 */
		if (qdf_unlikely(qdf_cpumask_empty(cpu_mask)))
			qdf_cpumask_complement(cpu_mask, &allowed_mask);
		return QDF_STATUS_SUCCESS;
	}

	return QDF_STATUS_E_ALREADY;
}
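
/*
 * Worked example from the table above: audio has taken the silver cores
 * (audio mask 0x0F, allowed mask 0x03) and WLAN requested 0x03. Masking
 * the audio CPUs out of 0x03 leaves an empty mask, so the complement of
 * the allowed mask is used instead and the IRQ moves to 0xFC (all CPUs
 * except the silver cores).
 */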

static inline QDF_STATUS
hif_check_and_affine_irq(struct hif_softc *scn, struct hif_cpu_affinity *cfg,
			 qdf_cpu_mask audio_taken_cpu, qdf_cpu_mask cpu_mask,
			 uint64_t current_time)
{
	QDF_STATUS status;

	status = hif_affinity_mgr_check_update_mask(scn, cfg,
						    audio_taken_cpu,
						    &cpu_mask,
						    current_time);
	/* Set IRQ affinity if the CPU mask was updated */
	if (QDF_IS_STATUS_SUCCESS(status)) {
		status = hif_irq_set_affinity_hint(cfg->irq,
						   &cpu_mask);
		if (QDF_IS_STATUS_SUCCESS(status)) {
			/* Store audio taken CPU mask */
			qdf_cpumask_copy(&cfg->walt_taken_mask,
					 &audio_taken_cpu);
			/* Store the CPU mask which was set for the IRQ */
			qdf_cpumask_copy(&cfg->current_irq_mask,
					 &cpu_mask);
			/* Set the time when the IRQ affinity was updated */
			cfg->last_updated = current_time;
			if (hif_audio_cpu_affinity_allowed(scn, cfg,
							   audio_taken_cpu,
							   current_time))
				/*
				 * If the CPU mask was updated because CPUs
				 * were taken by audio, update the
				 * last_affined_away time.
				 */
				cfg->last_affined_away = current_time;
		}
	}

	return status;
}

void hif_affinity_mgr_affine_irq(struct hif_softc *scn)
{
	bool audio_affinity_allowed = false;
	int i, j, ce_id;
	uint64_t current_time;
	char cpu_str[10];
	QDF_STATUS status;
	qdf_cpu_mask cpu_mask, audio_taken_cpu;
	struct HIF_CE_state *hif_state;
	struct hif_exec_context *hif_ext_group;
	struct CE_attr *host_ce_conf;
	struct HIF_CE_state *ce_sc;
	struct hif_cpu_affinity *cfg;

	if (!scn->affinity_mgr_supported)
		return;

	current_time = hif_get_log_timestamp();
	/* Get CPU mask for audio taken CPUs */
	audio_taken_cpu = qdf_walt_get_cpus_taken();

	ce_sc = HIF_GET_CE_STATE(scn);
	host_ce_conf = ce_sc->host_ce_config;
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
			continue;
		cfg = &scn->ce_irq_cpu_mask[ce_id];
		qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
		status =
			hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
						 cpu_mask, current_time);
		if (QDF_IS_STATUS_SUCCESS(status))
			audio_affinity_allowed = true;
	}

	hif_state = HIF_GET_CE_STATE(scn);
	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		for (j = 0; j < hif_ext_group->numirq; j++) {
			cfg = &scn->irq_cpu_mask[hif_ext_group->grp_id][j];
			qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
			status =
				hif_check_and_affine_irq(scn, cfg,
							 audio_taken_cpu,
							 cpu_mask,
							 current_time);
			if (QDF_IS_STATUS_SUCCESS(status)) {
				qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
				audio_affinity_allowed = true;
			}
		}
	}

	if (audio_affinity_allowed) {
		qdf_thread_cpumap_print_to_pagebuf(false, cpu_str,
						   &audio_taken_cpu);
		hif_info("Audio taken CPU mask: %s", cpu_str);
	}
}

static inline QDF_STATUS
hif_affinity_mgr_set_irq_affinity(struct hif_softc *scn, uint32_t irq,
				  struct hif_cpu_affinity *cfg,
				  qdf_cpu_mask *cpu_mask)
{
	uint64_t current_time;
	char cpu_str[10];
	QDF_STATUS status, mask_updated;
	qdf_cpu_mask audio_taken_cpu = qdf_walt_get_cpus_taken();

	current_time = hif_get_log_timestamp();
	qdf_cpumask_copy(&cfg->wlan_requested_mask, cpu_mask);
	cfg->update_requested = true;
	mask_updated = hif_affinity_mgr_check_update_mask(scn, cfg,
							  audio_taken_cpu,
							  cpu_mask,
							  current_time);
	status = hif_irq_set_affinity_hint(irq, cpu_mask);
	if (QDF_IS_STATUS_SUCCESS(status)) {
		qdf_cpumask_copy(&cfg->walt_taken_mask, &audio_taken_cpu);
		qdf_cpumask_copy(&cfg->current_irq_mask, cpu_mask);
		if (QDF_IS_STATUS_SUCCESS(mask_updated)) {
			cfg->last_updated = current_time;
			if (hif_audio_cpu_affinity_allowed(scn, cfg,
							   audio_taken_cpu,
							   current_time)) {
				cfg->last_affined_away = current_time;
				qdf_thread_cpumap_print_to_pagebuf(false,
								   cpu_str,
								   &audio_taken_cpu);
				hif_info_rl("Audio taken CPU mask: %s",
					    cpu_str);
			}
		}
	}
	cfg->update_requested = false;

	return status;
}

QDF_STATUS
hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
				      uint32_t grp_id, uint32_t irq_index,
				      qdf_cpu_mask *cpu_mask)
{
	struct hif_cpu_affinity *cfg;

	if (!scn->affinity_mgr_supported)
		return hif_irq_set_affinity_hint(irq, cpu_mask);

	cfg = &scn->irq_cpu_mask[grp_id][irq_index];
	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
}

QDF_STATUS
hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
				     uint32_t ce_id, qdf_cpu_mask *cpu_mask)
{
	struct hif_cpu_affinity *cfg;

	if (!scn->affinity_mgr_supported)
		return hif_irq_set_affinity_hint(irq, cpu_mask);

	cfg = &scn->ce_irq_cpu_mask[ce_id];
	return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
}

void
hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq)
{
	unsigned int cpus;
	qdf_cpu_mask cpu_mask = {0};
	struct hif_cpu_affinity *cfg = NULL;

	if (!scn->affinity_mgr_supported)
		return;

	/* Set CPU mask to the silver cores */
	qdf_for_each_possible_cpu(cpus)
		if (qdf_topology_physical_package_id(cpus) ==
		    CPU_CLUSTER_TYPE_LITTLE)
			qdf_cpumask_set_cpu(cpus, &cpu_mask);

	cfg = &scn->ce_irq_cpu_mask[id];
	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
	cfg->irq = irq;
	cfg->last_updated = 0;
	cfg->last_affined_away = 0;
	cfg->update_requested = false;
}

void
hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id,
			      int irq_num, int irq)
{
	unsigned int cpus;
	qdf_cpu_mask cpu_mask = {0};
	struct hif_cpu_affinity *cfg = NULL;

	if (!scn->affinity_mgr_supported)
		return;

	/* Set CPU mask to the silver cores */
	qdf_for_each_possible_cpu(cpus)
		if (qdf_topology_physical_package_id(cpus) ==
		    CPU_CLUSTER_TYPE_LITTLE)
			qdf_cpumask_set_cpu(cpus, &cpu_mask);

	cfg = &scn->irq_cpu_mask[grp_id][irq_num];
	qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
	qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
	cfg->irq = irq;
	cfg->last_updated = 0;
	cfg->last_affined_away = 0;
	cfg->update_requested = false;
}
#endif