hif_main.c

/*
 * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include "targcfg.h"
#include "qdf_lock.h"
#include "qdf_status.h"
#include <qdf_atomic.h>		/* qdf_atomic_read */
#include <targaddrs.h>
#include "hif_io32.h"
#include <hif.h>
#include <target_type.h>
#include "regtable.h"
#define ATH_MODULE_NAME hif
#include <a_debug.h>
#include "hif_main.h"
#include "hif_hw_version.h"
#if (defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
     defined(HIF_IPCI))
#include "ce_tasklet.h"
#include "ce_api.h"
#endif
#include "qdf_trace.h"
#include "hif_debug.h"
#include "mp_dev.h"
#if defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
    defined(QCA_WIFI_QCA5018) || defined(QCA_WIFI_QCA9574) || \
    defined(QCA_WIFI_QCA5332)
#include "hal_api.h"
#endif
#include "hif_napi.h"
#include "hif_unit_test_suspend_i.h"
#include "qdf_module.h"
#ifdef HIF_CE_LOG_INFO
#include <qdf_notifier.h>
#include <qdf_hang_event_notifier.h>
#endif
#include <linux/cpumask.h>
#include <pld_common.h>
#include "ce_internal.h"
#include <qdf_tracepoint.h>

void hif_dump(struct hif_opaque_softc *hif_ctx, uint8_t cmd_id, bool start)
{
	hif_trigger_dump(hif_ctx, cmd_id, start);
}

/**
 * hif_get_target_id(): get the target id
 * @scn: hif_softc context
 *
 * Return the virtual memory base address to the caller.
 *
 * Return: A_target_id_t
 */
A_target_id_t hif_get_target_id(struct hif_softc *scn)
{
	return scn->mem;
}

/**
 * hif_get_targetdef(): get the target register table definition
 * @hif_ctx: hif context
 *
 * Return: void *
 */
void *hif_get_targetdef(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	return scn->targetdef;
}

#ifdef FORCE_WAKE
#ifndef QCA_WIFI_WCN6450
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (ce_srng_based(scn))
		hal_set_init_phase(scn->hal_soc, init_phase);
}
#else
void hif_srng_init_phase(struct hif_opaque_softc *hif_ctx,
			 bool init_phase)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	hal_set_init_phase(scn->hal_soc, init_phase);
}
#endif
#endif /* FORCE_WAKE */

#ifdef HIF_IPCI
void hif_shutdown_notifier_cb(void *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	scn->recovery = true;
}
#endif

/**
 * hif_vote_link_down(): unvote for link up
 * @hif_ctx: hif context
 *
 * Call hif_vote_link_down to release a previous request made using
 * hif_vote_link_up. A hif_vote_link_down call should only be made
 * after a corresponding hif_vote_link_up; otherwise you could be
 * negating a vote from another source. When no votes are present,
 * hif will not guarantee the link state after hif_bus_suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_up by only calling in the MC thread
 * and in the initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_down(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	if (scn->linkstate_vote == 0)
		QDF_DEBUG_PANIC("linkstate_vote(%d) has already been 0",
				scn->linkstate_vote);

	scn->linkstate_vote--;
	hif_info("Down_linkstate_vote %d", scn->linkstate_vote);
	if (scn->linkstate_vote == 0)
		hif_bus_prevent_linkdown(scn, false);
}

/**
 * hif_vote_link_up(): vote to prevent bus from suspending
 * @hif_ctx: hif context
 *
 * Makes hif guarantee that fw can message the host normally
 * during suspend.
 *
 * SYNCHRONIZE WITH hif_vote_link_down by only calling in the MC thread
 * and in the initialization/deinitialization sequences.
 *
 * Return: n/a
 */
void hif_vote_link_up(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	scn->linkstate_vote++;
	hif_info("Up_linkstate_vote %d", scn->linkstate_vote);
	if (scn->linkstate_vote == 1)
		hif_bus_prevent_linkdown(scn, true);
}
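
/*
 * Example: link vote balancing (illustrative sketch only, not part of
 * this file's API surface). linkstate_vote is a reference count, so every
 * hif_vote_link_up() from a client must eventually be paired with one
 * hif_vote_link_down(); the bus is only allowed to suspend the link once
 * the count returns to zero. A hypothetical caller:
 *
 *	hif_vote_link_up(hif_ctx);      // count 0 -> 1, link held up
 *	do_fw_messaging_over_suspend(); // hypothetical client work
 *	hif_vote_link_down(hif_ctx);    // count 1 -> 0, suspend allowed
 */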
/**
 * hif_can_suspend_link(): query if hif is permitted to suspend the link
 * @hif_ctx: hif context
 *
 * Hif will ensure that the link won't be suspended if the upper layers
 * don't want it to.
 *
 * SYNCHRONIZATION: the MC thread is stopped before bus suspend, so
 * we don't need extra locking to ensure the votes don't change while
 * we are in the process of suspending or resuming.
 *
 * Return: false if hif will guarantee link up during suspend.
 */
bool hif_can_suspend_link(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	QDF_BUG(scn);
	return scn->linkstate_vote == 0;
}

/**
 * hif_hia_item_address(): get the host interest address for an item
 * @target_type: target_type
 * @item_offset: item_offset
 *
 * Return: host interest address of the item, or 0 if the target has none
 */
uint32_t hif_hia_item_address(uint32_t target_type, uint32_t item_offset)
{
	switch (target_type) {
	case TARGET_TYPE_AR6002:
		return AR6002_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6003:
		return AR6003_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6004:
		return AR6004_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6006:
		return AR6006_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR9888:
		return AR9888_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_AR6320:
	case TARGET_TYPE_AR6320V2:
		return AR6320_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_ADRASTEA:
		/* ADRASTEA doesn't have a host interest address */
		ASSERT(0);
		return 0;
	case TARGET_TYPE_AR900B:
		return AR900B_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9984:
		return QCA9984_HOST_INTEREST_ADDRESS + item_offset;
	case TARGET_TYPE_QCA9888:
		return QCA9888_HOST_INTEREST_ADDRESS + item_offset;
	default:
		ASSERT(0);
		return 0;
	}
}
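
/*
 * Example: resolving a host-interest item address (illustrative sketch
 * only). The item offset is typically a field offset into the target's
 * host-interest structure; `struct host_interest_s` and its
 * `hi_board_data` member are assumed here to come from targaddrs.h:
 *
 *	uint32_t addr =
 *		hif_hia_item_address(TARGET_TYPE_AR9888,
 *				     offsetof(struct host_interest_s,
 *					      hi_board_data));
 *	// addr == AR9888_HOST_INTEREST_ADDRESS + the field offset
 */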
/**
 * hif_max_num_receives_reached() - check whether the max receive count is reached
 * @scn: HIF Context
 * @count: number of messages received so far
 *
 * Return: true if the receive limit for this pass has been reached
 */
bool hif_max_num_receives_reached(struct hif_softc *scn, unsigned int count)
{
	if (QDF_IS_EPPING_ENABLED(hif_get_conparam(scn)))
		return count > 120;
	else
		return count > MAX_NUM_OF_RECEIVES;
}

/**
 * init_buffer_count() - initial buffer count
 * @maxSize: maximum number of buffers
 *
 * Routine to modify the initial buffer count to be allocated on an os
 * platform basis. Platform owner will need to modify this as needed.
 *
 * Return: qdf_size_t
 */
qdf_size_t init_buffer_count(qdf_size_t maxSize)
{
	return maxSize;
}

/**
 * hif_save_htc_htt_config_endpoint() - save htt_tx_endpoint
 * @hif_ctx: hif context
 * @htc_htt_tx_endpoint: htt_tx_endpoint
 *
 * Return: void
 */
void hif_save_htc_htt_config_endpoint(struct hif_opaque_softc *hif_ctx,
				      int htc_htt_tx_endpoint)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is NULL");
		return;
	}

	scn->htc_htt_tx_endpoint = htc_htt_tx_endpoint;
}

qdf_export_symbol(hif_save_htc_htt_config_endpoint);

static const struct qwlan_hw qwlan_hw_list[] = {
	{
		.id = AR6320_REV1_VERSION,
		.subid = 0,
		.name = "QCA6174_REV1",
	},
	{
		.id = AR6320_REV1_1_VERSION,
		.subid = 0x1,
		.name = "QCA6174_REV1_1",
	},
	{
		.id = AR6320_REV1_3_VERSION,
		.subid = 0x2,
		.name = "QCA6174_REV1_3",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x4,
		.name = "QCA6174_REV2_1",
	},
	{
		.id = AR6320_REV2_1_VERSION,
		.subid = 0x5,
		.name = "QCA6174_REV2_2",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x6,
		.name = "QCA6174_REV2.3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x8,
		.name = "QCA6174_REV3",
	},
	{
		.id = AR6320_REV3_VERSION,
		.subid = 0x9,
		.name = "QCA6174_REV3_1",
	},
	{
		.id = AR6320_REV3_2_VERSION,
		.subid = 0xA,
		.name = "AR6320_REV3_2_VERSION",
	},
	{
		.id = QCA6390_V1,
		.subid = 0x0,
		.name = "QCA6390_V1",
	},
	{
		.id = QCA6490_V1,
		.subid = 0x0,
		.name = "QCA6490_V1",
	},
	{
		.id = WCN3990_v1,
		.subid = 0x0,
		.name = "WCN3990_V1",
	},
	{
		.id = WCN3990_v2,
		.subid = 0x0,
		.name = "WCN3990_V2",
	},
	{
		.id = WCN3990_v2_1,
		.subid = 0x0,
		.name = "WCN3990_V2.1",
	},
	{
		.id = WCN3998,
		.subid = 0x0,
		.name = "WCN3998",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xC,
		.name = "QCA9379_REV1",
	},
	{
		.id = QCA9379_REV1_VERSION,
		.subid = 0xD,
		.name = "QCA9379_REV1_1",
	},
	{
		.id = MANGO_V1,
		.subid = 0xF,
		.name = "MANGO_V1",
	},
	{
		.id = PEACH_V1,
		.subid = 0,
		.name = "PEACH_V1",
	},
	{
		.id = KIWI_V1,
		.subid = 0,
		.name = "KIWI_V1",
	},
	{
		.id = KIWI_V2,
		.subid = 0,
		.name = "KIWI_V2",
	},
	{
		.id = WCN6750_V1,
		.subid = 0,
		.name = "WCN6750_V1",
	},
	{
		.id = WCN6750_V2,
		.subid = 0,
		.name = "WCN6750_V2",
	},
	{
		.id = WCN6450_V1,
		.subid = 0,
		.name = "WCN6450_V1",
	},
	{
		.id = QCA6490_v2_1,
		.subid = 0,
		.name = "QCA6490",
	},
	{
		.id = QCA6490_v2,
		.subid = 0,
		.name = "QCA6490",
	},
	{
		.id = WCN3990_TALOS,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_MOOREA,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_SAIPAN,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_RENNELL,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_BITRA,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_DIVAR,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_ATHERTON,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_STRAIT,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_NETRANI,
		.subid = 0,
		.name = "WCN3990",
	},
	{
		.id = WCN3990_CLARENCE,
		.subid = 0,
		.name = "WCN3990",
	}
};
/**
 * hif_get_hw_name(): get a human readable name for the hardware
 * @info: Target Info
 *
 * Return: human readable name for the underlying wifi hardware.
 */
static const char *hif_get_hw_name(struct hif_target_info *info)
{
	int i;

	hif_debug("target version = %d, target revision = %d",
		  info->target_version,
		  info->target_revision);

	if (info->hw_name)
		return info->hw_name;

	for (i = 0; i < ARRAY_SIZE(qwlan_hw_list); i++) {
		if (info->target_version == qwlan_hw_list[i].id &&
		    info->target_revision == qwlan_hw_list[i].subid) {
			return qwlan_hw_list[i].name;
		}
	}

	info->hw_name = qdf_mem_malloc(64);
	if (!info->hw_name)
		return "Unknown Device (nomem)";

	i = qdf_snprint(info->hw_name, 64, "HW_VERSION=%x.",
			info->target_version);
	if (i < 0)
		return "Unknown Device (snprintf failure)";
	else
		return info->hw_name;
}

/**
 * hif_get_hw_info(): retrieve the hardware version, revision and name
 * @scn: hif context
 * @version: version
 * @revision: revision
 * @target_name: target name
 *
 * Return: n/a
 */
void hif_get_hw_info(struct hif_opaque_softc *scn, u32 *version, u32 *revision,
		     const char **target_name)
{
	struct hif_target_info *info = hif_get_target_info_handle(scn);
	struct hif_softc *sc = HIF_GET_SOFTC(scn);

	if (sc->bus_type == QDF_BUS_TYPE_USB)
		hif_usb_get_hw_info(sc);

	*version = info->target_version;
	*revision = info->target_revision;
	*target_name = hif_get_hw_name(info);
}

/**
 * hif_get_dev_ba(): API to get device base address.
 * @hif_handle: hif handle
 *
 * Return: device base address
 */
void *hif_get_dev_ba(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem;
}

qdf_export_symbol(hif_get_dev_ba);

/**
 * hif_get_dev_ba_ce(): API to get device CE base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for CE
 */
void *hif_get_dev_ba_ce(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_ce;
}

qdf_export_symbol(hif_get_dev_ba_ce);

/**
 * hif_get_dev_ba_pmm(): API to get device PMM base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for PMM
 */
void *hif_get_dev_ba_pmm(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_pmm_base;
}

qdf_export_symbol(hif_get_dev_ba_pmm);

uint32_t hif_get_soc_version(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->target_info.soc_version;
}

qdf_export_symbol(hif_get_soc_version);

/**
 * hif_get_dev_ba_cmem(): API to get device CMEM base address.
 * @hif_handle: hif handle
 *
 * Return: dev mem base address for CMEM
 */
void *hif_get_dev_ba_cmem(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;

	return scn->mem_cmem;
}

qdf_export_symbol(hif_get_dev_ba_cmem);

#ifdef FEATURE_RUNTIME_PM
void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool is_get)
{
	if (is_get)
		qdf_runtime_pm_prevent_suspend(&scn->prevent_linkdown_lock);
	else
		qdf_runtime_pm_allow_suspend(&scn->prevent_linkdown_lock);
}

static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
	qdf_runtime_lock_init(&scn->prevent_linkdown_lock);
}

static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
	qdf_runtime_lock_deinit(&scn->prevent_linkdown_lock);
}
#else
static inline
void hif_rtpm_lock_init(struct hif_softc *scn)
{
}

static inline
void hif_rtpm_lock_deinit(struct hif_softc *scn)
{
}
#endif
#ifdef WLAN_CE_INTERRUPT_THRESHOLD_CONFIG
/**
 * hif_get_interrupt_threshold_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle
 *
 * Return: None
 */
static inline
void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
					       struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.ce_status_ring_timer_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_TIMER_THRESHOLD);
		scn->ini_cfg.ce_status_ring_batch_count_threshold =
			cfg_get(psoc,
				CFG_CE_STATUS_RING_BATCH_COUNT_THRESHOLD);
	}
}
#else
static inline
void hif_get_interrupt_threshold_cfg_from_psoc(struct hif_softc *scn,
					       struct wlan_objmgr_psoc *psoc)
{
}
#endif /* WLAN_CE_INTERRUPT_THRESHOLD_CONFIG */

/**
 * hif_get_cfg_from_psoc() - Retrieve ini cfg from psoc
 * @scn: hif context
 * @psoc: psoc objmgr handle
 *
 * Return: None
 */
static inline
void hif_get_cfg_from_psoc(struct hif_softc *scn,
			   struct wlan_objmgr_psoc *psoc)
{
	if (psoc) {
		scn->ini_cfg.disable_wake_irq =
			cfg_get(psoc, CFG_DISABLE_WAKE_IRQ);
		/*
		 * The wake IRQ can't share the same IRQ with the copy
		 * engines. In one-MSI mode we don't know whether the wake
		 * IRQ was triggered or not in the wake IRQ handler (known
		 * issue CR 2055359). To support the wake IRQ, allocate at
		 * least 2 MSI vectors: the first is for the wake IRQ while
		 * the others are shared by the copy engines.
		 */
		if (pld_is_one_msi(scn->qdf_dev->dev)) {
			hif_debug("Disabling wake IRQ in one-MSI mode");
			scn->ini_cfg.disable_wake_irq = true;
		}
		hif_get_interrupt_threshold_cfg_from_psoc(scn, psoc);
	}
}

#if defined(HIF_CE_LOG_INFO) || defined(HIF_BUS_LOG_INFO)
/**
 * hif_recovery_notifier_cb - Recovery notifier callback to log
 * hang event data
 * @block: notifier block
 * @state: state
 * @data: notifier data
 *
 * Return: status
 */
static
int hif_recovery_notifier_cb(struct notifier_block *block, unsigned long state,
			     void *data)
{
	struct qdf_notifer_data *notif_data = data;
	qdf_notif_block *notif_block;
	struct hif_softc *hif_handle;
	bool bus_id_invalid;

	if (!data || !block)
		return -EINVAL;

	notif_block = qdf_container_of(block, qdf_notif_block, notif_block);

	hif_handle = notif_block->priv_data;
	if (!hif_handle)
		return -EINVAL;

	bus_id_invalid = hif_log_bus_info(hif_handle, notif_data->hang_data,
					  &notif_data->offset);
	if (bus_id_invalid)
		return NOTIFY_STOP_MASK;

	hif_log_ce_info(hif_handle, notif_data->hang_data,
			&notif_data->offset);

	return 0;
}

/**
 * hif_register_recovery_notifier - Register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier;

	if (!hif_handle)
		return QDF_STATUS_E_FAILURE;

	hif_notifier = &hif_handle->hif_recovery_notifier;

	hif_notifier->notif_block.notifier_call = hif_recovery_notifier_cb;
	hif_notifier->priv_data = hif_handle;
	return qdf_hang_event_register_notifier(hif_notifier);
}

/**
 * hif_unregister_recovery_notifier - Un-register hif recovery notifier
 * @hif_handle: hif handle
 *
 * Return: status
 */
static
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	qdf_notif_block *hif_notifier = &hif_handle->hif_recovery_notifier;

	return qdf_hang_event_unregister_notifier(hif_notifier);
}
#else
static inline
QDF_STATUS hif_register_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}

static inline
QDF_STATUS hif_unregister_recovery_notifier(struct hif_softc *hif_handle)
{
	return QDF_STATUS_SUCCESS;
}
#endif
#if defined(HIF_CPU_PERF_AFFINE_MASK) || \
	defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)
/**
 * __hif_cpu_hotplug_notify() - CPU hotplug event handler
 * @context: HIF context
 * @cpu: CPU Id of the CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void __hif_cpu_hotplug_notify(void *context,
				     uint32_t cpu, bool cpu_up)
{
	struct hif_softc *scn = context;

	if (!scn)
		return;
	if (hif_is_driver_unloading(scn) || hif_is_recovery_in_progress(scn))
		return;

	if (cpu_up) {
		hif_config_irq_set_perf_affinity_hint(GET_HIF_OPAQUE_HDL(scn));
		hif_debug("Setting affinity for online CPU: %d", cpu);
	} else {
		hif_debug("Skip setting affinity for offline CPU: %d", cpu);
	}
}

/**
 * hif_cpu_hotplug_notify - cpu core up/down notification handler
 * @context: HIF context
 * @cpu: CPU generating the event
 * @cpu_up: true if the CPU is online
 *
 * Return: None
 */
static void hif_cpu_hotplug_notify(void *context, uint32_t cpu, bool cpu_up)
{
	struct qdf_op_sync *op_sync;

	if (qdf_op_protect(&op_sync))
		return;

	__hif_cpu_hotplug_notify(context, cpu, cpu_up);

	qdf_op_unprotect(op_sync);
}

static void hif_cpu_online_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, true);
}

static void hif_cpu_before_offline_cb(void *context, uint32_t cpu)
{
	hif_cpu_hotplug_notify(context, cpu, false);
}

static void hif_cpuhp_register(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot register hotplug notifiers");
		return;
	}

	qdf_cpuhp_register(&scn->cpuhp_event_handle,
			   scn,
			   hif_cpu_online_cb,
			   hif_cpu_before_offline_cb);
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
	if (!scn) {
		hif_info_high("cannot unregister hotplug notifiers");
		return;
	}

	qdf_cpuhp_unregister(&scn->cpuhp_event_handle);
}
#else
static void hif_cpuhp_register(struct hif_softc *scn)
{
}

static void hif_cpuhp_unregister(struct hif_softc *scn)
{
}
#endif /* HIF_CPU_PERF_AFFINE_MASK || FEATURE_ENABLE_CE_DP_IRQ_AFFINE */
#ifdef HIF_DETECTION_LATENCY_ENABLE
/*
 * Bitmask to control enablement of latency detection for the tasklets;
 * bit-X represents the tasklet of WLAN_CE_X.
 */
#ifndef DETECTION_LATENCY_TASKLET_MASK
#define DETECTION_LATENCY_TASKLET_MASK (BIT(2) | BIT(7))
#endif

static inline int
__hif_tasklet_latency(struct hif_softc *scn, bool from_timer, int idx)
{
	qdf_time_t sched_time =
		scn->latency_detect.tasklet_info[idx].sched_time;
	qdf_time_t exec_time =
		scn->latency_detect.tasklet_info[idx].exec_time;
	qdf_time_t curr_time = qdf_system_ticks();
	uint32_t threshold = scn->latency_detect.threshold;
	qdf_time_t expect_exec_time =
		sched_time + qdf_system_msecs_to_ticks(threshold);

	/* Two kinds of check here:
	 * from_timer==true: check whether the tasklet has stalled
	 * from_timer==false: check whether tasklet execution came late
	 */
	if (from_timer ?
	    (qdf_system_time_after(sched_time, exec_time) &&
	     qdf_system_time_after(curr_time, expect_exec_time)) :
	    qdf_system_time_after(exec_time, expect_exec_time)) {
		hif_err("tasklet[%d] latency detected: from_timer %d, curr_time %lu, sched_time %lu, exec_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
			idx, from_timer, curr_time, sched_time,
			exec_time, threshold,
			scn->latency_detect.timeout,
			qdf_get_cpu(), (void *)_RET_IP_);
		qdf_trigger_self_recovery(NULL,
					  QDF_TASKLET_CREDIT_LATENCY_DETECT);
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * hif_tasklet_latency_detect_enabled() - check whether latency detect
 * is enabled for the tasklet which is specified by idx
 * @scn: HIF opaque context
 * @idx: CE id
 *
 * Return: true if latency detect is enabled for the specified tasklet,
 * false otherwise.
 */
static inline bool
hif_tasklet_latency_detect_enabled(struct hif_softc *scn, int idx)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return false;

	if (!scn->latency_detect.enable_detection)
		return false;

	if (idx < 0 || idx >= HIF_TASKLET_IN_MONITOR ||
	    !qdf_test_bit(idx, scn->latency_detect.tasklet_bmap))
		return false;

	return true;
}

void hif_tasklet_latency_record_exec(struct hif_softc *scn, int idx)
{
	if (!hif_tasklet_latency_detect_enabled(scn, idx))
		return;

	/*
	 * hif_set_enable_detection(true) might come between
	 * hif_tasklet_latency_record_sched() and
	 * hif_tasklet_latency_record_exec() during wlan startup; then the
	 * sched_time is 0 but exec_time is not, and we hit the timeout case
	 * in __hif_tasklet_latency().
	 * To avoid such an issue, skip exec_time recording if sched_time has
	 * not been recorded.
	 */
	if (!scn->latency_detect.tasklet_info[idx].sched_time)
		return;

	scn->latency_detect.tasklet_info[idx].exec_time = qdf_system_ticks();
	__hif_tasklet_latency(scn, false, idx);
}

void hif_tasklet_latency_record_sched(struct hif_softc *scn, int idx)
{
	if (!hif_tasklet_latency_detect_enabled(scn, idx))
		return;

	scn->latency_detect.tasklet_info[idx].sched_cpuid = qdf_get_cpu();
	scn->latency_detect.tasklet_info[idx].sched_time = qdf_system_ticks();
}
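
/*
 * Example: how the sched/exec pair is meant to be recorded (illustrative
 * sketch only; the call sites named here are hypothetical). The sched hook
 * runs where the CE tasklet is queued, the exec hook where it actually
 * runs, and __hif_tasklet_latency() compares the two timestamps against
 * the configured threshold:
 *
 *	hif_tasklet_latency_record_sched(scn, ce_id); // at tasklet_schedule()
 *	...
 *	hif_tasklet_latency_record_exec(scn, ce_id);  // at tasklet entry
 */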
static inline void hif_credit_latency(struct hif_softc *scn, bool from_timer)
{
	qdf_time_t credit_request_time =
		scn->latency_detect.credit_request_time;
	qdf_time_t credit_report_time = scn->latency_detect.credit_report_time;
	qdf_time_t curr_jiffies = qdf_system_ticks();
	uint32_t threshold = scn->latency_detect.threshold;
	int cpu_id = qdf_get_cpu();

	/* Two kinds of check here:
	 * from_timer==true: check whether the credit report has stalled
	 * from_timer==false: check whether the credit report came late
	 */
	if ((from_timer ?
	     qdf_system_time_after(credit_request_time, credit_report_time) :
	     qdf_system_time_after(credit_report_time, credit_request_time)) &&
	    qdf_system_time_after(curr_jiffies,
				  credit_request_time +
				  qdf_system_msecs_to_ticks(threshold))) {
		hif_err("credit report latency: from timer %d, curr_jiffies %lu, credit_request_time %lu, credit_report_time %lu, threshold %ums, timeout %ums, cpu_id %d, called: %ps",
			from_timer, curr_jiffies, credit_request_time,
			credit_report_time, threshold,
			scn->latency_detect.timeout,
			cpu_id, (void *)_RET_IP_);
		goto latency;
	}
	return;

latency:
	qdf_trigger_self_recovery(NULL, QDF_TASKLET_CREDIT_LATENCY_DETECT);
}

static inline void hif_tasklet_latency(struct hif_softc *scn, bool from_timer)
{
	int i, ret;

	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
		if (!qdf_test_bit(i, scn->latency_detect.tasklet_bmap))
			continue;

		ret = __hif_tasklet_latency(scn, from_timer, i);
		if (ret)
			return;
	}
}

/**
 * hif_check_detection_latency(): check for tasklet/credit latency
 * @scn: hif context
 * @from_timer: true if called from the timer handler
 * @bitmap_type: bitmap indicating whether to check the tasklets, the
 * credit report, or both
 *
 * Return: none
 */
void hif_check_detection_latency(struct hif_softc *scn,
				 bool from_timer,
				 uint32_t bitmap_type)
{
	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	if (!scn->latency_detect.enable_detection)
		return;

	if (bitmap_type & BIT(HIF_DETECT_TASKLET))
		hif_tasklet_latency(scn, from_timer);

	if (bitmap_type & BIT(HIF_DETECT_CREDIT))
		hif_credit_latency(scn, from_timer);
}
static void hif_latency_detect_timeout_handler(void *arg)
{
	struct hif_softc *scn = (struct hif_softc *)arg;
	int next_cpu, i;
	qdf_cpu_mask cpu_mask = {0};
	struct hif_latency_detect *detect = &scn->latency_detect;

	hif_check_detection_latency(scn, true,
				    BIT(HIF_DETECT_TASKLET) |
				    BIT(HIF_DETECT_CREDIT));

	/*
	 * The timer needs to be started on a different CPU so that it can
	 * detect a tasklet schedule stall. There is still a chance that,
	 * after the timer has been started, the irq/tasklet lands on the
	 * same CPU; the tasklet then executes before the softirq timer, and
	 * if that tasklet stalls, this timer can't detect it. We accept this
	 * as a limitation: if the tasklet stalls, some other place will
	 * detect it, just a little later.
	 */
	qdf_cpumask_copy(&cpu_mask, (const qdf_cpu_mask *)cpu_active_mask);
	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
		if (!qdf_test_bit(i, detect->tasklet_bmap))
			continue;

		qdf_cpumask_clear_cpu(detect->tasklet_info[i].sched_cpuid,
				      &cpu_mask);
	}

	next_cpu = cpumask_first(&cpu_mask);
	if (qdf_unlikely(next_cpu >= nr_cpu_ids)) {
		hif_debug("start timer on local");
		/* no available CPU found, start on the local CPU */
		qdf_timer_mod(&detect->timer, detect->timeout);
	} else {
		qdf_timer_start_on(&detect->timer, detect->timeout, next_cpu);
	}
}

static void hif_latency_detect_timer_init(struct hif_softc *scn)
{
	scn->latency_detect.timeout =
		DETECTION_TIMER_TIMEOUT;
	scn->latency_detect.threshold =
		DETECTION_LATENCY_THRESHOLD;

	hif_info("timer timeout %u, latency threshold %u",
		 scn->latency_detect.timeout,
		 scn->latency_detect.threshold);

	scn->latency_detect.is_timer_started = false;

	qdf_timer_init(NULL,
		       &scn->latency_detect.timer,
		       &hif_latency_detect_timeout_handler,
		       scn,
		       QDF_TIMER_TYPE_SW_SPIN);
}

static void hif_latency_detect_timer_deinit(struct hif_softc *scn)
{
	hif_info("deinit timer");
	qdf_timer_free(&scn->latency_detect.timer);
}

static void hif_latency_detect_init(struct hif_softc *scn)
{
	uint32_t tasklet_mask;
	int i;

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	tasklet_mask = DETECTION_LATENCY_TASKLET_MASK;
	hif_info("tasklet mask is 0x%x", tasklet_mask);
	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++) {
		if (BIT(i) & tasklet_mask)
			qdf_set_bit(i, scn->latency_detect.tasklet_bmap);
	}

	hif_latency_detect_timer_init(scn);
}

static void hif_latency_detect_deinit(struct hif_softc *scn)
{
	int i;

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_latency_detect_timer_deinit(scn);
	for (i = 0; i < HIF_TASKLET_IN_MONITOR; i++)
		qdf_clear_bit(i, scn->latency_detect.tasklet_bmap);
}

void hif_latency_detect_timer_start(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("start timer");
	if (scn->latency_detect.is_timer_started) {
		hif_info("timer has already been started");
		return;
	}

	qdf_timer_start(&scn->latency_detect.timer,
			scn->latency_detect.timeout);
	scn->latency_detect.is_timer_started = true;
}

void hif_latency_detect_timer_stop(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	hif_debug_rl("stop timer");

	qdf_timer_sync_cancel(&scn->latency_detect.timer);
	scn->latency_detect.is_timer_started = false;
}

void hif_latency_detect_credit_record_time(
	enum hif_credit_exchange_type type,
	struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	if (HIF_REQUEST_CREDIT == type)
		scn->latency_detect.credit_request_time = qdf_system_ticks();
	else if (HIF_PROCESS_CREDIT_REPORT == type)
		scn->latency_detect.credit_report_time = qdf_system_ticks();

	hif_check_detection_latency(scn, false, BIT(HIF_DETECT_CREDIT));
}

void hif_set_enable_detection(struct hif_opaque_softc *hif_ctx, bool value)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("scn is null");
		return;
	}

	if (QDF_GLOBAL_MISSION_MODE != hif_get_conparam(scn))
		return;

	scn->latency_detect.enable_detection = value;
}
#else
static inline void hif_latency_detect_init(struct hif_softc *scn)
{}

static inline void hif_latency_detect_deinit(struct hif_softc *scn)
{}
#endif
#ifdef WLAN_FEATURE_AFFINITY_MGR
#define AFFINITY_THRESHOLD 5000000
static inline void
hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
{
	unsigned int cpus;
	qdf_cpu_mask allowed_mask = {0};

	scn->affinity_mgr_supported =
		(cfg_get(psoc, CFG_IRQ_AFFINE_AUDIO_USE_CASE) &&
		 qdf_walt_get_cpus_taken_supported());

	hif_info("Affinity Manager supported: %d", scn->affinity_mgr_supported);

	if (!scn->affinity_mgr_supported)
		return;

	scn->time_threshold = AFFINITY_THRESHOLD;
	qdf_for_each_possible_cpu(cpus)
		if (qdf_topology_physical_package_id(cpus) ==
		    CPU_CLUSTER_TYPE_LITTLE)
			qdf_cpumask_set_cpu(cpus, &allowed_mask);
	qdf_cpumask_copy(&scn->allowed_mask, &allowed_mask);
}
#else
static inline void
hif_affinity_mgr_init(struct hif_softc *scn, struct wlan_objmgr_psoc *psoc)
{
}
#endif

#ifdef FEATURE_DIRECT_LINK
/**
 * hif_init_direct_link_rcv_pipe_num(): Initialize the direct link receive
 * pipe number
 * @scn: hif context
 *
 * Return: None
 */
static inline
void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
{
	scn->dl_recv_pipe_num = INVALID_PIPE_NO;
}
#else
static inline
void hif_init_direct_link_rcv_pipe_num(struct hif_softc *scn)
{
}
#endif
struct hif_opaque_softc *hif_open(qdf_device_t qdf_ctx,
				  uint32_t mode,
				  enum qdf_bus_type bus_type,
				  struct hif_driver_state_callbacks *cbk,
				  struct wlan_objmgr_psoc *psoc)
{
	struct hif_softc *scn;
	QDF_STATUS status = QDF_STATUS_SUCCESS;
	int bus_context_size = hif_bus_get_context_size(bus_type);

	if (bus_context_size == 0) {
		hif_err("context size 0 not allowed");
		return NULL;
	}

	scn = (struct hif_softc *)qdf_mem_malloc(bus_context_size);
	if (!scn)
		return GET_HIF_OPAQUE_HDL(scn);

	scn->qdf_dev = qdf_ctx;
	scn->hif_con_param = mode;
	qdf_atomic_init(&scn->active_tasklet_cnt);
	qdf_atomic_init(&scn->active_grp_tasklet_cnt);
	qdf_atomic_init(&scn->link_suspended);
	qdf_atomic_init(&scn->tasklet_from_intr);
	hif_system_pm_set_state_on(GET_HIF_OPAQUE_HDL(scn));
	qdf_mem_copy(&scn->callbacks, cbk,
		     sizeof(struct hif_driver_state_callbacks));
	scn->bus_type = bus_type;

	hif_allow_ep_vote_access(GET_HIF_OPAQUE_HDL(scn));
	hif_get_cfg_from_psoc(scn, psoc);

	hif_set_event_hist_mask(GET_HIF_OPAQUE_HDL(scn));
	status = hif_bus_open(scn, bus_type);
	if (status != QDF_STATUS_SUCCESS) {
		hif_err("hif_bus_open error = %d, bus_type = %d",
			status, bus_type);
		qdf_mem_free(scn);
		scn = NULL;
		goto out;
	}

	hif_rtpm_lock_init(scn);

	hif_cpuhp_register(scn);
	hif_latency_detect_init(scn);
	hif_affinity_mgr_init(scn, psoc);
	hif_init_direct_link_rcv_pipe_num(scn);
	hif_ce_desc_history_log_register(scn);
	hif_desc_history_log_register();

out:
	return GET_HIF_OPAQUE_HDL(scn);
}
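
/*
 * Example: minimal open/close pairing (illustrative sketch only; the
 * qdf_ctx, callbacks, and psoc setup are a hypothetical caller's
 * responsibility, and hif_close() is defined later in this file):
 *
 *	struct hif_opaque_softc *hif_ctx =
 *		hif_open(qdf_ctx, mode, QDF_BUS_TYPE_PCI, &cbk, psoc);
 *	if (!hif_ctx)
 *		return QDF_STATUS_E_NOMEM;
 *	...
 *	hif_close(hif_ctx);
 */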
#ifdef ADRASTEA_RRI_ON_DDR
/**
 * hif_uninit_rri_on_ddr(): free consistent memory allocated for rri
 * @scn: hif context
 *
 * Return: none
 */
void hif_uninit_rri_on_ddr(struct hif_softc *scn)
{
	if (scn->vaddr_rri_on_ddr)
		qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
					RRI_ON_DDR_MEM_SIZE,
					scn->vaddr_rri_on_ddr,
					scn->paddr_rri_on_ddr, 0);

	scn->vaddr_rri_on_ddr = NULL;
}
#endif

/**
 * hif_close(): close and free the hif context
 * @hif_ctx: hif context
 *
 * Return: n/a
 */
void hif_close(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (!scn) {
		hif_err("hif_opaque_softc is NULL");
		return;
	}

	hif_desc_history_log_unregister();
	hif_ce_desc_history_log_unregister();
	hif_latency_detect_deinit(scn);

	if (scn->athdiag_procfs_inited) {
		athdiag_procfs_remove();
		scn->athdiag_procfs_inited = false;
	}

	if (scn->target_info.hw_name) {
		char *hw_name = scn->target_info.hw_name;

		scn->target_info.hw_name = "ErrUnloading";
		qdf_mem_free(hw_name);
	}

	hif_uninit_rri_on_ddr(scn);
	hif_cleanup_static_buf_to_target(scn);
	hif_cpuhp_unregister(scn);
	hif_rtpm_lock_deinit(scn);

	hif_bus_close(scn);

	qdf_mem_free(scn);
}

/**
 * hif_get_num_active_grp_tasklets() - get the number of active
 * datapath group tasklets pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of datapath group tasklets which are active
 */
static inline int hif_get_num_active_grp_tasklets(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_grp_tasklet_cnt);
}

#if (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
     defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
     defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
     defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
     defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
     defined(QCA_WIFI_QCN6432) || \
     defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
/**
 * hif_get_num_pending_work() - get the number of entries in
 * the workqueue pending to be completed.
 * @scn: HIF context
 *
 * Returns: the number of delayed register write work entries pending
 */
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return hal_get_reg_write_pending_work(scn->hal_soc);
}
#elif defined(FEATURE_HIF_DELAYED_REG_WRITE)
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return qdf_atomic_read(&scn->active_work_cnt);
}
#else
static inline int hif_get_num_pending_work(struct hif_softc *scn)
{
	return 0;
}
#endif

QDF_STATUS hif_try_complete_tasks(struct hif_softc *scn)
{
	uint32_t task_drain_wait_cnt = 0;
	int tasklet = 0, grp_tasklet = 0, work = 0;

	while ((tasklet = hif_get_num_active_tasklets(scn)) ||
	       (grp_tasklet = hif_get_num_active_grp_tasklets(scn)) ||
	       (work = hif_get_num_pending_work(scn))) {
		if (++task_drain_wait_cnt > HIF_TASK_DRAIN_WAIT_CNT) {
			hif_err("pending tasklets %d grp tasklets %d work %d",
				tasklet, grp_tasklet, work);
			QDF_DEBUG_PANIC("Complete tasks takes more than %u ms: tasklets %d grp tasklets %d work %d",
					HIF_TASK_DRAIN_WAIT_CNT * 10,
					tasklet, grp_tasklet, work);
			return QDF_STATUS_E_FAULT;
		}
		hif_info("waiting for tasklets %d grp tasklets %d work %d",
			 tasklet, grp_tasklet, work);
		msleep(10);
	}

	return QDF_STATUS_SUCCESS;
}
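
/*
 * Example: draining HIF work before a bus-level transition (illustrative
 * sketch only; a hypothetical suspend/unload path). The call polls in
 * 10 ms steps until tasklets, group tasklets, and pending register-write
 * work have all drained, and panics if HIF_TASK_DRAIN_WAIT_CNT iterations
 * are exceeded:
 *
 *	if (QDF_IS_STATUS_ERROR(hif_try_complete_tasks(scn)))
 *		return QDF_STATUS_E_FAULT; // tasks did not drain in time
 */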
#ifdef HIF_HAL_REG_ACCESS_SUPPORT
void hif_reg_window_write(struct hif_softc *scn, uint32_t offset,
			  uint32_t value)
{
	hal_write32_mb(scn->hal_soc, offset, value);
}

uint32_t hif_reg_window_read(struct hif_softc *scn, uint32_t offset)
{
	return hal_read32_mb(scn->hal_soc, offset);
}
#endif

#if defined(HIF_IPCI) && defined(FEATURE_HAL_DELAYED_REG_WRITE)
QDF_STATUS hif_try_prevent_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint32_t work_drain_wait_cnt = 0;
	uint32_t wait_cnt = 0;
	int work = 0;

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_DISABLE);

	while ((work = hif_get_num_pending_work(scn))) {
		if (++work_drain_wait_cnt > HIF_WORK_DRAIN_WAIT_CNT) {
			qdf_atomic_set(&scn->dp_ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			qdf_atomic_set(&scn->ep_vote_access,
				       HIF_EP_VOTE_ACCESS_ENABLE);
			hif_err("timeout waiting for pending work %d", work);
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(10);
	}

	if (pld_is_pci_ep_awake(scn->qdf_dev->dev) == -ENOTSUPP)
		return QDF_STATUS_SUCCESS;

	while (pld_is_pci_ep_awake(scn->qdf_dev->dev)) {
		if (++wait_cnt > HIF_EP_WAKE_RESET_WAIT_CNT) {
			hif_err("Release EP vote was not processed by FW");
			return QDF_STATUS_E_FAULT;
		}
		qdf_sleep(5);
	}

	return QDF_STATUS_SUCCESS;
}

void hif_set_ep_intermediate_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
	uint8_t vote_access;

	vote_access = qdf_atomic_read(&scn->ep_vote_access);

	if (vote_access != HIF_EP_VOTE_ACCESS_DISABLE)
		hif_info("EP vote changed from:%u to intermediate state",
			 vote_access);

	if (QDF_IS_STATUS_ERROR(hif_try_prevent_ep_vote_access(hif_ctx)))
		QDF_BUG(0);

	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_INTERMEDIATE_ACCESS);
}

void hif_allow_ep_vote_access(struct hif_opaque_softc *hif_ctx)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	qdf_atomic_set(&scn->dp_ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
	qdf_atomic_set(&scn->ep_vote_access,
		       HIF_EP_VOTE_ACCESS_ENABLE);
}

void hif_set_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			    uint8_t type, uint8_t access)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (type == HIF_EP_VOTE_DP_ACCESS)
		qdf_atomic_set(&scn->dp_ep_vote_access, access);
	else
		qdf_atomic_set(&scn->ep_vote_access, access);
}

uint8_t hif_get_ep_vote_access(struct hif_opaque_softc *hif_ctx,
			       uint8_t type)
{
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

	if (type == HIF_EP_VOTE_DP_ACCESS)
		return qdf_atomic_read(&scn->dp_ep_vote_access);
	else
		return qdf_atomic_read(&scn->ep_vote_access);
}
#endif
  1352. #ifdef FEATURE_HIF_DELAYED_REG_WRITE
  1353. #ifdef MEMORY_DEBUG
  1354. #define HIF_REG_WRITE_QUEUE_LEN 128
  1355. #else
  1356. #define HIF_REG_WRITE_QUEUE_LEN 32
  1357. #endif
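
/*
 * HIF_REG_WRITE_QUEUE_LEN must remain a power of two: both the producer
 * (hif_reg_write_enqueue) and the consumer (hif_reg_write_work) wrap their
 * indices with "& (HIF_REG_WRITE_QUEUE_LEN - 1)".
 */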
  1358. /**
  1359. * hif_print_reg_write_stats() - Print hif delayed reg write stats
  1360. * @hif_ctx: hif opaque handle
  1361. *
  1362. * Return: None
  1363. */
  1364. void hif_print_reg_write_stats(struct hif_opaque_softc *hif_ctx)
  1365. {
  1366. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1367. struct CE_state *ce_state;
  1368. uint32_t *hist;
  1369. int i;
  1370. hist = scn->wstats.sched_delay;
  1371. hif_debug("wstats: enq %u deq %u coal %u direct %u q_depth %u max_q %u sched-delay hist %u %u %u %u",
  1372. qdf_atomic_read(&scn->wstats.enqueues),
  1373. scn->wstats.dequeues,
  1374. qdf_atomic_read(&scn->wstats.coalesces),
  1375. qdf_atomic_read(&scn->wstats.direct),
  1376. qdf_atomic_read(&scn->wstats.q_depth),
  1377. scn->wstats.max_q_depth,
  1378. hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us],
  1379. hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us],
  1380. hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us],
  1381. hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]);
  1382. for (i = 0; i < scn->ce_count; i++) {
  1383. ce_state = scn->ce_id_to_state[i];
  1384. if (!ce_state)
  1385. continue;
  1386. hif_debug("ce%d: enq %u deq %u coal %u direct %u",
  1387. i, ce_state->wstats.enqueues,
  1388. ce_state->wstats.dequeues,
  1389. ce_state->wstats.coalesces,
  1390. ce_state->wstats.direct);
  1391. }
  1392. }
  1393. /**
  1394. * hif_is_reg_write_tput_level_high() - throughput level for delayed reg writes
  1395. * @scn: hif_softc pointer
  1396. *
  1397. * Return: true if throughput is high, else false.
  1398. */
static inline bool hif_is_reg_write_tput_level_high(struct hif_softc *scn)
{
	int bw_level = hif_get_bandwidth_level(GET_HIF_OPAQUE_HDL(scn));

	return bw_level >= PLD_BUS_WIDTH_MEDIUM;
}
  1404. /**
  1405. * hif_reg_write_fill_sched_delay_hist() - fill reg write delay histogram
  1406. * @scn: hif_softc pointer
  1407. * @delay_us: delay in us
  1408. *
  1409. * Return: None
  1410. */
  1411. static inline void hif_reg_write_fill_sched_delay_hist(struct hif_softc *scn,
  1412. uint64_t delay_us)
  1413. {
  1414. uint32_t *hist;
  1415. hist = scn->wstats.sched_delay;
  1416. if (delay_us < 100)
  1417. hist[HIF_REG_WRITE_SCHED_DELAY_SUB_100us]++;
  1418. else if (delay_us < 1000)
  1419. hist[HIF_REG_WRITE_SCHED_DELAY_SUB_1000us]++;
  1420. else if (delay_us < 5000)
  1421. hist[HIF_REG_WRITE_SCHED_DELAY_SUB_5000us]++;
  1422. else
  1423. hist[HIF_REG_WRITE_SCHED_DELAY_GT_5000us]++;
  1424. }
  1425. /**
  1426. * hif_process_reg_write_q_elem() - process a register write queue element
  1427. * @scn: hif_softc pointer
  1428. * @q_elem: pointer to hal register write queue element
  1429. *
 * Return: the value written to the register, or -1 on an invalid element
  1431. */
  1432. static int32_t
  1433. hif_process_reg_write_q_elem(struct hif_softc *scn,
  1434. struct hif_reg_write_q_elem *q_elem)
  1435. {
  1436. struct CE_state *ce_state = q_elem->ce_state;
  1437. uint32_t write_val = -1;
  1438. qdf_spin_lock_bh(&ce_state->ce_index_lock);
  1439. ce_state->reg_write_in_progress = false;
  1440. ce_state->wstats.dequeues++;
  1441. if (ce_state->src_ring) {
  1442. q_elem->dequeue_val = ce_state->src_ring->write_index;
  1443. hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
  1444. ce_state->src_ring->write_index);
  1445. write_val = ce_state->src_ring->write_index;
  1446. } else if (ce_state->dest_ring) {
  1447. q_elem->dequeue_val = ce_state->dest_ring->write_index;
  1448. hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset,
  1449. ce_state->dest_ring->write_index);
  1450. write_val = ce_state->dest_ring->write_index;
  1451. } else {
  1452. hif_debug("invalid reg write received");
  1453. qdf_assert(0);
  1454. }
  1455. q_elem->valid = 0;
  1456. ce_state->last_dequeue_time = q_elem->dequeue_time;
  1457. qdf_spin_unlock_bh(&ce_state->ce_index_lock);
  1458. return write_val;
  1459. }
  1460. /**
  1461. * hif_reg_write_work() - Worker to process delayed writes
  1462. * @arg: hif_softc pointer
  1463. *
  1464. * Return: None
  1465. */
  1466. static void hif_reg_write_work(void *arg)
  1467. {
  1468. struct hif_softc *scn = arg;
  1469. struct hif_reg_write_q_elem *q_elem;
  1470. uint32_t offset;
  1471. uint64_t delta_us;
  1472. int32_t q_depth, write_val;
  1473. uint32_t num_processed = 0;
  1474. int32_t ring_id;
  1475. q_elem = &scn->reg_write_queue[scn->read_idx];
  1476. q_elem->work_scheduled_time = qdf_get_log_timestamp();
  1477. q_elem->cpu_id = qdf_get_cpu();
	/* Ensure the q_elem contents are read coherently across cores */
  1479. qdf_rmb();
  1480. if (!q_elem->valid)
  1481. return;
  1482. q_depth = qdf_atomic_read(&scn->wstats.q_depth);
  1483. if (q_depth > scn->wstats.max_q_depth)
  1484. scn->wstats.max_q_depth = q_depth;
  1485. if (hif_prevent_link_low_power_states(GET_HIF_OPAQUE_HDL(scn))) {
  1486. scn->wstats.prevent_l1_fails++;
  1487. return;
  1488. }
  1489. while (true) {
  1490. qdf_rmb();
  1491. if (!q_elem->valid)
  1492. break;
  1493. qdf_rmb();
  1494. q_elem->dequeue_time = qdf_get_log_timestamp();
  1495. ring_id = q_elem->ce_state->id;
  1496. offset = q_elem->offset;
  1497. delta_us = qdf_log_timestamp_to_usecs(q_elem->dequeue_time -
  1498. q_elem->enqueue_time);
  1499. hif_reg_write_fill_sched_delay_hist(scn, delta_us);
  1500. scn->wstats.dequeues++;
  1501. qdf_atomic_dec(&scn->wstats.q_depth);
  1502. write_val = hif_process_reg_write_q_elem(scn, q_elem);
  1503. hif_debug("read_idx %u ce_id %d offset 0x%x dequeue_val %d",
  1504. scn->read_idx, ring_id, offset, write_val);
  1505. qdf_trace_dp_del_reg_write(ring_id, q_elem->enqueue_val,
  1506. q_elem->dequeue_val,
  1507. q_elem->enqueue_time,
  1508. q_elem->dequeue_time);
  1509. num_processed++;
  1510. scn->read_idx = (scn->read_idx + 1) &
  1511. (HIF_REG_WRITE_QUEUE_LEN - 1);
  1512. q_elem = &scn->reg_write_queue[scn->read_idx];
  1513. }
  1514. hif_allow_link_low_power_states(GET_HIF_OPAQUE_HDL(scn));
  1515. /*
  1516. * Decrement active_work_cnt by the number of elements dequeued after
  1517. * hif_allow_link_low_power_states.
  1518. * This makes sure that hif_try_complete_tasks will wait till we make
  1519. * the bus access in hif_allow_link_low_power_states. This will avoid
  1520. * race condition between delayed register worker and bus suspend
  1521. * (system suspend or runtime suspend).
  1522. *
  1523. * The following decrement should be done at the end!
  1524. */
  1525. qdf_atomic_sub(num_processed, &scn->active_work_cnt);
  1526. }
  1527. /**
  1528. * hif_delayed_reg_write_deinit() - De-Initialize delayed reg write processing
  1529. * @scn: hif_softc pointer
  1530. *
  1531. * De-initialize main data structures to process register writes in a delayed
  1532. * workqueue.
  1533. *
  1534. * Return: None
  1535. */
  1536. static void hif_delayed_reg_write_deinit(struct hif_softc *scn)
  1537. {
  1538. qdf_flush_work(&scn->reg_write_work);
  1539. qdf_disable_work(&scn->reg_write_work);
  1540. qdf_flush_workqueue(0, scn->reg_write_wq);
  1541. qdf_destroy_workqueue(0, scn->reg_write_wq);
  1542. qdf_mem_free(scn->reg_write_queue);
  1543. }
/**
 * hif_delayed_reg_write_init() - Initialization function for delayed reg writes
 * @scn: hif_softc pointer
 *
 * Initialize main data structures to process register writes in a delayed
 * workqueue.
 *
 * Return: QDF_STATUS_SUCCESS on success, QDF_STATUS_E_NOMEM on failure
 */
  1551. static QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
  1552. {
  1553. qdf_atomic_init(&scn->active_work_cnt);
  1554. scn->reg_write_wq =
  1555. qdf_alloc_high_prior_ordered_workqueue("hif_register_write_wq");
  1556. qdf_create_work(0, &scn->reg_write_work, hif_reg_write_work, scn);
  1557. scn->reg_write_queue = qdf_mem_malloc(HIF_REG_WRITE_QUEUE_LEN *
  1558. sizeof(*scn->reg_write_queue));
  1559. if (!scn->reg_write_queue) {
  1560. hif_err("unable to allocate memory for delayed reg write");
  1561. QDF_BUG(0);
  1562. return QDF_STATUS_E_NOMEM;
  1563. }
	/*
	 * Initial indices: read_idx starts at 0, and write_idx starts at -1
	 * so that the first qdf_atomic_inc_return() in hif_reg_write_enqueue()
	 * lands on slot 0.
	 */
	scn->read_idx = 0;
	qdf_atomic_set(&scn->write_idx, -1);
  1567. return QDF_STATUS_SUCCESS;
  1568. }
  1569. static void hif_reg_write_enqueue(struct hif_softc *scn,
  1570. struct CE_state *ce_state,
  1571. uint32_t value)
  1572. {
  1573. struct hif_reg_write_q_elem *q_elem;
  1574. uint32_t write_idx;
  1575. if (ce_state->reg_write_in_progress) {
  1576. hif_debug("Already in progress ce_id %d offset 0x%x value %u",
  1577. ce_state->id, ce_state->ce_wrt_idx_offset, value);
  1578. qdf_atomic_inc(&scn->wstats.coalesces);
  1579. ce_state->wstats.coalesces++;
  1580. return;
  1581. }
  1582. write_idx = qdf_atomic_inc_return(&scn->write_idx);
  1583. write_idx = write_idx & (HIF_REG_WRITE_QUEUE_LEN - 1);
  1584. q_elem = &scn->reg_write_queue[write_idx];
  1585. if (q_elem->valid) {
  1586. hif_err("queue full");
  1587. QDF_BUG(0);
  1588. return;
  1589. }
  1590. qdf_atomic_inc(&scn->wstats.enqueues);
  1591. ce_state->wstats.enqueues++;
  1592. qdf_atomic_inc(&scn->wstats.q_depth);
  1593. q_elem->ce_state = ce_state;
  1594. q_elem->offset = ce_state->ce_wrt_idx_offset;
  1595. q_elem->enqueue_val = value;
  1596. q_elem->enqueue_time = qdf_get_log_timestamp();
	/*
	 * All other fields of the q_elem must be visible in memory before
	 * the valid flag is set to true; otherwise the dequeuing worker
	 * thread may read stale entries and process the wrong srng.
	 */
  1603. qdf_wmb();
  1604. q_elem->valid = true;
	/*
	 * The valid flag itself must also reach memory promptly after the
	 * other fields; otherwise the worker may observe a stale valid flag,
	 * skip this element for the current round, and, if no further work
	 * gets scheduled, never perform this register write at all.
	 */
  1614. qdf_wmb();
  1615. ce_state->reg_write_in_progress = true;
  1616. qdf_atomic_inc(&scn->active_work_cnt);
  1617. hif_debug("write_idx %u ce_id %d offset 0x%x value %u",
  1618. write_idx, ce_state->id, ce_state->ce_wrt_idx_offset, value);
  1619. qdf_queue_work(scn->qdf_dev, scn->reg_write_wq,
  1620. &scn->reg_write_work);
  1621. }
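
/*
 * Queue protocol, as implemented above: the producer (hif_reg_write_enqueue)
 * claims a slot by atomically incrementing write_idx, fills the element,
 * publishes it with a write barrier plus valid = true, and kicks the worker.
 * The consumer (hif_reg_write_work, single-threaded on the ordered
 * workqueue) walks read_idx with a read barrier before each valid test and
 * stops at the first invalid element. Coalescing rides on
 * reg_write_in_progress: while an element for a CE is queued, later writes
 * to the same CE only bump the coalesce counters, and the worker writes the
 * latest write_index value when it eventually dequeues that element.
 */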
  1622. void hif_delayed_reg_write(struct hif_softc *scn, uint32_t ctrl_addr,
  1623. uint32_t val)
  1624. {
  1625. struct CE_state *ce_state;
  1626. int ce_id = COPY_ENGINE_ID(ctrl_addr);
  1627. ce_state = scn->ce_id_to_state[ce_id];
  1628. if (!ce_state->htt_tx_data && !ce_state->htt_rx_data) {
  1629. hif_reg_write_enqueue(scn, ce_state, val);
  1630. return;
  1631. }
  1632. if (hif_is_reg_write_tput_level_high(scn) ||
  1633. (PLD_MHI_STATE_L0 == pld_get_mhi_state(scn->qdf_dev->dev))) {
  1634. hal_write32_mb(scn->hal_soc, ce_state->ce_wrt_idx_offset, val);
  1635. qdf_atomic_inc(&scn->wstats.direct);
  1636. ce_state->wstats.direct++;
  1637. } else {
  1638. hif_reg_write_enqueue(scn, ce_state, val);
  1639. }
  1640. }
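
/*
 * Write policy, as implemented above: non-HTT CEs always take the delayed
 * queue. HTT tx/rx data CEs write the register directly when throughput is
 * already high or the MHI link is in L0 (so there is no wakeup cost), and
 * fall back to the delayed queue otherwise.
 */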
  1641. #else
  1642. static inline QDF_STATUS hif_delayed_reg_write_init(struct hif_softc *scn)
  1643. {
  1644. return QDF_STATUS_SUCCESS;
  1645. }
  1646. static inline void hif_delayed_reg_write_deinit(struct hif_softc *scn)
  1647. {
  1648. }
  1649. #endif
  1650. #if defined(QCA_WIFI_WCN6450)
  1651. static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
  1652. {
  1653. scn->hal_soc = hal_attach(hif_softc_to_hif_opaque_softc(scn),
  1654. scn->qdf_dev);
  1655. if (!scn->hal_soc)
  1656. return QDF_STATUS_E_FAILURE;
  1657. return QDF_STATUS_SUCCESS;
  1658. }
  1659. static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
  1660. {
  1661. hal_detach(scn->hal_soc);
  1662. scn->hal_soc = NULL;
  1663. return QDF_STATUS_SUCCESS;
  1664. }
  1665. #elif (defined(QCA_WIFI_QCA8074) || defined(QCA_WIFI_QCA6018) || \
  1666. defined(QCA_WIFI_QCA6290) || defined(QCA_WIFI_QCA6390) || \
  1667. defined(QCA_WIFI_QCN9000) || defined(QCA_WIFI_QCA6490) || \
  1668. defined(QCA_WIFI_QCA6750) || defined(QCA_WIFI_QCA5018) || \
  1669. defined(QCA_WIFI_KIWI) || defined(QCA_WIFI_QCN9224) || \
	defined(QCA_WIFI_QCA9574) || defined(QCA_WIFI_QCA5332))
  1671. static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
  1672. {
  1673. if (ce_srng_based(scn)) {
  1674. scn->hal_soc = hal_attach(
  1675. hif_softc_to_hif_opaque_softc(scn),
  1676. scn->qdf_dev);
  1677. if (!scn->hal_soc)
  1678. return QDF_STATUS_E_FAILURE;
  1679. }
  1680. return QDF_STATUS_SUCCESS;
  1681. }
  1682. static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
  1683. {
  1684. if (ce_srng_based(scn)) {
  1685. hal_detach(scn->hal_soc);
  1686. scn->hal_soc = NULL;
  1687. }
  1688. return QDF_STATUS_SUCCESS;
  1689. }
  1690. #else
  1691. static QDF_STATUS hif_hal_attach(struct hif_softc *scn)
  1692. {
  1693. return QDF_STATUS_SUCCESS;
  1694. }
  1695. static QDF_STATUS hif_hal_detach(struct hif_softc *scn)
  1696. {
  1697. return QDF_STATUS_SUCCESS;
  1698. }
  1699. #endif
  1700. int hif_init_dma_mask(struct device *dev, enum qdf_bus_type bus_type)
  1701. {
  1702. int ret;
  1703. switch (bus_type) {
  1704. case QDF_BUS_TYPE_IPCI:
  1705. ret = qdf_set_dma_coherent_mask(dev,
  1706. DMA_COHERENT_MASK_DEFAULT);
  1707. if (ret) {
  1708. hif_err("Failed to set dma mask error = %d", ret);
  1709. return ret;
  1710. }
  1711. break;
  1712. default:
  1713. /* Follow the existing sequence for other targets */
  1714. break;
  1715. }
  1716. return 0;
  1717. }
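
/*
 * Illustrative probe-time usage of the helper above (hypothetical caller):
 *
 *	ret = hif_init_dma_mask(dev, QDF_BUS_TYPE_IPCI);
 *	if (ret)
 *		return ret;
 *
 * For bus types other than IPCI this is currently a no-op returning 0.
 */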
/**
 * hif_enable() - enable the bus, attach HAL and configure the target
 * @hif_ctx: HIF opaque context
 * @dev: device
 * @bdev: bus dev
 * @bid: bus ID
 * @bus_type: bus type
 * @type: enable type
 *
 * Return: QDF_STATUS
 */
  1729. QDF_STATUS hif_enable(struct hif_opaque_softc *hif_ctx, struct device *dev,
  1730. void *bdev,
  1731. const struct hif_bus_id *bid,
  1732. enum qdf_bus_type bus_type,
  1733. enum hif_enable_type type)
  1734. {
  1735. QDF_STATUS status;
  1736. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1737. if (!scn) {
  1738. hif_err("hif_ctx = NULL");
  1739. return QDF_STATUS_E_NULL_VALUE;
  1740. }
  1741. status = hif_enable_bus(scn, dev, bdev, bid, type);
  1742. if (status != QDF_STATUS_SUCCESS) {
  1743. hif_err("hif_enable_bus error = %d", status);
  1744. return status;
  1745. }
  1746. status = hif_hal_attach(scn);
  1747. if (status != QDF_STATUS_SUCCESS) {
  1748. hif_err("hal attach failed");
  1749. goto disable_bus;
  1750. }
  1751. if (hif_delayed_reg_write_init(scn) != QDF_STATUS_SUCCESS) {
  1752. hif_err("unable to initialize delayed reg write");
  1753. goto hal_detach;
  1754. }
  1755. if (hif_bus_configure(scn)) {
  1756. hif_err("Target probe failed");
  1757. status = QDF_STATUS_E_FAILURE;
  1758. goto hal_detach;
  1759. }
  1760. hif_ut_suspend_init(scn);
  1761. hif_register_recovery_notifier(scn);
  1762. hif_latency_detect_timer_start(hif_ctx);
	/*
	 * Flag to avoid potential unallocated memory access from MSI
	 * interrupt handlers, which can get scheduled as soon as MSI is
	 * enabled, i.e. to handle the race where MSI is enabled before the
	 * memory used by the handlers has been allocated.
	 */
  1770. scn->hif_init_done = true;
  1771. hif_debug("OK");
  1772. return QDF_STATUS_SUCCESS;
  1773. hal_detach:
  1774. hif_hal_detach(scn);
  1775. disable_bus:
  1776. hif_disable_bus(scn);
  1777. return status;
  1778. }
  1779. void hif_disable(struct hif_opaque_softc *hif_ctx, enum hif_disable_type type)
  1780. {
  1781. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1782. if (!scn)
  1783. return;
  1784. hif_delayed_reg_write_deinit(scn);
  1785. hif_set_enable_detection(hif_ctx, false);
  1786. hif_latency_detect_timer_stop(hif_ctx);
  1787. hif_unregister_recovery_notifier(scn);
  1788. hif_nointrs(scn);
  1789. if (scn->hif_init_done == false)
  1790. hif_shutdown_device(hif_ctx);
  1791. else
  1792. hif_stop(hif_ctx);
  1793. hif_hal_detach(scn);
  1794. hif_disable_bus(scn);
  1795. hif_wlan_disable(scn);
  1796. scn->notice_send = false;
  1797. hif_debug("X");
  1798. }
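
/*
 * Note on the teardown order above: delayed register write processing is
 * torn down first so no worker touches the bus mid-teardown, and the target
 * goes through hif_shutdown_device() when init never completed
 * (hif_init_done == false), or through the full hif_stop() path otherwise.
 */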
  1799. #ifdef CE_TASKLET_DEBUG_ENABLE
  1800. void hif_enable_ce_latency_stats(struct hif_opaque_softc *hif_ctx, uint8_t val)
  1801. {
  1802. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1803. if (!scn)
  1804. return;
  1805. scn->ce_latency_stats = val;
  1806. }
  1807. #endif
  1808. void hif_display_stats(struct hif_opaque_softc *hif_ctx)
  1809. {
  1810. hif_display_bus_stats(hif_ctx);
  1811. }
  1812. qdf_export_symbol(hif_display_stats);
  1813. void hif_clear_stats(struct hif_opaque_softc *hif_ctx)
  1814. {
  1815. hif_clear_bus_stats(hif_ctx);
  1816. }
  1817. /**
  1818. * hif_crash_shutdown_dump_bus_register() - dump bus registers
  1819. * @hif_ctx: hif_ctx
  1820. *
  1821. * Return: n/a
  1822. */
  1823. #if defined(TARGET_RAMDUMP_AFTER_KERNEL_PANIC) && defined(WLAN_FEATURE_BMI)
  1824. static void hif_crash_shutdown_dump_bus_register(void *hif_ctx)
  1825. {
  1826. struct hif_opaque_softc *scn = hif_ctx;
  1827. if (hif_check_soc_status(scn))
  1828. return;
  1829. if (hif_dump_registers(scn))
  1830. hif_err("Failed to dump bus registers!");
  1831. }
  1832. /**
  1833. * hif_crash_shutdown(): hif_crash_shutdown
  1834. *
  1835. * This function is called by the platform driver to dump CE registers
  1836. *
  1837. * @hif_ctx: hif_ctx
  1838. *
  1839. * Return: n/a
  1840. */
  1841. void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
  1842. {
  1843. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1844. if (!hif_ctx)
  1845. return;
  1846. if (scn->bus_type == QDF_BUS_TYPE_SNOC) {
  1847. hif_warn("RAM dump disabled for bustype %d", scn->bus_type);
  1848. return;
  1849. }
  1850. if (TARGET_STATUS_RESET == scn->target_status) {
  1851. hif_warn("Target is already asserted, ignore!");
  1852. return;
  1853. }
  1854. if (hif_is_load_or_unload_in_progress(scn)) {
  1855. hif_err("Load/unload is in progress, ignore!");
  1856. return;
  1857. }
  1858. hif_crash_shutdown_dump_bus_register(hif_ctx);
  1859. hif_set_target_status(hif_ctx, TARGET_STATUS_RESET);
  1860. if (ol_copy_ramdump(hif_ctx))
  1861. goto out;
	hif_info("RAM dump collection completed!");
  1863. out:
  1864. return;
  1865. }
  1866. #else
  1867. void hif_crash_shutdown(struct hif_opaque_softc *hif_ctx)
  1868. {
  1869. hif_debug("Collecting target RAM dump disabled");
  1870. }
  1871. #endif /* TARGET_RAMDUMP_AFTER_KERNEL_PANIC */
  1872. #ifdef QCA_WIFI_3_0
  1873. /**
  1874. * hif_check_fw_reg(): hif_check_fw_reg
  1875. * @scn: scn
  1876. *
  1877. * Return: int
  1878. */
  1879. int hif_check_fw_reg(struct hif_opaque_softc *scn)
  1880. {
  1881. return 0;
  1882. }
  1883. #endif
  1884. /**
  1885. * hif_read_phy_mem_base(): hif_read_phy_mem_base
  1886. * @scn: scn
  1887. * @phy_mem_base: physical mem base
  1888. *
  1889. * Return: n/a
  1890. */
  1891. void hif_read_phy_mem_base(struct hif_softc *scn, qdf_dma_addr_t *phy_mem_base)
  1892. {
  1893. *phy_mem_base = scn->mem_pa;
  1894. }
  1895. qdf_export_symbol(hif_read_phy_mem_base);
  1896. /**
  1897. * hif_get_device_type(): hif_get_device_type
  1898. * @device_id: device_id
  1899. * @revision_id: revision_id
  1900. * @hif_type: returned hif_type
  1901. * @target_type: returned target_type
  1902. *
  1903. * Return: int
  1904. */
  1905. int hif_get_device_type(uint32_t device_id,
  1906. uint32_t revision_id,
  1907. uint32_t *hif_type, uint32_t *target_type)
  1908. {
  1909. int ret = 0;
  1910. switch (device_id) {
  1911. case ADRASTEA_DEVICE_ID_P2_E12:
  1912. *hif_type = HIF_TYPE_ADRASTEA;
  1913. *target_type = TARGET_TYPE_ADRASTEA;
  1914. break;
  1915. case AR9888_DEVICE_ID:
  1916. *hif_type = HIF_TYPE_AR9888;
  1917. *target_type = TARGET_TYPE_AR9888;
  1918. break;
  1919. case AR6320_DEVICE_ID:
  1920. switch (revision_id) {
  1921. case AR6320_FW_1_1:
  1922. case AR6320_FW_1_3:
  1923. *hif_type = HIF_TYPE_AR6320;
  1924. *target_type = TARGET_TYPE_AR6320;
  1925. break;
  1926. case AR6320_FW_2_0:
  1927. case AR6320_FW_3_0:
  1928. case AR6320_FW_3_2:
  1929. *hif_type = HIF_TYPE_AR6320V2;
  1930. *target_type = TARGET_TYPE_AR6320V2;
  1931. break;
  1932. default:
  1933. hif_err("dev_id = 0x%x, rev_id = 0x%x",
  1934. device_id, revision_id);
  1935. ret = -ENODEV;
  1936. goto end;
  1937. }
  1938. break;
  1939. case AR9887_DEVICE_ID:
  1940. *hif_type = HIF_TYPE_AR9888;
  1941. *target_type = TARGET_TYPE_AR9888;
  1942. hif_info(" *********** AR9887 **************");
  1943. break;
  1944. case QCA9984_DEVICE_ID:
  1945. *hif_type = HIF_TYPE_QCA9984;
  1946. *target_type = TARGET_TYPE_QCA9984;
  1947. hif_info(" *********** QCA9984 *************");
  1948. break;
  1949. case QCA9888_DEVICE_ID:
  1950. *hif_type = HIF_TYPE_QCA9888;
  1951. *target_type = TARGET_TYPE_QCA9888;
  1952. hif_info(" *********** QCA9888 *************");
  1953. break;
  1954. case AR900B_DEVICE_ID:
  1955. *hif_type = HIF_TYPE_AR900B;
  1956. *target_type = TARGET_TYPE_AR900B;
  1957. hif_info(" *********** AR900B *************");
  1958. break;
  1959. case QCA8074_DEVICE_ID:
  1960. *hif_type = HIF_TYPE_QCA8074;
  1961. *target_type = TARGET_TYPE_QCA8074;
  1962. hif_info(" *********** QCA8074 *************");
  1963. break;
  1964. case QCA6290_EMULATION_DEVICE_ID:
  1965. case QCA6290_DEVICE_ID:
  1966. *hif_type = HIF_TYPE_QCA6290;
  1967. *target_type = TARGET_TYPE_QCA6290;
  1968. hif_info(" *********** QCA6290EMU *************");
  1969. break;
  1970. case QCN9000_DEVICE_ID:
  1971. *hif_type = HIF_TYPE_QCN9000;
  1972. *target_type = TARGET_TYPE_QCN9000;
  1973. hif_info(" *********** QCN9000 *************");
  1974. break;
  1975. case QCN9224_DEVICE_ID:
  1976. *hif_type = HIF_TYPE_QCN9224;
  1977. *target_type = TARGET_TYPE_QCN9224;
  1978. hif_info(" *********** QCN9224 *************");
  1979. break;
  1980. case QCN6122_DEVICE_ID:
  1981. *hif_type = HIF_TYPE_QCN6122;
  1982. *target_type = TARGET_TYPE_QCN6122;
  1983. hif_info(" *********** QCN6122 *************");
  1984. break;
  1985. case QCN9160_DEVICE_ID:
  1986. *hif_type = HIF_TYPE_QCN9160;
  1987. *target_type = TARGET_TYPE_QCN9160;
  1988. hif_info(" *********** QCN9160 *************");
  1989. break;
  1990. case QCN6432_DEVICE_ID:
  1991. *hif_type = HIF_TYPE_QCN6432;
  1992. *target_type = TARGET_TYPE_QCN6432;
  1993. hif_info(" *********** QCN6432 *************");
  1994. break;
  1995. case QCN7605_DEVICE_ID:
  1996. case QCN7605_COMPOSITE:
  1997. case QCN7605_STANDALONE:
  1998. case QCN7605_STANDALONE_V2:
  1999. case QCN7605_COMPOSITE_V2:
  2000. *hif_type = HIF_TYPE_QCN7605;
  2001. *target_type = TARGET_TYPE_QCN7605;
  2002. hif_info(" *********** QCN7605 *************");
  2003. break;
  2004. case QCA6390_DEVICE_ID:
  2005. case QCA6390_EMULATION_DEVICE_ID:
  2006. *hif_type = HIF_TYPE_QCA6390;
  2007. *target_type = TARGET_TYPE_QCA6390;
  2008. hif_info(" *********** QCA6390 *************");
  2009. break;
  2010. case QCA6490_DEVICE_ID:
  2011. case QCA6490_EMULATION_DEVICE_ID:
  2012. *hif_type = HIF_TYPE_QCA6490;
  2013. *target_type = TARGET_TYPE_QCA6490;
  2014. hif_info(" *********** QCA6490 *************");
  2015. break;
  2016. case QCA6750_DEVICE_ID:
  2017. case QCA6750_EMULATION_DEVICE_ID:
  2018. *hif_type = HIF_TYPE_QCA6750;
  2019. *target_type = TARGET_TYPE_QCA6750;
  2020. hif_info(" *********** QCA6750 *************");
  2021. break;
  2022. case KIWI_DEVICE_ID:
  2023. *hif_type = HIF_TYPE_KIWI;
  2024. *target_type = TARGET_TYPE_KIWI;
  2025. hif_info(" *********** KIWI *************");
  2026. break;
  2027. case MANGO_DEVICE_ID:
  2028. *hif_type = HIF_TYPE_MANGO;
  2029. *target_type = TARGET_TYPE_MANGO;
  2030. hif_info(" *********** MANGO *************");
  2031. break;
  2032. case PEACH_DEVICE_ID:
  2033. *hif_type = HIF_TYPE_PEACH;
  2034. *target_type = TARGET_TYPE_PEACH;
  2035. hif_info(" *********** PEACH *************");
  2036. break;
  2037. case QCA8074V2_DEVICE_ID:
  2038. *hif_type = HIF_TYPE_QCA8074V2;
  2039. *target_type = TARGET_TYPE_QCA8074V2;
  2040. hif_info(" *********** QCA8074V2 *************");
  2041. break;
  2042. case QCA6018_DEVICE_ID:
  2043. case RUMIM2M_DEVICE_ID_NODE0:
  2044. case RUMIM2M_DEVICE_ID_NODE1:
  2045. case RUMIM2M_DEVICE_ID_NODE2:
  2046. case RUMIM2M_DEVICE_ID_NODE3:
  2047. case RUMIM2M_DEVICE_ID_NODE4:
  2048. case RUMIM2M_DEVICE_ID_NODE5:
  2049. *hif_type = HIF_TYPE_QCA6018;
  2050. *target_type = TARGET_TYPE_QCA6018;
  2051. hif_info(" *********** QCA6018 *************");
  2052. break;
  2053. case QCA5018_DEVICE_ID:
  2054. *hif_type = HIF_TYPE_QCA5018;
  2055. *target_type = TARGET_TYPE_QCA5018;
  2056. hif_info(" *********** qca5018 *************");
  2057. break;
  2058. case QCA5332_DEVICE_ID:
  2059. *hif_type = HIF_TYPE_QCA5332;
  2060. *target_type = TARGET_TYPE_QCA5332;
  2061. hif_info(" *********** QCA5332 *************");
  2062. break;
  2063. case QCA9574_DEVICE_ID:
  2064. *hif_type = HIF_TYPE_QCA9574;
  2065. *target_type = TARGET_TYPE_QCA9574;
  2066. hif_info(" *********** QCA9574 *************");
  2067. break;
  2068. case WCN6450_DEVICE_ID:
  2069. *hif_type = HIF_TYPE_WCN6450;
  2070. *target_type = TARGET_TYPE_WCN6450;
  2071. hif_info(" *********** WCN6450 *************");
  2072. break;
  2073. default:
  2074. hif_err("Unsupported device ID = 0x%x!", device_id);
  2075. ret = -ENODEV;
  2076. break;
  2077. }
  2078. if (*target_type == TARGET_TYPE_UNKNOWN) {
  2079. hif_err("Unsupported target_type!");
  2080. ret = -ENODEV;
  2081. }
  2082. end:
  2083. return ret;
  2084. }
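
/*
 * Note: for an unrecognized device ID the default branch above leaves
 * *target_type untouched, so the TARGET_TYPE_UNKNOWN check only works if
 * the caller pre-initializes it. An illustrative (hypothetical) call:
 *
 *	uint32_t hif_type = 0, target_type = TARGET_TYPE_UNKNOWN;
 *	int ret = hif_get_device_type(QCA6490_DEVICE_ID, 0,
 *				      &hif_type, &target_type);
 */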
  2085. /**
  2086. * hif_get_bus_type() - return the bus type
  2087. * @hif_hdl: HIF Context
  2088. *
  2089. * Return: enum qdf_bus_type
  2090. */
  2091. enum qdf_bus_type hif_get_bus_type(struct hif_opaque_softc *hif_hdl)
  2092. {
  2093. struct hif_softc *scn = HIF_GET_SOFTC(hif_hdl);
  2094. return scn->bus_type;
  2095. }
/*
 * Target info and ini parameters are global to the driver.
 * These structures are therefore exposed to all the modules in the driver,
 * which don't need to maintain multiple copies of the same info; instead,
 * they get the handle from HIF and modify it there.
 */
  2103. /**
  2104. * hif_get_ini_handle() - API to get hif_config_param handle
  2105. * @hif_ctx: HIF Context
  2106. *
  2107. * Return: pointer to hif_config_info
  2108. */
  2109. struct hif_config_info *hif_get_ini_handle(struct hif_opaque_softc *hif_ctx)
  2110. {
  2111. struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
  2112. return &sc->hif_config;
  2113. }
  2114. /**
  2115. * hif_get_target_info_handle() - API to get hif_target_info handle
  2116. * @hif_ctx: HIF context
  2117. *
  2118. * Return: Pointer to hif_target_info
  2119. */
  2120. struct hif_target_info *hif_get_target_info_handle(
  2121. struct hif_opaque_softc *hif_ctx)
  2122. {
  2123. struct hif_softc *sc = HIF_GET_SOFTC(hif_ctx);
  2124. return &sc->target_info;
  2125. }
  2126. qdf_export_symbol(hif_get_target_info_handle);
  2127. #ifdef RECEIVE_OFFLOAD
  2128. void hif_offld_flush_cb_register(struct hif_opaque_softc *scn,
  2129. void (offld_flush_handler)(void *))
  2130. {
  2131. if (hif_napi_enabled(scn, -1))
  2132. hif_napi_rx_offld_flush_cb_register(scn, offld_flush_handler);
  2133. else
  2134. hif_err("NAPI not enabled");
  2135. }
  2136. qdf_export_symbol(hif_offld_flush_cb_register);
  2137. void hif_offld_flush_cb_deregister(struct hif_opaque_softc *scn)
  2138. {
  2139. if (hif_napi_enabled(scn, -1))
  2140. hif_napi_rx_offld_flush_cb_deregister(scn);
  2141. else
  2142. hif_err("NAPI not enabled");
  2143. }
  2144. qdf_export_symbol(hif_offld_flush_cb_deregister);
  2145. int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
  2146. {
  2147. if (hif_napi_enabled(hif_hdl, -1))
  2148. return NAPI_PIPE2ID(ctx_id);
  2149. else
  2150. return ctx_id;
  2151. }
  2152. #else /* RECEIVE_OFFLOAD */
  2153. int hif_get_rx_ctx_id(int ctx_id, struct hif_opaque_softc *hif_hdl)
  2154. {
  2155. return 0;
  2156. }
  2157. qdf_export_symbol(hif_get_rx_ctx_id);
  2158. #endif /* RECEIVE_OFFLOAD */
  2159. #if defined(FEATURE_LRO)
  2160. /**
  2161. * hif_get_lro_info - Returns LRO instance for instance ID
  2162. * @ctx_id: LRO instance ID
  2163. * @hif_hdl: HIF Context
  2164. *
  2165. * Return: Pointer to LRO instance.
  2166. */
  2167. void *hif_get_lro_info(int ctx_id, struct hif_opaque_softc *hif_hdl)
  2168. {
  2169. void *data;
  2170. if (hif_napi_enabled(hif_hdl, -1))
  2171. data = hif_napi_get_lro_info(hif_hdl, ctx_id);
  2172. else
  2173. data = hif_ce_get_lro_ctx(hif_hdl, ctx_id);
  2174. return data;
  2175. }
  2176. #endif
  2177. /**
  2178. * hif_get_target_status - API to get target status
  2179. * @hif_ctx: HIF Context
  2180. *
  2181. * Return: enum hif_target_status
  2182. */
  2183. enum hif_target_status hif_get_target_status(struct hif_opaque_softc *hif_ctx)
  2184. {
  2185. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2186. return scn->target_status;
  2187. }
  2188. qdf_export_symbol(hif_get_target_status);
  2189. /**
  2190. * hif_set_target_status() - API to set target status
  2191. * @hif_ctx: HIF Context
  2192. * @status: Target Status
  2193. *
  2194. * Return: void
  2195. */
  2196. void hif_set_target_status(struct hif_opaque_softc *hif_ctx, enum
  2197. hif_target_status status)
  2198. {
  2199. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2200. scn->target_status = status;
  2201. }
  2202. /**
  2203. * hif_init_ini_config() - API to initialize HIF configuration parameters
  2204. * @hif_ctx: HIF Context
  2205. * @cfg: HIF Configuration
  2206. *
  2207. * Return: void
  2208. */
  2209. void hif_init_ini_config(struct hif_opaque_softc *hif_ctx,
  2210. struct hif_config_info *cfg)
  2211. {
  2212. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2213. qdf_mem_copy(&scn->hif_config, cfg, sizeof(struct hif_config_info));
  2214. }
  2215. /**
  2216. * hif_get_conparam() - API to get driver mode in HIF
  2217. * @scn: HIF Context
  2218. *
  2219. * Return: driver mode of operation
  2220. */
  2221. uint32_t hif_get_conparam(struct hif_softc *scn)
  2222. {
  2223. if (!scn)
  2224. return 0;
  2225. return scn->hif_con_param;
  2226. }
  2227. /**
  2228. * hif_get_callbacks_handle() - API to get callbacks Handle
  2229. * @scn: HIF Context
  2230. *
  2231. * Return: pointer to HIF Callbacks
  2232. */
  2233. struct hif_driver_state_callbacks *hif_get_callbacks_handle(
  2234. struct hif_softc *scn)
  2235. {
  2236. return &scn->callbacks;
  2237. }
  2238. /**
  2239. * hif_is_driver_unloading() - API to query upper layers if driver is unloading
  2240. * @scn: HIF Context
  2241. *
  2242. * Return: True/False
  2243. */
  2244. bool hif_is_driver_unloading(struct hif_softc *scn)
  2245. {
  2246. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  2247. if (cbk && cbk->is_driver_unloading)
  2248. return cbk->is_driver_unloading(cbk->context);
  2249. return false;
  2250. }
  2251. /**
  2252. * hif_is_load_or_unload_in_progress() - API to query upper layers if
  2253. * load/unload in progress
  2254. * @scn: HIF Context
  2255. *
  2256. * Return: True/False
  2257. */
  2258. bool hif_is_load_or_unload_in_progress(struct hif_softc *scn)
  2259. {
  2260. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  2261. if (cbk && cbk->is_load_unload_in_progress)
  2262. return cbk->is_load_unload_in_progress(cbk->context);
  2263. return false;
  2264. }
  2265. /**
  2266. * hif_is_recovery_in_progress() - API to query upper layers if recovery in
  2267. * progress
  2268. * @scn: HIF Context
  2269. *
  2270. * Return: True/False
  2271. */
  2272. bool hif_is_recovery_in_progress(struct hif_softc *scn)
  2273. {
  2274. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  2275. if (cbk && cbk->is_recovery_in_progress)
  2276. return cbk->is_recovery_in_progress(cbk->context);
  2277. return false;
  2278. }
  2279. #if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB) || \
  2280. defined(HIF_IPCI)
  2281. /**
  2282. * hif_update_pipe_callback() - API to register pipe specific callbacks
  2283. * @osc: Opaque softc
  2284. * @pipeid: pipe id
  2285. * @callbacks: callbacks to register
  2286. *
  2287. * Return: void
  2288. */
  2289. void hif_update_pipe_callback(struct hif_opaque_softc *osc,
  2290. u_int8_t pipeid,
  2291. struct hif_msg_callbacks *callbacks)
  2292. {
  2293. struct hif_softc *scn = HIF_GET_SOFTC(osc);
  2294. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  2295. struct HIF_CE_pipe_info *pipe_info;
  2296. QDF_BUG(pipeid < CE_COUNT_MAX);
  2297. hif_debug("pipeid: %d", pipeid);
  2298. pipe_info = &hif_state->pipe_info[pipeid];
  2299. qdf_mem_copy(&pipe_info->pipe_callbacks,
  2300. callbacks, sizeof(pipe_info->pipe_callbacks));
  2301. }
  2302. qdf_export_symbol(hif_update_pipe_callback);
/**
 * hif_is_target_ready() - API to query if target is in ready state
 * @scn: HIF Context
 *
 * Return: True/False
 */
  2310. bool hif_is_target_ready(struct hif_softc *scn)
  2311. {
  2312. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  2313. if (cbk && cbk->is_target_ready)
  2314. return cbk->is_target_ready(cbk->context);
	/*
	 * If the callback is not registered then there is no way to
	 * determine whether the target is ready. In such a case, return
	 * true to indicate that the target is ready.
	 */
  2320. return true;
  2321. }
  2322. qdf_export_symbol(hif_is_target_ready);
  2323. int hif_get_bandwidth_level(struct hif_opaque_softc *hif_handle)
  2324. {
  2325. struct hif_softc *scn = HIF_GET_SOFTC(hif_handle);
  2326. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  2327. if (cbk && cbk->get_bandwidth_level)
  2328. return cbk->get_bandwidth_level(cbk->context);
  2329. return 0;
  2330. }
  2331. qdf_export_symbol(hif_get_bandwidth_level);
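
/*
 * When no bandwidth callback is registered, hif_get_bandwidth_level()
 * reports 0; hif_is_reg_write_tput_level_high() then treats throughput as
 * low (below PLD_BUS_WIDTH_MEDIUM) and register writes take the delayed
 * queue.
 */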
  2332. #ifdef DP_MEM_PRE_ALLOC
  2333. void *hif_mem_alloc_consistent_unaligned(struct hif_softc *scn,
  2334. qdf_size_t size,
  2335. qdf_dma_addr_t *paddr,
  2336. uint32_t ring_type,
  2337. uint8_t *is_mem_prealloc)
  2338. {
  2339. void *vaddr = NULL;
  2340. struct hif_driver_state_callbacks *cbk =
  2341. hif_get_callbacks_handle(scn);
  2342. *is_mem_prealloc = false;
  2343. if (cbk && cbk->prealloc_get_consistent_mem_unaligned) {
  2344. vaddr = cbk->prealloc_get_consistent_mem_unaligned(size,
  2345. paddr,
  2346. ring_type);
  2347. if (vaddr) {
  2348. *is_mem_prealloc = true;
  2349. goto end;
  2350. }
  2351. }
  2352. vaddr = qdf_mem_alloc_consistent(scn->qdf_dev,
  2353. scn->qdf_dev->dev,
  2354. size,
  2355. paddr);
  2356. end:
  2357. dp_info("%s va_unaligned %pK pa_unaligned %pK size %d ring_type %d",
  2358. *is_mem_prealloc ? "pre-alloc" : "dynamic-alloc", vaddr,
  2359. (void *)*paddr, (int)size, ring_type);
  2360. return vaddr;
  2361. }
  2362. void hif_mem_free_consistent_unaligned(struct hif_softc *scn,
  2363. qdf_size_t size,
  2364. void *vaddr,
  2365. qdf_dma_addr_t paddr,
  2366. qdf_dma_context_t memctx,
  2367. uint8_t is_mem_prealloc)
  2368. {
  2369. struct hif_driver_state_callbacks *cbk =
  2370. hif_get_callbacks_handle(scn);
  2371. if (is_mem_prealloc) {
  2372. if (cbk && cbk->prealloc_put_consistent_mem_unaligned) {
  2373. cbk->prealloc_put_consistent_mem_unaligned(vaddr);
  2374. } else {
			dp_warn("prealloc_put_consistent_mem_unaligned callback is NULL");
  2376. QDF_BUG(0);
  2377. }
  2378. } else {
  2379. qdf_mem_free_consistent(scn->qdf_dev, scn->qdf_dev->dev,
  2380. size, vaddr, paddr, memctx);
  2381. }
  2382. }
  2383. void hif_prealloc_get_multi_pages(struct hif_softc *scn, uint32_t desc_type,
  2384. qdf_size_t elem_size, uint16_t elem_num,
  2385. struct qdf_mem_multi_page_t *pages,
  2386. bool cacheable)
  2387. {
  2388. struct hif_driver_state_callbacks *cbk =
  2389. hif_get_callbacks_handle(scn);
  2390. if (cbk && cbk->prealloc_get_multi_pages)
  2391. cbk->prealloc_get_multi_pages(desc_type, elem_size, elem_num,
  2392. pages, cacheable);
  2393. if (!pages->num_pages)
  2394. qdf_mem_multi_pages_alloc(scn->qdf_dev, pages,
  2395. elem_size, elem_num, 0, cacheable);
  2396. }
  2397. void hif_prealloc_put_multi_pages(struct hif_softc *scn, uint32_t desc_type,
  2398. struct qdf_mem_multi_page_t *pages,
  2399. bool cacheable)
  2400. {
  2401. struct hif_driver_state_callbacks *cbk =
  2402. hif_get_callbacks_handle(scn);
  2403. if (cbk && cbk->prealloc_put_multi_pages &&
  2404. pages->is_mem_prealloc)
  2405. cbk->prealloc_put_multi_pages(desc_type, pages);
  2406. if (!pages->is_mem_prealloc)
  2407. qdf_mem_multi_pages_free(scn->qdf_dev, pages, 0,
  2408. cacheable);
  2409. }
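
/*
 * All four helpers above follow the same pattern: try the platform
 * pre-allocated pool through the registered callback first, and fall back
 * to a regular qdf allocation (or free) when no callback is set or the
 * pool has nothing for the request.
 */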
  2410. #endif
  2411. /**
  2412. * hif_batch_send() - API to access hif specific function
  2413. * ce_batch_send.
  2414. * @osc: HIF Context
  2415. * @msdu: list of msdus to be sent
  2416. * @transfer_id: transfer id
  2417. * @len: downloaded length
  2418. * @sendhead:
  2419. *
 * Return: list of msdus not sent
  2421. */
  2422. qdf_nbuf_t hif_batch_send(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
  2423. uint32_t transfer_id, u_int32_t len, uint32_t sendhead)
  2424. {
  2425. void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
  2426. if (!ce_tx_hdl)
  2427. return NULL;
  2428. return ce_batch_send((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
  2429. len, sendhead);
  2430. }
  2431. qdf_export_symbol(hif_batch_send);
  2432. /**
  2433. * hif_update_tx_ring() - API to access hif specific function
  2434. * ce_update_tx_ring.
  2435. * @osc: HIF Context
  2436. * @num_htt_cmpls: number of htt compl received.
  2437. *
  2438. * Return: void
  2439. */
  2440. void hif_update_tx_ring(struct hif_opaque_softc *osc, u_int32_t num_htt_cmpls)
  2441. {
  2442. void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
  2443. ce_update_tx_ring(ce_tx_hdl, num_htt_cmpls);
  2444. }
  2445. qdf_export_symbol(hif_update_tx_ring);
  2446. /**
  2447. * hif_send_single() - API to access hif specific function
  2448. * ce_send_single.
  2449. * @osc: HIF Context
  2450. * @msdu : msdu to be sent
  2451. * @transfer_id: transfer id
  2452. * @len : downloaded length
  2453. *
  2454. * Return: msdu sent status
  2455. */
  2456. QDF_STATUS hif_send_single(struct hif_opaque_softc *osc, qdf_nbuf_t msdu,
  2457. uint32_t transfer_id, u_int32_t len)
  2458. {
  2459. void *ce_tx_hdl = hif_get_ce_handle(osc, CE_HTT_TX_CE);
  2460. if (!ce_tx_hdl)
  2461. return QDF_STATUS_E_NULL_VALUE;
  2462. return ce_send_single((struct CE_handle *)ce_tx_hdl, msdu, transfer_id,
  2463. len);
  2464. }
  2465. qdf_export_symbol(hif_send_single);
  2466. #endif
  2467. /**
  2468. * hif_reg_write() - API to access hif specific function
  2469. * hif_write32_mb.
  2470. * @hif_ctx : HIF Context
  2471. * @offset : offset on which value has to be written
  2472. * @value : value to be written
  2473. *
  2474. * Return: None
  2475. */
  2476. void hif_reg_write(struct hif_opaque_softc *hif_ctx, uint32_t offset,
  2477. uint32_t value)
  2478. {
  2479. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2480. hif_write32_mb(scn, scn->mem + offset, value);
  2481. }
  2482. qdf_export_symbol(hif_reg_write);
  2483. /**
  2484. * hif_reg_read() - API to access hif specific function
  2485. * hif_read32_mb.
  2486. * @hif_ctx : HIF Context
  2487. * @offset : offset from which value has to be read
  2488. *
  2489. * Return: Read value
  2490. */
  2491. uint32_t hif_reg_read(struct hif_opaque_softc *hif_ctx, uint32_t offset)
  2492. {
  2493. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2494. return hif_read32_mb(scn, scn->mem + offset);
  2495. }
  2496. qdf_export_symbol(hif_reg_read);
  2497. /**
  2498. * hif_ramdump_handler(): generic ramdump handler
  2499. * @scn: struct hif_opaque_softc
  2500. *
  2501. * Return: None
  2502. */
  2503. void hif_ramdump_handler(struct hif_opaque_softc *scn)
  2504. {
  2505. if (hif_get_bus_type(scn) == QDF_BUS_TYPE_USB)
  2506. hif_usb_ramdump_handler(scn);
  2507. }
  2508. hif_pm_wake_irq_type hif_pm_get_wake_irq_type(struct hif_opaque_softc *hif_ctx)
  2509. {
  2510. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2511. return scn->wake_irq_type;
  2512. }
  2513. irqreturn_t hif_wake_interrupt_handler(int irq, void *context)
  2514. {
  2515. struct hif_softc *scn = context;
  2516. hif_info("wake interrupt received on irq %d", irq);
  2517. hif_rtpm_set_monitor_wake_intr(0);
  2518. hif_rtpm_request_resume();
  2519. if (scn->initial_wakeup_cb)
  2520. scn->initial_wakeup_cb(scn->initial_wakeup_priv);
  2521. if (hif_is_ut_suspended(scn))
  2522. hif_ut_fw_resume(scn);
  2523. qdf_pm_system_wakeup();
  2524. return IRQ_HANDLED;
  2525. }
  2526. void hif_set_initial_wakeup_cb(struct hif_opaque_softc *hif_ctx,
  2527. void (*callback)(void *),
  2528. void *priv)
  2529. {
  2530. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2531. scn->initial_wakeup_cb = callback;
  2532. scn->initial_wakeup_priv = priv;
  2533. }
  2534. void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
  2535. uint32_t ce_service_max_yield_time)
  2536. {
  2537. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  2538. hif_ctx->ce_service_max_yield_time =
  2539. ce_service_max_yield_time * 1000;
  2540. }
  2541. unsigned long long
  2542. hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
  2543. {
  2544. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  2545. return hif_ctx->ce_service_max_yield_time;
  2546. }
  2547. void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
  2548. uint8_t ce_service_max_rx_ind_flush)
  2549. {
  2550. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  2551. if (ce_service_max_rx_ind_flush == 0 ||
  2552. ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
  2553. hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
  2554. else
  2555. hif_ctx->ce_service_max_rx_ind_flush =
  2556. ce_service_max_rx_ind_flush;
  2557. }
  2558. #ifdef SYSTEM_PM_CHECK
  2559. void __hif_system_pm_set_state(struct hif_opaque_softc *hif,
  2560. enum hif_system_pm_state state)
  2561. {
  2562. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  2563. qdf_atomic_set(&hif_ctx->sys_pm_state, state);
  2564. }
  2565. int32_t hif_system_pm_get_state(struct hif_opaque_softc *hif)
  2566. {
  2567. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  2568. return qdf_atomic_read(&hif_ctx->sys_pm_state);
  2569. }
  2570. int hif_system_pm_state_check(struct hif_opaque_softc *hif)
  2571. {
  2572. struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
  2573. int32_t sys_pm_state;
  2574. if (!hif_ctx) {
  2575. hif_err("hif context is null");
  2576. return -EFAULT;
  2577. }
  2578. sys_pm_state = qdf_atomic_read(&hif_ctx->sys_pm_state);
  2579. if (sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDING ||
  2580. sys_pm_state == HIF_SYSTEM_PM_STATE_BUS_SUSPENDED) {
  2581. hif_info("Triggering system wakeup");
  2582. qdf_pm_system_wakeup();
  2583. return -EAGAIN;
  2584. }
  2585. return 0;
  2586. }
  2587. #endif
  2588. #ifdef WLAN_FEATURE_AFFINITY_MGR
/*
 * hif_audio_cpu_affinity_allowed() - Check if audio CPU affinity is allowed
 *
 * @scn: hif handle
 * @cfg: hif affinity manager configuration for the IRQ
 * @audio_taken_cpu: CPUs currently taken by audio
 * @current_time: current system time
 *
 * This API checks two conditions:
 * 1) The last audio-taken mask and the current taken mask differ.
 * 2) More than the time threshold (5 seconds in the current case) has
 *    passed since the IRQ was last affined away due to audio-taken CPUs.
 * Only when both conditions hold does it return true.
 *
 * Return: bool: true if it is allowed to affine away audio-taken CPUs
 */
  2605. static inline bool
  2606. hif_audio_cpu_affinity_allowed(struct hif_softc *scn,
  2607. struct hif_cpu_affinity *cfg,
  2608. qdf_cpu_mask audio_taken_cpu,
  2609. uint64_t current_time)
  2610. {
  2611. if (!qdf_cpumask_equal(&audio_taken_cpu, &cfg->walt_taken_mask) &&
  2612. (qdf_log_timestamp_to_usecs(current_time -
  2613. cfg->last_affined_away)
  2614. < scn->time_threshold))
  2615. return false;
  2616. return true;
  2617. }
  2618. /*
  2619. * hif_affinity_mgr_check_update_mask() - Check if cpu mask need to be updated
  2620. *
  2621. * @scn: hif handle
  2622. * @cfg: hif affinity manager configuration for IRQ
  2623. * @audio_taken_cpu: Current CPUs which are taken by audio.
  2624. * @cpu_mask: CPU mask which need to be updated.
  2625. * @current_time: Current system time.
  2626. *
 * This API checks if the Pro audio use case is running and whether the
 * cpu_mask needs to be updated
  2629. *
  2630. * Return: QDF_STATUS
  2631. */
  2632. static inline QDF_STATUS
  2633. hif_affinity_mgr_check_update_mask(struct hif_softc *scn,
  2634. struct hif_cpu_affinity *cfg,
  2635. qdf_cpu_mask audio_taken_cpu,
  2636. qdf_cpu_mask *cpu_mask,
  2637. uint64_t current_time)
  2638. {
  2639. qdf_cpu_mask allowed_mask;
	/*
	 * Case 1: audio_taken_mask is empty
	 * Check whether the passed cpu_mask and wlan_requested_mask are the
	 * same. If they differ, copy wlan_requested_mask (the IRQ affinity
	 * mask requested by WLAN) into cpu_mask.
	 *
	 * Case 2: audio_taken_mask is not empty
	 * 1. Only allow an update if the IRQ was last affined away due to
	 *    audio-taken CPUs more than 5 seconds ago, or the update is
	 *    requested by WLAN.
	 * 2. Only allow Silver cores to be affined away.
	 * 3. Check if any CPUs allowed for the audio use case are set in
	 *    cpu_mask.
	 *    i.  If any such CPU mask is set, mask those CPUs out of
	 *        cpu_mask.
	 *    ii. If, after masking out the audio-taken CPUs (Silver cores),
	 *        cpu_mask is empty, set the mask to all CPUs except those
	 *        taken by audio.
	 * Example:
	 * | Audio mask | mask allowed | cpu_mask | WLAN req mask | new cpu_mask |
	 * |    0x00    |     0x00     |   0x0C   |     0x0C      |     0x0C     |
	 * |    0x00    |     0x00     |   0x03   |     0x03      |     0x03     |
	 * |    0x00    |     0x00     |   0xFC   |     0x03      |     0x03     |
	 * |    0x00    |     0x00     |   0x03   |     0x0C      |     0x0C     |
	 * |    0x0F    |     0x03     |   0x0C   |     0x0C      |     0x0C     |
	 * |    0x0F    |     0x03     |   0x03   |     0x03      |     0xFC     |
	 * |    0x03    |     0x03     |   0x0C   |     0x0C      |     0x0C     |
	 * |    0x03    |     0x03     |   0x03   |     0x03      |     0xFC     |
	 * |    0x03    |     0x03     |   0xFC   |     0x03      |     0xFC     |
	 * |    0xF0    |     0x00     |   0x0C   |     0x0C      |     0x0C     |
	 * |    0xF0    |     0x00     |   0x03   |     0x03      |     0x03     |
	 */
	/* Check if the audio-taken mask is empty */
  2670. if (qdf_likely(qdf_cpumask_empty(&audio_taken_cpu))) {
  2671. /* If CPU mask requested by WLAN for the IRQ and
  2672. * cpu_mask passed CPU mask set for IRQ is different
  2673. * Copy requested mask into cpu_mask and return
  2674. */
  2675. if (qdf_unlikely(!qdf_cpumask_equal(cpu_mask,
  2676. &cfg->wlan_requested_mask))) {
  2677. qdf_cpumask_copy(cpu_mask, &cfg->wlan_requested_mask);
  2678. return QDF_STATUS_SUCCESS;
  2679. }
  2680. return QDF_STATUS_E_ALREADY;
  2681. }
  2682. if (!(hif_audio_cpu_affinity_allowed(scn, cfg, audio_taken_cpu,
  2683. current_time) ||
  2684. cfg->update_requested))
  2685. return QDF_STATUS_E_AGAIN;
	/* Only allow Silver cores to be affined away */
  2687. qdf_cpumask_and(&allowed_mask, &scn->allowed_mask, &audio_taken_cpu);
  2688. if (qdf_cpumask_intersects(cpu_mask, &allowed_mask)) {
  2689. /* If any of taken CPU(Silver cores) mask is set in cpu_mask,
  2690. * mask out the audio taken CPUs from the cpu_mask.
  2691. */
  2692. qdf_cpumask_andnot(cpu_mask, &cfg->wlan_requested_mask,
  2693. &allowed_mask);
  2694. /* If cpu_mask is empty set it to all CPUs
  2695. * except taken by audio(Silver cores)
  2696. */
  2697. if (qdf_unlikely(qdf_cpumask_empty(cpu_mask)))
  2698. qdf_cpumask_complement(cpu_mask, &allowed_mask);
  2699. return QDF_STATUS_SUCCESS;
  2700. }
  2701. return QDF_STATUS_E_ALREADY;
  2702. }
  2703. static inline QDF_STATUS
  2704. hif_check_and_affine_irq(struct hif_softc *scn, struct hif_cpu_affinity *cfg,
  2705. qdf_cpu_mask audio_taken_cpu, qdf_cpu_mask cpu_mask,
  2706. uint64_t current_time)
  2707. {
  2708. QDF_STATUS status;
  2709. status = hif_affinity_mgr_check_update_mask(scn, cfg,
  2710. audio_taken_cpu,
  2711. &cpu_mask,
  2712. current_time);
  2713. /* Set IRQ affinity if CPU mask was updated */
  2714. if (QDF_IS_STATUS_SUCCESS(status)) {
  2715. status = hif_irq_set_affinity_hint(cfg->irq,
  2716. &cpu_mask);
  2717. if (QDF_IS_STATUS_SUCCESS(status)) {
  2718. /* Store audio taken CPU mask */
  2719. qdf_cpumask_copy(&cfg->walt_taken_mask,
  2720. &audio_taken_cpu);
  2721. /* Store CPU mask which was set for IRQ*/
  2722. qdf_cpumask_copy(&cfg->current_irq_mask,
  2723. &cpu_mask);
  2724. /* Set time when IRQ affinity was updated */
  2725. cfg->last_updated = current_time;
  2726. if (hif_audio_cpu_affinity_allowed(scn, cfg,
  2727. audio_taken_cpu,
  2728. current_time))
  2729. /* If CPU mask was updated due to CPU
  2730. * taken by audio, update
  2731. * last_affined_away time
  2732. */
  2733. cfg->last_affined_away = current_time;
  2734. }
  2735. }
  2736. return status;
  2737. }
  2738. void hif_affinity_mgr_affine_irq(struct hif_softc *scn)
  2739. {
  2740. bool audio_affinity_allowed = false;
  2741. int i, j, ce_id;
  2742. uint64_t current_time;
  2743. char cpu_str[10];
  2744. QDF_STATUS status;
  2745. qdf_cpu_mask cpu_mask, audio_taken_cpu;
  2746. struct HIF_CE_state *hif_state;
  2747. struct hif_exec_context *hif_ext_group;
  2748. struct CE_attr *host_ce_conf;
  2749. struct HIF_CE_state *ce_sc;
  2750. struct hif_cpu_affinity *cfg;
  2751. if (!scn->affinity_mgr_supported)
  2752. return;
  2753. current_time = hif_get_log_timestamp();
  2754. /* Get CPU mask for audio taken CPUs */
  2755. audio_taken_cpu = qdf_walt_get_cpus_taken();
  2756. ce_sc = HIF_GET_CE_STATE(scn);
  2757. host_ce_conf = ce_sc->host_ce_config;
  2758. for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
  2759. if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
  2760. continue;
  2761. cfg = &scn->ce_irq_cpu_mask[ce_id];
  2762. qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
  2763. status =
  2764. hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
  2765. cpu_mask, current_time);
  2766. if (QDF_IS_STATUS_SUCCESS(status))
  2767. audio_affinity_allowed = true;
  2768. }
  2769. hif_state = HIF_GET_CE_STATE(scn);
  2770. for (i = 0; i < hif_state->hif_num_extgroup; i++) {
  2771. hif_ext_group = hif_state->hif_ext_group[i];
  2772. for (j = 0; j < hif_ext_group->numirq; j++) {
  2773. cfg = &scn->irq_cpu_mask[hif_ext_group->grp_id][j];
  2774. qdf_cpumask_copy(&cpu_mask, &cfg->current_irq_mask);
  2775. status =
  2776. hif_check_and_affine_irq(scn, cfg, audio_taken_cpu,
  2777. cpu_mask, current_time);
  2778. if (QDF_IS_STATUS_SUCCESS(status)) {
  2779. qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
  2780. audio_affinity_allowed = true;
  2781. }
  2782. }
  2783. }
  2784. if (audio_affinity_allowed) {
  2785. qdf_thread_cpumap_print_to_pagebuf(false, cpu_str,
  2786. &audio_taken_cpu);
  2787. hif_info("Audio taken CPU mask: %s", cpu_str);
  2788. }
  2789. }
  2790. static inline QDF_STATUS
  2791. hif_affinity_mgr_set_irq_affinity(struct hif_softc *scn, uint32_t irq,
  2792. struct hif_cpu_affinity *cfg,
  2793. qdf_cpu_mask *cpu_mask)
  2794. {
  2795. uint64_t current_time;
  2796. char cpu_str[10];
  2797. QDF_STATUS status, mask_updated;
  2798. qdf_cpu_mask audio_taken_cpu = qdf_walt_get_cpus_taken();
  2799. current_time = hif_get_log_timestamp();
  2800. qdf_cpumask_copy(&cfg->wlan_requested_mask, cpu_mask);
  2801. cfg->update_requested = true;
  2802. mask_updated = hif_affinity_mgr_check_update_mask(scn, cfg,
  2803. audio_taken_cpu,
  2804. cpu_mask,
  2805. current_time);
  2806. status = hif_irq_set_affinity_hint(irq, cpu_mask);
  2807. if (QDF_IS_STATUS_SUCCESS(status)) {
  2808. qdf_cpumask_copy(&cfg->walt_taken_mask, &audio_taken_cpu);
  2809. qdf_cpumask_copy(&cfg->current_irq_mask, cpu_mask);
  2810. if (QDF_IS_STATUS_SUCCESS(mask_updated)) {
  2811. cfg->last_updated = current_time;
  2812. if (hif_audio_cpu_affinity_allowed(scn, cfg,
  2813. audio_taken_cpu,
  2814. current_time)) {
  2815. cfg->last_affined_away = current_time;
  2816. qdf_thread_cpumap_print_to_pagebuf(false,
  2817. cpu_str,
  2818. &audio_taken_cpu);
  2819. hif_info_rl("Audio taken CPU mask: %s",
  2820. cpu_str);
  2821. }
  2822. }
  2823. }
  2824. cfg->update_requested = false;
  2825. return status;
  2826. }
  2827. QDF_STATUS
  2828. hif_affinity_mgr_set_qrg_irq_affinity(struct hif_softc *scn, uint32_t irq,
  2829. uint32_t grp_id, uint32_t irq_index,
  2830. qdf_cpu_mask *cpu_mask)
  2831. {
  2832. struct hif_cpu_affinity *cfg;
  2833. if (!scn->affinity_mgr_supported)
  2834. return hif_irq_set_affinity_hint(irq, cpu_mask);
  2835. cfg = &scn->irq_cpu_mask[grp_id][irq_index];
  2836. return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
  2837. }
  2838. QDF_STATUS
  2839. hif_affinity_mgr_set_ce_irq_affinity(struct hif_softc *scn, uint32_t irq,
  2840. uint32_t ce_id, qdf_cpu_mask *cpu_mask)
  2841. {
  2842. struct hif_cpu_affinity *cfg;
  2843. if (!scn->affinity_mgr_supported)
  2844. return hif_irq_set_affinity_hint(irq, cpu_mask);
  2845. cfg = &scn->ce_irq_cpu_mask[ce_id];
  2846. return hif_affinity_mgr_set_irq_affinity(scn, irq, cfg, cpu_mask);
  2847. }
  2848. void
  2849. hif_affinity_mgr_init_ce_irq(struct hif_softc *scn, int id, int irq)
  2850. {
  2851. unsigned int cpus;
  2852. qdf_cpu_mask cpu_mask = {0};
  2853. struct hif_cpu_affinity *cfg = NULL;
  2854. if (!scn->affinity_mgr_supported)
  2855. return;
  2856. /* Set CPU Mask to Silver core */
  2857. qdf_for_each_possible_cpu(cpus)
  2858. if (qdf_topology_physical_package_id(cpus) ==
  2859. CPU_CLUSTER_TYPE_LITTLE)
  2860. qdf_cpumask_set_cpu(cpus, &cpu_mask);
  2861. cfg = &scn->ce_irq_cpu_mask[id];
  2862. qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
  2863. qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
  2864. cfg->irq = irq;
  2865. cfg->last_updated = 0;
  2866. cfg->last_affined_away = 0;
  2867. cfg->update_requested = false;
  2868. }
  2869. void
  2870. hif_affinity_mgr_init_grp_irq(struct hif_softc *scn, int grp_id,
  2871. int irq_num, int irq)
  2872. {
  2873. unsigned int cpus;
  2874. qdf_cpu_mask cpu_mask = {0};
  2875. struct hif_cpu_affinity *cfg = NULL;
  2876. if (!scn->affinity_mgr_supported)
  2877. return;
  2878. /* Set CPU Mask to Silver core */
  2879. qdf_for_each_possible_cpu(cpus)
  2880. if (qdf_topology_physical_package_id(cpus) ==
  2881. CPU_CLUSTER_TYPE_LITTLE)
  2882. qdf_cpumask_set_cpu(cpus, &cpu_mask);
  2883. cfg = &scn->irq_cpu_mask[grp_id][irq_num];
  2884. qdf_cpumask_copy(&cfg->current_irq_mask, &cpu_mask);
  2885. qdf_cpumask_copy(&cfg->wlan_requested_mask, &cpu_mask);
  2886. cfg->irq = irq;
  2887. cfg->last_updated = 0;
  2888. cfg->last_affined_away = 0;
  2889. cfg->update_requested = false;
  2890. }
  2891. #endif
  2892. #if defined(HIF_CPU_PERF_AFFINE_MASK) || \
  2893. defined(FEATURE_ENABLE_CE_DP_IRQ_AFFINE)
  2894. void hif_config_irq_set_perf_affinity_hint(
  2895. struct hif_opaque_softc *hif_ctx)
  2896. {
  2897. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2898. hif_config_irq_affinity(scn);
  2899. }
  2900. qdf_export_symbol(hif_config_irq_set_perf_affinity_hint);
  2901. #endif