/*
 * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all
 * copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
 * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
 * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
 * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
 * PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/if_arp.h>
#include <linux/of_pci.h>
#include <linux/version.h>
#include "hif_io32.h"
#include "if_pci.h"
#include "hif.h"
#include "target_type.h"
#include "hif_main.h"
#include "ce_main.h"
#include "ce_api.h"
#include "ce_internal.h"
#include "ce_reg.h"
#include "ce_bmi.h"
#include "regtable.h"
#include "hif_hw_version.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "qdf_status.h"
#include "qdf_atomic.h"
#include "qdf_platform.h"
#include "pld_common.h"
#include "mp_dev.h"
#include "hif_debug.h"

#if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
     defined(QCA_WIFI_WCN7850))
#include "hal_api.h"
#endif

#include "if_pci_internal.h"
#include "ce_tasklet.h"
#include "targaddrs.h"
#include "hif_exec.h"

#include "pci_api.h"
#include "ahb_api.h"
#include "wlan_cfg.h"
#include "qdf_hang_event_notifier.h"
#include "qdf_platform.h"
#include "qal_devnode.h"
#include "qdf_irq.h"

/* Maximum ms timeout for host to wake up target */
#define PCIE_WAKE_TIMEOUT 1000
#define RAMDUMP_EVENT_TIMEOUT 2500

/* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
 * PCIe data bus error
 * As workaround for this issue - changing the reset sequence to
 * use Target CPU warm reset instead of SOC_GLOBAL_RESET
 */
#define CPU_WARM_RESET_WAR
#define WLAN_CFG_MAX_PCIE_GROUPS 2
#ifdef QCA_WIFI_QCN9224
#define WLAN_CFG_MAX_CE_COUNT 16
#else
#define WLAN_CFG_MAX_CE_COUNT 12
#endif

const char *dp_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_INT_NUM_CONTEXTS] = {
    {
        "pci0_wlan_grp_dp_0",
        "pci0_wlan_grp_dp_1",
        "pci0_wlan_grp_dp_2",
        "pci0_wlan_grp_dp_3",
        "pci0_wlan_grp_dp_4",
        "pci0_wlan_grp_dp_5",
        "pci0_wlan_grp_dp_6",
#if !defined(WLAN_MAX_PDEVS)
        "pci0_wlan_grp_dp_7",
        "pci0_wlan_grp_dp_8",
        "pci0_wlan_grp_dp_9",
        "pci0_wlan_grp_dp_10",
#endif
    },
    {
        "pci1_wlan_grp_dp_0",
        "pci1_wlan_grp_dp_1",
        "pci1_wlan_grp_dp_2",
        "pci1_wlan_grp_dp_3",
        "pci1_wlan_grp_dp_4",
        "pci1_wlan_grp_dp_5",
        "pci1_wlan_grp_dp_6",
#if !defined(WLAN_MAX_PDEVS)
        "pci1_wlan_grp_dp_7",
        "pci1_wlan_grp_dp_8",
        "pci1_wlan_grp_dp_9",
        "pci1_wlan_grp_dp_10",
#endif
    }
};

const char *ce_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_MAX_CE_COUNT] = {
    {
        "pci0_wlan_ce_0",
        "pci0_wlan_ce_1",
        "pci0_wlan_ce_2",
        "pci0_wlan_ce_3",
        "pci0_wlan_ce_4",
        "pci0_wlan_ce_5",
        "pci0_wlan_ce_6",
        "pci0_wlan_ce_7",
        "pci0_wlan_ce_8",
        "pci0_wlan_ce_9",
        "pci0_wlan_ce_10",
        "pci0_wlan_ce_11",
#ifdef QCA_WIFI_QCN9224
        "pci0_wlan_ce_12",
        "pci0_wlan_ce_13",
        "pci0_wlan_ce_14",
        "pci0_wlan_ce_15",
#endif
    },
    {
        "pci1_wlan_ce_0",
        "pci1_wlan_ce_1",
        "pci1_wlan_ce_2",
        "pci1_wlan_ce_3",
        "pci1_wlan_ce_4",
        "pci1_wlan_ce_5",
        "pci1_wlan_ce_6",
        "pci1_wlan_ce_7",
        "pci1_wlan_ce_8",
        "pci1_wlan_ce_9",
        "pci1_wlan_ce_10",
        "pci1_wlan_ce_11",
#ifdef QCA_WIFI_QCN9224
        "pci1_wlan_ce_12",
        "pci1_wlan_ce_13",
        "pci1_wlan_ce_14",
        "pci1_wlan_ce_15",
#endif
    }
};

#if defined(WLAN_MAX_PDEVS) && (WLAN_MAX_PDEVS == 1)
static inline int hif_get_pci_slot(struct hif_softc *scn)
{
    /*
     * If WLAN_MAX_PDEVS is defined as 1, always return pci slot 0
     * since there is only one pci device attached.
     */
    return 0;
}
#else
static inline int hif_get_pci_slot(struct hif_softc *scn)
{
    int pci_slot = pld_get_pci_slot(scn->qdf_dev->dev);

    if (pci_slot < 0) {
        hif_err("Invalid PCI SLOT %d", pci_slot);
        qdf_assert_always(0);
        return 0;
    } else {
        return pci_slot;
    }
}
#endif
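
/*
 * Editor's note (illustrative, not from the original source): the slot
 * index returned by hif_get_pci_slot() selects the per-device row in the
 * dp_irqname/ce_irqname tables above, so looking up the IRQ name for a
 * given copy engine would go roughly like:
 *
 *     int slot = hif_get_pci_slot(scn);
 *     const char *name = ce_irqname[slot][ce_id];
 *     // name is then handed to the IRQ request call for that CE
 *
 * which is why the two rows differ only in their "pci0"/"pci1" prefix.
 */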

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
struct ce_irq_reg_table {
    uint32_t irq_enable;
    uint32_t irq_status;
};

#ifndef QCA_WIFI_3_0_ADRASTEA
static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
}
#else
static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
    struct hif_softc *scn = HIF_GET_SOFTC(sc);
    unsigned int target_enable0, target_enable1;
    unsigned int target_cause0, target_cause1;

    target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
    target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
    target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
    target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);

    if ((target_enable0 & target_cause0) ||
        (target_enable1 & target_cause1)) {
        hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
        hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);

        if (scn->notice_send)
            pld_intr_notify_q6(sc->dev);
    }
}
#endif

/**
 * pci_dispatch_interrupt() - dispatch pending CE interrupts
 * @scn: scn
 *
 * Reads the CE interrupt summary and dispatches each pending
 * copy engine to its tasklet.
 *
 * Return: N/A
 */
static void pci_dispatch_interrupt(struct hif_softc *scn)
{
    uint32_t intr_summary;
    int id;
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

    if (scn->hif_init_done != true)
        return;

    if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
        return;

    intr_summary = CE_INTERRUPT_SUMMARY(scn);

    if (intr_summary == 0) {
        if ((scn->target_status != TARGET_STATUS_RESET) &&
            (!qdf_atomic_read(&scn->link_suspended))) {
            hif_write32_mb(scn, scn->mem +
                           (SOC_CORE_BASE_ADDRESS |
                            PCIE_INTR_ENABLE_ADDRESS),
                           HOST_GROUP0_MASK);
            hif_read32_mb(scn, scn->mem +
                          (SOC_CORE_BASE_ADDRESS |
                           PCIE_INTR_ENABLE_ADDRESS));
        }
        Q_TARGET_ACCESS_END(scn);
        return;
    }
    Q_TARGET_ACCESS_END(scn);

    scn->ce_irq_summary = intr_summary;
    for (id = 0; intr_summary && (id < scn->ce_count); id++) {
        if (intr_summary & (1 << id)) {
            intr_summary &= ~(1 << id);
            ce_dispatch_interrupt(id, &hif_state->tasklets[id]);
        }
    }
}
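
/*
 * Illustrative note (not part of the original source): each set bit in
 * the value returned by CE_INTERRUPT_SUMMARY() identifies one pending
 * copy engine, so a summary of 0x9 would dispatch CE 0 and CE 3:
 *
 *     intr_summary = 0x9;              // bits 0 and 3 set
 *     ce_dispatch_interrupt(0, ...);   // bit 0
 *     ce_dispatch_interrupt(3, ...);   // bit 3
 *
 * The loop above clears each bit as it is handled and exits early once
 * intr_summary reaches zero.
 */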

irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
{
    struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
    struct hif_softc *scn = HIF_GET_SOFTC(sc);
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
    volatile int tmp;
    uint16_t val = 0;
    uint32_t bar0 = 0;
    uint32_t fw_indicator_address, fw_indicator;
    bool ssr_irq = false;
    unsigned int host_cause, host_enable;

    if (LEGACY_INTERRUPTS(sc)) {
        if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
            return IRQ_HANDLED;

        if (ADRASTEA_BU) {
            host_enable = hif_read32_mb(sc, sc->mem +
                                        PCIE_INTR_ENABLE_ADDRESS);
            host_cause = hif_read32_mb(sc, sc->mem +
                                       PCIE_INTR_CAUSE_ADDRESS);
            if (!(host_enable & host_cause)) {
                hif_pci_route_adrastea_interrupt(sc);
                return IRQ_HANDLED;
            }
        }

        /* Clear Legacy PCI line interrupts
         * IMPORTANT: INTR_CLR register has to be set
         * after INTR_ENABLE is set to 0,
         * otherwise interrupt can not be really cleared
         */
        hif_write32_mb(sc, sc->mem +
                       (SOC_CORE_BASE_ADDRESS |
                        PCIE_INTR_ENABLE_ADDRESS), 0);

        hif_write32_mb(sc, sc->mem +
                       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
                       ADRASTEA_BU ?
                       (host_enable & host_cause) :
                       HOST_GROUP0_MASK);

        if (ADRASTEA_BU)
            hif_write32_mb(sc, sc->mem + 0x2f100c,
                           (host_cause >> 1));

        /* IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer
         */
        if (!ADRASTEA_BU) {
            tmp = hif_read32_mb(sc, sc->mem +
                                (SOC_CORE_BASE_ADDRESS |
                                 PCIE_INTR_ENABLE_ADDRESS));

            if (tmp == 0xdeadbeef) {
                hif_err("SoC returns 0xdeadbeef!!");

                pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
                hif_err("PCI Vendor ID = 0x%04x", val);

                pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
                hif_err("PCI Device ID = 0x%04x", val);

                pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
                hif_err("PCI Command = 0x%04x", val);

                pci_read_config_word(sc->pdev, PCI_STATUS, &val);
                hif_err("PCI Status = 0x%04x", val);

                pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
                                      &bar0);
                hif_err("PCI BAR0 = 0x%08x", bar0);

                hif_err("RTC_STATE_ADDRESS = 0x%08x",
                        hif_read32_mb(sc, sc->mem +
                                      PCIE_LOCAL_BASE_ADDRESS
                                      + RTC_STATE_ADDRESS));
                hif_err("PCIE_SOC_WAKE_ADDRESS = 0x%08x",
                        hif_read32_mb(sc, sc->mem +
                                      PCIE_LOCAL_BASE_ADDRESS
                                      + PCIE_SOC_WAKE_ADDRESS));
                hif_err("0x80008 = 0x%08x, 0x8000c = 0x%08x",
                        hif_read32_mb(sc, sc->mem + 0x80008),
                        hif_read32_mb(sc, sc->mem + 0x8000c));
                hif_err("0x80010 = 0x%08x, 0x80014 = 0x%08x",
                        hif_read32_mb(sc, sc->mem + 0x80010),
                        hif_read32_mb(sc, sc->mem + 0x80014));
                hif_err("0x80018 = 0x%08x, 0x8001c = 0x%08x",
                        hif_read32_mb(sc, sc->mem + 0x80018),
                        hif_read32_mb(sc, sc->mem + 0x8001c));
                QDF_BUG(0);
            }

            PCI_CLR_CAUSE0_REGISTER(sc);
        }

        if (HAS_FW_INDICATOR) {
            fw_indicator_address = hif_state->fw_indicator_address;
            fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
            if ((fw_indicator != ~0) &&
                (fw_indicator & FW_IND_EVENT_PENDING))
                ssr_irq = true;
        }

        if (Q_TARGET_ACCESS_END(scn) < 0)
            return IRQ_HANDLED;
    }
    /* TBDXXX: Add support for WMAC */

    if (ssr_irq) {
        sc->irq_event = irq;
        qdf_atomic_set(&scn->tasklet_from_intr, 1);

        qdf_atomic_inc(&scn->active_tasklet_cnt);
        tasklet_schedule(&sc->intr_tq);
    } else {
        pci_dispatch_interrupt(scn);
    }

    return IRQ_HANDLED;
}
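
/*
 * Illustrative sketch (an assumption, not confirmed by this file): for
 * legacy INTx operation a handler like the one above is typically
 * registered as a shared interrupt during bus configuration, along the
 * lines of:
 *
 *     ret = request_irq(sc->irq, hif_pci_legacy_ce_interrupt_handler,
 *                       IRQF_SHARED, "wlan_pci", sc);
 *
 * Since the line may be shared, the handler masks and clears the device's
 * own cause bits before dispatching, and always returns IRQ_HANDLED.
 */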

bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
{
    return 1; /* FIX THIS */
}

int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
{
    struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
    int i = 0;

    if (!irq || !size) {
        return -EINVAL;
    }

    if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
        irq[0] = sc->irq;
        return 1;
    }

    if (sc->num_msi_intrs > size) {
        qdf_print("Not enough space in irq buffer to return irqs");
        return -EINVAL;
    }

    for (i = 0; i < sc->num_msi_intrs; i++) {
        irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL;
    }

    return sc->num_msi_intrs;
}

/**
 * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
 * @scn: hif_softc
 *
 * Return: void
 */
#if CONFIG_ATH_PCIE_MAX_PERF == 0
void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
    struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
    A_target_id_t pci_addr = scn->mem;

    qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
    /*
     * If the deferred sleep timer is running cancel it
     * and put the soc into sleep.
     */
    if (hif_state->fake_sleep == true) {
        qdf_timer_stop(&hif_state->sleep_timer);
        if (hif_state->verified_awake == false) {
            hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                           PCIE_SOC_WAKE_ADDRESS,
                           PCIE_SOC_WAKE_RESET);
        }
        hif_state->fake_sleep = false;
    }
    qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
#else
inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
}
#endif

#define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
    hif_read32_mb(sc, (char *)(mem) + \
                  PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))

#define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
    hif_write32_mb(sc, ((char *)(mem) + \
                   PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
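
/*
 * Usage note (illustrative): these helpers simply offset into the PCIe
 * local register space. For example,
 *
 *     val = A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS);
 *
 * expands to a hif_read32_mb() of mem + PCIE_LOCAL_BASE_ADDRESS +
 * RTC_STATE_ADDRESS, matching the open-coded reads of the same
 * registers elsewhere in this file.
 */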

#ifdef QCA_WIFI_3_0
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @hif_ctx: hif context
 * @mem: mapped device memory
 *
 * emulation never goes to sleep
 *
 * Return: true if target is awake
 */
static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
{
    return true;
}
#else
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @scn: hif context
 * @mem: mapped device memory
 *
 * Return: true if the target's clocks are on
 */
static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
{
    uint32_t val;

    if (scn->recovery)
        return false;
    val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
                        + RTC_STATE_ADDRESS);
    return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
}
#endif

#define ATH_PCI_RESET_WAIT_MAX 10       /* ms */
static void hif_pci_device_reset(struct hif_pci_softc *sc)
{
    void __iomem *mem = sc->mem;
    int i;
    uint32_t val;
    struct hif_softc *scn = HIF_GET_SOFTC(sc);

    if (!scn->hostdef)
        return;

    /* NB: Don't check resetok here. This form of reset
     * is integral to correct operation.
     */
    if (!SOC_GLOBAL_RESET_ADDRESS)
        return;

    if (!mem)
        return;

    hif_err("Reset Device");

    /*
     * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
     * writing WAKE_V, the Target may scribble over Host memory!
     */
    A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
                           PCIE_SOC_WAKE_V_MASK);
    for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
        if (hif_targ_is_awake(scn, mem))
            break;
        qdf_mdelay(1);
    }

    /* Put Target, including PCIe, into RESET. */
    val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
    val |= 1;
    A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
    for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
        if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
            RTC_STATE_COLD_RESET_MASK)
            break;
        qdf_mdelay(1);
    }

    /* Pull Target, including PCIe, out of RESET. */
    val &= ~1;
    A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
    for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
        if (!(A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
              RTC_STATE_COLD_RESET_MASK))
            break;
        qdf_mdelay(1);
    }

    A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
                           PCIE_SOC_WAKE_RESET);
}

/* CPU warm reset function
 * Steps:
 * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
 * 2. Clear the FW_INDICATOR_ADDRESS - so Target CPU initializes FW
 *    correctly on WARM reset
 * 3. Clear TARGET CPU LF timer interrupt
 * 4. Reset all CEs to clear any pending CE transactions
 * 5. Warm reset CPU
 */
static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
{
    void __iomem *mem = sc->mem;
    int i;
    uint32_t val;
    uint32_t fw_indicator;
    struct hif_softc *scn = HIF_GET_SOFTC(sc);

    /* NB: Don't check resetok here. This form of reset is
     * integral to correct operation.
     */
    if (!mem)
        return;

    hif_debug("Target Warm Reset");

    /*
     * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
     * writing WAKE_V, the Target may scribble over Host memory!
     */
    A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
                           PCIE_SOC_WAKE_V_MASK);
    for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
        if (hif_targ_is_awake(scn, mem))
            break;
        qdf_mdelay(1);
    }

    /*
     * Disable Pending interrupts
     */
    val = hif_read32_mb(sc, mem +
                        (SOC_CORE_BASE_ADDRESS |
                         PCIE_INTR_CAUSE_ADDRESS));
    hif_debug("Host Intr Cause reg 0x%x: value : 0x%x",
              (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
    /* Target CPU Intr Cause */
    val = hif_read32_mb(sc, mem +
                        (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
    hif_debug("Target CPU Intr Cause 0x%x", val);
    val = hif_read32_mb(sc, mem +
                        (SOC_CORE_BASE_ADDRESS |
                         PCIE_INTR_ENABLE_ADDRESS));
    hif_write32_mb(sc, (mem +
                   (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
    hif_write32_mb(sc, (mem +
                   (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
                   HOST_GROUP0_MASK);

    qdf_mdelay(100);

    /* Clear FW_INDICATOR_ADDRESS */
    if (HAS_FW_INDICATOR) {
        fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
        hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
    }

    /* Clear Target LF Timer interrupts */
    val = hif_read32_mb(sc, mem +
                        (RTC_SOC_BASE_ADDRESS +
                         SOC_LF_TIMER_CONTROL0_ADDRESS));
    hif_debug("addr 0x%x : 0x%x",
              (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
    val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
    hif_write32_mb(sc, mem +
                   (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
                   val);

    /* Reset CE */
    val = hif_read32_mb(sc, mem +
                        (RTC_SOC_BASE_ADDRESS |
                         SOC_RESET_CONTROL_ADDRESS));
    val |= SOC_RESET_CONTROL_CE_RST_MASK;
    hif_write32_mb(sc, (mem +
                   (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
                   val);
    val = hif_read32_mb(sc, mem +
                        (RTC_SOC_BASE_ADDRESS |
                         SOC_RESET_CONTROL_ADDRESS));
    qdf_mdelay(10);

    /* CE unreset */
    val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
    hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
                   SOC_RESET_CONTROL_ADDRESS), val);
    val = hif_read32_mb(sc, mem +
                        (RTC_SOC_BASE_ADDRESS |
                         SOC_RESET_CONTROL_ADDRESS));
    qdf_mdelay(10);

    /* Read Target CPU Intr Cause */
    val = hif_read32_mb(sc, mem +
                        (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
    hif_debug("Target CPU Intr Cause after CE reset 0x%x", val);

    /* CPU warm RESET */
    val = hif_read32_mb(sc, mem +
                        (RTC_SOC_BASE_ADDRESS |
                         SOC_RESET_CONTROL_ADDRESS));
    val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
    hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
                   SOC_RESET_CONTROL_ADDRESS), val);
    val = hif_read32_mb(sc, mem +
                        (RTC_SOC_BASE_ADDRESS |
                         SOC_RESET_CONTROL_ADDRESS));
    hif_debug("RESET_CONTROL after cpu warm reset 0x%x", val);

    qdf_mdelay(100);
    hif_debug("Target Warm reset complete");
}
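
/*
 * Editor's note: the warm-reset sequence above is the workaround selected
 * by CPU_WARM_RESET_WAR near the top of this file; it is used during
 * driver unload in place of SOC_GLOBAL_RESET to avoid the intermittent
 * PCIe data bus error described there.
 */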

#ifndef QCA_WIFI_3_0
/* only applicable to legacy ce */
int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
{
    struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
    struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
    void __iomem *mem = sc->mem;
    uint32_t val;

    if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
        return ATH_ISR_NOSCHED;
    val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
    if (Q_TARGET_ACCESS_END(scn) < 0)
        return ATH_ISR_SCHED;

    hif_debug("FW_INDICATOR register is 0x%x", val);

    if (val & FW_IND_HELPER)
        return 0;

    return 1;
}
#endif

int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
    struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
    uint16_t device_id = 0;
    uint32_t val;
    uint16_t timeout_count = 0;
    struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

    /* Check device ID from PCIe configuration space for link status */
    pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
    if (device_id != sc->devid) {
        hif_err("Device ID does not match (read 0x%x, expect 0x%x)",
                device_id, sc->devid);
        return -EACCES;
    }

    /* Check PCIe local register for bar/memory access */
    val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
                        RTC_STATE_ADDRESS);
    hif_debug("RTC_STATE_ADDRESS is %08x", val);

    /* Try to wake up target if it sleeps */
    hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
                   PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
    hif_debug("PCIE_SOC_WAKE_ADDRESS is %08x",
              hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
                            PCIE_SOC_WAKE_ADDRESS));

    /* Check if target can be woken up */
    while (!hif_targ_is_awake(scn, sc->mem)) {
        if (timeout_count >= PCIE_WAKE_TIMEOUT) {
            hif_err("wake up timeout, %08x, %08x",
                    hif_read32_mb(sc, sc->mem +
                                  PCIE_LOCAL_BASE_ADDRESS +
                                  RTC_STATE_ADDRESS),
                    hif_read32_mb(sc, sc->mem +
                                  PCIE_LOCAL_BASE_ADDRESS +
                                  PCIE_SOC_WAKE_ADDRESS));
            return -EACCES;
        }

        hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
                       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);

        qdf_mdelay(100);
        timeout_count += 100;
    }

    /* Check Power register for SoC internal bus issues */
    val = hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
                        SOC_POWER_REG_OFFSET);
    hif_debug("Power register is %08x", val);

    return 0;
}

/**
 * __hif_pci_dump_registers(): dump other PCI debug registers
 * @scn: struct hif_softc
 *
 * This function dumps pci debug registers. The parent function
 * dumps the copy engine registers before calling this function.
 *
 * Return: void
 */
static void __hif_pci_dump_registers(struct hif_softc *scn)
{
    struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
    void __iomem *mem = sc->mem;
    uint32_t val, i, j;
    uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    uint32_t ce_base;

    if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
        return;

    /* DEBUG_INPUT_SEL_SRC = 0x6 */
    val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                        WLAN_DEBUG_INPUT_SEL_OFFSET);
    val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
    val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
    hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
                   WLAN_DEBUG_INPUT_SEL_OFFSET, val);

    /* DEBUG_CONTROL_ENABLE = 0x1 */
    val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                        WLAN_DEBUG_CONTROL_OFFSET);
    val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
    val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
    hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
                   WLAN_DEBUG_CONTROL_OFFSET, val);

    hif_debug("Debug: inputsel: %x dbgctrl: %x",
              hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                            WLAN_DEBUG_INPUT_SEL_OFFSET),
              hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                            WLAN_DEBUG_CONTROL_OFFSET));

    hif_debug("Debug CE");
    /* Loop CE debug output */
    /* AMBA_DEBUG_BUS_SEL = 0xc */
    val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                        AMBA_DEBUG_BUS_OFFSET);
    val &= ~AMBA_DEBUG_BUS_SEL_MASK;
    val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
    hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
                   val);

    for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
        /* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
        val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
                            CE_WRAPPER_DEBUG_OFFSET);
        val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
        val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
        hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
                       CE_WRAPPER_DEBUG_OFFSET, val);

        hif_debug("ce wrapper: %d amdbg: %x cewdbg: %x",
                  wrapper_idx[i],
                  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                                AMBA_DEBUG_BUS_OFFSET),
                  hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
                                CE_WRAPPER_DEBUG_OFFSET));

        if (wrapper_idx[i] <= 7) {
            for (j = 0; j <= 5; j++) {
                ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
                /* For (j=0~5) write CE_DEBUG_SEL = j */
                val = hif_read32_mb(sc, mem + ce_base +
                                    CE_DEBUG_OFFSET);
                val &= ~CE_DEBUG_SEL_MASK;
                val |= CE_DEBUG_SEL_SET(j);
                hif_write32_mb(sc, mem + ce_base +
                               CE_DEBUG_OFFSET, val);

                /* read (@gpio_athr_wlan_reg)
                 * WLAN_DEBUG_OUT_DATA
                 */
                val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
                                    + WLAN_DEBUG_OUT_OFFSET);
                val = WLAN_DEBUG_OUT_DATA_GET(val);

                hif_debug("module%d: cedbg: %x out: %x",
                          j,
                          hif_read32_mb(sc, mem + ce_base +
                                        CE_DEBUG_OFFSET), val);
            }
        } else {
            /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
            val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                                WLAN_DEBUG_OUT_OFFSET);
            val = WLAN_DEBUG_OUT_DATA_GET(val);
            hif_debug("out: %x", val);
        }
    }

    hif_debug("Debug PCIe:");
    /* Loop PCIe debug output */
    /* Write AMBA_DEBUG_BUS_SEL = 0x1c */
    val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                        AMBA_DEBUG_BUS_OFFSET);
    val &= ~AMBA_DEBUG_BUS_SEL_MASK;
    val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
    hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
                   AMBA_DEBUG_BUS_OFFSET, val);

    for (i = 0; i <= 8; i++) {
        /* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
        val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                            AMBA_DEBUG_BUS_OFFSET);
        val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
        val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
        hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
                       AMBA_DEBUG_BUS_OFFSET, val);

        /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
        val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                            WLAN_DEBUG_OUT_OFFSET);
        val = WLAN_DEBUG_OUT_DATA_GET(val);

        hif_debug("amdbg: %x out: %x %x",
                  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                                WLAN_DEBUG_OUT_OFFSET), val,
                  hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                                WLAN_DEBUG_OUT_OFFSET));
    }

    Q_TARGET_ACCESS_END(scn);
}

/**
 * hif_pci_dump_registers(): dump bus debug registers
 * @hif_ctx: struct hif_softc
 *
 * This function dumps hif bus debug registers
 *
 * Return: 0 for success or error code
 */
int hif_pci_dump_registers(struct hif_softc *hif_ctx)
{
    int status;
    struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);

    status = hif_dump_ce_registers(scn);

    if (status)
        hif_err("Dump CE Registers Failed");

    /* dump non copy engine pci registers */
    __hif_pci_dump_registers(scn);

    return 0;
}

#ifdef HIF_CONFIG_SLUB_DEBUG_ON
/* worker thread to schedule wlan_tasklet in SLUB debug build */
static void reschedule_tasklet_work_handler(void *arg)
{
    struct hif_pci_softc *sc = arg;
    struct hif_softc *scn = HIF_GET_SOFTC(sc);

    if (!scn) {
        hif_err("hif_softc is NULL");
        return;
    }

    if (scn->hif_init_done == false) {
        hif_err("wlan driver is unloaded");
        return;
    }

    tasklet_schedule(&sc->intr_tq);
}

/**
 * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
 * work
 * @sc: HIF PCI Context
 *
 * Return: void
 */
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
{
    qdf_create_work(0, &sc->reschedule_tasklet_work,
                    reschedule_tasklet_work_handler, NULL);
}
#else
static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
#endif /* HIF_CONFIG_SLUB_DEBUG_ON */

void wlan_tasklet(unsigned long data)
{
    struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
    struct hif_softc *scn = HIF_GET_SOFTC(sc);

    if (scn->hif_init_done == false)
        goto end;

    if (qdf_atomic_read(&scn->link_suspended))
        goto end;

    if (!ADRASTEA_BU) {
        hif_fw_interrupt_handler(sc->irq_event, scn);
        if (scn->target_status == TARGET_STATUS_RESET)
            goto end;
    }

end:
    qdf_atomic_set(&scn->tasklet_from_intr, 0);
    qdf_atomic_dec(&scn->active_tasklet_cnt);
}
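
/*
 * Bookkeeping note: wlan_tasklet() is the counterpart of the legacy
 * interrupt handler above. The handler sets tasklet_from_intr and
 * increments active_tasklet_cnt before calling tasklet_schedule(), and
 * the tasklet clears the flag and decrements the count on exit, so the
 * two counters always balance across an interrupt/tasklet round trip.
 */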

/**
 * hif_disable_power_gating() - disable HW power gating
 * @hif_ctx: hif context
 *
 * disables pcie L1 power states
 */
static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
{
    struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
    struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

    if (!scn) {
        hif_err("Could not disable ASPM scn is null");
        return;
    }

    /* Disable ASPM when pkt log is enabled */
    pfrm_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
    pfrm_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
}

/**
 * hif_enable_power_gating() - enable HW power gating
 * @sc: hif pci context
 *
 * enables pcie L1 power states
 */
static void hif_enable_power_gating(struct hif_pci_softc *sc)
{
    if (!sc) {
        hif_err("Could not re-enable ASPM sc is null");
        return;
    }

    /* Re-enable ASPM after firmware/OTP download is complete */
    pfrm_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
}

/**
 * hif_pci_enable_power_management() - enable power management
 * @hif_sc: hif context
 * @is_packet_log_enabled: true if packet log is enabled
 *
 * Enables runtime pm, aspm(PCI.. hif_enable_power_gating) and re-enabling
 * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
 *
 * note: epping mode does not call this function as it does not
 * care about saving power.
 */
void hif_pci_enable_power_management(struct hif_softc *hif_sc,
                                     bool is_packet_log_enabled)
{
    struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
    uint32_t mode;

    if (!pci_ctx) {
        hif_err("hif_ctx null");
        return;
    }

    mode = hif_get_conparam(hif_sc);
    if (mode == QDF_GLOBAL_FTM_MODE) {
        hif_info("Enable power gating for FTM mode");
        hif_enable_power_gating(pci_ctx);
        return;
    }

    hif_pm_runtime_start(hif_sc);

    if (!is_packet_log_enabled)
        hif_enable_power_gating(pci_ctx);

    if (!CONFIG_ATH_PCIE_MAX_PERF &&
        CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
        !ce_srng_based(hif_sc)) {
        /* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
        if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
            hif_err("Failed to set target to sleep");
    }
}

/**
 * hif_pci_disable_power_management() - disable power management
 * @hif_ctx: hif context
 *
 * Currently disables runtime pm. Should be updated to behave
 * if runtime pm is not started. Should be updated to take care
 * of aspm and soc sleep for driver load.
 */
void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
{
    struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

    if (!pci_ctx) {
        hif_err("hif_ctx null");
        return;
    }

    hif_pm_runtime_stop(hif_ctx);
}

void hif_pci_display_stats(struct hif_softc *hif_ctx)
{
    struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

    if (!pci_ctx) {
        hif_err("hif_ctx null");
        return;
    }
    hif_display_ce_stats(hif_ctx);

    hif_print_pci_stats(pci_ctx);
}

void hif_pci_clear_stats(struct hif_softc *hif_ctx)
{
    struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

    if (!pci_ctx) {
        hif_err("hif_ctx null");
        return;
    }
    hif_clear_ce_stats(&pci_ctx->ce_sc);
}

#define ATH_PCI_PROBE_RETRY_MAX 3
/**
 * hif_pci_open(): hif_bus_open
 * @hif_ctx: hif context
 * @bus_type: bus type
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{
    struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

    hif_ctx->bus_type = bus_type;
    hif_pm_runtime_open(hif_ctx);

    qdf_spinlock_create(&sc->irq_lock);

    return hif_ce_open(hif_ctx);
}

/**
 * hif_wake_target_cpu() - wake the target's cpu
 * @scn: hif context
 *
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static void hif_wake_target_cpu(struct hif_softc *scn)
{
    QDF_STATUS rv;
    uint32_t core_ctrl;
    struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

    rv = hif_diag_read_access(hif_hdl,
                              SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
                              &core_ctrl);
    QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
    /* A_INUM_FIRMWARE interrupt to Target CPU */
    core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

    rv = hif_diag_write_access(hif_hdl,
                               SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
                               core_ctrl);
    QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
}

/**
 * soc_wake_reset() - allow the target to go to sleep
 * @scn: hif_softc
 *
 * Clear the force wake register. This is done by
 * hif_sleep_entry and when deferred timer sleep is cancelled.
 */
static void soc_wake_reset(struct hif_softc *scn)
{
    hif_write32_mb(scn, scn->mem +
                   PCIE_LOCAL_BASE_ADDRESS +
                   PCIE_SOC_WAKE_ADDRESS,
                   PCIE_SOC_WAKE_RESET);
}

/**
 * hif_sleep_entry() - gate target sleep
 * @arg: hif context
 *
 * This function is the callback for the sleep timer.
 * Check if the last force awake critical section was at least
 * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago. If it was, allow the
 * target to go to sleep and cancel the sleep timer.
 * Otherwise reschedule the sleep timer.
 */
static void hif_sleep_entry(void *arg)
{
    struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
    struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
    uint32_t idle_ms;

    if (scn->recovery)
        return;

    if (hif_is_driver_unloading(scn))
        return;

    qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
    if (hif_state->fake_sleep) {
        idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
                                            - hif_state->sleep_ticks);
        if (!hif_state->verified_awake &&
            idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
            if (!qdf_atomic_read(&scn->link_suspended)) {
                soc_wake_reset(scn);
                hif_state->fake_sleep = false;
            }
        } else {
            qdf_timer_stop(&hif_state->sleep_timer);
            qdf_timer_start(&hif_state->sleep_timer,
                            HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
        }
    }
    qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
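
/*
 * Illustrative timeline (not from the original source) of the deferred
 * sleep machinery above:
 *
 *     fake_sleep = true, sleep_ticks = now    // sleep is deferred
 *     ... sleep_timer fires -> hif_sleep_entry()
 *     if idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS:
 *         soc_wake_reset() clears SOC_WAKE, target may sleep
 *     else:
 *         timer re-armed for HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS
 *
 * hif_pci_cancel_deferred_target_sleep() short-circuits this by stopping
 * the timer and writing PCIE_SOC_WAKE_RESET directly.
 */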
  1054. #define HIF_HIA_MAX_POLL_LOOP 1000000
  1055. #define HIF_HIA_POLLING_DELAY_MS 10
  1056. #ifdef QCA_HIF_HIA_EXTND
  1057. static void hif_set_hia_extnd(struct hif_softc *scn)
  1058. {
  1059. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
  1060. struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
  1061. uint32_t target_type = tgt_info->target_type;
  1062. hif_info("E");
  1063. if ((target_type == TARGET_TYPE_AR900B) ||
  1064. target_type == TARGET_TYPE_QCA9984 ||
  1065. target_type == TARGET_TYPE_QCA9888) {
  1066. /* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
  1067. * in RTC space
  1068. */
  1069. tgt_info->target_revision
  1070. = CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
  1071. + CHIP_ID_ADDRESS));
  1072. qdf_print("chip_id 0x%x chip_revision 0x%x",
  1073. target_type, tgt_info->target_revision);
  1074. }
  1075. {
  1076. uint32_t flag2_value = 0;
  1077. uint32_t flag2_targ_addr =
  1078. host_interest_item_address(target_type,
  1079. offsetof(struct host_interest_s, hi_skip_clock_init));
  1080. if ((ar900b_20_targ_clk != -1) &&
  1081. (frac != -1) && (intval != -1)) {
  1082. hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1083. &flag2_value);
  1084. qdf_print("\n Setting clk_override");
  1085. flag2_value |= CLOCK_OVERRIDE;
  1086. hif_diag_write_access(hif_hdl, flag2_targ_addr,
  1087. flag2_value);
  1088. qdf_print("\n CLOCK PLL val set %d", flag2_value);
  1089. } else {
  1090. qdf_print("\n CLOCK PLL skipped");
  1091. }
  1092. }
  1093. if (target_type == TARGET_TYPE_AR900B
  1094. || target_type == TARGET_TYPE_QCA9984
  1095. || target_type == TARGET_TYPE_QCA9888) {
  1096. /* for AR9980_2.0, 300 mhz clock is used, right now we assume
  1097. * this would be supplied through module parameters,
  1098. * if not supplied assumed default or same behavior as 1.0.
  1099. * Assume 1.0 clock can't be tuned, reset to defaults
  1100. */
  1101. qdf_print(KERN_INFO
  1102. "%s: setting the target pll frac %x intval %x",
  1103. __func__, frac, intval);
  1104. /* do not touch frac, and int val, let them be default -1,
  1105. * if desired, host can supply these through module params
  1106. */
  1107. if (frac != -1 || intval != -1) {
  1108. uint32_t flag2_value = 0;
  1109. uint32_t flag2_targ_addr;
  1110. flag2_targ_addr =
  1111. host_interest_item_address(target_type,
  1112. offsetof(struct host_interest_s,
  1113. hi_clock_info));
  1114. hif_diag_read_access(hif_hdl,
  1115. flag2_targ_addr, &flag2_value);
  1116. qdf_print("\n ====> FRAC Val %x Address %x", frac,
  1117. flag2_value);
  1118. hif_diag_write_access(hif_hdl, flag2_value, frac);
  1119. qdf_print("\n INT Val %x Address %x",
  1120. intval, flag2_value + 4);
  1121. hif_diag_write_access(hif_hdl,
  1122. flag2_value + 4, intval);
  1123. } else {
  1124. qdf_print(KERN_INFO
  1125. "%s: no frac provided, skipping pre-configuring PLL",
  1126. __func__);
  1127. }
  1128. /* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
  1129. if ((target_type == TARGET_TYPE_AR900B)
  1130. && (tgt_info->target_revision == AR900B_REV_2)
  1131. && ar900b_20_targ_clk != -1) {
  1132. uint32_t flag2_value = 0;
  1133. uint32_t flag2_targ_addr;
  1134. flag2_targ_addr
  1135. = host_interest_item_address(target_type,
  1136. offsetof(struct host_interest_s,
  1137. hi_desired_cpu_speed_hz));
  1138. hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1139. &flag2_value);
  1140. qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
  1141. flag2_value);
  1142. hif_diag_write_access(hif_hdl, flag2_value,
  1143. ar900b_20_targ_clk/*300000000u*/);
  1144. } else if (target_type == TARGET_TYPE_QCA9888) {
  1145. uint32_t flag2_targ_addr;
  1146. if (200000000u != qca9888_20_targ_clk) {
  1147. qca9888_20_targ_clk = 300000000u;
  1148. /* Setting the target clock speed to 300 mhz */
  1149. }
  1150. flag2_targ_addr
  1151. = host_interest_item_address(target_type,
  1152. offsetof(struct host_interest_s,
  1153. hi_desired_cpu_speed_hz));
  1154. hif_diag_write_access(hif_hdl, flag2_targ_addr,
  1155. qca9888_20_targ_clk);
  1156. } else {
  1157. qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
  1158. __func__);
  1159. }
  1160. } else {
  1161. if (frac != -1 || intval != -1) {
  1162. uint32_t flag2_value = 0;
  1163. uint32_t flag2_targ_addr =
  1164. host_interest_item_address(target_type,
  1165. offsetof(struct host_interest_s,
  1166. hi_clock_info));
  1167. hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1168. &flag2_value);
  1169. qdf_print("\n ====> FRAC Val %x Address %x", frac,
  1170. flag2_value);
  1171. hif_diag_write_access(hif_hdl, flag2_value, frac);
  1172. qdf_print("\n INT Val %x Address %x", intval,
  1173. flag2_value + 4);
  1174. hif_diag_write_access(hif_hdl, flag2_value + 4,
  1175. intval);
  1176. }
  1177. }
  1178. }
  1179. #else
  1180. static void hif_set_hia_extnd(struct hif_softc *scn)
  1181. {
  1182. }
  1183. #endif
/**
 * hif_set_hia() - fill out the host interest area
 * @scn: hif context
 *
 * This is replaced by hif_wlan_enable for integrated targets.
 * This fills out the host interest area. The firmware will
 * process these memory addresses when it is first brought out
 * of reset.
 *
 * Return: 0 for success.
 */
static int hif_set_hia(struct hif_softc *scn)
{
	QDF_STATUS rv;
	uint32_t interconnect_targ_addr = 0;
	uint32_t pcie_state_targ_addr = 0;
	uint32_t pipe_cfg_targ_addr = 0;
	uint32_t svc_to_pipe_map = 0;
	uint32_t pcie_config_flags = 0;
	uint32_t flag2_value = 0;
	uint32_t flag2_targ_addr = 0;
#ifdef QCA_WIFI_3_0
	uint32_t host_interest_area = 0;
	uint8_t i;
#else
	uint32_t ealloc_value = 0;
	uint32_t ealloc_targ_addr = 0;
	uint8_t banks_switched = 1;
	uint32_t chip_id;
#endif
	uint32_t pipe_cfg_addr;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;
	uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
	static struct CE_pipe_config *target_ce_config;
	struct service_to_pipe *target_service_to_ce_map;

	hif_info("E");

	hif_get_target_ce_config(scn,
				 &target_ce_config, &target_ce_config_sz,
				 &target_service_to_ce_map,
				 &target_service_to_ce_map_sz,
				 NULL, NULL);

	if (ADRASTEA_BU)
		return 0;

#ifdef QCA_WIFI_3_0
	i = 0;
	while (i < HIF_HIA_MAX_POLL_LOOP) {
		host_interest_area = hif_read32_mb(scn, scn->mem +
						A_SOC_CORE_SCRATCH_0_ADDRESS);
		if ((host_interest_area & 0x01) == 0) {
			qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
			host_interest_area = 0;
			i++;
			if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
				hif_err("poll timeout: %d", i);
		} else {
			host_interest_area &= (~0x01);
			hif_write32_mb(scn, scn->mem + 0x113014, 0);
			break;
		}
	}

	if (i >= HIF_HIA_MAX_POLL_LOOP) {
		hif_err("hia polling timeout");
		return -EIO;
	}

	if (host_interest_area == 0) {
		hif_err("host_interest_area = 0");
		return -EIO;
	}

	interconnect_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t,
				 hi_interconnect_state);

	flag2_targ_addr = host_interest_area +
			offsetof(struct host_interest_area_t, hi_option_flag2);
#else
	interconnect_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_interconnect_state));
	ealloc_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_early_alloc));
	flag2_targ_addr = hif_hia_item_address(target_type,
		offsetof(struct host_interest_s, hi_option_flag2));
#endif
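	/*
	 * At this point the relevant HIA addresses are known either way:
	 * QCA_WIFI_3_0 targets publish the host interest area through the
	 * scratch-register handshake above, while older targets use fixed
	 * per-target HIA item addresses.
	 */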
	/* Supply Target-side CE configuration */
	rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
				  &pcie_state_targ_addr);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("interconnect_targ_addr = 0x%0x, ret = %d",
			interconnect_targ_addr, rv);
		goto done;
	}
	if (pcie_state_targ_addr == 0) {
		rv = QDF_STATUS_E_FAILURE;
		hif_err("pcie state addr is 0");
		goto done;
	}
	pipe_cfg_addr = pcie_state_targ_addr +
			offsetof(struct pcie_state_s,
				 pipe_cfg_addr);
	rv = hif_diag_read_access(hif_hdl,
				  pipe_cfg_addr,
				  &pipe_cfg_targ_addr);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("pipe_cfg_addr = 0x%0x, ret = %d", pipe_cfg_addr, rv);
		goto done;
	}
	if (pipe_cfg_targ_addr == 0) {
		rv = QDF_STATUS_E_FAILURE;
		hif_err("pipe cfg addr is 0");
		goto done;
	}

	rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
				(uint8_t *) target_ce_config,
				target_ce_config_sz);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("write pipe cfg: %d", rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
				  pcie_state_targ_addr +
				  offsetof(struct pcie_state_s,
					   svc_to_pipe_map),
				  &svc_to_pipe_map);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("get svc/pipe map: %d", rv);
		goto done;
	}
	if (svc_to_pipe_map == 0) {
		rv = QDF_STATUS_E_FAILURE;
		hif_err("svc_to_pipe map is 0");
		goto done;
	}

	rv = hif_diag_write_mem(hif_hdl,
				svc_to_pipe_map,
				(uint8_t *) target_service_to_ce_map,
				target_service_to_ce_map_sz);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("write svc/pipe map: %d", rv);
		goto done;
	}

	rv = hif_diag_read_access(hif_hdl,
				  pcie_state_targ_addr +
				  offsetof(struct pcie_state_s,
					   config_flags),
				  &pcie_config_flags);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("get pcie config_flags: %d", rv);
		goto done;
	}
#if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
#else
	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
#endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
	pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
#if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
	pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
#endif
	rv = hif_diag_write_mem(hif_hdl,
				pcie_state_targ_addr +
				offsetof(struct pcie_state_s,
					 config_flags),
				(uint8_t *) &pcie_config_flags,
				sizeof(pcie_config_flags));
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("write pcie config_flags: %d", rv);
		goto done;
	}

#ifndef QCA_WIFI_3_0
	/* configure early allocation */
	ealloc_targ_addr = hif_hia_item_address(target_type,
						offsetof(
							struct host_interest_s,
							hi_early_alloc));

	rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
				  &ealloc_value);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("get early alloc val: %d", rv);
		goto done;
	}

	/* 1 bank is switched to IRAM, except ROME 1.0 */
	ealloc_value |=
		((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
		 HI_EARLY_ALLOC_MAGIC_MASK);

	rv = hif_diag_read_access(hif_hdl,
				  CHIP_ID_ADDRESS |
				  RTC_SOC_BASE_ADDRESS, &chip_id);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("get chip id val: %d", rv);
		goto done;
	}
	if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
		tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
		switch (CHIP_ID_REVISION_GET(chip_id)) {
		case 0x2:       /* ROME 1.3 */
			/* 2 banks are switched to IRAM */
			banks_switched = 2;
			break;
		case 0x4:       /* ROME 2.1 */
		case 0x5:       /* ROME 2.2 */
			banks_switched = 6;
			break;
		case 0x8:       /* ROME 3.0 */
		case 0x9:       /* ROME 3.1 */
		case 0xA:       /* ROME 3.2 */
			banks_switched = 9;
			break;
		case 0x0:       /* ROME 1.0 */
		case 0x1:       /* ROME 1.1 */
		default:
			/* 3 banks are switched to IRAM */
			banks_switched = 3;
			break;
		}
	}
	ealloc_value |=
		((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
		 & HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	rv = hif_diag_write_access(hif_hdl,
				   ealloc_targ_addr,
				   ealloc_value);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("set early alloc val: %d", rv);
		goto done;
	}
#endif
	if ((target_type == TARGET_TYPE_AR900B)
	    || (target_type == TARGET_TYPE_QCA9984)
	    || (target_type == TARGET_TYPE_QCA9888)
	    || (target_type == TARGET_TYPE_AR9888)) {
		hif_set_hia_extnd(scn);
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = hif_hia_item_address(target_type,
					       offsetof(
						       struct host_interest_s,
						       hi_option_flag2));

	rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
				  &flag2_value);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("get option val: %d", rv);
		goto done;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;
	rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
				   flag2_value);
	if (rv != QDF_STATUS_SUCCESS) {
		hif_err("set option val: %d", rv);
		goto done;
	}

	hif_wake_target_cpu(scn);

done:
	return qdf_status_to_os_return(rv);
}
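/*
 * Bring-up ordering note: for BMI-based targets, hif_pci_bus_configure()
 * below configures the copy engines first, then calls hif_set_hia() to
 * stage the host interest area and finally wake the target CPU, which
 * consumes these values on its first pass out of reset.
 */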
/**
 * hif_pci_bus_configure() - configure the pcie bus
 * @hif_sc: pointer to the hif context.
 *
 * Return: 0 for success. nonzero for failure.
 */
int hif_pci_bus_configure(struct hif_softc *hif_sc)
{
	int status = 0;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
	struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);

	hif_ce_prepare_config(hif_sc);

	/* initialize sleep state adjust variables */
	hif_state->keep_awake_count = 0;
	hif_state->fake_sleep = false;
	hif_state->sleep_ticks = 0;

	qdf_timer_init(NULL, &hif_state->sleep_timer,
		       hif_sleep_entry, (void *)hif_state,
		       QDF_TIMER_TYPE_WAKE_APPS);
	hif_state->sleep_timer_init = true;

	status = hif_wlan_enable(hif_sc);
	if (status) {
		hif_err("hif_wlan_enable error: %d", status);
		goto timer_free;
	}

	A_TARGET_ACCESS_LIKELY(hif_sc);

	if ((CONFIG_ATH_PCIE_MAX_PERF ||
	     CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
	    !ce_srng_based(hif_sc)) {
		/*
		 * prevent sleep for the PCIE_AWAKE_WHILE_DRIVER_LOAD feature;
		 * prevent sleep when we want to keep firmware always awake.
		 * note: when we want to keep firmware always awake,
		 * hif_target_sleep_state_adjust will point to a dummy
		 * function, and hif_pci_target_sleep_state_adjust must
		 * be called instead.
		 * note: bus type check is here because AHB bus is reusing
		 * hif_pci_bus_configure code.
		 */
		if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
			if (hif_pci_target_sleep_state_adjust(hif_sc,
					false, true) < 0) {
				status = -EACCES;
				goto disable_wlan;
			}
		}
	}

	/* todo: consider replacing this with an srng field */
	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
	    (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
		hif_sc->per_ce_irq = true;
	}

	status = hif_config_ce(hif_sc);
	if (status)
		goto disable_wlan;

	if (hif_needs_bmi(hif_osc)) {
		status = hif_set_hia(hif_sc);
		if (status)
			goto unconfig_ce;

		hif_debug("hif_set_hia done");
	}

	if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
	     (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
	    (hif_sc->bus_type == QDF_BUS_TYPE_PCI)) {
		hif_debug("Skip irq config for PCI based 8074 target");
	} else {
		status = hif_configure_irq(hif_sc);
		if (status < 0)
			goto unconfig_ce;
	}

	A_TARGET_ACCESS_UNLIKELY(hif_sc);

	return status;

unconfig_ce:
	hif_unconfig_ce(hif_sc);
disable_wlan:
	A_TARGET_ACCESS_UNLIKELY(hif_sc);
	hif_wlan_disable(hif_sc);

timer_free:
	qdf_timer_stop(&hif_state->sleep_timer);
	qdf_timer_free(&hif_state->sleep_timer);
	hif_state->sleep_timer_init = false;

	hif_err("Failed, status: %d", status);
	return status;
}
/**
 * hif_pci_close(): close the hif pci bus
 * @hif_sc: hif context
 *
 * Return: n/a
 */
void hif_pci_close(struct hif_softc *hif_sc)
{
	hif_pm_runtime_close(hif_sc);
	hif_ce_close(hif_sc);
}
#define BAR_NUM 0

static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
				struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	void __iomem *mem;
	int ret = 0;
	uint16_t device_id = 0;
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);

	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != id->device)  {
		hif_err("dev id mismatch, config id = 0x%x, probing id = 0x%x",
			device_id, id->device);
		/* pci link is down, so returning with error code */
		return -EIO;
	}

	/* FIXME: temp. commenting out assign_resource
	 * call for dev_attach to work on 2.6.38 kernel
	 */
#if (!defined(__LINUX_ARM_ARCH__))
	if (pci_assign_resource(pdev, BAR_NUM)) {
		hif_err("pci_assign_resource error");
		return -EIO;
	}
#endif
	if (pci_enable_device(pdev)) {
		hif_err("pci_enable_device error");
		return -EIO;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		hif_err("PCI MMIO reservation error");
		ret = -EIO;
		goto err_region;
	}

#ifdef CONFIG_ARM_LPAE
	/* when CONFIG_ARM_LPAE is enabled, a 64-bit DMA mask must be
	 * set even for 32-bit devices.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		hif_err("Cannot enable 64-bit pci DMA");
		goto err_dma;
	}
	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		hif_err("Cannot enable 64-bit DMA");
		goto err_dma;
	}
#else
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		hif_err("Cannot enable 32-bit pci DMA");
		goto err_dma;
	}
	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		hif_err("Cannot enable 32-bit consistent DMA!");
		goto err_dma;
	}
#endif

	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		hif_err("PCI iomap error");
		ret = -EIO;
		goto err_iomap;
	}

	hif_info("*****BAR is %pK", (void *)mem);

	sc->mem = mem;

	/* Hawkeye emulation specific change */
	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
	    (device_id == RUMIM2M_DEVICE_ID_NODE1) ||
	    (device_id == RUMIM2M_DEVICE_ID_NODE2) ||
	    (device_id == RUMIM2M_DEVICE_ID_NODE3) ||
	    (device_id == RUMIM2M_DEVICE_ID_NODE4) ||
	    (device_id == RUMIM2M_DEVICE_ID_NODE5)) {
		mem = mem + 0x0c000000;
		sc->mem = mem;
		hif_info("Changing PCI mem base to %pK", sc->mem);
	}

	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
	ol_sc->mem = mem;
	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
	sc->pci_enabled = true;
	return ret;

err_iomap:
	pci_clear_master(pdev);
err_dma:
	pci_release_region(pdev, BAR_NUM);
err_region:
	pci_disable_device(pdev);
	return ret;
}
static int hif_enable_pci_pld(struct hif_pci_softc *sc,
			      struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
	sc->pci_enabled = true;
	return 0;
}
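/*
 * hif_pci_deinit_nopld() undoes, in reverse order, what
 * hif_enable_pci_nopld() set up: MSI, the iomap, bus mastering, the
 * MMIO region reservation and finally the PCI device enable.
 */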
static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
{
	pci_disable_msi(sc->pdev);
	pci_iounmap(sc->pdev, sc->mem);
	pci_clear_master(sc->pdev);
	pci_release_region(sc->pdev, BAR_NUM);
	pci_disable_device(sc->pdev);
}

static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}

static void hif_disable_pci(struct hif_pci_softc *sc)
{
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);

	if (!ol_sc) {
		hif_err("ol_sc = NULL");
		return;
	}
	hif_pci_device_reset(sc);
	sc->hif_pci_deinit(sc);

	sc->mem = NULL;
	ol_sc->mem = NULL;
}
static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
{
	int ret = 0;
	int targ_awake_limit = 500;
#ifndef QCA_WIFI_3_0
	uint32_t fw_indicator;
#endif
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	/*
	 * Verify that the Target was started cleanly.
	 * The case where this is most likely is with an AUX-powered
	 * Target and a Host in WoW mode. If the Host crashes,
	 * loses power, or is restarted (without unloading the driver)
	 * then the Target is left (aux) powered and running. On a
	 * subsequent driver load, the Target is in an unexpected state.
	 * We try to catch that here in order to reset the Target and
	 * retry the probe.
	 */
	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
	while (!hif_targ_is_awake(scn, sc->mem)) {
		if (0 == targ_awake_limit) {
			hif_err("target awake timeout");
			ret = -EAGAIN;
			goto end;
		}
		qdf_mdelay(1);
		targ_awake_limit--;
	}

#if PCIE_BAR0_READY_CHECKING
	{
		int wait_limit = 200;

		/* Synchronization point: wait until BAR0 is configured */
		while (wait_limit-- &&
		       !(hif_read32_mb(sc, sc->mem +
				       PCIE_LOCAL_BASE_ADDRESS +
				       PCIE_SOC_RDY_STATUS_ADDRESS)
			 & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
			qdf_mdelay(10);
		}
		if (wait_limit < 0) {
			/* AR6320v1 doesn't support checking of BAR0
			 * configuration; it takes about two seconds for
			 * BAR0 to become ready.
			 */
			hif_debug("AR6320v1 waits two sec for BAR0");
		}
	}
#endif

#ifndef QCA_WIFI_3_0
	fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);

	if (fw_indicator & FW_IND_INITIALIZED) {
		hif_err("Target is in an unknown state. EAGAIN");
		ret = -EAGAIN;
		goto end;
	}
#endif

end:
	return ret;
}
static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
{
	int ret = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	uint32_t target_type = scn->target_info.target_type;

	hif_info("E");

	/* the target does not support MSI, or MSI IRQ setup failed */
	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
	ret = request_irq(sc->pdev->irq,
			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
			  "wlan_pci", sc);
	if (ret) {
		hif_err("request_irq failed, ret: %d", ret);
		goto end;
	}
	scn->wake_irq = sc->pdev->irq;
	/* Use sc->irq instead of sc->pdev->irq;
	 * platform_device pdev doesn't have an irq field.
	 */
	sc->irq = sc->pdev->irq;
	/* Use Legacy PCI Interrupts */
	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
				      PCIE_INTR_ENABLE_ADDRESS),
		       HOST_GROUP0_MASK);
	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
				     PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);

	if ((target_type == TARGET_TYPE_IPQ4019) ||
	    (target_type == TARGET_TYPE_AR900B)  ||
	    (target_type == TARGET_TYPE_QCA9984) ||
	    (target_type == TARGET_TYPE_AR9888) ||
	    (target_type == TARGET_TYPE_QCA9888) ||
	    (target_type == TARGET_TYPE_AR6320V1) ||
	    (target_type == TARGET_TYPE_AR6320V2) ||
	    (target_type == TARGET_TYPE_AR6320V3)) {
		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
			       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
	}
end:
	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
		  "%s: X, ret = %d", __func__, ret);
	return ret;
}
static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
{
	int ret;
	int ce_id, irq, irq_id;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;

	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		return ret;

	/* needs to match the ce_id -> irq data mapping
	 * used in the srng parameter configuration
	 */
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		unsigned int msi_data;

		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
			continue;

		if (!ce_sc->tasklets[ce_id].inited)
			continue;

		irq_id = scn->int_assignment->msi_idx[ce_id];
		msi_data = irq_id + msi_irq_start;
		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);

		hif_pci_ce_irq_remove_affinity_hint(irq);

		hif_debug("%s: (ce_id %d, irq_id %d, msi_data %d, irq %d)",
			  __func__, ce_id, irq_id, msi_data, irq);

		pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
	}

	return ret;
}
void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
{
	int i, j, irq;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		if (hif_ext_group->irq_requested) {
			hif_ext_group->irq_requested = false;
			for (j = 0; j < hif_ext_group->numirq; j++) {
				irq = hif_ext_group->os_irq[j];
				if (scn->irq_unlazy_disable) {
					qdf_dev_clear_irq_status_flags(
							irq,
							QDF_IRQ_DISABLE_UNLAZY);
				}
				pfrm_free_irq(scn->qdf_dev->dev,
					      irq, hif_ext_group);
			}
			hif_ext_group->numirq = 0;
		}
	}
}
/**
 * hif_pci_nointrs(): disable IRQ
 * @scn: struct hif_softc
 *
 * This function stops interrupt(s)
 *
 * Return: none
 */
void hif_pci_nointrs(struct hif_softc *scn)
{
	int i, ret;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	ce_unregister_irq(hif_state, CE_ALL_BITMAP);

	if (scn->request_irq_done == false)
		return;

	hif_pci_deconfigure_grp_irq(scn);

	ret = hif_ce_srng_msi_free_irq(scn);
	if (ret != -EINVAL) {
		/* ce irqs freed in hif_ce_srng_msi_free_irq */
		if (scn->wake_irq)
			pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
		scn->wake_irq = 0;
	} else if (sc->num_msi_intrs > 0) {
		/* MSI interrupt(s) */
		for (i = 0; i < sc->num_msi_intrs; i++)
			free_irq(sc->irq + i, sc);
		sc->num_msi_intrs = 0;
	} else {
		/* Legacy PCI line interrupt
		 * Use sc->irq instead of sc->pdev->irq;
		 * platform_device pdev doesn't have an irq field.
		 */
		free_irq(sc->irq, sc);
	}
	scn->request_irq_done = false;
}
static inline
bool hif_pci_default_link_up(struct hif_target_info *tgt_info)
{
	return ADRASTEA_BU && (tgt_info->target_type != TARGET_TYPE_QCN7605);
}
/**
 * hif_pci_disable_bus(): disable the bus
 * @scn: hif context
 *
 * This function disables the bus
 *
 * Return: none
 */
void hif_pci_disable_bus(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct pci_dev *pdev;
	void __iomem *mem;
	struct hif_target_info *tgt_info = &scn->target_info;

	/* Attach did not succeed, all resources have been
	 * freed in error handler
	 */
	if (!sc)
		return;

	pdev = sc->pdev;
	if (hif_pci_default_link_up(tgt_info)) {
		hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));

		hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
		hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
			       HOST_GROUP0_MASK);
	}

#if defined(CPU_WARM_RESET_WAR)
	/* Currently the CPU warm reset sequence is tested only for
	 * AR9888_REV2. Enable it for AR9888_REV1 once the CPU warm reset
	 * sequence is verified for AR9888_REV1.
	 */
	if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
	    (tgt_info->target_version == AR9887_REV1_VERSION))
		hif_pci_device_warm_reset(sc);
	else
		hif_pci_device_reset(sc);
#else
	hif_pci_device_reset(sc);
#endif
	mem = (void __iomem *)sc->mem;
	if (mem) {
		hif_dump_pipe_debug_count(scn);
		if (scn->athdiag_procfs_inited) {
			athdiag_procfs_remove();
			scn->athdiag_procfs_inited = false;
		}
		sc->hif_pci_deinit(sc);
		scn->mem = NULL;
	}
	hif_info("X");
}
#ifdef FEATURE_RUNTIME_PM
/**
 * hif_pci_get_rpm_ctx() - Map corresponding hif_runtime_pm_ctx
 * @scn: hif context
 *
 * This function will map and return the corresponding
 * hif_runtime_pm_ctx based on pcie interface.
 *
 * Return: struct hif_runtime_pm_ctx pointer
 */
struct hif_runtime_pm_ctx *hif_pci_get_rpm_ctx(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	return &sc->rpm_ctx;
}

/**
 * hif_pci_get_dev() - Map corresponding device structure
 * @scn: hif context
 *
 * This function will map and return the corresponding
 * device structure based on pcie interface.
 *
 * Return: struct device pointer
 */
struct device *hif_pci_get_dev(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	return sc->dev;
}
#endif
#define OL_ATH_PCI_PM_CONTROL 0x44

#ifdef CONFIG_PLD_PCIE_CNSS
/**
 * hif_pci_prevent_linkdown(): prevent or allow pcie linkdown
 * @scn: hif context
 * @flag: true prevents linkdown, false allows
 *
 * Calls into the platform driver to vote against taking down the
 * pcie link.
 *
 * Return: n/a
 */
void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
	int errno;

	hif_info("wlan: %s pcie power collapse", flag ? "disable" : "enable");
	hif_runtime_prevent_linkdown(scn, flag);

	errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
	if (errno)
		hif_err("Failed pld_wlan_pm_control; errno %d", errno);
}
#else
void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
{
}
#endif
/**
 * hif_pci_bus_suspend(): prepare hif for suspend
 * @scn: hif context
 *
 * Return: Errno
 */
int hif_pci_bus_suspend(struct hif_softc *scn)
{
	QDF_STATUS ret;

	hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));

	ret = hif_try_complete_tasks(scn);
	if (QDF_IS_STATUS_ERROR(ret)) {
		hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
		return -EBUSY;
	}

	/* Stop the HIF Sleep Timer */
	hif_cancel_deferred_target_sleep(scn);

	scn->bus_suspended = true;

	return 0;
}
#ifdef PCI_LINK_STATUS_SANITY
/**
 * __hif_check_link_status() - API to check whether the PCIe link is active
 * @scn: HIF Context
 *
 * API reads the PCIe config space to verify whether PCIe link training
 * was successful.
 *
 * Return: Success/Failure
 */
static int __hif_check_link_status(struct hif_softc *scn)
{
	uint16_t dev_id = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);

	if (!sc) {
		hif_err("HIF Bus Context is Invalid");
		return -EINVAL;
	}

	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);

	if (dev_id == sc->devid)
		return 0;

	hif_err("Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
		dev_id);

	scn->recovery = true;

	if (cbk && cbk->set_recovery_in_progress)
		cbk->set_recovery_in_progress(cbk->context, true);
	else
		hif_err("Driver Global Recovery is not set");

	pld_is_pci_link_down(sc->dev);
	return -EACCES;
}
#else
static inline int __hif_check_link_status(struct hif_softc *scn)
{
	return 0;
}
#endif
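/*
 * Note: __hif_check_link_status() is consulted both on bus resume and
 * before re-enabling CE MSI interrupts below; a config-space device-id
 * mismatch is treated as a dead link and triggers recovery.
 */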
#ifdef HIF_BUS_LOG_INFO
bool hif_log_pcie_info(struct hif_softc *scn, uint8_t *data,
		       unsigned int *offset)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct hang_event_bus_info info = {0};
	size_t size;

	if (!sc) {
		hif_err("HIF Bus Context is Invalid");
		return false;
	}

	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &info.dev_id);

	size = sizeof(info);
	QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_BUS_INFO,
			     size - QDF_HANG_EVENT_TLV_HDR_SIZE);

	if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
		return false;

	qdf_mem_copy(data + *offset, &info, size);
	*offset = *offset + size;

	if (info.dev_id == sc->devid)
		return false;

	qdf_recovery_reason_update(QCA_HANG_BUS_FAILURE);
	qdf_get_bus_reg_dump(scn->qdf_dev->dev, data,
			     (QDF_WLAN_HANG_FW_OFFSET - size));
	return true;
}
#endif
/**
 * hif_pci_bus_resume(): prepare hif for resume
 * @scn: hif context
 *
 * Return: Errno
 */
int hif_pci_bus_resume(struct hif_softc *scn)
{
	int errno;

	scn->bus_suspended = false;

	errno = __hif_check_link_status(scn);
	if (errno)
		return errno;

	hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));

	return 0;
}

/**
 * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
 * @scn: hif context
 *
 * Ensure that if we received the wakeup message before the irq
 * was disabled that the message is processed before suspending.
 *
 * Return: 0 on success.
 */
int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
{
	if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
		qdf_atomic_set(&scn->link_suspended, 1);

	hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn));

	return 0;
}

/**
 * hif_pci_bus_resume_noirq() - disarm the wake irq and mark the link resumed
 * @scn: hif context
 *
 * Disable the wake irq that was armed at suspend and clear the link
 * suspended flag now that the PCIe link is back up.
 *
 * Return: 0 on success.
 */
int hif_pci_bus_resume_noirq(struct hif_softc *scn)
{
	hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn));

	/* a vote for link up can come in the middle of the ongoing resume
	 * process. hence, clear the link suspend flag once
	 * hif_bus_resume_noirq() succeeds since the PCIe link is already
	 * resumed by this time
	 */
	qdf_atomic_set(&scn->link_suspended, 0);

	return 0;
}
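/*
 * Illustrative ordering of the four callbacks above across one system
 * suspend/resume cycle (the bus layer drives this; the ordering is a
 * sketch for context, not an API guarantee):
 *
 *	hif_pci_bus_suspend()        -> drain tasks, stop sleep timer
 *	hif_pci_bus_suspend_noirq()  -> mark link suspended, arm wake irq
 *	    ... system sleeps ...
 *	hif_pci_bus_resume_noirq()   -> disarm wake irq, clear link flag
 *	hif_pci_bus_resume()         -> verify link, re-enable irqs
 */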
#if CONFIG_PCIE_64BIT_MSI
static void hif_free_msi_ctx(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = scn->hif_sc;
	struct hif_msi_info *info = &sc->msi_info;
	struct device *dev = scn->qdf_dev->dev;

	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
	info->magic = NULL;
	info->magic_dma = 0;
}
#else
static void hif_free_msi_ctx(struct hif_softc *scn)
{
}
#endif

void hif_pci_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	hif_free_msi_ctx(scn);
	/* Cancel the pending tasklet */
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}
/* Function to reset SoC */
void hif_pci_reset_soc(struct hif_softc *hif_sc)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);

#if defined(CPU_WARM_RESET_WAR)
	/* Currently the CPU warm reset sequence is tested only for
	 * AR9888_REV2. Enable it for AR9888_REV1 once the CPU warm reset
	 * sequence is verified for AR9888_REV1.
	 */
	if (tgt_info->target_version == AR9888_REV2_VERSION)
		hif_pci_device_warm_reset(sc);
	else
		hif_pci_device_reset(sc);
#else
	hif_pci_device_reset(sc);
#endif
}
/**
 * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
 * @sc: HIF PCIe Context
 *
 * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
 *
 * Return: Failure to caller
 */
static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
{
	uint16_t val = 0;
	uint32_t bar = 0;
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
	struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
	struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
	A_target_id_t pci_addr = scn->mem;

	hif_info("keep_awake_count = %d", hif_state->keep_awake_count);

	pfrm_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
	hif_info("PCI Vendor ID = 0x%04x", val);

	pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
	hif_info("PCI Device ID = 0x%04x", val);

	pfrm_read_config_word(sc->pdev, PCI_COMMAND, &val);
	hif_info("PCI Command = 0x%04x", val);

	pfrm_read_config_word(sc->pdev, PCI_STATUS, &val);
	hif_info("PCI Status = 0x%04x", val);

	pfrm_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
	hif_info("PCI BAR 0 = 0x%08x", bar);

	hif_info("SOC_WAKE_ADDR 0x%08x",
		 hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			       PCIE_SOC_WAKE_ADDRESS));
	hif_info("RTC_STATE_ADDR 0x%08x",
		 hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			       RTC_STATE_ADDRESS));

	hif_info("wakeup target");

	if (!cfg->enable_self_recovery)
		QDF_BUG(0);

	scn->recovery = true;

	if (cbk->set_recovery_in_progress)
		cbk->set_recovery_in_progress(cbk->context, true);

	pld_is_pci_link_down(sc->dev);
	return -EACCES;
}
/*
 * For now, we use simple on-demand sleep/wake.
 * Some possible improvements:
 * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
 * (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
 * Careful, though, these functions may be used by
 * interrupt handlers ("atomic")
 * -Don't use host_reg_table for this code; instead use values directly
 * -Use a separate timer to track activity and allow Target to sleep only
 * if it hasn't done anything for a while; may even want to delay some
 * processing for a short while in order to "batch" (e.g.) transmit
 * requests with completion processing into "windows of up time". Costs
 * some performance, but improves power utilization.
 * -On some platforms, it might be possible to eliminate explicit
 * sleep/wakeup. Instead, take a chance that each access works OK. If not,
 * recover from the failure by forcing the Target awake.
 * -Change keep_awake_count to an atomic_t in order to avoid spin lock
 * overhead in some cases. Perhaps this makes more sense when
 * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
 * disabled.
 * -It is possible to compile this code out and simply force the Target
 * to remain awake. That would yield optimal performance at the cost of
 * increased power. See CONFIG_ATH_PCIE_MAX_PERF.
 *
 * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
 */
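/*
 * Typical usage pattern (an illustrative sketch, not an API contract):
 * callers bracket a burst of target register accesses with a wake vote
 * and a matching sleep vote, e.g.
 *
 *	hif_pci_target_sleep_state_adjust(scn, false, true);  <- wake, wait
 *	val = hif_read32_mb(scn, scn->mem + reg);
 *	hif_pci_target_sleep_state_adjust(scn, true, false);  <- allow sleep
 *
 * keep_awake_count below reference-counts these votes; the target may
 * actually sleep only once the count drops back to zero.
 */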
/**
 * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
 * @scn: hif_softc pointer.
 * @sleep_ok: allow the target to go to sleep when true
 * @wait_for_it: when waking, spin until the target is verifiably awake
 *
 * Votes the target in or out of sleep and, if requested, waits for the
 * wakeup to take effect.
 *
 * Return: int
 */
int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
				      bool sleep_ok, bool wait_for_it)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;
	static int max_delay;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	static int debug;

	if (scn->recovery)
		return -EACCES;

	if (qdf_atomic_read(&scn->link_suspended)) {
		hif_err("Invalid access, PCIe link is down");
		debug = true;
		QDF_ASSERT(0);
		return -EACCES;
	}

	if (debug) {
		wait_for_it = true;
		hif_err("Invalid access, PCIe link is suspended");
		QDF_ASSERT(0);
	}

	if (sleep_ok) {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
		hif_state->keep_awake_count--;
		if (hif_state->keep_awake_count == 0) {
			/* Allow sleep */
			hif_state->verified_awake = false;
			hif_state->sleep_ticks = qdf_system_ticks();
		}
		if (hif_state->fake_sleep == false) {
			/* Set the Fake Sleep */
			hif_state->fake_sleep = true;

			/* Start the Sleep Timer */
			qdf_timer_stop(&hif_state->sleep_timer);
			qdf_timer_start(&hif_state->sleep_timer,
					HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
		}
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
	} else {
		qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);

		if (hif_state->fake_sleep) {
			hif_state->verified_awake = true;
		} else {
			if (hif_state->keep_awake_count == 0) {
				/* Force AWAKE */
				hif_write32_mb(sc, pci_addr +
					       PCIE_LOCAL_BASE_ADDRESS +
					       PCIE_SOC_WAKE_ADDRESS,
					       PCIE_SOC_WAKE_V_MASK);
			}
		}
		hif_state->keep_awake_count++;
		qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);

		if (wait_for_it && !hif_state->verified_awake) {
#define PCIE_SLEEP_ADJUST_TIMEOUT 8000  /* 8 ms */
			int tot_delay = 0;
			int curr_delay = 5;

			for (;;) {
				if (hif_targ_is_awake(scn, pci_addr)) {
					hif_state->verified_awake = true;
					break;
				}
				if (!hif_pci_targ_is_present(scn, pci_addr))
					break;
				if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
					return hif_log_soc_wakeup_timeout(sc);

				OS_DELAY(curr_delay);
				tot_delay += curr_delay;

				if (curr_delay < 50)
					curr_delay += 5;
			}

			/*
			 * NB: If Target has to come out of Deep Sleep,
			 * this may take a few msecs. Typically, though,
			 * this delay should be <30us.
			 */
			if (tot_delay > max_delay)
				max_delay = tot_delay;
		}
	}

	if (debug && hif_state->verified_awake) {
		debug = 0;
		hif_err("INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
				      PCIE_INTR_ENABLE_ADDRESS),
			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
				      PCIE_INTR_CAUSE_ADDRESS),
			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
				      CPU_INTR_ADDRESS),
			hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
				      PCIE_INTR_CLR_ADDRESS),
			hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
				      CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
	}

	return 0;
}
#ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
{
	uint32_t value;
	void *addr;

	addr = scn->mem + offset;
	value = hif_read32_mb(scn, addr);

	{
		unsigned long irq_flags;
		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;

		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
		pcie_access_log[idx].is_write = false;
		pcie_access_log[idx].addr = addr;
		pcie_access_log[idx].value = value;
		pcie_access_log_seqnum++;
		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
	}

	return value;
}

void
hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
{
	void *addr;

	addr = scn->mem + offset;
	hif_write32_mb(scn, addr, value);

	{
		unsigned long irq_flags;
		int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;

		spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
		pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
		pcie_access_log[idx].is_write = true;
		pcie_access_log[idx].addr = addr;
		pcie_access_log[idx].value = value;
		pcie_access_log_seqnum++;
		spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
	}
}

/**
 * hif_target_dump_access_log() - dump access log
 *
 * dump access log
 *
 * Return: n/a
 */
void hif_target_dump_access_log(void)
{
	int idx, len, start_idx, cur_idx;
	unsigned long irq_flags;

	spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
	if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
		len = PCIE_ACCESS_LOG_NUM;
		start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
	} else {
		len = pcie_access_log_seqnum;
		start_idx = 0;
	}

	for (idx = 0; idx < len; idx++) {
		cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
		hif_debug("idx:%d sn:%u wr:%d addr:%pK val:%u",
			  idx,
			  pcie_access_log[cur_idx].seqnum,
			  pcie_access_log[cur_idx].is_write,
			  pcie_access_log[cur_idx].addr,
			  pcie_access_log[cur_idx].value);
	}

	pcie_access_log_seqnum = 0;
	spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
}
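/*
 * The access log above is a fixed-size ring: seqnum increases
 * monotonically and seqnum % PCIE_ACCESS_LOG_NUM selects the slot, so
 * once the ring has wrapped, the dump starts from the oldest surviving
 * entry rather than from index zero.
 */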
#endif

#ifndef HIF_AHB
int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
{
	QDF_BUG(0);
	return -EINVAL;
}

int hif_ahb_configure_irq(struct hif_pci_softc *sc)
{
	QDF_BUG(0);
	return -EINVAL;
}
#endif

static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;

	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}

extern const char *ce_name[];

static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
{
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);

	return pci_scn->ce_msi_irq_num[ce_id];
}
/* hif_ce_srng_msi_irq_disable() - disable the irq for msi
 * @hif_sc: hif context
 * @ce_id: which ce to disable copy complete interrupts for
 *
 * Since MSI interrupts are not level based, the system can function
 * without disabling these interrupts. Interrupt mitigation can be
 * added here for better system performance.
 */
static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
{
	pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
				hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}

static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
{
	if (__hif_check_link_status(hif_sc))
		return;

	pfrm_enable_irq(hif_sc->qdf_dev->dev,
			hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}

static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
{
	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}

static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
{
	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}
int hif_ce_msi_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
{
	int ret = 0;
	int irq;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	unsigned int msi_data;
	int irq_id;
	uint32_t msi_irq_start;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
	int pci_slot;

	if (ce_id >= CE_COUNT_MAX)
		return -EINVAL;

	/* do ce irq assignments */
	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret) {
		hif_err("Failed to get CE msi config");
		return -EINVAL;
	}

	irq_id = scn->int_assignment->msi_idx[ce_id];
	/* needs to match the ce_id -> irq data mapping
	 * used in the srng parameter configuration
	 */
	pci_slot = hif_get_pci_slot(scn);
	msi_data = irq_id + msi_irq_start;
	irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
	hif_debug("%s: (ce_id %d, irq_id %d, msi_data %d, irq %d tasklet %pK)",
		  __func__, ce_id, irq_id, msi_data, irq,
		  &ce_sc->tasklets[ce_id]);

	/* implies the ce is also initialized */
	if (!ce_sc->tasklets[ce_id].inited)
		goto skip;

	pci_sc->ce_msi_irq_num[ce_id] = irq;
	ret = pfrm_request_irq(scn->qdf_dev->dev,
			       irq, hif_ce_interrupt_handler, IRQF_SHARED,
			       ce_irqname[pci_slot][ce_id],
			       &ce_sc->tasklets[ce_id]);
	if (ret)
		return -EINVAL;

skip:
	return ret;
}
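/*
 * Mapping sketch for the function above (the concrete values are
 * illustrative assumptions, not fixed configuration): with
 * msi_irq_start = 1 and int_assignment->msi_idx[] = {0, 1, 1, ...},
 * CE0 requests the IRQ for MSI vector 1 while CE1 and CE2 share
 * vector 2; pld_get_msi_irq() translates each vector into the Linux
 * IRQ number that is actually requested.
 */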
static int hif_ce_msi_configure_irq(struct hif_softc *scn)
{
	int ret;
	int ce_id, irq;
	uint32_t msi_data_start;
	uint32_t msi_data_count;
	uint32_t msi_irq_start;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
	struct CE_attr *host_ce_conf = ce_sc->host_ce_config;

	if (!scn->disable_wake_irq) {
		/* do wake irq assignment */
		ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
						  &msi_data_count,
						  &msi_data_start,
						  &msi_irq_start);
		if (ret)
			return ret;

		scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev,
						msi_irq_start);
		scn->wake_irq_type = HIF_PM_MSI_WAKE;

		ret = pfrm_request_irq(scn->qdf_dev->dev, scn->wake_irq,
				       hif_wake_interrupt_handler,
				       IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
		if (ret)
			return ret;
	}

	/* do ce irq assignments */
	ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
					  &msi_data_count, &msi_data_start,
					  &msi_irq_start);
	if (ret)
		goto free_wake_irq;

	if (ce_srng_based(scn)) {
		scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
		scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
	} else {
		scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
		scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
	}
	scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;

	/* needs to match the ce_id -> irq data mapping
	 * used in the srng parameter configuration
	 */
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
			continue;

		if (host_ce_conf[ce_id].flags & CE_ATTR_INIT_ON_DEMAND)
			continue;

		ret = hif_ce_msi_configure_irq_by_ceid(scn, ce_id);
		if (ret)
			goto free_irq;
	}

	return ret;

free_irq:
	/* the request_irq for the last ce_id failed so skip it. */
	while (ce_id > 0 && ce_id < scn->ce_count) {
		unsigned int msi_data;

		ce_id--;
		msi_data = (ce_id % msi_data_count) + msi_irq_start;
		irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
		pfrm_free_irq(scn->qdf_dev->dev,
			      irq, &ce_sc->tasklets[ce_id]);
	}

free_wake_irq:
	if (!scn->disable_wake_irq) {
		/* must free with the same context the irq was requested
		 * with (scn), so the shared-irq bookkeeping matches
		 */
		pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
		scn->wake_irq = 0;
		scn->wake_irq_type = HIF_PM_INVALID_WAKE;
	}

	return ret;
}
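/*
 * Note the unwind order above: on failure, free_irq releases only the
 * CE IRQs that were already requested (walking ce_id back down), and
 * free_wake_irq then releases the wake interrupt, mirroring the order
 * in which they were set up.
 */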
static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
{
	int i;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	for (i = 0; i < hif_ext_group->numirq; i++)
		pfrm_disable_irq_nosync(scn->qdf_dev->dev,
					hif_ext_group->os_irq[i]);
}

static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
{
	int i;
	struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);

	for (i = 0; i < hif_ext_group->numirq; i++)
		pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
}

/**
 * hif_pci_get_irq_name() - get irqname
 * @irq_no: irq number
 *
 * This function maps an irq number to its irq name.
 *
 * Return: irq name
 */
const char *hif_pci_get_irq_name(int irq_no)
{
	return "pci-dummy";
}
#ifdef HIF_CPU_PERF_AFFINE_MASK
void hif_pci_irq_set_affinity_hint(
	struct hif_exec_context *hif_ext_group)
{
	int i, ret;
	unsigned int cpus;
	bool mask_set = false;

	for (i = 0; i < hif_ext_group->numirq; i++)
		qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);

	for (i = 0; i < hif_ext_group->numirq; i++) {
		qdf_for_each_online_cpu(cpus) {
			if (qdf_topology_physical_package_id(cpus) ==
			    CPU_CLUSTER_TYPE_PERF) {
				qdf_cpumask_set_cpu(cpus,
						    &hif_ext_group->
						    new_cpu_mask[i]);
				mask_set = true;
			}
		}
	}

	for (i = 0; i < hif_ext_group->numirq; i++) {
		if (mask_set) {
			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
						  IRQ_NO_BALANCING, 0);
			ret = qdf_dev_set_irq_affinity(
					hif_ext_group->os_irq[i],
					(struct qdf_cpu_mask *)
					&hif_ext_group->new_cpu_mask[i]);
			qdf_dev_modify_irq_status(hif_ext_group->os_irq[i],
						  0, IRQ_NO_BALANCING);
			if (ret)
				qdf_err("Set affinity %*pbl fails for IRQ %d",
					qdf_cpumask_pr_args(&hif_ext_group->
							    new_cpu_mask[i]),
					hif_ext_group->os_irq[i]);
			else
				qdf_debug("Set affinity %*pbl for IRQ: %d",
					  qdf_cpumask_pr_args(&hif_ext_group->
							      new_cpu_mask[i]),
					  hif_ext_group->os_irq[i]);
		} else {
			qdf_err("Offline CPU: Set affinity fails for IRQ: %d",
				hif_ext_group->os_irq[i]);
		}
	}
}

void hif_pci_ce_irq_set_affinity_hint(
	struct hif_softc *scn)
{
	int ret;
	unsigned int cpus;
	struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
	struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
	struct CE_attr *host_ce_conf;
	int ce_id;
	qdf_cpu_mask ce_cpu_mask;

	host_ce_conf = ce_sc->host_ce_config;
	qdf_cpumask_clear(&ce_cpu_mask);

	qdf_for_each_online_cpu(cpus) {
		if (qdf_topology_physical_package_id(cpus) ==
		    CPU_CLUSTER_TYPE_PERF) {
			qdf_cpumask_set_cpu(cpus,
					    &ce_cpu_mask);
		} else {
			hif_err_rl("Unable to set cpu mask for offline CPU %d",
				   cpus);
		}
	}
	if (qdf_cpumask_empty(&ce_cpu_mask)) {
		hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity");
		return;
	}
	for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
		if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
			continue;
		qdf_cpumask_clear(&pci_sc->ce_irq_cpu_mask[ce_id]);
		qdf_cpumask_copy(&pci_sc->ce_irq_cpu_mask[ce_id],
				 &ce_cpu_mask);
		qdf_dev_modify_irq_status(pci_sc->ce_msi_irq_num[ce_id],
					  IRQ_NO_BALANCING, 0);
		ret = qdf_dev_set_irq_affinity(
			pci_sc->ce_msi_irq_num[ce_id],
			(struct qdf_cpu_mask *)&pci_sc->ce_irq_cpu_mask[ce_id]);
		qdf_dev_modify_irq_status(pci_sc->ce_msi_irq_num[ce_id],
					  0, IRQ_NO_BALANCING);
		if (ret)
			hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
				   qdf_cpumask_pr_args(
					   &pci_sc->ce_irq_cpu_mask[ce_id]),
				   pci_sc->ce_msi_irq_num[ce_id]);
		else
			hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
				     qdf_cpumask_pr_args(
					     &pci_sc->ce_irq_cpu_mask[ce_id]),
				     pci_sc->ce_msi_irq_num[ce_id]);
	}
}
#endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
void hif_pci_config_irq_affinity(struct hif_softc *scn)
{
	int i;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	struct hif_exec_context *hif_ext_group;

	hif_core_ctl_set_boost(true);
	/* Set IRQ affinity for WLAN DP interrupts */
	for (i = 0; i < hif_state->hif_num_extgroup; i++) {
		hif_ext_group = hif_state->hif_ext_group[i];
		hif_pci_irq_set_affinity_hint(hif_ext_group);
	}
	/* Set IRQ affinity for CE interrupts */
	hif_pci_ce_irq_set_affinity_hint(scn);
}
int hif_pci_configure_grp_irq(struct hif_softc *scn,
			      struct hif_exec_context *hif_ext_group)
{
	int ret = 0;
	int irq = 0;
	int j;
	int pci_slot;

	hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
	hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
	hif_ext_group->irq_name = &hif_pci_get_irq_name;
	hif_ext_group->work_complete = &hif_dummy_grp_done;

	pci_slot = hif_get_pci_slot(scn);
	for (j = 0; j < hif_ext_group->numirq; j++) {
		irq = hif_ext_group->irq[j];
		if (scn->irq_unlazy_disable)
			qdf_dev_set_irq_status_flags(irq,
						     QDF_IRQ_DISABLE_UNLAZY);

		hif_debug("request_irq = %d for grp %d",
			  irq, hif_ext_group->grp_id);
		ret = pfrm_request_irq(
				scn->qdf_dev->dev, irq,
				hif_ext_group_interrupt_handler,
				IRQF_SHARED | IRQF_NO_SUSPEND,
				dp_irqname[pci_slot][hif_ext_group->grp_id],
				hif_ext_group);
		if (ret) {
			hif_err("request_irq failed ret = %d", ret);
			return -EFAULT;
		}
		hif_ext_group->os_irq[j] = irq;
	}
	hif_ext_group->irq_requested = true;
	return 0;
}
#if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
     defined(QCA_WIFI_WCN7850))
uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
			    uint32_t offset)
{
	return hal_read32_mb(hif_sc->hal_soc, offset);
}

void hif_pci_reg_write32(struct hif_softc *hif_sc,
			 uint32_t offset,
			 uint32_t value)
{
	hal_write32_mb(hif_sc->hal_soc, offset, value);
}
#else
/* TODO: Need to implement other chips carefully */
uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
			    uint32_t offset)
{
	return 0;
}

void hif_pci_reg_write32(struct hif_softc *hif_sc,
			 uint32_t offset,
			 uint32_t value)
{
}
#endif

/**
 * hif_configure_irq() - configure interrupts
 * @scn: hif control structure
 *
 * Configures MSI interrupts if available, otherwise falls back to the
 * target-specific legacy (line) interrupt setup.
 *
 * Return: 0 for success
 */
int hif_configure_irq(struct hif_softc *scn)
{
	int ret = 0;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	hif_info("E");

	if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
		scn->request_irq_done = false;
		return 0;
	}

	hif_init_reschedule_tasklet_work(sc);

	ret = hif_ce_msi_configure_irq(scn);
	if (ret == 0)
		goto end;

	switch (scn->target_info.target_type) {
	case TARGET_TYPE_IPQ4019:
		ret = hif_ahb_configure_legacy_irq(sc);
		break;
	case TARGET_TYPE_QCA8074:
	case TARGET_TYPE_QCA8074V2:
	case TARGET_TYPE_QCA6018:
	case TARGET_TYPE_QCA5018:
		ret = hif_ahb_configure_irq(sc);
		break;
	default:
		ret = hif_pci_configure_legacy_irq(sc);
		break;
	}
	if (ret < 0) {
		hif_err("legacy irq configuration failed, ret = %d", ret);
		return ret;
	}
end:
	scn->request_irq_done = true;
	return 0;
}

/**
 * hif_trigger_timer_irq() - trigger an interrupt on LF_TIMER 0
 * @scn: hif control structure
 *
 * Sets the IRQ bit in the LF Timer status address to wake a
 * Peregrine/Swift target stuck in a polling loop in pcie_address_config
 * in FW.
 *
 * Return: none
 */
static void hif_trigger_timer_irq(struct hif_softc *scn)
{
	int tmp;
	/* Trigger IRQ on Peregrine/Swift by setting
	 * IRQ Bit of LF_TIMER 0
	 */
	tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
			    SOC_LF_TIMER_STATUS0_ADDRESS));
	/* Set Raw IRQ Bit */
	tmp |= 1;
	/* SOC_LF_TIMER_STATUS0 */
	hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
		       SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
}

/**
 * hif_target_sync() - ensure the target is ready
 * @scn: hif control structure
 *
 * Informs FW that we plan to use legacy interrupts so that it can begin
 * booting, and waits until the FW finishes booting before continuing.
 * Should be called before trying to write to the target's other
 * registers for the first time.
 *
 * Return: none
 */
static void hif_target_sync(struct hif_softc *scn)
{
	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
			PCIE_INTR_ENABLE_ADDRESS),
		       PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	/* read to flush pcie write */
	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
			    PCIE_INTR_ENABLE_ADDRESS));

	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS,
		       PCIE_SOC_WAKE_V_MASK);
	while (!hif_targ_is_awake(scn, scn->mem))
		;

	if (HAS_FW_INDICATOR) {
		int wait_limit = 500;
		int fw_ind = 0;
		int retry_count = 0;
		uint32_t target_type = scn->target_info.target_type;
fw_retry:
		hif_info("Loop checking FW signal");
		while (1) {
			fw_ind = hif_read32_mb(scn, scn->mem +
					       FW_INDICATOR_ADDRESS);
			if (fw_ind & FW_IND_INITIALIZED)
				break;
			if (wait_limit-- < 0)
				break;
			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
				       PCIE_INTR_ENABLE_ADDRESS),
				       PCIE_INTR_FIRMWARE_MASK |
				       PCIE_INTR_CE_MASK_ALL);
			/* read to flush pcie write */
			(void)hif_read32_mb(scn, scn->mem +
			    (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));

			qdf_mdelay(10);
		}
		if (wait_limit < 0) {
			if (target_type == TARGET_TYPE_AR9888 &&
			    retry_count++ < 2) {
				hif_trigger_timer_irq(scn);
				wait_limit = 500;
				goto fw_retry;
			}
			hif_info("FW signal timed out");
			qdf_assert_always(0);
		} else {
			hif_info("Got FW signal, retries = %x",
				 500 - wait_limit);
		}
	}
	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
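
/**
 * hif_pci_get_soc_info_pld() - populate SoC info from the PLD layer
 * @sc: pci hif context
 * @dev: device structure
 *
 * Copies the BAR virtual/physical addresses and the SoC id reported by
 * pld_get_soc_info() into the hif context.
 */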
static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
				     struct device *dev)
{
	struct pld_soc_info info;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	pld_get_soc_info(dev, &info);
	sc->mem = info.v_addr;
	sc->ce_sc.ol_sc.mem = info.v_addr;
	sc->ce_sc.ol_sc.mem_pa = info.p_addr;
	scn->target_info.target_version = info.soc_id;
	scn->target_info.target_revision = 0;
}

static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
				       struct device *dev)
{}
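
/**
 * hif_is_pld_based_target() - check if the device is managed by PLD
 * @sc: pci hif context
 * @device_id: PCI device id
 *
 * Return: true if a platform driver is registered and @device_id is one
 *         of the PLD-based targets, false otherwise
 */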
static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
				    int device_id)
{
	if (!pld_have_platform_driver_support(sc->dev))
		return false;

	switch (device_id) {
	case QCA6290_DEVICE_ID:
	case QCN9000_DEVICE_ID:
	case QCN9224_DEVICE_ID:
	case QCA6290_EMULATION_DEVICE_ID:
	case QCA6390_DEVICE_ID:
	case QCA6490_DEVICE_ID:
	case AR6320_DEVICE_ID:
	case QCN7605_DEVICE_ID:
	case WCN7850_DEVICE_ID:
		return true;
	}
	return false;
}
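
/**
 * hif_pci_init_deinit_ops_attach() - attach the pci enable/deinit ops
 * @sc: pci hif context
 * @device_id: PCI device id
 *
 * Selects the PLD or non-PLD variants of the enable_pci, pci_deinit and
 * get_soc_info callbacks based on hif_is_pld_based_target().
 */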
static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
					   int device_id)
{
	if (hif_is_pld_based_target(sc, device_id)) {
		sc->hif_enable_pci = hif_enable_pci_pld;
		sc->hif_pci_deinit = hif_pci_deinit_pld;
		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
	} else {
		sc->hif_enable_pci = hif_enable_pci_nopld;
		sc->hif_pci_deinit = hif_pci_deinit_nopld;
		sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
	}
}
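
/**
 * hif_pci_init_reg_windowing_support() - configure register windowing
 * @sc: pci hif context
 * @target_type: target type
 *
 * Enables register windowing (and creates the access lock) for
 * QCN7605/QCA6390/QCA6490; it stays disabled for every other target or
 * when HIF_REG_WINDOW_SUPPORT is not compiled in.
 */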
#ifdef HIF_REG_WINDOW_SUPPORT
static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
					       u32 target_type)
{
	switch (target_type) {
	case TARGET_TYPE_QCN7605:
	case TARGET_TYPE_QCA6490:
	case TARGET_TYPE_QCA6390:
		sc->use_register_windowing = true;
		qdf_spinlock_create(&sc->register_access_lock);
		sc->register_window = 0;
		break;
	default:
		sc->use_register_windowing = false;
	}
}
#else
static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
					       u32 target_type)
{
	sc->use_register_windowing = false;
}
#endif

/**
 * hif_pci_enable_bus() - enable the pci bus
 * @ol_sc: soft_sc struct
 * @dev: device pointer
 * @bdev: bus dev pointer
 * @bid: bus id pointer
 * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
 *
 * This function enables the bus.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
			      struct device *dev, void *bdev,
			      const struct hif_bus_id *bid,
			      enum hif_enable_type type)
{
	int ret = 0;
	uint32_t hif_type;
	uint32_t target_type = TARGET_TYPE_UNKNOWN;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
	uint16_t revision_id = 0;
	int probe_again = 0;
	struct pci_dev *pdev = bdev;
	const struct pci_device_id *id = (const struct pci_device_id *)bid;
	struct hif_target_info *tgt_info;

	if (!ol_sc) {
		hif_err("hif_ctx is NULL");
		return QDF_STATUS_E_NOMEM;
	}
	/* Following print is used by various tools to identify
	 * WLAN SOC (e.g. crash dump analysis and reporting tool).
	 */
	hif_info("con_mode = 0x%x, WLAN_SOC_device_id = 0x%x",
		 hif_get_conparam(ol_sc), id->device);

	sc->pdev = pdev;
	sc->dev = &pdev->dev;
	sc->devid = id->device;
	sc->cacheline_sz = dma_get_cache_alignment();
	tgt_info = hif_get_target_info_handle(hif_hdl);
	hif_pci_init_deinit_ops_attach(sc, id->device);
	sc->hif_pci_get_soc_info(sc, dev);
again:
	ret = sc->hif_enable_pci(sc, pdev, id);
	if (ret < 0) {
		hif_err("hif_enable_pci error = %d", ret);
		goto err_enable_pci;
	}
	hif_info("hif_enable_pci done");

	/* Temporary FIX: disable ASPM on peregrine.
	 * Will be removed after the OTP is programmed
	 */
	hif_disable_power_gating(hif_hdl);

	device_disable_async_suspend(&pdev->dev);
	pfrm_read_config_word(pdev, 0x08, &revision_id);
	ret = hif_get_device_type(id->device, revision_id,
				  &hif_type, &target_type);
	if (ret < 0) {
		hif_err("Invalid device id/revision_id");
		goto err_tgtstate;
	}
	hif_info("hif_type = 0x%x, target_type = 0x%x",
		 hif_type, target_type);

	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);

	hif_pci_init_reg_windowing_support(sc, target_type);

	tgt_info->target_type = target_type;

	/*
	 * Disable unlazy interrupt registration for QCN9000
	 */
	if (target_type == TARGET_TYPE_QCN9000 ||
	    target_type == TARGET_TYPE_QCN9224)
		ol_sc->irq_unlazy_disable = 1;

	if (ce_srng_based(ol_sc)) {
		hif_info("Skip tgt_wake up for srng devices");
	} else {
		ret = hif_pci_probe_tgt_wakeup(sc);
		if (ret < 0) {
			hif_err("hif_pci_probe_tgt_wakeup error = %d", ret);
			if (ret == -EAGAIN)
				probe_again++;
			goto err_tgtstate;
		}
		hif_info("hif_pci_probe_tgt_wakeup done");
	}

	if (!ol_sc->mem_pa) {
		hif_err("BAR0 uninitialized");
		ret = -EIO;
		goto err_tgtstate;
	}

	if (!ce_srng_based(ol_sc)) {
		hif_target_sync(ol_sc);
		if (hif_pci_default_link_up(tgt_info))
			hif_vote_link_up(hif_hdl);
	}

	return QDF_STATUS_SUCCESS;

err_tgtstate:
	hif_disable_pci(sc);
	sc->pci_enabled = false;
	hif_err("hif_disable_pci done");
	return QDF_STATUS_E_ABORTED;

err_enable_pci:
	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
		int delay_time;

		hif_info("pci reprobe");
		/* quadratic backoff, clamped to at least 100 ms per retry */
		delay_time = max(100, 10 * (probe_again * probe_again));
		qdf_mdelay(delay_time);
		goto again;
	}
	return qdf_status_from_os_return(ret);
}

/**
 * hif_pci_irq_enable() - enable the copy engine interrupt
 * @scn: hif_softc
 * @ce_id: copy engine id
 *
 * Clears @ce_id from the CE interrupt summary and, once no CE interrupt
 * remains outstanding, re-enables the legacy PCI line interrupts.
 *
 * Return: void
 */
void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
{
	uint32_t tmp = 1 << ce_id;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	qdf_spin_lock_irqsave(&sc->irq_lock);
	scn->ce_irq_summary &= ~tmp;
	if (scn->ce_irq_summary == 0) {
		/* Enable Legacy PCI line interrupts */
		if (LEGACY_INTERRUPTS(sc) &&
		    (scn->target_status != TARGET_STATUS_RESET) &&
		    (!qdf_atomic_read(&scn->link_suspended))) {
			hif_write32_mb(scn, scn->mem +
				       (SOC_CORE_BASE_ADDRESS |
					PCIE_INTR_ENABLE_ADDRESS),
				       HOST_GROUP0_MASK);
			hif_read32_mb(scn, scn->mem +
				      (SOC_CORE_BASE_ADDRESS |
				       PCIE_INTR_ENABLE_ADDRESS));
		}
	}
	if (scn->hif_init_done == true)
		Q_TARGET_ACCESS_END(scn);
	qdf_spin_unlock_irqrestore(&sc->irq_lock);

	/* check for missed firmware crash */
	hif_fw_interrupt_handler(0, scn);
}

/**
 * hif_pci_irq_disable() - disable the copy engine interrupt
 * @scn: hif_softc
 * @ce_id: copy engine id
 *
 * Only applicable to the legacy copy engine. For Rome we only need to
 * wake up the target; target access is maintained until interrupts are
 * re-enabled.
 *
 * Return: void
 */
void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
{
	Q_TARGET_ACCESS_BEGIN(scn);
}
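
/**
 * hif_pci_legacy_map_ce_to_irq() - map a copy engine id to its OS irq
 * @scn: hif context
 * @ce_id: copy engine id
 *
 * Return: the single interrupt line shared by all legacy copy engines
 */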
int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
{
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);

	/* legacy case only has one irq */
	return pci_scn->irq;
}
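
/**
 * hif_pci_addr_in_boundary() - sanity check a register/memory offset
 * @scn: hif context
 * @offset: offset to be accessed
 *
 * Return: 0 if @offset lies within DRAM or the mapped BAR, -EINVAL
 *         otherwise; always 0 for targets where the check is skipped
 */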
int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
	struct hif_target_info *tgt_info;

	tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
	if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
	    tgt_info->target_type == TARGET_TYPE_QCA6390 ||
	    tgt_info->target_type == TARGET_TYPE_QCA6490 ||
	    tgt_info->target_type == TARGET_TYPE_QCN7605 ||
	    tgt_info->target_type == TARGET_TYPE_QCA8074 ||
	    tgt_info->target_type == TARGET_TYPE_WCN7850) {
		/*
		 * Need to consider offset's memtype for QCA6290/QCA8074,
		 * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
		 * well initialized/defined.
		 */
		return 0;
	}

	if ((offset >= DRAM_BASE_ADDRESS &&
	     offset <= DRAM_BASE_ADDRESS + DRAM_SIZE) ||
	    (offset + sizeof(unsigned int) <= sc->mem_len)) {
		return 0;
	}

	hif_info("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)",
		 offset, (uint32_t)(offset + sizeof(unsigned int)),
		 sc->mem_len);

	return -EINVAL;
}

/**
 * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
 * @scn: hif context
 *
 * Return: true if soc needs driver bmi otherwise false
 */
bool hif_pci_needs_bmi(struct hif_softc *scn)
{
	return !ce_srng_based(scn);
}

#ifdef FORCE_WAKE
#ifdef DEVICE_FORCE_WAKE_ENABLE
/*
 * HIF_POLL_UMAC_WAKE - scratch register value polled to confirm that
 * the UMAC is powered up. Keep this in sync with the value defined
 * by FW.
 */
#define HIF_POLL_UMAC_WAKE 0x2

/**
 * hif_force_wake_request() - enable the force wake recipe
 * @hif_handle: HIF handle
 *
 * Brings MHI to the M0 state and force wakes the UMAC by asserting the
 * soc wake register, then polls the scratch register until it reads
 * HIF_POLL_UMAC_WAKE. The polled value may be 0x1 while the UMAC is
 * still powered down.
 *
 * Return: 0 if the handshake succeeds, -EINVAL if the wake request
 *         cannot be sent, or -ETIMEDOUT if the handshake times out
 */
int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
{
	uint32_t timeout, value;
	struct hif_softc *scn = (struct hif_softc *)hif_handle;
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);

	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);

	if (qdf_in_interrupt())
		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
	else
		timeout = 0;

	if (pld_force_wake_request_sync(scn->qdf_dev->dev, timeout)) {
		hif_err("force wake request send failed");
		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
		return -EINVAL;
	}

	/* If device's M1 state-change event races here, it can be ignored,
	 * as the device is expected to immediately move from M2 to M0
	 * without entering low power state.
	 */
	if (!pld_is_device_awake(scn->qdf_dev->dev))
		hif_info("state-change event races, ignore");

	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
	hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 1);
	HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
	/*
	 * do not reset the timeout
	 * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms
	 */
	timeout = 0;
	do {
		value = hif_read32_mb(
				scn, scn->mem +
				PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
		hif_info("pcie scratch reg read value = %x", value);
		if (value == HIF_POLL_UMAC_WAKE)
			break;
		qdf_mdelay(FORCE_WAKE_DELAY_MS);
		timeout += FORCE_WAKE_DELAY_MS;
	} while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);

	if (value != HIF_POLL_UMAC_WAKE) {
		hif_err("failed force wake handshake mechanism");
		HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
		return -ETIMEDOUT;
	}

	HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);
	return 0;
}
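
/**
 * hif_force_wake_release() - release the force wake votes
 * @hif_handle: HIF handle
 *
 * Drops the MHI force wake vote and de-asserts the UMAC soc wake
 * register written by hif_force_wake_request().
 *
 * Return: 0 on success, error code from pld_force_wake_release()
 *         otherwise
 */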
int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
{
	int ret;
	struct hif_softc *scn = (struct hif_softc *)hif_handle;
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);

	ret = pld_force_wake_release(scn->qdf_dev->dev);
	if (ret) {
		hif_err("force wake release failure");
		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
		return ret;
	}

	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
	hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 0);
	HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
	return 0;
}

#else /* DEVICE_FORCE_WAKE_ENABLE */

/**
 * hif_force_wake_request() - force wake without the UMAC handshake
 * @hif_handle: HIF handle
 *
 * Variant used when DEVICE_FORCE_WAKE_ENABLE is not set: only the MHI
 * wake vote is taken; the PCIe scratch register write/poll is skipped.
 *
 * Return: 0 on success, -EINVAL if the wake request cannot be sent
 */
int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
{
	struct hif_softc *scn = (struct hif_softc *)hif_handle;
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
	uint32_t timeout;

	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);

	if (qdf_in_interrupt())
		timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
	else
		timeout = 0;

	if (pld_force_wake_request_sync(scn->qdf_dev->dev, timeout)) {
		hif_err("force wake request send failed");
		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
		return -EINVAL;
	}

	/* If device's M1 state-change event races here, it can be ignored,
	 * as the device is expected to immediately move from M2 to M0
	 * without entering low power state.
	 */
	if (!pld_is_device_awake(scn->qdf_dev->dev))
		hif_info("state-change event races, ignore");

	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
	return 0;
}
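
/**
 * hif_force_wake_release() - release the MHI force wake vote
 * @hif_handle: HIF handle
 *
 * Return: 0 on success, error code from pld_force_wake_release()
 *         otherwise
 */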
int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
{
	int ret;
	struct hif_softc *scn = (struct hif_softc *)hif_handle;
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);

	ret = pld_force_wake_release(scn->qdf_dev->dev);
	if (ret) {
		hif_err("force wake release failure");
		HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
		return ret;
	}

	HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
	return 0;
}
#endif /* DEVICE_FORCE_WAKE_ENABLE */
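
/**
 * hif_print_pci_stats() - dump the force wake statistics
 * @pci_handle: hif pci context
 *
 * Return: none
 */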
void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
{
	hif_debug("mhi_force_wake_request_vote: %d",
		  pci_handle->stats.mhi_force_wake_request_vote);
	hif_debug("mhi_force_wake_failure: %d",
		  pci_handle->stats.mhi_force_wake_failure);
	hif_debug("mhi_force_wake_success: %d",
		  pci_handle->stats.mhi_force_wake_success);
	hif_debug("soc_force_wake_register_write_success: %d",
		  pci_handle->stats.soc_force_wake_register_write_success);
	hif_debug("soc_force_wake_failure: %d",
		  pci_handle->stats.soc_force_wake_failure);
	hif_debug("soc_force_wake_success: %d",
		  pci_handle->stats.soc_force_wake_success);
	hif_debug("mhi_force_wake_release_failure: %d",
		  pci_handle->stats.mhi_force_wake_release_failure);
	hif_debug("mhi_force_wake_release_success: %d",
		  pci_handle->stats.mhi_force_wake_release_success);
	hif_debug("soc_force_wake_release_success: %d",
		  pci_handle->stats.soc_force_wake_release_success);
}
#endif /* FORCE_WAKE */

#ifdef FEATURE_HAL_DELAYED_REG_WRITE
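/**
 * hif_prevent_link_low_power_states() - prevent the PCIe link from
 * entering the L1 low power state
 * @hif: HIF opaque handle
 *
 * Return: 0 on success, otherwise the error from pld_prevent_l1()
 */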
int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
{
	return pld_prevent_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
}
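
/**
 * hif_allow_link_low_power_states() - re-allow the PCIe link to enter
 * the L1 low power state
 * @hif: HIF opaque handle
 *
 * Return: none
 */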
void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
{
	pld_allow_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
}
#endif