if_pci.c

  1. /*
  2. * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
  3. *
  4. * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
  5. *
  6. *
  7. * Permission to use, copy, modify, and/or distribute this software for
  8. * any purpose with or without fee is hereby granted, provided that the
  9. * above copyright notice and this permission notice appear in all
  10. * copies.
  11. *
  12. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  13. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  14. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  15. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  16. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  17. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  18. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  19. * PERFORMANCE OF THIS SOFTWARE.
  20. */
  21. /*
  22. * This file was originally distributed by Qualcomm Atheros, Inc.
  23. * under proprietary terms before Copyright ownership was assigned
  24. * to the Linux Foundation.
  25. */
  26. #include <linux/pci.h>
  27. #include <linux/slab.h>
  28. #include <linux/interrupt.h>
  29. #include <linux/if_arp.h>
  30. #ifdef CONFIG_PCI_MSM
  31. #include <linux/msm_pcie.h>
  32. #endif
  33. #include "hif_io32.h"
  34. #include "if_pci.h"
  35. #include "hif.h"
  36. #include "hif_main.h"
  37. #include "ce_main.h"
  38. #include "ce_api.h"
  39. #include "ce_internal.h"
  40. #include "ce_reg.h"
  41. #include "ce_bmi.h"
  42. #include "regtable.h"
  43. #include "hif_hw_version.h"
  44. #include <linux/debugfs.h>
  45. #include <linux/seq_file.h>
  46. #include "qdf_status.h"
  47. #include "qdf_atomic.h"
  48. #include "pld_common.h"
  49. #include "mp_dev.h"
  50. #include "hif_debug.h"
  51. #include "if_pci_internal.h"
  52. #include "ce_tasklet.h"
  53. #include "targaddrs.h"
  54. #include "pci_api.h"
  55. #include "ahb_api.h"
  56. /* Maximum ms timeout for host to wake up target */
  57. #define PCIE_WAKE_TIMEOUT 1000
  58. #define RAMDUMP_EVENT_TIMEOUT 2500
  59. /* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
  60. * PCIe data bus errors.
  61. * As a workaround for this issue, the reset sequence is changed to
  62. * use a Target CPU warm reset instead of SOC_GLOBAL_RESET.
  63. */
  64. #define CPU_WARM_RESET_WAR
  65. #ifdef CONFIG_WIN
  66. extern int32_t frac, intval, ar900b_20_targ_clk, qca9888_20_targ_clk;
  67. #endif
  68. /*
  69. * Top-level interrupt handler for all PCI interrupts from a Target.
  70. * When a block of MSI interrupts is allocated, this top-level handler
  71. * is not used; instead, we directly call the correct sub-handler.
  72. */
  73. struct ce_irq_reg_table {
  74. uint32_t irq_enable;
  75. uint32_t irq_status;
  76. };
  77. #ifndef QCA_WIFI_3_0_ADRASTEA
  78. static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
  79. {
  80. return;
  81. }
  82. #else
  83. void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
  84. {
  85. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  86. unsigned int target_enable0, target_enable1;
  87. unsigned int target_cause0, target_cause1;
  88. target_enable0 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_0);
  89. target_enable1 = hif_read32_mb(sc->mem + Q6_ENABLE_REGISTER_1);
  90. target_cause0 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_0);
  91. target_cause1 = hif_read32_mb(sc->mem + Q6_CAUSE_REGISTER_1);
  92. if ((target_enable0 & target_cause0) ||
  93. (target_enable1 & target_cause1)) {
  94. hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_0, 0);
  95. hif_write32_mb(sc->mem + Q6_ENABLE_REGISTER_1, 0);
  96. if (scn->notice_send)
  97. pld_intr_notify_q6(sc->dev);
  98. }
  99. }
  100. #endif
  101. #ifdef QCA_WIFI_NAPIER_EMULATION
  102. void __iomem *napier_emu_ioremap(struct pci_dev *dev,
  103. int bar, unsigned long maxlen)
  104. {
  105. resource_size_t start = pci_resource_start(dev, bar);
  106. resource_size_t len = 0xD00000;
  107. unsigned long flags = pci_resource_flags(dev, bar);
  108. if (!len || !start)
  109. return NULL;
  110. if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM)) {
  111. if (flags & IORESOURCE_CACHEABLE && !(flags & IORESOURCE_IO))
  112. return ioremap(start, len);
  113. else
  114. return ioremap_nocache(start, len);
  115. }
  116. return NULL;
  117. }
  118. #endif
  119. /**
  120. * pci_dispatch_interrupt() - dispatch any pending copy engine interrupts
  121. * @scn: hif_softc context
  122. *
  123. * Return: N/A
  124. */
  125. static void pci_dispatch_interrupt(struct hif_softc *scn)
  126. {
  127. uint32_t intr_summary;
  128. int id;
  129. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  130. if (scn->hif_init_done != true)
  131. return;
  132. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  133. return;
  134. intr_summary = CE_INTERRUPT_SUMMARY(scn);
  135. if (intr_summary == 0) {
  136. if ((scn->target_status != TARGET_STATUS_RESET) &&
  137. (!qdf_atomic_read(&scn->link_suspended))) {
  138. hif_write32_mb(scn->mem +
  139. (SOC_CORE_BASE_ADDRESS |
  140. PCIE_INTR_ENABLE_ADDRESS),
  141. HOST_GROUP0_MASK);
  142. hif_read32_mb(scn->mem +
  143. (SOC_CORE_BASE_ADDRESS |
  144. PCIE_INTR_ENABLE_ADDRESS));
  145. }
  146. Q_TARGET_ACCESS_END(scn);
  147. return;
  148. } else {
  149. Q_TARGET_ACCESS_END(scn);
  150. }
  151. scn->ce_irq_summary = intr_summary;
  152. for (id = 0; intr_summary && (id < scn->ce_count); id++) {
  153. if (intr_summary & (1 << id)) {
  154. intr_summary &= ~(1 << id);
  155. ce_dispatch_interrupt(id, &hif_state->tasklets[id]);
  156. }
  157. }
  158. }
  159. irqreturn_t hif_pci_interrupt_handler(int irq, void *arg)
  160. {
  161. struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
  162. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  163. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
  164. volatile int tmp;
  165. uint16_t val;
  166. uint32_t bar0;
  167. uint32_t fw_indicator_address, fw_indicator;
  168. bool ssr_irq = false;
  169. unsigned int host_cause, host_enable;
  170. if (LEGACY_INTERRUPTS(sc)) {
  171. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  172. return IRQ_HANDLED;
  173. if (ADRASTEA_BU) {
  174. host_enable = hif_read32_mb(sc->mem +
  175. PCIE_INTR_ENABLE_ADDRESS);
  176. host_cause = hif_read32_mb(sc->mem +
  177. PCIE_INTR_CAUSE_ADDRESS);
  178. if (!(host_enable & host_cause)) {
  179. hif_pci_route_adrastea_interrupt(sc);
  180. return IRQ_HANDLED;
  181. }
  182. }
  183. /* Clear legacy PCI line interrupts
  184. * IMPORTANT: the INTR_CLR register has to be set
  185. * after INTR_ENABLE is set to 0,
  186. * otherwise the interrupt cannot actually be cleared */
  187. hif_write32_mb(sc->mem +
  188. (SOC_CORE_BASE_ADDRESS |
  189. PCIE_INTR_ENABLE_ADDRESS), 0);
  190. hif_write32_mb(sc->mem +
  191. (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
  192. ADRASTEA_BU ?
  193. (host_enable & host_cause) :
  194. HOST_GROUP0_MASK);
  195. if (ADRASTEA_BU)
  196. hif_write32_mb(sc->mem + 0x2f100c, (host_cause >> 1));
  197. /* IMPORTANT: this extra read transaction is required to
  198. * flush the posted write buffer */
  199. if (!ADRASTEA_BU) {
  200. tmp =
  201. hif_read32_mb(sc->mem +
  202. (SOC_CORE_BASE_ADDRESS |
  203. PCIE_INTR_ENABLE_ADDRESS));
  204. if (tmp == 0xdeadbeef) {
  205. HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
  206. __func__);
  207. pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
  208. HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
  209. __func__, val);
  210. pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
  211. HIF_ERROR("%s: PCI Device ID = 0x%04x",
  212. __func__, val);
  213. pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
  214. HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
  215. val);
  216. pci_read_config_word(sc->pdev, PCI_STATUS, &val);
  217. HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
  218. val);
  219. pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
  220. &bar0);
  221. HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
  222. bar0);
  223. HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
  224. __func__,
  225. hif_read32_mb(sc->mem +
  226. PCIE_LOCAL_BASE_ADDRESS
  227. + RTC_STATE_ADDRESS));
  228. HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
  229. __func__,
  230. hif_read32_mb(sc->mem +
  231. PCIE_LOCAL_BASE_ADDRESS
  232. + PCIE_SOC_WAKE_ADDRESS));
  233. HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
  234. __func__,
  235. hif_read32_mb(sc->mem + 0x80008),
  236. hif_read32_mb(sc->mem + 0x8000c));
  237. HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
  238. __func__,
  239. hif_read32_mb(sc->mem + 0x80010),
  240. hif_read32_mb(sc->mem + 0x80014));
  241. HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
  242. __func__,
  243. hif_read32_mb(sc->mem + 0x80018),
  244. hif_read32_mb(sc->mem + 0x8001c));
  245. QDF_BUG(0);
  246. }
  247. PCI_CLR_CAUSE0_REGISTER(sc);
  248. }
  249. if (HAS_FW_INDICATOR) {
  250. fw_indicator_address = hif_state->fw_indicator_address;
  251. fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
  252. if ((fw_indicator != ~0) &&
  253. (fw_indicator & FW_IND_EVENT_PENDING))
  254. ssr_irq = true;
  255. }
  256. if (Q_TARGET_ACCESS_END(scn) < 0)
  257. return IRQ_HANDLED;
  258. }
  259. /* TBDXXX: Add support for WMAC */
  260. if (ssr_irq) {
  261. sc->irq_event = irq;
  262. qdf_atomic_set(&scn->tasklet_from_intr, 1);
  263. qdf_atomic_inc(&scn->active_tasklet_cnt);
  264. tasklet_schedule(&sc->intr_tq);
  265. } else {
  266. pci_dispatch_interrupt(scn);
  267. }
  268. return IRQ_HANDLED;
  269. }
  270. static irqreturn_t hif_pci_msi_fw_handler(int irq, void *arg)
  271. {
  272. struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
  273. (irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, arg);
  274. return IRQ_HANDLED;
  275. }
  276. bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
  277. {
  278. return 1; /* FIX THIS */
  279. }
  280. int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
  281. {
  282. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  283. int i = 0;
  284. if (!irq || !size) {
  285. return -EINVAL;
  286. }
  287. if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
  288. irq[0] = sc->irq;
  289. return 1;
  290. }
  291. if (sc->num_msi_intrs > size) {
  292. qdf_print("Not enough space in irq buffer to return irqs\n");
  293. return -EINVAL;
  294. }
  295. for (i = 0; i < sc->num_msi_intrs; i++) {
  296. irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL;
  297. }
  298. return sc->num_msi_intrs;
  299. }
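/*
 * Editorial example (not part of the original file): a minimal sketch of how a
 * caller might consume hif_get_irq_num(). The helper name, the buffer size of
 * 8 and the qdf_print() logging are assumptions made for illustration only;
 * the block is kept under #if 0 so it does not change the build.
 */
#if 0
static void example_log_ce_irqs(struct hif_opaque_softc *hif_hdl)
{
	int irqs[8];
	int i, n;

	/* Returns the number of IRQs copied into irqs[], or a negative errno. */
	n = hif_get_irq_num(hif_hdl, irqs, 8);
	if (n < 0)
		return;
	for (i = 0; i < n; i++)
		qdf_print("CE irq[%d] = %d\n", i, irqs[i]);
}
#endif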
  300. /**
  301. * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
  302. * @scn: hif_softc
  303. *
  304. * Return: void
  305. */
  306. #if CONFIG_ATH_PCIE_MAX_PERF == 0
  307. void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
  308. {
  309. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  310. A_target_id_t pci_addr = scn->mem;
  311. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  312. /*
  313. * If the deferred sleep timer is running cancel it
  314. * and put the soc into sleep.
  315. */
  316. if (hif_state->fake_sleep == true) {
  317. qdf_timer_stop(&hif_state->sleep_timer);
  318. if (hif_state->verified_awake == false) {
  319. hif_write32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  320. PCIE_SOC_WAKE_ADDRESS,
  321. PCIE_SOC_WAKE_RESET);
  322. }
  323. hif_state->fake_sleep = false;
  324. }
  325. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  326. }
  327. #else
  328. inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
  329. {
  330. return;
  331. }
  332. #endif
  333. #define A_PCIE_LOCAL_REG_READ(mem, addr) \
  334. hif_read32_mb((char *)(mem) + \
  335. PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
  336. #define A_PCIE_LOCAL_REG_WRITE(mem, addr, val) \
  337. hif_write32_mb(((char *)(mem) + \
  338. PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
  339. #ifdef QCA_WIFI_3_0
  340. /**
  341. * hif_targ_is_awake() - check to see if the target is awake
  342. * @hif_ctx: hif context
  343. *
  344. * emulation never goes to sleep
  345. *
  346. * Return: true if target is awake
  347. */
  348. bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
  349. {
  350. return true;
  351. }
  352. #else
  353. /**
  354. * hif_targ_is_awake() - check to see if the target is awake
  355. * @hif_ctx: hif context
  356. *
  357. * Return: true if the target's clocks are on
  358. */
  359. bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
  360. {
  361. uint32_t val;
  362. if (scn->recovery)
  363. return false;
  364. val = hif_read32_mb(mem + PCIE_LOCAL_BASE_ADDRESS
  365. + RTC_STATE_ADDRESS);
  366. return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
  367. }
  368. #endif
  369. #define ATH_PCI_RESET_WAIT_MAX 10 /* Ms */
  370. static void hif_pci_device_reset(struct hif_pci_softc *sc)
  371. {
  372. void __iomem *mem = sc->mem;
  373. int i;
  374. uint32_t val;
  375. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  376. if (!scn->hostdef)
  377. return;
  378. /* NB: Don't check resetok here. This form of reset
  379. * is integral to correct operation. */
  380. if (!SOC_GLOBAL_RESET_ADDRESS) {
  381. return;
  382. }
  383. if (!mem) {
  384. return;
  385. }
  386. HIF_ERROR("%s: Reset Device", __func__);
  387. /*
  388. * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
  389. * writing WAKE_V, the Target may scribble over Host memory!
  390. */
  391. A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
  392. PCIE_SOC_WAKE_V_MASK);
  393. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  394. if (hif_targ_is_awake(scn, mem))
  395. break;
  396. qdf_mdelay(1);
  397. }
  398. /* Put Target, including PCIe, into RESET. */
  399. val = A_PCIE_LOCAL_REG_READ(mem, SOC_GLOBAL_RESET_ADDRESS);
  400. val |= 1;
  401. A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
  402. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  403. if (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
  404. RTC_STATE_COLD_RESET_MASK)
  405. break;
  406. qdf_mdelay(1);
  407. }
  408. /* Pull Target, including PCIe, out of RESET. */
  409. val &= ~1;
  410. A_PCIE_LOCAL_REG_WRITE(mem, SOC_GLOBAL_RESET_ADDRESS, val);
  411. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  412. if (!
  413. (A_PCIE_LOCAL_REG_READ(mem, RTC_STATE_ADDRESS) &
  414. RTC_STATE_COLD_RESET_MASK))
  415. break;
  416. qdf_mdelay(1);
  417. }
  418. A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
  419. }
  420. /* CPU warm reset function
  421. * Steps:
  422. * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
  423. * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
  424. * correctly on WARM reset
  425. * 3. Clear TARGET CPU LF timer interrupt
  426. * 4. Reset all CEs to clear any pending CE transactions
  427. * 5. Warm reset CPU
  428. */
  429. void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
  430. {
  431. void __iomem *mem = sc->mem;
  432. int i;
  433. uint32_t val;
  434. uint32_t fw_indicator;
  435. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  436. /* NB: Don't check resetok here. This form of reset is
  437. * integral to correct operation. */
  438. if (!mem) {
  439. return;
  440. }
  441. HIF_INFO_MED("%s: Target Warm Reset", __func__);
  442. /*
  443. * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
  444. * writing WAKE_V, the Target may scribble over Host memory!
  445. */
  446. A_PCIE_LOCAL_REG_WRITE(mem, PCIE_SOC_WAKE_ADDRESS,
  447. PCIE_SOC_WAKE_V_MASK);
  448. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  449. if (hif_targ_is_awake(scn, mem))
  450. break;
  451. qdf_mdelay(1);
  452. }
  453. /*
  454. * Disable Pending interrupts
  455. */
  456. val =
  457. hif_read32_mb(mem +
  458. (SOC_CORE_BASE_ADDRESS |
  459. PCIE_INTR_CAUSE_ADDRESS));
  460. HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
  461. (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
  462. /* Target CPU Intr Cause */
  463. val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
  464. HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);
  465. val =
  466. hif_read32_mb(mem +
  467. (SOC_CORE_BASE_ADDRESS |
  468. PCIE_INTR_ENABLE_ADDRESS));
  469. hif_write32_mb((mem +
  470. (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
  471. hif_write32_mb((mem + (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
  472. HOST_GROUP0_MASK);
  473. qdf_mdelay(100);
  474. /* Clear FW_INDICATOR_ADDRESS */
  475. if (HAS_FW_INDICATOR) {
  476. fw_indicator = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
  477. hif_write32_mb(mem + FW_INDICATOR_ADDRESS, 0);
  478. }
  479. /* Clear Target LF Timer interrupts */
  480. val =
  481. hif_read32_mb(mem +
  482. (RTC_SOC_BASE_ADDRESS +
  483. SOC_LF_TIMER_CONTROL0_ADDRESS));
  484. HIF_INFO_MED("%s: addr 0x%x : 0x%x", __func__,
  485. (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
  486. val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
  487. hif_write32_mb(mem +
  488. (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
  489. val);
  490. /* Reset CE */
  491. val =
  492. hif_read32_mb(mem +
  493. (RTC_SOC_BASE_ADDRESS |
  494. SOC_RESET_CONTROL_ADDRESS));
  495. val |= SOC_RESET_CONTROL_CE_RST_MASK;
  496. hif_write32_mb((mem +
  497. (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
  498. val);
  499. val =
  500. hif_read32_mb(mem +
  501. (RTC_SOC_BASE_ADDRESS |
  502. SOC_RESET_CONTROL_ADDRESS));
  503. qdf_mdelay(10);
  504. /* CE unreset */
  505. val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
  506. hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
  507. val);
  508. val =
  509. hif_read32_mb(mem +
  510. (RTC_SOC_BASE_ADDRESS |
  511. SOC_RESET_CONTROL_ADDRESS));
  512. qdf_mdelay(10);
  513. /* Read Target CPU Intr Cause */
  514. val = hif_read32_mb(mem + (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
  515. HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
  516. __func__, val);
  517. /* CPU warm RESET */
  518. val =
  519. hif_read32_mb(mem +
  520. (RTC_SOC_BASE_ADDRESS |
  521. SOC_RESET_CONTROL_ADDRESS));
  522. val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
  523. hif_write32_mb(mem + (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS),
  524. val);
  525. val =
  526. hif_read32_mb(mem +
  527. (RTC_SOC_BASE_ADDRESS |
  528. SOC_RESET_CONTROL_ADDRESS));
  529. HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
  530. __func__, val);
  531. qdf_mdelay(100);
  532. HIF_INFO_MED("%s: Target Warm reset complete", __func__);
  533. }
  534. #ifndef QCA_WIFI_3_0
  535. int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
  536. {
  537. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  538. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  539. void __iomem *mem = sc->mem;
  540. uint32_t val;
  541. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  542. return ATH_ISR_NOSCHED;
  543. val = hif_read32_mb(mem + FW_INDICATOR_ADDRESS);
  544. if (Q_TARGET_ACCESS_END(scn) < 0)
  545. return ATH_ISR_SCHED;
  546. HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);
  547. if (val & FW_IND_HELPER)
  548. return 0;
  549. return 1;
  550. }
  551. #endif
  552. int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
  553. {
  554. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  555. uint16_t device_id;
  556. uint32_t val;
  557. uint16_t timeout_count = 0;
  558. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  559. /* Check device ID from PCIe configuration space for link status */
  560. pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
  561. if (device_id != sc->devid) {
  562. HIF_ERROR("%s: device ID does match (read 0x%x, expect 0x%x)",
  563. __func__, device_id, sc->devid);
  564. return -EACCES;
  565. }
  566. /* Check PCIe local register for bar/memory access */
  567. val = hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  568. RTC_STATE_ADDRESS);
  569. HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
  570. /* Try to wake up the target if it sleeps */
  571. hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  572. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  573. HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
  574. hif_read32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  575. PCIE_SOC_WAKE_ADDRESS));
  576. /* Check if the target can be woken up */
  577. while (!hif_targ_is_awake(scn, sc->mem)) {
  578. if (timeout_count >= PCIE_WAKE_TIMEOUT) {
  579. HIF_ERROR("%s: wake up timeout, %08x, %08x",
  580. __func__,
  581. hif_read32_mb(sc->mem +
  582. PCIE_LOCAL_BASE_ADDRESS +
  583. RTC_STATE_ADDRESS),
  584. hif_read32_mb(sc->mem +
  585. PCIE_LOCAL_BASE_ADDRESS +
  586. PCIE_SOC_WAKE_ADDRESS));
  587. return -EACCES;
  588. }
  589. hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  590. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  591. qdf_mdelay(100);
  592. timeout_count += 100;
  593. }
  594. /* Check Power register for SoC internal bus issues */
  595. val =
  596. hif_read32_mb(sc->mem + RTC_SOC_BASE_ADDRESS +
  597. SOC_POWER_REG_OFFSET);
  598. HIF_INFO_MED("%s: Power register is %08x", __func__, val);
  599. return 0;
  600. }
  601. /**
  602. * __hif_pci_dump_registers(): dump other PCI debug registers
  603. * @scn: struct hif_softc
  604. *
  605. * This function dumps PCI debug registers. The parent function
  606. * dumps the copy engine registers before calling this function.
  607. *
  608. * Return: void
  609. */
  610. static void __hif_pci_dump_registers(struct hif_softc *scn)
  611. {
  612. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  613. void __iomem *mem = sc->mem;
  614. uint32_t val, i, j;
  615. uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
  616. uint32_t ce_base;
  617. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  618. return;
  619. /* DEBUG_INPUT_SEL_SRC = 0x6 */
  620. val =
  621. hif_read32_mb(mem + GPIO_BASE_ADDRESS +
  622. WLAN_DEBUG_INPUT_SEL_OFFSET);
  623. val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
  624. val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
  625. hif_write32_mb(mem + GPIO_BASE_ADDRESS + WLAN_DEBUG_INPUT_SEL_OFFSET,
  626. val);
  627. /* DEBUG_CONTROL_ENABLE = 0x1 */
  628. val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
  629. WLAN_DEBUG_CONTROL_OFFSET);
  630. val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
  631. val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
  632. hif_write32_mb(mem + GPIO_BASE_ADDRESS +
  633. WLAN_DEBUG_CONTROL_OFFSET, val);
  634. HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
  635. hif_read32_mb(mem + GPIO_BASE_ADDRESS +
  636. WLAN_DEBUG_INPUT_SEL_OFFSET),
  637. hif_read32_mb(mem + GPIO_BASE_ADDRESS +
  638. WLAN_DEBUG_CONTROL_OFFSET));
  639. HIF_INFO_MED("%s: Debug CE", __func__);
  640. /* Loop CE debug output */
  641. /* AMBA_DEBUG_BUS_SEL = 0xc */
  642. val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
  643. val &= ~AMBA_DEBUG_BUS_SEL_MASK;
  644. val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
  645. hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);
  646. for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
  647. /* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
  648. val = hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
  649. CE_WRAPPER_DEBUG_OFFSET);
  650. val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
  651. val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
  652. hif_write32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
  653. CE_WRAPPER_DEBUG_OFFSET, val);
  654. HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
  655. __func__, wrapper_idx[i],
  656. hif_read32_mb(mem + GPIO_BASE_ADDRESS +
  657. AMBA_DEBUG_BUS_OFFSET),
  658. hif_read32_mb(mem + CE_WRAPPER_BASE_ADDRESS +
  659. CE_WRAPPER_DEBUG_OFFSET));
  660. if (wrapper_idx[i] <= 7) {
  661. for (j = 0; j <= 5; j++) {
  662. ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
  663. /* For (j=0~5) write CE_DEBUG_SEL = j */
  664. val =
  665. hif_read32_mb(mem + ce_base +
  666. CE_DEBUG_OFFSET);
  667. val &= ~CE_DEBUG_SEL_MASK;
  668. val |= CE_DEBUG_SEL_SET(j);
  669. hif_write32_mb(mem + ce_base + CE_DEBUG_OFFSET,
  670. val);
  671. /* read (@gpio_athr_wlan_reg)
  672. * WLAN_DEBUG_OUT_DATA */
  673. val = hif_read32_mb(mem + GPIO_BASE_ADDRESS +
  674. WLAN_DEBUG_OUT_OFFSET);
  675. val = WLAN_DEBUG_OUT_DATA_GET(val);
  676. HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
  677. __func__, j,
  678. hif_read32_mb(mem + ce_base +
  679. CE_DEBUG_OFFSET), val);
  680. }
  681. } else {
  682. /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
  683. val =
  684. hif_read32_mb(mem + GPIO_BASE_ADDRESS +
  685. WLAN_DEBUG_OUT_OFFSET);
  686. val = WLAN_DEBUG_OUT_DATA_GET(val);
  687. HIF_INFO_MED("%s: out: %x", __func__, val);
  688. }
  689. }
  690. HIF_INFO_MED("%s: Debug PCIe:", __func__);
  691. /* Loop PCIe debug output */
  692. /* Write AMBA_DEBUG_BUS_SEL = 0x1c */
  693. val = hif_read32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET);
  694. val &= ~AMBA_DEBUG_BUS_SEL_MASK;
  695. val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
  696. hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET, val);
  697. for (i = 0; i <= 8; i++) {
  698. /* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
  699. val =
  700. hif_read32_mb(mem + GPIO_BASE_ADDRESS +
  701. AMBA_DEBUG_BUS_OFFSET);
  702. val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
  703. val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
  704. hif_write32_mb(mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
  705. val);
  706. /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
  707. val =
  708. hif_read32_mb(mem + GPIO_BASE_ADDRESS +
  709. WLAN_DEBUG_OUT_OFFSET);
  710. val = WLAN_DEBUG_OUT_DATA_GET(val);
  711. HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
  712. hif_read32_mb(mem + GPIO_BASE_ADDRESS +
  713. WLAN_DEBUG_OUT_OFFSET), val,
  714. hif_read32_mb(mem + GPIO_BASE_ADDRESS +
  715. WLAN_DEBUG_OUT_OFFSET));
  716. }
  717. Q_TARGET_ACCESS_END(scn);
  718. }
  719. /**
  720. * hif_pci_dump_registers(): dump bus debug registers
  721. * @hif_ctx: struct hif_softc
  722. *
  723. * This function dumps hif bus debug registers
  724. *
  725. * Return: 0 for success or error code
  726. */
  727. int hif_pci_dump_registers(struct hif_softc *hif_ctx)
  728. {
  729. int status;
  730. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  731. status = hif_dump_ce_registers(scn);
  732. if (status)
  733. HIF_ERROR("%s: Dump CE Registers Failed", __func__);
  734. /* dump non copy engine pci registers */
  735. __hif_pci_dump_registers(scn);
  736. return 0;
  737. }
  738. /*
  739. * Handler for a per-engine interrupt on a PARTICULAR CE.
  740. * This is used in cases where each CE has a private
  741. * MSI interrupt.
  742. */
  743. static irqreturn_t ce_per_engine_handler(int irq, void *arg)
  744. {
  745. int CE_id = irq - MSI_ASSIGN_CE_INITIAL;
  746. /*
  747. * NOTE: We are able to derive CE_id from irq because we
  748. * use a one-to-one mapping for CE's 0..5.
  749. * CE's 6 & 7 do not use interrupts at all.
  750. *
  751. * This mapping must be kept in sync with the mapping
  752. * used by firmware.
  753. */
  754. ce_per_engine_service(arg, CE_id);
  755. return IRQ_HANDLED;
  756. }
  757. #ifdef HIF_CONFIG_SLUB_DEBUG_ON
  758. /* worker thread to schedule wlan_tasklet in SLUB debug build */
  759. static void reschedule_tasklet_work_handler(void *arg)
  760. {
  761. struct hif_pci_softc *sc = arg;
  762. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  763. if (!scn) {
  764. HIF_ERROR("%s: hif_softc is NULL\n", __func__);
  765. return;
  766. }
  767. if (scn->hif_init_done == false) {
  768. HIF_ERROR("%s: wlan driver is unloaded", __func__);
  769. return;
  770. }
  771. tasklet_schedule(&sc->intr_tq);
  772. return;
  773. }
  774. /**
  775. * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
  776. * work
  777. * @sc: HIF PCI Context
  778. *
  779. * Return: void
  780. */
  781. static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
  782. {
  783. qdf_create_work(0, &sc->reschedule_tasklet_work,
  784. reschedule_tasklet_work_handler, NULL);
  785. }
  786. #else
  787. static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
  788. #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
  789. void wlan_tasklet(unsigned long data)
  790. {
  791. struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
  792. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  793. if (scn->hif_init_done == false)
  794. goto end;
  795. if (qdf_atomic_read(&scn->link_suspended))
  796. goto end;
  797. if (!ADRASTEA_BU) {
  798. (irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
  799. if (scn->target_status == TARGET_STATUS_RESET)
  800. goto end;
  801. }
  802. end:
  803. qdf_atomic_set(&scn->tasklet_from_intr, 0);
  804. qdf_atomic_dec(&scn->active_tasklet_cnt);
  805. }
  806. #ifdef FEATURE_RUNTIME_PM
  807. static const char *hif_pm_runtime_state_to_string(uint32_t state)
  808. {
  809. switch (state) {
  810. case HIF_PM_RUNTIME_STATE_NONE:
  811. return "INIT_STATE";
  812. case HIF_PM_RUNTIME_STATE_ON:
  813. return "ON";
  814. case HIF_PM_RUNTIME_STATE_INPROGRESS:
  815. return "INPROGRESS";
  816. case HIF_PM_RUNTIME_STATE_SUSPENDED:
  817. return "SUSPENDED";
  818. default:
  819. return "INVALID STATE";
  820. }
  821. }
  822. #define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
  823. seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
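/*
 * For reference (editorial note, not in the original source): with _name set
 * to, e.g., suspended, the macro above expands to roughly
 *
 *     seq_printf(s, "%30s: %u\n", "suspended", sc->pm_stats.suspended);
 *
 * i.e. the stringized stat name is used as the printed label and the same
 * token selects the matching pm_stats member.
 */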
  824. /**
  825. * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
  826. * @sc: hif_pci_softc context
  827. * @msg: log message
  828. *
  829. * log runtime pm stats when something seems off.
  830. *
  831. * Return: void
  832. */
  833. void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
  834. {
  835. struct hif_pm_runtime_lock *ctx;
  836. HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
  837. msg, atomic_read(&sc->dev->power.usage_count),
  838. hif_pm_runtime_state_to_string(
  839. atomic_read(&sc->pm_state)),
  840. sc->prevent_suspend_cnt);
  841. HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
  842. sc->dev->power.runtime_status,
  843. sc->dev->power.runtime_error,
  844. sc->dev->power.disable_depth,
  845. sc->dev->power.autosuspend_delay);
  846. HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
  847. sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
  848. sc->pm_stats.request_resume);
  849. HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
  850. sc->pm_stats.allow_suspend,
  851. sc->pm_stats.prevent_suspend);
  852. HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
  853. sc->pm_stats.prevent_suspend_timeout,
  854. sc->pm_stats.allow_suspend_timeout);
  855. HIF_ERROR("Suspended: %u, resumed: %u count",
  856. sc->pm_stats.suspended,
  857. sc->pm_stats.resumed);
  858. HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
  859. sc->pm_stats.suspend_err,
  860. sc->pm_stats.runtime_get_err);
  861. HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");
  862. list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
  863. HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
  864. }
  865. WARN_ON(1);
  866. }
  867. /**
  868. * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
  869. * @s: file to print to
  870. * @data: unused
  871. *
  872. * debugging tool added to the debug fs for displaying runtimepm stats
  873. *
  874. * Return: 0
  875. */
  876. static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
  877. {
  878. struct hif_pci_softc *sc = s->private;
  879. static const char * const autopm_state[] = {"NONE", "ON", "INPROGRESS",
  880. "SUSPENDED"};
  881. unsigned int msecs_age;
  882. int pm_state = atomic_read(&sc->pm_state);
  883. unsigned long timer_expires, flags;
  884. struct hif_pm_runtime_lock *ctx;
  885. seq_printf(s, "%30s: %s\n", "Runtime PM state",
  886. autopm_state[pm_state]);
  887. seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
  888. sc->pm_stats.last_resume_caller);
  889. if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
  890. msecs_age = jiffies_to_msecs(
  891. jiffies - sc->pm_stats.suspend_jiffies);
  892. seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
  893. msecs_age / 1000, msecs_age % 1000);
  894. }
  895. seq_printf(s, "%30s: %d\n", "PM Usage count",
  896. atomic_read(&sc->dev->power.usage_count));
  897. seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
  898. sc->prevent_suspend_cnt);
  899. HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
  900. HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
  901. HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
  902. HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
  903. HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
  904. HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
  905. HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
  906. HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
  907. HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
  908. HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
  909. HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);
  910. timer_expires = sc->runtime_timer_expires;
  911. if (timer_expires > 0) {
  912. msecs_age = jiffies_to_msecs(timer_expires - jiffies);
  913. seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
  914. msecs_age / 1000, msecs_age % 1000);
  915. }
  916. spin_lock_irqsave(&sc->runtime_lock, flags);
  917. if (list_empty(&sc->prevent_suspend_list)) {
  918. spin_unlock_irqrestore(&sc->runtime_lock, flags);
  919. return 0;
  920. }
  921. seq_printf(s, "%30s: ", "Active Wakeup_Sources");
  922. list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
  923. seq_printf(s, "%s", ctx->name);
  924. if (ctx->timeout)
  925. seq_printf(s, "(%d ms)", ctx->timeout);
  926. seq_puts(s, " ");
  927. }
  928. seq_puts(s, "\n");
  929. spin_unlock_irqrestore(&sc->runtime_lock, flags);
  930. return 0;
  931. }
  932. #undef HIF_PCI_RUNTIME_PM_STATS
  933. /**
  934. * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
  935. * @inode: inode of the debugfs entry
  936. * @file: file handle being opened
  937. *
  938. * Return: linux error code of single_open.
  939. */
  940. static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
  941. {
  942. return single_open(file, hif_pci_pm_runtime_debugfs_show,
  943. inode->i_private);
  944. }
  945. static const struct file_operations hif_pci_runtime_pm_fops = {
  946. .owner = THIS_MODULE,
  947. .open = hif_pci_runtime_pm_open,
  948. .release = single_release,
  949. .read = seq_read,
  950. .llseek = seq_lseek,
  951. };
  952. /**
  953. * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
  954. * @sc: pci context
  955. *
  956. * creates a debugfs entry to debug the runtime pm feature.
  957. */
  958. static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
  959. {
  960. sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
  961. S_IRUSR, NULL, sc,
  962. &hif_pci_runtime_pm_fops);
  963. }
  964. /**
  965. * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
  966. * @sc: pci context
  967. *
  968. * removes the debugfs entry to debug the runtime pm feature.
  969. */
  970. static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
  971. {
  972. debugfs_remove(sc->pm_dentry);
  973. }
  974. static void hif_runtime_init(struct device *dev, int delay)
  975. {
  976. pm_runtime_set_autosuspend_delay(dev, delay);
  977. pm_runtime_use_autosuspend(dev);
  978. pm_runtime_allow(dev);
  979. pm_runtime_mark_last_busy(dev);
  980. pm_runtime_put_noidle(dev);
  981. pm_suspend_ignore_children(dev, true);
  982. }
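/*
 * Editorial note (not in the original source): the sequence above configures
 * autosuspend with the given delay, allows runtime PM on the device and drops
 * a usage-count reference (pm_runtime_put_noidle()), so the device becomes
 * eligible to runtime-suspend after `delay` ms of inactivity.
 * hif_runtime_exit() below takes the reference back and marks the device
 * active again.
 */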
  983. static void hif_runtime_exit(struct device *dev)
  984. {
  985. pm_runtime_get_noresume(dev);
  986. pm_runtime_set_active(dev);
  987. }
  988. static void hif_pm_runtime_lock_timeout_fn(unsigned long data);
  989. /**
  990. * hif_pm_runtime_start(): start the runtime pm
  991. * @sc: pci context
  992. *
  993. * After this call, runtime pm will be active.
  994. */
  995. static void hif_pm_runtime_start(struct hif_pci_softc *sc)
  996. {
  997. struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
  998. uint32_t mode = hif_get_conparam(ol_sc);
  999. if (!ol_sc->hif_config.enable_runtime_pm) {
  1000. HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
  1001. return;
  1002. }
  1003. if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode)) {
  1004. HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
  1005. __func__);
  1006. return;
  1007. }
  1008. setup_timer(&sc->runtime_timer, hif_pm_runtime_lock_timeout_fn,
  1009. (unsigned long)sc);
  1010. HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
  1011. ol_sc->hif_config.runtime_pm_delay);
  1012. hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
  1013. qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
  1014. hif_runtime_pm_debugfs_create(sc);
  1015. }
  1016. /**
  1017. * hif_pm_runtime_stop(): stop runtime pm
  1018. * @sc: pci context
  1019. *
  1020. * Turns off runtime pm and frees corresponding resources
  1021. * that were acquired by hif_pm_runtime_start().
  1022. */
  1023. static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
  1024. {
  1025. struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
  1026. uint32_t mode = hif_get_conparam(ol_sc);
  1027. if (!ol_sc->hif_config.enable_runtime_pm)
  1028. return;
  1029. if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode))
  1030. return;
  1031. hif_runtime_exit(sc->dev);
  1032. hif_pm_runtime_resume(sc->dev);
  1033. qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
  1034. hif_runtime_pm_debugfs_remove(sc);
  1035. del_timer_sync(&sc->runtime_timer);
  1036. /* doesn't wait for pending traffic, unlike cld-2.0 */
  1037. }
  1038. /**
  1039. * hif_pm_runtime_open(): initialize runtime pm
  1040. * @sc: pci data structure
  1041. *
  1042. * Early initialization
  1043. */
  1044. static void hif_pm_runtime_open(struct hif_pci_softc *sc)
  1045. {
  1046. spin_lock_init(&sc->runtime_lock);
  1047. qdf_atomic_init(&sc->pm_state);
  1048. sc->prevent_linkdown_lock =
  1049. hif_runtime_lock_init("linkdown suspend disabled");
  1050. qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
  1051. INIT_LIST_HEAD(&sc->prevent_suspend_list);
  1052. }
  1053. /**
  1054. * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
  1055. * @sc: pci context
  1056. *
  1057. * Ensure we have only one vote against runtime suspend before closing
  1058. * the runtime suspend feature.
  1059. *
  1060. * All gets by the wlan driver should have been returned;
  1061. * one vote should remain as part of cnss_runtime_exit.
  1062. *
  1063. * Needs to be revisited if we share the root complex.
  1064. */
  1065. static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
  1066. {
  1067. unsigned long flags;
  1068. struct hif_pm_runtime_lock *ctx, *tmp;
  1069. if (atomic_read(&sc->dev->power.usage_count) != 1)
  1070. hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
  1071. else
  1072. return;
  1073. spin_lock_irqsave(&sc->runtime_lock, flags);
  1074. list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
  1075. spin_unlock_irqrestore(&sc->runtime_lock, flags);
  1076. hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
  1077. spin_lock_irqsave(&sc->runtime_lock, flags);
  1078. }
  1079. spin_unlock_irqrestore(&sc->runtime_lock, flags);
  1080. /* Ensure one and only one usage count, so that when the wlan
  1081. * driver is re-insmodded runtime pm won't be disabled;
  1082. * this also ensures runtime pm doesn't get
  1083. * broken by the count dropping below 1.
  1084. */
  1085. if (atomic_read(&sc->dev->power.usage_count) <= 0)
  1086. atomic_set(&sc->dev->power.usage_count, 1);
  1087. while (atomic_read(&sc->dev->power.usage_count) > 1)
  1088. hif_pm_runtime_put_auto(sc->dev);
  1089. }
  1090. static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
  1091. struct hif_pm_runtime_lock *lock);
  1092. /**
  1093. * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
  1094. * @sc: PCIe Context
  1095. *
  1096. * API is used to empty the runtime pm prevent suspend list.
  1097. *
  1098. * Return: void
  1099. */
  1100. static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
  1101. {
  1102. unsigned long flags;
  1103. struct hif_pm_runtime_lock *ctx, *tmp;
  1104. spin_lock_irqsave(&sc->runtime_lock, flags);
  1105. list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
  1106. __hif_pm_runtime_allow_suspend(sc, ctx);
  1107. }
  1108. spin_unlock_irqrestore(&sc->runtime_lock, flags);
  1109. }
  1110. /**
  1111. * hif_pm_runtime_close(): close runtime pm
  1112. * @sc: pci bus handle
  1113. *
  1114. * ensure runtime_pm is stopped before closing the driver
  1115. */
  1116. static void hif_pm_runtime_close(struct hif_pci_softc *sc)
  1117. {
  1118. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  1119. if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
  1120. return;
  1121. else
  1122. hif_pm_runtime_stop(sc);
  1123. hif_is_recovery_in_progress(scn) ?
  1124. hif_pm_runtime_sanitize_on_ssr_exit(sc) :
  1125. hif_pm_runtime_sanitize_on_exit(sc);
  1126. }
  1127. #else
  1128. static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
  1129. static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
  1130. static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
  1131. static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
  1132. #endif
  1133. /**
  1134. * hif_disable_power_gating() - disable HW power gating
  1135. * @hif_ctx: hif context
  1136. *
  1137. * disables pcie L1 power states
  1138. */
  1139. static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
  1140. {
  1141. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1142. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  1143. if (NULL == scn) {
  1144. HIF_ERROR("%s: Could not disable ASPM scn is null",
  1145. __func__);
  1146. return;
  1147. }
  1148. /* Disable ASPM when pkt log is enabled */
  1149. pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
  1150. pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
  1151. }
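/*
 * Assumed register layout (hence the field name lcr_val): config offset 0x80
 * is this device's PCIe Link Control register, whose low bits carry the ASPM
 * control field. Masking with 0xffffff00 above therefore clears the ASPM
 * (L0s/L1) enables; hif_enable_power_gating() restores the saved value.
 */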
  1152. /**
  1153. * hif_enable_power_gating() - enable HW power gating
1154. * @sc: pci context
  1155. *
  1156. * enables pcie L1 power states
  1157. */
  1158. static void hif_enable_power_gating(struct hif_pci_softc *sc)
  1159. {
  1160. if (NULL == sc) {
1161. HIF_ERROR("%s: Could not enable ASPM, sc is null",
  1162. __func__);
  1163. return;
  1164. }
  1165. /* Re-enable ASPM after firmware/OTP download is complete */
  1166. pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
  1167. }
  1168. /**
1169. * hif_pci_enable_power_management() - enable power management
1170. * @hif_sc: hif context
1171. *
1172. * Enables runtime pm, ASPM (PCIe, via hif_enable_power_gating) and re-enables
1173. * soc sleep after driver load (hif_pci_target_sleep_state_adjust).
  1174. *
  1175. * note: epping mode does not call this function as it does not
  1176. * care about saving power.
  1177. */
  1178. void hif_pci_enable_power_management(struct hif_softc *hif_sc,
  1179. bool is_packet_log_enabled)
  1180. {
  1181. struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
  1182. if (pci_ctx == NULL) {
  1183. HIF_ERROR("%s, hif_ctx null", __func__);
  1184. return;
  1185. }
  1186. hif_pm_runtime_start(pci_ctx);
  1187. if (!is_packet_log_enabled)
  1188. hif_enable_power_gating(pci_ctx);
  1189. if (!CONFIG_ATH_PCIE_MAX_PERF &&
  1190. CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) {
  1191. /* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
  1192. if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
  1193. HIF_ERROR("%s, failed to set target to sleep",
  1194. __func__);
  1195. }
  1196. }
  1197. /**
  1198. * hif_disable_power_management() - disable power management
  1199. * @hif_ctx: hif context
  1200. *
1201. * Currently only disables runtime pm. Should be updated to behave gracefully
1202. * if runtime pm was never started. Should be updated to take care
  1203. * of aspm and soc sleep for driver load.
  1204. */
  1205. void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
  1206. {
  1207. struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
  1208. if (pci_ctx == NULL) {
  1209. HIF_ERROR("%s, hif_ctx null", __func__);
  1210. return;
  1211. }
  1212. hif_pm_runtime_stop(pci_ctx);
  1213. }
  1214. void hif_pci_display_stats(struct hif_softc *hif_ctx)
  1215. {
  1216. struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
  1217. if (pci_ctx == NULL) {
  1218. HIF_ERROR("%s, hif_ctx null", __func__);
  1219. return;
  1220. }
  1221. hif_display_ce_stats(&pci_ctx->ce_sc);
  1222. }
  1223. void hif_pci_clear_stats(struct hif_softc *hif_ctx)
  1224. {
  1225. struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
  1226. if (pci_ctx == NULL) {
  1227. HIF_ERROR("%s, hif_ctx null", __func__);
  1228. return;
  1229. }
  1230. hif_clear_ce_stats(&pci_ctx->ce_sc);
  1231. }
  1232. #define ATH_PCI_PROBE_RETRY_MAX 3
  1233. /**
1234. * hif_pci_open(): open the PCI bus
1235. * @hif_ctx: hif context
1236. * @bus_type: bus type
1237. *
1238. * Return: QDF_STATUS
  1239. */
  1240. QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
  1241. {
  1242. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  1243. hif_ctx->bus_type = bus_type;
  1244. hif_pm_runtime_open(sc);
  1245. qdf_spinlock_create(&sc->irq_lock);
  1246. return hif_ce_open(hif_ctx);
  1247. }
  1248. #ifdef BMI_RSP_POLLING
  1249. #define BMI_RSP_CB_REGISTER 0
  1250. #else
  1251. #define BMI_RSP_CB_REGISTER 1
  1252. #endif
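/*
 * In other words: with BMI_RSP_POLLING defined, BMI responses are polled and
 * no receive callback is registered on the BMI host pipe; otherwise
 * hif_register_bmi_callbacks() below also hooks hif_bmi_recv_data on
 * BMI_CE_NUM_TO_HOST.
 */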
  1253. /**
  1254. * hif_register_bmi_callbacks() - register bmi callbacks
  1255. * @hif_sc: hif context
  1256. *
  1257. * Bmi phase uses different copy complete callbacks than mission mode.
  1258. */
  1259. void hif_register_bmi_callbacks(struct hif_softc *hif_sc)
  1260. {
  1261. struct HIF_CE_pipe_info *pipe_info;
  1262. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
  1263. /*
  1264. * Initially, establish CE completion handlers for use with BMI.
  1265. * These are overwritten with generic handlers after we exit BMI phase.
  1266. */
  1267. pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_TARG];
  1268. ce_send_cb_register(pipe_info->ce_hdl, hif_bmi_send_done, pipe_info, 0);
  1269. if (BMI_RSP_CB_REGISTER) {
  1270. pipe_info = &hif_state->pipe_info[BMI_CE_NUM_TO_HOST];
  1271. ce_recv_cb_register(
  1272. pipe_info->ce_hdl, hif_bmi_recv_data, pipe_info, 0);
  1273. }
  1274. }
  1275. /**
  1276. * hif_wake_target_cpu() - wake the target's cpu
  1277. * @scn: hif context
  1278. *
  1279. * Send an interrupt to the device to wake up the Target CPU
  1280. * so it has an opportunity to notice any changed state.
  1281. */
  1282. void hif_wake_target_cpu(struct hif_softc *scn)
  1283. {
  1284. QDF_STATUS rv;
  1285. uint32_t core_ctrl;
  1286. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
  1287. rv = hif_diag_read_access(hif_hdl,
  1288. SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
  1289. &core_ctrl);
  1290. QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
  1291. /* A_INUM_FIRMWARE interrupt to Target CPU */
  1292. core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
  1293. rv = hif_diag_write_access(hif_hdl,
  1294. SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
  1295. core_ctrl);
  1296. QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
  1297. }
  1298. /**
  1299. * soc_wake_reset() - allow the target to go to sleep
  1300. * @scn: hif_softc
  1301. *
1302. * Clear the force wake register. This is done by
1303. * hif_sleep_entry and when cancelling deferred timer sleep.
  1304. */
  1305. static void soc_wake_reset(struct hif_softc *scn)
  1306. {
  1307. hif_write32_mb(scn->mem +
  1308. PCIE_LOCAL_BASE_ADDRESS +
  1309. PCIE_SOC_WAKE_ADDRESS,
  1310. PCIE_SOC_WAKE_RESET);
  1311. }
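/*
 * Companion sequence used elsewhere in this file: the SoC is forced awake by
 * writing PCIE_SOC_WAKE_V_MASK to the same register, e.g.
 *
 *   hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
 *                  PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
 *
 * soc_wake_reset() removes that vote so the target may sleep again.
 */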
  1312. /**
  1313. * hif_sleep_entry() - gate target sleep
  1314. * @arg: hif context
  1315. *
  1316. * This function is the callback for the sleep timer.
1317. * Check if the last force awake critical section ended at least
1318. * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago. If it did,
1319. * allow the target to go to sleep and cancel the sleep timer;
1320. * otherwise reschedule the sleep timer.
  1321. */
  1322. static void hif_sleep_entry(void *arg)
  1323. {
  1324. struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
  1325. struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
  1326. uint32_t idle_ms;
  1327. if (scn->recovery)
  1328. return;
  1329. if (hif_is_driver_unloading(scn))
  1330. return;
  1331. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  1332. if (hif_state->verified_awake == false) {
  1333. idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
  1334. - hif_state->sleep_ticks);
  1335. if (idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
  1336. if (!qdf_atomic_read(&scn->link_suspended)) {
  1337. soc_wake_reset(scn);
  1338. hif_state->fake_sleep = false;
  1339. }
  1340. } else {
  1341. qdf_timer_stop(&hif_state->sleep_timer);
  1342. qdf_timer_start(&hif_state->sleep_timer,
  1343. HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
  1344. }
  1345. } else {
  1346. qdf_timer_stop(&hif_state->sleep_timer);
  1347. qdf_timer_start(&hif_state->sleep_timer,
  1348. HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
  1349. }
  1350. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  1351. }
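/*
 * The timer that fires hif_sleep_entry() is armed in hif_pci_bus_configure():
 *
 *   qdf_timer_init(NULL, &hif_state->sleep_timer,
 *                  hif_sleep_entry, (void *)hif_state,
 *                  QDF_TIMER_TYPE_WAKE_APPS);
 *
 * and is rescheduled above for HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS whenever
 * the target is still considered busy.
 */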
  1352. #define HIF_HIA_MAX_POLL_LOOP 1000000
  1353. #define HIF_HIA_POLLING_DELAY_MS 10
  1354. #ifdef CONFIG_WIN
  1355. void hif_set_hia_extnd(struct hif_softc *scn)
  1356. {
  1357. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
  1358. struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
  1359. uint32_t target_type = tgt_info->target_type;
  1360. HIF_TRACE("%s: E", __func__);
  1361. if ((target_type == TARGET_TYPE_AR900B) ||
  1362. target_type == TARGET_TYPE_QCA9984 ||
  1363. target_type == TARGET_TYPE_QCA9888) {
1364. /* CHIP revision is bits 8-11 of the CHIP_ID register 0xec
1365. * in RTC space */
  1366. tgt_info->target_revision
  1367. = CHIP_ID_REVISION_GET(hif_read32_mb(scn->mem
  1368. + CHIP_ID_ADDRESS));
  1369. qdf_print(KERN_INFO"chip_id 0x%x chip_revision 0x%x\n",
  1370. target_type, tgt_info->target_revision);
  1371. }
  1372. {
  1373. uint32_t flag2_value = 0;
  1374. uint32_t flag2_targ_addr =
  1375. host_interest_item_address(target_type,
  1376. offsetof(struct host_interest_s, hi_skip_clock_init));
  1377. if ((ar900b_20_targ_clk != -1) &&
  1378. (frac != -1) && (intval != -1)) {
  1379. hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1380. &flag2_value);
  1381. qdf_print("\n Setting clk_override\n");
  1382. flag2_value |= CLOCK_OVERRIDE;
  1383. hif_diag_write_access(hif_hdl, flag2_targ_addr,
  1384. flag2_value);
  1385. qdf_print("\n CLOCK PLL val set %d\n", flag2_value);
  1386. } else {
  1387. qdf_print(KERN_INFO"\n CLOCK PLL skipped\n");
  1388. }
  1389. }
  1390. if (target_type == TARGET_TYPE_AR900B
  1391. || target_type == TARGET_TYPE_QCA9984
  1392. || target_type == TARGET_TYPE_QCA9888) {
1393. /* for AR9980_2.0, a 300 MHz clock is used; right now we assume
1394. * this would be supplied through module parameters,
1395. * and if not supplied, assume the default or the same behavior as 1.0.
1396. * Assume the 1.0 clock can't be tuned; reset to defaults.
1397. */
  1398. qdf_print(KERN_INFO"%s: setting the target pll frac %x intval %x\n",
  1399. __func__, frac, intval);
  1400. /* do not touch frac, and int val, let them be default -1,
  1401. * if desired, host can supply these through module params
  1402. */
  1403. if (frac != -1 || intval != -1) {
  1404. uint32_t flag2_value = 0;
  1405. uint32_t flag2_targ_addr;
  1406. flag2_targ_addr =
  1407. host_interest_item_address(target_type,
  1408. offsetof(struct host_interest_s,
  1409. hi_clock_info));
  1410. hif_diag_read_access(hif_hdl,
  1411. flag2_targ_addr, &flag2_value);
  1412. qdf_print("\n ====> FRAC Val %x Address %x\n", frac,
  1413. flag2_value);
  1414. hif_diag_write_access(hif_hdl, flag2_value, frac);
  1415. qdf_print("\n INT Val %x Address %x\n",
  1416. intval, flag2_value + 4);
  1417. hif_diag_write_access(hif_hdl,
  1418. flag2_value + 4, intval);
  1419. } else {
  1420. qdf_print(KERN_INFO"%s: no frac provided, skipping pre-configuring PLL\n",
  1421. __func__);
  1422. }
  1423. /* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
  1424. if ((target_type == TARGET_TYPE_AR900B)
  1425. && (tgt_info->target_revision == AR900B_REV_2)
  1426. && ar900b_20_targ_clk != -1) {
  1427. uint32_t flag2_value = 0;
  1428. uint32_t flag2_targ_addr;
  1429. flag2_targ_addr
  1430. = host_interest_item_address(target_type,
  1431. offsetof(struct host_interest_s,
  1432. hi_desired_cpu_speed_hz));
  1433. hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1434. &flag2_value);
  1435. qdf_print("\n ====> hi_desired_cpu_speed_hz Address %x\n",
  1436. flag2_value);
  1437. hif_diag_write_access(hif_hdl, flag2_value,
  1438. ar900b_20_targ_clk/*300000000u*/);
  1439. } else if (target_type == TARGET_TYPE_QCA9888) {
  1440. uint32_t flag2_targ_addr;
  1441. if (200000000u != qca9888_20_targ_clk) {
  1442. qca9888_20_targ_clk = 300000000u;
  1443. /* Setting the target clock speed to 300 mhz */
  1444. }
  1445. flag2_targ_addr
  1446. = host_interest_item_address(target_type,
  1447. offsetof(struct host_interest_s,
  1448. hi_desired_cpu_speed_hz));
  1449. hif_diag_write_access(hif_hdl, flag2_targ_addr,
  1450. qca9888_20_targ_clk);
  1451. } else {
  1452. qdf_print(KERN_INFO"%s: targ_clk is not provided, skipping pre-configuring PLL\n",
  1453. __func__);
  1454. }
  1455. } else {
  1456. if (frac != -1 || intval != -1) {
  1457. uint32_t flag2_value = 0;
  1458. uint32_t flag2_targ_addr =
  1459. host_interest_item_address(target_type,
  1460. offsetof(struct host_interest_s,
  1461. hi_clock_info));
  1462. hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1463. &flag2_value);
  1464. qdf_print("\n ====> FRAC Val %x Address %x\n", frac,
  1465. flag2_value);
  1466. hif_diag_write_access(hif_hdl, flag2_value, frac);
  1467. qdf_print("\n INT Val %x Address %x\n", intval,
  1468. flag2_value + 4);
  1469. hif_diag_write_access(hif_hdl, flag2_value + 4,
  1470. intval);
  1471. }
  1472. }
  1473. }
  1474. #else
  1475. void hif_set_hia_extnd(struct hif_softc *scn)
  1476. {
  1477. }
  1478. #endif
  1479. /**
  1480. * hif_set_hia() - fill out the host interest area
  1481. * @scn: hif context
  1482. *
  1483. * This is replaced by hif_wlan_enable for integrated targets.
  1484. * This fills out the host interest area. The firmware will
  1485. * process these memory addresses when it is first brought out
  1486. * of reset.
  1487. *
  1488. * Return: 0 for success.
  1489. */
  1490. int hif_set_hia(struct hif_softc *scn)
  1491. {
  1492. QDF_STATUS rv;
  1493. uint32_t interconnect_targ_addr = 0;
  1494. uint32_t pcie_state_targ_addr = 0;
  1495. uint32_t pipe_cfg_targ_addr = 0;
  1496. uint32_t svc_to_pipe_map = 0;
  1497. uint32_t pcie_config_flags = 0;
  1498. uint32_t flag2_value = 0;
  1499. uint32_t flag2_targ_addr = 0;
  1500. #ifdef QCA_WIFI_3_0
  1501. uint32_t host_interest_area = 0;
  1502. uint8_t i;
  1503. #else
  1504. uint32_t ealloc_value = 0;
  1505. uint32_t ealloc_targ_addr = 0;
  1506. uint8_t banks_switched = 1;
  1507. uint32_t chip_id;
  1508. #endif
  1509. uint32_t pipe_cfg_addr;
  1510. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
  1511. struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
  1512. uint32_t target_type = tgt_info->target_type;
  1513. int target_ce_config_sz, target_service_to_ce_map_sz;
  1514. static struct CE_pipe_config *target_ce_config;
  1515. struct service_to_pipe *target_service_to_ce_map;
  1516. HIF_TRACE("%s: E", __func__);
  1517. hif_get_target_ce_config(scn,
  1518. &target_ce_config, &target_ce_config_sz,
  1519. &target_service_to_ce_map,
  1520. &target_service_to_ce_map_sz,
  1521. NULL, NULL);
  1522. if (ADRASTEA_BU)
  1523. return QDF_STATUS_SUCCESS;
  1524. #ifdef QCA_WIFI_3_0
  1525. i = 0;
  1526. while (i < HIF_HIA_MAX_POLL_LOOP) {
  1527. host_interest_area = hif_read32_mb(scn->mem +
  1528. A_SOC_CORE_SCRATCH_0_ADDRESS);
  1529. if ((host_interest_area & 0x01) == 0) {
  1530. qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
  1531. host_interest_area = 0;
  1532. i++;
  1533. if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
  1534. HIF_ERROR("%s: poll timeout(%d)", __func__, i);
  1535. } else {
  1536. host_interest_area &= (~0x01);
  1537. hif_write32_mb(scn->mem + 0x113014, 0);
  1538. break;
  1539. }
  1540. }
  1541. if (i >= HIF_HIA_MAX_POLL_LOOP) {
  1542. HIF_ERROR("%s: hia polling timeout", __func__);
  1543. return -EIO;
  1544. }
  1545. if (host_interest_area == 0) {
  1546. HIF_ERROR("%s: host_interest_area = 0", __func__);
  1547. return -EIO;
  1548. }
  1549. interconnect_targ_addr = host_interest_area +
  1550. offsetof(struct host_interest_area_t,
  1551. hi_interconnect_state);
  1552. flag2_targ_addr = host_interest_area +
  1553. offsetof(struct host_interest_area_t, hi_option_flag2);
  1554. #else
  1555. interconnect_targ_addr = hif_hia_item_address(target_type,
  1556. offsetof(struct host_interest_s, hi_interconnect_state));
  1557. ealloc_targ_addr = hif_hia_item_address(target_type,
  1558. offsetof(struct host_interest_s, hi_early_alloc));
  1559. flag2_targ_addr = hif_hia_item_address(target_type,
  1560. offsetof(struct host_interest_s, hi_option_flag2));
  1561. #endif
  1562. /* Supply Target-side CE configuration */
  1563. rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
  1564. &pcie_state_targ_addr);
  1565. if (rv != QDF_STATUS_SUCCESS) {
  1566. HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
  1567. __func__, interconnect_targ_addr, rv);
  1568. goto done;
  1569. }
  1570. if (pcie_state_targ_addr == 0) {
  1571. rv = QDF_STATUS_E_FAILURE;
  1572. HIF_ERROR("%s: pcie state addr is 0", __func__);
  1573. goto done;
  1574. }
  1575. pipe_cfg_addr = pcie_state_targ_addr +
  1576. offsetof(struct pcie_state_s,
  1577. pipe_cfg_addr);
  1578. rv = hif_diag_read_access(hif_hdl,
  1579. pipe_cfg_addr,
  1580. &pipe_cfg_targ_addr);
  1581. if (rv != QDF_STATUS_SUCCESS) {
  1582. HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
  1583. __func__, pipe_cfg_addr, rv);
  1584. goto done;
  1585. }
  1586. if (pipe_cfg_targ_addr == 0) {
  1587. rv = QDF_STATUS_E_FAILURE;
  1588. HIF_ERROR("%s: pipe cfg addr is 0", __func__);
  1589. goto done;
  1590. }
  1591. rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
  1592. (uint8_t *) target_ce_config,
  1593. target_ce_config_sz);
  1594. if (rv != QDF_STATUS_SUCCESS) {
  1595. HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
  1596. goto done;
  1597. }
  1598. rv = hif_diag_read_access(hif_hdl,
  1599. pcie_state_targ_addr +
  1600. offsetof(struct pcie_state_s,
  1601. svc_to_pipe_map),
  1602. &svc_to_pipe_map);
  1603. if (rv != QDF_STATUS_SUCCESS) {
  1604. HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
  1605. goto done;
  1606. }
  1607. if (svc_to_pipe_map == 0) {
  1608. rv = QDF_STATUS_E_FAILURE;
  1609. HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
  1610. goto done;
  1611. }
  1612. rv = hif_diag_write_mem(hif_hdl,
  1613. svc_to_pipe_map,
  1614. (uint8_t *) target_service_to_ce_map,
  1615. target_service_to_ce_map_sz);
  1616. if (rv != QDF_STATUS_SUCCESS) {
  1617. HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
  1618. goto done;
  1619. }
  1620. rv = hif_diag_read_access(hif_hdl,
  1621. pcie_state_targ_addr +
  1622. offsetof(struct pcie_state_s,
  1623. config_flags),
  1624. &pcie_config_flags);
  1625. if (rv != QDF_STATUS_SUCCESS) {
  1626. HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
  1627. goto done;
  1628. }
  1629. #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
  1630. pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
  1631. #else
  1632. pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
  1633. #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
  1634. pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
  1635. #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
  1636. pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
  1637. #endif
  1638. rv = hif_diag_write_mem(hif_hdl,
  1639. pcie_state_targ_addr +
  1640. offsetof(struct pcie_state_s,
  1641. config_flags),
  1642. (uint8_t *) &pcie_config_flags,
  1643. sizeof(pcie_config_flags));
  1644. if (rv != QDF_STATUS_SUCCESS) {
  1645. HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
  1646. goto done;
  1647. }
  1648. #ifndef QCA_WIFI_3_0
  1649. /* configure early allocation */
  1650. ealloc_targ_addr = hif_hia_item_address(target_type,
  1651. offsetof(
  1652. struct host_interest_s,
  1653. hi_early_alloc));
  1654. rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
  1655. &ealloc_value);
  1656. if (rv != QDF_STATUS_SUCCESS) {
  1657. HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
  1658. goto done;
  1659. }
  1660. /* 1 bank is switched to IRAM, except ROME 1.0 */
  1661. ealloc_value |=
  1662. ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
  1663. HI_EARLY_ALLOC_MAGIC_MASK);
  1664. rv = hif_diag_read_access(hif_hdl,
  1665. CHIP_ID_ADDRESS |
  1666. RTC_SOC_BASE_ADDRESS, &chip_id);
  1667. if (rv != QDF_STATUS_SUCCESS) {
  1668. HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
  1669. goto done;
  1670. }
  1671. if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
  1672. tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
  1673. switch (CHIP_ID_REVISION_GET(chip_id)) {
  1674. case 0x2: /* ROME 1.3 */
  1675. /* 2 banks are switched to IRAM */
  1676. banks_switched = 2;
  1677. break;
  1678. case 0x4: /* ROME 2.1 */
  1679. case 0x5: /* ROME 2.2 */
  1680. banks_switched = 6;
  1681. break;
  1682. case 0x8: /* ROME 3.0 */
  1683. case 0x9: /* ROME 3.1 */
  1684. case 0xA: /* ROME 3.2 */
  1685. banks_switched = 9;
  1686. break;
  1687. case 0x0: /* ROME 1.0 */
  1688. case 0x1: /* ROME 1.1 */
  1689. default:
  1690. /* 3 banks are switched to IRAM */
  1691. banks_switched = 3;
  1692. break;
  1693. }
  1694. }
  1695. ealloc_value |=
  1696. ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
  1697. & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
  1698. rv = hif_diag_write_access(hif_hdl,
  1699. ealloc_targ_addr,
  1700. ealloc_value);
  1701. if (rv != QDF_STATUS_SUCCESS) {
  1702. HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
  1703. goto done;
  1704. }
  1705. #endif
  1706. if ((target_type == TARGET_TYPE_AR900B)
  1707. || (target_type == TARGET_TYPE_QCA9984)
  1708. || (target_type == TARGET_TYPE_QCA9888)
  1709. || (target_type == TARGET_TYPE_AR9888)) {
  1710. hif_set_hia_extnd(scn);
  1711. }
  1712. /* Tell Target to proceed with initialization */
  1713. flag2_targ_addr = hif_hia_item_address(target_type,
  1714. offsetof(
  1715. struct host_interest_s,
  1716. hi_option_flag2));
  1717. rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1718. &flag2_value);
  1719. if (rv != QDF_STATUS_SUCCESS) {
  1720. HIF_ERROR("%s: get option val (%d)", __func__, rv);
  1721. goto done;
  1722. }
  1723. flag2_value |= HI_OPTION_EARLY_CFG_DONE;
  1724. rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
  1725. flag2_value);
  1726. if (rv != QDF_STATUS_SUCCESS) {
  1727. HIF_ERROR("%s: set option val (%d)", __func__, rv);
  1728. goto done;
  1729. }
  1730. hif_wake_target_cpu(scn);
  1731. done:
  1732. return rv;
  1733. }
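/*
 * Summary of the HIA programming sequence performed above (PCIe targets):
 *  1. locate hi_interconnect_state and read the target's pcie_state struct
 *  2. write the host's CE pipe configuration to pipe_cfg_addr
 *  3. write the service-to-pipe map to svc_to_pipe_map
 *  4. patch config_flags (L1 clock gating, clock-switch wait, AXI clk gating)
 *  5. (pre-3.0 targets) program hi_early_alloc with the IRAM bank count
 *  6. set HI_OPTION_EARLY_CFG_DONE in hi_option_flag2 and wake the target CPU
 */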
  1734. /**
  1735. * hif_bus_configure() - configure the pcie bus
  1736. * @hif_sc: pointer to the hif context.
  1737. *
  1738. * return: 0 for success. nonzero for failure.
  1739. */
  1740. int hif_pci_bus_configure(struct hif_softc *hif_sc)
  1741. {
  1742. int status = 0;
  1743. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
  1744. struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
  1745. hif_ce_prepare_config(hif_sc);
  1746. /* initialize sleep state adjust variables */
  1747. hif_state->sleep_timer_init = true;
  1748. hif_state->keep_awake_count = 0;
  1749. hif_state->fake_sleep = false;
  1750. hif_state->sleep_ticks = 0;
  1751. qdf_timer_init(NULL, &hif_state->sleep_timer,
  1752. hif_sleep_entry, (void *)hif_state,
  1753. QDF_TIMER_TYPE_WAKE_APPS);
  1754. hif_state->sleep_timer_init = true;
  1755. if (ADRASTEA_BU) {
  1756. status = hif_wlan_enable(hif_sc);
  1757. if (status) {
  1758. HIF_ERROR("%s: hif_wlan_enable error = %d",
  1759. __func__, status);
  1760. goto timer_free;
  1761. }
  1762. }
  1763. A_TARGET_ACCESS_LIKELY(hif_sc);
  1764. if (CONFIG_ATH_PCIE_MAX_PERF ||
  1765. CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) {
  1766. /*
  1767. * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
  1768. * prevent sleep when we want to keep firmware always awake
  1769. * note: when we want to keep firmware always awake,
  1770. * hif_target_sleep_state_adjust will point to a dummy
  1771. * function, and hif_pci_target_sleep_state_adjust must
  1772. * be called instead.
  1773. * note: bus type check is here because AHB bus is reusing
  1774. * hif_pci_bus_configure code.
  1775. */
  1776. if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
  1777. if (hif_pci_target_sleep_state_adjust(hif_sc,
  1778. false, true) < 0) {
  1779. status = -EACCES;
  1780. goto disable_wlan;
  1781. }
  1782. }
  1783. }
  1784. /* todo: consider replacing this with an srng field */
  1785. if (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) {
  1786. hif_sc->per_ce_irq = true;
  1787. }
  1788. status = hif_config_ce(hif_sc);
  1789. if (status)
  1790. goto disable_wlan;
  1791. /* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
  1792. if (hif_needs_bmi(hif_osc)) {
  1793. status = hif_set_hia(hif_sc);
  1794. if (status)
  1795. goto unconfig_ce;
  1796. HIF_INFO_MED("%s: hif_set_hia done", __func__);
  1797. hif_register_bmi_callbacks(hif_sc);
  1798. }
  1799. status = hif_configure_irq(hif_sc);
  1800. if (status < 0)
  1801. goto unconfig_ce;
  1802. A_TARGET_ACCESS_UNLIKELY(hif_sc);
  1803. return status;
  1804. unconfig_ce:
  1805. hif_unconfig_ce(hif_sc);
  1806. disable_wlan:
  1807. A_TARGET_ACCESS_UNLIKELY(hif_sc);
  1808. if (ADRASTEA_BU)
  1809. hif_wlan_disable(hif_sc);
  1810. timer_free:
  1811. qdf_timer_stop(&hif_state->sleep_timer);
  1812. qdf_timer_free(&hif_state->sleep_timer);
  1813. hif_state->sleep_timer_init = false;
  1814. HIF_ERROR("%s: failed, status = %d", __func__, status);
  1815. return status;
  1816. }
  1817. /**
  1818. * hif_bus_close(): hif_bus_close
  1819. *
  1820. * Return: n/a
  1821. */
  1822. void hif_pci_close(struct hif_softc *hif_sc)
  1823. {
  1824. struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
  1825. hif_pm_runtime_close(hif_pci_sc);
  1826. hif_ce_close(hif_sc);
  1827. }
  1828. #define BAR_NUM 0
  1829. int hif_enable_pci(struct hif_pci_softc *sc,
  1830. struct pci_dev *pdev,
  1831. const struct pci_device_id *id)
  1832. {
  1833. void __iomem *mem;
  1834. int ret = 0;
  1835. uint16_t device_id;
  1836. struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
  1837. pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
  1838. if (device_id != id->device) {
  1839. HIF_ERROR(
  1840. "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
  1841. __func__, device_id, id->device);
1842. /* pci link is down, so returning with error code */
  1843. return -EIO;
  1844. }
  1845. /* FIXME: temp. commenting out assign_resource
  1846. * call for dev_attach to work on 2.6.38 kernel
  1847. */
  1848. #if (!defined(__LINUX_ARM_ARCH__))
  1849. if (pci_assign_resource(pdev, BAR_NUM)) {
  1850. HIF_ERROR("%s: pci_assign_resource error", __func__);
  1851. return -EIO;
  1852. }
  1853. #endif
  1854. if (pci_enable_device(pdev)) {
  1855. HIF_ERROR("%s: pci_enable_device error",
  1856. __func__);
  1857. return -EIO;
  1858. }
  1859. /* Request MMIO resources */
  1860. ret = pci_request_region(pdev, BAR_NUM, "ath");
  1861. if (ret) {
  1862. HIF_ERROR("%s: PCI MMIO reservation error", __func__);
  1863. ret = -EIO;
  1864. goto err_region;
  1865. }
  1866. #ifdef CONFIG_ARM_LPAE
1867. /* if CONFIG_ARM_LPAE is enabled, we have to set a 64-bit DMA mask
1868. * even for 32-bit devices. */
  1869. ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  1870. if (ret) {
  1871. HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
  1872. goto err_dma;
  1873. }
  1874. ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
  1875. if (ret) {
  1876. HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
  1877. goto err_dma;
  1878. }
  1879. #else
  1880. ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  1881. if (ret) {
  1882. HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
  1883. goto err_dma;
  1884. }
  1885. ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
  1886. if (ret) {
  1887. HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
  1888. __func__);
  1889. goto err_dma;
  1890. }
  1891. #endif
  1892. PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
  1893. /* Set bus master bit in PCI_COMMAND to enable DMA */
  1894. pci_set_master(pdev);
  1895. /* Arrange for access to Target SoC registers. */
  1896. #ifdef QCA_WIFI_NAPIER_EMULATION
  1897. mem = napier_emu_ioremap(pdev, BAR_NUM, 0);
  1898. #else
  1899. mem = pci_iomap(pdev, BAR_NUM, 0);
  1900. #endif
  1901. if (!mem) {
  1902. HIF_ERROR("%s: PCI iomap error", __func__);
  1903. ret = -EIO;
  1904. goto err_iomap;
  1905. }
  1906. pr_err("*****BAR is %p\n", mem);
  1907. sc->mem = mem;
  1908. sc->pdev = pdev;
  1909. sc->dev = &pdev->dev;
  1910. sc->devid = id->device;
  1911. sc->cacheline_sz = dma_get_cache_alignment();
  1912. ol_sc->mem = mem;
  1913. sc->pci_enabled = true;
  1914. return ret;
  1915. err_iomap:
  1916. pci_clear_master(pdev);
  1917. err_dma:
  1918. pci_release_region(pdev, BAR_NUM);
  1919. err_region:
  1920. pci_disable_device(pdev);
  1921. return ret;
  1922. }
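/*
 * Error unwinding above mirrors the acquisition order; the labels fall
 * through, so each failure point releases everything acquired before it:
 * err_iomap clears bus mastering, err_dma releases the BAR region, and
 * err_region disables the PCI device, leaving nothing held when
 * hif_enable_pci() returns an error.
 */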
  1923. void hif_disable_pci(struct hif_pci_softc *sc)
  1924. {
  1925. struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
  1926. if (ol_sc == NULL) {
  1927. HIF_ERROR("%s: ol_sc = NULL", __func__);
  1928. return;
  1929. }
  1930. hif_pci_device_reset(sc);
  1931. pci_iounmap(sc->pdev, sc->mem);
  1932. sc->mem = NULL;
  1933. ol_sc->mem = NULL;
  1934. pci_clear_master(sc->pdev);
  1935. pci_release_region(sc->pdev, BAR_NUM);
  1936. pci_disable_device(sc->pdev);
  1937. }
  1938. int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
  1939. {
  1940. int ret = 0;
  1941. int targ_awake_limit = 500;
  1942. #ifndef QCA_WIFI_3_0
  1943. uint32_t fw_indicator;
  1944. #endif
  1945. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  1946. /*
1947. * Verify that the Target was started cleanly.
  1948. * The case where this is most likely is with an AUX-powered
  1949. * Target and a Host in WoW mode. If the Host crashes,
  1950. * loses power, or is restarted (without unloading the driver)
  1951. * then the Target is left (aux) powered and running. On a
  1952. * subsequent driver load, the Target is in an unexpected state.
  1953. * We try to catch that here in order to reset the Target and
  1954. * retry the probe.
  1955. */
  1956. hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  1957. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  1958. while (!hif_targ_is_awake(scn, sc->mem)) {
  1959. if (0 == targ_awake_limit) {
  1960. HIF_ERROR("%s: target awake timeout", __func__);
  1961. ret = -EAGAIN;
  1962. goto end;
  1963. }
  1964. qdf_mdelay(1);
  1965. targ_awake_limit--;
  1966. }
  1967. #if PCIE_BAR0_READY_CHECKING
  1968. {
  1969. int wait_limit = 200;
1970. /* Synchronization point: wait until BAR0 is configured */
  1971. while (wait_limit-- &&
  1972. !(hif_read32_mb(sc->mem +
  1973. PCIE_LOCAL_BASE_ADDRESS +
  1974. PCIE_SOC_RDY_STATUS_ADDRESS) \
  1975. & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
  1976. qdf_mdelay(10);
  1977. }
  1978. if (wait_limit < 0) {
1979. /* AR6320v1 doesn't support checking of BAR0 configuration;
1980. * it takes about two seconds to wait for BAR0 to become ready */
  1981. HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
  1982. __func__);
  1983. }
  1984. }
  1985. #endif
  1986. #ifndef QCA_WIFI_3_0
  1987. fw_indicator = hif_read32_mb(sc->mem + FW_INDICATOR_ADDRESS);
  1988. hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  1989. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
  1990. if (fw_indicator & FW_IND_INITIALIZED) {
  1991. HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
  1992. __func__);
  1993. ret = -EAGAIN;
  1994. goto end;
  1995. }
  1996. #endif
  1997. end:
  1998. return ret;
  1999. }
  2000. void wlan_tasklet_msi(unsigned long data)
  2001. {
  2002. struct hif_tasklet_entry *entry = (struct hif_tasklet_entry *)data;
  2003. struct hif_pci_softc *sc = (struct hif_pci_softc *) entry->hif_handler;
  2004. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  2005. if (scn->hif_init_done == false)
  2006. goto irq_handled;
  2007. if (qdf_atomic_read(&scn->link_suspended))
  2008. goto irq_handled;
  2009. qdf_atomic_inc(&scn->active_tasklet_cnt);
  2010. if (entry->id == HIF_MAX_TASKLET_NUM) {
  2011. /* the last tasklet is for fw IRQ */
  2012. (irqreturn_t)hif_fw_interrupt_handler(sc->irq_event, scn);
  2013. if (scn->target_status == TARGET_STATUS_RESET)
  2014. goto irq_handled;
  2015. } else if (entry->id < scn->ce_count) {
  2016. ce_per_engine_service(scn, entry->id);
  2017. } else {
  2018. HIF_ERROR("%s: ERROR - invalid CE_id = %d",
  2019. __func__, entry->id);
  2020. }
  2021. return;
  2022. irq_handled:
  2023. qdf_atomic_dec(&scn->active_tasklet_cnt);
  2024. }
  2025. int hif_configure_msi(struct hif_pci_softc *sc)
  2026. {
  2027. int ret = 0;
  2028. int num_msi_desired;
  2029. int rv = -1;
  2030. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  2031. HIF_TRACE("%s: E", __func__);
  2032. num_msi_desired = MSI_NUM_REQUEST; /* Multiple MSI */
  2033. if (num_msi_desired < 1) {
  2034. HIF_ERROR("%s: MSI is not configured", __func__);
  2035. return -EINVAL;
  2036. }
  2037. if (num_msi_desired > 1) {
  2038. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0))
  2039. rv = pci_enable_msi_range(sc->pdev, num_msi_desired,
  2040. num_msi_desired);
  2041. #else
  2042. rv = pci_enable_msi_block(sc->pdev, num_msi_desired);
  2043. #endif
  2044. }
  2045. HIF_TRACE("%s: num_msi_desired = %d, available_msi = %d",
  2046. __func__, num_msi_desired, rv);
  2047. if (rv == 0 || rv >= HIF_MAX_TASKLET_NUM) {
  2048. int i;
  2049. sc->num_msi_intrs = HIF_MAX_TASKLET_NUM;
  2050. sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].hif_handler =
  2051. (void *)sc;
  2052. sc->tasklet_entries[HIF_MAX_TASKLET_NUM-1].id =
  2053. HIF_MAX_TASKLET_NUM;
  2054. tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
  2055. (unsigned long)&sc->tasklet_entries[
  2056. HIF_MAX_TASKLET_NUM-1]);
  2057. ret = request_irq(sc->pdev->irq + MSI_ASSIGN_FW,
  2058. hif_pci_msi_fw_handler,
  2059. IRQF_SHARED, "wlan_pci", sc);
  2060. if (ret) {
  2061. HIF_ERROR("%s: request_irq failed", __func__);
  2062. goto err_intr;
  2063. }
  2064. for (i = 0; i <= scn->ce_count; i++) {
  2065. sc->tasklet_entries[i].hif_handler = (void *)sc;
  2066. sc->tasklet_entries[i].id = i;
  2067. tasklet_init(&sc->intr_tq, wlan_tasklet_msi,
  2068. (unsigned long)&sc->tasklet_entries[i]);
  2069. ret = request_irq((sc->pdev->irq +
  2070. i + MSI_ASSIGN_CE_INITIAL),
  2071. ce_per_engine_handler, IRQF_SHARED,
  2072. "wlan_pci", sc);
  2073. if (ret) {
  2074. HIF_ERROR("%s: request_irq failed", __func__);
  2075. goto err_intr;
  2076. }
  2077. }
  2078. } else if (rv > 0) {
  2079. HIF_TRACE("%s: use single msi", __func__);
  2080. ret = pci_enable_msi(sc->pdev);
  2081. if (ret < 0) {
  2082. HIF_ERROR("%s: single MSI allocation failed",
  2083. __func__);
  2084. /* Try for legacy PCI line interrupts */
  2085. sc->num_msi_intrs = 0;
  2086. } else {
  2087. sc->num_msi_intrs = 1;
  2088. tasklet_init(&sc->intr_tq,
  2089. wlan_tasklet, (unsigned long)sc);
  2090. ret = request_irq(sc->pdev->irq,
  2091. hif_pci_interrupt_handler,
  2092. IRQF_SHARED, "wlan_pci", sc);
  2093. if (ret) {
  2094. HIF_ERROR("%s: request_irq failed", __func__);
  2095. goto err_intr;
  2096. }
  2097. }
  2098. } else {
  2099. sc->num_msi_intrs = 0;
  2100. ret = -EIO;
  2101. HIF_ERROR("%s: do not support MSI, rv = %d", __func__, rv);
  2102. }
  2103. ret = pci_enable_msi(sc->pdev);
  2104. if (ret < 0) {
  2105. HIF_ERROR("%s: single MSI interrupt allocation failed",
  2106. __func__);
  2107. /* Try for legacy PCI line interrupts */
  2108. sc->num_msi_intrs = 0;
  2109. } else {
  2110. sc->num_msi_intrs = 1;
  2111. tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
  2112. ret = request_irq(sc->pdev->irq,
  2113. hif_pci_interrupt_handler, IRQF_SHARED,
  2114. "wlan_pci", sc);
  2115. if (ret) {
  2116. HIF_ERROR("%s: request_irq failed", __func__);
  2117. goto err_intr;
  2118. }
  2119. }
  2120. if (ret == 0) {
  2121. hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
  2122. PCIE_INTR_ENABLE_ADDRESS),
  2123. HOST_GROUP0_MASK);
  2124. hif_write32_mb(sc->mem +
  2125. PCIE_LOCAL_BASE_ADDRESS + PCIE_SOC_WAKE_ADDRESS,
  2126. PCIE_SOC_WAKE_RESET);
  2127. }
  2128. HIF_TRACE("%s: X, ret = %d", __func__, ret);
  2129. return ret;
  2130. err_intr:
  2131. if (sc->num_msi_intrs >= 1)
  2132. pci_disable_msi(sc->pdev);
  2133. return ret;
  2134. }
  2135. static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
  2136. {
  2137. int ret = 0;
  2138. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  2139. uint32_t target_type = scn->target_info.target_type;
  2140. HIF_TRACE("%s: E", __func__);
2141. /* does not support MSI, or MSI IRQ setup failed */
  2142. tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
  2143. ret = request_irq(sc->pdev->irq,
  2144. hif_pci_interrupt_handler, IRQF_SHARED,
  2145. "wlan_pci", sc);
  2146. if (ret) {
  2147. HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
  2148. goto end;
  2149. }
2150. /* Use sc->irq instead of sc->pdev->irq;
2151. * platform_device pdev doesn't have an irq field */
  2152. sc->irq = sc->pdev->irq;
  2153. /* Use Legacy PCI Interrupts */
  2154. hif_write32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
  2155. PCIE_INTR_ENABLE_ADDRESS),
  2156. HOST_GROUP0_MASK);
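/* Read the enable register back (assumption: this flushes the posted PCIe
 * write so the interrupt enable takes effect before we proceed). */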
  2157. hif_read32_mb(sc->mem+(SOC_CORE_BASE_ADDRESS |
  2158. PCIE_INTR_ENABLE_ADDRESS));
  2159. hif_write32_mb(sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  2160. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
  2161. if ((target_type == TARGET_TYPE_IPQ4019) ||
  2162. (target_type == TARGET_TYPE_AR900B) ||
  2163. (target_type == TARGET_TYPE_QCA9984) ||
  2164. (target_type == TARGET_TYPE_AR9888) ||
  2165. (target_type == TARGET_TYPE_QCA9888)) {
  2166. hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
  2167. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  2168. }
  2169. end:
  2170. QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
  2171. "%s: X, ret = %d", __func__, ret);
  2172. return ret;
  2173. }
  2174. /**
2175. * hif_pci_nointrs(): disable IRQ
  2176. *
  2177. * This function stops interrupt(s)
  2178. *
  2179. * @scn: struct hif_softc
  2180. *
  2181. * Return: none
  2182. */
  2183. void hif_pci_nointrs(struct hif_softc *scn)
  2184. {
  2185. int i;
  2186. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2187. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  2188. if (scn->request_irq_done == false)
  2189. return;
  2190. if (sc->num_msi_intrs > 0) {
  2191. /* MSI interrupt(s) */
  2192. for (i = 0; i < sc->num_msi_intrs; i++) {
  2193. free_irq(sc->irq + i, sc);
  2194. }
  2195. sc->num_msi_intrs = 0;
  2196. } else {
2197. /* Legacy PCI line interrupt.
2198. * Use sc->irq instead of sc->pdev->irq;
2199. * platform_device pdev doesn't have an irq field */
  2200. free_irq(sc->irq, sc);
  2201. }
  2202. ce_unregister_irq(hif_state, 0xfff);
  2203. scn->request_irq_done = false;
  2204. }
  2205. /**
2206. * hif_pci_disable_bus(): disable the bus
2207. *
2208. * This function disables the bus
2209. *
2210. * @scn: hif context
  2211. *
  2212. * Return: none
  2213. */
  2214. void hif_pci_disable_bus(struct hif_softc *scn)
  2215. {
  2216. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2217. struct pci_dev *pdev;
  2218. void __iomem *mem;
  2219. struct hif_target_info *tgt_info = &scn->target_info;
  2220. /* Attach did not succeed, all resources have been
  2221. * freed in error handler
  2222. */
  2223. if (!sc)
  2224. return;
  2225. pdev = sc->pdev;
  2226. if (ADRASTEA_BU) {
  2227. hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
  2228. hif_write32_mb(sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
  2229. hif_write32_mb(sc->mem + PCIE_INTR_CLR_ADDRESS,
  2230. HOST_GROUP0_MASK);
  2231. }
  2232. #if defined(CPU_WARM_RESET_WAR)
  2233. /* Currently CPU warm reset sequence is tested only for AR9888_REV2
  2234. * Need to enable for AR9888_REV1 once CPU warm reset sequence is
  2235. * verified for AR9888_REV1
  2236. */
  2237. if ((tgt_info->target_version == AR9888_REV2_VERSION) || (tgt_info->target_version == AR9887_REV1_VERSION))
  2238. hif_pci_device_warm_reset(sc);
  2239. else
  2240. hif_pci_device_reset(sc);
  2241. #else
  2242. hif_pci_device_reset(sc);
  2243. #endif
  2244. mem = (void __iomem *)sc->mem;
  2245. if (mem) {
  2246. pci_disable_msi(pdev);
  2247. hif_dump_pipe_debug_count(scn);
  2248. if (scn->athdiag_procfs_inited) {
  2249. athdiag_procfs_remove();
  2250. scn->athdiag_procfs_inited = false;
  2251. }
  2252. pci_iounmap(pdev, mem);
  2253. scn->mem = NULL;
  2254. pci_release_region(pdev, BAR_NUM);
  2255. pci_clear_master(pdev);
  2256. pci_disable_device(pdev);
  2257. }
  2258. HIF_INFO("%s: X", __func__);
  2259. }
  2260. #define OL_ATH_PCI_PM_CONTROL 0x44
  2261. #ifdef FEATURE_RUNTIME_PM
  2262. /**
2263. * hif_runtime_prevent_linkdown() - prevent or allow runtime suspend from occurring
  2264. * @scn: hif context
  2265. * @flag: prevent linkdown if true otherwise allow
  2266. *
  2267. * this api should only be called as part of bus prevent linkdown
  2268. */
  2269. static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
  2270. {
  2271. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2272. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
  2273. if (flag)
  2274. hif_pm_runtime_prevent_suspend(hif_hdl,
  2275. sc->prevent_linkdown_lock);
  2276. else
  2277. hif_pm_runtime_allow_suspend(hif_hdl,
  2278. sc->prevent_linkdown_lock);
  2279. }
  2280. #else
  2281. static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
  2282. {
  2283. }
  2284. #endif
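/*
 * Illustrative client usage of the prevent/allow suspend API (sketch only;
 * "my_feature" is a hypothetical lock name, not something defined here):
 *
 *   struct hif_pm_runtime_lock *lock = hif_runtime_lock_init("my_feature");
 *
 *   hif_pm_runtime_prevent_suspend(hif_hdl, lock);
 *   ... access the target while the PCIe link is guaranteed up ...
 *   hif_pm_runtime_allow_suspend(hif_hdl, lock);
 *
 *   hif_runtime_lock_deinit(hif_hdl, lock);
 */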
  2285. #if defined(CONFIG_PCI_MSM)
  2286. /**
2287. * hif_pci_prevent_linkdown(): prevent or allow linkdown
  2288. * @flag: true prevents linkdown, false allows
  2289. *
  2290. * Calls into the platform driver to vote against taking down the
  2291. * pcie link.
  2292. *
  2293. * Return: n/a
  2294. */
  2295. void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
  2296. {
  2297. HIF_ERROR("wlan: %s pcie power collapse",
  2298. (flag ? "disable" : "enable"));
  2299. hif_runtime_prevent_linkdown(scn, flag);
  2300. pld_wlan_pm_control(scn->qdf_dev->dev, flag);
  2301. }
  2302. #else
  2303. void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
  2304. {
  2305. HIF_ERROR("wlan: %s pcie power collapse",
  2306. (flag ? "disable" : "enable"));
  2307. hif_runtime_prevent_linkdown(scn, flag);
  2308. }
  2309. #endif
  2310. /**
  2311. * hif_bus_suspend_link_up() - suspend the bus
  2312. *
  2313. * Configures the pci irq line as a wakeup source.
  2314. *
  2315. * Return: 0 for success and non-zero for failure
  2316. */
  2317. static int hif_bus_suspend_link_up(struct hif_softc *scn)
  2318. {
  2319. struct pci_dev *pdev;
  2320. int status;
  2321. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2322. if (!sc)
  2323. return -EFAULT;
  2324. pdev = sc->pdev;
  2325. status = hif_drain_tasklets(scn);
  2326. if (status != 0)
  2327. return status;
  2328. if (unlikely(enable_irq_wake(pdev->irq))) {
  2329. HIF_ERROR("%s: Fail to enable wake IRQ!", __func__);
  2330. return -EINVAL;
  2331. }
  2332. hif_cancel_deferred_target_sleep(scn);
  2333. return 0;
  2334. }
  2335. /**
  2336. * hif_bus_resume_link_up() - hif bus resume API
  2337. *
  2338. * This function disables the wakeup source.
  2339. *
  2340. * Return: 0 for success and non-zero for failure
  2341. */
  2342. static int hif_bus_resume_link_up(struct hif_softc *scn)
  2343. {
  2344. struct pci_dev *pdev;
  2345. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2346. if (!sc)
  2347. return -EFAULT;
  2348. pdev = sc->pdev;
  2349. if (!pdev) {
  2350. HIF_ERROR("%s: pci_dev is null", __func__);
  2351. return -EFAULT;
  2352. }
  2353. if (unlikely(disable_irq_wake(pdev->irq))) {
  2354. HIF_ERROR("%s: Fail to disable wake IRQ!", __func__);
  2355. return -EFAULT;
  2356. }
  2357. return 0;
  2358. }
  2359. /**
  2360. * hif_bus_suspend_link_down() - suspend the bus
  2361. *
2362. * Suspends the hif layer, taking care of draining receive queues and
2363. * shutting down copy engines if needed. Ensures copy engine interrupts
  2364. * are disabled when it returns. Prevents register access after it
  2365. * returns.
  2366. *
  2367. * Return: 0 for success and non-zero for failure
  2368. */
  2369. static int hif_bus_suspend_link_down(struct hif_softc *scn)
  2370. {
  2371. struct pci_dev *pdev;
  2372. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2373. int status = 0;
  2374. pdev = sc->pdev;
  2375. disable_irq(pdev->irq);
  2376. status = hif_drain_tasklets(scn);
  2377. if (status != 0) {
  2378. enable_irq(pdev->irq);
  2379. return status;
  2380. }
  2381. /* Stop the HIF Sleep Timer */
  2382. hif_cancel_deferred_target_sleep(scn);
  2383. qdf_atomic_set(&scn->link_suspended, 1);
  2384. return 0;
  2385. }
  2386. /**
  2387. * hif_bus_resume_link_down() - hif bus resume API
  2388. *
2389. * This function resumes the bus, re-enabling interrupts.
  2390. *
  2391. * Return: 0 for success and non-zero for failure
  2392. */
  2393. static int hif_bus_resume_link_down(struct hif_softc *scn)
  2394. {
  2395. struct pci_dev *pdev;
  2396. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2397. if (!sc)
  2398. return -EFAULT;
  2399. pdev = sc->pdev;
  2400. if (!pdev) {
  2401. HIF_ERROR("%s: pci_dev is null", __func__);
  2402. return -EFAULT;
  2403. }
  2404. qdf_atomic_set(&scn->link_suspended, 0);
  2405. enable_irq(pdev->irq);
  2406. return 0;
  2407. }
  2408. /**
2409. * hif_pci_bus_suspend(): prepare hif for suspend
2410. *
2411. * Chooses the suspend type based on link suspend voting.
  2412. *
  2413. * Return: 0 for success and non-zero error code for failure
  2414. */
  2415. int hif_pci_bus_suspend(struct hif_softc *scn)
  2416. {
  2417. if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
  2418. return hif_bus_suspend_link_down(scn);
  2419. else
  2420. return hif_bus_suspend_link_up(scn);
  2421. }
  2422. /**
2423. * __hif_check_link_status() - API to check whether the PCIe link is active
  2424. * @scn: HIF Context
  2425. *
  2426. * API reads the PCIe config space to verify if PCIe link training is
  2427. * successful or not.
  2428. *
  2429. * Return: Success/Failure
  2430. */
  2431. static int __hif_check_link_status(struct hif_softc *scn)
  2432. {
  2433. uint16_t dev_id;
  2434. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2435. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  2436. if (!sc) {
  2437. HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
  2438. return -EINVAL;
  2439. }
  2440. pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
  2441. if (dev_id == sc->devid)
  2442. return 0;
  2443. HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
  2444. __func__, dev_id);
  2445. scn->recovery = true;
  2446. if (cbk && cbk->set_recovery_in_progress)
  2447. cbk->set_recovery_in_progress(cbk->context, true);
  2448. else
  2449. HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
  2450. pld_is_pci_link_down(sc->dev);
  2451. return -EACCES;
  2452. }
  2453. /**
2454. * hif_pci_bus_resume(): prepare hif for resume
2455. *
2456. * Chooses the resume type based on link suspend voting.
  2457. *
  2458. * Return: 0 for success and non-zero error code for failure
  2459. */
  2460. int hif_pci_bus_resume(struct hif_softc *scn)
  2461. {
  2462. int ret;
  2463. ret = __hif_check_link_status(scn);
  2464. if (ret)
  2465. return ret;
  2466. if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
  2467. return hif_bus_resume_link_down(scn);
  2468. else
  2469. return hif_bus_resume_link_up(scn);
  2470. }
  2471. #ifdef FEATURE_RUNTIME_PM
  2472. /**
  2473. * __hif_runtime_pm_set_state(): utility function
  2474. * @state: state to set
  2475. *
  2476. * indexes into the runtime pm state and sets it.
  2477. */
  2478. static void __hif_runtime_pm_set_state(struct hif_softc *scn,
  2479. enum hif_pm_runtime_state state)
  2480. {
  2481. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2482. if (NULL == sc) {
  2483. HIF_ERROR("%s: HIF_CTX not initialized",
  2484. __func__);
  2485. return;
  2486. }
  2487. qdf_atomic_set(&sc->pm_state, state);
  2488. }
  2489. /**
  2490. * hif_runtime_pm_set_state_inprogress(): adjust runtime pm state
  2491. *
2492. * Notify hif that a runtime pm operation has started
  2493. */
  2494. static void hif_runtime_pm_set_state_inprogress(struct hif_softc *scn)
  2495. {
  2496. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_INPROGRESS);
  2497. }
  2498. /**
  2499. * hif_runtime_pm_set_state_on(): adjust runtime pm state
  2500. *
2501. * Notify hif that the runtime pm state should be on
  2502. */
  2503. static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
  2504. {
  2505. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
  2506. }
  2507. /**
  2508. * hif_runtime_pm_set_state_suspended(): adjust runtime pm state
  2509. *
  2510. * Notify hif that a runtime suspend attempt has been completed successfully
  2511. */
  2512. static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
  2513. {
  2514. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
  2515. }
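/*
 * Sketch of the pm_state transitions driven by the helpers above (assumed
 * from the call sites in this file):
 *
 *   NONE --(hif_pm_runtime_start)--> ON
 *   ON --(hif_pre_runtime_suspend)--> INPROGRESS
 *   INPROGRESS --(suspend ok)--> SUSPENDED, or --(suspend fail)--> ON
 *   SUSPENDED --(hif_pre_runtime_resume)--> INPROGRESS --(resume ok)--> ON
 *   any state --(hif_pm_runtime_stop)--> NONE
 */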
  2516. /**
  2517. * hif_log_runtime_suspend_success() - log a successful runtime suspend
  2518. */
  2519. static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
  2520. {
  2521. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  2522. if (sc == NULL)
  2523. return;
  2524. sc->pm_stats.suspended++;
  2525. sc->pm_stats.suspend_jiffies = jiffies;
  2526. }
  2527. /**
  2528. * hif_log_runtime_suspend_failure() - log a failed runtime suspend
  2529. *
  2530. * log a failed runtime suspend
  2531. * mark last busy to prevent immediate runtime suspend
  2532. */
  2533. static void hif_log_runtime_suspend_failure(void *hif_ctx)
  2534. {
  2535. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  2536. if (sc == NULL)
  2537. return;
  2538. sc->pm_stats.suspend_err++;
  2539. }
  2540. /**
  2541. * hif_log_runtime_resume_success() - log a successful runtime resume
  2542. *
2543. * log a successful runtime resume
  2544. * mark last busy to prevent immediate runtime suspend
  2545. */
  2546. static void hif_log_runtime_resume_success(void *hif_ctx)
  2547. {
  2548. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  2549. if (sc == NULL)
  2550. return;
  2551. sc->pm_stats.resumed++;
  2552. }
  2553. /**
  2554. * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
  2555. *
  2556. * Record the failure.
  2557. * mark last busy to delay a retry.
  2558. * adjust the runtime_pm state.
  2559. */
  2560. void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
  2561. {
  2562. struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
  2563. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2564. hif_log_runtime_suspend_failure(hif_ctx);
  2565. if (hif_pci_sc != NULL)
  2566. hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
  2567. hif_runtime_pm_set_state_on(scn);
  2568. }
  2569. /**
  2570. * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
  2571. *
2572. * Makes sure that the pci link will be taken down by the suspend operation.
  2573. * If the hif layer is configured to leave the bus on, runtime suspend will
  2574. * not save any power.
  2575. *
  2576. * Set the runtime suspend state to in progress.
  2577. *
  2578. * return -EINVAL if the bus won't go down. otherwise return 0
  2579. */
  2580. int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
  2581. {
  2582. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2583. if (!hif_can_suspend_link(hif_ctx)) {
  2584. HIF_ERROR("Runtime PM not supported for link up suspend");
  2585. return -EINVAL;
  2586. }
  2587. hif_runtime_pm_set_state_inprogress(scn);
  2588. return 0;
  2589. }
  2590. /**
  2591. * hif_process_runtime_suspend_success() - bookkeeping of suspend success
  2592. *
  2593. * Record the success.
  2594. * adjust the runtime_pm state
  2595. */
  2596. void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
  2597. {
  2598. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2599. hif_runtime_pm_set_state_suspended(scn);
  2600. hif_log_runtime_suspend_success(scn);
  2601. }
  2602. /**
  2603. * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
  2604. *
  2605. * update the runtime pm state.
  2606. */
  2607. void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
  2608. {
  2609. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2610. hif_runtime_pm_set_state_inprogress(scn);
  2611. }
  2612. /**
  2613. * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
  2614. *
  2615. * record the success.
  2616. * adjust the runtime_pm state
  2617. */
  2618. void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
  2619. {
  2620. struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_ctx);
  2621. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2622. hif_log_runtime_resume_success(hif_ctx);
  2623. if (hif_pci_sc != NULL)
  2624. hif_pm_runtime_mark_last_busy(hif_pci_sc->dev);
  2625. hif_runtime_pm_set_state_on(scn);
  2626. }
  2627. #endif
  2628. /**
  2629. * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
  2630. *
  2631. * Return: 0 for success and non-zero error code for failure
  2632. */
  2633. int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
  2634. {
  2635. return hif_pci_bus_suspend(HIF_GET_SOFTC(hif_ctx));
  2636. }
  2637. #ifdef WLAN_FEATURE_FASTPATH
  2638. /**
  2639. * hif_fastpath_resume() - resume fastpath for runtimepm
  2640. *
  2641. * ensure that the fastpath write index register is up to date
  2642. * since runtime pm may cause ce_send_fast to skip the register
  2643. * write.
  2644. */
  2645. static void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
  2646. {
  2647. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2648. struct CE_state *ce_state;
  2649. if (!scn)
  2650. return;
  2651. if (scn->fastpath_mode_on) {
  2652. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  2653. return;
  2654. ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
  2655. qdf_spin_lock_bh(&ce_state->ce_index_lock);
2656. /* war_ce_src_ring_write_idx_set */
  2657. CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
  2658. ce_state->src_ring->write_index);
  2659. qdf_spin_unlock_bh(&ce_state->ce_index_lock);
  2660. Q_TARGET_ACCESS_END(scn);
  2661. }
  2662. }
  2663. #else
  2664. static void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx) {}
  2665. #endif
  2666. /**
  2667. * hif_runtime_resume() - do the bus resume part of a runtime resume
  2668. *
  2669. * Return: 0 for success and non-zero error code for failure
  2670. */
  2671. int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
  2672. {
  2673. int status = hif_pci_bus_resume(HIF_GET_SOFTC(hif_ctx));
  2674. hif_fastpath_resume(hif_ctx);
  2675. return status;
  2676. }
  2677. #if CONFIG_PCIE_64BIT_MSI
  2678. static void hif_free_msi_ctx(struct hif_softc *scn)
  2679. {
  2680. struct hif_pci_softc *sc = scn->hif_sc;
  2681. struct hif_msi_info *info = &sc->msi_info;
  2682. struct device *dev = scn->qdf_dev->dev;
  2683. OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
  2684. OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
  2685. info->magic = NULL;
  2686. info->magic_dma = 0;
  2687. }
  2688. #else
  2689. static void hif_free_msi_ctx(struct hif_softc *scn)
  2690. {
  2691. }
  2692. #endif
  2693. void hif_pci_disable_isr(struct hif_softc *scn)
  2694. {
  2695. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2696. hif_nointrs(scn);
  2697. hif_free_msi_ctx(scn);
  2698. /* Cancel the pending tasklet */
  2699. ce_tasklet_kill(scn);
  2700. hif_grp_tasklet_kill(scn);
  2701. tasklet_kill(&sc->intr_tq);
  2702. qdf_atomic_set(&scn->active_tasklet_cnt, 0);
  2703. qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
  2704. }
  2705. /* Function to reset SoC */
  2706. void hif_pci_reset_soc(struct hif_softc *hif_sc)
  2707. {
  2708. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
  2709. struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
  2710. struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
  2711. #if defined(CPU_WARM_RESET_WAR)
/* Currently the CPU warm reset sequence is tested only for AR9888_REV2.
 * Enable it for AR9888_REV1 once the CPU warm reset sequence is
 * verified for AR9888_REV1.
 */
  2716. if (tgt_info->target_version == AR9888_REV2_VERSION)
  2717. hif_pci_device_warm_reset(sc);
  2718. else
  2719. hif_pci_device_reset(sc);
  2720. #else
  2721. hif_pci_device_reset(sc);
  2722. #endif
  2723. }
  2724. #ifdef CONFIG_PCI_MSM
  2725. static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
  2726. {
  2727. msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
  2728. msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
  2729. }
  2730. #else
static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {}
  2732. #endif
  2733. /**
  2734. * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
  2735. * @sc: HIF PCIe Context
  2736. *
  2737. * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
  2738. *
 * Return: -EACCES to indicate failure to the caller
  2740. */
  2741. static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
  2742. {
  2743. uint16_t val;
  2744. uint32_t bar;
  2745. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
  2746. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  2747. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
  2748. struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
  2749. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  2750. A_target_id_t pci_addr = scn->mem;
  2751. HIF_ERROR("%s: keep_awake_count = %d",
  2752. __func__, hif_state->keep_awake_count);
  2753. pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
  2754. HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
  2755. pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
  2756. HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
  2757. pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
  2758. HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
  2759. pci_read_config_word(sc->pdev, PCI_STATUS, &val);
  2760. HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
  2761. pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
  2762. HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
  2763. HIF_ERROR("%s: SOC_WAKE_ADDR 0%08x", __func__,
  2764. hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  2765. PCIE_SOC_WAKE_ADDRESS));
  2766. HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
  2767. hif_read32_mb(pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  2768. RTC_STATE_ADDRESS));
  2769. HIF_ERROR("%s:error, wakeup target", __func__);
  2770. hif_msm_pcie_debug_info(sc);
  2771. if (!cfg->enable_self_recovery)
  2772. QDF_BUG(0);
  2773. scn->recovery = true;
  2774. if (cbk->set_recovery_in_progress)
  2775. cbk->set_recovery_in_progress(cbk->context, true);
  2776. pld_is_pci_link_down(sc->dev);
  2777. return -EACCES;
  2778. }
  2779. /*
  2780. * For now, we use simple on-demand sleep/wake.
  2781. * Some possible improvements:
  2782. * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
  2783. * (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
  2784. * Careful, though, these functions may be used by
  2785. * interrupt handlers ("atomic")
  2786. * -Don't use host_reg_table for this code; instead use values directly
  2787. * -Use a separate timer to track activity and allow Target to sleep only
  2788. * if it hasn't done anything for a while; may even want to delay some
  2789. * processing for a short while in order to "batch" (e.g.) transmit
  2790. * requests with completion processing into "windows of up time". Costs
  2791. * some performance, but improves power utilization.
  2792. * -On some platforms, it might be possible to eliminate explicit
  2793. * sleep/wakeup. Instead, take a chance that each access works OK. If not,
  2794. * recover from the failure by forcing the Target awake.
  2795. * -Change keep_awake_count to an atomic_t in order to avoid spin lock
  2796. * overhead in some cases. Perhaps this makes more sense when
  2797. * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
  2798. * disabled.
  2799. * -It is possible to compile this code out and simply force the Target
  2800. * to remain awake. That would yield optimal performance at the cost of
  2801. * increased power. See CONFIG_ATH_PCIE_MAX_PERF.
  2802. *
  2803. * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
  2804. */
/**
 * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
 * @scn: hif_softc pointer.
 * @sleep_ok: allow the target to go to sleep
 * @wait_for_it: when waking, wait until the target is verified awake
 *
 * Allow the target to sleep, or force it awake, optionally blocking
 * until the wakeup has taken effect.
 *
 * Return: 0 on success, -EACCES on failure
 */
  2815. int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
  2816. bool sleep_ok, bool wait_for_it)
  2817. {
  2818. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  2819. A_target_id_t pci_addr = scn->mem;
  2820. static int max_delay;
  2821. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2822. static int debug;
  2823. if (scn->recovery)
  2824. return -EACCES;
  2825. if (qdf_atomic_read(&scn->link_suspended)) {
  2826. HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
  2827. debug = true;
  2828. QDF_ASSERT(0);
  2829. return -EACCES;
  2830. }
  2831. if (debug) {
  2832. wait_for_it = true;
  2833. HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
  2834. __func__);
  2835. QDF_ASSERT(0);
  2836. }
  2837. if (sleep_ok) {
  2838. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  2839. hif_state->keep_awake_count--;
  2840. if (hif_state->keep_awake_count == 0) {
  2841. /* Allow sleep */
  2842. hif_state->verified_awake = false;
  2843. hif_state->sleep_ticks = qdf_system_ticks();
  2844. }
  2845. if (hif_state->fake_sleep == false) {
  2846. /* Set the Fake Sleep */
  2847. hif_state->fake_sleep = true;
  2848. /* Start the Sleep Timer */
  2849. qdf_timer_stop(&hif_state->sleep_timer);
  2850. qdf_timer_start(&hif_state->sleep_timer,
  2851. HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
  2852. }
  2853. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  2854. } else {
  2855. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  2856. if (hif_state->fake_sleep) {
  2857. hif_state->verified_awake = true;
  2858. } else {
  2859. if (hif_state->keep_awake_count == 0) {
  2860. /* Force AWAKE */
  2861. hif_write32_mb(pci_addr +
  2862. PCIE_LOCAL_BASE_ADDRESS +
  2863. PCIE_SOC_WAKE_ADDRESS,
  2864. PCIE_SOC_WAKE_V_MASK);
  2865. }
  2866. }
  2867. hif_state->keep_awake_count++;
  2868. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  2869. if (wait_for_it && !hif_state->verified_awake) {
#define PCIE_SLEEP_ADJUST_TIMEOUT 8000 /* 8 ms */
  2871. int tot_delay = 0;
  2872. int curr_delay = 5;
  2873. for (;; ) {
  2874. if (hif_targ_is_awake(scn, pci_addr)) {
  2875. hif_state->verified_awake = true;
  2876. break;
  2877. } else
  2878. if (!hif_pci_targ_is_present
  2879. (scn, pci_addr)) {
  2880. break;
  2881. }
  2882. if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
  2883. return hif_log_soc_wakeup_timeout(sc);
  2884. OS_DELAY(curr_delay);
  2885. tot_delay += curr_delay;
  2886. if (curr_delay < 50)
  2887. curr_delay += 5;
  2888. }
/*
 * NB: If the Target has to come out of Deep Sleep,
 * this may take a few ms. Typically, though,
 * this delay should be <30us.
 */
  2894. if (tot_delay > max_delay)
  2895. max_delay = tot_delay;
  2896. }
  2897. }
  2898. if (debug && hif_state->verified_awake) {
  2899. debug = 0;
  2900. HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
  2901. __func__,
  2902. hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
  2903. PCIE_INTR_ENABLE_ADDRESS),
  2904. hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
  2905. PCIE_INTR_CAUSE_ADDRESS),
  2906. hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
  2907. CPU_INTR_ADDRESS),
  2908. hif_read32_mb(sc->mem + SOC_CORE_BASE_ADDRESS +
  2909. PCIE_INTR_CLR_ADDRESS),
  2910. hif_read32_mb(sc->mem + CE_WRAPPER_BASE_ADDRESS +
  2911. CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
  2912. }
  2913. return 0;
  2914. }
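/*
 * Usage sketch (assumption, for illustration only): callers bracket
 * target register access with a wake request and a matching sleep
 * permission; the Q_TARGET_ACCESS_BEGIN/END macros used elsewhere in
 * this file are expected to provide the same pattern. Expressed with
 * direct calls, where some_offset is a placeholder register offset:
 *
 *	// force the target awake and wait until it is verified awake
 *	if (hif_pci_target_sleep_state_adjust(scn, false, true) < 0)
 *		return -EACCES;
 *
 *	val = hif_read32_mb(scn->mem + some_offset);	// safe access
 *
 *	// allow the target to sleep again (starts the fake-sleep timer)
 *	hif_pci_target_sleep_state_adjust(scn, true, false);
 */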
  2915. #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
  2916. uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
  2917. {
  2918. uint32_t value;
  2919. void *addr;
  2920. addr = scn->mem + offset;
  2921. value = hif_read32_mb(addr);
  2922. {
  2923. unsigned long irq_flags;
  2924. int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
  2925. spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
  2926. pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
  2927. pcie_access_log[idx].is_write = false;
  2928. pcie_access_log[idx].addr = addr;
  2929. pcie_access_log[idx].value = value;
  2930. pcie_access_log_seqnum++;
  2931. spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
  2932. }
  2933. return value;
  2934. }
  2935. void
  2936. hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
  2937. {
  2938. void *addr;
  2939. addr = scn->mem + (offset);
  2940. hif_write32_mb(addr, value);
  2941. {
  2942. unsigned long irq_flags;
  2943. int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
  2944. spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
  2945. pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
  2946. pcie_access_log[idx].is_write = true;
  2947. pcie_access_log[idx].addr = addr;
  2948. pcie_access_log[idx].value = value;
  2949. pcie_access_log_seqnum++;
  2950. spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
  2951. }
  2952. }
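/*
 * Example (illustrative only): a read-modify-write through the checked
 * accessors. With CONFIG_ATH_PCIE_ACCESS_DEBUG enabled every access is
 * recorded in pcie_access_log, so hif_target_dump_access_log() can show
 * the exact sequence that preceded a hang. EXAMPLE_REG_OFFSET is a
 * placeholder, not a real register.
 *
 *	uint32_t v;
 *
 *	v = hif_target_read_checked(scn, EXAMPLE_REG_OFFSET);
 *	v |= 0x1;
 *	hif_target_write_checked(scn, EXAMPLE_REG_OFFSET, v);
 */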
/**
 * hif_target_dump_access_log() - dump the PCIe access log
 *
 * Dump the recorded PCIe register accesses and reset the log.
 *
 * Return: n/a
 */
  2960. void hif_target_dump_access_log(void)
  2961. {
  2962. int idx, len, start_idx, cur_idx;
  2963. unsigned long irq_flags;
  2964. spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
  2965. if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
  2966. len = PCIE_ACCESS_LOG_NUM;
  2967. start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
  2968. } else {
  2969. len = pcie_access_log_seqnum;
  2970. start_idx = 0;
  2971. }
  2972. for (idx = 0; idx < len; idx++) {
  2973. cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
  2974. HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%p val:%u.",
  2975. __func__, idx,
  2976. pcie_access_log[cur_idx].seqnum,
  2977. pcie_access_log[cur_idx].is_write,
  2978. pcie_access_log[cur_idx].addr,
  2979. pcie_access_log[cur_idx].value);
  2980. }
  2981. pcie_access_log_seqnum = 0;
  2982. spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
  2983. }
  2984. #endif
  2985. #ifndef HIF_AHB
  2986. int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
  2987. {
  2988. QDF_BUG(0);
  2989. return -EINVAL;
  2990. }
  2991. int hif_ahb_configure_irq(struct hif_pci_softc *sc)
  2992. {
  2993. QDF_BUG(0);
  2994. return -EINVAL;
  2995. }
  2996. #endif
/**
 * hif_configure_irq() - configure interrupt
 * @scn: hif context
 *
 * This function configures interrupt(s)
 *
 * Return: 0 - for success
 */
  3007. int hif_configure_irq(struct hif_softc *scn)
  3008. {
  3009. int ret = 0;
  3010. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  3011. HIF_TRACE("%s: E", __func__);
  3012. hif_init_reschedule_tasklet_work(sc);
  3013. if (ENABLE_MSI) {
  3014. ret = hif_configure_msi(sc);
  3015. if (ret == 0)
  3016. goto end;
  3017. }
/* MSI disabled or failed; fall back to legacy irq */
  3019. switch (scn->target_info.target_type) {
  3020. case TARGET_TYPE_IPQ4019:
  3021. ret = hif_ahb_configure_legacy_irq(sc);
  3022. break;
  3023. case TARGET_TYPE_QCA8074:
  3024. ret = hif_ahb_configure_irq(sc);
  3025. break;
  3026. default:
  3027. ret = hif_pci_configure_legacy_irq(sc);
  3028. break;
  3029. }
  3030. if (ret < 0) {
  3031. HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
  3032. __func__, ret);
  3033. return ret;
  3034. }
  3035. end:
  3036. scn->request_irq_done = true;
  3037. return 0;
  3038. }
/**
 * hif_target_sync(): ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the target's other registers for the first time.
 *
 * Return: none
 */
  3050. void hif_target_sync(struct hif_softc *scn)
  3051. {
  3052. hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS |
  3053. PCIE_INTR_ENABLE_ADDRESS),
  3054. PCIE_INTR_FIRMWARE_MASK);
  3055. hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
  3056. PCIE_SOC_WAKE_ADDRESS,
  3057. PCIE_SOC_WAKE_V_MASK);
  3058. while (!hif_targ_is_awake(scn, scn->mem))
  3059. ;
  3060. if (HAS_FW_INDICATOR) {
  3061. int wait_limit = 500;
  3062. int fw_ind = 0;
  3063. HIF_TRACE("%s: Loop checking FW signal", __func__);
  3064. while (1) {
  3065. fw_ind = hif_read32_mb(scn->mem +
  3066. FW_INDICATOR_ADDRESS);
  3067. if (fw_ind & FW_IND_INITIALIZED)
  3068. break;
  3069. if (wait_limit-- < 0)
  3070. break;
  3071. hif_write32_mb(scn->mem+(SOC_CORE_BASE_ADDRESS |
  3072. PCIE_INTR_ENABLE_ADDRESS),
  3073. PCIE_INTR_FIRMWARE_MASK);
  3074. qdf_mdelay(10);
  3075. }
  3076. if (wait_limit < 0)
  3077. HIF_TRACE("%s: FW signal timed out",
  3078. __func__);
  3079. else
  3080. HIF_TRACE("%s: Got FW signal, retries = %x",
  3081. __func__, 500-wait_limit);
  3082. }
  3083. hif_write32_mb(scn->mem + PCIE_LOCAL_BASE_ADDRESS +
  3084. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
  3085. }
/**
 * hif_pci_enable_bus(): enable bus
 * @ol_sc: soft_sc struct
 * @dev: device pointer
 * @bdev: bus dev pointer
 * @bid: bus id pointer
 * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
 *
 * This function enables the bus
 *
 * Return: QDF_STATUS
 */
  3098. QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
  3099. struct device *dev, void *bdev,
  3100. const hif_bus_id *bid,
  3101. enum hif_enable_type type)
  3102. {
  3103. int ret = 0;
  3104. uint32_t hif_type, target_type;
  3105. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
  3106. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
  3107. uint16_t revision_id;
  3108. int probe_again = 0;
  3109. struct pci_dev *pdev = bdev;
  3110. const struct pci_device_id *id = (const struct pci_device_id *)bid;
  3111. struct hif_target_info *tgt_info;
  3112. if (!ol_sc) {
  3113. HIF_ERROR("%s: hif_ctx is NULL", __func__);
  3114. return QDF_STATUS_E_NOMEM;
  3115. }
  3116. HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
  3117. __func__, hif_get_conparam(ol_sc), id->device);
  3118. sc->pdev = pdev;
  3119. sc->dev = &pdev->dev;
  3120. sc->devid = id->device;
  3121. sc->cacheline_sz = dma_get_cache_alignment();
  3122. tgt_info = hif_get_target_info_handle(hif_hdl);
  3123. again:
  3124. ret = hif_enable_pci(sc, pdev, id);
  3125. if (ret < 0) {
  3126. HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
  3127. __func__, ret);
  3128. goto err_enable_pci;
  3129. }
  3130. HIF_TRACE("%s: hif_enable_pci done", __func__);
  3131. /* Temporary FIX: disable ASPM on peregrine.
  3132. * Will be removed after the OTP is programmed
  3133. */
  3134. hif_disable_power_gating(hif_hdl);
  3135. device_disable_async_suspend(&pdev->dev);
  3136. pci_read_config_word(pdev, 0x08, &revision_id);
  3137. ret = hif_get_device_type(id->device, revision_id,
  3138. &hif_type, &target_type);
  3139. if (ret < 0) {
  3140. HIF_ERROR("%s: invalid device id/revision_id", __func__);
  3141. goto err_tgtstate;
  3142. }
  3143. HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
  3144. __func__, hif_type, target_type);
  3145. hif_register_tbl_attach(ol_sc, hif_type);
  3146. hif_target_register_tbl_attach(ol_sc, target_type);
  3147. ret = hif_pci_probe_tgt_wakeup(sc);
  3148. if (ret < 0) {
  3149. HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
  3150. __func__, ret);
  3151. if (ret == -EAGAIN)
  3152. probe_again++;
  3153. goto err_tgtstate;
  3154. }
  3155. HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
  3156. tgt_info->target_type = target_type;
  3157. sc->soc_pcie_bar0 = pci_resource_start(pdev, BAR_NUM);
  3158. if (!sc->soc_pcie_bar0) {
  3159. HIF_ERROR("%s: ERROR - cannot get CE BAR0", __func__);
  3160. ret = -EIO;
  3161. goto err_tgtstate;
  3162. }
  3163. ol_sc->mem_pa = sc->soc_pcie_bar0;
  3164. hif_target_sync(ol_sc);
  3165. if (ADRASTEA_BU)
  3166. hif_vote_link_up(hif_hdl);
  3167. return 0;
  3168. err_tgtstate:
  3169. hif_disable_pci(sc);
  3170. sc->pci_enabled = false;
  3171. HIF_ERROR("%s: error, hif_disable_pci done", __func__);
  3172. return QDF_STATUS_E_ABORTED;
  3173. err_enable_pci:
  3174. if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
  3175. int delay_time;
  3176. HIF_INFO("%s: pci reprobe", __func__);
/* back off for at least 100 ms before reprobing */
  3178. delay_time = max(100, 10 * (probe_again * probe_again));
  3179. qdf_mdelay(delay_time);
  3180. goto again;
  3181. }
  3182. return ret;
  3183. }
  3184. /**
  3185. * hif_pci_irq_enable() - ce_irq_enable
  3186. * @scn: hif_softc
  3187. * @ce_id: ce_id
  3188. *
  3189. * Return: void
  3190. */
  3191. void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
  3192. {
  3193. uint32_t tmp = 1 << ce_id;
  3194. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  3195. qdf_spin_lock_irqsave(&sc->irq_lock);
  3196. scn->ce_irq_summary &= ~tmp;
  3197. if (scn->ce_irq_summary == 0) {
  3198. /* Enable Legacy PCI line interrupts */
  3199. if (LEGACY_INTERRUPTS(sc) &&
  3200. (scn->target_status != TARGET_STATUS_RESET) &&
  3201. (!qdf_atomic_read(&scn->link_suspended))) {
  3202. hif_write32_mb(scn->mem +
  3203. (SOC_CORE_BASE_ADDRESS |
  3204. PCIE_INTR_ENABLE_ADDRESS),
  3205. HOST_GROUP0_MASK);
  3206. hif_read32_mb(scn->mem +
  3207. (SOC_CORE_BASE_ADDRESS |
  3208. PCIE_INTR_ENABLE_ADDRESS));
  3209. }
  3210. }
  3211. if (scn->hif_init_done == true)
  3212. Q_TARGET_ACCESS_END(scn);
  3213. qdf_spin_unlock_irqrestore(&sc->irq_lock);
  3214. /* check for missed firmware crash */
  3215. hif_fw_interrupt_handler(0, scn);
  3216. }
  3217. /**
  3218. * hif_pci_irq_disable() - ce_irq_disable
  3219. * @scn: hif_softc
  3220. * @ce_id: ce_id
  3221. *
  3222. * Return: void
  3223. */
  3224. void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
  3225. {
/* For Rome only need to wake up target */
/* target access is maintained until interrupts are re-enabled */
  3228. Q_TARGET_ACCESS_BEGIN(scn);
  3229. }
  3230. #ifdef FEATURE_RUNTIME_PM
  3231. void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
  3232. {
  3233. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3234. if (NULL == sc)
  3235. return;
  3236. sc->pm_stats.runtime_get++;
  3237. pm_runtime_get_noresume(sc->dev);
  3238. }
/**
 * hif_pm_runtime_get() - do a get operation on the device
 * @hif_ctx: opaque hif context
 *
 * A get operation will prevent a runtime suspend until a
 * corresponding put is done. This api should be used when sending
 * data.
 *
 * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
 * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
 *
 * return: success if the bus is up and a get has been issued,
 *         otherwise an error code.
 */
  3252. int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
  3253. {
  3254. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  3255. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3256. int ret;
  3257. int pm_state;
  3258. if (NULL == scn) {
  3259. HIF_ERROR("%s: Could not do runtime get, scn is null",
  3260. __func__);
  3261. return -EFAULT;
  3262. }
  3263. pm_state = qdf_atomic_read(&sc->pm_state);
  3264. if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
  3265. pm_state == HIF_PM_RUNTIME_STATE_NONE) {
  3266. sc->pm_stats.runtime_get++;
  3267. ret = __hif_pm_runtime_get(sc->dev);
/* Get can return 1 if the device is already active; just return
 * success in that case.
 */
  3271. if (ret > 0)
  3272. ret = 0;
  3273. if (ret)
  3274. hif_pm_runtime_put(hif_ctx);
  3275. if (ret && ret != -EINPROGRESS) {
  3276. sc->pm_stats.runtime_get_err++;
  3277. HIF_ERROR("%s: Runtime Get PM Error in pm_state:%d ret: %d",
  3278. __func__, qdf_atomic_read(&sc->pm_state), ret);
  3279. }
  3280. return ret;
  3281. }
  3282. sc->pm_stats.request_resume++;
  3283. sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
  3284. ret = hif_pm_request_resume(sc->dev);
  3285. return -EAGAIN;
  3286. }
/**
 * hif_pm_runtime_put() - do a put operation on the device
 * @hif_ctx: opaque hif context
 *
 * A put operation will allow a runtime suspend after a corresponding
 * get was done. This api should be used when sending data.
 *
 * This api will return a failure if runtime pm is stopped.
 * This api will return a failure if it would decrement the usage count
 * below 0.
 *
 * return: 0 if the put is performed
 */
  3298. int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
  3299. {
  3300. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  3301. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3302. int pm_state, usage_count;
  3303. unsigned long flags;
  3304. char *error = NULL;
  3305. if (NULL == scn) {
  3306. HIF_ERROR("%s: Could not do runtime put, scn is null",
  3307. __func__);
  3308. return -EFAULT;
  3309. }
  3310. usage_count = atomic_read(&sc->dev->power.usage_count);
  3311. if (usage_count == 1) {
  3312. pm_state = qdf_atomic_read(&sc->pm_state);
  3313. if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
  3314. error = "Ignoring unexpected put when runtime pm is disabled";
  3315. } else if (usage_count == 0) {
  3316. error = "PUT Without a Get Operation";
  3317. }
  3318. if (error) {
  3319. spin_lock_irqsave(&sc->runtime_lock, flags);
  3320. hif_pci_runtime_pm_warn(sc, error);
  3321. spin_unlock_irqrestore(&sc->runtime_lock, flags);
  3322. return -EINVAL;
  3323. }
  3324. sc->pm_stats.runtime_put++;
  3325. hif_pm_runtime_mark_last_busy(sc->dev);
  3326. hif_pm_runtime_put_auto(sc->dev);
  3327. return 0;
  3328. }
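/*
 * Typical pairing (sketch; the caller names are hypothetical): a
 * data-path caller takes a get before touching the bus and does the
 * put once the send has been handed off. When the bus is suspended,
 * the get only requests a resume and returns an error such as -EAGAIN,
 * so the caller must defer and retry later.
 *
 *	ret = hif_pm_runtime_get(hif_ctx);
 *	if (ret) {
 *		// resume requested: queue the frame and retry later
 *		return ret;
 *	}
 *	example_send_frame(hif_ctx, nbuf);	// hypothetical send
 *	hif_pm_runtime_put(hif_ctx);
 */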
/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a
 * protocol reason
 * @hif_sc: pci context
 * @lock: runtime_pm lock being acquired
 *
 * Return: 0 if successful.
 */
  3336. static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
  3337. *hif_sc, struct hif_pm_runtime_lock *lock)
  3338. {
  3339. int ret = 0;
/*
 * We shouldn't set context->timeout to zero here when the context is
 * active, since the timeout APIs may be called back to back for the
 * same context,
 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
 * instead, to ensure the timeout version is no longer active and the
 * list entry of this context is deleted during allow suspend.
 */
  3349. if (lock->active)
  3350. return 0;
  3351. ret = __hif_pm_runtime_get(hif_sc->dev);
/*
 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
 * RPM_SUSPENDING. Any other negative value is an error.
 * We shouldn't do a runtime_put here: allow suspend is called later
 * with the same context and the usage count is decremented there, so
 * suspend stays prevented until then.
 */
  3359. if (ret < 0 && ret != -EINPROGRESS) {
  3360. hif_sc->pm_stats.runtime_get_err++;
  3361. hif_pci_runtime_pm_warn(hif_sc,
  3362. "Prevent Suspend Runtime PM Error");
  3363. }
  3364. hif_sc->prevent_suspend_cnt++;
  3365. lock->active = true;
  3366. list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
  3367. hif_sc->pm_stats.prevent_suspend++;
  3368. HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
  3369. hif_pm_runtime_state_to_string(
  3370. qdf_atomic_read(&hif_sc->pm_state)),
  3371. ret);
  3372. return ret;
  3373. }
  3374. static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
  3375. struct hif_pm_runtime_lock *lock)
  3376. {
  3377. int ret = 0;
  3378. int usage_count;
  3379. if (hif_sc->prevent_suspend_cnt == 0)
  3380. return ret;
  3381. if (!lock->active)
  3382. return ret;
  3383. usage_count = atomic_read(&hif_sc->dev->power.usage_count);
/*
 * During driver unload, the platform driver increments the usage
 * count to prevent any runtime suspend from being called.
 * So during driver load, in the HIF_PM_RUNTIME_STATE_NONE state, the
 * usage_count should be one. Ideally this shouldn't happen, as
 * context->active should be set for allow suspend to happen.
 * Handle this case here to prevent any failures.
 */
  3392. if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
  3393. && usage_count == 1) || usage_count == 0) {
  3394. hif_pci_runtime_pm_warn(hif_sc,
  3395. "Allow without a prevent suspend");
  3396. return -EINVAL;
  3397. }
  3398. list_del(&lock->list);
  3399. hif_sc->prevent_suspend_cnt--;
  3400. lock->active = false;
  3401. lock->timeout = 0;
  3402. hif_pm_runtime_mark_last_busy(hif_sc->dev);
  3403. ret = hif_pm_runtime_put_auto(hif_sc->dev);
  3404. HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
  3405. hif_pm_runtime_state_to_string(
  3406. qdf_atomic_read(&hif_sc->pm_state)),
  3407. ret);
  3408. hif_sc->pm_stats.allow_suspend++;
  3409. return ret;
  3410. }
/**
 * hif_pm_runtime_lock_timeout_fn() - callback for the runtime lock timeout
 * @data: callback data that is the pci context
 *
 * If runtime locks are acquired with a timeout, this function releases
 * the locks when the last runtime lock expires.
 */
  3420. static void hif_pm_runtime_lock_timeout_fn(unsigned long data)
  3421. {
  3422. struct hif_pci_softc *hif_sc = (struct hif_pci_softc *)data;
  3423. unsigned long flags;
  3424. unsigned long timer_expires;
  3425. struct hif_pm_runtime_lock *context, *temp;
  3426. spin_lock_irqsave(&hif_sc->runtime_lock, flags);
  3427. timer_expires = hif_sc->runtime_timer_expires;
/* Make sure we are not called too early; this should take care of the
 * following case:
  3430. *
  3431. * CPU0 CPU1 (timeout function)
  3432. * ---- ----------------------
  3433. * spin_lock_irq
  3434. * timeout function called
  3435. *
  3436. * mod_timer()
  3437. *
  3438. * spin_unlock_irq
  3439. * spin_lock_irq
  3440. */
  3441. if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
  3442. hif_sc->runtime_timer_expires = 0;
  3443. list_for_each_entry_safe(context, temp,
  3444. &hif_sc->prevent_suspend_list, list) {
  3445. if (context->timeout) {
  3446. __hif_pm_runtime_allow_suspend(hif_sc, context);
  3447. hif_sc->pm_stats.allow_suspend_timeout++;
  3448. }
  3449. }
  3450. }
  3451. spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
  3452. }
  3453. int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
  3454. struct hif_pm_runtime_lock *data)
  3455. {
  3456. struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
  3457. struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
  3458. struct hif_pm_runtime_lock *context = data;
  3459. unsigned long flags;
  3460. if (!sc->hif_config.enable_runtime_pm)
  3461. return 0;
  3462. if (!context)
  3463. return -EINVAL;
  3464. spin_lock_irqsave(&hif_sc->runtime_lock, flags);
  3465. context->timeout = 0;
  3466. __hif_pm_runtime_prevent_suspend(hif_sc, context);
  3467. spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
  3468. return 0;
  3469. }
  3470. int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
  3471. struct hif_pm_runtime_lock *data)
  3472. {
  3473. struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
  3474. struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
  3475. struct hif_pm_runtime_lock *context = data;
  3476. unsigned long flags;
  3477. if (!sc->hif_config.enable_runtime_pm)
  3478. return 0;
  3479. if (!context)
  3480. return -EINVAL;
  3481. spin_lock_irqsave(&hif_sc->runtime_lock, flags);
  3482. __hif_pm_runtime_allow_suspend(hif_sc, context);
/* The list can also be empty, e.g. when there is a single context in
 * the list and the allow suspend came before the timer expired, so the
 * context was already deleted from the list above.
 * When the list is empty the prevent_suspend count will be zero.
 */
  3489. if (hif_sc->prevent_suspend_cnt == 0 &&
  3490. hif_sc->runtime_timer_expires > 0) {
  3491. del_timer(&hif_sc->runtime_timer);
  3492. hif_sc->runtime_timer_expires = 0;
  3493. }
  3494. spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
  3495. return 0;
  3496. }
  3497. /**
  3498. * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
  3499. * @ol_sc: HIF context
  3500. * @lock: which lock is being acquired
  3501. * @delay: Timeout in milliseconds
  3502. *
  3503. * Prevent runtime suspend with a timeout after which runtime suspend would be
  3504. * allowed. This API uses a single timer to allow the suspend and timer is
  3505. * modified if the timeout is changed before timer fires.
  3506. * If the timeout is less than autosuspend_delay then use mark_last_busy instead
  3507. * of starting the timer.
  3508. *
 * Avoid this API if possible and correct the design instead.
  3510. *
  3511. * Return: 0 on success and negative error code on failure
  3512. */
  3513. int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
  3514. struct hif_pm_runtime_lock *lock, unsigned int delay)
  3515. {
  3516. struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
  3517. struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
  3518. int ret = 0;
  3519. unsigned long expires;
  3520. unsigned long flags;
  3521. struct hif_pm_runtime_lock *context = lock;
  3522. if (hif_is_load_or_unload_in_progress(sc)) {
  3523. HIF_ERROR("%s: Load/unload in progress, ignore!",
  3524. __func__);
  3525. return -EINVAL;
  3526. }
  3527. if (hif_is_recovery_in_progress(sc)) {
  3528. HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
  3529. return -EINVAL;
  3530. }
  3531. if (!sc->hif_config.enable_runtime_pm)
  3532. return 0;
  3533. if (!context)
  3534. return -EINVAL;
  3535. /*
  3536. * Don't use internal timer if the timeout is less than auto suspend
  3537. * delay.
  3538. */
  3539. if (delay <= hif_sc->dev->power.autosuspend_delay) {
  3540. hif_pm_request_resume(hif_sc->dev);
  3541. hif_pm_runtime_mark_last_busy(hif_sc->dev);
  3542. return ret;
  3543. }
  3544. expires = jiffies + msecs_to_jiffies(delay);
  3545. expires += !expires;
  3546. spin_lock_irqsave(&hif_sc->runtime_lock, flags);
  3547. context->timeout = delay;
  3548. ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
  3549. hif_sc->pm_stats.prevent_suspend_timeout++;
  3550. /* Modify the timer only if new timeout is after already configured
  3551. * timeout
  3552. */
  3553. if (time_after(expires, hif_sc->runtime_timer_expires)) {
  3554. mod_timer(&hif_sc->runtime_timer, expires);
  3555. hif_sc->runtime_timer_expires = expires;
  3556. }
  3557. spin_unlock_irqrestore(&hif_sc->runtime_lock, flags);
  3558. HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
  3559. hif_pm_runtime_state_to_string(
  3560. qdf_atomic_read(&hif_sc->pm_state)),
  3561. delay, ret);
  3562. return ret;
  3563. }
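/*
 * Sketch (illustrative): a caller that cannot guarantee a matching
 * allow-suspend on every path can bound the prevention with a timeout,
 * e.g. while waiting for a firmware response. The lock pointer comes
 * from hif_runtime_lock_init(); the command helper and the 500 ms
 * value are made-up examples.
 *
 *	hif_pm_runtime_prevent_suspend_timeout(hif_ctx, lock, 500);
 *	example_send_command(hif_ctx);		// hypothetical command
 *	// if the response arrives, call hif_pm_runtime_allow_suspend();
 *	// otherwise the timer releases the lock after 500 ms
 */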
/**
 * hif_runtime_lock_init() - API to initialize Runtime PM context
 * @name: Context name
 *
 * This API initializes the Runtime PM context of the caller and
 * returns a pointer to it.
 *
 * Return: pointer to the initialized hif_pm_runtime_lock, or NULL on
 *         allocation failure
 */
  3573. struct hif_pm_runtime_lock *hif_runtime_lock_init(const char *name)
  3574. {
  3575. struct hif_pm_runtime_lock *context;
  3576. context = qdf_mem_malloc(sizeof(*context));
  3577. if (!context) {
  3578. HIF_ERROR("%s: No memory for Runtime PM wakelock context\n",
  3579. __func__);
  3580. return NULL;
  3581. }
  3582. context->name = name ? name : "Default";
  3583. return context;
  3584. }
/**
 * hif_runtime_lock_deinit() - This API frees the runtime pm context
 * @hif_ctx: HIF opaque context
 * @data: Runtime PM context
 *
 * Return: void
 */
  3591. void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
  3592. struct hif_pm_runtime_lock *data)
  3593. {
  3594. unsigned long flags;
  3595. struct hif_pm_runtime_lock *context = data;
  3596. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3597. if (!sc)
  3598. return;
  3599. if (!context)
  3600. return;
/*
 * If the context is still active, delete its list entry and drop the
 * usage count before freeing the context.
 */
  3605. spin_lock_irqsave(&sc->runtime_lock, flags);
  3606. __hif_pm_runtime_allow_suspend(sc, context);
  3607. spin_unlock_irqrestore(&sc->runtime_lock, flags);
  3608. qdf_mem_free(context);
  3609. }
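/*
 * Lock lifecycle sketch (illustrative; the lock name and work helper
 * are made up): a client allocates a named lock once, uses it to
 * bracket work that must keep the bus awake, and frees it on teardown.
 *
 *	// init time
 *	struct hif_pm_runtime_lock *lock = hif_runtime_lock_init("example");
 *
 *	// around work that must not race with runtime suspend
 *	hif_pm_runtime_prevent_suspend(hif_ctx, lock);
 *	example_do_work(hif_ctx);		// hypothetical work
 *	hif_pm_runtime_allow_suspend(hif_ctx, lock);
 *
 *	// teardown
 *	hif_runtime_lock_deinit(hif_ctx, lock);
 */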
  3610. #endif /* FEATURE_RUNTIME_PM */