if_pci.c
  1. /*
  2. * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include <linux/pci.h>
  19. #include <linux/slab.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/if_arp.h>
  22. #ifdef CONFIG_PCI_MSM
  23. #include <linux/msm_pcie.h>
  24. #endif
  25. #include "hif_io32.h"
  26. #include "if_pci.h"
  27. #include "hif.h"
  28. #include "target_type.h"
  29. #include "hif_main.h"
  30. #include "ce_main.h"
  31. #include "ce_api.h"
  32. #include "ce_internal.h"
  33. #include "ce_reg.h"
  34. #include "ce_bmi.h"
  35. #include "regtable.h"
  36. #include "hif_hw_version.h"
  37. #include <linux/debugfs.h>
  38. #include <linux/seq_file.h>
  39. #include "qdf_status.h"
  40. #include "qdf_atomic.h"
  41. #include "pld_common.h"
  42. #include "mp_dev.h"
  43. #include "hif_debug.h"
  44. #include "if_pci_internal.h"
  45. #include "ce_tasklet.h"
  46. #include "targaddrs.h"
  47. #include "hif_exec.h"
  48. #include "pci_api.h"
  49. #include "ahb_api.h"
  50. /* Maximum ms timeout for host to wake up target */
  51. #define PCIE_WAKE_TIMEOUT 1000
  52. #define RAMDUMP_EVENT_TIMEOUT 2500
  53. /* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
  54. * PCIe data bus errors.
  55. * As a workaround, the reset sequence is changed to use a Target CPU
  56. * warm reset instead of SOC_GLOBAL_RESET.
  57. */
  58. #define CPU_WARM_RESET_WAR
  59. /*
  60. * Top-level interrupt handler for all PCI interrupts from a Target.
  61. * When a block of MSI interrupts is allocated, this top-level handler
  62. * is not used; instead, we directly call the correct sub-handler.
  63. */
  64. struct ce_irq_reg_table {
  65. uint32_t irq_enable;
  66. uint32_t irq_status;
  67. };
  68. #ifndef QCA_WIFI_3_0_ADRASTEA
  69. static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
  70. {
  71. }
  72. #else
  73. static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
  74. {
  75. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  76. unsigned int target_enable0, target_enable1;
  77. unsigned int target_cause0, target_cause1;
  78. target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
  79. target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
  80. target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
  81. target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
  82. if ((target_enable0 & target_cause0) ||
  83. (target_enable1 & target_cause1)) {
  84. hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
  85. hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
  86. if (scn->notice_send)
  87. pld_intr_notify_q6(sc->dev);
  88. }
  89. }
  90. #endif
  91. /**
  92. * pci_dispatch_interrupt() - dispatch pending CE interrupts to their tasklets
  93. * @scn: hif context
  94. *
  95. * Return: N/A
  96. */
  97. static void pci_dispatch_interrupt(struct hif_softc *scn)
  98. {
  99. uint32_t intr_summary;
  100. int id;
  101. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  102. if (scn->hif_init_done != true)
  103. return;
  104. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  105. return;
  106. intr_summary = CE_INTERRUPT_SUMMARY(scn);
  107. if (intr_summary == 0) {
  108. if ((scn->target_status != TARGET_STATUS_RESET) &&
  109. (!qdf_atomic_read(&scn->link_suspended))) {
  110. hif_write32_mb(scn, scn->mem +
  111. (SOC_CORE_BASE_ADDRESS |
  112. PCIE_INTR_ENABLE_ADDRESS),
  113. HOST_GROUP0_MASK);
  114. hif_read32_mb(scn, scn->mem +
  115. (SOC_CORE_BASE_ADDRESS |
  116. PCIE_INTR_ENABLE_ADDRESS));
  117. }
  118. Q_TARGET_ACCESS_END(scn);
  119. return;
  120. }
  121. Q_TARGET_ACCESS_END(scn);
  122. scn->ce_irq_summary = intr_summary;
  123. for (id = 0; intr_summary && (id < scn->ce_count); id++) {
  124. if (intr_summary & (1 << id)) {
  125. intr_summary &= ~(1 << id);
  126. ce_dispatch_interrupt(id, &hif_state->tasklets[id]);
  127. }
  128. }
  129. }
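/**
 * hif_pci_legacy_ce_interrupt_handler() - ISR for legacy (line/INTx) interrupts
 * @irq: irq number that fired
 * @arg: hif_pci_softc context passed at registration time
 *
 * When a legacy line interrupt fires, disables and clears it, optionally
 * verifies the SoC is still responding (0xdeadbeef check on non-Adrastea
 * targets) and checks the firmware indicator. If a firmware/SSR event is
 * pending the wlan tasklet is scheduled; otherwise pending CE interrupts
 * are dispatched directly.
 *
 * Return: IRQ_HANDLED
 */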
  130. irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
  131. {
  132. struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
  133. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  134. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
  135. volatile int tmp;
  136. uint16_t val = 0;
  137. uint32_t bar0 = 0;
  138. uint32_t fw_indicator_address, fw_indicator;
  139. bool ssr_irq = false;
  140. unsigned int host_cause, host_enable;
  141. if (LEGACY_INTERRUPTS(sc)) {
  142. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  143. return IRQ_HANDLED;
  144. if (ADRASTEA_BU) {
  145. host_enable = hif_read32_mb(sc, sc->mem +
  146. PCIE_INTR_ENABLE_ADDRESS);
  147. host_cause = hif_read32_mb(sc, sc->mem +
  148. PCIE_INTR_CAUSE_ADDRESS);
  149. if (!(host_enable & host_cause)) {
  150. hif_pci_route_adrastea_interrupt(sc);
  151. return IRQ_HANDLED;
  152. }
  153. }
  154. /* Clear Legacy PCI line interrupts
  155. * IMPORTANT: the INTR_CLR register has to be set
  156. * after INTR_ENABLE is set to 0,
  157. * otherwise the interrupt cannot really be cleared
  158. */
  159. hif_write32_mb(sc, sc->mem +
  160. (SOC_CORE_BASE_ADDRESS |
  161. PCIE_INTR_ENABLE_ADDRESS), 0);
  162. hif_write32_mb(sc, sc->mem +
  163. (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
  164. ADRASTEA_BU ?
  165. (host_enable & host_cause) :
  166. HOST_GROUP0_MASK);
  167. if (ADRASTEA_BU)
  168. hif_write32_mb(sc, sc->mem + 0x2f100c,
  169. (host_cause >> 1));
  170. /* IMPORTANT: this extra read transaction is required to
  171. * flush the posted write buffer
  172. */
  173. if (!ADRASTEA_BU) {
  174. tmp =
  175. hif_read32_mb(sc, sc->mem +
  176. (SOC_CORE_BASE_ADDRESS |
  177. PCIE_INTR_ENABLE_ADDRESS));
  178. if (tmp == 0xdeadbeef) {
  179. HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
  180. __func__);
  181. pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
  182. HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
  183. __func__, val);
  184. pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
  185. HIF_ERROR("%s: PCI Device ID = 0x%04x",
  186. __func__, val);
  187. pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
  188. HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
  189. val);
  190. pci_read_config_word(sc->pdev, PCI_STATUS, &val);
  191. HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
  192. val);
  193. pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
  194. &bar0);
  195. HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
  196. bar0);
  197. HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
  198. __func__,
  199. hif_read32_mb(sc, sc->mem +
  200. PCIE_LOCAL_BASE_ADDRESS
  201. + RTC_STATE_ADDRESS));
  202. HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
  203. __func__,
  204. hif_read32_mb(sc, sc->mem +
  205. PCIE_LOCAL_BASE_ADDRESS
  206. + PCIE_SOC_WAKE_ADDRESS));
  207. HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
  208. __func__,
  209. hif_read32_mb(sc, sc->mem + 0x80008),
  210. hif_read32_mb(sc, sc->mem + 0x8000c));
  211. HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
  212. __func__,
  213. hif_read32_mb(sc, sc->mem + 0x80010),
  214. hif_read32_mb(sc, sc->mem + 0x80014));
  215. HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
  216. __func__,
  217. hif_read32_mb(sc, sc->mem + 0x80018),
  218. hif_read32_mb(sc, sc->mem + 0x8001c));
  219. QDF_BUG(0);
  220. }
  221. PCI_CLR_CAUSE0_REGISTER(sc);
  222. }
  223. if (HAS_FW_INDICATOR) {
  224. fw_indicator_address = hif_state->fw_indicator_address;
  225. fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
  226. if ((fw_indicator != ~0) &&
  227. (fw_indicator & FW_IND_EVENT_PENDING))
  228. ssr_irq = true;
  229. }
  230. if (Q_TARGET_ACCESS_END(scn) < 0)
  231. return IRQ_HANDLED;
  232. }
  233. /* TBDXXX: Add support for WMAC */
  234. if (ssr_irq) {
  235. sc->irq_event = irq;
  236. qdf_atomic_set(&scn->tasklet_from_intr, 1);
  237. qdf_atomic_inc(&scn->active_tasklet_cnt);
  238. tasklet_schedule(&sc->intr_tq);
  239. } else {
  240. pci_dispatch_interrupt(scn);
  241. }
  242. return IRQ_HANDLED;
  243. }
  244. bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
  245. {
  246. return 1; /* FIX THIS */
  247. }
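/**
 * hif_get_irq_num() - copy the irq numbers used by this device
 * @scn: hif opaque context
 * @irq: caller-provided array to fill with irq numbers
 * @size: number of entries available in @irq
 *
 * For legacy or single-MSI operation only irq[0] is filled. With multiple
 * MSIs, one entry per CE interrupt is returned.
 *
 * Illustrative call (not from this file, hif_hdl being the caller's
 * opaque hif handle):
 *   int irqs[8];
 *   int n = hif_get_irq_num(hif_hdl, irqs, ARRAY_SIZE(irqs));
 *
 * Return: number of irqs copied, or -EINVAL on bad arguments or
 * insufficient space in @irq.
 */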
  248. int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
  249. {
  250. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  251. int i = 0;
  252. if (!irq || !size) {
  253. return -EINVAL;
  254. }
  255. if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
  256. irq[0] = sc->irq;
  257. return 1;
  258. }
  259. if (sc->num_msi_intrs > size) {
  260. qdf_print("Not enough space in irq buffer to return irqs");
  261. return -EINVAL;
  262. }
  263. for (i = 0; i < sc->num_msi_intrs; i++) {
  264. irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL;
  265. }
  266. return sc->num_msi_intrs;
  267. }
  268. /**
  269. * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
  270. * @scn: hif_softc
  271. *
  272. * Return: void
  273. */
  274. #if CONFIG_ATH_PCIE_MAX_PERF == 0
  275. void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
  276. {
  277. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  278. A_target_id_t pci_addr = scn->mem;
  279. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  280. /*
  281. * If the deferred sleep timer is running cancel it
  282. * and put the soc into sleep.
  283. */
  284. if (hif_state->fake_sleep == true) {
  285. qdf_timer_stop(&hif_state->sleep_timer);
  286. if (hif_state->verified_awake == false) {
  287. hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  288. PCIE_SOC_WAKE_ADDRESS,
  289. PCIE_SOC_WAKE_RESET);
  290. }
  291. hif_state->fake_sleep = false;
  292. }
  293. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  294. }
  295. #else
  296. inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
  297. {
  298. }
  299. #endif
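/*
 * Helpers for accessing registers in the PCIe local register space
 * (offsets relative to PCIE_LOCAL_BASE_ADDRESS).
 */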
  300. #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
  301. hif_read32_mb(sc, (char *)(mem) + \
  302. PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
  303. #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
  304. hif_write32_mb(sc, ((char *)(mem) + \
  305. PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
  306. #ifdef QCA_WIFI_3_0
  307. /**
  308. * hif_targ_is_awake() - check to see if the target is awake
  309. * @hif_ctx: hif context
  310. *
  311. * emulation never goes to sleep
  312. *
  313. * Return: true if target is awake
  314. */
  315. static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
  316. {
  317. return true;
  318. }
  319. #else
  320. /**
  321. * hif_targ_is_awake() - check to see if the target is awake
  322. * @scn: hif context
  323. *
  324. * Return: true if the targets clocks are on
  325. */
  326. static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
  327. {
  328. uint32_t val;
  329. if (scn->recovery)
  330. return false;
  331. val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
  332. + RTC_STATE_ADDRESS);
  333. return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
  334. }
  335. #endif
  336. #define ATH_PCI_RESET_WAIT_MAX 10 /* Ms */
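/**
 * hif_pci_device_reset() - cold reset the target via SOC_GLOBAL_RESET
 * @sc: hif pci context
 *
 * Wakes the target first (so it cannot scribble over host memory),
 * asserts the global reset bit, waits for the cold reset state to latch
 * and then clear, and finally releases the SOC wake request.
 *
 * Return: void
 */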
  337. static void hif_pci_device_reset(struct hif_pci_softc *sc)
  338. {
  339. void __iomem *mem = sc->mem;
  340. int i;
  341. uint32_t val;
  342. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  343. if (!scn->hostdef)
  344. return;
  345. /* NB: Don't check resetok here. This form of reset
  346. * is integral to correct operation.
  347. */
  348. if (!SOC_GLOBAL_RESET_ADDRESS)
  349. return;
  350. if (!mem)
  351. return;
  352. HIF_ERROR("%s: Reset Device", __func__);
  353. /*
  354. * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
  355. * writing WAKE_V, the Target may scribble over Host memory!
  356. */
  357. A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
  358. PCIE_SOC_WAKE_V_MASK);
  359. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  360. if (hif_targ_is_awake(scn, mem))
  361. break;
  362. qdf_mdelay(1);
  363. }
  364. /* Put Target, including PCIe, into RESET. */
  365. val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
  366. val |= 1;
  367. A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
  368. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  369. if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
  370. RTC_STATE_COLD_RESET_MASK)
  371. break;
  372. qdf_mdelay(1);
  373. }
  374. /* Pull Target, including PCIe, out of RESET. */
  375. val &= ~1;
  376. A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
  377. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  378. if (!
  379. (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
  380. RTC_STATE_COLD_RESET_MASK))
  381. break;
  382. qdf_mdelay(1);
  383. }
  384. A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
  385. PCIE_SOC_WAKE_RESET);
  386. }
  387. /* CPU warm reset function
  388. * Steps:
  389. * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
  390. * 2. Clear the FW_INDICATOR_ADDRESS - so Target CPU initializes FW
  391. * correctly on WARM reset
  392. * 3. Clear TARGET CPU LF timer interrupt
  393. * 4. Reset all CEs to clear any pending CE transactions
  394. * 5. Warm reset CPU
  395. */
  396. static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
  397. {
  398. void __iomem *mem = sc->mem;
  399. int i;
  400. uint32_t val;
  401. uint32_t fw_indicator;
  402. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  403. /* NB: Don't check resetok here. This form of reset is
  404. * integral to correct operation.
  405. */
  406. if (!mem)
  407. return;
  408. HIF_INFO_MED("%s: Target Warm Reset", __func__);
  409. /*
  410. * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
  411. * writing WAKE_V, the Target may scribble over Host memory!
  412. */
  413. A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
  414. PCIE_SOC_WAKE_V_MASK);
  415. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  416. if (hif_targ_is_awake(scn, mem))
  417. break;
  418. qdf_mdelay(1);
  419. }
  420. /*
  421. * Disable Pending interrupts
  422. */
  423. val =
  424. hif_read32_mb(sc, mem +
  425. (SOC_CORE_BASE_ADDRESS |
  426. PCIE_INTR_CAUSE_ADDRESS));
  427. HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
  428. (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
  429. /* Target CPU Intr Cause */
  430. val = hif_read32_mb(sc, mem +
  431. (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
  432. HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);
  433. val =
  434. hif_read32_mb(sc, mem +
  435. (SOC_CORE_BASE_ADDRESS |
  436. PCIE_INTR_ENABLE_ADDRESS));
  437. hif_write32_mb(sc, (mem +
  438. (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
  439. hif_write32_mb(sc, (mem +
  440. (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
  441. HOST_GROUP0_MASK);
  442. qdf_mdelay(100);
  443. /* Clear FW_INDICATOR_ADDRESS */
  444. if (HAS_FW_INDICATOR) {
  445. fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
  446. hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
  447. }
  448. /* Clear Target LF Timer interrupts */
  449. val =
  450. hif_read32_mb(sc, mem +
  451. (RTC_SOC_BASE_ADDRESS +
  452. SOC_LF_TIMER_CONTROL0_ADDRESS));
  453. HIF_INFO_MED("%s: addr 0x%x : 0x%x", __func__,
  454. (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
  455. val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
  456. hif_write32_mb(sc, mem +
  457. (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
  458. val);
  459. /* Reset CE */
  460. val =
  461. hif_read32_mb(sc, mem +
  462. (RTC_SOC_BASE_ADDRESS |
  463. SOC_RESET_CONTROL_ADDRESS));
  464. val |= SOC_RESET_CONTROL_CE_RST_MASK;
  465. hif_write32_mb(sc, (mem +
  466. (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
  467. val);
  468. val =
  469. hif_read32_mb(sc, mem +
  470. (RTC_SOC_BASE_ADDRESS |
  471. SOC_RESET_CONTROL_ADDRESS));
  472. qdf_mdelay(10);
  473. /* CE unreset */
  474. val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
  475. hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
  476. SOC_RESET_CONTROL_ADDRESS), val);
  477. val =
  478. hif_read32_mb(sc, mem +
  479. (RTC_SOC_BASE_ADDRESS |
  480. SOC_RESET_CONTROL_ADDRESS));
  481. qdf_mdelay(10);
  482. /* Read Target CPU Intr Cause */
  483. val = hif_read32_mb(sc, mem +
  484. (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
  485. HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
  486. __func__, val);
  487. /* CPU warm RESET */
  488. val =
  489. hif_read32_mb(sc, mem +
  490. (RTC_SOC_BASE_ADDRESS |
  491. SOC_RESET_CONTROL_ADDRESS));
  492. val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
  493. hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
  494. SOC_RESET_CONTROL_ADDRESS), val);
  495. val =
  496. hif_read32_mb(sc, mem +
  497. (RTC_SOC_BASE_ADDRESS |
  498. SOC_RESET_CONTROL_ADDRESS));
  499. HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
  500. __func__, val);
  501. qdf_mdelay(100);
  502. HIF_INFO_MED("%s: Target Warm reset complete", __func__);
  503. }
  504. #ifndef QCA_WIFI_3_0
  505. /* only applicable to legacy ce */
  506. int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
  507. {
  508. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  509. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  510. void __iomem *mem = sc->mem;
  511. uint32_t val;
  512. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  513. return ATH_ISR_NOSCHED;
  514. val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
  515. if (Q_TARGET_ACCESS_END(scn) < 0)
  516. return ATH_ISR_SCHED;
  517. HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);
  518. if (val & FW_IND_HELPER)
  519. return 0;
  520. return 1;
  521. }
  522. #endif
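/**
 * hif_check_soc_status() - sanity check PCIe link and SoC register access
 * @hif_ctx: hif context
 *
 * Reads the device ID from PCIe config space to confirm the link is up,
 * checks RTC_STATE through BAR access, forces a SOC wake and polls up to
 * PCIE_WAKE_TIMEOUT ms for the target to wake, then reads the SoC power
 * register.
 *
 * Return: 0 on success, -EACCES if the device is inaccessible or does
 * not wake up in time.
 */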
  523. int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
  524. {
  525. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  526. uint16_t device_id = 0;
  527. uint32_t val;
  528. uint16_t timeout_count = 0;
  529. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  530. /* Check device ID from PCIe configuration space for link status */
  531. pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
  532. if (device_id != sc->devid) {
  533. HIF_ERROR("%s: device ID does match (read 0x%x, expect 0x%x)",
  534. __func__, device_id, sc->devid);
  535. return -EACCES;
  536. }
  537. /* Check PCIe local register for bar/memory access */
  538. val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  539. RTC_STATE_ADDRESS);
  540. HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
  541. /* Try to wake up the target if it sleeps */
  542. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  543. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  544. HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
  545. hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  546. PCIE_SOC_WAKE_ADDRESS));
  547. /* Check if the target can be woken up */
  548. while (!hif_targ_is_awake(scn, sc->mem)) {
  549. if (timeout_count >= PCIE_WAKE_TIMEOUT) {
  550. HIF_ERROR("%s: wake up timeout, %08x, %08x",
  551. __func__,
  552. hif_read32_mb(sc, sc->mem +
  553. PCIE_LOCAL_BASE_ADDRESS +
  554. RTC_STATE_ADDRESS),
  555. hif_read32_mb(sc, sc->mem +
  556. PCIE_LOCAL_BASE_ADDRESS +
  557. PCIE_SOC_WAKE_ADDRESS));
  558. return -EACCES;
  559. }
  560. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  561. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  562. qdf_mdelay(100);
  563. timeout_count += 100;
  564. }
  565. /* Check Power register for SoC internal bus issues */
  566. val =
  567. hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
  568. SOC_POWER_REG_OFFSET);
  569. HIF_INFO_MED("%s: Power register is %08x", __func__, val);
  570. return 0;
  571. }
  572. /**
  573. * __hif_pci_dump_registers(): dump other PCI debug registers
  574. * @scn: struct hif_softc
  575. *
  576. * This function dumps pci debug registers. The parent function
  577. * dumps the copy engine registers before calling this function.
  578. *
  579. * Return: void
  580. */
  581. static void __hif_pci_dump_registers(struct hif_softc *scn)
  582. {
  583. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  584. void __iomem *mem = sc->mem;
  585. uint32_t val, i, j;
  586. uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
  587. uint32_t ce_base;
  588. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  589. return;
  590. /* DEBUG_INPUT_SEL_SRC = 0x6 */
  591. val =
  592. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  593. WLAN_DEBUG_INPUT_SEL_OFFSET);
  594. val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
  595. val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
  596. hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
  597. WLAN_DEBUG_INPUT_SEL_OFFSET, val);
  598. /* DEBUG_CONTROL_ENABLE = 0x1 */
  599. val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  600. WLAN_DEBUG_CONTROL_OFFSET);
  601. val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
  602. val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
  603. hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
  604. WLAN_DEBUG_CONTROL_OFFSET, val);
  605. HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
  606. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  607. WLAN_DEBUG_INPUT_SEL_OFFSET),
  608. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  609. WLAN_DEBUG_CONTROL_OFFSET));
  610. HIF_INFO_MED("%s: Debug CE", __func__);
  611. /* Loop CE debug output */
  612. /* AMBA_DEBUG_BUS_SEL = 0xc */
  613. val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  614. AMBA_DEBUG_BUS_OFFSET);
  615. val &= ~AMBA_DEBUG_BUS_SEL_MASK;
  616. val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
  617. hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
  618. val);
  619. for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
  620. /* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
  621. val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
  622. CE_WRAPPER_DEBUG_OFFSET);
  623. val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
  624. val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
  625. hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
  626. CE_WRAPPER_DEBUG_OFFSET, val);
  627. HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
  628. __func__, wrapper_idx[i],
  629. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  630. AMBA_DEBUG_BUS_OFFSET),
  631. hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
  632. CE_WRAPPER_DEBUG_OFFSET));
  633. if (wrapper_idx[i] <= 7) {
  634. for (j = 0; j <= 5; j++) {
  635. ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
  636. /* For (j=0~5) write CE_DEBUG_SEL = j */
  637. val =
  638. hif_read32_mb(sc, mem + ce_base +
  639. CE_DEBUG_OFFSET);
  640. val &= ~CE_DEBUG_SEL_MASK;
  641. val |= CE_DEBUG_SEL_SET(j);
  642. hif_write32_mb(sc, mem + ce_base +
  643. CE_DEBUG_OFFSET, val);
  644. /* read (@gpio_athr_wlan_reg)
  645. * WLAN_DEBUG_OUT_DATA
  646. */
  647. val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
  648. + WLAN_DEBUG_OUT_OFFSET);
  649. val = WLAN_DEBUG_OUT_DATA_GET(val);
  650. HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
  651. __func__, j,
  652. hif_read32_mb(sc, mem + ce_base +
  653. CE_DEBUG_OFFSET), val);
  654. }
  655. } else {
  656. /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
  657. val =
  658. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  659. WLAN_DEBUG_OUT_OFFSET);
  660. val = WLAN_DEBUG_OUT_DATA_GET(val);
  661. HIF_INFO_MED("%s: out: %x", __func__, val);
  662. }
  663. }
  664. HIF_INFO_MED("%s: Debug PCIe:", __func__);
  665. /* Loop PCIe debug output */
  666. /* Write AMBA_DEBUG_BUS_SEL = 0x1c */
  667. val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  668. AMBA_DEBUG_BUS_OFFSET);
  669. val &= ~AMBA_DEBUG_BUS_SEL_MASK;
  670. val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
  671. hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
  672. AMBA_DEBUG_BUS_OFFSET, val);
  673. for (i = 0; i <= 8; i++) {
  674. /* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
  675. val =
  676. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  677. AMBA_DEBUG_BUS_OFFSET);
  678. val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
  679. val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
  680. hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
  681. AMBA_DEBUG_BUS_OFFSET, val);
  682. /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
  683. val =
  684. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  685. WLAN_DEBUG_OUT_OFFSET);
  686. val = WLAN_DEBUG_OUT_DATA_GET(val);
  687. HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
  688. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  689. WLAN_DEBUG_OUT_OFFSET), val,
  690. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  691. WLAN_DEBUG_OUT_OFFSET));
  692. }
  693. Q_TARGET_ACCESS_END(scn);
  694. }
  695. /**
  696. * hif_pci_dump_registers(): dump bus debug registers
  697. * @hif_ctx: struct hif_softc
  698. *
  699. * This function dumps hif bus debug registers
  700. *
  701. * Return: 0 for success or error code
  702. */
  703. int hif_pci_dump_registers(struct hif_softc *hif_ctx)
  704. {
  705. int status;
  706. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  707. status = hif_dump_ce_registers(scn);
  708. if (status)
  709. HIF_ERROR("%s: Dump CE Registers Failed", __func__);
  710. /* dump non copy engine pci registers */
  711. __hif_pci_dump_registers(scn);
  712. return 0;
  713. }
  714. #ifdef HIF_CONFIG_SLUB_DEBUG_ON
  715. /* worker thread to schedule wlan_tasklet in SLUB debug build */
  716. static void reschedule_tasklet_work_handler(void *arg)
  717. {
  718. struct hif_pci_softc *sc = arg;
  719. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  720. if (!scn) {
  721. HIF_ERROR("%s: hif_softc is NULL\n", __func__);
  722. return;
  723. }
  724. if (scn->hif_init_done == false) {
  725. HIF_ERROR("%s: wlan driver is unloaded", __func__);
  726. return;
  727. }
  728. tasklet_schedule(&sc->intr_tq);
  729. }
  730. /**
  731. * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
  732. * work
  733. * @sc: HIF PCI Context
  734. *
  735. * Return: void
  736. */
  737. static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
  738. {
  739. qdf_create_work(0, &sc->reschedule_tasklet_work,
  740. reschedule_tasklet_work_handler, NULL);
  741. }
  742. #else
  743. static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
  744. #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
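/**
 * wlan_tasklet() - tasklet servicing firmware interrupts in legacy mode
 * @data: hif_pci_softc context (cast from unsigned long)
 *
 * Skips processing before hif init completes or while the PCIe link is
 * suspended. For non-Adrastea targets it services firmware interrupts via
 * hif_fw_interrupt_handler(). On exit it always clears tasklet_from_intr
 * and drops the active tasklet count.
 *
 * Return: void
 */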
  745. void wlan_tasklet(unsigned long data)
  746. {
  747. struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
  748. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  749. if (scn->hif_init_done == false)
  750. goto end;
  751. if (qdf_atomic_read(&scn->link_suspended))
  752. goto end;
  753. if (!ADRASTEA_BU) {
  754. hif_fw_interrupt_handler(sc->irq_event, scn);
  755. if (scn->target_status == TARGET_STATUS_RESET)
  756. goto end;
  757. }
  758. end:
  759. qdf_atomic_set(&scn->tasklet_from_intr, 0);
  760. qdf_atomic_dec(&scn->active_tasklet_cnt);
  761. }
  762. #ifdef FEATURE_RUNTIME_PM
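/**
 * hif_pm_runtime_state_to_string() - convert a runtime pm state to a name
 * @state: HIF_PM_RUNTIME_STATE_* value
 *
 * Return: printable name of the state, used in runtime pm logging.
 */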
  763. static const char *hif_pm_runtime_state_to_string(uint32_t state)
  764. {
  765. switch (state) {
  766. case HIF_PM_RUNTIME_STATE_NONE:
  767. return "INIT_STATE";
  768. case HIF_PM_RUNTIME_STATE_ON:
  769. return "ON";
  770. case HIF_PM_RUNTIME_STATE_RESUMING:
  771. return "RESUMING";
  772. case HIF_PM_RUNTIME_STATE_SUSPENDING:
  773. return "SUSPENDING";
  774. case HIF_PM_RUNTIME_STATE_SUSPENDED:
  775. return "SUSPENDED";
  776. default:
  777. return "INVALID STATE";
  778. }
  779. }
  780. #define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
  781. seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
  782. /**
  783. * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
  784. * @sc: hif_pci_softc context
  785. * @msg: log message
  786. *
  787. * log runtime pm stats when something seems off.
  788. *
  789. * Return: void
  790. */
  791. static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
  792. {
  793. struct hif_pm_runtime_lock *ctx;
  794. HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
  795. msg, atomic_read(&sc->dev->power.usage_count),
  796. hif_pm_runtime_state_to_string(
  797. atomic_read(&sc->pm_state)),
  798. sc->prevent_suspend_cnt);
  799. HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
  800. sc->dev->power.runtime_status,
  801. sc->dev->power.runtime_error,
  802. sc->dev->power.disable_depth,
  803. sc->dev->power.autosuspend_delay);
  804. HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
  805. sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
  806. sc->pm_stats.request_resume);
  807. HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
  808. sc->pm_stats.allow_suspend,
  809. sc->pm_stats.prevent_suspend);
  810. HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
  811. sc->pm_stats.prevent_suspend_timeout,
  812. sc->pm_stats.allow_suspend_timeout);
  813. HIF_ERROR("Suspended: %u, resumed: %u count",
  814. sc->pm_stats.suspended,
  815. sc->pm_stats.resumed);
  816. HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
  817. sc->pm_stats.suspend_err,
  818. sc->pm_stats.runtime_get_err);
  819. HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");
  820. list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
  821. HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
  822. }
  823. WARN_ON(1);
  824. }
  825. /**
  826. * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
  827. * @s: file to print to
  828. * @data: unused
  829. *
  830. * debugging tool added to the debug fs for displaying runtimepm stats
  831. *
  832. * Return: 0
  833. */
  834. static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
  835. {
  836. struct hif_pci_softc *sc = s->private;
  837. static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
  838. "SUSPENDING", "SUSPENDED"};
  839. unsigned int msecs_age;
  840. qdf_time_t usecs_age;
  841. int pm_state = atomic_read(&sc->pm_state);
  842. unsigned long timer_expires;
  843. struct hif_pm_runtime_lock *ctx;
  844. seq_printf(s, "%30s: %s\n", "Runtime PM state",
  845. autopm_state[pm_state]);
  846. seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
  847. sc->pm_stats.last_resume_caller);
  848. seq_printf(s, "%30s: %pf\n", "Last Busy Marker",
  849. sc->pm_stats.last_busy_marker);
  850. usecs_age = qdf_get_log_timestamp_usecs() -
  851. sc->pm_stats.last_busy_timestamp;
  852. seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
  853. sc->pm_stats.last_busy_timestamp / 1000000,
  854. sc->pm_stats.last_busy_timestamp % 1000000);
  855. seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
  856. usecs_age / 1000000, usecs_age % 1000000);
  857. if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
  858. msecs_age = jiffies_to_msecs(jiffies -
  859. sc->pm_stats.suspend_jiffies);
  860. seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
  861. msecs_age / 1000, msecs_age % 1000);
  862. }
  863. seq_printf(s, "%30s: %d\n", "PM Usage count",
  864. atomic_read(&sc->dev->power.usage_count));
  865. seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
  866. sc->prevent_suspend_cnt);
  867. HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
  868. HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
  869. HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
  870. HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
  871. HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
  872. HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
  873. HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
  874. HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
  875. HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
  876. HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
  877. HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);
  878. timer_expires = sc->runtime_timer_expires;
  879. if (timer_expires > 0) {
  880. msecs_age = jiffies_to_msecs(timer_expires - jiffies);
  881. seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
  882. msecs_age / 1000, msecs_age % 1000);
  883. }
  884. spin_lock_bh(&sc->runtime_lock);
  885. if (list_empty(&sc->prevent_suspend_list)) {
  886. spin_unlock_bh(&sc->runtime_lock);
  887. return 0;
  888. }
  889. seq_printf(s, "%30s: ", "Active Wakeup_Sources");
  890. list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
  891. seq_printf(s, "%s", ctx->name);
  892. if (ctx->timeout)
  893. seq_printf(s, "(%d ms)", ctx->timeout);
  894. seq_puts(s, " ");
  895. }
  896. seq_puts(s, "\n");
  897. spin_unlock_bh(&sc->runtime_lock);
  898. return 0;
  899. }
  900. #undef HIF_PCI_RUNTIME_PM_STATS
  901. /**
  902. * hif_pci_runtime_pm_open() - open a debugfs file to access the runtime pm stats
  903. * @inode: inode of the debugfs entry
  904. * @file: file handle for the open
  905. *
  906. * Return: linux error code of single_open.
  907. */
  908. static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
  909. {
  910. return single_open(file, hif_pci_pm_runtime_debugfs_show,
  911. inode->i_private);
  912. }
  913. static const struct file_operations hif_pci_runtime_pm_fops = {
  914. .owner = THIS_MODULE,
  915. .open = hif_pci_runtime_pm_open,
  916. .release = single_release,
  917. .read = seq_read,
  918. .llseek = seq_lseek,
  919. };
  920. /**
  921. * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
  922. * @sc: pci context
  923. *
  924. * creates a debugfs entry to debug the runtime pm feature.
  925. */
  926. static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
  927. {
  928. sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
  929. 0400, NULL, sc,
  930. &hif_pci_runtime_pm_fops);
  931. }
  932. /**
  933. * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
  934. * @sc: pci context
  935. *
  936. * removes the debugfs entry to debug the runtime pm feature.
  937. */
  938. static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
  939. {
  940. debugfs_remove(sc->pm_dentry);
  941. }
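/**
 * hif_runtime_init() - enable Linux runtime pm autosuspend for the device
 * @dev: pci device being managed
 * @delay: autosuspend inactivity delay in ms
 *
 * Return: void
 */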
  942. static void hif_runtime_init(struct device *dev, int delay)
  943. {
  944. pm_runtime_set_autosuspend_delay(dev, delay);
  945. pm_runtime_use_autosuspend(dev);
  946. pm_runtime_allow(dev);
  947. pm_runtime_mark_last_busy(dev);
  948. pm_runtime_put_noidle(dev);
  949. pm_suspend_ignore_children(dev, true);
  950. }
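/**
 * hif_runtime_exit() - undo hif_runtime_init()
 * @dev: pci device being managed
 *
 * Takes back the usage count dropped by hif_runtime_init() and marks the
 * device active so runtime pm no longer suspends it.
 *
 * Return: void
 */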
  951. static void hif_runtime_exit(struct device *dev)
  952. {
  953. pm_runtime_get_noresume(dev);
  954. pm_runtime_set_active(dev);
  955. }
  956. static void hif_pm_runtime_lock_timeout_fn(void *data);
  957. /**
  958. * hif_pm_runtime_start(): start the runtime pm
  959. * @sc: pci context
  960. *
  961. * After this call, runtime pm will be active.
  962. */
  963. static void hif_pm_runtime_start(struct hif_pci_softc *sc)
  964. {
  965. struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
  966. uint32_t mode = hif_get_conparam(ol_sc);
  967. if (!ol_sc->hif_config.enable_runtime_pm) {
  968. HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
  969. return;
  970. }
  971. if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
  972. mode == QDF_GLOBAL_MONITOR_MODE) {
  973. HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
  974. __func__);
  975. return;
  976. }
  977. qdf_timer_init(NULL, &sc->runtime_timer,
  978. hif_pm_runtime_lock_timeout_fn,
  979. sc, QDF_TIMER_TYPE_WAKE_APPS);
  980. HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
  981. ol_sc->hif_config.runtime_pm_delay);
  982. hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
  983. qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
  984. hif_runtime_pm_debugfs_create(sc);
  985. }
  986. /**
  987. * hif_pm_runtime_stop(): stop runtime pm
  988. * @sc: pci context
  989. *
  990. * Turns off runtime pm and frees corresponding resources
  991. * that were acquired by hif_pm_runtime_start().
  992. */
  993. static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
  994. {
  995. struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
  996. uint32_t mode = hif_get_conparam(ol_sc);
  997. if (!ol_sc->hif_config.enable_runtime_pm)
  998. return;
  999. if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
  1000. mode == QDF_GLOBAL_MONITOR_MODE)
  1001. return;
  1002. hif_runtime_exit(sc->dev);
  1003. hif_pm_runtime_resume(sc->dev);
  1004. qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
  1005. hif_runtime_pm_debugfs_remove(sc);
  1006. qdf_timer_free(&sc->runtime_timer);
  1007. /* doesn't wait for pending traffic unlike cld-2.0 */
  1008. }
  1009. /**
  1010. * hif_pm_runtime_open(): initialize runtime pm
  1011. * @sc: pci data structure
  1012. *
  1013. * Early initialization
  1014. */
  1015. static void hif_pm_runtime_open(struct hif_pci_softc *sc)
  1016. {
  1017. spin_lock_init(&sc->runtime_lock);
  1018. qdf_atomic_init(&sc->pm_state);
  1019. qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
  1020. qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
  1021. INIT_LIST_HEAD(&sc->prevent_suspend_list);
  1022. }
  1023. /**
  1024. * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
  1025. * @sc: pci context
  1026. *
  1027. * Ensure we have only one vote against runtime suspend before closing
  1028. * the runtime suspend feature.
  1029. *
  1030. * all gets by the wlan driver should have been returned
  1031. * one vote should remain as part of cnss_runtime_exit
  1032. *
  1033. * needs to be revisited if we share the root complex.
  1034. */
  1035. static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
  1036. {
  1037. struct hif_pm_runtime_lock *ctx, *tmp;
  1038. if (atomic_read(&sc->dev->power.usage_count) != 1)
  1039. hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
  1040. else
  1041. return;
  1042. spin_lock_bh(&sc->runtime_lock);
  1043. list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
  1044. spin_unlock_bh(&sc->runtime_lock);
  1045. hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
  1046. spin_lock_bh(&sc->runtime_lock);
  1047. }
  1048. spin_unlock_bh(&sc->runtime_lock);
  1049. /* Ensure one and only one usage count remains so that runtime pm
  1050. * is not left disabled when the wlan driver is insmodded again,
  1051. * and so the usage count never drops below 1 and breaks
  1052. * runtime pm.
  1053. */
  1054. if (atomic_read(&sc->dev->power.usage_count) <= 0)
  1055. atomic_set(&sc->dev->power.usage_count, 1);
  1056. while (atomic_read(&sc->dev->power.usage_count) > 1)
  1057. hif_pm_runtime_put_auto(sc->dev);
  1058. }
  1059. static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
  1060. struct hif_pm_runtime_lock *lock);
  1061. /**
  1062. * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
  1063. * @sc: PCIe Context
  1064. *
  1065. * API is used to empty the runtime pm prevent suspend list.
  1066. *
  1067. * Return: void
  1068. */
  1069. static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
  1070. {
  1071. struct hif_pm_runtime_lock *ctx, *tmp;
  1072. spin_lock_bh(&sc->runtime_lock);
  1073. list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
  1074. __hif_pm_runtime_allow_suspend(sc, ctx);
  1075. }
  1076. spin_unlock_bh(&sc->runtime_lock);
  1077. }
  1078. /**
  1079. * hif_pm_runtime_close(): close runtime pm
  1080. * @sc: pci bus handle
  1081. *
  1082. * ensure runtime_pm is stopped before closing the driver
  1083. */
  1084. static void hif_pm_runtime_close(struct hif_pci_softc *sc)
  1085. {
  1086. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  1087. qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
  1088. if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
  1089. return;
  1090. hif_pm_runtime_stop(sc);
  1091. hif_is_recovery_in_progress(scn) ?
  1092. hif_pm_runtime_sanitize_on_ssr_exit(sc) :
  1093. hif_pm_runtime_sanitize_on_exit(sc);
  1094. }
  1095. #else
  1096. static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
  1097. static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
  1098. static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
  1099. static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
  1100. #endif
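/*
 * Illustrative lifecycle sketch (not compiled code): the runtime PM hooks
 * above are expected to be driven by the bus layer in this file in roughly
 * the following order:
 *
 *   hif_pci_open()                     -> hif_pm_runtime_open()   (early init)
 *   hif_pci_enable_power_management()  -> hif_pm_runtime_start()  (pm_state -> ON)
 *   hif_pci_disable_power_management() -> hif_pm_runtime_stop()   (pm_state -> NONE)
 *   hif_pci_close()                    -> hif_pm_runtime_close()  (sanitize votes)
 */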
  1101. /**
  1102. * hif_disable_power_gating() - disable HW power gating
  1103. * @hif_ctx: hif context
  1104. *
  1105. * disables pcie L1 power states
  1106. */
  1107. static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
  1108. {
  1109. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  1110. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  1111. if (!scn) {
  1112. HIF_ERROR("%s: Could not disable ASPM scn is null",
  1113. __func__);
  1114. return;
  1115. }
  1116. /* Disable ASPM when pkt log is enabled */
  1117. pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
  1118. pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
  1119. }
  1120. /**
  1121. * hif_enable_power_gating() - enable HW power gating
1122. * @sc: pci bus context
  1123. *
  1124. * enables pcie L1 power states
  1125. */
  1126. static void hif_enable_power_gating(struct hif_pci_softc *sc)
  1127. {
  1128. if (!sc) {
  1129. HIF_ERROR("%s: Could not disable ASPM scn is null",
  1130. __func__);
  1131. return;
  1132. }
  1133. /* Re-enable ASPM after firmware/OTP download is complete */
  1134. pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
  1135. }
  1136. /**
1137. * hif_pci_enable_power_management() - enable power management
1138. * @hif_sc: hif context
1139. *
1140. * Enables runtime pm, ASPM (via hif_enable_power_gating) and re-enables
1141. * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
  1142. *
  1143. * note: epping mode does not call this function as it does not
  1144. * care about saving power.
  1145. */
  1146. void hif_pci_enable_power_management(struct hif_softc *hif_sc,
  1147. bool is_packet_log_enabled)
  1148. {
  1149. struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
  1150. uint32_t mode;
  1151. if (!pci_ctx) {
  1152. HIF_ERROR("%s, hif_ctx null", __func__);
  1153. return;
  1154. }
  1155. mode = hif_get_conparam(hif_sc);
  1156. if (mode == QDF_GLOBAL_FTM_MODE) {
  1157. HIF_INFO("%s: Enable power gating for FTM mode", __func__);
  1158. hif_enable_power_gating(pci_ctx);
  1159. return;
  1160. }
  1161. hif_pm_runtime_start(pci_ctx);
  1162. if (!is_packet_log_enabled)
  1163. hif_enable_power_gating(pci_ctx);
  1164. if (!CONFIG_ATH_PCIE_MAX_PERF &&
  1165. CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
  1166. !ce_srng_based(hif_sc)) {
  1167. /* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
  1168. if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
  1169. HIF_ERROR("%s, failed to set target to sleep",
  1170. __func__);
  1171. }
  1172. }
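/*
 * Usage sketch (illustrative only; pktlog_enabled is a hypothetical caller
 * variable): callers typically invoke this once firmware download is done,
 * passing whether packet log is active so that ASPM stays disabled while
 * pktlog needs the bus awake:
 *
 *   hif_pci_enable_power_management(hif_sc, pktlog_enabled);
 */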
  1173. /**
1174. * hif_pci_disable_power_management() - disable power management
1175. * @hif_ctx: hif context
1176. *
1177. * Currently only disables runtime pm. Should be updated to behave
1178. * gracefully if runtime pm is not started, and to take care
1179. * of aspm and soc sleep for driver load.
  1180. */
  1181. void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
  1182. {
  1183. struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
  1184. if (!pci_ctx) {
  1185. HIF_ERROR("%s, hif_ctx null", __func__);
  1186. return;
  1187. }
  1188. hif_pm_runtime_stop(pci_ctx);
  1189. }
  1190. void hif_pci_display_stats(struct hif_softc *hif_ctx)
  1191. {
  1192. struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
  1193. if (!pci_ctx) {
  1194. HIF_ERROR("%s, hif_ctx null", __func__);
  1195. return;
  1196. }
  1197. hif_display_ce_stats(&pci_ctx->ce_sc);
  1198. hif_print_pci_stats(pci_ctx);
  1199. }
  1200. void hif_pci_clear_stats(struct hif_softc *hif_ctx)
  1201. {
  1202. struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
  1203. if (!pci_ctx) {
  1204. HIF_ERROR("%s, hif_ctx null", __func__);
  1205. return;
  1206. }
  1207. hif_clear_ce_stats(&pci_ctx->ce_sc);
  1208. }
  1209. #define ATH_PCI_PROBE_RETRY_MAX 3
  1210. /**
1211. * hif_pci_open(): hif bus open
1212. * @hif_ctx: hif context
1213. * @bus_type: bus type
1214. *
1215. * Return: QDF_STATUS
  1216. */
  1217. QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
  1218. {
  1219. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  1220. hif_ctx->bus_type = bus_type;
  1221. hif_pm_runtime_open(sc);
  1222. qdf_spinlock_create(&sc->irq_lock);
  1223. return hif_ce_open(hif_ctx);
  1224. }
  1225. /**
  1226. * hif_wake_target_cpu() - wake the target's cpu
  1227. * @scn: hif context
  1228. *
  1229. * Send an interrupt to the device to wake up the Target CPU
  1230. * so it has an opportunity to notice any changed state.
  1231. */
  1232. static void hif_wake_target_cpu(struct hif_softc *scn)
  1233. {
  1234. QDF_STATUS rv;
  1235. uint32_t core_ctrl;
  1236. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
  1237. rv = hif_diag_read_access(hif_hdl,
  1238. SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
  1239. &core_ctrl);
  1240. QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
  1241. /* A_INUM_FIRMWARE interrupt to Target CPU */
  1242. core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
  1243. rv = hif_diag_write_access(hif_hdl,
  1244. SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
  1245. core_ctrl);
  1246. QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
  1247. }
  1248. /**
  1249. * soc_wake_reset() - allow the target to go to sleep
  1250. * @scn: hif_softc
  1251. *
  1252. * Clear the force wake register. This is done by
1253. * hif_sleep_entry and by hif_cancel_deferred_target_sleep().
  1254. */
  1255. static void soc_wake_reset(struct hif_softc *scn)
  1256. {
  1257. hif_write32_mb(scn, scn->mem +
  1258. PCIE_LOCAL_BASE_ADDRESS +
  1259. PCIE_SOC_WAKE_ADDRESS,
  1260. PCIE_SOC_WAKE_RESET);
  1261. }
  1262. /**
  1263. * hif_sleep_entry() - gate target sleep
  1264. * @arg: hif context
  1265. *
  1266. * This function is the callback for the sleep timer.
  1267. * Check if last force awake critical section was at least
  1268. * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago. if it was,
  1269. * allow the target to go to sleep and cancel the sleep timer.
  1270. * otherwise reschedule the sleep timer.
  1271. */
  1272. static void hif_sleep_entry(void *arg)
  1273. {
  1274. struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
  1275. struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
  1276. uint32_t idle_ms;
  1277. if (scn->recovery)
  1278. return;
  1279. if (hif_is_driver_unloading(scn))
  1280. return;
  1281. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  1282. if (hif_state->fake_sleep) {
  1283. idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
  1284. - hif_state->sleep_ticks);
  1285. if (!hif_state->verified_awake &&
  1286. idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
  1287. if (!qdf_atomic_read(&scn->link_suspended)) {
  1288. soc_wake_reset(scn);
  1289. hif_state->fake_sleep = false;
  1290. }
  1291. } else {
  1292. qdf_timer_stop(&hif_state->sleep_timer);
  1293. qdf_timer_start(&hif_state->sleep_timer,
  1294. HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
  1295. }
  1296. }
  1297. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  1298. }
  1299. #define HIF_HIA_MAX_POLL_LOOP 1000000
  1300. #define HIF_HIA_POLLING_DELAY_MS 10
  1301. #ifdef QCA_HIF_HIA_EXTND
  1302. static void hif_set_hia_extnd(struct hif_softc *scn)
  1303. {
  1304. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
  1305. struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
  1306. uint32_t target_type = tgt_info->target_type;
  1307. HIF_TRACE("%s: E", __func__);
  1308. if ((target_type == TARGET_TYPE_AR900B) ||
  1309. target_type == TARGET_TYPE_QCA9984 ||
  1310. target_type == TARGET_TYPE_QCA9888) {
  1311. /* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
  1312. * in RTC space
  1313. */
  1314. tgt_info->target_revision
  1315. = CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
  1316. + CHIP_ID_ADDRESS));
  1317. qdf_print("chip_id 0x%x chip_revision 0x%x",
  1318. target_type, tgt_info->target_revision);
  1319. }
  1320. {
  1321. uint32_t flag2_value = 0;
  1322. uint32_t flag2_targ_addr =
  1323. host_interest_item_address(target_type,
  1324. offsetof(struct host_interest_s, hi_skip_clock_init));
  1325. if ((ar900b_20_targ_clk != -1) &&
  1326. (frac != -1) && (intval != -1)) {
  1327. hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1328. &flag2_value);
  1329. qdf_print("\n Setting clk_override");
  1330. flag2_value |= CLOCK_OVERRIDE;
  1331. hif_diag_write_access(hif_hdl, flag2_targ_addr,
  1332. flag2_value);
  1333. qdf_print("\n CLOCK PLL val set %d", flag2_value);
  1334. } else {
  1335. qdf_print("\n CLOCK PLL skipped");
  1336. }
  1337. }
  1338. if (target_type == TARGET_TYPE_AR900B
  1339. || target_type == TARGET_TYPE_QCA9984
  1340. || target_type == TARGET_TYPE_QCA9888) {
  1341. /* for AR9980_2.0, 300 mhz clock is used, right now we assume
  1342. * this would be supplied through module parameters,
  1343. * if not supplied assumed default or same behavior as 1.0.
  1344. * Assume 1.0 clock can't be tuned, reset to defaults
  1345. */
  1346. qdf_print(KERN_INFO
  1347. "%s: setting the target pll frac %x intval %x",
  1348. __func__, frac, intval);
  1349. /* do not touch frac, and int val, let them be default -1,
  1350. * if desired, host can supply these through module params
  1351. */
  1352. if (frac != -1 || intval != -1) {
  1353. uint32_t flag2_value = 0;
  1354. uint32_t flag2_targ_addr;
  1355. flag2_targ_addr =
  1356. host_interest_item_address(target_type,
  1357. offsetof(struct host_interest_s,
  1358. hi_clock_info));
  1359. hif_diag_read_access(hif_hdl,
  1360. flag2_targ_addr, &flag2_value);
  1361. qdf_print("\n ====> FRAC Val %x Address %x", frac,
  1362. flag2_value);
  1363. hif_diag_write_access(hif_hdl, flag2_value, frac);
  1364. qdf_print("\n INT Val %x Address %x",
  1365. intval, flag2_value + 4);
  1366. hif_diag_write_access(hif_hdl,
  1367. flag2_value + 4, intval);
  1368. } else {
  1369. qdf_print(KERN_INFO
  1370. "%s: no frac provided, skipping pre-configuring PLL",
  1371. __func__);
  1372. }
  1373. /* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
  1374. if ((target_type == TARGET_TYPE_AR900B)
  1375. && (tgt_info->target_revision == AR900B_REV_2)
  1376. && ar900b_20_targ_clk != -1) {
  1377. uint32_t flag2_value = 0;
  1378. uint32_t flag2_targ_addr;
  1379. flag2_targ_addr
  1380. = host_interest_item_address(target_type,
  1381. offsetof(struct host_interest_s,
  1382. hi_desired_cpu_speed_hz));
  1383. hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1384. &flag2_value);
  1385. qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
  1386. flag2_value);
  1387. hif_diag_write_access(hif_hdl, flag2_value,
  1388. ar900b_20_targ_clk/*300000000u*/);
  1389. } else if (target_type == TARGET_TYPE_QCA9888) {
  1390. uint32_t flag2_targ_addr;
  1391. if (200000000u != qca9888_20_targ_clk) {
  1392. qca9888_20_targ_clk = 300000000u;
  1393. /* Setting the target clock speed to 300 mhz */
  1394. }
  1395. flag2_targ_addr
  1396. = host_interest_item_address(target_type,
  1397. offsetof(struct host_interest_s,
  1398. hi_desired_cpu_speed_hz));
  1399. hif_diag_write_access(hif_hdl, flag2_targ_addr,
  1400. qca9888_20_targ_clk);
  1401. } else {
  1402. qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
  1403. __func__);
  1404. }
  1405. } else {
  1406. if (frac != -1 || intval != -1) {
  1407. uint32_t flag2_value = 0;
  1408. uint32_t flag2_targ_addr =
  1409. host_interest_item_address(target_type,
  1410. offsetof(struct host_interest_s,
  1411. hi_clock_info));
  1412. hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1413. &flag2_value);
  1414. qdf_print("\n ====> FRAC Val %x Address %x", frac,
  1415. flag2_value);
  1416. hif_diag_write_access(hif_hdl, flag2_value, frac);
  1417. qdf_print("\n INT Val %x Address %x", intval,
  1418. flag2_value + 4);
  1419. hif_diag_write_access(hif_hdl, flag2_value + 4,
  1420. intval);
  1421. }
  1422. }
  1423. }
  1424. #else
  1425. static void hif_set_hia_extnd(struct hif_softc *scn)
  1426. {
  1427. }
  1428. #endif
  1429. /**
  1430. * hif_set_hia() - fill out the host interest area
  1431. * @scn: hif context
  1432. *
  1433. * This is replaced by hif_wlan_enable for integrated targets.
  1434. * This fills out the host interest area. The firmware will
  1435. * process these memory addresses when it is first brought out
  1436. * of reset.
  1437. *
  1438. * Return: 0 for success.
  1439. */
  1440. static int hif_set_hia(struct hif_softc *scn)
  1441. {
  1442. QDF_STATUS rv;
  1443. uint32_t interconnect_targ_addr = 0;
  1444. uint32_t pcie_state_targ_addr = 0;
  1445. uint32_t pipe_cfg_targ_addr = 0;
  1446. uint32_t svc_to_pipe_map = 0;
  1447. uint32_t pcie_config_flags = 0;
  1448. uint32_t flag2_value = 0;
  1449. uint32_t flag2_targ_addr = 0;
  1450. #ifdef QCA_WIFI_3_0
  1451. uint32_t host_interest_area = 0;
  1452. uint8_t i;
  1453. #else
  1454. uint32_t ealloc_value = 0;
  1455. uint32_t ealloc_targ_addr = 0;
  1456. uint8_t banks_switched = 1;
  1457. uint32_t chip_id;
  1458. #endif
  1459. uint32_t pipe_cfg_addr;
  1460. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
  1461. struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
  1462. uint32_t target_type = tgt_info->target_type;
  1463. uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
  1464. static struct CE_pipe_config *target_ce_config;
  1465. struct service_to_pipe *target_service_to_ce_map;
  1466. HIF_TRACE("%s: E", __func__);
  1467. hif_get_target_ce_config(scn,
  1468. &target_ce_config, &target_ce_config_sz,
  1469. &target_service_to_ce_map,
  1470. &target_service_to_ce_map_sz,
  1471. NULL, NULL);
  1472. if (ADRASTEA_BU)
  1473. return QDF_STATUS_SUCCESS;
  1474. #ifdef QCA_WIFI_3_0
  1475. i = 0;
  1476. while (i < HIF_HIA_MAX_POLL_LOOP) {
  1477. host_interest_area = hif_read32_mb(scn, scn->mem +
  1478. A_SOC_CORE_SCRATCH_0_ADDRESS);
  1479. if ((host_interest_area & 0x01) == 0) {
  1480. qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
  1481. host_interest_area = 0;
  1482. i++;
  1483. if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
  1484. HIF_ERROR("%s: poll timeout(%d)", __func__, i);
  1485. } else {
  1486. host_interest_area &= (~0x01);
  1487. hif_write32_mb(scn, scn->mem + 0x113014, 0);
  1488. break;
  1489. }
  1490. }
  1491. if (i >= HIF_HIA_MAX_POLL_LOOP) {
  1492. HIF_ERROR("%s: hia polling timeout", __func__);
  1493. return -EIO;
  1494. }
  1495. if (host_interest_area == 0) {
  1496. HIF_ERROR("%s: host_interest_area = 0", __func__);
  1497. return -EIO;
  1498. }
  1499. interconnect_targ_addr = host_interest_area +
  1500. offsetof(struct host_interest_area_t,
  1501. hi_interconnect_state);
  1502. flag2_targ_addr = host_interest_area +
  1503. offsetof(struct host_interest_area_t, hi_option_flag2);
  1504. #else
  1505. interconnect_targ_addr = hif_hia_item_address(target_type,
  1506. offsetof(struct host_interest_s, hi_interconnect_state));
  1507. ealloc_targ_addr = hif_hia_item_address(target_type,
  1508. offsetof(struct host_interest_s, hi_early_alloc));
  1509. flag2_targ_addr = hif_hia_item_address(target_type,
  1510. offsetof(struct host_interest_s, hi_option_flag2));
  1511. #endif
  1512. /* Supply Target-side CE configuration */
  1513. rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
  1514. &pcie_state_targ_addr);
  1515. if (rv != QDF_STATUS_SUCCESS) {
  1516. HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
  1517. __func__, interconnect_targ_addr, rv);
  1518. goto done;
  1519. }
  1520. if (pcie_state_targ_addr == 0) {
  1521. rv = QDF_STATUS_E_FAILURE;
  1522. HIF_ERROR("%s: pcie state addr is 0", __func__);
  1523. goto done;
  1524. }
  1525. pipe_cfg_addr = pcie_state_targ_addr +
  1526. offsetof(struct pcie_state_s,
  1527. pipe_cfg_addr);
  1528. rv = hif_diag_read_access(hif_hdl,
  1529. pipe_cfg_addr,
  1530. &pipe_cfg_targ_addr);
  1531. if (rv != QDF_STATUS_SUCCESS) {
  1532. HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
  1533. __func__, pipe_cfg_addr, rv);
  1534. goto done;
  1535. }
  1536. if (pipe_cfg_targ_addr == 0) {
  1537. rv = QDF_STATUS_E_FAILURE;
  1538. HIF_ERROR("%s: pipe cfg addr is 0", __func__);
  1539. goto done;
  1540. }
  1541. rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
  1542. (uint8_t *) target_ce_config,
  1543. target_ce_config_sz);
  1544. if (rv != QDF_STATUS_SUCCESS) {
  1545. HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
  1546. goto done;
  1547. }
  1548. rv = hif_diag_read_access(hif_hdl,
  1549. pcie_state_targ_addr +
  1550. offsetof(struct pcie_state_s,
  1551. svc_to_pipe_map),
  1552. &svc_to_pipe_map);
  1553. if (rv != QDF_STATUS_SUCCESS) {
  1554. HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
  1555. goto done;
  1556. }
  1557. if (svc_to_pipe_map == 0) {
  1558. rv = QDF_STATUS_E_FAILURE;
  1559. HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
  1560. goto done;
  1561. }
  1562. rv = hif_diag_write_mem(hif_hdl,
  1563. svc_to_pipe_map,
  1564. (uint8_t *) target_service_to_ce_map,
  1565. target_service_to_ce_map_sz);
  1566. if (rv != QDF_STATUS_SUCCESS) {
  1567. HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
  1568. goto done;
  1569. }
  1570. rv = hif_diag_read_access(hif_hdl,
  1571. pcie_state_targ_addr +
  1572. offsetof(struct pcie_state_s,
  1573. config_flags),
  1574. &pcie_config_flags);
  1575. if (rv != QDF_STATUS_SUCCESS) {
  1576. HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
  1577. goto done;
  1578. }
  1579. #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
  1580. pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
  1581. #else
  1582. pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
  1583. #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
  1584. pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
  1585. #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
  1586. pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
  1587. #endif
  1588. rv = hif_diag_write_mem(hif_hdl,
  1589. pcie_state_targ_addr +
  1590. offsetof(struct pcie_state_s,
  1591. config_flags),
  1592. (uint8_t *) &pcie_config_flags,
  1593. sizeof(pcie_config_flags));
  1594. if (rv != QDF_STATUS_SUCCESS) {
  1595. HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
  1596. goto done;
  1597. }
  1598. #ifndef QCA_WIFI_3_0
  1599. /* configure early allocation */
  1600. ealloc_targ_addr = hif_hia_item_address(target_type,
  1601. offsetof(
  1602. struct host_interest_s,
  1603. hi_early_alloc));
  1604. rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
  1605. &ealloc_value);
  1606. if (rv != QDF_STATUS_SUCCESS) {
  1607. HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
  1608. goto done;
  1609. }
  1610. /* 1 bank is switched to IRAM, except ROME 1.0 */
  1611. ealloc_value |=
  1612. ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
  1613. HI_EARLY_ALLOC_MAGIC_MASK);
  1614. rv = hif_diag_read_access(hif_hdl,
  1615. CHIP_ID_ADDRESS |
  1616. RTC_SOC_BASE_ADDRESS, &chip_id);
  1617. if (rv != QDF_STATUS_SUCCESS) {
  1618. HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
  1619. goto done;
  1620. }
  1621. if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
  1622. tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
  1623. switch (CHIP_ID_REVISION_GET(chip_id)) {
  1624. case 0x2: /* ROME 1.3 */
  1625. /* 2 banks are switched to IRAM */
  1626. banks_switched = 2;
  1627. break;
  1628. case 0x4: /* ROME 2.1 */
  1629. case 0x5: /* ROME 2.2 */
  1630. banks_switched = 6;
  1631. break;
  1632. case 0x8: /* ROME 3.0 */
  1633. case 0x9: /* ROME 3.1 */
  1634. case 0xA: /* ROME 3.2 */
  1635. banks_switched = 9;
  1636. break;
  1637. case 0x0: /* ROME 1.0 */
  1638. case 0x1: /* ROME 1.1 */
  1639. default:
  1640. /* 3 banks are switched to IRAM */
  1641. banks_switched = 3;
  1642. break;
  1643. }
  1644. }
  1645. ealloc_value |=
  1646. ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
  1647. & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
  1648. rv = hif_diag_write_access(hif_hdl,
  1649. ealloc_targ_addr,
  1650. ealloc_value);
  1651. if (rv != QDF_STATUS_SUCCESS) {
  1652. HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
  1653. goto done;
  1654. }
  1655. #endif
  1656. if ((target_type == TARGET_TYPE_AR900B)
  1657. || (target_type == TARGET_TYPE_QCA9984)
  1658. || (target_type == TARGET_TYPE_QCA9888)
  1659. || (target_type == TARGET_TYPE_AR9888)) {
  1660. hif_set_hia_extnd(scn);
  1661. }
  1662. /* Tell Target to proceed with initialization */
  1663. flag2_targ_addr = hif_hia_item_address(target_type,
  1664. offsetof(
  1665. struct host_interest_s,
  1666. hi_option_flag2));
  1667. rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1668. &flag2_value);
  1669. if (rv != QDF_STATUS_SUCCESS) {
  1670. HIF_ERROR("%s: get option val (%d)", __func__, rv);
  1671. goto done;
  1672. }
  1673. flag2_value |= HI_OPTION_EARLY_CFG_DONE;
  1674. rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
  1675. flag2_value);
  1676. if (rv != QDF_STATUS_SUCCESS) {
  1677. HIF_ERROR("%s: set option val (%d)", __func__, rv);
  1678. goto done;
  1679. }
  1680. hif_wake_target_cpu(scn);
  1681. done:
  1682. return rv;
  1683. }
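/*
 * Summary sketch of the HIA programming sequence performed above (assuming
 * a BMI-based, non-integrated target): read hi_interconnect_state to find
 * the pcie_state struct, write the CE pipe config and service-to-pipe map,
 * update the pcie config_flags, program hi_early_alloc (pre-QCA_WIFI_3_0),
 * set HI_OPTION_EARLY_CFG_DONE in hi_option_flag2, then wake the Target CPU.
 */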
  1684. /**
  1685. * hif_bus_configure() - configure the pcie bus
  1686. * @hif_sc: pointer to the hif context.
  1687. *
  1688. * return: 0 for success. nonzero for failure.
  1689. */
  1690. int hif_pci_bus_configure(struct hif_softc *hif_sc)
  1691. {
  1692. int status = 0;
  1693. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
  1694. struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
  1695. hif_ce_prepare_config(hif_sc);
  1696. /* initialize sleep state adjust variables */
  1697. hif_state->sleep_timer_init = true;
  1698. hif_state->keep_awake_count = 0;
  1699. hif_state->fake_sleep = false;
  1700. hif_state->sleep_ticks = 0;
  1701. qdf_timer_init(NULL, &hif_state->sleep_timer,
  1702. hif_sleep_entry, (void *)hif_state,
  1703. QDF_TIMER_TYPE_WAKE_APPS);
  1704. hif_state->sleep_timer_init = true;
  1705. status = hif_wlan_enable(hif_sc);
  1706. if (status) {
  1707. HIF_ERROR("%s: hif_wlan_enable error = %d",
  1708. __func__, status);
  1709. goto timer_free;
  1710. }
  1711. A_TARGET_ACCESS_LIKELY(hif_sc);
  1712. if ((CONFIG_ATH_PCIE_MAX_PERF ||
  1713. CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
  1714. !ce_srng_based(hif_sc)) {
  1715. /*
  1716. * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
  1717. * prevent sleep when we want to keep firmware always awake
  1718. * note: when we want to keep firmware always awake,
  1719. * hif_target_sleep_state_adjust will point to a dummy
  1720. * function, and hif_pci_target_sleep_state_adjust must
  1721. * be called instead.
  1722. * note: bus type check is here because AHB bus is reusing
  1723. * hif_pci_bus_configure code.
  1724. */
  1725. if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
  1726. if (hif_pci_target_sleep_state_adjust(hif_sc,
  1727. false, true) < 0) {
  1728. status = -EACCES;
  1729. goto disable_wlan;
  1730. }
  1731. }
  1732. }
  1733. /* todo: consider replacing this with an srng field */
  1734. if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
  1735. (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
  1736. (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
  1737. (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
  1738. hif_sc->per_ce_irq = true;
  1739. }
  1740. status = hif_config_ce(hif_sc);
  1741. if (status)
  1742. goto disable_wlan;
1743. /* QCA_WIFI_QCA8074_VP: Should not be executed on 8074 VP platform */
  1744. if (hif_needs_bmi(hif_osc)) {
  1745. status = hif_set_hia(hif_sc);
  1746. if (status)
  1747. goto unconfig_ce;
  1748. HIF_INFO_MED("%s: hif_set_hia done", __func__);
  1749. }
  1750. if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
  1751. (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
  1752. (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
  1753. (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
  1754. HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
  1755. __func__);
  1756. else {
  1757. status = hif_configure_irq(hif_sc);
  1758. if (status < 0)
  1759. goto unconfig_ce;
  1760. }
  1761. A_TARGET_ACCESS_UNLIKELY(hif_sc);
  1762. return status;
  1763. unconfig_ce:
  1764. hif_unconfig_ce(hif_sc);
  1765. disable_wlan:
  1766. A_TARGET_ACCESS_UNLIKELY(hif_sc);
  1767. hif_wlan_disable(hif_sc);
  1768. timer_free:
  1769. qdf_timer_stop(&hif_state->sleep_timer);
  1770. qdf_timer_free(&hif_state->sleep_timer);
  1771. hif_state->sleep_timer_init = false;
  1772. HIF_ERROR("%s: failed, status = %d", __func__, status);
  1773. return status;
  1774. }
  1775. /**
1776. * hif_pci_close(): hif bus close
  1777. *
  1778. * Return: n/a
  1779. */
  1780. void hif_pci_close(struct hif_softc *hif_sc)
  1781. {
  1782. struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
  1783. hif_pm_runtime_close(hif_pci_sc);
  1784. hif_ce_close(hif_sc);
  1785. }
  1786. #define BAR_NUM 0
  1787. static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
  1788. struct pci_dev *pdev,
  1789. const struct pci_device_id *id)
  1790. {
  1791. void __iomem *mem;
  1792. int ret = 0;
  1793. uint16_t device_id = 0;
  1794. struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
  1795. pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
  1796. if (device_id != id->device) {
  1797. HIF_ERROR(
  1798. "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
  1799. __func__, device_id, id->device);
1800. /* pci link is down, so returning with error code */
  1801. return -EIO;
  1802. }
  1803. /* FIXME: temp. commenting out assign_resource
  1804. * call for dev_attach to work on 2.6.38 kernel
  1805. */
  1806. #if (!defined(__LINUX_ARM_ARCH__))
  1807. if (pci_assign_resource(pdev, BAR_NUM)) {
  1808. HIF_ERROR("%s: pci_assign_resource error", __func__);
  1809. return -EIO;
  1810. }
  1811. #endif
  1812. if (pci_enable_device(pdev)) {
  1813. HIF_ERROR("%s: pci_enable_device error",
  1814. __func__);
  1815. return -EIO;
  1816. }
  1817. /* Request MMIO resources */
  1818. ret = pci_request_region(pdev, BAR_NUM, "ath");
  1819. if (ret) {
  1820. HIF_ERROR("%s: PCI MMIO reservation error", __func__);
  1821. ret = -EIO;
  1822. goto err_region;
  1823. }
  1824. #ifdef CONFIG_ARM_LPAE
1825. /* if CONFIG_ARM_LPAE is enabled, we have to set the 64-bit DMA mask
1826. * even for 32-bit devices.
  1827. */
  1828. ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  1829. if (ret) {
  1830. HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
  1831. goto err_dma;
  1832. }
  1833. ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
  1834. if (ret) {
  1835. HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
  1836. goto err_dma;
  1837. }
  1838. #else
  1839. ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  1840. if (ret) {
  1841. HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
  1842. goto err_dma;
  1843. }
  1844. ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
  1845. if (ret) {
  1846. HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
  1847. __func__);
  1848. goto err_dma;
  1849. }
  1850. #endif
  1851. PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
  1852. /* Set bus master bit in PCI_COMMAND to enable DMA */
  1853. pci_set_master(pdev);
  1854. /* Arrange for access to Target SoC registers. */
  1855. mem = pci_iomap(pdev, BAR_NUM, 0);
  1856. if (!mem) {
  1857. HIF_ERROR("%s: PCI iomap error", __func__);
  1858. ret = -EIO;
  1859. goto err_iomap;
  1860. }
  1861. HIF_INFO("*****BAR is %pK\n", (void *)mem);
  1862. sc->mem = mem;
  1863. /* Hawkeye emulation specific change */
  1864. if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
  1865. (device_id == RUMIM2M_DEVICE_ID_NODE1) ||
  1866. (device_id == RUMIM2M_DEVICE_ID_NODE2) ||
  1867. (device_id == RUMIM2M_DEVICE_ID_NODE3) ||
  1868. (device_id == RUMIM2M_DEVICE_ID_NODE4) ||
  1869. (device_id == RUMIM2M_DEVICE_ID_NODE5)) {
  1870. mem = mem + 0x0c000000;
  1871. sc->mem = mem;
  1872. HIF_INFO("%s: Changing PCI mem base to %pK\n",
  1873. __func__, sc->mem);
  1874. }
  1875. sc->mem_len = pci_resource_len(pdev, BAR_NUM);
  1876. ol_sc->mem = mem;
  1877. ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
  1878. sc->pci_enabled = true;
  1879. return ret;
  1880. err_iomap:
  1881. pci_clear_master(pdev);
  1882. err_dma:
  1883. pci_release_region(pdev, BAR_NUM);
  1884. err_region:
  1885. pci_disable_device(pdev);
  1886. return ret;
  1887. }
  1888. static int hif_enable_pci_pld(struct hif_pci_softc *sc,
  1889. struct pci_dev *pdev,
  1890. const struct pci_device_id *id)
  1891. {
  1892. PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
  1893. sc->pci_enabled = true;
  1894. return 0;
  1895. }
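/*
 * Note on the two enable paths above: hif_enable_pci_nopld() does the full
 * PCI bring-up (enable device, claim the BAR, set DMA masks, iomap), while
 * hif_enable_pci_pld() only disables L1SS and marks the bus enabled,
 * presumably because the pld platform driver has already performed the PCI
 * bring-up on those platforms.
 */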
  1896. static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
  1897. {
  1898. pci_disable_msi(sc->pdev);
  1899. pci_iounmap(sc->pdev, sc->mem);
  1900. pci_clear_master(sc->pdev);
  1901. pci_release_region(sc->pdev, BAR_NUM);
  1902. pci_disable_device(sc->pdev);
  1903. }
  1904. static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
  1905. static void hif_disable_pci(struct hif_pci_softc *sc)
  1906. {
  1907. struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
  1908. if (!ol_sc) {
  1909. HIF_ERROR("%s: ol_sc = NULL", __func__);
  1910. return;
  1911. }
  1912. hif_pci_device_reset(sc);
  1913. sc->hif_pci_deinit(sc);
  1914. sc->mem = NULL;
  1915. ol_sc->mem = NULL;
  1916. }
  1917. static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
  1918. {
  1919. int ret = 0;
  1920. int targ_awake_limit = 500;
  1921. #ifndef QCA_WIFI_3_0
  1922. uint32_t fw_indicator;
  1923. #endif
  1924. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  1925. /*
1926. * Verify that the Target was started cleanly.
  1927. * The case where this is most likely is with an AUX-powered
  1928. * Target and a Host in WoW mode. If the Host crashes,
  1929. * loses power, or is restarted (without unloading the driver)
  1930. * then the Target is left (aux) powered and running. On a
  1931. * subsequent driver load, the Target is in an unexpected state.
  1932. * We try to catch that here in order to reset the Target and
  1933. * retry the probe.
  1934. */
  1935. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  1936. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  1937. while (!hif_targ_is_awake(scn, sc->mem)) {
  1938. if (0 == targ_awake_limit) {
  1939. HIF_ERROR("%s: target awake timeout", __func__);
  1940. ret = -EAGAIN;
  1941. goto end;
  1942. }
  1943. qdf_mdelay(1);
  1944. targ_awake_limit--;
  1945. }
  1946. #if PCIE_BAR0_READY_CHECKING
  1947. {
  1948. int wait_limit = 200;
  1949. /* Synchronization point: wait the BAR0 is configured */
  1950. while (wait_limit-- &&
1951. !(hif_read32_mb(sc, sc->mem +
  1952. PCIE_LOCAL_BASE_ADDRESS +
  1953. PCIE_SOC_RDY_STATUS_ADDRESS)
  1954. & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
  1955. qdf_mdelay(10);
  1956. }
  1957. if (wait_limit < 0) {
  1958. /* AR6320v1 doesn't support checking of BAR0
1959. * configuration; takes up to two seconds to wait for BAR0 ready
  1960. */
  1961. HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
  1962. __func__);
  1963. }
  1964. }
  1965. #endif
  1966. #ifndef QCA_WIFI_3_0
  1967. fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
  1968. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  1969. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
  1970. if (fw_indicator & FW_IND_INITIALIZED) {
  1971. HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
  1972. __func__);
  1973. ret = -EAGAIN;
  1974. goto end;
  1975. }
  1976. #endif
  1977. end:
  1978. return ret;
  1979. }
  1980. static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
  1981. {
  1982. int ret = 0;
  1983. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  1984. uint32_t target_type = scn->target_info.target_type;
  1985. HIF_TRACE("%s: E", __func__);
1986. /* does not support MSI, or MSI IRQ request failed */
  1987. tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
  1988. ret = request_irq(sc->pdev->irq,
  1989. hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
  1990. "wlan_pci", sc);
  1991. if (ret) {
  1992. HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
  1993. goto end;
  1994. }
  1995. scn->wake_irq = sc->pdev->irq;
1996. /* Use sc->irq instead of sc->pdev->irq;
1997. * platform_device pdev doesn't have an irq field
  1998. */
  1999. sc->irq = sc->pdev->irq;
  2000. /* Use Legacy PCI Interrupts */
  2001. hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
  2002. PCIE_INTR_ENABLE_ADDRESS),
  2003. HOST_GROUP0_MASK);
  2004. hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
  2005. PCIE_INTR_ENABLE_ADDRESS));
  2006. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  2007. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
  2008. if ((target_type == TARGET_TYPE_IPQ4019) ||
  2009. (target_type == TARGET_TYPE_AR900B) ||
  2010. (target_type == TARGET_TYPE_QCA9984) ||
  2011. (target_type == TARGET_TYPE_AR9888) ||
  2012. (target_type == TARGET_TYPE_QCA9888) ||
  2013. (target_type == TARGET_TYPE_AR6320V1) ||
  2014. (target_type == TARGET_TYPE_AR6320V2) ||
  2015. (target_type == TARGET_TYPE_AR6320V3)) {
  2016. hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
  2017. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  2018. }
  2019. end:
  2020. QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
  2021. "%s: X, ret = %d", __func__, ret);
  2022. return ret;
  2023. }
  2024. static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
  2025. {
  2026. int ret;
  2027. int ce_id, irq;
  2028. uint32_t msi_data_start;
  2029. uint32_t msi_data_count;
  2030. uint32_t msi_irq_start;
  2031. struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
  2032. struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
  2033. ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
  2034. &msi_data_count, &msi_data_start,
  2035. &msi_irq_start);
  2036. if (ret)
  2037. return ret;
  2038. /* needs to match the ce_id -> irq data mapping
  2039. * used in the srng parameter configuration
  2040. */
  2041. for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
  2042. unsigned int msi_data;
  2043. if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
  2044. continue;
  2045. if (!ce_sc->tasklets[ce_id].inited)
  2046. continue;
  2047. msi_data = (ce_id % msi_data_count) + msi_irq_start;
  2048. irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
  2049. hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
  2050. ce_id, msi_data, irq);
  2051. free_irq(irq, &ce_sc->tasklets[ce_id]);
  2052. }
  2053. return ret;
  2054. }
  2055. static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
  2056. {
  2057. int i, j, irq;
  2058. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  2059. struct hif_exec_context *hif_ext_group;
  2060. for (i = 0; i < hif_state->hif_num_extgroup; i++) {
  2061. hif_ext_group = hif_state->hif_ext_group[i];
  2062. if (hif_ext_group->irq_requested) {
  2063. hif_ext_group->irq_requested = false;
  2064. for (j = 0; j < hif_ext_group->numirq; j++) {
  2065. irq = hif_ext_group->os_irq[j];
  2066. free_irq(irq, hif_ext_group);
  2067. }
  2068. hif_ext_group->numirq = 0;
  2069. }
  2070. }
  2071. }
  2072. /**
2073. * hif_pci_nointrs(): disable IRQ
  2074. *
  2075. * This function stops interrupt(s)
  2076. *
  2077. * @scn: struct hif_softc
  2078. *
  2079. * Return: none
  2080. */
  2081. void hif_pci_nointrs(struct hif_softc *scn)
  2082. {
  2083. int i, ret;
  2084. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2085. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  2086. ce_unregister_irq(hif_state, CE_ALL_BITMAP);
  2087. if (scn->request_irq_done == false)
  2088. return;
  2089. hif_pci_deconfigure_grp_irq(scn);
  2090. ret = hif_ce_srng_msi_free_irq(scn);
  2091. if (ret != -EINVAL) {
  2092. /* ce irqs freed in hif_ce_srng_msi_free_irq */
  2093. if (scn->wake_irq)
  2094. free_irq(scn->wake_irq, scn);
  2095. scn->wake_irq = 0;
  2096. } else if (sc->num_msi_intrs > 0) {
  2097. /* MSI interrupt(s) */
  2098. for (i = 0; i < sc->num_msi_intrs; i++)
  2099. free_irq(sc->irq + i, sc);
  2100. sc->num_msi_intrs = 0;
  2101. } else {
  2102. /* Legacy PCI line interrupt
2103. * Use sc->irq instead of sc->pdev->irq;
  2104. * platform_device pdev doesn't have an irq field
  2105. */
  2106. free_irq(sc->irq, sc);
  2107. }
  2108. scn->request_irq_done = false;
  2109. }
  2110. /**
2111. * hif_pci_disable_bus(): disable the pci bus
2112. *
2113. * This function disables the bus
2114. *
2115. * @scn: hif context
  2116. *
  2117. * Return: none
  2118. */
  2119. void hif_pci_disable_bus(struct hif_softc *scn)
  2120. {
  2121. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2122. struct pci_dev *pdev;
  2123. void __iomem *mem;
  2124. struct hif_target_info *tgt_info = &scn->target_info;
  2125. /* Attach did not succeed, all resources have been
  2126. * freed in error handler
  2127. */
  2128. if (!sc)
  2129. return;
  2130. pdev = sc->pdev;
  2131. if (ADRASTEA_BU) {
  2132. hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
  2133. hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
  2134. hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
  2135. HOST_GROUP0_MASK);
  2136. }
  2137. #if defined(CPU_WARM_RESET_WAR)
  2138. /* Currently CPU warm reset sequence is tested only for AR9888_REV2
  2139. * Need to enable for AR9888_REV1 once CPU warm reset sequence is
  2140. * verified for AR9888_REV1
  2141. */
  2142. if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
  2143. (tgt_info->target_version == AR9887_REV1_VERSION))
  2144. hif_pci_device_warm_reset(sc);
  2145. else
  2146. hif_pci_device_reset(sc);
  2147. #else
  2148. hif_pci_device_reset(sc);
  2149. #endif
  2150. mem = (void __iomem *)sc->mem;
  2151. if (mem) {
  2152. hif_dump_pipe_debug_count(scn);
  2153. if (scn->athdiag_procfs_inited) {
  2154. athdiag_procfs_remove();
  2155. scn->athdiag_procfs_inited = false;
  2156. }
  2157. sc->hif_pci_deinit(sc);
  2158. scn->mem = NULL;
  2159. }
  2160. HIF_INFO("%s: X", __func__);
  2161. }
  2162. #define OL_ATH_PCI_PM_CONTROL 0x44
  2163. #ifdef FEATURE_RUNTIME_PM
  2164. /**
2165. * hif_runtime_prevent_linkdown() - prevent or allow runtime suspend from occurring
  2166. * @scn: hif context
  2167. * @flag: prevent linkdown if true otherwise allow
  2168. *
  2169. * this api should only be called as part of bus prevent linkdown
  2170. */
  2171. static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
  2172. {
  2173. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2174. if (flag)
  2175. qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
  2176. else
  2177. qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
  2178. }
  2179. #else
  2180. static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
  2181. {
  2182. }
  2183. #endif
  2184. #if defined(CONFIG_PCI_MSM)
  2185. /**
2186. * hif_pci_prevent_linkdown(): prevent or allow linkdown
  2187. * @flag: true prevents linkdown, false allows
  2188. *
  2189. * Calls into the platform driver to vote against taking down the
  2190. * pcie link.
  2191. *
  2192. * Return: n/a
  2193. */
  2194. void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
  2195. {
  2196. int errno;
  2197. HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable");
  2198. hif_runtime_prevent_linkdown(scn, flag);
  2199. errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
  2200. if (errno)
  2201. HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
  2202. __func__, errno);
  2203. }
  2204. #else
  2205. void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
  2206. {
  2207. HIF_INFO("wlan: %s pcie power collapse", (flag ? "disable" : "enable"));
  2208. hif_runtime_prevent_linkdown(scn, flag);
  2209. }
  2210. #endif
  2211. /**
  2212. * hif_pci_bus_suspend(): prepare hif for suspend
  2213. *
  2214. * Return: Errno
  2215. */
  2216. int hif_pci_bus_suspend(struct hif_softc *scn)
  2217. {
  2218. hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
  2219. if (hif_drain_tasklets(scn)) {
  2220. hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
  2221. return -EBUSY;
  2222. }
  2223. /* Stop the HIF Sleep Timer */
  2224. hif_cancel_deferred_target_sleep(scn);
  2225. return 0;
  2226. }
  2227. /**
  2228. * __hif_check_link_status() - API to check if PCIe link is active/not
  2229. * @scn: HIF Context
  2230. *
  2231. * API reads the PCIe config space to verify if PCIe link training is
  2232. * successful or not.
  2233. *
  2234. * Return: Success/Failure
  2235. */
  2236. static int __hif_check_link_status(struct hif_softc *scn)
  2237. {
  2238. uint16_t dev_id = 0;
  2239. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2240. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  2241. if (!sc) {
  2242. HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
  2243. return -EINVAL;
  2244. }
  2245. pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
  2246. if (dev_id == sc->devid)
  2247. return 0;
  2248. HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
  2249. __func__, dev_id);
  2250. scn->recovery = true;
  2251. if (cbk && cbk->set_recovery_in_progress)
  2252. cbk->set_recovery_in_progress(cbk->context, true);
  2253. else
  2254. HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
  2255. pld_is_pci_link_down(sc->dev);
  2256. return -EACCES;
  2257. }
  2258. /**
  2259. * hif_pci_bus_resume(): prepare hif for resume
  2260. *
  2261. * Return: Errno
  2262. */
  2263. int hif_pci_bus_resume(struct hif_softc *scn)
  2264. {
  2265. int errno;
  2266. errno = __hif_check_link_status(scn);
  2267. if (errno)
  2268. return errno;
  2269. hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
  2270. return 0;
  2271. }
  2272. /**
  2273. * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
  2274. * @scn: hif context
  2275. *
  2276. * Ensure that if we received the wakeup message before the irq
2277. * was disabled, the message is processed before suspending.
  2278. *
  2279. * Return: -EBUSY if we fail to flush the tasklets.
  2280. */
  2281. int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
  2282. {
  2283. if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
  2284. qdf_atomic_set(&scn->link_suspended, 1);
  2285. hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn));
  2286. return 0;
  2287. }
  2288. /**
  2289. * hif_pci_bus_resume_noirq() - ensure there are no pending transactions
  2290. * @scn: hif context
  2291. *
  2292. * Ensure that if we received the wakeup message before the irq
2293. * was disabled, the message is processed before suspending.
  2294. *
  2295. * Return: -EBUSY if we fail to flush the tasklets.
  2296. */
  2297. int hif_pci_bus_resume_noirq(struct hif_softc *scn)
  2298. {
  2299. hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn));
  2300. if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
  2301. qdf_atomic_set(&scn->link_suspended, 0);
  2302. return 0;
  2303. }
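/*
 * Ordering sketch (illustrative): for a full suspend cycle the four hooks
 * above are expected to pair up as
 *
 *   hif_pci_bus_suspend()      then  hif_pci_bus_suspend_noirq()
 *   hif_pci_bus_resume_noirq() then  hif_pci_bus_resume()
 *
 * which is also the order used by hif_runtime_suspend() and
 * hif_runtime_resume() further below.
 */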
  2304. #ifdef FEATURE_RUNTIME_PM
  2305. /**
  2306. * __hif_runtime_pm_set_state(): utility function
  2307. * @state: state to set
  2308. *
  2309. * indexes into the runtime pm state and sets it.
  2310. */
  2311. static void __hif_runtime_pm_set_state(struct hif_softc *scn,
  2312. enum hif_pm_runtime_state state)
  2313. {
  2314. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2315. if (!sc) {
  2316. HIF_ERROR("%s: HIF_CTX not initialized",
  2317. __func__);
  2318. return;
  2319. }
  2320. qdf_atomic_set(&sc->pm_state, state);
  2321. }
  2322. /**
  2323. * hif_runtime_pm_set_state_on(): adjust runtime pm state
  2324. *
2325. * Notify hif that the runtime pm state should be on
  2326. */
  2327. static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
  2328. {
  2329. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
  2330. }
  2331. /**
  2332. * hif_runtime_pm_set_state_resuming(): adjust runtime pm state
  2333. *
  2334. * Notify hif that a runtime pm resuming has started
  2335. */
  2336. static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn)
  2337. {
  2338. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING);
  2339. }
  2340. /**
  2341. * hif_runtime_pm_set_state_suspending(): adjust runtime pm state
  2342. *
  2343. * Notify hif that a runtime pm suspend has started
  2344. */
  2345. static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn)
  2346. {
  2347. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING);
  2348. }
  2349. /**
  2350. * hif_runtime_pm_set_state_suspended(): adjust runtime pm state
  2351. *
  2352. * Notify hif that a runtime suspend attempt has been completed successfully
  2353. */
  2354. static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
  2355. {
  2356. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
  2357. }
  2358. /**
  2359. * hif_log_runtime_suspend_success() - log a successful runtime suspend
  2360. */
  2361. static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
  2362. {
  2363. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  2364. if (!sc)
  2365. return;
  2366. sc->pm_stats.suspended++;
  2367. sc->pm_stats.suspend_jiffies = jiffies;
  2368. }
  2369. /**
  2370. * hif_log_runtime_suspend_failure() - log a failed runtime suspend
  2371. *
  2372. * log a failed runtime suspend
  2373. * mark last busy to prevent immediate runtime suspend
  2374. */
  2375. static void hif_log_runtime_suspend_failure(void *hif_ctx)
  2376. {
  2377. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  2378. if (!sc)
  2379. return;
  2380. sc->pm_stats.suspend_err++;
  2381. }
  2382. /**
  2383. * hif_log_runtime_resume_success() - log a successful runtime resume
  2384. *
  2385. * log a successful runtime resume
  2386. * mark last busy to prevent immediate runtime suspend
  2387. */
  2388. static void hif_log_runtime_resume_success(void *hif_ctx)
  2389. {
  2390. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  2391. if (!sc)
  2392. return;
  2393. sc->pm_stats.resumed++;
  2394. }
  2395. /**
  2396. * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
  2397. *
  2398. * Record the failure.
  2399. * mark last busy to delay a retry.
  2400. * adjust the runtime_pm state.
  2401. */
  2402. void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
  2403. {
  2404. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2405. hif_log_runtime_suspend_failure(hif_ctx);
  2406. hif_pm_runtime_mark_last_busy(hif_ctx);
  2407. hif_runtime_pm_set_state_on(scn);
  2408. }
  2409. /**
  2410. * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
  2411. *
2412. * Makes sure that the pci link will be taken down by the suspend operation.
  2413. * If the hif layer is configured to leave the bus on, runtime suspend will
  2414. * not save any power.
  2415. *
  2416. * Set the runtime suspend state to in progress.
  2417. *
  2418. * return -EINVAL if the bus won't go down. otherwise return 0
  2419. */
  2420. int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
  2421. {
  2422. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2423. if (!hif_can_suspend_link(hif_ctx)) {
  2424. HIF_ERROR("Runtime PM not supported for link up suspend");
  2425. return -EINVAL;
  2426. }
  2427. hif_runtime_pm_set_state_suspending(scn);
  2428. return 0;
  2429. }
  2430. /**
  2431. * hif_process_runtime_suspend_success() - bookkeeping of suspend success
  2432. *
  2433. * Record the success.
  2434. * adjust the runtime_pm state
  2435. */
  2436. void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
  2437. {
  2438. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2439. hif_runtime_pm_set_state_suspended(scn);
  2440. hif_log_runtime_suspend_success(scn);
  2441. }
  2442. /**
  2443. * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
  2444. *
  2445. * update the runtime pm state.
  2446. */
  2447. void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
  2448. {
  2449. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2450. hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
  2451. hif_runtime_pm_set_state_resuming(scn);
  2452. }
  2453. /**
  2454. * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
  2455. *
  2456. * record the success.
  2457. * adjust the runtime_pm state
  2458. */
  2459. void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
  2460. {
  2461. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2462. hif_log_runtime_resume_success(hif_ctx);
  2463. hif_pm_runtime_mark_last_busy(hif_ctx);
  2464. hif_runtime_pm_set_state_on(scn);
  2465. }
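/*
 * State transition sketch (illustrative), as driven by the helpers above:
 *
 *   NONE       --hif_pm_runtime_start()-------> ON
 *   ON         --hif_pre_runtime_suspend()----> SUSPENDING
 *   SUSPENDING --suspend success--------------> SUSPENDED
 *   SUSPENDING --suspend failure--------------> ON
 *   SUSPENDED  --hif_pre_runtime_resume()-----> RESUMING
 *   RESUMING   --resume success---------------> ON
 *   any state  --hif_pm_runtime_stop()--------> NONE
 */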
  2466. /**
  2467. * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
  2468. *
  2469. * Return: 0 for success and non-zero error code for failure
  2470. */
  2471. int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
  2472. {
  2473. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  2474. int errno;
  2475. errno = hif_bus_suspend(hif_ctx);
  2476. if (errno) {
  2477. HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
  2478. return errno;
  2479. }
  2480. hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);
  2481. errno = hif_bus_suspend_noirq(hif_ctx);
  2482. if (errno) {
  2483. HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
  2484. hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
  2485. goto bus_resume;
  2486. }
  2487. qdf_atomic_set(&sc->pm_dp_rx_busy, 0);
  2488. return 0;
  2489. bus_resume:
  2490. QDF_BUG(!hif_bus_resume(hif_ctx));
  2491. return errno;
  2492. }
  2493. /**
  2494. * hif_fastpath_resume() - resume fastpath for runtimepm
  2495. *
  2496. * ensure that the fastpath write index register is up to date
  2497. * since runtime pm may cause ce_send_fast to skip the register
  2498. * write.
  2499. *
  2500. * fastpath only applicable to legacy copy engine
  2501. */
  2502. void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
  2503. {
  2504. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2505. struct CE_state *ce_state;
  2506. if (!scn)
  2507. return;
  2508. if (scn->fastpath_mode_on) {
  2509. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  2510. return;
  2511. ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
  2512. qdf_spin_lock_bh(&ce_state->ce_index_lock);
2513. /* war_ce_src_ring_write_idx_set */
  2514. CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
  2515. ce_state->src_ring->write_index);
  2516. qdf_spin_unlock_bh(&ce_state->ce_index_lock);
  2517. Q_TARGET_ACCESS_END(scn);
  2518. }
  2519. }
  2520. /**
  2521. * hif_runtime_resume() - do the bus resume part of a runtime resume
  2522. *
  2523. * Return: 0 for success and non-zero error code for failure
  2524. */
  2525. int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
  2526. {
  2527. QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
  2528. QDF_BUG(!hif_bus_resume(hif_ctx));
  2529. return 0;
  2530. }
  2531. #endif /* #ifdef FEATURE_RUNTIME_PM */
  2532. #if CONFIG_PCIE_64BIT_MSI
  2533. static void hif_free_msi_ctx(struct hif_softc *scn)
  2534. {
  2535. struct hif_pci_softc *sc = scn->hif_sc;
  2536. struct hif_msi_info *info = &sc->msi_info;
  2537. struct device *dev = scn->qdf_dev->dev;
  2538. OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
  2539. OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
  2540. info->magic = NULL;
  2541. info->magic_dma = 0;
  2542. }
  2543. #else
  2544. static void hif_free_msi_ctx(struct hif_softc *scn)
  2545. {
  2546. }
  2547. #endif
  2548. void hif_pci_disable_isr(struct hif_softc *scn)
  2549. {
  2550. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2551. hif_exec_kill(&scn->osc);
  2552. hif_nointrs(scn);
  2553. hif_free_msi_ctx(scn);
  2554. /* Cancel the pending tasklet */
  2555. ce_tasklet_kill(scn);
  2556. tasklet_kill(&sc->intr_tq);
  2557. qdf_atomic_set(&scn->active_tasklet_cnt, 0);
  2558. qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
  2559. }
  2560. /* Function to reset SoC */
  2561. void hif_pci_reset_soc(struct hif_softc *hif_sc)
  2562. {
  2563. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
  2564. struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
  2565. struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
  2566. #if defined(CPU_WARM_RESET_WAR)
  2567. /* Currently CPU warm reset sequence is tested only for AR9888_REV2
  2568. * Need to enable for AR9888_REV1 once CPU warm reset sequence is
  2569. * verified for AR9888_REV1
  2570. */
  2571. if (tgt_info->target_version == AR9888_REV2_VERSION)
  2572. hif_pci_device_warm_reset(sc);
  2573. else
  2574. hif_pci_device_reset(sc);
  2575. #else
  2576. hif_pci_device_reset(sc);
  2577. #endif
  2578. }
  2579. #ifdef CONFIG_PCI_MSM
  2580. static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
  2581. {
  2582. msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
  2583. msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
  2584. }
  2585. #else
  2586. static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {};
  2587. #endif
  2588. /**
  2589. * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
  2590. * @sc: HIF PCIe Context
  2591. *
  2592. * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
  2593. *
  2594. * Return: Failure to caller
  2595. */
  2596. static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
  2597. {
  2598. uint16_t val = 0;
  2599. uint32_t bar = 0;
  2600. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
  2601. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  2602. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
  2603. struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
  2604. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  2605. A_target_id_t pci_addr = scn->mem;
  2606. HIF_ERROR("%s: keep_awake_count = %d",
  2607. __func__, hif_state->keep_awake_count);
  2608. pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
  2609. HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
  2610. pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
  2611. HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
  2612. pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
  2613. HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
  2614. pci_read_config_word(sc->pdev, PCI_STATUS, &val);
  2615. HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
  2616. pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
  2617. HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
  2618. HIF_ERROR("%s: SOC_WAKE_ADDR 0x%08x", __func__,
  2619. hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  2620. PCIE_SOC_WAKE_ADDRESS));
  2621. HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
  2622. hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  2623. RTC_STATE_ADDRESS));
  2624. HIF_ERROR("%s: error, target wakeup timed out", __func__);
  2625. hif_msm_pcie_debug_info(sc);
  2626. if (!cfg->enable_self_recovery)
  2627. QDF_BUG(0);
  2628. scn->recovery = true;
  2629. if (cbk->set_recovery_in_progress)
  2630. cbk->set_recovery_in_progress(cbk->context, true);
  2631. pld_is_pci_link_down(sc->dev);
  2632. return -EACCES;
  2633. }
  2634. /*
  2635. * For now, we use simple on-demand sleep/wake.
  2636. * Some possible improvements:
  2637. * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
  2638. * (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
  2639. * Careful, though, these functions may be used by
  2640. * interrupt handlers ("atomic")
  2641. * -Don't use host_reg_table for this code; instead use values directly
  2642. * -Use a separate timer to track activity and allow Target to sleep only
  2643. * if it hasn't done anything for a while; may even want to delay some
  2644. * processing for a short while in order to "batch" (e.g.) transmit
  2645. * requests with completion processing into "windows of up time". Costs
  2646. * some performance, but improves power utilization.
  2647. * -On some platforms, it might be possible to eliminate explicit
  2648. * sleep/wakeup. Instead, take a chance that each access works OK. If not,
  2649. * recover from the failure by forcing the Target awake.
  2650. * -Change keep_awake_count to an atomic_t in order to avoid spin lock
  2651. * overhead in some cases. Perhaps this makes more sense when
  2652. * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
  2653. * disabled.
  2654. * -It is possible to compile this code out and simply force the Target
  2655. * to remain awake. That would yield optimal performance at the cost of
  2656. * increased power. See CONFIG_ATH_PCIE_MAX_PERF.
  2657. *
  2658. * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
  2659. */
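/*
 * Usage sketch (an assumption based on this file: the Q_TARGET_ACCESS_BEGIN/
 * Q_TARGET_ACCESS_END macros wrap this helper in the non-MAX_PERF build):
 * callers bracket Target register access so the Target is held awake only
 * while it is actually being accessed.
 *
 *	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)	// wake and wait for it
 *		return -EIO;
 *	... read/write Target registers ...
 *	Q_TARGET_ACCESS_END(scn);		// allow sleep again
 */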
  2660. /**
  2661. * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
  2662. * @scn: hif_softc pointer.
  2663. * @sleep_ok: true to allow the Target to sleep, false to wake it
  2664. * @wait_for_it: when waking, poll until the Target is verified awake
  2665. *
  2666. * Adjusts the Target's keep-awake reference count and, when waking with
  2667. * @wait_for_it set, polls until the Target confirms it is awake.
  2668. * Return: 0 on success, -EACCES if the Target cannot be accessed
  2669. */
  2670. int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
  2671. bool sleep_ok, bool wait_for_it)
  2672. {
  2673. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  2674. A_target_id_t pci_addr = scn->mem;
  2675. static int max_delay;
  2676. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2677. static int debug;
  2678. if (scn->recovery)
  2679. return -EACCES;
  2680. if (qdf_atomic_read(&scn->link_suspended)) {
  2681. HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
  2682. debug = true;
  2683. QDF_ASSERT(0);
  2684. return -EACCES;
  2685. }
  2686. if (debug) {
  2687. wait_for_it = true;
  2688. HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
  2689. __func__);
  2690. QDF_ASSERT(0);
  2691. }
  2692. if (sleep_ok) {
  2693. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  2694. hif_state->keep_awake_count--;
  2695. if (hif_state->keep_awake_count == 0) {
  2696. /* Allow sleep */
  2697. hif_state->verified_awake = false;
  2698. hif_state->sleep_ticks = qdf_system_ticks();
  2699. }
  2700. if (hif_state->fake_sleep == false) {
  2701. /* Set the Fake Sleep */
  2702. hif_state->fake_sleep = true;
  2703. /* Start the Sleep Timer */
  2704. qdf_timer_stop(&hif_state->sleep_timer);
  2705. qdf_timer_start(&hif_state->sleep_timer,
  2706. HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
  2707. }
  2708. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  2709. } else {
  2710. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  2711. if (hif_state->fake_sleep) {
  2712. hif_state->verified_awake = true;
  2713. } else {
  2714. if (hif_state->keep_awake_count == 0) {
  2715. /* Force AWAKE */
  2716. hif_write32_mb(sc, pci_addr +
  2717. PCIE_LOCAL_BASE_ADDRESS +
  2718. PCIE_SOC_WAKE_ADDRESS,
  2719. PCIE_SOC_WAKE_V_MASK);
  2720. }
  2721. }
  2722. hif_state->keep_awake_count++;
  2723. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  2724. if (wait_for_it && !hif_state->verified_awake) {
  2725. #define PCIE_SLEEP_ADJUST_TIMEOUT 8000 /* 8 ms */
  2726. int tot_delay = 0;
  2727. int curr_delay = 5;
  2728. for (;; ) {
  2729. if (hif_targ_is_awake(scn, pci_addr)) {
  2730. hif_state->verified_awake = true;
  2731. break;
  2732. }
  2733. if (!hif_pci_targ_is_present(scn, pci_addr))
  2734. break;
  2735. if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
  2736. return hif_log_soc_wakeup_timeout(sc);
  2737. OS_DELAY(curr_delay);
  2738. tot_delay += curr_delay;
  2739. if (curr_delay < 50)
  2740. curr_delay += 5;
  2741. }
  2742. /*
  2743. * NB: If Target has to come out of Deep Sleep,
  2744. * this may take a few ms. Typically, though,
  2745. * this delay should be <30us.
  2746. */
  2747. if (tot_delay > max_delay)
  2748. max_delay = tot_delay;
  2749. }
  2750. }
  2751. if (debug && hif_state->verified_awake) {
  2752. debug = 0;
  2753. HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
  2754. __func__,
  2755. hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
  2756. PCIE_INTR_ENABLE_ADDRESS),
  2757. hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
  2758. PCIE_INTR_CAUSE_ADDRESS),
  2759. hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
  2760. CPU_INTR_ADDRESS),
  2761. hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
  2762. PCIE_INTR_CLR_ADDRESS),
  2763. hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
  2764. CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
  2765. }
  2766. return 0;
  2767. }
  2768. #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
  2769. uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
  2770. {
  2771. uint32_t value;
  2772. void *addr;
  2773. addr = scn->mem + offset;
  2774. value = hif_read32_mb(scn, addr);
  2775. {
  2776. unsigned long irq_flags;
  2777. int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
  2778. spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
  2779. pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
  2780. pcie_access_log[idx].is_write = false;
  2781. pcie_access_log[idx].addr = addr;
  2782. pcie_access_log[idx].value = value;
  2783. pcie_access_log_seqnum++;
  2784. spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
  2785. }
  2786. return value;
  2787. }
  2788. void
  2789. hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
  2790. {
  2791. void *addr;
  2792. addr = scn->mem + (offset);
  2793. hif_write32_mb(scn, addr, value);
  2794. {
  2795. unsigned long irq_flags;
  2796. int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
  2797. spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
  2798. pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
  2799. pcie_access_log[idx].is_write = true;
  2800. pcie_access_log[idx].addr = addr;
  2801. pcie_access_log[idx].value = value;
  2802. pcie_access_log_seqnum++;
  2803. spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
  2804. }
  2805. }
  2806. /**
  2807. * hif_target_dump_access_log() - dump access log
  2808. *
  2809. * dump access log
  2810. *
  2811. * Return: n/a
  2812. */
  2813. void hif_target_dump_access_log(void)
  2814. {
  2815. int idx, len, start_idx, cur_idx;
  2816. unsigned long irq_flags;
  2817. spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
  2818. if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
  2819. len = PCIE_ACCESS_LOG_NUM;
  2820. start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
  2821. } else {
  2822. len = pcie_access_log_seqnum;
  2823. start_idx = 0;
  2824. }
  2825. for (idx = 0; idx < len; idx++) {
  2826. cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
  2827. HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
  2828. __func__, idx,
  2829. pcie_access_log[cur_idx].seqnum,
  2830. pcie_access_log[cur_idx].is_write,
  2831. pcie_access_log[cur_idx].addr,
  2832. pcie_access_log[cur_idx].value);
  2833. }
  2834. pcie_access_log_seqnum = 0;
  2835. spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
  2836. }
  2837. #endif
  2838. #ifndef HIF_AHB
  2839. int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
  2840. {
  2841. QDF_BUG(0);
  2842. return -EINVAL;
  2843. }
  2844. int hif_ahb_configure_irq(struct hif_pci_softc *sc)
  2845. {
  2846. QDF_BUG(0);
  2847. return -EINVAL;
  2848. }
  2849. #endif
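/**
 * hif_ce_interrupt_handler() - per-copy-engine MSI interrupt handler
 * @irq: irq number that fired
 * @context: the ce_tasklet_entry registered for this copy engine
 *
 * Return: result of ce_dispatch_interrupt() for the copy engine
 */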
  2850. static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
  2851. {
  2852. struct ce_tasklet_entry *tasklet_entry = context;
  2853. return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
  2854. }
  2855. extern const char *ce_name[];
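/**
 * hif_ce_msi_map_ce_to_irq() - map a copy engine id to its MSI irq number
 * @scn: hif context
 * @ce_id: copy engine id
 *
 * Return: the irq number cached in ce_msi_irq_num[] during irq configuration
 */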
  2856. static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
  2857. {
  2858. struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
  2859. return pci_scn->ce_msi_irq_num[ce_id];
  2860. }
  2861. /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
  2862. * @hif_sc: hif context
  2863. * @ce_id: which ce to disable copy complete interrupts for
  2864. *
  2865. * since MSI interrupts are not level based, the system can function
  2866. * without disabling these interrupts. Interrupt mitigation can be
  2867. * added here for better system performance.
  2868. */
  2869. static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
  2870. {
  2871. disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
  2872. }
  2873. static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
  2874. {
  2875. enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
  2876. }
  2877. static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
  2878. {
  2879. disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
  2880. }
  2881. static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
  2882. {
  2883. enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
  2884. }
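/**
 * hif_ce_msi_configure_irq() - request the wake and per-CE MSI interrupts
 * @scn: hif context
 *
 * Requests the "WAKE" MSI, installs the MSI enable/disable bus ops, then
 * requests one irq per initialized copy engine. On failure all previously
 * requested irqs are freed again.
 *
 * Return: 0 on success, error code from pld/request_irq otherwise
 */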
  2885. static int hif_ce_msi_configure_irq(struct hif_softc *scn)
  2886. {
  2887. int ret;
  2888. int ce_id, irq;
  2889. uint32_t msi_data_start;
  2890. uint32_t msi_data_count;
  2891. uint32_t msi_irq_start;
  2892. struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
  2893. struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
  2894. struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
  2895. /* do wake irq assignment */
  2896. ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
  2897. &msi_data_count, &msi_data_start,
  2898. &msi_irq_start);
  2899. if (ret)
  2900. return ret;
  2901. scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start);
  2902. ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler,
  2903. IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
  2904. if (ret)
  2905. return ret;
  2906. /* do ce irq assignments */
  2907. ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
  2908. &msi_data_count, &msi_data_start,
  2909. &msi_irq_start);
  2910. if (ret)
  2911. goto free_wake_irq;
  2912. if (ce_srng_based(scn)) {
  2913. scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
  2914. scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
  2915. } else {
  2916. scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
  2917. scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
  2918. }
  2919. scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
  2920. /* needs to match the ce_id -> irq data mapping
  2921. * used in the srng parameter configuration
  2922. */
  2923. for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
  2924. unsigned int msi_data = (ce_id % msi_data_count) +
  2925. msi_irq_start;
  2926. if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
  2927. continue;
  2928. irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
  2929. HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
  2930. __func__, ce_id, msi_data, irq,
  2931. &ce_sc->tasklets[ce_id]);
  2932. /* implies the ce is also initialized */
  2933. if (!ce_sc->tasklets[ce_id].inited)
  2934. continue;
  2935. pci_sc->ce_msi_irq_num[ce_id] = irq;
  2936. ret = request_irq(irq, hif_ce_interrupt_handler,
  2937. IRQF_SHARED,
  2938. ce_name[ce_id],
  2939. &ce_sc->tasklets[ce_id]);
  2940. if (ret)
  2941. goto free_irq;
  2942. }
  2943. return ret;
  2944. free_irq:
  2945. /* the request_irq for the last ce_id failed so skip it. */
  2946. while (ce_id > 0 && ce_id < scn->ce_count) {
  2947. unsigned int msi_data;
  2948. ce_id--;
  2949. msi_data = (ce_id % msi_data_count) + msi_irq_start;
  2950. irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
  2951. free_irq(irq, &ce_sc->tasklets[ce_id]);
  2952. }
  2953. free_wake_irq:
  2954. free_irq(scn->wake_irq, scn->qdf_dev->dev);
  2955. scn->wake_irq = 0;
  2956. return ret;
  2957. }
  2958. static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
  2959. {
  2960. int i;
  2961. for (i = 0; i < hif_ext_group->numirq; i++)
  2962. disable_irq_nosync(hif_ext_group->os_irq[i]);
  2963. }
  2964. static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
  2965. {
  2966. int i;
  2967. for (i = 0; i < hif_ext_group->numirq; i++)
  2968. enable_irq(hif_ext_group->os_irq[i]);
  2969. }
  2970. /**
  2971. * hif_pci_get_irq_name() - get irqname
  2972. * This function maps an irq number to an irq name;
  2973. * for PCI a fixed dummy name is returned.
  2974. *
  2975. * @irq_no: irq number
  2976. *
  2977. * Return: irq name
  2978. */
  2979. const char *hif_pci_get_irq_name(int irq_no)
  2980. {
  2981. return "pci-dummy";
  2982. }
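/**
 * hif_pci_configure_grp_irq() - request irqs for an ext interrupt group
 * @scn: hif context
 * @hif_ext_group: interrupt group to configure
 *
 * Installs the group enable/disable callbacks and requests every irq
 * listed in the group.
 *
 * Return: 0 on success, -EFAULT if any request_irq() fails
 */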
  2983. int hif_pci_configure_grp_irq(struct hif_softc *scn,
  2984. struct hif_exec_context *hif_ext_group)
  2985. {
  2986. int ret = 0;
  2987. int irq = 0;
  2988. int j;
  2989. hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
  2990. hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
  2991. hif_ext_group->irq_name = &hif_pci_get_irq_name;
  2992. hif_ext_group->work_complete = &hif_dummy_grp_done;
  2993. for (j = 0; j < hif_ext_group->numirq; j++) {
  2994. irq = hif_ext_group->irq[j];
  2995. hif_info("request_irq = %d for grp %d",
  2996. irq, hif_ext_group->grp_id);
  2997. ret = request_irq(irq,
  2998. hif_ext_group_interrupt_handler,
  2999. IRQF_SHARED | IRQF_NO_SUSPEND,
  3000. "wlan_EXT_GRP",
  3001. hif_ext_group);
  3002. if (ret) {
  3003. HIF_ERROR("%s: request_irq failed ret = %d",
  3004. __func__, ret);
  3005. return -EFAULT;
  3006. }
  3007. hif_ext_group->os_irq[j] = irq;
  3008. }
  3009. hif_ext_group->irq_requested = true;
  3010. return 0;
  3011. }
  3012. /**
  3013. * hif_configure_irq() - configure interrupt
  3014. *
  3015. * This function configures interrupt(s)
  3016. *
  3017. * @scn: hif context
  3018. * Falls back to legacy AHB/PCI line interrupts when MSI setup fails.
  3019. *
  3020. * Return: 0 - for success
  3021. */
  3022. int hif_configure_irq(struct hif_softc *scn)
  3023. {
  3024. int ret = 0;
  3025. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  3026. HIF_TRACE("%s: E", __func__);
  3027. if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
  3028. scn->request_irq_done = false;
  3029. return 0;
  3030. }
  3031. hif_init_reschedule_tasklet_work(sc);
  3032. ret = hif_ce_msi_configure_irq(scn);
  3033. if (ret == 0) {
  3034. goto end;
  3035. }
  3036. switch (scn->target_info.target_type) {
  3037. case TARGET_TYPE_IPQ4019:
  3038. ret = hif_ahb_configure_legacy_irq(sc);
  3039. break;
  3040. case TARGET_TYPE_QCA8074:
  3041. case TARGET_TYPE_QCA8074V2:
  3042. case TARGET_TYPE_QCA6018:
  3043. ret = hif_ahb_configure_irq(sc);
  3044. break;
  3045. default:
  3046. ret = hif_pci_configure_legacy_irq(sc);
  3047. break;
  3048. }
  3049. if (ret < 0) {
  3050. HIF_ERROR("%s: legacy irq configuration error = %d",
  3051. __func__, ret);
  3052. return ret;
  3053. }
  3054. end:
  3055. scn->request_irq_done = true;
  3056. return 0;
  3057. }
  3058. /**
  3059. * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0
  3060. * @scn: hif control structure
  3061. *
  3062. * Sets the IRQ bit in the LF Timer 0 status register to wake a Peregrine/
  3063. * Swift target that is stuck in a polling loop in pcie_address_config in FW
  3064. *
  3065. * Return: none
  3066. */
  3067. static void hif_trigger_timer_irq(struct hif_softc *scn)
  3068. {
  3069. int tmp;
  3070. /* Trigger IRQ on Peregrine/Swift by setting
  3071. * IRQ Bit of LF_TIMER 0
  3072. */
  3073. tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
  3074. SOC_LF_TIMER_STATUS0_ADDRESS));
  3075. /* Set Raw IRQ Bit */
  3076. tmp |= 1;
  3077. /* SOC_LF_TIMER_STATUS0 */
  3078. hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
  3079. SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
  3080. }
  3081. /**
  3082. * hif_target_sync() : ensure the target is ready
  3083. * @scn: hif control structure
  3084. *
  3085. * Informs fw that we plan to use legacy interrupts so that
  3086. * it can begin booting. Ensures that the fw finishes booting
  3087. * before continuing. Should be called before trying to write
  3088. * to the targets other registers for the first time.
  3089. *
  3090. * Return: none
  3091. */
  3092. static void hif_target_sync(struct hif_softc *scn)
  3093. {
  3094. hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
  3095. PCIE_INTR_ENABLE_ADDRESS),
  3096. PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
  3097. /* read to flush pcie write */
  3098. (void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
  3099. PCIE_INTR_ENABLE_ADDRESS));
  3100. hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
  3101. PCIE_SOC_WAKE_ADDRESS,
  3102. PCIE_SOC_WAKE_V_MASK);
  3103. while (!hif_targ_is_awake(scn, scn->mem))
  3104. ;
  3105. if (HAS_FW_INDICATOR) {
  3106. int wait_limit = 500;
  3107. int fw_ind = 0;
  3108. int retry_count = 0;
  3109. uint32_t target_type = scn->target_info.target_type;
  3110. fw_retry:
  3111. HIF_TRACE("%s: Loop checking FW signal", __func__);
  3112. while (1) {
  3113. fw_ind = hif_read32_mb(scn, scn->mem +
  3114. FW_INDICATOR_ADDRESS);
  3115. if (fw_ind & FW_IND_INITIALIZED)
  3116. break;
  3117. if (wait_limit-- < 0)
  3118. break;
  3119. hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
  3120. PCIE_INTR_ENABLE_ADDRESS),
  3121. PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
  3122. /* read to flush pcie write */
  3123. (void)hif_read32_mb(scn, scn->mem +
  3124. (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
  3125. qdf_mdelay(10);
  3126. }
  3127. if (wait_limit < 0) {
  3128. if (target_type == TARGET_TYPE_AR9888 &&
  3129. retry_count++ < 2) {
  3130. hif_trigger_timer_irq(scn);
  3131. wait_limit = 500;
  3132. goto fw_retry;
  3133. }
  3134. HIF_TRACE("%s: FW signal timed out",
  3135. __func__);
  3136. qdf_assert_always(0);
  3137. } else {
  3138. HIF_TRACE("%s: Got FW signal, retries = %x",
  3139. __func__, 500-wait_limit);
  3140. }
  3141. }
  3142. hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
  3143. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
  3144. }
  3145. static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
  3146. struct device *dev)
  3147. {
  3148. struct pld_soc_info info;
  3149. pld_get_soc_info(dev, &info);
  3150. sc->mem = info.v_addr;
  3151. sc->ce_sc.ol_sc.mem = info.v_addr;
  3152. sc->ce_sc.ol_sc.mem_pa = info.p_addr;
  3153. }
  3154. static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
  3155. struct device *dev)
  3156. {}
  3157. static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
  3158. int device_id)
  3159. {
  3160. if (!pld_have_platform_driver_support(sc->dev))
  3161. return false;
  3162. switch (device_id) {
  3163. case QCA6290_DEVICE_ID:
  3164. case QCN9000_DEVICE_ID:
  3165. case QCA6290_EMULATION_DEVICE_ID:
  3166. case QCA6390_DEVICE_ID:
  3167. case QCA6490_DEVICE_ID:
  3168. case AR6320_DEVICE_ID:
  3169. case QCN7605_DEVICE_ID:
  3170. return true;
  3171. }
  3172. return false;
  3173. }
  3174. static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
  3175. int device_id)
  3176. {
  3177. if (hif_is_pld_based_target(sc, device_id)) {
  3178. sc->hif_enable_pci = hif_enable_pci_pld;
  3179. sc->hif_pci_deinit = hif_pci_deinit_pld;
  3180. sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
  3181. } else {
  3182. sc->hif_enable_pci = hif_enable_pci_nopld;
  3183. sc->hif_pci_deinit = hif_pci_deinit_nopld;
  3184. sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
  3185. }
  3186. }
  3187. #ifdef HIF_REG_WINDOW_SUPPORT
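/**
 * hif_pci_init_reg_windowing_support() - enable register windowing if needed
 * @sc: hif pci context
 * @target_type: target type reported by hif_get_device_type()
 *
 * QCN7605 reaches its registers through a sliding window, so windowing is
 * enabled and a lock protecting the window selection is created; all other
 * targets access the BAR directly.
 */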
  3188. static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
  3189. u32 target_type)
  3190. {
  3191. switch (target_type) {
  3192. case TARGET_TYPE_QCN7605:
  3193. sc->use_register_windowing = true;
  3194. qdf_spinlock_create(&sc->register_access_lock);
  3195. sc->register_window = 0;
  3196. break;
  3197. default:
  3198. sc->use_register_windowing = false;
  3199. }
  3200. }
  3201. #else
  3202. static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
  3203. u32 target_type)
  3204. {
  3205. sc->use_register_windowing = false;
  3206. }
  3207. #endif
  3208. /**
  3209. * hif_pci_enable_bus(): enable the PCI bus
  3210. *
  3211. * This function enables the bus
  3212. *
  3213. * @ol_sc: soft_sc struct
  3214. * @dev: device pointer
  3215. * @bdev: bus dev pointer
  3216. * @bid: bus id pointer
  3217. * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
  3218. * Return: QDF_STATUS
  3219. */
  3220. QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
  3221. struct device *dev, void *bdev,
  3222. const struct hif_bus_id *bid,
  3223. enum hif_enable_type type)
  3224. {
  3225. int ret = 0;
  3226. uint32_t hif_type, target_type;
  3227. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
  3228. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
  3229. uint16_t revision_id = 0;
  3230. int probe_again = 0;
  3231. struct pci_dev *pdev = bdev;
  3232. const struct pci_device_id *id = (const struct pci_device_id *)bid;
  3233. struct hif_target_info *tgt_info;
  3234. if (!ol_sc) {
  3235. HIF_ERROR("%s: hif_ctx is NULL", __func__);
  3236. return QDF_STATUS_E_NOMEM;
  3237. }
  3238. HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
  3239. __func__, hif_get_conparam(ol_sc), id->device);
  3240. sc->pdev = pdev;
  3241. sc->dev = &pdev->dev;
  3242. sc->devid = id->device;
  3243. sc->cacheline_sz = dma_get_cache_alignment();
  3244. tgt_info = hif_get_target_info_handle(hif_hdl);
  3245. hif_pci_init_deinit_ops_attach(sc, id->device);
  3246. sc->hif_pci_get_soc_info(sc, dev);
  3247. again:
  3248. ret = sc->hif_enable_pci(sc, pdev, id);
  3249. if (ret < 0) {
  3250. HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
  3251. __func__, ret);
  3252. goto err_enable_pci;
  3253. }
  3254. HIF_TRACE("%s: hif_enable_pci done", __func__);
  3255. /* Temporary FIX: disable ASPM on peregrine.
  3256. * Will be removed after the OTP is programmed
  3257. */
  3258. hif_disable_power_gating(hif_hdl);
  3259. device_disable_async_suspend(&pdev->dev);
  3260. pci_read_config_word(pdev, 0x08, &revision_id);
  3261. ret = hif_get_device_type(id->device, revision_id,
  3262. &hif_type, &target_type);
  3263. if (ret < 0) {
  3264. HIF_ERROR("%s: invalid device id/revision_id", __func__);
  3265. goto err_tgtstate;
  3266. }
  3267. HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
  3268. __func__, hif_type, target_type);
  3269. hif_register_tbl_attach(ol_sc, hif_type);
  3270. hif_target_register_tbl_attach(ol_sc, target_type);
  3271. hif_pci_init_reg_windowing_support(sc, target_type);
  3272. tgt_info->target_type = target_type;
  3273. if (ce_srng_based(ol_sc)) {
  3274. HIF_TRACE("%s: Skip target wakeup for srng devices", __func__);
  3275. } else {
  3276. ret = hif_pci_probe_tgt_wakeup(sc);
  3277. if (ret < 0) {
  3278. HIF_ERROR("%s: ERROR - hif_pci_probe_tgt_wakeup error = %d",
  3279. __func__, ret);
  3280. if (ret == -EAGAIN)
  3281. probe_again++;
  3282. goto err_tgtstate;
  3283. }
  3284. HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
  3285. }
  3286. if (!ol_sc->mem_pa) {
  3287. HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
  3288. ret = -EIO;
  3289. goto err_tgtstate;
  3290. }
  3291. if (!ce_srng_based(ol_sc)) {
  3292. hif_target_sync(ol_sc);
  3293. if (ADRASTEA_BU)
  3294. hif_vote_link_up(hif_hdl);
  3295. }
  3296. return 0;
  3297. err_tgtstate:
  3298. hif_disable_pci(sc);
  3299. sc->pci_enabled = false;
  3300. HIF_ERROR("%s: error, hif_disable_pci done", __func__);
  3301. return QDF_STATUS_E_ABORTED;
  3302. err_enable_pci:
  3303. if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
  3304. int delay_time;
  3305. HIF_INFO("%s: pci reprobe", __func__);
  3306. /* delay = max(100, 10 * probe_again^2) ms: 100, 100, 100, 160, ... */
  3307. delay_time = max(100, 10 * (probe_again * probe_again));
  3308. qdf_mdelay(delay_time);
  3309. goto again;
  3310. }
  3311. return ret;
  3312. }
  3313. /**
  3314. * hif_pci_irq_enable() - ce_irq_enable
  3315. * @scn: hif_softc
  3316. * @ce_id: ce_id
  3317. *
  3318. * Return: void
  3319. */
  3320. void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
  3321. {
  3322. uint32_t tmp = 1 << ce_id;
  3323. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  3324. qdf_spin_lock_irqsave(&sc->irq_lock);
  3325. scn->ce_irq_summary &= ~tmp;
  3326. if (scn->ce_irq_summary == 0) {
  3327. /* Enable Legacy PCI line interrupts */
  3328. if (LEGACY_INTERRUPTS(sc) &&
  3329. (scn->target_status != TARGET_STATUS_RESET) &&
  3330. (!qdf_atomic_read(&scn->link_suspended))) {
  3331. hif_write32_mb(scn, scn->mem +
  3332. (SOC_CORE_BASE_ADDRESS |
  3333. PCIE_INTR_ENABLE_ADDRESS),
  3334. HOST_GROUP0_MASK);
  3335. hif_read32_mb(scn, scn->mem +
  3336. (SOC_CORE_BASE_ADDRESS |
  3337. PCIE_INTR_ENABLE_ADDRESS));
  3338. }
  3339. }
  3340. if (scn->hif_init_done == true)
  3341. Q_TARGET_ACCESS_END(scn);
  3342. qdf_spin_unlock_irqrestore(&sc->irq_lock);
  3343. /* check for missed firmware crash */
  3344. hif_fw_interrupt_handler(0, scn);
  3345. }
  3346. /**
  3347. * hif_pci_irq_disable() - ce_irq_disable
  3348. * @scn: hif_softc
  3349. * @ce_id: ce_id
  3350. *
  3351. * only applicable to legacy copy engine...
  3352. *
  3353. * Return: void
  3354. */
  3355. void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
  3356. {
  3357. /* For Rome only need to wake up target */
  3358. /* target access is maintained until interrupts are re-enabled */
  3359. Q_TARGET_ACCESS_BEGIN(scn);
  3360. }
  3361. #ifdef FEATURE_RUNTIME_PM
  3362. /**
  3363. * hif_pm_runtime_get_sync() - do a get operation with sync resume
  3364. *
  3365. * A get operation will prevent a runtime suspend until a corresponding
  3366. * put is done. Unlike hif_pm_runtime_get(), this API will do a sync
  3367. * resume instead of requesting a resume if it is runtime PM suspended
  3368. * so it can only be called in non-atomic context.
  3369. *
  3370. * @hif_ctx: pointer of HIF context
  3371. *
  3372. * Return: 0 if it is runtime PM resumed otherwise an error code.
  3373. */
  3374. int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx)
  3375. {
  3376. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3377. int pm_state;
  3378. int ret;
  3379. if (!sc)
  3380. return -EINVAL;
  3381. pm_state = qdf_atomic_read(&sc->pm_state);
  3382. if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
  3383. pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
  3384. hif_info_high("Runtime PM resume is requested by %ps",
  3385. (void *)_RET_IP_);
  3386. sc->pm_stats.runtime_get++;
  3387. ret = pm_runtime_get_sync(sc->dev);
  3388. /* Get can return 1 if the device is already active, just return
  3389. * success in that case.
  3390. */
  3391. if (ret > 0)
  3392. ret = 0;
  3393. if (ret) {
  3394. sc->pm_stats.runtime_get_err++;
  3395. hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d",
  3396. qdf_atomic_read(&sc->pm_state), ret);
  3397. hif_pm_runtime_put(hif_ctx);
  3398. }
  3399. return ret;
  3400. }
  3401. /**
  3402. * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend
  3403. *
  3404. * This API will do a runtime put operation followed by a sync suspend if usage
  3405. * count is 0 so it can only be called in non-atomic context.
  3406. *
  3407. * @hif_ctx: pointer of HIF context
  3408. *
  3409. * Return: 0 for success otherwise an error code
  3410. */
  3411. int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx)
  3412. {
  3413. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3414. int usage_count, pm_state;
  3415. char *err = NULL;
  3416. if (!sc)
  3417. return -EINVAL;
  3418. usage_count = atomic_read(&sc->dev->power.usage_count);
  3419. if (usage_count == 1) {
  3420. pm_state = qdf_atomic_read(&sc->pm_state);
  3421. if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
  3422. err = "Ignore unexpected Put as runtime PM is disabled";
  3423. } else if (usage_count == 0) {
  3424. err = "Put without a Get Operation";
  3425. }
  3426. if (err) {
  3427. hif_pci_runtime_pm_warn(sc, err);
  3428. return -EINVAL;
  3429. }
  3430. sc->pm_stats.runtime_put++;
  3431. return pm_runtime_put_sync_suspend(sc->dev);
  3432. }
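/**
 * hif_pm_runtime_request_resume() - ask runtime PM to resume the bus
 * @hif_ctx: pointer of HIF context
 *
 * Logs the caller, updates the resume statistics and requests an
 * asynchronous runtime resume.
 *
 * Return: result of hif_pm_request_resume()
 */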
  3433. int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
  3434. {
  3435. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3436. int pm_state;
  3437. if (!sc)
  3438. return -EINVAL;
  3439. pm_state = qdf_atomic_read(&sc->pm_state);
  3440. if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
  3441. pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
  3442. HIF_INFO("Runtime PM resume is requested by %ps",
  3443. (void *)_RET_IP_);
  3444. sc->pm_stats.request_resume++;
  3445. sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
  3446. return hif_pm_request_resume(sc->dev);
  3447. }
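/**
 * hif_pm_runtime_mark_last_busy() - note recent activity for autosuspend
 * @hif_ctx: pointer of HIF context
 *
 * Records the caller and a timestamp, then marks the device busy so the
 * autosuspend timer is restarted.
 */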
  3448. void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
  3449. {
  3450. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3451. if (!sc)
  3452. return;
  3453. sc->pm_stats.last_busy_marker = (void *)_RET_IP_;
  3454. sc->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();
  3455. return pm_runtime_mark_last_busy(sc->dev);
  3456. }
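/**
 * hif_pm_runtime_get_noresume() - take a usage count without resuming
 * @hif_ctx: pointer of HIF context
 *
 * Prevents a runtime suspend from being scheduled but does not resume the
 * device if it is already suspended.
 */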
  3457. void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
  3458. {
  3459. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3460. if (!sc)
  3461. return;
  3462. sc->pm_stats.runtime_get++;
  3463. pm_runtime_get_noresume(sc->dev);
  3464. }
  3465. /**
  3466. * hif_pm_runtime_get() - do a get operation on the device
  3467. * @hif_ctx: pointer of HIF context
  3468. *
  3469. * A get operation will prevent a runtime suspend until a
  3470. * corresponding put is done. This API should be used when sending data.
  3471. *
  3472. * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
  3473. * THIS API WILL ONLY REQUEST THE RESUME AND NOT DO A GET!!!
  3474. *
  3475. * Return: success if the bus is up and a get has been issued,
  3476. * otherwise an error code.
  3477. */
  3478. int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
  3479. {
  3480. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  3481. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3482. int ret;
  3483. int pm_state;
  3484. if (!scn) {
  3485. hif_err("Could not do runtime get, scn is null");
  3486. return -EFAULT;
  3487. }
  3488. pm_state = qdf_atomic_read(&sc->pm_state);
  3489. if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
  3490. pm_state == HIF_PM_RUNTIME_STATE_NONE) {
  3491. sc->pm_stats.runtime_get++;
  3492. ret = __hif_pm_runtime_get(sc->dev);
  3493. /* Get can return 1 if the device is already active, just return
  3494. * success in that case
  3495. */
  3496. if (ret > 0)
  3497. ret = 0;
  3498. if (ret)
  3499. hif_pm_runtime_put(hif_ctx);
  3500. if (ret && ret != -EINPROGRESS) {
  3501. sc->pm_stats.runtime_get_err++;
  3502. hif_err("Runtime Get PM Error in pm_state:%d ret: %d",
  3503. qdf_atomic_read(&sc->pm_state), ret);
  3504. }
  3505. return ret;
  3506. }
  3507. if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
  3508. pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) {
  3509. hif_info_high("Runtime PM resume is requested by %ps",
  3510. (void *)_RET_IP_);
  3511. ret = -EAGAIN;
  3512. } else {
  3513. ret = -EBUSY;
  3514. }
  3515. sc->pm_stats.request_resume++;
  3516. sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
  3517. hif_pm_request_resume(sc->dev);
  3518. return ret;
  3519. }
  3520. /**
  3521. * hif_pm_runtime_put() - do a put operation on the device
  3522. *
  3523. * A put operation will allow a runtime suspend after a corresponding
  3524. * get was done. This API should be used when sending data.
  3525. *
  3526. * This API will return a failure if runtime PM is stopped.
  3527. * This API will return a failure if it would decrement the usage count below 0.
  3528. *
  3529. * Return: 0 if the put is performed
  3530. */
  3531. int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
  3532. {
  3533. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  3534. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3535. int pm_state, usage_count;
  3536. char *error = NULL;
  3537. if (!scn) {
  3538. HIF_ERROR("%s: Could not do runtime put, scn is null",
  3539. __func__);
  3540. return -EFAULT;
  3541. }
  3542. usage_count = atomic_read(&sc->dev->power.usage_count);
  3543. if (usage_count == 1) {
  3544. pm_state = qdf_atomic_read(&sc->pm_state);
  3545. if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
  3546. error = "Ignoring unexpected put when runtime pm is disabled";
  3547. } else if (usage_count == 0) {
  3548. error = "PUT Without a Get Operation";
  3549. }
  3550. if (error) {
  3551. hif_pci_runtime_pm_warn(sc, error);
  3552. return -EINVAL;
  3553. }
  3554. sc->pm_stats.runtime_put++;
  3555. hif_pm_runtime_mark_last_busy(hif_ctx);
  3556. hif_pm_runtime_put_auto(sc->dev);
  3557. return 0;
  3558. }
  3559. /**
  3560. * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
  3561. * reason
  3562. * @hif_sc: pci context
  3563. * @lock: runtime_pm lock being acquired
  3564. *
  3565. * Return 0 if successful.
  3566. */
  3567. static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
  3568. *hif_sc, struct hif_pm_runtime_lock *lock)
  3569. {
  3570. int ret = 0;
  3571. /*
  3572. * We shouldn't be setting context->timeout to zero here when
  3573. * context is active, as there can be a case where the timeout APIs
  3574. * for the same context are called back to back.
  3575. * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
  3576. * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
  3577. * API to ensure the timeout version is no longer active and the
  3578. * list entry of this context will be deleted during allow suspend.
  3579. */
  3580. if (lock->active)
  3581. return 0;
  3582. ret = __hif_pm_runtime_get(hif_sc->dev);
  3583. /*
  3584. * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
  3585. * RPM_SUSPENDING. Any other negative value is an error.
  3586. * We shouldn't do a runtime_put here as at a later point allow
  3587. * suspend gets called with the context and there the usage count
  3588. * is decremented, so suspend will be prevented.
  3589. */
  3590. if (ret < 0 && ret != -EINPROGRESS) {
  3591. hif_sc->pm_stats.runtime_get_err++;
  3592. hif_pci_runtime_pm_warn(hif_sc,
  3593. "Prevent Suspend Runtime PM Error");
  3594. }
  3595. hif_sc->prevent_suspend_cnt++;
  3596. lock->active = true;
  3597. list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
  3598. hif_sc->pm_stats.prevent_suspend++;
  3599. HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
  3600. hif_pm_runtime_state_to_string(
  3601. qdf_atomic_read(&hif_sc->pm_state)),
  3602. ret);
  3603. return ret;
  3604. }
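/**
 * __hif_pm_runtime_allow_suspend() - release a prevent-suspend lock
 * @hif_sc: pci context
 * @lock: runtime_pm lock being released
 *
 * Removes the lock from the prevent_suspend_list, drops the usage count
 * taken by the matching prevent call and marks the device busy so
 * autosuspend can run again.
 *
 * Return: 0, or the result of hif_pm_runtime_put_auto()
 */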
  3605. static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
  3606. struct hif_pm_runtime_lock *lock)
  3607. {
  3608. struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(hif_sc);
  3609. int ret = 0;
  3610. int usage_count;
  3611. if (hif_sc->prevent_suspend_cnt == 0)
  3612. return ret;
  3613. if (!lock->active)
  3614. return ret;
  3615. usage_count = atomic_read(&hif_sc->dev->power.usage_count);
  3616. /*
  3617. * During Driver unload, platform driver increments the usage
  3618. * count to prevent any runtime suspend getting called.
  3619. * So during driver load in HIF_PM_RUNTIME_STATE_NONE state the
  3620. * usage_count should be one. Ideally this shouldn't happen as
  3621. * context->active should be active for allow suspend to happen
  3622. * Handling this case here to prevent any failures.
  3623. */
  3624. if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
  3625. && usage_count == 1) || usage_count == 0) {
  3626. hif_pci_runtime_pm_warn(hif_sc,
  3627. "Allow without a prevent suspend");
  3628. return -EINVAL;
  3629. }
  3630. list_del(&lock->list);
  3631. hif_sc->prevent_suspend_cnt--;
  3632. lock->active = false;
  3633. lock->timeout = 0;
  3634. hif_pm_runtime_mark_last_busy(hif_ctx);
  3635. ret = hif_pm_runtime_put_auto(hif_sc->dev);
  3636. HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
  3637. hif_pm_runtime_state_to_string(
  3638. qdf_atomic_read(&hif_sc->pm_state)),
  3639. ret);
  3640. hif_sc->pm_stats.allow_suspend++;
  3641. return ret;
  3642. }
  3643. /**
  3644. * hif_pm_runtime_lock_timeout_fn() - runtime lock timeout callback
  3645. * @data: callback data that is the pci context
  3646. *
  3647. * If runtime locks are acquired with a timeout, this function releases
  3648. * the locks when the shared timeout timer expires.
  3649. *
  3650. * All expired locks on the prevent_suspend_list are allowed to suspend.
  3651. */
  3652. static void hif_pm_runtime_lock_timeout_fn(void *data)
  3653. {
  3654. struct hif_pci_softc *hif_sc = data;
  3655. unsigned long timer_expires;
  3656. struct hif_pm_runtime_lock *context, *temp;
  3657. spin_lock_bh(&hif_sc->runtime_lock);
  3658. timer_expires = hif_sc->runtime_timer_expires;
  3659. /* Make sure we are not called too early; this should take care of the
  3660. * following case:
  3661. *
  3662. * CPU0 CPU1 (timeout function)
  3663. * ---- ----------------------
  3664. * spin_lock_irq
  3665. * timeout function called
  3666. *
  3667. * mod_timer()
  3668. *
  3669. * spin_unlock_irq
  3670. * spin_lock_irq
  3671. */
  3672. if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
  3673. hif_sc->runtime_timer_expires = 0;
  3674. list_for_each_entry_safe(context, temp,
  3675. &hif_sc->prevent_suspend_list, list) {
  3676. if (context->timeout) {
  3677. __hif_pm_runtime_allow_suspend(hif_sc, context);
  3678. hif_sc->pm_stats.allow_suspend_timeout++;
  3679. }
  3680. }
  3681. }
  3682. spin_unlock_bh(&hif_sc->runtime_lock);
  3683. }
  3684. int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
  3685. struct hif_pm_runtime_lock *data)
  3686. {
  3687. struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
  3688. struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
  3689. struct hif_pm_runtime_lock *context = data;
  3690. if (!sc->hif_config.enable_runtime_pm)
  3691. return 0;
  3692. if (!context)
  3693. return -EINVAL;
  3694. if (in_irq())
  3695. WARN_ON(1);
  3696. spin_lock_bh(&hif_sc->runtime_lock);
  3697. context->timeout = 0;
  3698. __hif_pm_runtime_prevent_suspend(hif_sc, context);
  3699. spin_unlock_bh(&hif_sc->runtime_lock);
  3700. return 0;
  3701. }
  3702. int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
  3703. struct hif_pm_runtime_lock *data)
  3704. {
  3705. struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
  3706. struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
  3707. struct hif_pm_runtime_lock *context = data;
  3708. if (!sc->hif_config.enable_runtime_pm)
  3709. return 0;
  3710. if (!context)
  3711. return -EINVAL;
  3712. if (in_irq())
  3713. WARN_ON(1);
  3714. spin_lock_bh(&hif_sc->runtime_lock);
  3715. __hif_pm_runtime_allow_suspend(hif_sc, context);
  3716. /* The list can be empty as well in cases where
  3717. * we have one context in the list and the allow
  3718. * suspend came before the timer expires and we delete
  3719. * context above from the list.
  3720. * When list is empty prevent_suspend count will be zero.
  3721. */
  3722. if (hif_sc->prevent_suspend_cnt == 0 &&
  3723. hif_sc->runtime_timer_expires > 0) {
  3724. qdf_timer_free(&hif_sc->runtime_timer);
  3725. hif_sc->runtime_timer_expires = 0;
  3726. }
  3727. spin_unlock_bh(&hif_sc->runtime_lock);
  3728. return 0;
  3729. }
  3730. /**
  3731. * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
  3732. * @ol_sc: HIF context
  3733. * @lock: which lock is being acquired
  3734. * @delay: Timeout in milliseconds
  3735. *
  3736. * Prevent runtime suspend with a timeout after which runtime suspend would be
  3737. * allowed. This API uses a single timer to allow the suspend and timer is
  3738. * modified if the timeout is changed before timer fires.
  3739. * If the timeout is less than autosuspend_delay then use mark_last_busy instead
  3740. * of starting the timer.
  3741. *
  3742. * It is wise to try not to use this API and correct the design if possible.
  3743. *
  3744. * Return: 0 on success and negative error code on failure
  3745. */
  3746. int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
  3747. struct hif_pm_runtime_lock *lock, unsigned int delay)
  3748. {
  3749. struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
  3750. struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
  3751. int ret = 0;
  3752. unsigned long expires;
  3753. struct hif_pm_runtime_lock *context = lock;
  3754. if (hif_is_load_or_unload_in_progress(sc)) {
  3755. HIF_ERROR("%s: Load/unload in progress, ignore!",
  3756. __func__);
  3757. return -EINVAL;
  3758. }
  3759. if (hif_is_recovery_in_progress(sc)) {
  3760. HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
  3761. return -EINVAL;
  3762. }
  3763. if (!sc->hif_config.enable_runtime_pm)
  3764. return 0;
  3765. if (!context)
  3766. return -EINVAL;
  3767. if (in_irq())
  3768. WARN_ON(1);
  3769. /*
  3770. * Don't use internal timer if the timeout is less than auto suspend
  3771. * delay.
  3772. */
  3773. if (delay <= hif_sc->dev->power.autosuspend_delay) {
  3774. hif_pm_request_resume(hif_sc->dev);
  3775. hif_pm_runtime_mark_last_busy(ol_sc);
  3776. return ret;
  3777. }
  3778. expires = jiffies + msecs_to_jiffies(delay);
  3779. expires += !expires;
  3780. spin_lock_bh(&hif_sc->runtime_lock);
  3781. context->timeout = delay;
  3782. ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
  3783. hif_sc->pm_stats.prevent_suspend_timeout++;
  3784. /* Modify the timer only if new timeout is after already configured
  3785. * timeout
  3786. */
  3787. if (time_after(expires, hif_sc->runtime_timer_expires)) {
  3788. qdf_timer_mod(&hif_sc->runtime_timer, delay);
  3789. hif_sc->runtime_timer_expires = expires;
  3790. }
  3791. spin_unlock_bh(&hif_sc->runtime_lock);
  3792. HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
  3793. hif_pm_runtime_state_to_string(
  3794. qdf_atomic_read(&hif_sc->pm_state)),
  3795. delay, ret);
  3796. return ret;
  3797. }
  3798. /**
  3799. * hif_runtime_lock_init() - API to initialize Runtime PM context
  3800. * @lock: QDF runtime lock handle to be initialized
  3801. * @name: Context name
  3802. *
  3803. * This API allocates a Runtime PM context for the caller and stores
  3804. * it in the supplied lock handle.
  3805. * Return: 0 on success, -ENOMEM on allocation failure
  3806. */
  3807. int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
  3808. {
  3809. struct hif_pm_runtime_lock *context;
  3810. HIF_INFO("Initializing Runtime PM wakelock %s", name);
  3811. context = qdf_mem_malloc(sizeof(*context));
  3812. if (!context)
  3813. return -ENOMEM;
  3814. context->name = name ? name : "Default";
  3815. lock->lock = context;
  3816. return 0;
  3817. }
  3818. /**
  3819. * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
  3820. * @data: Runtime PM context
  3821. *
  3822. * Return: void
  3823. */
  3824. void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
  3825. struct hif_pm_runtime_lock *data)
  3826. {
  3827. struct hif_pm_runtime_lock *context = data;
  3828. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3829. if (!context) {
  3830. HIF_ERROR("Runtime PM wakelock context is NULL");
  3831. return;
  3832. }
  3833. HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
  3834. /*
  3835. * Ensure to delete the context list entry and reduce the usage count
  3836. * before freeing the context if context is active.
  3837. */
  3838. if (sc) {
  3839. spin_lock_bh(&sc->runtime_lock);
  3840. __hif_pm_runtime_allow_suspend(sc, context);
  3841. spin_unlock_bh(&sc->runtime_lock);
  3842. }
  3843. qdf_mem_free(context);
  3844. }
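/*
 * Usage sketch (hypothetical caller, "my_feature" is an illustrative name):
 * a client initializes a runtime lock once, holds it across work that must
 * keep the bus awake, and frees it on teardown.
 *
 *	qdf_runtime_lock_t lock;
 *
 *	hif_runtime_lock_init(&lock, "my_feature");
 *	hif_pm_runtime_prevent_suspend(hif_ctx, lock.lock);
 *	... work that must not race with runtime suspend ...
 *	hif_pm_runtime_allow_suspend(hif_ctx, lock.lock);
 *	hif_runtime_lock_deinit(hif_ctx, lock.lock);
 */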
  3845. /**
  3846. * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended
  3847. * @hif_ctx: HIF context
  3848. *
  3849. * Return: true for runtime suspended, otherwise false
  3850. */
  3851. bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
  3852. {
  3853. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3854. return qdf_atomic_read(&sc->pm_state) ==
  3855. HIF_PM_RUNTIME_STATE_SUSPENDED;
  3856. }
  3857. /**
  3858. * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
  3859. * @hif_ctx: HIF context
  3860. *
  3861. * monitor_wake_intr variable can be used to indicate if driver expects wake
  3862. * MSI for runtime PM
  3863. *
  3864. * Return: monitor_wake_intr variable
  3865. */
  3866. int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
  3867. {
  3868. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3869. return qdf_atomic_read(&sc->monitor_wake_intr);
  3870. }
  3871. /**
  3872. * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
  3873. * @hif_ctx: HIF context
  3874. * @val: value to set
  3875. *
  3876. * monitor_wake_intr variable can be used to indicate if driver expects wake
  3877. * MSI for runtime PM
  3878. *
  3879. * Return: void
  3880. */
  3881. void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
  3882. int val)
  3883. {
  3884. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3885. qdf_atomic_set(&sc->monitor_wake_intr, val);
  3886. }
  3887. /**
  3888. * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark by data path
  3889. * @hif_ctx: HIF context
  3890. *
  3891. * Return: void
  3892. */
  3893. void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
  3894. {
  3895. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3896. if (!sc)
  3897. return;
  3898. qdf_atomic_set(&sc->pm_dp_rx_busy, 1);
  3899. sc->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs();
  3900. hif_pm_runtime_mark_last_busy(hif_ctx);
  3901. }
  3902. /**
  3903. * hif_pm_runtime_is_dp_rx_busy() - Check if the last busy mark was set by dp rx
  3904. * @hif_ctx: HIF context
  3905. *
  3906. * Return: dp rx busy set value
  3907. */
  3908. int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
  3909. {
  3910. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3911. if (!sc)
  3912. return 0;
  3913. return qdf_atomic_read(&sc->pm_dp_rx_busy);
  3914. }
  3915. /**
  3916. * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp
  3917. * @hif_ctx: HIF context
  3918. *
  3919. * Return: timestamp of last mark busy by dp rx
  3920. */
  3921. qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
  3922. {
  3923. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3924. if (!sc)
  3925. return 0;
  3926. return sc->dp_last_busy_timestamp;
  3927. }
  3928. #endif /* FEATURE_RUNTIME_PM */
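/**
 * hif_pci_legacy_map_ce_to_irq() - map a copy engine to the legacy irq
 * @scn: hif context
 * @ce_id: copy engine id (unused, all CEs share one line)
 *
 * Return: the single legacy PCI irq number
 */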
  3929. int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
  3930. {
  3931. struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
  3932. /* legacy case only has one irq */
  3933. return pci_scn->irq;
  3934. }
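/**
 * hif_pci_addr_in_boundary() - sanity check a target address offset
 * @scn: hif context
 * @offset: offset into the target address space
 *
 * Return: 0 if the offset may be accessed, -EINVAL if it is out of bounds
 */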
  3935. int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
  3936. {
  3937. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  3938. struct hif_target_info *tgt_info;
  3939. tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
  3940. if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
  3941. tgt_info->target_type == TARGET_TYPE_QCA6390 ||
  3942. tgt_info->target_type == TARGET_TYPE_QCA6490 ||
  3943. tgt_info->target_type == TARGET_TYPE_QCA8074) {
  3944. /*
  3945. * Need to consider offset's memtype for QCA6290/QCA8074,
  3946. * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
  3947. * well initialized/defined.
  3948. */
  3949. return 0;
  3950. }
  3951. if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE)
  3952. || (offset + sizeof(unsigned int) <= sc->mem_len)) {
  3953. return 0;
  3954. }
  3955. HIF_TRACE("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)\n",
  3956. offset, (uint32_t)(offset + sizeof(unsigned int)),
  3957. sc->mem_len);
  3958. return -EINVAL;
  3959. }
  3960. /**
  3961. * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
  3962. * @scn: hif context
  3963. *
  3964. * Return: true if soc needs driver bmi otherwise false
  3965. */
  3966. bool hif_pci_needs_bmi(struct hif_softc *scn)
  3967. {
  3968. return !ce_srng_based(scn);
  3969. }
  3970. #ifdef FORCE_WAKE
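/**
 * hif_force_wake_request() - wake the MHI layer and the SoC before access
 * @hif_handle: HIF opaque handle
 *
 * Sends a force-wake request through PLD, polls until the device reports
 * awake, then performs the SoC wake handshake through the PCIe scratch and
 * wake registers.
 *
 * Return: 0 on success, -EINVAL or -ETIMEDOUT on failure
 */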
  3971. int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
  3972. {
  3973. uint32_t timeout = 0, value;
  3974. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  3975. struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
  3976. if (pld_force_wake_request(scn->qdf_dev->dev)) {
  3977. hif_err("force wake request send failed");
  3978. return -EINVAL;
  3979. }
  3980. HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
  3981. while (!pld_is_device_awake(scn->qdf_dev->dev) &&
  3982. timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) {
  3983. qdf_mdelay(FORCE_WAKE_DELAY_MS);
  3984. timeout += FORCE_WAKE_DELAY_MS;
  3985. }
  3986. if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) {
  3987. hif_err("Unable to wake up mhi");
  3988. HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
  3989. return -EINVAL;
  3990. }
  3991. HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
  3992. hif_write32_mb(scn,
  3993. scn->mem +
  3994. PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG,
  3995. 0);
  3996. hif_write32_mb(scn,
  3997. scn->mem +
  3998. PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
  3999. 1);
  4000. HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
  4001. /*
  4002. * do not reset the timeout
  4003. * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms
  4004. */
  4005. do {
  4006. value =
  4007. hif_read32_mb(scn,
  4008. scn->mem +
  4009. PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
  4010. if (value)
  4011. break;
  4012. qdf_mdelay(FORCE_WAKE_DELAY_MS);
  4013. timeout += FORCE_WAKE_DELAY_MS;
  4014. } while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);
  4015. if (!value) {
  4016. hif_err("force wake handshake failed");
  4017. HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
  4018. return -ETIMEDOUT;
  4019. }
  4020. HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);
  4021. return 0;
  4022. }
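/**
 * hif_force_wake_release() - undo a previous force-wake request
 * @hif_handle: HIF opaque handle
 *
 * Releases the PLD force-wake vote and clears the PCIe local wake register.
 *
 * Return: 0 on success, error code from pld_force_wake_release() otherwise
 */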
  4023. int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
  4024. {
  4025. int ret;
  4026. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  4027. struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
  4028. ret = pld_force_wake_release(scn->qdf_dev->dev);
  4029. if (ret) {
  4030. hif_err("force wake release failure");
  4031. HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
  4032. return ret;
  4033. }
  4034. HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
  4035. hif_write32_mb(scn,
  4036. scn->mem +
  4037. PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
  4038. 0);
  4039. HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
  4040. return 0;
  4041. }
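/**
 * hif_print_pci_stats() - dump the force-wake statistics counters
 * @pci_handle: hif pci context
 *
 * Return: none
 */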
  4042. void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
  4043. {
  4044. hif_debug("mhi_force_wake_request_vote: %d",
  4045. pci_handle->stats.mhi_force_wake_request_vote);
  4046. hif_debug("mhi_force_wake_failure: %d",
  4047. pci_handle->stats.mhi_force_wake_failure);
  4048. hif_debug("mhi_force_wake_success: %d",
  4049. pci_handle->stats.mhi_force_wake_success);
  4050. hif_debug("soc_force_wake_register_write_success: %d",
  4051. pci_handle->stats.soc_force_wake_register_write_success);
  4052. hif_debug("soc_force_wake_failure: %d",
  4053. pci_handle->stats.soc_force_wake_failure);
  4054. hif_debug("soc_force_wake_success: %d",
  4055. pci_handle->stats.soc_force_wake_success);
  4056. hif_debug("mhi_force_wake_release_failure: %d",
  4057. pci_handle->stats.mhi_force_wake_release_failure);
  4058. hif_debug("mhi_force_wake_release_success: %d",
  4059. pci_handle->stats.mhi_force_wake_release_success);
  4060. hif_debug("oc_force_wake_release_success: %d",
  4061. pci_handle->stats.soc_force_wake_release_success);
  4062. }
  4063. #endif /* FORCE_WAKE */