if_pci.c

  1. /*
  2. * Copyright (c) 2013-2021 The Linux Foundation. All rights reserved.
  3. * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  4. *
  5. * Permission to use, copy, modify, and/or distribute this software for
  6. * any purpose with or without fee is hereby granted, provided that the
  7. * above copyright notice and this permission notice appear in all
  8. * copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  11. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  12. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  13. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  14. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  15. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  16. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  17. * PERFORMANCE OF THIS SOFTWARE.
  18. */
  19. #include <linux/pci.h>
  20. #include <linux/slab.h>
  21. #include <linux/interrupt.h>
  22. #include <linux/if_arp.h>
  23. #include <linux/of_pci.h>
  24. #include <linux/version.h>
  25. #include "hif_io32.h"
  26. #include "if_pci.h"
  27. #include "hif.h"
  28. #include "target_type.h"
  29. #include "hif_main.h"
  30. #include "ce_main.h"
  31. #include "ce_api.h"
  32. #include "ce_internal.h"
  33. #include "ce_reg.h"
  34. #include "ce_bmi.h"
  35. #include "regtable.h"
  36. #include "hif_hw_version.h"
  37. #include <linux/debugfs.h>
  38. #include <linux/seq_file.h>
  39. #include "qdf_status.h"
  40. #include "qdf_atomic.h"
  41. #include "qdf_platform.h"
  42. #include "pld_common.h"
  43. #include "mp_dev.h"
  44. #include "hif_debug.h"
  45. #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
  46. char *legacy_ic_irqname[] = {
  47. "ce0",
  48. "ce1",
  49. "ce2",
  50. "ce3",
  51. "ce4",
  52. "ce5",
  53. "ce6",
  54. "ce7",
  55. "ce8",
  56. "ce9",
  57. "ce10",
  58. "ce11",
  59. "ce12",
  60. "ce13",
  61. "ce14",
  62. "ce15",
  63. "reo2sw8_intr2",
  64. "reo2sw7_intr2",
  65. "reo2sw6_intr2",
  66. "reo2sw5_intr2",
  67. "reo2sw4_intr2",
  68. "reo2sw3_intr2",
  69. "reo2sw2_intr2",
  70. "reo2sw1_intr2",
  71. "reo2sw0_intr2",
  72. "reo2sw8_intr",
  73. "reo2sw7_intr",
  74. "reo2sw6_inrr",
  75. "reo2sw5_intr",
  76. "reo2sw4_intr",
  77. "reo2sw3_intr",
  78. "reo2sw2_intr",
  79. "reo2sw1_intr",
  80. "reo2sw0_intr",
  81. "reo2status_intr2",
  82. "reo_status",
  83. "reo2rxdma_out_2",
  84. "reo2rxdma_out_1",
  85. "reo_cmd",
  86. "sw2reo6",
  87. "sw2reo5",
  88. "sw2reo1",
  89. "sw2reo",
  90. "rxdma2reo_mlo_0_dst_ring1",
  91. "rxdma2reo_mlo_0_dst_ring0",
  92. "rxdma2reo_mlo_1_dst_ring1",
  93. "rxdma2reo_mlo_1_dst_ring0",
  94. "rxdma2reo_dst_ring1",
  95. "rxdma2reo_dst_ring0",
  96. "rxdma2sw_dst_ring1",
  97. "rxdma2sw_dst_ring0",
  98. "rxdma2release_dst_ring1",
  99. "rxdma2release_dst_ring0",
  100. "sw2rxdma_2_src_ring",
  101. "sw2rxdma_1_src_ring",
  102. "sw2rxdma_0",
  103. "wbm2sw6_release2",
  104. "wbm2sw5_release2",
  105. "wbm2sw4_release2",
  106. "wbm2sw3_release2",
  107. "wbm2sw2_release2",
  108. "wbm2sw1_release2",
  109. "wbm2sw0_release2",
  110. "wbm2sw6_release",
  111. "wbm2sw5_release",
  112. "wbm2sw4_release",
  113. "wbm2sw3_release",
  114. "wbm2sw2_release",
  115. "wbm2sw1_release",
  116. "wbm2sw0_release",
  117. "wbm2sw_link",
  118. "wbm_error_release",
  119. "sw2txmon_src_ring",
  120. "sw2rxmon_src_ring",
  121. "txmon2sw_p1_intr1",
  122. "txmon2sw_p1_intr0",
  123. "txmon2sw_p0_dest1",
  124. "txmon2sw_p0_dest0",
  125. "rxmon2sw_p1_intr1",
  126. "rxmon2sw_p1_intr0",
  127. "rxmon2sw_p0_dest1",
  128. "rxmon2sw_p0_dest0",
  129. "sw_release",
  130. "sw2tcl_credit2",
  131. "sw2tcl_credit",
  132. "sw2tcl4",
  133. "sw2tcl5",
  134. "sw2tcl3",
  135. "sw2tcl2",
  136. "sw2tcl1",
  137. "sw2wbm1",
  138. "misc_8",
  139. "misc_7",
  140. "misc_6",
  141. "misc_5",
  142. "misc_4",
  143. "misc_3",
  144. "misc_2",
  145. "misc_1",
  146. "misc_0",
  147. };
  148. #endif
  149. #if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
  150. defined(QCA_WIFI_KIWI))
  151. #include "hal_api.h"
  152. #endif
  153. #include "if_pci_internal.h"
  154. #include "ce_tasklet.h"
  155. #include "targaddrs.h"
  156. #include "hif_exec.h"
  157. #include "pci_api.h"
  158. #include "ahb_api.h"
  159. #include "wlan_cfg.h"
  160. #include "qdf_hang_event_notifier.h"
  161. #include "qdf_platform.h"
  162. #include "qal_devnode.h"
  163. #include "qdf_irq.h"
  164. /* Maximum ms timeout for host to wake up target */
  165. #define PCIE_WAKE_TIMEOUT 1000
  166. #define RAMDUMP_EVENT_TIMEOUT 2500
  167. /* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
  168. * PCIe data bus errors.
  169. * As a workaround for this issue, change the reset sequence to
  170. * use a Target CPU warm reset instead of SOC_GLOBAL_RESET.
  171. */
  172. #define CPU_WARM_RESET_WAR
  173. #define WLAN_CFG_MAX_PCIE_GROUPS 5
  174. #ifdef QCA_WIFI_QCN9224
  175. #define WLAN_CFG_MAX_CE_COUNT 16
  176. #else
  177. #define WLAN_CFG_MAX_CE_COUNT 12
  178. #endif
  179. #define DP_IRQ_NAME_LEN 25
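/* Buffers for human-readable IRQ names, one set per PCIe group; these are
 * presumably filled in before the corresponding IRQs are requested.
 */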
  180. char dp_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_INT_NUM_CONTEXTS][DP_IRQ_NAME_LEN] = {};
  181. char ce_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_MAX_CE_COUNT][DP_IRQ_NAME_LEN] = {};
  182. #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
  183. #define WLAN_CFG_MAX_LEGACY_IRQ_COUNT 160
  184. char dp_legacy_irqname[WLAN_CFG_MAX_PCIE_GROUPS][WLAN_CFG_MAX_LEGACY_IRQ_COUNT][DP_IRQ_NAME_LEN] = {};
  185. #endif
  186. static inline int hif_get_pci_slot(struct hif_softc *scn)
  187. {
  188. int pci_slot = pld_get_pci_slot(scn->qdf_dev->dev);
  189. if (pci_slot < 0) {
  190. hif_err("Invalid PCI SLOT %d", pci_slot);
  191. qdf_assert_always(0);
  192. return 0;
  193. } else {
  194. return pci_slot;
  195. }
  196. }
  197. /*
  198. * Top-level interrupt handler for all PCI interrupts from a Target.
  199. * When a block of MSI interrupts is allocated, this top-level handler
  200. * is not used; instead, we directly call the correct sub-handler.
  201. */
  202. struct ce_irq_reg_table {
  203. uint32_t irq_enable;
  204. uint32_t irq_status;
  205. };
  206. #ifndef QCA_WIFI_3_0_ADRASTEA
  207. static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
  208. {
  209. }
  210. #else
  211. static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
  212. {
  213. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  214. unsigned int target_enable0, target_enable1;
  215. unsigned int target_cause0, target_cause1;
  216. target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
  217. target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
  218. target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
  219. target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);
  220. if ((target_enable0 & target_cause0) ||
  221. (target_enable1 & target_cause1)) {
  222. hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
  223. hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);
  224. if (scn->notice_send)
  225. pld_intr_notify_q6(sc->dev);
  226. }
  227. }
  228. #endif
  229. /**
  230. * pci_dispatch_interrupt() - PCI interrupt dispatcher
  231. * @scn: hif context
  232. *
  233. * Return: N/A
  234. */
  235. static void pci_dispatch_interrupt(struct hif_softc *scn)
  236. {
  237. uint32_t intr_summary;
  238. int id;
  239. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  240. if (scn->hif_init_done != true)
  241. return;
  242. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  243. return;
  244. intr_summary = CE_INTERRUPT_SUMMARY(scn);
  245. if (intr_summary == 0) {
  246. if ((scn->target_status != TARGET_STATUS_RESET) &&
  247. (!qdf_atomic_read(&scn->link_suspended))) {
  248. hif_write32_mb(scn, scn->mem +
  249. (SOC_CORE_BASE_ADDRESS |
  250. PCIE_INTR_ENABLE_ADDRESS),
  251. HOST_GROUP0_MASK);
  252. hif_read32_mb(scn, scn->mem +
  253. (SOC_CORE_BASE_ADDRESS |
  254. PCIE_INTR_ENABLE_ADDRESS));
  255. }
  256. Q_TARGET_ACCESS_END(scn);
  257. return;
  258. }
  259. Q_TARGET_ACCESS_END(scn);
  260. scn->ce_irq_summary = intr_summary;
  261. for (id = 0; intr_summary && (id < scn->ce_count); id++) {
  262. if (intr_summary & (1 << id)) {
  263. intr_summary &= ~(1 << id);
  264. ce_dispatch_interrupt(id, &hif_state->tasklets[id]);
  265. }
  266. }
  267. }
  268. irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
  269. {
  270. struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
  271. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  272. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
  273. volatile int tmp;
  274. uint16_t val = 0;
  275. uint32_t bar0 = 0;
  276. uint32_t fw_indicator_address, fw_indicator;
  277. bool ssr_irq = false;
  278. unsigned int host_cause, host_enable;
  279. if (LEGACY_INTERRUPTS(sc)) {
  280. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  281. return IRQ_HANDLED;
  282. if (ADRASTEA_BU) {
  283. host_enable = hif_read32_mb(sc, sc->mem +
  284. PCIE_INTR_ENABLE_ADDRESS);
  285. host_cause = hif_read32_mb(sc, sc->mem +
  286. PCIE_INTR_CAUSE_ADDRESS);
  287. if (!(host_enable & host_cause)) {
  288. hif_pci_route_adrastea_interrupt(sc);
  289. return IRQ_HANDLED;
  290. }
  291. }
  292. /* Clear Legacy PCI line interrupts
  293. * IMPORTANT: INTR_CLR register has to be set
  294. * after INTR_ENABLE is set to 0,
  295. * otherwise the interrupt cannot actually be cleared
  296. */
  297. hif_write32_mb(sc, sc->mem +
  298. (SOC_CORE_BASE_ADDRESS |
  299. PCIE_INTR_ENABLE_ADDRESS), 0);
  300. hif_write32_mb(sc, sc->mem +
  301. (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
  302. ADRASTEA_BU ?
  303. (host_enable & host_cause) :
  304. HOST_GROUP0_MASK);
  305. if (ADRASTEA_BU)
  306. hif_write32_mb(sc, sc->mem + 0x2f100c,
  307. (host_cause >> 1));
  308. /* IMPORTANT: this extra read transaction is required to
  309. * flush the posted write buffer
  310. */
  311. if (!ADRASTEA_BU) {
  312. tmp =
  313. hif_read32_mb(sc, sc->mem +
  314. (SOC_CORE_BASE_ADDRESS |
  315. PCIE_INTR_ENABLE_ADDRESS));
  316. if (tmp == 0xdeadbeef) {
  317. hif_err("SoC returns 0xdeadbeef!!");
  318. pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
  319. hif_err("PCI Vendor ID = 0x%04x", val);
  320. pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
  321. hif_err("PCI Device ID = 0x%04x", val);
  322. pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
  323. hif_err("PCI Command = 0x%04x", val);
  324. pci_read_config_word(sc->pdev, PCI_STATUS, &val);
  325. hif_err("PCI Status = 0x%04x", val);
  326. pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
  327. &bar0);
  328. hif_err("PCI BAR0 = 0x%08x", bar0);
  329. hif_err("RTC_STATE_ADDRESS = 0x%08x",
  330. hif_read32_mb(sc, sc->mem +
  331. PCIE_LOCAL_BASE_ADDRESS
  332. + RTC_STATE_ADDRESS));
  333. hif_err("PCIE_SOC_WAKE_ADDRESS = 0x%08x",
  334. hif_read32_mb(sc, sc->mem +
  335. PCIE_LOCAL_BASE_ADDRESS
  336. + PCIE_SOC_WAKE_ADDRESS));
  337. hif_err("0x80008 = 0x%08x, 0x8000c = 0x%08x",
  338. hif_read32_mb(sc, sc->mem + 0x80008),
  339. hif_read32_mb(sc, sc->mem + 0x8000c));
  340. hif_err("0x80010 = 0x%08x, 0x80014 = 0x%08x",
  341. hif_read32_mb(sc, sc->mem + 0x80010),
  342. hif_read32_mb(sc, sc->mem + 0x80014));
  343. hif_err("0x80018 = 0x%08x, 0x8001c = 0x%08x",
  344. hif_read32_mb(sc, sc->mem + 0x80018),
  345. hif_read32_mb(sc, sc->mem + 0x8001c));
  346. QDF_BUG(0);
  347. }
  348. PCI_CLR_CAUSE0_REGISTER(sc);
  349. }
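/* A set FW_IND_EVENT_PENDING bit signals a firmware event (typically an
 * assert); handling is then deferred to the tasklet (SSR) path below.
 */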
  350. if (HAS_FW_INDICATOR) {
  351. fw_indicator_address = hif_state->fw_indicator_address;
  352. fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
  353. if ((fw_indicator != ~0) &&
  354. (fw_indicator & FW_IND_EVENT_PENDING))
  355. ssr_irq = true;
  356. }
  357. if (Q_TARGET_ACCESS_END(scn) < 0)
  358. return IRQ_HANDLED;
  359. }
  360. /* TBDXXX: Add support for WMAC */
  361. if (ssr_irq) {
  362. sc->irq_event = irq;
  363. qdf_atomic_set(&scn->tasklet_from_intr, 1);
  364. qdf_atomic_inc(&scn->active_tasklet_cnt);
  365. tasklet_schedule(&sc->intr_tq);
  366. } else {
  367. pci_dispatch_interrupt(scn);
  368. }
  369. return IRQ_HANDLED;
  370. }
  371. bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
  372. {
  373. return 1; /* FIX THIS */
  374. }
  375. int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
  376. {
  377. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  378. int i = 0;
  379. if (!irq || !size) {
  380. return -EINVAL;
  381. }
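/* Legacy INTx or a single MSI vector: only the base IRQ is in use */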
  382. if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
  383. irq[0] = sc->irq;
  384. return 1;
  385. }
  386. if (sc->num_msi_intrs > size) {
  387. qdf_print("Not enough space in irq buffer to return irqs");
  388. return -EINVAL;
  389. }
  390. for (i = 0; i < sc->num_msi_intrs; i++) {
  391. irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL;
  392. }
  393. return sc->num_msi_intrs;
  394. }
  395. /**
  396. * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
  397. * @scn: hif_softc
  398. *
  399. * Return: void
  400. */
  401. #if CONFIG_ATH_PCIE_MAX_PERF == 0
  402. void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
  403. {
  404. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  405. A_target_id_t pci_addr = scn->mem;
  406. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  407. /*
  408. * If the deferred sleep timer is running cancel it
  409. * and put the soc into sleep.
  410. */
  411. if (hif_state->fake_sleep == true) {
  412. qdf_timer_stop(&hif_state->sleep_timer);
  413. if (hif_state->verified_awake == false) {
  414. hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  415. PCIE_SOC_WAKE_ADDRESS,
  416. PCIE_SOC_WAKE_RESET);
  417. }
  418. hif_state->fake_sleep = false;
  419. }
  420. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  421. }
  422. #else
  423. inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
  424. {
  425. }
  426. #endif
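/* Accessors for registers in the PCIe local register space; the addr
 * argument is an offset relative to PCIE_LOCAL_BASE_ADDRESS.
 */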
  427. #define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
  428. hif_read32_mb(sc, (char *)(mem) + \
  429. PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))
  430. #define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
  431. hif_write32_mb(sc, ((char *)(mem) + \
  432. PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
  433. #ifdef QCA_WIFI_3_0
  434. /**
  435. * hif_targ_is_awake() - check to see if the target is awake
  436. * @hif_ctx: hif context
  437. * @mem: mapped target register base
  438. *
  439. * emulation never goes to sleep
  440. *
  441. * Return: true if target is awake
  442. */
  443. static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
  444. {
  445. return true;
  446. }
  447. #else
  448. /**
  449. * hif_targ_is_awake() - check to see if the target is awake
  450. * @scn: hif context
  451. * @mem: mapped target register base
  452. *
  453. * Return: true if the targets clocks are on
  454. */
  455. static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
  456. {
  457. uint32_t val;
  458. if (scn->recovery)
  459. return false;
  460. val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
  461. + RTC_STATE_ADDRESS);
  462. return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
  463. }
  464. #endif
  465. #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
  466. static void hif_pci_device_reset(struct hif_pci_softc *sc)
  467. {
  468. void __iomem *mem = sc->mem;
  469. int i;
  470. uint32_t val;
  471. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  472. if (!scn->hostdef)
  473. return;
  474. /* NB: Don't check resetok here. This form of reset
  475. * is integral to correct operation.
  476. */
  477. if (!SOC_GLOBAL_RESET_ADDRESS)
  478. return;
  479. if (!mem)
  480. return;
  481. hif_err("Reset Device");
  482. /*
  483. * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
  484. * writing WAKE_V, the Target may scribble over Host memory!
  485. */
  486. A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
  487. PCIE_SOC_WAKE_V_MASK);
  488. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  489. if (hif_targ_is_awake(scn, mem))
  490. break;
  491. qdf_mdelay(1);
  492. }
  493. /* Put Target, including PCIe, into RESET. */
  494. val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
  495. val |= 1;
  496. A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
  497. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  498. if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
  499. RTC_STATE_COLD_RESET_MASK)
  500. break;
  501. qdf_mdelay(1);
  502. }
  503. /* Pull Target, including PCIe, out of RESET. */
  504. val &= ~1;
  505. A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
  506. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  507. if (!
  508. (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
  509. RTC_STATE_COLD_RESET_MASK))
  510. break;
  511. qdf_mdelay(1);
  512. }
  513. A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
  514. PCIE_SOC_WAKE_RESET);
  515. }
  516. /* CPU warm reset function
  517. * Steps:
  518. * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
  519. * 2. Clear the FW_INDICATOR_ADDRESS -so Target CPU initializes FW
  520. * correctly on WARM reset
  521. * 3. Clear TARGET CPU LF timer interrupt
  522. * 4. Reset all CEs to clear any pending CE transactions
  523. * 5. Warm reset CPU
  524. */
  525. static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
  526. {
  527. void __iomem *mem = sc->mem;
  528. int i;
  529. uint32_t val;
  530. uint32_t fw_indicator;
  531. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  532. /* NB: Don't check resetok here. This form of reset is
  533. * integral to correct operation.
  534. */
  535. if (!mem)
  536. return;
  537. hif_debug("Target Warm Reset");
  538. /*
  539. * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
  540. * writing WAKE_V, the Target may scribble over Host memory!
  541. */
  542. A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
  543. PCIE_SOC_WAKE_V_MASK);
  544. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  545. if (hif_targ_is_awake(scn, mem))
  546. break;
  547. qdf_mdelay(1);
  548. }
  549. /*
  550. * Disable Pending interrupts
  551. */
  552. val =
  553. hif_read32_mb(sc, mem +
  554. (SOC_CORE_BASE_ADDRESS |
  555. PCIE_INTR_CAUSE_ADDRESS));
  556. hif_debug("Host Intr Cause reg 0x%x: value : 0x%x",
  557. (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
  558. /* Target CPU Intr Cause */
  559. val = hif_read32_mb(sc, mem +
  560. (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
  561. hif_debug("Target CPU Intr Cause 0x%x", val);
  562. val =
  563. hif_read32_mb(sc, mem +
  564. (SOC_CORE_BASE_ADDRESS |
  565. PCIE_INTR_ENABLE_ADDRESS));
  566. hif_write32_mb(sc, (mem +
  567. (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
  568. hif_write32_mb(sc, (mem +
  569. (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
  570. HOST_GROUP0_MASK);
  571. qdf_mdelay(100);
  572. /* Clear FW_INDICATOR_ADDRESS */
  573. if (HAS_FW_INDICATOR) {
  574. fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
  575. hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
  576. }
  577. /* Clear Target LF Timer interrupts */
  578. val =
  579. hif_read32_mb(sc, mem +
  580. (RTC_SOC_BASE_ADDRESS +
  581. SOC_LF_TIMER_CONTROL0_ADDRESS));
  582. hif_debug("addr 0x%x : 0x%x",
  583. (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
  584. val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
  585. hif_write32_mb(sc, mem +
  586. (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
  587. val);
  588. /* Reset CE */
  589. val =
  590. hif_read32_mb(sc, mem +
  591. (RTC_SOC_BASE_ADDRESS |
  592. SOC_RESET_CONTROL_ADDRESS));
  593. val |= SOC_RESET_CONTROL_CE_RST_MASK;
  594. hif_write32_mb(sc, (mem +
  595. (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
  596. val);
  597. val =
  598. hif_read32_mb(sc, mem +
  599. (RTC_SOC_BASE_ADDRESS |
  600. SOC_RESET_CONTROL_ADDRESS));
  601. qdf_mdelay(10);
  602. /* CE unreset */
  603. val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
  604. hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
  605. SOC_RESET_CONTROL_ADDRESS), val);
  606. val =
  607. hif_read32_mb(sc, mem +
  608. (RTC_SOC_BASE_ADDRESS |
  609. SOC_RESET_CONTROL_ADDRESS));
  610. qdf_mdelay(10);
  611. /* Read Target CPU Intr Cause */
  612. val = hif_read32_mb(sc, mem +
  613. (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
  614. hif_debug("Target CPU Intr Cause after CE reset 0x%x", val);
  615. /* CPU warm RESET */
  616. val =
  617. hif_read32_mb(sc, mem +
  618. (RTC_SOC_BASE_ADDRESS |
  619. SOC_RESET_CONTROL_ADDRESS));
  620. val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
  621. hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
  622. SOC_RESET_CONTROL_ADDRESS), val);
  623. val =
  624. hif_read32_mb(sc, mem +
  625. (RTC_SOC_BASE_ADDRESS |
  626. SOC_RESET_CONTROL_ADDRESS));
  627. hif_debug("RESET_CONTROL after cpu warm reset 0x%x", val);
  628. qdf_mdelay(100);
  629. hif_debug("Target Warm reset complete");
  630. }
  631. #ifndef QCA_WIFI_3_0
  632. /* only applicable to legacy ce */
  633. int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
  634. {
  635. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  636. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  637. void __iomem *mem = sc->mem;
  638. uint32_t val;
  639. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  640. return ATH_ISR_NOSCHED;
  641. val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
  642. if (Q_TARGET_ACCESS_END(scn) < 0)
  643. return ATH_ISR_SCHED;
  644. hif_debug("FW_INDICATOR register is 0x%x", val);
  645. if (val & FW_IND_HELPER)
  646. return 0;
  647. return 1;
  648. }
  649. #endif
  650. int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
  651. {
  652. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  653. uint16_t device_id = 0;
  654. uint32_t val;
  655. uint16_t timeout_count = 0;
  656. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  657. /* Check device ID from PCIe configuration space for link status */
  658. pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
  659. if (device_id != sc->devid) {
  660. hif_err("Device ID does match (read 0x%x, expect 0x%x)",
  661. device_id, sc->devid);
  662. return -EACCES;
  663. }
  664. /* Check PCIe local register for bar/memory access */
  665. val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  666. RTC_STATE_ADDRESS);
  667. hif_debug("RTC_STATE_ADDRESS is %08x", val);
  668. /* Try to wake up target if it sleeps */
  669. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  670. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  671. hif_debug("PCIE_SOC_WAKE_ADDRESS is %08x",
  672. hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  673. PCIE_SOC_WAKE_ADDRESS));
  674. /* Check if target can be woken up */
  675. while (!hif_targ_is_awake(scn, sc->mem)) {
  676. if (timeout_count >= PCIE_WAKE_TIMEOUT) {
  677. hif_err("wake up timeout, %08x, %08x",
  678. hif_read32_mb(sc, sc->mem +
  679. PCIE_LOCAL_BASE_ADDRESS +
  680. RTC_STATE_ADDRESS),
  681. hif_read32_mb(sc, sc->mem +
  682. PCIE_LOCAL_BASE_ADDRESS +
  683. PCIE_SOC_WAKE_ADDRESS));
  684. return -EACCES;
  685. }
  686. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  687. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  688. qdf_mdelay(100);
  689. timeout_count += 100;
  690. }
  691. /* Check Power register for SoC internal bus issues */
  692. val =
  693. hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
  694. SOC_POWER_REG_OFFSET);
  695. hif_debug("Power register is %08x", val);
  696. return 0;
  697. }
  698. /**
  699. * __hif_pci_dump_registers(): dump other PCI debug registers
  700. * @scn: struct hif_softc
  701. *
  702. * This function dumps pci debug registers. The parent function
  703. * dumps the copy engine registers before calling this function.
  704. *
  705. * Return: void
  706. */
  707. static void __hif_pci_dump_registers(struct hif_softc *scn)
  708. {
  709. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  710. void __iomem *mem = sc->mem;
  711. uint32_t val, i, j;
  712. uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
  713. uint32_t ce_base;
  714. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  715. return;
  716. /* DEBUG_INPUT_SEL_SRC = 0x6 */
  717. val =
  718. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  719. WLAN_DEBUG_INPUT_SEL_OFFSET);
  720. val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
  721. val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
  722. hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
  723. WLAN_DEBUG_INPUT_SEL_OFFSET, val);
  724. /* DEBUG_CONTROL_ENABLE = 0x1 */
  725. val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  726. WLAN_DEBUG_CONTROL_OFFSET);
  727. val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
  728. val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
  729. hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
  730. WLAN_DEBUG_CONTROL_OFFSET, val);
  731. hif_debug("Debug: inputsel: %x dbgctrl: %x",
  732. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  733. WLAN_DEBUG_INPUT_SEL_OFFSET),
  734. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  735. WLAN_DEBUG_CONTROL_OFFSET));
  736. hif_debug("Debug CE");
  737. /* Loop CE debug output */
  738. /* AMBA_DEBUG_BUS_SEL = 0xc */
  739. val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  740. AMBA_DEBUG_BUS_OFFSET);
  741. val &= ~AMBA_DEBUG_BUS_SEL_MASK;
  742. val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
  743. hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
  744. val);
  745. for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
  746. /* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i */
  747. val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
  748. CE_WRAPPER_DEBUG_OFFSET);
  749. val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
  750. val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
  751. hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
  752. CE_WRAPPER_DEBUG_OFFSET, val);
  753. hif_debug("ce wrapper: %d amdbg: %x cewdbg: %x",
  754. wrapper_idx[i],
  755. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  756. AMBA_DEBUG_BUS_OFFSET),
  757. hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
  758. CE_WRAPPER_DEBUG_OFFSET));
  759. if (wrapper_idx[i] <= 7) {
  760. for (j = 0; j <= 5; j++) {
  761. ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
  762. /* For (j=0~5) write CE_DEBUG_SEL = j */
  763. val =
  764. hif_read32_mb(sc, mem + ce_base +
  765. CE_DEBUG_OFFSET);
  766. val &= ~CE_DEBUG_SEL_MASK;
  767. val |= CE_DEBUG_SEL_SET(j);
  768. hif_write32_mb(sc, mem + ce_base +
  769. CE_DEBUG_OFFSET, val);
  770. /* read (@gpio_athr_wlan_reg)
  771. * WLAN_DEBUG_OUT_DATA
  772. */
  773. val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
  774. + WLAN_DEBUG_OUT_OFFSET);
  775. val = WLAN_DEBUG_OUT_DATA_GET(val);
  776. hif_debug("module%d: cedbg: %x out: %x",
  777. j,
  778. hif_read32_mb(sc, mem + ce_base +
  779. CE_DEBUG_OFFSET), val);
  780. }
  781. } else {
  782. /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
  783. val =
  784. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  785. WLAN_DEBUG_OUT_OFFSET);
  786. val = WLAN_DEBUG_OUT_DATA_GET(val);
  787. hif_debug("out: %x", val);
  788. }
  789. }
  790. hif_debug("Debug PCIe:");
  791. /* Loop PCIe debug output */
  792. /* Write AMBA_DEBUG_BUS_SEL = 0x1c */
  793. val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  794. AMBA_DEBUG_BUS_OFFSET);
  795. val &= ~AMBA_DEBUG_BUS_SEL_MASK;
  796. val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
  797. hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
  798. AMBA_DEBUG_BUS_OFFSET, val);
  799. for (i = 0; i <= 8; i++) {
  800. /* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
  801. val =
  802. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  803. AMBA_DEBUG_BUS_OFFSET);
  804. val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
  805. val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
  806. hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
  807. AMBA_DEBUG_BUS_OFFSET, val);
  808. /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
  809. val =
  810. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  811. WLAN_DEBUG_OUT_OFFSET);
  812. val = WLAN_DEBUG_OUT_DATA_GET(val);
  813. hif_debug("amdbg: %x out: %x %x",
  814. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  815. WLAN_DEBUG_OUT_OFFSET), val,
  816. hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
  817. WLAN_DEBUG_OUT_OFFSET));
  818. }
  819. Q_TARGET_ACCESS_END(scn);
  820. }
  821. /**
  822. * hif_pci_dump_registers(): dump bus debug registers
  823. * @hif_ctx: struct hif_softc
  824. *
  825. * This function dumps hif bus debug registers
  826. *
  827. * Return: 0 for success or error code
  828. */
  829. int hif_pci_dump_registers(struct hif_softc *hif_ctx)
  830. {
  831. int status;
  832. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  833. status = hif_dump_ce_registers(scn);
  834. if (status)
  835. hif_err("Dump CE Registers Failed");
  836. /* dump non copy engine pci registers */
  837. __hif_pci_dump_registers(scn);
  838. return 0;
  839. }
  840. #ifdef HIF_CONFIG_SLUB_DEBUG_ON
  841. /* worker thread to schedule wlan_tasklet in SLUB debug build */
  842. static void reschedule_tasklet_work_handler(void *arg)
  843. {
  844. struct hif_pci_softc *sc = arg;
  845. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  846. if (!scn) {
  847. hif_err("hif_softc is NULL");
  848. return;
  849. }
  850. if (scn->hif_init_done == false) {
  851. hif_err("wlan driver is unloaded");
  852. return;
  853. }
  854. tasklet_schedule(&sc->intr_tq);
  855. }
  856. /**
  857. * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
  858. * work
  859. * @sc: HIF PCI Context
  860. *
  861. * Return: void
  862. */
  863. static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
  864. {
  865. qdf_create_work(0, &sc->reschedule_tasklet_work,
  866. reschedule_tasklet_work_handler, sc);
  867. }
  868. #else
  869. static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
  870. #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
  871. void wlan_tasklet(unsigned long data)
  872. {
  873. struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
  874. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  875. if (scn->hif_init_done == false)
  876. goto end;
  877. if (qdf_atomic_read(&scn->link_suspended))
  878. goto end;
  879. if (!ADRASTEA_BU) {
  880. hif_fw_interrupt_handler(sc->irq_event, scn);
  881. if (scn->target_status == TARGET_STATUS_RESET)
  882. goto end;
  883. }
  884. end:
  885. qdf_atomic_set(&scn->tasklet_from_intr, 0);
  886. qdf_atomic_dec(&scn->active_tasklet_cnt);
  887. }
  888. /**
  889. * hif_disable_power_gating() - disable HW power gating
  890. * @hif_ctx: hif context
  891. *
  892. * disables pcie L1 power states
  893. */
  894. static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
  895. {
  896. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  897. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  898. if (!scn) {
  899. hif_err("Could not disable ASPM scn is null");
  900. return;
  901. }
  902. /* Disable ASPM when pkt log is enabled */
  903. pfrm_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
  904. pfrm_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
  905. }
  906. /**
  907. * hif_enable_power_gating() - enable HW power gating
  908. * @sc: hif context
  909. *
  910. * enables pcie L1 power states
  911. */
  912. static void hif_enable_power_gating(struct hif_pci_softc *sc)
  913. {
  914. if (!sc) {
  915. hif_err("Could not disable ASPM scn is null");
  916. return;
  917. }
  918. /* Re-enable ASPM after firmware/OTP download is complete */
  919. pfrm_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
  920. }
  921. /**
  922. * hif_pci_enable_power_management() - enable power management
  923. * @hif_sc: hif context
  924. * @is_packet_log_enabled: true if packet log is enabled
  925. *
  926. * Enables runtime PM, ASPM (via hif_enable_power_gating) and re-enables
  927. * SoC sleep after driver load (hif_pci_target_sleep_state_adjust).
  928. *
  929. * note: epping mode does not call this function as it does not
  930. * care about saving power.
  931. */
  932. void hif_pci_enable_power_management(struct hif_softc *hif_sc,
  933. bool is_packet_log_enabled)
  934. {
  935. struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
  936. uint32_t mode;
  937. if (!pci_ctx) {
  938. hif_err("hif_ctx null");
  939. return;
  940. }
  941. mode = hif_get_conparam(hif_sc);
  942. if (mode == QDF_GLOBAL_FTM_MODE) {
  943. hif_info("Enable power gating for FTM mode");
  944. hif_enable_power_gating(pci_ctx);
  945. return;
  946. }
  947. hif_rtpm_start(hif_sc);
  948. if (!is_packet_log_enabled)
  949. hif_enable_power_gating(pci_ctx);
  950. if (!CONFIG_ATH_PCIE_MAX_PERF &&
  951. CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
  952. !ce_srng_based(hif_sc)) {
  953. /* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
  954. if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
  955. hif_err("Failed to set target to sleep");
  956. }
  957. }
  958. /**
  959. * hif_pci_disable_power_management() - disable power management
  960. * @hif_ctx: hif context
  961. *
  962. * Currently disables runtime PM. Should be updated to behave correctly
  963. * if runtime PM is not started. Should be updated to take care
  964. * of ASPM and SoC sleep for driver load.
  965. */
  966. void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
  967. {
  968. struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
  969. if (!pci_ctx) {
  970. hif_err("hif_ctx null");
  971. return;
  972. }
  973. hif_rtpm_stop(hif_ctx);
  974. }
  975. void hif_pci_display_stats(struct hif_softc *hif_ctx)
  976. {
  977. struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
  978. if (!pci_ctx) {
  979. hif_err("hif_ctx null");
  980. return;
  981. }
  982. hif_display_ce_stats(hif_ctx);
  983. hif_print_pci_stats(pci_ctx);
  984. }
  985. void hif_pci_clear_stats(struct hif_softc *hif_ctx)
  986. {
  987. struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);
  988. if (!pci_ctx) {
  989. hif_err("hif_ctx null");
  990. return;
  991. }
  992. hif_clear_ce_stats(&pci_ctx->ce_sc);
  993. }
  994. #define ATH_PCI_PROBE_RETRY_MAX 3
  995. /**
  996. * hif_pci_open(): hif_bus_open
  997. * @hif_ctx: hif context
  998. * @bus_type: bus type
  999. *
  1000. * Return: QDF_STATUS
  1001. */
  1002. QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
  1003. {
  1004. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  1005. hif_ctx->bus_type = bus_type;
  1006. hif_rtpm_open(hif_ctx);
  1007. qdf_spinlock_create(&sc->irq_lock);
  1008. qdf_spinlock_create(&sc->force_wake_lock);
  1009. return hif_ce_open(hif_ctx);
  1010. }
  1011. /**
  1012. * hif_wake_target_cpu() - wake the target's cpu
  1013. * @scn: hif context
  1014. *
  1015. * Send an interrupt to the device to wake up the Target CPU
  1016. * so it has an opportunity to notice any changed state.
  1017. */
  1018. static void hif_wake_target_cpu(struct hif_softc *scn)
  1019. {
  1020. QDF_STATUS rv;
  1021. uint32_t core_ctrl;
  1022. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
  1023. rv = hif_diag_read_access(hif_hdl,
  1024. SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
  1025. &core_ctrl);
  1026. QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
  1027. /* A_INUM_FIRMWARE interrupt to Target CPU */
  1028. core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
  1029. rv = hif_diag_write_access(hif_hdl,
  1030. SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
  1031. core_ctrl);
  1032. QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
  1033. }
  1034. /**
  1035. * soc_wake_reset() - allow the target to go to sleep
  1036. * @scn: hif_softc
  1037. *
  1038. * Clear the force wake register. This is done from
  1039. * hif_sleep_entry and from the cancel deferred target sleep path.
  1040. */
  1041. static void soc_wake_reset(struct hif_softc *scn)
  1042. {
  1043. hif_write32_mb(scn, scn->mem +
  1044. PCIE_LOCAL_BASE_ADDRESS +
  1045. PCIE_SOC_WAKE_ADDRESS,
  1046. PCIE_SOC_WAKE_RESET);
  1047. }
  1048. /**
  1049. * hif_sleep_entry() - gate target sleep
  1050. * @arg: hif context
  1051. *
  1052. * This function is the callback for the sleep timer.
  1053. * Check if the last force awake critical section was at least
  1054. * HIF_MIN_SLEEP_INACTIVITY_TIME_MS ago. If it was,
  1055. * allow the target to go to sleep and cancel the sleep timer;
  1056. * otherwise, reschedule the sleep timer.
  1057. */
  1058. static void hif_sleep_entry(void *arg)
  1059. {
  1060. struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
  1061. struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
  1062. uint32_t idle_ms;
  1063. if (scn->recovery)
  1064. return;
  1065. if (hif_is_driver_unloading(scn))
  1066. return;
  1067. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  1068. if (hif_state->fake_sleep) {
  1069. idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
  1070. - hif_state->sleep_ticks);
  1071. if (!hif_state->verified_awake &&
  1072. idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
  1073. if (!qdf_atomic_read(&scn->link_suspended)) {
  1074. soc_wake_reset(scn);
  1075. hif_state->fake_sleep = false;
  1076. }
  1077. } else {
  1078. qdf_timer_stop(&hif_state->sleep_timer);
  1079. qdf_timer_start(&hif_state->sleep_timer,
  1080. HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
  1081. }
  1082. }
  1083. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  1084. }
  1085. #define HIF_HIA_MAX_POLL_LOOP 1000000
  1086. #define HIF_HIA_POLLING_DELAY_MS 10
  1087. #ifdef QCA_HIF_HIA_EXTND
  1088. static void hif_set_hia_extnd(struct hif_softc *scn)
  1089. {
  1090. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
  1091. struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
  1092. uint32_t target_type = tgt_info->target_type;
  1093. hif_info("E");
  1094. if ((target_type == TARGET_TYPE_AR900B) ||
  1095. target_type == TARGET_TYPE_QCA9984 ||
  1096. target_type == TARGET_TYPE_QCA9888) {
  1097. /* CHIP revision is bits 8-11 of the CHIP_ID register (0xec)
  1098. * in RTC space
  1099. */
  1100. tgt_info->target_revision
  1101. = CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
  1102. + CHIP_ID_ADDRESS));
  1103. qdf_print("chip_id 0x%x chip_revision 0x%x",
  1104. target_type, tgt_info->target_revision);
  1105. }
  1106. {
  1107. uint32_t flag2_value = 0;
  1108. uint32_t flag2_targ_addr =
  1109. host_interest_item_address(target_type,
  1110. offsetof(struct host_interest_s, hi_skip_clock_init));
  1111. if ((ar900b_20_targ_clk != -1) &&
  1112. (frac != -1) && (intval != -1)) {
  1113. hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1114. &flag2_value);
  1115. qdf_print("\n Setting clk_override");
  1116. flag2_value |= CLOCK_OVERRIDE;
  1117. hif_diag_write_access(hif_hdl, flag2_targ_addr,
  1118. flag2_value);
  1119. qdf_print("\n CLOCK PLL val set %d", flag2_value);
  1120. } else {
  1121. qdf_print("\n CLOCK PLL skipped");
  1122. }
  1123. }
  1124. if (target_type == TARGET_TYPE_AR900B
  1125. || target_type == TARGET_TYPE_QCA9984
  1126. || target_type == TARGET_TYPE_QCA9888) {
  1127. /* For AR9980_2.0 a 300 MHz clock is used. For now we assume
  1128. * this is supplied through module parameters;
  1129. * if not supplied, assume the default, i.e. the same behavior as 1.0.
  1130. * The 1.0 clock cannot be tuned, so reset to defaults.
  1131. */
  1132. qdf_print(KERN_INFO
  1133. "%s: setting the target pll frac %x intval %x",
  1134. __func__, frac, intval);
  1135. /* do not touch frac, and int val, let them be default -1,
  1136. * if desired, host can supply these through module params
  1137. */
  1138. if (frac != -1 || intval != -1) {
  1139. uint32_t flag2_value = 0;
  1140. uint32_t flag2_targ_addr;
  1141. flag2_targ_addr =
  1142. host_interest_item_address(target_type,
  1143. offsetof(struct host_interest_s,
  1144. hi_clock_info));
  1145. hif_diag_read_access(hif_hdl,
  1146. flag2_targ_addr, &flag2_value);
  1147. qdf_print("\n ====> FRAC Val %x Address %x", frac,
  1148. flag2_value);
  1149. hif_diag_write_access(hif_hdl, flag2_value, frac);
  1150. qdf_print("\n INT Val %x Address %x",
  1151. intval, flag2_value + 4);
  1152. hif_diag_write_access(hif_hdl,
  1153. flag2_value + 4, intval);
  1154. } else {
  1155. qdf_print(KERN_INFO
  1156. "%s: no frac provided, skipping pre-configuring PLL",
  1157. __func__);
  1158. }
  1159. /* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
  1160. if ((target_type == TARGET_TYPE_AR900B)
  1161. && (tgt_info->target_revision == AR900B_REV_2)
  1162. && ar900b_20_targ_clk != -1) {
  1163. uint32_t flag2_value = 0;
  1164. uint32_t flag2_targ_addr;
  1165. flag2_targ_addr
  1166. = host_interest_item_address(target_type,
  1167. offsetof(struct host_interest_s,
  1168. hi_desired_cpu_speed_hz));
  1169. hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1170. &flag2_value);
  1171. qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
  1172. flag2_value);
  1173. hif_diag_write_access(hif_hdl, flag2_value,
  1174. ar900b_20_targ_clk/*300000000u*/);
  1175. } else if (target_type == TARGET_TYPE_QCA9888) {
  1176. uint32_t flag2_targ_addr;
  1177. if (200000000u != qca9888_20_targ_clk) {
  1178. qca9888_20_targ_clk = 300000000u;
  1179. /* Setting the target clock speed to 300 mhz */
  1180. }
  1181. flag2_targ_addr
  1182. = host_interest_item_address(target_type,
  1183. offsetof(struct host_interest_s,
  1184. hi_desired_cpu_speed_hz));
  1185. hif_diag_write_access(hif_hdl, flag2_targ_addr,
  1186. qca9888_20_targ_clk);
  1187. } else {
  1188. qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
  1189. __func__);
  1190. }
  1191. } else {
  1192. if (frac != -1 || intval != -1) {
  1193. uint32_t flag2_value = 0;
  1194. uint32_t flag2_targ_addr =
  1195. host_interest_item_address(target_type,
  1196. offsetof(struct host_interest_s,
  1197. hi_clock_info));
  1198. hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1199. &flag2_value);
  1200. qdf_print("\n ====> FRAC Val %x Address %x", frac,
  1201. flag2_value);
  1202. hif_diag_write_access(hif_hdl, flag2_value, frac);
  1203. qdf_print("\n INT Val %x Address %x", intval,
  1204. flag2_value + 4);
  1205. hif_diag_write_access(hif_hdl, flag2_value + 4,
  1206. intval);
  1207. }
  1208. }
  1209. }
  1210. #else
  1211. static void hif_set_hia_extnd(struct hif_softc *scn)
  1212. {
  1213. }
  1214. #endif
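/*
 * Illustrative sketch (not compiled into the driver): the host-interest
 * accesses above follow a read-modify-write pattern over the diag window.
 * The helper below is hypothetical and only shows that pattern with the
 * same APIs used in hif_set_hia_extnd(); error handling is minimal.
 */
#if 0
static QDF_STATUS hif_hia_set_flag_example(struct hif_opaque_softc *hif_hdl,
					   uint32_t target_type, uint32_t flag)
{
	uint32_t addr = host_interest_item_address(target_type,
				offsetof(struct host_interest_s,
					 hi_option_flag2));
	uint32_t value = 0;
	QDF_STATUS status;

	/* read the current host-interest word through the diag CE */
	status = hif_diag_read_access(hif_hdl, addr, &value);
	if (status != QDF_STATUS_SUCCESS)
		return status;

	/* set the requested flag and write it back */
	value |= flag;
	return hif_diag_write_access(hif_hdl, addr, value);
}
#endif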
  1215. /**
  1216. * hif_set_hia() - fill out the host interest area
  1217. * @scn: hif context
  1218. *
  1219. * This is replaced by hif_wlan_enable for integrated targets.
  1220. * This fills out the host interest area. The firmware will
  1221. * process these memory addresses when it is first brought out
  1222. * of reset.
  1223. *
  1224. * Return: 0 for success.
  1225. */
  1226. static int hif_set_hia(struct hif_softc *scn)
  1227. {
  1228. QDF_STATUS rv;
  1229. uint32_t interconnect_targ_addr = 0;
  1230. uint32_t pcie_state_targ_addr = 0;
  1231. uint32_t pipe_cfg_targ_addr = 0;
  1232. uint32_t svc_to_pipe_map = 0;
  1233. uint32_t pcie_config_flags = 0;
  1234. uint32_t flag2_value = 0;
  1235. uint32_t flag2_targ_addr = 0;
  1236. #ifdef QCA_WIFI_3_0
  1237. uint32_t host_interest_area = 0;
  1238. uint8_t i;
  1239. #else
  1240. uint32_t ealloc_value = 0;
  1241. uint32_t ealloc_targ_addr = 0;
  1242. uint8_t banks_switched = 1;
  1243. uint32_t chip_id;
  1244. #endif
  1245. uint32_t pipe_cfg_addr;
  1246. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
  1247. struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
  1248. uint32_t target_type = tgt_info->target_type;
  1249. uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
  1250. static struct CE_pipe_config *target_ce_config;
  1251. struct service_to_pipe *target_service_to_ce_map;
  1252. hif_info("E");
  1253. hif_get_target_ce_config(scn,
  1254. &target_ce_config, &target_ce_config_sz,
  1255. &target_service_to_ce_map,
  1256. &target_service_to_ce_map_sz,
  1257. NULL, NULL);
  1258. if (ADRASTEA_BU)
  1259. return 0;
  1260. #ifdef QCA_WIFI_3_0
  1261. i = 0;
  1262. while (i < HIF_HIA_MAX_POLL_LOOP) {
  1263. host_interest_area = hif_read32_mb(scn, scn->mem +
  1264. A_SOC_CORE_SCRATCH_0_ADDRESS);
  1265. if ((host_interest_area & 0x01) == 0) {
  1266. qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
  1267. host_interest_area = 0;
  1268. i++;
1269. if ((i % 1000) == 0)
1270. hif_err("hia scratch reg poll in progress: %d", i);
  1271. } else {
  1272. host_interest_area &= (~0x01);
  1273. hif_write32_mb(scn, scn->mem + 0x113014, 0);
  1274. break;
  1275. }
  1276. }
  1277. if (i >= HIF_HIA_MAX_POLL_LOOP) {
  1278. hif_err("hia polling timeout");
  1279. return -EIO;
  1280. }
  1281. if (host_interest_area == 0) {
  1282. hif_err("host_interest_area = 0");
  1283. return -EIO;
  1284. }
  1285. interconnect_targ_addr = host_interest_area +
  1286. offsetof(struct host_interest_area_t,
  1287. hi_interconnect_state);
  1288. flag2_targ_addr = host_interest_area +
  1289. offsetof(struct host_interest_area_t, hi_option_flag2);
  1290. #else
  1291. interconnect_targ_addr = hif_hia_item_address(target_type,
  1292. offsetof(struct host_interest_s, hi_interconnect_state));
  1293. ealloc_targ_addr = hif_hia_item_address(target_type,
  1294. offsetof(struct host_interest_s, hi_early_alloc));
  1295. flag2_targ_addr = hif_hia_item_address(target_type,
  1296. offsetof(struct host_interest_s, hi_option_flag2));
  1297. #endif
  1298. /* Supply Target-side CE configuration */
  1299. rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
  1300. &pcie_state_targ_addr);
  1301. if (rv != QDF_STATUS_SUCCESS) {
  1302. hif_err("interconnect_targ_addr = 0x%0x, ret = %d",
  1303. interconnect_targ_addr, rv);
  1304. goto done;
  1305. }
  1306. if (pcie_state_targ_addr == 0) {
  1307. rv = QDF_STATUS_E_FAILURE;
  1308. hif_err("pcie state addr is 0");
  1309. goto done;
  1310. }
  1311. pipe_cfg_addr = pcie_state_targ_addr +
  1312. offsetof(struct pcie_state_s,
  1313. pipe_cfg_addr);
  1314. rv = hif_diag_read_access(hif_hdl,
  1315. pipe_cfg_addr,
  1316. &pipe_cfg_targ_addr);
  1317. if (rv != QDF_STATUS_SUCCESS) {
  1318. hif_err("pipe_cfg_addr = 0x%0x, ret = %d", pipe_cfg_addr, rv);
  1319. goto done;
  1320. }
  1321. if (pipe_cfg_targ_addr == 0) {
  1322. rv = QDF_STATUS_E_FAILURE;
  1323. hif_err("pipe cfg addr is 0");
  1324. goto done;
  1325. }
  1326. rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
  1327. (uint8_t *) target_ce_config,
  1328. target_ce_config_sz);
  1329. if (rv != QDF_STATUS_SUCCESS) {
  1330. hif_err("write pipe cfg: %d", rv);
  1331. goto done;
  1332. }
  1333. rv = hif_diag_read_access(hif_hdl,
  1334. pcie_state_targ_addr +
  1335. offsetof(struct pcie_state_s,
  1336. svc_to_pipe_map),
  1337. &svc_to_pipe_map);
  1338. if (rv != QDF_STATUS_SUCCESS) {
  1339. hif_err("get svc/pipe map: %d", rv);
  1340. goto done;
  1341. }
  1342. if (svc_to_pipe_map == 0) {
  1343. rv = QDF_STATUS_E_FAILURE;
  1344. hif_err("svc_to_pipe map is 0");
  1345. goto done;
  1346. }
  1347. rv = hif_diag_write_mem(hif_hdl,
  1348. svc_to_pipe_map,
  1349. (uint8_t *) target_service_to_ce_map,
  1350. target_service_to_ce_map_sz);
  1351. if (rv != QDF_STATUS_SUCCESS) {
  1352. hif_err("write svc/pipe map: %d", rv);
  1353. goto done;
  1354. }
  1355. rv = hif_diag_read_access(hif_hdl,
  1356. pcie_state_targ_addr +
  1357. offsetof(struct pcie_state_s,
  1358. config_flags),
  1359. &pcie_config_flags);
  1360. if (rv != QDF_STATUS_SUCCESS) {
  1361. hif_err("get pcie config_flags: %d", rv);
  1362. goto done;
  1363. }
  1364. #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
  1365. pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
  1366. #else
  1367. pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
  1368. #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
  1369. pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
  1370. #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
  1371. pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
  1372. #endif
  1373. rv = hif_diag_write_mem(hif_hdl,
  1374. pcie_state_targ_addr +
  1375. offsetof(struct pcie_state_s,
  1376. config_flags),
  1377. (uint8_t *) &pcie_config_flags,
  1378. sizeof(pcie_config_flags));
  1379. if (rv != QDF_STATUS_SUCCESS) {
  1380. hif_err("write pcie config_flags: %d", rv);
  1381. goto done;
  1382. }
  1383. #ifndef QCA_WIFI_3_0
  1384. /* configure early allocation */
  1385. ealloc_targ_addr = hif_hia_item_address(target_type,
  1386. offsetof(
  1387. struct host_interest_s,
  1388. hi_early_alloc));
  1389. rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
  1390. &ealloc_value);
  1391. if (rv != QDF_STATUS_SUCCESS) {
  1392. hif_err("get early alloc val: %d", rv);
  1393. goto done;
  1394. }
  1395. /* 1 bank is switched to IRAM, except ROME 1.0 */
  1396. ealloc_value |=
  1397. ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
  1398. HI_EARLY_ALLOC_MAGIC_MASK);
  1399. rv = hif_diag_read_access(hif_hdl,
  1400. CHIP_ID_ADDRESS |
  1401. RTC_SOC_BASE_ADDRESS, &chip_id);
  1402. if (rv != QDF_STATUS_SUCCESS) {
  1403. hif_err("get chip id val: %d", rv);
  1404. goto done;
  1405. }
  1406. if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
  1407. tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
  1408. switch (CHIP_ID_REVISION_GET(chip_id)) {
  1409. case 0x2: /* ROME 1.3 */
  1410. /* 2 banks are switched to IRAM */
  1411. banks_switched = 2;
  1412. break;
  1413. case 0x4: /* ROME 2.1 */
  1414. case 0x5: /* ROME 2.2 */
  1415. banks_switched = 6;
  1416. break;
  1417. case 0x8: /* ROME 3.0 */
  1418. case 0x9: /* ROME 3.1 */
  1419. case 0xA: /* ROME 3.2 */
  1420. banks_switched = 9;
  1421. break;
  1422. case 0x0: /* ROME 1.0 */
  1423. case 0x1: /* ROME 1.1 */
  1424. default:
  1425. /* 3 banks are switched to IRAM */
  1426. banks_switched = 3;
  1427. break;
  1428. }
  1429. }
  1430. ealloc_value |=
  1431. ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
  1432. & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
  1433. rv = hif_diag_write_access(hif_hdl,
  1434. ealloc_targ_addr,
  1435. ealloc_value);
  1436. if (rv != QDF_STATUS_SUCCESS) {
  1437. hif_err("set early alloc val: %d", rv);
  1438. goto done;
  1439. }
  1440. #endif
  1441. if ((target_type == TARGET_TYPE_AR900B)
  1442. || (target_type == TARGET_TYPE_QCA9984)
  1443. || (target_type == TARGET_TYPE_QCA9888)
  1444. || (target_type == TARGET_TYPE_AR9888)) {
  1445. hif_set_hia_extnd(scn);
  1446. }
  1447. /* Tell Target to proceed with initialization */
  1448. flag2_targ_addr = hif_hia_item_address(target_type,
  1449. offsetof(
  1450. struct host_interest_s,
  1451. hi_option_flag2));
  1452. rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1453. &flag2_value);
  1454. if (rv != QDF_STATUS_SUCCESS) {
  1455. hif_err("get option val: %d", rv);
  1456. goto done;
  1457. }
  1458. flag2_value |= HI_OPTION_EARLY_CFG_DONE;
  1459. rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
  1460. flag2_value);
  1461. if (rv != QDF_STATUS_SUCCESS) {
  1462. hif_err("set option val: %d", rv);
  1463. goto done;
  1464. }
  1465. hif_wake_target_cpu(scn);
  1466. done:
  1467. return qdf_status_to_os_return(rv);
  1468. }
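/*
 * Minimal sketch (not compiled): the bounded poll used above for the
 * scratch register, factored out for clarity. The register offset and bit
 * meaning are taken from hif_set_hia(); the helper name is hypothetical.
 */
#if 0
static int hif_poll_reg_bit_set_example(struct hif_softc *scn, uint32_t offset)
{
	uint32_t val;
	int i;

	for (i = 0; i < HIF_HIA_MAX_POLL_LOOP; i++) {
		val = hif_read32_mb(scn, scn->mem + offset);
		if (val & 0x01)
			return 0;               /* bit set: target ready */
		qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
	}
	return -EIO;                            /* timed out */
}
#endif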
  1469. /**
  1470. * hif_pci_bus_configure() - configure the pcie bus
  1471. * @hif_sc: pointer to the hif context.
  1472. *
  1473. * return: 0 for success. nonzero for failure.
  1474. */
  1475. int hif_pci_bus_configure(struct hif_softc *hif_sc)
  1476. {
  1477. int status = 0;
  1478. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
  1479. struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
  1480. hif_ce_prepare_config(hif_sc);
  1481. /* initialize sleep state adjust variables */
1482. hif_state->sleep_timer_init = false;
  1483. hif_state->keep_awake_count = 0;
  1484. hif_state->fake_sleep = false;
  1485. hif_state->sleep_ticks = 0;
  1486. qdf_timer_init(NULL, &hif_state->sleep_timer,
  1487. hif_sleep_entry, (void *)hif_state,
  1488. QDF_TIMER_TYPE_WAKE_APPS);
  1489. hif_state->sleep_timer_init = true;
  1490. status = hif_wlan_enable(hif_sc);
  1491. if (status) {
  1492. hif_err("hif_wlan_enable error: %d", status);
  1493. goto timer_free;
  1494. }
  1495. A_TARGET_ACCESS_LIKELY(hif_sc);
  1496. if ((CONFIG_ATH_PCIE_MAX_PERF ||
  1497. CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
  1498. !ce_srng_based(hif_sc)) {
  1499. /*
  1500. * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
  1501. * prevent sleep when we want to keep firmware always awake
  1502. * note: when we want to keep firmware always awake,
  1503. * hif_target_sleep_state_adjust will point to a dummy
  1504. * function, and hif_pci_target_sleep_state_adjust must
  1505. * be called instead.
  1506. * note: bus type check is here because AHB bus is reusing
  1507. * hif_pci_bus_configure code.
  1508. */
  1509. if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
  1510. if (hif_pci_target_sleep_state_adjust(hif_sc,
  1511. false, true) < 0) {
  1512. status = -EACCES;
  1513. goto disable_wlan;
  1514. }
  1515. }
  1516. }
  1517. /* todo: consider replacing this with an srng field */
  1518. if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
  1519. (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
  1520. (hif_sc->target_info.target_type == TARGET_TYPE_QCA9574) ||
  1521. (hif_sc->target_info.target_type == TARGET_TYPE_QCA5332) ||
  1522. (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
  1523. (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
  1524. (hif_sc->target_info.target_type == TARGET_TYPE_QCN9160) ||
  1525. (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018) ||
  1526. (hif_sc->target_info.target_type == TARGET_TYPE_QCN6432)) &&
  1527. (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
  1528. hif_sc->per_ce_irq = true;
  1529. }
  1530. status = hif_config_ce(hif_sc);
  1531. if (status)
  1532. goto disable_wlan;
  1533. if (hif_needs_bmi(hif_osc)) {
  1534. status = hif_set_hia(hif_sc);
  1535. if (status)
  1536. goto unconfig_ce;
  1537. hif_debug("hif_set_hia done");
  1538. }
  1539. if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
  1540. (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
  1541. (hif_sc->target_info.target_type == TARGET_TYPE_QCA9574) ||
  1542. (hif_sc->target_info.target_type == TARGET_TYPE_QCA5332) ||
  1543. (hif_sc->target_info.target_type == TARGET_TYPE_QCA5018) ||
  1544. (hif_sc->target_info.target_type == TARGET_TYPE_QCN6122) ||
  1545. (hif_sc->target_info.target_type == TARGET_TYPE_QCN9160) ||
  1546. (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018) ||
  1547. (hif_sc->target_info.target_type == TARGET_TYPE_QCN6432)) &&
  1548. (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
  1549. hif_debug("Skip irq config for PCI based 8074 target");
  1550. else {
  1551. status = hif_configure_irq(hif_sc);
  1552. if (status < 0)
  1553. goto unconfig_ce;
  1554. }
  1555. A_TARGET_ACCESS_UNLIKELY(hif_sc);
  1556. return status;
  1557. unconfig_ce:
  1558. hif_unconfig_ce(hif_sc);
  1559. disable_wlan:
  1560. A_TARGET_ACCESS_UNLIKELY(hif_sc);
  1561. hif_wlan_disable(hif_sc);
  1562. timer_free:
  1563. qdf_timer_stop(&hif_state->sleep_timer);
  1564. qdf_timer_free(&hif_state->sleep_timer);
  1565. hif_state->sleep_timer_init = false;
  1566. hif_err("Failed, status: %d", status);
  1567. return status;
  1568. }
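/*
 * Sketch (not compiled): how a bus-layer probe path might drive the
 * configuration above. The surrounding probe function is hypothetical;
 * only hif_pci_bus_configure() is real, and it unwinds its own state
 * (CE config, wlan enable, sleep timer) on failure via the goto labels.
 */
#if 0
static int hif_pci_probe_example(struct hif_softc *hif_sc)
{
	int status;

	status = hif_pci_bus_configure(hif_sc);
	if (status)
		return status;

	/* ... attach upper layers here ... */

	return 0;
}
#endif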
  1569. /**
  1570. * hif_pci_close(): hif_bus_close
  1571. * @hif_sc: HIF context
  1572. *
  1573. * Return: n/a
  1574. */
  1575. void hif_pci_close(struct hif_softc *hif_sc)
  1576. {
  1577. hif_rtpm_close(hif_sc);
  1578. hif_ce_close(hif_sc);
  1579. }
  1580. #define BAR_NUM 0
  1581. #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0))
  1582. static inline int hif_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
  1583. {
  1584. return dma_set_mask(&pci_dev->dev, mask);
  1585. }
  1586. static inline int hif_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
  1587. u64 mask)
  1588. {
  1589. return dma_set_coherent_mask(&pci_dev->dev, mask);
  1590. }
  1591. #else /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
  1592. static inline int hif_pci_set_dma_mask(struct pci_dev *pci_dev, u64 mask)
  1593. {
  1594. return pci_set_dma_mask(pci_dev, mask);
  1595. }
  1596. static inline int hif_pci_set_coherent_dma_mask(struct pci_dev *pci_dev,
  1597. u64 mask)
  1598. {
  1599. return pci_set_consistent_dma_mask(pci_dev, mask);
  1600. }
  1601. #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)) */
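/*
 * Sketch (not compiled): typical use of the wrappers above. The driver
 * itself selects the mask at compile time (see CONFIG_ARM_LPAE below);
 * this hypothetical helper only shows a runtime try-64-then-32 fallback.
 */
#if 0
static int hif_pci_example_set_masks(struct pci_dev *pdev)
{
	/* prefer a 64-bit mask when both streaming and coherent succeed */
	if (!hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
	    !hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;

	/* otherwise fall back to 32-bit DMA */
	if (hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		return -EIO;

	return hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(32));
}
#endif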
  1602. static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
  1603. struct pci_dev *pdev,
  1604. const struct pci_device_id *id)
  1605. {
  1606. void __iomem *mem;
  1607. int ret = 0;
  1608. uint16_t device_id = 0;
  1609. struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
  1610. pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
  1611. if (device_id != id->device) {
  1612. hif_err(
  1613. "dev id mismatch, config id = 0x%x, probing id = 0x%x",
  1614. device_id, id->device);
  1615. /* pci link is down, so returning with error code */
  1616. return -EIO;
  1617. }
1618. /* FIXME: pci_assign_resource() is skipped on ARM builds so that
1619. * dev_attach works on the 2.6.38 kernel.
  1620. */
  1621. #if (!defined(__LINUX_ARM_ARCH__))
  1622. if (pci_assign_resource(pdev, BAR_NUM)) {
  1623. hif_err("pci_assign_resource error");
  1624. return -EIO;
  1625. }
  1626. #endif
  1627. if (pci_enable_device(pdev)) {
  1628. hif_err("pci_enable_device error");
  1629. return -EIO;
  1630. }
  1631. /* Request MMIO resources */
  1632. ret = pci_request_region(pdev, BAR_NUM, "ath");
  1633. if (ret) {
  1634. hif_err("PCI MMIO reservation error");
  1635. ret = -EIO;
  1636. goto err_region;
  1637. }
  1638. #ifdef CONFIG_ARM_LPAE
1639. /* if CONFIG_ARM_LPAE is enabled, the 64-bit DMA mask must be set
1640. * even for 32-bit devices.
  1641. */
  1642. ret = hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  1643. if (ret) {
  1644. hif_err("Cannot enable 64-bit pci DMA");
  1645. goto err_dma;
  1646. }
  1647. ret = hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(64));
  1648. if (ret) {
  1649. hif_err("Cannot enable 64-bit DMA");
  1650. goto err_dma;
  1651. }
  1652. #else
  1653. ret = hif_pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  1654. if (ret) {
  1655. hif_err("Cannot enable 32-bit pci DMA");
  1656. goto err_dma;
  1657. }
  1658. ret = hif_pci_set_coherent_dma_mask(pdev, DMA_BIT_MASK(32));
  1659. if (ret) {
  1660. hif_err("Cannot enable 32-bit coherent DMA!");
  1661. goto err_dma;
  1662. }
  1663. #endif
  1664. PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
  1665. /* Set bus master bit in PCI_COMMAND to enable DMA */
  1666. pci_set_master(pdev);
  1667. /* Arrange for access to Target SoC registers. */
  1668. mem = pci_iomap(pdev, BAR_NUM, 0);
  1669. if (!mem) {
  1670. hif_err("PCI iomap error");
  1671. ret = -EIO;
  1672. goto err_iomap;
  1673. }
  1674. hif_info("*****BAR is %pK", (void *)mem);
  1675. sc->mem = mem;
  1676. /* Hawkeye emulation specific change */
  1677. if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
  1678. (device_id == RUMIM2M_DEVICE_ID_NODE1) ||
  1679. (device_id == RUMIM2M_DEVICE_ID_NODE2) ||
  1680. (device_id == RUMIM2M_DEVICE_ID_NODE3) ||
  1681. (device_id == RUMIM2M_DEVICE_ID_NODE4) ||
  1682. (device_id == RUMIM2M_DEVICE_ID_NODE5)) {
  1683. mem = mem + 0x0c000000;
  1684. sc->mem = mem;
  1685. hif_info("Changing PCI mem base to %pK", sc->mem);
  1686. }
  1687. sc->mem_len = pci_resource_len(pdev, BAR_NUM);
  1688. ol_sc->mem = mem;
  1689. ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
  1690. sc->pci_enabled = true;
  1691. return ret;
  1692. err_iomap:
  1693. pci_clear_master(pdev);
  1694. err_dma:
  1695. pci_release_region(pdev, BAR_NUM);
  1696. err_region:
  1697. pci_disable_device(pdev);
  1698. return ret;
  1699. }
  1700. static int hif_enable_pci_pld(struct hif_pci_softc *sc,
  1701. struct pci_dev *pdev,
  1702. const struct pci_device_id *id)
  1703. {
  1704. PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
  1705. sc->pci_enabled = true;
  1706. return 0;
  1707. }
  1708. static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
  1709. {
  1710. pci_disable_msi(sc->pdev);
  1711. pci_iounmap(sc->pdev, sc->mem);
  1712. pci_clear_master(sc->pdev);
  1713. pci_release_region(sc->pdev, BAR_NUM);
  1714. pci_disable_device(sc->pdev);
  1715. }
  1716. static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
  1717. static void hif_disable_pci(struct hif_pci_softc *sc)
  1718. {
  1719. struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
  1720. if (!ol_sc) {
  1721. hif_err("ol_sc = NULL");
  1722. return;
  1723. }
  1724. hif_pci_device_reset(sc);
  1725. sc->hif_pci_deinit(sc);
  1726. sc->mem = NULL;
  1727. ol_sc->mem = NULL;
  1728. }
  1729. static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
  1730. {
  1731. int ret = 0;
  1732. int targ_awake_limit = 500;
  1733. #ifndef QCA_WIFI_3_0
  1734. uint32_t fw_indicator;
  1735. #endif
  1736. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  1737. /*
1738. * Verify that the Target was started cleanly.
  1739. * The case where this is most likely is with an AUX-powered
  1740. * Target and a Host in WoW mode. If the Host crashes,
  1741. * loses power, or is restarted (without unloading the driver)
  1742. * then the Target is left (aux) powered and running. On a
  1743. * subsequent driver load, the Target is in an unexpected state.
  1744. * We try to catch that here in order to reset the Target and
  1745. * retry the probe.
  1746. */
  1747. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  1748. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  1749. while (!hif_targ_is_awake(scn, sc->mem)) {
  1750. if (0 == targ_awake_limit) {
  1751. hif_err("target awake timeout");
  1752. ret = -EAGAIN;
  1753. goto end;
  1754. }
  1755. qdf_mdelay(1);
  1756. targ_awake_limit--;
  1757. }
  1758. #if PCIE_BAR0_READY_CHECKING
  1759. {
  1760. int wait_limit = 200;
  1761. /* Synchronization point: wait the BAR0 is configured */
  1762. while (wait_limit-- &&
1763. !(hif_read32_mb(sc, sc->mem +
  1764. PCIE_LOCAL_BASE_ADDRESS +
  1765. PCIE_SOC_RDY_STATUS_ADDRESS)
  1766. & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
  1767. qdf_mdelay(10);
  1768. }
  1769. if (wait_limit < 0) {
  1770. /* AR6320v1 doesn't support checking of BAR0
1771. * configuration; wait about two seconds for BAR0 to be ready
  1772. */
  1773. hif_debug("AR6320v1 waits two sec for BAR0");
  1774. }
  1775. }
  1776. #endif
  1777. #ifndef QCA_WIFI_3_0
  1778. fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
  1779. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  1780. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
  1781. if (fw_indicator & FW_IND_INITIALIZED) {
  1782. hif_err("Target is in an unknown state. EAGAIN");
  1783. ret = -EAGAIN;
  1784. goto end;
  1785. }
  1786. #endif
  1787. end:
  1788. return ret;
  1789. }
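/*
 * Sketch (not compiled): the wake handshake used above, reduced to its
 * three steps. All registers and helpers are the ones used in
 * hif_pci_probe_tgt_wakeup(); the wrapper itself is hypothetical.
 */
#if 0
static int hif_example_force_awake(struct hif_pci_softc *sc, int retries)
{
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	/* 1. request SoC wake */
	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);

	/* 2. poll until the target reports awake */
	while (!hif_targ_is_awake(scn, sc->mem)) {
		if (retries-- == 0)
			return -EAGAIN;
		qdf_mdelay(1);
	}

	/* 3. release the wake request once done */
	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
	return 0;
}
#endif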
  1790. static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
  1791. {
  1792. int ret = 0;
  1793. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  1794. uint32_t target_type = scn->target_info.target_type;
  1795. hif_info("E");
1796. /* MSI is not supported, or the MSI IRQ request failed */
  1797. tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
  1798. ret = request_irq(sc->pdev->irq,
  1799. hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
  1800. "wlan_pci", sc);
  1801. if (ret) {
  1802. hif_err("request_irq failed, ret: %d", ret);
  1803. goto end;
  1804. }
  1805. scn->wake_irq = sc->pdev->irq;
1806. /* Use sc->irq instead of sc->pdev->irq;
  1807. * platform_device pdev doesn't have an irq field
  1808. */
  1809. sc->irq = sc->pdev->irq;
  1810. /* Use Legacy PCI Interrupts */
  1811. hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
  1812. PCIE_INTR_ENABLE_ADDRESS),
  1813. HOST_GROUP0_MASK);
  1814. hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
  1815. PCIE_INTR_ENABLE_ADDRESS));
  1816. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  1817. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
  1818. if ((target_type == TARGET_TYPE_AR900B) ||
  1819. (target_type == TARGET_TYPE_QCA9984) ||
  1820. (target_type == TARGET_TYPE_AR9888) ||
  1821. (target_type == TARGET_TYPE_QCA9888) ||
  1822. (target_type == TARGET_TYPE_AR6320V1) ||
  1823. (target_type == TARGET_TYPE_AR6320V2) ||
  1824. (target_type == TARGET_TYPE_AR6320V3)) {
  1825. hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
  1826. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  1827. }
  1828. end:
  1829. QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
  1830. "%s: X, ret = %d", __func__, ret);
  1831. return ret;
  1832. }
  1833. static int hif_ce_srng_free_irq(struct hif_softc *scn)
  1834. {
  1835. int ret = 0;
  1836. int ce_id, irq;
  1837. uint32_t msi_data_start;
  1838. uint32_t msi_data_count;
  1839. uint32_t msi_irq_start;
  1840. struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
  1841. struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
  1842. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  1843. if (!pld_get_enable_intx(scn->qdf_dev->dev)) {
  1844. ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
  1845. &msi_data_count,
  1846. &msi_data_start,
  1847. &msi_irq_start);
  1848. if (ret)
  1849. return ret;
  1850. }
  1851. /* needs to match the ce_id -> irq data mapping
  1852. * used in the srng parameter configuration
  1853. */
  1854. for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
  1855. if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
  1856. continue;
  1857. if (!ce_sc->tasklets[ce_id].inited)
  1858. continue;
  1859. irq = sc->ce_irq_num[ce_id];
  1860. hif_irq_affinity_remove(irq);
  1861. hif_debug("%s: (ce_id %d, irq %d)", __func__, ce_id, irq);
  1862. pfrm_free_irq(scn->qdf_dev->dev, irq, &ce_sc->tasklets[ce_id]);
  1863. }
  1864. return ret;
  1865. }
  1866. void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
  1867. {
  1868. int i, j, irq;
  1869. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  1870. struct hif_exec_context *hif_ext_group;
  1871. for (i = 0; i < hif_state->hif_num_extgroup; i++) {
  1872. hif_ext_group = hif_state->hif_ext_group[i];
  1873. if (hif_ext_group->irq_requested) {
  1874. hif_ext_group->irq_requested = false;
  1875. for (j = 0; j < hif_ext_group->numirq; j++) {
  1876. irq = hif_ext_group->os_irq[j];
  1877. if (scn->irq_unlazy_disable) {
  1878. qdf_dev_clear_irq_status_flags(
  1879. irq,
  1880. QDF_IRQ_DISABLE_UNLAZY);
  1881. }
  1882. hif_irq_affinity_remove(irq);
  1883. pfrm_free_irq(scn->qdf_dev->dev,
  1884. irq, hif_ext_group);
  1885. }
  1886. hif_ext_group->numirq = 0;
  1887. }
  1888. }
  1889. }
  1890. /**
  1891. * hif_pci_nointrs(): disable IRQ
  1892. * @scn: struct hif_softc
  1893. *
  1894. * This function stops interrupt(s)
  1895. *
  1896. * Return: none
  1897. */
  1898. void hif_pci_nointrs(struct hif_softc *scn)
  1899. {
  1900. int i, ret;
  1901. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  1902. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  1903. scn->free_irq_done = true;
  1904. ce_unregister_irq(hif_state, CE_ALL_BITMAP);
  1905. if (scn->request_irq_done == false)
  1906. return;
  1907. hif_pci_deconfigure_grp_irq(scn);
  1908. ret = hif_ce_srng_free_irq(scn);
  1909. if (ret != -EINVAL) {
  1910. /* ce irqs freed in hif_ce_srng_free_irq */
  1911. if (scn->wake_irq)
  1912. pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
  1913. scn->wake_irq = 0;
  1914. } else if (sc->num_msi_intrs > 0) {
  1915. /* MSI interrupt(s) */
  1916. for (i = 0; i < sc->num_msi_intrs; i++)
  1917. free_irq(sc->irq + i, sc);
  1918. sc->num_msi_intrs = 0;
  1919. } else {
  1920. /* Legacy PCI line interrupt
1921. * Use sc->irq instead of sc->pdev->irq;
  1922. * platform_device pdev doesn't have an irq field
  1923. */
  1924. free_irq(sc->irq, sc);
  1925. }
  1926. scn->request_irq_done = false;
  1927. }
  1928. static inline
  1929. bool hif_pci_default_link_up(struct hif_target_info *tgt_info)
  1930. {
  1931. if (ADRASTEA_BU && (tgt_info->target_type != TARGET_TYPE_QCN7605))
  1932. return true;
  1933. else
  1934. return false;
  1935. }
  1936. /**
  1937. * hif_pci_disable_bus(): hif_disable_bus
  1938. * @scn: hif context
  1939. *
  1940. * This function disables the bus
  1941. *
  1942. * Return: none
  1943. */
  1944. void hif_pci_disable_bus(struct hif_softc *scn)
  1945. {
  1946. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  1947. struct pci_dev *pdev;
  1948. void __iomem *mem;
  1949. struct hif_target_info *tgt_info = &scn->target_info;
  1950. /* Attach did not succeed, all resources have been
  1951. * freed in error handler
  1952. */
  1953. if (!sc)
  1954. return;
  1955. pdev = sc->pdev;
  1956. if (hif_pci_default_link_up(tgt_info)) {
  1957. hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
  1958. hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
  1959. hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
  1960. HOST_GROUP0_MASK);
  1961. }
  1962. #if defined(CPU_WARM_RESET_WAR)
  1963. /* Currently CPU warm reset sequence is tested only for AR9888_REV2
  1964. * Need to enable for AR9888_REV1 once CPU warm reset sequence is
  1965. * verified for AR9888_REV1
  1966. */
  1967. if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
  1968. (tgt_info->target_version == AR9887_REV1_VERSION))
  1969. hif_pci_device_warm_reset(sc);
  1970. else
  1971. hif_pci_device_reset(sc);
  1972. #else
  1973. hif_pci_device_reset(sc);
  1974. #endif
  1975. mem = (void __iomem *)sc->mem;
  1976. if (mem) {
  1977. hif_dump_pipe_debug_count(scn);
  1978. if (scn->athdiag_procfs_inited) {
  1979. athdiag_procfs_remove();
  1980. scn->athdiag_procfs_inited = false;
  1981. }
  1982. sc->hif_pci_deinit(sc);
  1983. scn->mem = NULL;
  1984. }
  1985. hif_info("X");
  1986. }
  1987. #define OL_ATH_PCI_PM_CONTROL 0x44
  1988. #ifdef CONFIG_PLD_PCIE_CNSS
  1989. /**
1990. * hif_pci_prevent_linkdown(): prevent or allow linkdown
  1991. * @scn: hif context
  1992. * @flag: true prevents linkdown, false allows
  1993. *
  1994. * Calls into the platform driver to vote against taking down the
  1995. * pcie link.
  1996. *
  1997. * Return: n/a
  1998. */
  1999. void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
  2000. {
  2001. int errno;
  2002. hif_info("wlan: %s pcie power collapse", flag ? "disable" : "enable");
  2003. hif_runtime_prevent_linkdown(scn, flag);
  2004. errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
  2005. if (errno)
  2006. hif_err("Failed pld_wlan_pm_control; errno %d", errno);
  2007. }
  2008. #else
  2009. void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
  2010. {
  2011. }
  2012. #endif
  2013. #ifdef CONFIG_PCI_LOW_POWER_INT_REG
  2014. /**
  2015. * hif_pci_config_low_power_int_register() - configure pci low power
  2016. * interrupt register.
  2017. * @scn: hif context
2018. * @enable: true to set the bits, false to clear them.
  2019. *
  2020. * Configure the bits INTR_L1SS and INTR_CLKPM of
  2021. * PCIE_LOW_POWER_INT_MASK register.
  2022. *
  2023. * Return: n/a
  2024. */
  2025. static void hif_pci_config_low_power_int_register(struct hif_softc *scn,
  2026. bool enable)
  2027. {
  2028. void *address;
  2029. uint32_t value;
  2030. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
  2031. struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
  2032. uint32_t target_type = tgt_info->target_type;
  2033. /*
  2034. * Only configure the bits INTR_L1SS and INTR_CLKPM of
2035. * the PCIE_LOW_POWER_INT_MASK register for QCA6174, to work around a
2036. * high power consumption issue: NFA344A draws above 80mA after
2037. * entering Modern Standby, and the power only drops back to normal
2038. * after PERST# de-assert.
  2039. */
  2040. if ((target_type == TARGET_TYPE_AR6320) ||
  2041. (target_type == TARGET_TYPE_AR6320V1) ||
  2042. (target_type == TARGET_TYPE_AR6320V2) ||
  2043. (target_type == TARGET_TYPE_AR6320V3)) {
  2044. hif_info("Configure PCI low power int mask register");
  2045. address = scn->mem + PCIE_LOW_POWER_INT_MASK_OFFSET;
  2046. /* Configure bit3 INTR_L1SS */
  2047. value = hif_read32_mb(scn, address);
  2048. if (enable)
  2049. value |= INTR_L1SS;
  2050. else
  2051. value &= ~INTR_L1SS;
  2052. hif_write32_mb(scn, address, value);
  2053. /* Configure bit4 INTR_CLKPM */
  2054. value = hif_read32_mb(scn, address);
  2055. if (enable)
  2056. value |= INTR_CLKPM;
  2057. else
  2058. value &= ~INTR_CLKPM;
  2059. hif_write32_mb(scn, address, value);
  2060. }
  2061. }
  2062. #else
  2063. static inline void hif_pci_config_low_power_int_register(struct hif_softc *scn,
  2064. bool enable)
  2065. {
  2066. }
  2067. #endif
  2068. /**
  2069. * hif_pci_bus_suspend(): prepare hif for suspend
  2070. * @scn: hif context
  2071. *
  2072. * Return: Errno
  2073. */
  2074. int hif_pci_bus_suspend(struct hif_softc *scn)
  2075. {
  2076. QDF_STATUS ret;
  2077. hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
  2078. /*
2079. * In the unlikely case that draining loops indefinitely, it returns an
2080. * error; log the failure and continue with the bus suspend.
  2081. */
  2082. ret = hif_drain_fw_diag_ce(scn);
  2083. if (ret)
  2084. hif_err("draining fw_diag_ce not got cleaned");
  2085. ret = hif_try_complete_tasks(scn);
  2086. if (QDF_IS_STATUS_ERROR(ret)) {
  2087. hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
  2088. return -EBUSY;
  2089. }
  2090. /* Stop the HIF Sleep Timer */
  2091. hif_cancel_deferred_target_sleep(scn);
  2092. /*
2093. * Only the INTR_L1SS/INTR_CLKPM bits need to be cleared for suspend.
2094. * There is no need to set them again on resume, as firmware will
2095. * restore the bits itself.
  2096. */
  2097. hif_pci_config_low_power_int_register(scn, false);
  2098. scn->bus_suspended = true;
  2099. return 0;
  2100. }
  2101. #ifdef PCI_LINK_STATUS_SANITY
  2102. /**
2103. * __hif_check_link_status() - API to check whether the PCIe link is active
  2104. * @scn: HIF Context
  2105. *
  2106. * API reads the PCIe config space to verify if PCIe link training is
  2107. * successful or not.
  2108. *
  2109. * Return: Success/Failure
  2110. */
  2111. static int __hif_check_link_status(struct hif_softc *scn)
  2112. {
  2113. uint16_t dev_id = 0;
  2114. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2115. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  2116. if (!sc) {
  2117. hif_err("HIF Bus Context is Invalid");
  2118. return -EINVAL;
  2119. }
  2120. pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
  2121. if (dev_id == sc->devid)
  2122. return 0;
  2123. hif_err("Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
  2124. dev_id);
  2125. scn->recovery = true;
  2126. if (cbk && cbk->set_recovery_in_progress)
  2127. cbk->set_recovery_in_progress(cbk->context, true);
  2128. else
  2129. hif_err("Driver Global Recovery is not set");
  2130. pld_is_pci_link_down(sc->dev);
  2131. return -EACCES;
  2132. }
  2133. #else
  2134. static inline int __hif_check_link_status(struct hif_softc *scn)
  2135. {
  2136. return 0;
  2137. }
  2138. #endif
  2139. #ifdef HIF_BUS_LOG_INFO
  2140. bool hif_log_pcie_info(struct hif_softc *scn, uint8_t *data,
  2141. unsigned int *offset)
  2142. {
  2143. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2144. struct hang_event_bus_info info = {0};
  2145. size_t size;
  2146. if (!sc) {
  2147. hif_err("HIF Bus Context is Invalid");
  2148. return false;
  2149. }
  2150. pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &info.dev_id);
  2151. size = sizeof(info);
  2152. QDF_HANG_EVT_SET_HDR(&info.tlv_header, HANG_EVT_TAG_BUS_INFO,
  2153. size - QDF_HANG_EVENT_TLV_HDR_SIZE);
  2154. if (*offset + size > QDF_WLAN_HANG_FW_OFFSET)
  2155. return false;
  2156. qdf_mem_copy(data + *offset, &info, size);
  2157. *offset = *offset + size;
  2158. if (info.dev_id == sc->devid)
  2159. return false;
  2160. qdf_recovery_reason_update(QCA_HANG_BUS_FAILURE);
  2161. qdf_get_bus_reg_dump(scn->qdf_dev->dev, data,
  2162. (QDF_WLAN_HANG_FW_OFFSET - size));
  2163. return true;
  2164. }
  2165. #endif
  2166. /**
  2167. * hif_pci_bus_resume(): prepare hif for resume
  2168. * @scn: hif context
  2169. *
  2170. * Return: Errno
  2171. */
  2172. int hif_pci_bus_resume(struct hif_softc *scn)
  2173. {
  2174. int errno;
  2175. scn->bus_suspended = false;
  2176. errno = __hif_check_link_status(scn);
  2177. if (errno)
  2178. return errno;
  2179. hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
  2180. return 0;
  2181. }
  2182. /**
2183. * hif_pci_bus_suspend_noirq() - mark the PCIe link as suspended
  2184. * @scn: hif context
  2185. *
2186. * If hif_can_suspend_link() allows it, record that the PCIe link is
2187. * suspended so that subsequent target accesses are treated as invalid.
2188. *
2189. * Return: 0
  2190. */
  2191. int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
  2192. {
  2193. if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
  2194. qdf_atomic_set(&scn->link_suspended, 1);
  2195. return 0;
  2196. }
  2197. /**
2198. * hif_pci_bus_resume_noirq() - clear the PCIe link suspend flag
  2199. * @scn: hif context
  2200. *
2201. * Clear the link suspend flag during the noirq resume phase, since the
2202. * PCIe link has already been resumed by this point.
2203. *
2204. * Return: 0
  2205. */
  2206. int hif_pci_bus_resume_noirq(struct hif_softc *scn)
  2207. {
  2208. /* a vote for link up can come in the middle of the ongoing resume
  2209. * process. hence, clear the link suspend flag once
  2210. * hif_bus_resume_noirq() succeeds since PCIe link is already resumed
  2211. * by this time
  2212. */
  2213. qdf_atomic_set(&scn->link_suspended, 0);
  2214. return 0;
  2215. }
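/*
 * Sketch (not compiled): expected ordering of the suspend/resume hooks in
 * this file when the platform suspends. The dispatcher shown here is
 * hypothetical; only the hif_pci_bus_* calls are real.
 */
#if 0
static int hif_example_system_suspend(struct hif_softc *scn)
{
	int ret = hif_pci_bus_suspend(scn);      /* irqs still enabled */

	if (ret)
		return ret;
	return hif_pci_bus_suspend_noirq(scn);   /* after irqs are disabled */
}

static int hif_example_system_resume(struct hif_softc *scn)
{
	int ret = hif_pci_bus_resume_noirq(scn); /* before irqs re-enabled */

	if (ret)
		return ret;
	return hif_pci_bus_resume(scn);
}
#endif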
  2216. #if CONFIG_PCIE_64BIT_MSI
  2217. static void hif_free_msi_ctx(struct hif_softc *scn)
  2218. {
  2219. struct hif_pci_softc *sc = scn->hif_sc;
  2220. struct hif_msi_info *info = &sc->msi_info;
  2221. struct device *dev = scn->qdf_dev->dev;
  2222. OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
  2223. OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
  2224. info->magic = NULL;
  2225. info->magic_dma = 0;
  2226. }
  2227. #else
  2228. static void hif_free_msi_ctx(struct hif_softc *scn)
  2229. {
  2230. }
  2231. #endif
  2232. void hif_pci_disable_isr(struct hif_softc *scn)
  2233. {
  2234. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2235. hif_exec_kill(&scn->osc);
  2236. hif_nointrs(scn);
  2237. hif_free_msi_ctx(scn);
  2238. /* Cancel the pending tasklet */
  2239. ce_tasklet_kill(scn);
  2240. tasklet_kill(&sc->intr_tq);
  2241. qdf_atomic_set(&scn->active_tasklet_cnt, 0);
  2242. qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
  2243. }
  2244. /* Function to reset SoC */
  2245. void hif_pci_reset_soc(struct hif_softc *hif_sc)
  2246. {
  2247. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
  2248. struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
  2249. struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);
  2250. #if defined(CPU_WARM_RESET_WAR)
  2251. /* Currently CPU warm reset sequence is tested only for AR9888_REV2
  2252. * Need to enable for AR9888_REV1 once CPU warm reset sequence is
  2253. * verified for AR9888_REV1
  2254. */
  2255. if (tgt_info->target_version == AR9888_REV2_VERSION)
  2256. hif_pci_device_warm_reset(sc);
  2257. else
  2258. hif_pci_device_reset(sc);
  2259. #else
  2260. hif_pci_device_reset(sc);
  2261. #endif
  2262. }
  2263. /**
  2264. * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
  2265. * @sc: HIF PCIe Context
  2266. *
  2267. * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
  2268. *
  2269. * Return: Failure to caller
  2270. */
  2271. static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
  2272. {
  2273. uint16_t val = 0;
  2274. uint32_t bar = 0;
  2275. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
  2276. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  2277. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
  2278. struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
  2279. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  2280. A_target_id_t pci_addr = scn->mem;
  2281. hif_info("keep_awake_count = %d", hif_state->keep_awake_count);
  2282. pfrm_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
  2283. hif_info("PCI Vendor ID = 0x%04x", val);
  2284. pfrm_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
  2285. hif_info("PCI Device ID = 0x%04x", val);
  2286. pfrm_read_config_word(sc->pdev, PCI_COMMAND, &val);
  2287. hif_info("PCI Command = 0x%04x", val);
  2288. pfrm_read_config_word(sc->pdev, PCI_STATUS, &val);
  2289. hif_info("PCI Status = 0x%04x", val);
  2290. pfrm_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
  2291. hif_info("PCI BAR 0 = 0x%08x", bar);
  2292. hif_info("SOC_WAKE_ADDR 0%08x",
  2293. hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  2294. PCIE_SOC_WAKE_ADDRESS));
  2295. hif_info("RTC_STATE_ADDR 0x%08x",
  2296. hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  2297. RTC_STATE_ADDRESS));
  2298. hif_info("wakeup target");
  2299. if (!cfg->enable_self_recovery)
  2300. QDF_BUG(0);
  2301. scn->recovery = true;
  2302. if (cbk->set_recovery_in_progress)
  2303. cbk->set_recovery_in_progress(cbk->context, true);
  2304. pld_is_pci_link_down(sc->dev);
  2305. return -EACCES;
  2306. }
  2307. /*
  2308. * For now, we use simple on-demand sleep/wake.
  2309. * Some possible improvements:
  2310. * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
  2311. * (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
  2312. * Careful, though, these functions may be used by
  2313. * interrupt handlers ("atomic")
  2314. * -Don't use host_reg_table for this code; instead use values directly
  2315. * -Use a separate timer to track activity and allow Target to sleep only
  2316. * if it hasn't done anything for a while; may even want to delay some
  2317. * processing for a short while in order to "batch" (e.g.) transmit
  2318. * requests with completion processing into "windows of up time". Costs
  2319. * some performance, but improves power utilization.
  2320. * -On some platforms, it might be possible to eliminate explicit
  2321. * sleep/wakeup. Instead, take a chance that each access works OK. If not,
  2322. * recover from the failure by forcing the Target awake.
  2323. * -Change keep_awake_count to an atomic_t in order to avoid spin lock
  2324. * overhead in some cases. Perhaps this makes more sense when
  2325. * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
  2326. * disabled.
  2327. * -It is possible to compile this code out and simply force the Target
  2328. * to remain awake. That would yield optimal performance at the cost of
  2329. * increased power. See CONFIG_ATH_PCIE_MAX_PERF.
  2330. *
  2331. * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
  2332. */
  2333. /**
  2334. * hif_pci_target_sleep_state_adjust() - on-demand sleep/wake
  2335. * @scn: hif_softc pointer.
  2336. * @sleep_ok: bool
  2337. * @wait_for_it: bool
  2338. *
2339. * Allow the Target to sleep when @sleep_ok is true; otherwise force it
2340. * awake, optionally spinning until the wakeup is verified (@wait_for_it).
2341. * Return: 0 on success, negative errno on failure
  2342. */
  2343. int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
  2344. bool sleep_ok, bool wait_for_it)
  2345. {
  2346. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  2347. A_target_id_t pci_addr = scn->mem;
  2348. static int max_delay;
  2349. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2350. static int debug;
  2351. if (scn->recovery)
  2352. return -EACCES;
  2353. if (qdf_atomic_read(&scn->link_suspended)) {
  2354. hif_err("Invalid access, PCIe link is down");
  2355. debug = true;
  2356. QDF_ASSERT(0);
  2357. return -EACCES;
  2358. }
  2359. if (debug) {
  2360. wait_for_it = true;
  2361. hif_err("Invalid access, PCIe link is suspended");
  2362. QDF_ASSERT(0);
  2363. }
  2364. if (sleep_ok) {
  2365. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  2366. hif_state->keep_awake_count--;
  2367. if (hif_state->keep_awake_count == 0) {
  2368. /* Allow sleep */
  2369. hif_state->verified_awake = false;
  2370. hif_state->sleep_ticks = qdf_system_ticks();
  2371. }
  2372. if (hif_state->fake_sleep == false) {
  2373. /* Set the Fake Sleep */
  2374. hif_state->fake_sleep = true;
  2375. /* Start the Sleep Timer */
  2376. qdf_timer_stop(&hif_state->sleep_timer);
  2377. qdf_timer_start(&hif_state->sleep_timer,
  2378. HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
  2379. }
  2380. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  2381. } else {
  2382. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  2383. if (hif_state->fake_sleep) {
  2384. hif_state->verified_awake = true;
  2385. } else {
  2386. if (hif_state->keep_awake_count == 0) {
  2387. /* Force AWAKE */
  2388. hif_write32_mb(sc, pci_addr +
  2389. PCIE_LOCAL_BASE_ADDRESS +
  2390. PCIE_SOC_WAKE_ADDRESS,
  2391. PCIE_SOC_WAKE_V_MASK);
  2392. }
  2393. }
  2394. hif_state->keep_awake_count++;
  2395. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  2396. if (wait_for_it && !hif_state->verified_awake) {
2397. #define PCIE_SLEEP_ADJUST_TIMEOUT 8000 /* 8 ms */
  2398. int tot_delay = 0;
  2399. int curr_delay = 5;
  2400. for (;; ) {
  2401. if (hif_targ_is_awake(scn, pci_addr)) {
  2402. hif_state->verified_awake = true;
  2403. break;
  2404. }
  2405. if (!hif_pci_targ_is_present(scn, pci_addr))
  2406. break;
  2407. if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
  2408. return hif_log_soc_wakeup_timeout(sc);
  2409. OS_DELAY(curr_delay);
  2410. tot_delay += curr_delay;
  2411. if (curr_delay < 50)
  2412. curr_delay += 5;
  2413. }
  2414. /*
  2415. * NB: If Target has to come out of Deep Sleep,
2416. * this may take a few msec. Typically, though,
  2417. * this delay should be <30us.
  2418. */
  2419. if (tot_delay > max_delay)
  2420. max_delay = tot_delay;
  2421. }
  2422. }
  2423. if (debug && hif_state->verified_awake) {
  2424. debug = 0;
  2425. hif_err("INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
  2426. hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
  2427. PCIE_INTR_ENABLE_ADDRESS),
  2428. hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
  2429. PCIE_INTR_CAUSE_ADDRESS),
  2430. hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
  2431. CPU_INTR_ADDRESS),
  2432. hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
  2433. PCIE_INTR_CLR_ADDRESS),
  2434. hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
  2435. CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
  2436. }
  2437. return 0;
  2438. }
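/*
 * Sketch (not compiled): how a register access is typically bracketed by
 * the sleep-state adjust call above -- wake (and wait) before touching
 * target registers, then drop the keep-awake vote again. The wrapper and
 * its name are hypothetical.
 */
#if 0
static int hif_example_read_target_reg(struct hif_softc *scn,
					uint32_t offset, uint32_t *value)
{
	/* sleep_ok=false, wait_for_it=true: force awake and verify */
	if (hif_pci_target_sleep_state_adjust(scn, false, true) < 0)
		return -EACCES;

	*value = hif_read32_mb(scn, scn->mem + offset);

	/* sleep_ok=true: drop the vote, fake-sleep timer takes over */
	hif_pci_target_sleep_state_adjust(scn, true, false);
	return 0;
}
#endif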
  2439. #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
  2440. uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
  2441. {
  2442. uint32_t value;
  2443. void *addr;
  2444. addr = scn->mem + offset;
  2445. value = hif_read32_mb(scn, addr);
  2446. {
  2447. unsigned long irq_flags;
  2448. int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
  2449. spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
  2450. pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
  2451. pcie_access_log[idx].is_write = false;
  2452. pcie_access_log[idx].addr = addr;
  2453. pcie_access_log[idx].value = value;
  2454. pcie_access_log_seqnum++;
  2455. spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
  2456. }
  2457. return value;
  2458. }
  2459. void
  2460. hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
  2461. {
  2462. void *addr;
  2463. addr = scn->mem + (offset);
  2464. hif_write32_mb(scn, addr, value);
  2465. {
  2466. unsigned long irq_flags;
  2467. int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
  2468. spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
  2469. pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
  2470. pcie_access_log[idx].is_write = true;
  2471. pcie_access_log[idx].addr = addr;
  2472. pcie_access_log[idx].value = value;
  2473. pcie_access_log_seqnum++;
  2474. spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
  2475. }
  2476. }
  2477. /**
  2478. * hif_target_dump_access_log() - dump access log
  2479. *
  2480. * dump access log
  2481. *
  2482. * Return: n/a
  2483. */
  2484. void hif_target_dump_access_log(void)
  2485. {
  2486. int idx, len, start_idx, cur_idx;
  2487. unsigned long irq_flags;
  2488. spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
  2489. if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
  2490. len = PCIE_ACCESS_LOG_NUM;
  2491. start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
  2492. } else {
  2493. len = pcie_access_log_seqnum;
  2494. start_idx = 0;
  2495. }
  2496. for (idx = 0; idx < len; idx++) {
  2497. cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
  2498. hif_debug("idx:%d sn:%u wr:%d addr:%pK val:%u",
  2499. idx,
  2500. pcie_access_log[cur_idx].seqnum,
  2501. pcie_access_log[cur_idx].is_write,
  2502. pcie_access_log[cur_idx].addr,
  2503. pcie_access_log[cur_idx].value);
  2504. }
  2505. pcie_access_log_seqnum = 0;
  2506. spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
  2507. }
  2508. #endif
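/*
 * Sketch (not compiled): with CONFIG_ATH_PCIE_ACCESS_DEBUG, the checked
 * accessors above record every access in a ring buffer that can be
 * replayed on an error path. Hypothetical usage:
 */
#if 0
static void hif_example_logged_access(struct hif_softc *scn, uint32_t offset)
{
	uint32_t val;

	val = hif_target_read_checked(scn, offset);   /* logged read */
	hif_target_write_checked(scn, offset, val);   /* logged write-back */

	/* on a fault, dump the most recent accesses */
	hif_target_dump_access_log();
}
#endif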
  2509. #ifndef HIF_AHB
  2510. int hif_ahb_configure_irq(struct hif_pci_softc *sc)
  2511. {
  2512. QDF_BUG(0);
  2513. return -EINVAL;
  2514. }
  2515. #endif
  2516. static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
  2517. {
  2518. struct ce_tasklet_entry *tasklet_entry = context;
  2519. return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
  2520. }
  2521. extern const char *ce_name[];
  2522. static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
  2523. {
  2524. struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
  2525. return pci_scn->ce_irq_num[ce_id];
  2526. }
2527. /* hif_ce_srng_msi_irq_disable() - disable the irq for msi
  2528. * @hif_sc: hif context
  2529. * @ce_id: which ce to disable copy complete interrupts for
  2530. *
  2531. * since MSI interrupts are not level based, the system can function
  2532. * without disabling these interrupts. Interrupt mitigation can be
  2533. * added here for better system performance.
  2534. */
  2535. static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
  2536. {
  2537. pfrm_disable_irq_nosync(hif_sc->qdf_dev->dev,
  2538. hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
  2539. }
  2540. static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
  2541. {
  2542. if (__hif_check_link_status(hif_sc))
  2543. return;
  2544. pfrm_enable_irq(hif_sc->qdf_dev->dev,
  2545. hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
  2546. }
  2547. static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
  2548. {
  2549. disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
  2550. }
  2551. static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
  2552. {
  2553. enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
  2554. }
  2555. #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
  2556. /**
  2557. * hif_ce_configure_legacyirq() - Configure CE interrupts
  2558. * @scn: hif_softc pointer
  2559. *
  2560. * Configure CE legacy interrupts
  2561. *
  2562. * Return: int
  2563. */
  2564. static int hif_ce_configure_legacyirq(struct hif_softc *scn)
  2565. {
  2566. int ret = 0;
  2567. int irq, ce_id;
  2568. struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
  2569. struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
  2570. struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
  2571. int pci_slot;
  2572. qdf_device_t qdf_dev = scn->qdf_dev;
  2573. if (!pld_get_enable_intx(scn->qdf_dev->dev))
  2574. return -EINVAL;
  2575. scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
  2576. scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
  2577. scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
  2578. for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
  2579. if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
  2580. continue;
  2581. if (host_ce_conf[ce_id].flags & CE_ATTR_INIT_ON_DEMAND)
  2582. continue;
  2583. ret = pfrm_get_irq(scn->qdf_dev->dev,
  2584. (struct qdf_pfm_hndl *)qdf_dev->cnss_pdev,
  2585. legacy_ic_irqname[ce_id], ce_id, &irq);
  2586. if (ret) {
  2587. dev_err(scn->qdf_dev->dev, "get irq failed\n");
  2588. ret = -EFAULT;
  2589. goto skip;
  2590. }
  2591. pci_slot = hif_get_pci_slot(scn);
  2592. qdf_scnprintf(ce_irqname[pci_slot][ce_id],
  2593. DP_IRQ_NAME_LEN, "pci%d_ce_%u", pci_slot, ce_id);
  2594. pci_sc->ce_irq_num[ce_id] = irq;
  2595. ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
  2596. hif_ce_interrupt_handler,
  2597. IRQF_SHARED,
  2598. ce_irqname[pci_slot][ce_id],
  2599. &ce_sc->tasklets[ce_id]);
  2600. if (ret) {
  2601. hif_err("error = %d", ret);
  2602. return -EINVAL;
  2603. }
  2604. }
  2605. skip:
  2606. return ret;
  2607. }
  2608. #else
  2609. /**
  2610. * hif_ce_configure_legacyirq() - Configure CE interrupts
  2611. * @scn: hif_softc pointer
  2612. *
  2613. * Configure CE legacy interrupts
  2614. *
  2615. * Return: int
  2616. */
  2617. static int hif_ce_configure_legacyirq(struct hif_softc *scn)
  2618. {
  2619. return 0;
  2620. }
  2621. #endif
  2622. int hif_ce_msi_configure_irq_by_ceid(struct hif_softc *scn, int ce_id)
  2623. {
  2624. int ret = 0;
  2625. int irq;
  2626. uint32_t msi_data_start;
  2627. uint32_t msi_data_count;
  2628. unsigned int msi_data;
  2629. int irq_id;
  2630. uint32_t msi_irq_start;
  2631. struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
  2632. struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
  2633. int pci_slot;
  2634. unsigned long irq_flags;
  2635. if (ce_id >= CE_COUNT_MAX)
  2636. return -EINVAL;
  2637. /* do ce irq assignments */
  2638. ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
  2639. &msi_data_count, &msi_data_start,
  2640. &msi_irq_start);
  2641. if (ret) {
  2642. hif_err("Failed to get CE msi config");
  2643. return -EINVAL;
  2644. }
  2645. irq_id = scn->int_assignment->msi_idx[ce_id];
  2646. /* needs to match the ce_id -> irq data mapping
  2647. * used in the srng parameter configuration
  2648. */
  2649. pci_slot = hif_get_pci_slot(scn);
  2650. msi_data = irq_id + msi_irq_start;
  2651. irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
  2652. if (pld_is_one_msi(scn->qdf_dev->dev))
  2653. irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
  2654. else
  2655. irq_flags = IRQF_SHARED;
  2656. hif_debug("%s: (ce_id %d, irq_id %d, msi_data %d, irq %d flag 0x%lx tasklet %pK)",
  2657. __func__, ce_id, irq_id, msi_data, irq, irq_flags,
  2658. &ce_sc->tasklets[ce_id]);
  2659. /* implies the ce is also initialized */
  2660. if (!ce_sc->tasklets[ce_id].inited)
  2661. goto skip;
  2662. pci_sc->ce_irq_num[ce_id] = irq;
  2663. hif_affinity_mgr_init_ce_irq(scn, ce_id, irq);
  2664. qdf_scnprintf(ce_irqname[pci_slot][ce_id],
  2665. DP_IRQ_NAME_LEN, "pci%u_wlan_ce_%u",
  2666. pci_slot, ce_id);
  2667. ret = pfrm_request_irq(scn->qdf_dev->dev,
  2668. irq, hif_ce_interrupt_handler, irq_flags,
  2669. ce_irqname[pci_slot][ce_id],
  2670. &ce_sc->tasklets[ce_id]);
  2671. if (ret)
  2672. return -EINVAL;
  2673. skip:
  2674. return ret;
  2675. }
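/*
 * Sketch (not compiled): CEs flagged CE_ATTR_INIT_ON_DEMAND are skipped by
 * the bulk MSI configuration below and can have their interrupt wired up
 * later with the per-CE helper above. The caller shown here is hypothetical.
 */
#if 0
static int hif_example_enable_on_demand_ce(struct hif_softc *scn, int ce_id)
{
	/* looks up the MSI assignment for this CE and requests its irq */
	return hif_ce_msi_configure_irq_by_ceid(scn, ce_id);
}
#endif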
  2676. static int hif_ce_msi_configure_irq(struct hif_softc *scn)
  2677. {
  2678. int ret;
  2679. int ce_id, irq;
  2680. uint32_t msi_data_start;
  2681. uint32_t msi_data_count;
  2682. uint32_t msi_irq_start;
  2683. struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
  2684. struct CE_attr *host_ce_conf = ce_sc->host_ce_config;
  2685. if (!scn->ini_cfg.disable_wake_irq) {
  2686. /* do wake irq assignment */
  2687. ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
  2688. &msi_data_count,
  2689. &msi_data_start,
  2690. &msi_irq_start);
  2691. if (ret)
  2692. return ret;
  2693. scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev,
  2694. msi_irq_start);
  2695. scn->wake_irq_type = HIF_PM_MSI_WAKE;
  2696. ret = pfrm_request_irq(scn->qdf_dev->dev, scn->wake_irq,
  2697. hif_wake_interrupt_handler,
  2698. IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
  2699. if (ret)
  2700. return ret;
  2701. }
  2702. /* do ce irq assignments */
  2703. ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
  2704. &msi_data_count, &msi_data_start,
  2705. &msi_irq_start);
  2706. if (ret)
  2707. goto free_wake_irq;
  2708. if (ce_srng_based(scn)) {
  2709. scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
  2710. scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
  2711. } else {
  2712. scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
  2713. scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
  2714. }
  2715. scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
  2716. /* needs to match the ce_id -> irq data mapping
  2717. * used in the srng parameter configuration
  2718. */
  2719. for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
  2720. if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
  2721. continue;
  2722. if (host_ce_conf[ce_id].flags & CE_ATTR_INIT_ON_DEMAND)
  2723. continue;
  2724. ret = hif_ce_msi_configure_irq_by_ceid(scn, ce_id);
  2725. if (ret)
  2726. goto free_irq;
  2727. }
  2728. return ret;
  2729. free_irq:
  2730. /* the request_irq for the last ce_id failed so skip it. */
  2731. while (ce_id > 0 && ce_id < scn->ce_count) {
  2732. unsigned int msi_data;
  2733. ce_id--;
  2734. msi_data = (ce_id % msi_data_count) + msi_irq_start;
  2735. irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
  2736. pfrm_free_irq(scn->qdf_dev->dev,
  2737. irq, &ce_sc->tasklets[ce_id]);
  2738. }
  2739. free_wake_irq:
  2740. if (!scn->ini_cfg.disable_wake_irq) {
  2741. pfrm_free_irq(scn->qdf_dev->dev,
  2742. scn->wake_irq, scn->qdf_dev->dev);
  2743. scn->wake_irq = 0;
  2744. scn->wake_irq_type = HIF_PM_INVALID_WAKE;
  2745. }
  2746. return ret;
  2747. }
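/*
 * Hedged teardown sketch (hypothetical helper, not defined in this file):
 * the MSI setup above is expected to be undone with the same dev_id cookies
 * that were passed to pfrm_request_irq(), e.g. in a hif_ce_msi_free_irq()
 * style routine:
 *
 *   for (ce_id = scn->ce_count - 1; ce_id >= 0; ce_id--) {
 *           if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
 *                   continue;
 *           pfrm_free_irq(scn->qdf_dev->dev, pci_sc->ce_irq_num[ce_id],
 *                         &ce_sc->tasklets[ce_id]);
 *   }
 *   if (!scn->ini_cfg.disable_wake_irq)
 *           pfrm_free_irq(scn->qdf_dev->dev, scn->wake_irq, scn);
 */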
  2748. static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
  2749. {
  2750. int i;
  2751. struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
  2752. for (i = 0; i < hif_ext_group->numirq; i++)
  2753. pfrm_disable_irq_nosync(scn->qdf_dev->dev,
  2754. hif_ext_group->os_irq[i]);
  2755. }
  2756. static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
  2757. {
  2758. int i;
  2759. struct hif_softc *scn = HIF_GET_SOFTC(hif_ext_group->hif);
  2760. for (i = 0; i < hif_ext_group->numirq; i++)
  2761. pfrm_enable_irq(scn->qdf_dev->dev, hif_ext_group->os_irq[i]);
  2762. }
  2763. /**
  2764. * hif_pci_get_irq_name() - get irq name
  2765. * @irq_no: irq number
  2766. *
  2767. * This function maps an irq number to its
  2768. * irq name.
  2769. *
  2770. * Return: irq name
  2771. */
  2772. const char *hif_pci_get_irq_name(int irq_no)
  2773. {
  2774. return "pci-dummy";
  2775. }
  2776. #if defined(FEATURE_IRQ_AFFINITY) || defined(HIF_CPU_PERF_AFFINE_MASK)
  2777. void hif_pci_irq_set_affinity_hint(struct hif_exec_context *hif_ext_group,
  2778. bool perf)
  2779. {
  2780. int i, ret;
  2781. unsigned int cpus;
  2782. bool mask_set = false;
  2783. int package_id;
  2784. int cpu_cluster = perf ? hif_get_perf_cluster_bitmap() :
  2785. BIT(CPU_CLUSTER_TYPE_LITTLE);
  2786. for (i = 0; i < hif_ext_group->numirq; i++)
  2787. qdf_cpumask_clear(&hif_ext_group->new_cpu_mask[i]);
  2788. for (i = 0; i < hif_ext_group->numirq; i++) {
  2789. qdf_for_each_online_cpu(cpus) {
  2790. package_id = qdf_topology_physical_package_id(cpus);
  2791. if (package_id >= 0 && BIT(package_id) & cpu_cluster) {
  2792. qdf_cpumask_set_cpu(cpus,
  2793. &hif_ext_group->
  2794. new_cpu_mask[i]);
  2795. mask_set = true;
  2796. }
  2797. }
  2798. }
  2799. for (i = 0; i < hif_ext_group->numirq; i++) {
  2800. if (mask_set) {
  2801. ret = hif_affinity_mgr_set_qrg_irq_affinity((struct hif_softc *)hif_ext_group->hif,
  2802. hif_ext_group->os_irq[i],
  2803. hif_ext_group->grp_id, i,
  2804. &hif_ext_group->new_cpu_mask[i]);
  2805. if (ret)
  2806. qdf_debug("Set affinity %*pbl fails for IRQ %d ",
  2807. qdf_cpumask_pr_args(&hif_ext_group->
  2808. new_cpu_mask[i]),
  2809. hif_ext_group->os_irq[i]);
  2810. } else {
  2811. qdf_debug("Offline CPU: Set affinity fails for IRQ: %d",
  2812. hif_ext_group->os_irq[i]);
  2813. }
  2814. }
  2815. }
  2816. #endif
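/*
 * Illustrative sketch (topology is hypothetical): hif_pci_irq_set_affinity_hint()
 * reduces a cluster bitmap to a per-IRQ cpumask. If CPUs 0-3 report physical
 * package 0 (little) and CPUs 4-7 report package 1 (perf), then perf = true
 * selects a cpu_cluster such as BIT(1) and the inner loop behaves like:
 *
 *   qdf_for_each_online_cpu(cpu) {
 *           pkg = qdf_topology_physical_package_id(cpu);
 *           if (pkg >= 0 && (BIT(pkg) & cpu_cluster))
 *                   qdf_cpumask_set_cpu(cpu, &mask);    (CPUs 4-7 only)
 *   }
 *
 * so DP group IRQs end up hinted to the perf cluster.
 */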
  2817. #ifdef HIF_CPU_PERF_AFFINE_MASK
  2818. void hif_pci_ce_irq_set_affinity_hint(struct hif_softc *scn)
  2819. {
  2820. int ret;
  2821. unsigned int cpus;
  2822. struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
  2823. struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
  2824. struct CE_attr *host_ce_conf;
  2825. int ce_id;
  2826. qdf_cpu_mask ce_cpu_mask, updated_mask;
  2827. int perf_cpu_cluster = hif_get_perf_cluster_bitmap();
  2828. int package_id;
  2829. host_ce_conf = ce_sc->host_ce_config;
  2830. qdf_cpumask_clear(&ce_cpu_mask);
  2831. qdf_for_each_online_cpu(cpus) {
  2832. package_id = qdf_topology_physical_package_id(cpus);
  2833. if (package_id >= 0 && BIT(package_id) & perf_cpu_cluster) {
  2834. qdf_cpumask_set_cpu(cpus,
  2835. &ce_cpu_mask);
  2836. } else {
  2837. hif_err_rl("Unable to set cpu mask for offline CPU %d",
  2838. cpus);
  2839. }
  2840. }
  2841. if (qdf_cpumask_empty(&ce_cpu_mask)) {
  2842. hif_err_rl("Empty cpu_mask, unable to set CE IRQ affinity");
  2843. return;
  2844. }
  2845. for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
  2846. if (host_ce_conf[ce_id].flags & CE_ATTR_DISABLE_INTR)
  2847. continue;
  2848. qdf_cpumask_copy(&updated_mask, &ce_cpu_mask);
  2849. ret = hif_affinity_mgr_set_ce_irq_affinity(scn, pci_sc->ce_irq_num[ce_id],
  2850. ce_id,
  2851. &updated_mask);
  2852. qdf_cpumask_clear(&pci_sc->ce_irq_cpu_mask[ce_id]);
  2853. qdf_cpumask_copy(&pci_sc->ce_irq_cpu_mask[ce_id],
  2854. &updated_mask);
  2855. if (ret)
  2856. hif_err_rl("Set affinity %*pbl fails for CE IRQ %d",
  2857. qdf_cpumask_pr_args(
  2858. &pci_sc->ce_irq_cpu_mask[ce_id]),
  2859. pci_sc->ce_irq_num[ce_id]);
  2860. else
  2861. hif_debug_rl("Set affinity %*pbl for CE IRQ: %d",
  2862. qdf_cpumask_pr_args(
  2863. &pci_sc->ce_irq_cpu_mask[ce_id]),
  2864. pci_sc->ce_irq_num[ce_id]);
  2865. }
  2866. }
  2867. #endif /* #ifdef HIF_CPU_PERF_AFFINE_MASK */
  2868. #ifdef HIF_CPU_CLEAR_AFFINITY
  2869. void hif_pci_config_irq_clear_cpu_affinity(struct hif_softc *scn,
  2870. int intr_ctxt_id, int cpu)
  2871. {
  2872. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  2873. struct hif_exec_context *hif_ext_group;
  2874. int i, ret;
  2875. if (intr_ctxt_id < hif_state->hif_num_extgroup) {
  2876. hif_ext_group = hif_state->hif_ext_group[intr_ctxt_id];
  2877. for (i = 0; i < hif_ext_group->numirq; i++) {
  2878. qdf_cpumask_setall(&hif_ext_group->new_cpu_mask[i]);
  2879. qdf_cpumask_clear_cpu(cpu,
  2880. &hif_ext_group->new_cpu_mask[i]);
  2881. ret = hif_affinity_mgr_set_qrg_irq_affinity((struct hif_softc *)hif_ext_group->hif,
  2882. hif_ext_group->os_irq[i],
  2883. hif_ext_group->grp_id, i,
  2884. &hif_ext_group->new_cpu_mask[i]);
  2885. if (ret)
  2886. hif_err("Set affinity %*pbl fails for IRQ %d ",
  2887. qdf_cpumask_pr_args(&hif_ext_group->
  2888. new_cpu_mask[i]),
  2889. hif_ext_group->os_irq[i]);
  2890. else
  2891. hif_debug("Set affinity %*pbl for IRQ: %d",
  2892. qdf_cpumask_pr_args(&hif_ext_group->
  2893. new_cpu_mask[i]),
  2894. hif_ext_group->os_irq[i]);
  2895. }
  2896. }
  2897. }
  2898. #endif
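/*
 * Hedged usage sketch (assumed caller, not part of this file): a layer that
 * wants interrupt context 0 to stop targeting CPU 2, for example before
 * isolating that CPU, could call
 *
 *   hif_pci_config_irq_clear_cpu_affinity(scn, 0, 2);
 *
 * which rebuilds the group's masks as "all CPUs except CPU 2" and re-applies
 * them through the affinity manager.
 */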
  2899. void hif_pci_config_irq_affinity(struct hif_softc *scn)
  2900. {
  2901. int i;
  2902. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  2903. struct hif_exec_context *hif_ext_group;
  2904. hif_core_ctl_set_boost(true);
  2905. /* Set IRQ affinity for WLAN DP interrupts */
  2906. for (i = 0; i < hif_state->hif_num_extgroup; i++) {
  2907. hif_ext_group = hif_state->hif_ext_group[i];
  2908. hif_pci_irq_set_affinity_hint(hif_ext_group, true);
  2909. }
  2910. /* Set IRQ affinity for CE interrupts */
  2911. hif_pci_ce_irq_set_affinity_hint(scn);
  2912. }
  2913. #ifdef QCA_SUPPORT_LEGACY_INTERRUPTS
  2914. /**
  2915. * hif_grp_configure_legacyirq() - Configure DP interrupts
  2916. * @scn: hif_softc pointer
  2917. * @hif_ext_group: hif extended group pointer
  2918. *
  2919. * Configure DP legacy interrupts
  2920. *
  2921. * Return: 0 on success, -EFAULT on failure
  2922. */
  2923. static int hif_grp_configure_legacyirq(struct hif_softc *scn,
  2924. struct hif_exec_context *hif_ext_group)
  2925. {
  2926. int ret = 0;
  2927. int irq = 0;
  2928. int j;
  2929. int pci_slot;
  2930. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2931. struct pci_dev *pdev = sc->pdev;
  2932. qdf_device_t qdf_dev = scn->qdf_dev;
  2933. for (j = 0; j < hif_ext_group->numirq; j++) {
  2934. ret = pfrm_get_irq(&pdev->dev,
  2935. (struct qdf_pfm_hndl *)qdf_dev->cnss_pdev,
  2936. legacy_ic_irqname[hif_ext_group->irq[j]],
  2937. hif_ext_group->irq[j], &irq);
  2938. if (ret) {
  2939. dev_err(&pdev->dev, "get irq failed\n");
  2940. return -EFAULT;
  2941. }
  2942. hif_ext_group->os_irq[j] = irq;
  2943. }
  2944. hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
  2945. hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
  2946. hif_ext_group->irq_name = &hif_pci_get_irq_name;
  2947. hif_ext_group->work_complete = &hif_dummy_grp_done;
  2948. pci_slot = hif_get_pci_slot(scn);
  2949. for (j = 0; j < hif_ext_group->numirq; j++) {
  2950. irq = hif_ext_group->os_irq[j];
  2951. if (scn->irq_unlazy_disable)
  2952. qdf_dev_set_irq_status_flags(irq,
  2953. QDF_IRQ_DISABLE_UNLAZY);
  2954. hif_debug("request_irq = %d for grp %d",
  2955. irq, hif_ext_group->grp_id);
  2956. qdf_scnprintf(dp_legacy_irqname[pci_slot][hif_ext_group->irq[j]],
  2957. DP_IRQ_NAME_LEN, "pci%u_%s", pci_slot,
  2958. legacy_ic_irqname[hif_ext_group->irq[j]]);
  2959. ret = pfrm_request_irq(scn->qdf_dev->dev, irq,
  2960. hif_ext_group_interrupt_handler,
  2961. IRQF_SHARED | IRQF_NO_SUSPEND,
  2962. dp_legacy_irqname[pci_slot][hif_ext_group->irq[j]],
  2963. hif_ext_group);
  2964. if (ret) {
  2965. hif_err("request_irq failed ret = %d", ret);
  2966. return -EFAULT;
  2967. }
  2968. hif_ext_group->os_irq[j] = irq;
  2969. }
  2970. hif_ext_group->irq_requested = true;
  2971. return 0;
  2972. }
  2973. #else
  2974. /**
  2975. * hif_grp_configure_legacyirq() - Configure DP interrupts
  2976. * @scn: hif_softc pointer
  2977. * @hif_ext_group: hif extended group pointer
  2978. *
  2979. * Configure DP legacy interrupts
  2980. *
  2981. * Return: 0
  2982. */
  2983. static int hif_grp_configure_legacyirq(struct hif_softc *scn,
  2984. struct hif_exec_context *hif_ext_group)
  2985. {
  2986. return 0;
  2987. }
  2988. #endif
  2989. int hif_pci_configure_grp_irq(struct hif_softc *scn,
  2990. struct hif_exec_context *hif_ext_group)
  2991. {
  2992. int ret = 0;
  2993. int irq = 0;
  2994. int j;
  2995. int pci_slot;
  2996. unsigned long irq_flags;
  2997. if (pld_get_enable_intx(scn->qdf_dev->dev))
  2998. return hif_grp_configure_legacyirq(scn, hif_ext_group);
  2999. hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
  3000. hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
  3001. hif_ext_group->irq_name = &hif_pci_get_irq_name;
  3002. hif_ext_group->work_complete = &hif_dummy_grp_done;
  3003. pci_slot = hif_get_pci_slot(scn);
  3004. for (j = 0; j < hif_ext_group->numirq; j++) {
  3005. irq = hif_ext_group->irq[j];
  3006. if (scn->irq_unlazy_disable)
  3007. qdf_dev_set_irq_status_flags(irq,
  3008. QDF_IRQ_DISABLE_UNLAZY);
  3009. if (pld_is_one_msi(scn->qdf_dev->dev))
  3010. irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
  3011. else
  3012. irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
  3013. hif_debug("request_irq = %d for grp %d irq_flags 0x%lx",
  3014. irq, hif_ext_group->grp_id, irq_flags);
  3015. qdf_scnprintf(dp_irqname[pci_slot][hif_ext_group->grp_id],
  3016. DP_IRQ_NAME_LEN, "pci%u_wlan_grp_dp_%u",
  3017. pci_slot, hif_ext_group->grp_id);
  3018. ret = pfrm_request_irq(
  3019. scn->qdf_dev->dev, irq,
  3020. hif_ext_group_interrupt_handler,
  3021. irq_flags,
  3022. dp_irqname[pci_slot][hif_ext_group->grp_id],
  3023. hif_ext_group);
  3024. if (ret) {
  3025. hif_err("request_irq failed ret = %d", ret);
  3026. return -EFAULT;
  3027. }
  3028. hif_ext_group->os_irq[j] = irq;
  3029. hif_affinity_mgr_init_grp_irq(scn, hif_ext_group->grp_id,
  3030. j, irq);
  3031. }
  3032. hif_ext_group->irq_requested = true;
  3033. return 0;
  3034. }
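/*
 * Illustrative example (values are hypothetical): for PCI slot 0 and DP
 * group 1 on a platform that is not in one-MSI mode, the request above
 * amounts to roughly
 *
 *   request_irq(irq, hif_ext_group_interrupt_handler,
 *               IRQF_SHARED | IRQF_NO_SUSPEND,
 *               "pci0_wlan_grp_dp_1", hif_ext_group);
 *
 * while in one-MSI mode the flags become IRQF_SHARED | IRQF_NOBALANCING so
 * the single shared vector is not migrated by the IRQ balancer.
 */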
  3035. #ifdef FEATURE_IRQ_AFFINITY
  3036. void hif_pci_set_grp_intr_affinity(struct hif_softc *scn,
  3037. uint32_t grp_intr_bitmask, bool perf)
  3038. {
  3039. int i;
  3040. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  3041. struct hif_exec_context *hif_ext_group;
  3042. for (i = 0; i < hif_state->hif_num_extgroup; i++) {
  3043. if (!(grp_intr_bitmask & BIT(i)))
  3044. continue;
  3045. hif_ext_group = hif_state->hif_ext_group[i];
  3046. hif_pci_irq_set_affinity_hint(hif_ext_group, perf);
  3047. qdf_atomic_set(&hif_ext_group->force_napi_complete, -1);
  3048. }
  3049. }
  3050. #endif
  3051. #if (defined(QCA_WIFI_QCA6390) || defined(QCA_WIFI_QCA6490) || \
  3052. defined(QCA_WIFI_KIWI))
  3053. uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
  3054. uint32_t offset)
  3055. {
  3056. return hal_read32_mb(hif_sc->hal_soc, offset);
  3057. }
  3058. void hif_pci_reg_write32(struct hif_softc *hif_sc,
  3059. uint32_t offset,
  3060. uint32_t value)
  3061. {
  3062. hal_write32_mb(hif_sc->hal_soc, offset, value);
  3063. }
  3064. #else
  3065. /* TODO: Need to implement other chips carefully */
  3066. uint32_t hif_pci_reg_read32(struct hif_softc *hif_sc,
  3067. uint32_t offset)
  3068. {
  3069. return 0;
  3070. }
  3071. void hif_pci_reg_write32(struct hif_softc *hif_sc,
  3072. uint32_t offset,
  3073. uint32_t value)
  3074. {
  3075. }
  3076. #endif
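/*
 * Hedged usage sketch (HYPOTHETICAL_CTRL_OFFSET is a made-up register):
 * callers are expected to pair the accessors above for read-modify-write
 * sequences, e.g.
 *
 *   uint32_t val;
 *
 *   val = hif_pci_reg_read32(hif_sc, HYPOTHETICAL_CTRL_OFFSET);
 *   val |= BIT(0);
 *   hif_pci_reg_write32(hif_sc, HYPOTHETICAL_CTRL_OFFSET, val);
 *
 * On chips outside the list above the stub versions turn such sequences
 * into no-ops.
 */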
  3077. /**
  3078. * hif_configure_irq() - configure interrupt
  3079. * @scn: HIF context
  3080. *
  3081. * This function configures interrupt(s)
  3082. *
  3083. * Return: 0 - for success
  3084. */
  3085. int hif_configure_irq(struct hif_softc *scn)
  3086. {
  3087. int ret = 0;
  3088. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  3089. hif_info("E");
  3090. if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
  3091. scn->request_irq_done = false;
  3092. return 0;
  3093. }
  3094. hif_init_reschedule_tasklet_work(sc);
  3095. ret = hif_ce_msi_configure_irq(scn);
  3096. if (ret == 0) {
  3097. goto end;
  3098. }
  3099. switch (scn->target_info.target_type) {
  3100. case TARGET_TYPE_QCA8074:
  3101. case TARGET_TYPE_QCA8074V2:
  3102. case TARGET_TYPE_QCA6018:
  3103. case TARGET_TYPE_QCA5018:
  3104. case TARGET_TYPE_QCA5332:
  3105. case TARGET_TYPE_QCA9574:
  3106. case TARGET_TYPE_QCN9160:
  3107. ret = hif_ahb_configure_irq(sc);
  3108. break;
  3109. case TARGET_TYPE_QCN9224:
  3110. ret = hif_ce_configure_legacyirq(scn);
  3111. break;
  3112. default:
  3113. ret = hif_pci_configure_legacy_irq(sc);
  3114. break;
  3115. }
  3116. if (ret < 0) {
  3117. hif_err("error = %d", ret);
  3118. return ret;
  3119. }
  3120. end:
  3121. scn->request_irq_done = true;
  3122. return 0;
  3123. }
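/*
 * Summary sketch of the interrupt setup order used above:
 *
 *   polled mode enabled        ->  no IRQs, request_irq_done stays false
 *   hif_ce_msi_configure_irq() ->  per-CE MSI vectors (plus optional wake IRQ)
 *   otherwise, per target_type ->  AHB legacy (QCA8074 family),
 *                                  CE legacy (QCN9224), or the default
 *                                  PCI legacy line interrupt
 *
 * The legacy fallbacks only run when MSI configuration returns non-zero.
 */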
  3124. /**
  3125. * hif_trigger_timer_irq() - Trigger an interrupt on LF_TIMER 0
  3126. * @scn: hif control structure
  3127. *
  3128. * Sets the IRQ bit in the LF Timer status address to wake a Peregrine/Swift
  3129. * target stuck in a polling loop in pcie_address_config in FW.
  3130. *
  3131. * Return: none
  3132. */
  3133. static void hif_trigger_timer_irq(struct hif_softc *scn)
  3134. {
  3135. int tmp;
  3136. /* Trigger IRQ on Peregrine/Swift by setting
  3137. * IRQ Bit of LF_TIMER 0
  3138. */
  3139. tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
  3140. SOC_LF_TIMER_STATUS0_ADDRESS));
  3141. /* Set Raw IRQ Bit */
  3142. tmp |= 1;
  3143. /* SOC_LF_TIMER_STATUS0 */
  3144. hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
  3145. SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
  3146. }
  3147. /**
  3148. * hif_target_sync() : ensure the target is ready
  3149. * @scn: hif control structure
  3150. *
  3151. * Informs the fw that we plan to use legacy interrupts so that
  3152. * it can begin booting, and ensures that the fw finishes booting
  3153. * before continuing. Should be called before trying to write
  3154. * to the target's other registers for the first time.
  3155. *
  3156. * Return: none
  3157. */
  3158. static void hif_target_sync(struct hif_softc *scn)
  3159. {
  3160. hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
  3161. PCIE_INTR_ENABLE_ADDRESS),
  3162. PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
  3163. /* read to flush pcie write */
  3164. (void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
  3165. PCIE_INTR_ENABLE_ADDRESS));
  3166. hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
  3167. PCIE_SOC_WAKE_ADDRESS,
  3168. PCIE_SOC_WAKE_V_MASK);
  3169. while (!hif_targ_is_awake(scn, scn->mem))
  3170. ;
  3171. if (HAS_FW_INDICATOR) {
  3172. int wait_limit = 500;
  3173. int fw_ind = 0;
  3174. int retry_count = 0;
  3175. uint32_t target_type = scn->target_info.target_type;
  3176. fw_retry:
  3177. hif_info("Loop checking FW signal");
  3178. while (1) {
  3179. fw_ind = hif_read32_mb(scn, scn->mem +
  3180. FW_INDICATOR_ADDRESS);
  3181. if (fw_ind & FW_IND_INITIALIZED)
  3182. break;
  3183. if (wait_limit-- < 0)
  3184. break;
  3185. hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
  3186. PCIE_INTR_ENABLE_ADDRESS),
  3187. PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
  3188. /* read to flush pcie write */
  3189. (void)hif_read32_mb(scn, scn->mem +
  3190. (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
  3191. qdf_mdelay(10);
  3192. }
  3193. if (wait_limit < 0) {
  3194. if (target_type == TARGET_TYPE_AR9888 &&
  3195. retry_count++ < 2) {
  3196. hif_trigger_timer_irq(scn);
  3197. wait_limit = 500;
  3198. goto fw_retry;
  3199. }
  3200. hif_info("FW signal timed out");
  3201. qdf_assert_always(0);
  3202. } else {
  3203. hif_info("Got FW signal, retries = %x", 500-wait_limit);
  3204. }
  3205. }
  3206. hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
  3207. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
  3208. }
  3209. static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
  3210. struct device *dev)
  3211. {
  3212. struct pld_soc_info info;
  3213. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  3214. pld_get_soc_info(dev, &info);
  3215. sc->mem = info.v_addr;
  3216. sc->ce_sc.ol_sc.mem = info.v_addr;
  3217. sc->ce_sc.ol_sc.mem_pa = info.p_addr;
  3218. sc->device_version.family_number = info.device_version.family_number;
  3219. sc->device_version.device_number = info.device_version.device_number;
  3220. sc->device_version.major_version = info.device_version.major_version;
  3221. sc->device_version.minor_version = info.device_version.minor_version;
  3222. hif_info("%s: fam num %u dev ver %u maj ver %u min ver %u", __func__,
  3223. sc->device_version.family_number,
  3224. sc->device_version.device_number,
  3225. sc->device_version.major_version,
  3226. sc->device_version.minor_version);
  3227. /* dev_mem_info[0] is for CMEM */
  3228. scn->cmem_start = info.dev_mem_info[0].start;
  3229. scn->cmem_size = info.dev_mem_info[0].size;
  3230. scn->target_info.target_version = info.soc_id;
  3231. scn->target_info.target_revision = 0;
  3232. scn->target_info.soc_version = info.device_version.major_version;
  3233. }
  3234. static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
  3235. struct device *dev)
  3236. {}
  3237. static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
  3238. int device_id)
  3239. {
  3240. if (!pld_have_platform_driver_support(sc->dev))
  3241. return false;
  3242. switch (device_id) {
  3243. case QCA6290_DEVICE_ID:
  3244. case QCN9000_DEVICE_ID:
  3245. case QCN9224_DEVICE_ID:
  3246. case QCA6290_EMULATION_DEVICE_ID:
  3247. case QCA6390_DEVICE_ID:
  3248. case QCA6490_DEVICE_ID:
  3249. case AR6320_DEVICE_ID:
  3250. case QCN7605_DEVICE_ID:
  3251. case KIWI_DEVICE_ID:
  3252. case MANGO_DEVICE_ID:
  3253. case PEACH_DEVICE_ID:
  3254. return true;
  3255. }
  3256. return false;
  3257. }
  3258. static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
  3259. int device_id)
  3260. {
  3261. if (hif_is_pld_based_target(sc, device_id)) {
  3262. sc->hif_enable_pci = hif_enable_pci_pld;
  3263. sc->hif_pci_deinit = hif_pci_deinit_pld;
  3264. sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
  3265. } else {
  3266. sc->hif_enable_pci = hif_enable_pci_nopld;
  3267. sc->hif_pci_deinit = hif_pci_deinit_nopld;
  3268. sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
  3269. }
  3270. }
  3271. #ifdef HIF_REG_WINDOW_SUPPORT
  3272. static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
  3273. u32 target_type)
  3274. {
  3275. switch (target_type) {
  3276. case TARGET_TYPE_QCN7605:
  3277. case TARGET_TYPE_QCA6490:
  3278. case TARGET_TYPE_QCA6390:
  3279. case TARGET_TYPE_KIWI:
  3280. case TARGET_TYPE_MANGO:
  3281. case TARGET_TYPE_PEACH:
  3282. sc->use_register_windowing = true;
  3283. qdf_spinlock_create(&sc->register_access_lock);
  3284. sc->register_window = 0;
  3285. break;
  3286. default:
  3287. sc->use_register_windowing = false;
  3288. }
  3289. }
  3290. #else
  3291. static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
  3292. u32 target_type)
  3293. {
  3294. sc->use_register_windowing = false;
  3295. }
  3296. #endif
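/*
 * Hedged sketch of what register windowing means here (ASSUMED_* names are
 * hypothetical; the real window handling lives in the HAL register-access
 * layer): when use_register_windowing is set, an offset beyond the directly
 * mapped range is reached by programming a window-select register and then
 * accessing the remainder inside the window, under register_access_lock:
 *
 *   qdf_spin_lock_irqsave(&sc->register_access_lock);
 *   if ((offset >> ASSUMED_WINDOW_SHIFT) != sc->register_window) {
 *           sc->register_window = offset >> ASSUMED_WINDOW_SHIFT;
 *           write32(sc->mem + ASSUMED_WINDOW_REG_OFFSET, sc->register_window);
 *   }
 *   val = read32(sc->mem + (offset & ASSUMED_WINDOW_MASK));
 *   qdf_spin_unlock_irqrestore(&sc->register_access_lock);
 */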
  3297. /**
  3298. * hif_pci_enable_bus(): enable bus
  3299. * @ol_sc: soft_sc struct
  3300. * @dev: device pointer
  3301. * @bdev: bus dev pointer
  3302. * @bid: bus id pointer
  3303. * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
  3304. *
  3305. * This function enables the bus
  3306. *
  3307. * Return: QDF_STATUS
  3308. */
  3309. QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
  3310. struct device *dev, void *bdev,
  3311. const struct hif_bus_id *bid,
  3312. enum hif_enable_type type)
  3313. {
  3314. int ret = 0;
  3315. uint32_t hif_type;
  3316. uint32_t target_type = TARGET_TYPE_UNKNOWN;
  3317. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
  3318. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
  3319. uint16_t revision_id = 0;
  3320. int probe_again = 0;
  3321. struct pci_dev *pdev = bdev;
  3322. const struct pci_device_id *id = (const struct pci_device_id *)bid;
  3323. struct hif_target_info *tgt_info;
  3324. if (!ol_sc) {
  3325. hif_err("hif_ctx is NULL");
  3326. return QDF_STATUS_E_NOMEM;
  3327. }
  3328. /* Following print is used by various tools to identify
  3329. * WLAN SOC (e.g. crash dump analysis and reporting tool).
  3330. */
  3331. hif_info("con_mode = 0x%x, WLAN_SOC_device_id = 0x%x",
  3332. hif_get_conparam(ol_sc), id->device);
  3333. sc->pdev = pdev;
  3334. sc->dev = &pdev->dev;
  3335. sc->devid = id->device;
  3336. sc->cacheline_sz = dma_get_cache_alignment();
  3337. tgt_info = hif_get_target_info_handle(hif_hdl);
  3338. hif_pci_init_deinit_ops_attach(sc, id->device);
  3339. sc->hif_pci_get_soc_info(sc, dev);
  3340. again:
  3341. ret = sc->hif_enable_pci(sc, pdev, id);
  3342. if (ret < 0) {
  3343. hif_err("hif_enable_pci error = %d", ret);
  3344. goto err_enable_pci;
  3345. }
  3346. hif_info("hif_enable_pci done");
  3347. /* Temporary FIX: disable ASPM on peregrine.
  3348. * Will be removed after the OTP is programmed
  3349. */
  3350. hif_disable_power_gating(hif_hdl);
  3351. device_disable_async_suspend(&pdev->dev);
  3352. pfrm_read_config_word(pdev, 0x08, &revision_id);
  3353. ret = hif_get_device_type(id->device, revision_id,
  3354. &hif_type, &target_type);
  3355. if (ret < 0) {
  3356. hif_err("Invalid device id/revision_id");
  3357. goto err_tgtstate;
  3358. }
  3359. hif_info("hif_type = 0x%x, target_type = 0x%x",
  3360. hif_type, target_type);
  3361. hif_register_tbl_attach(ol_sc, hif_type);
  3362. hif_target_register_tbl_attach(ol_sc, target_type);
  3363. hif_pci_init_reg_windowing_support(sc, target_type);
  3364. tgt_info->target_type = target_type;
  3365. /*
  3366. * Disable unlazy interrupt registration for QCN9000 and QCN9224
  3367. */
  3368. if (target_type == TARGET_TYPE_QCN9000 ||
  3369. target_type == TARGET_TYPE_QCN9224)
  3370. ol_sc->irq_unlazy_disable = 1;
  3371. if (ce_srng_based(ol_sc)) {
  3372. hif_info("Skip tgt_wake up for srng devices");
  3373. } else {
  3374. ret = hif_pci_probe_tgt_wakeup(sc);
  3375. if (ret < 0) {
  3376. hif_err("hif_pci_prob_wakeup error = %d", ret);
  3377. if (ret == -EAGAIN)
  3378. probe_again++;
  3379. goto err_tgtstate;
  3380. }
  3381. hif_info("hif_pci_probe_tgt_wakeup done");
  3382. }
  3383. if (!ol_sc->mem_pa) {
  3384. hif_err("BAR0 uninitialized");
  3385. ret = -EIO;
  3386. goto err_tgtstate;
  3387. }
  3388. if (!ce_srng_based(ol_sc)) {
  3389. hif_target_sync(ol_sc);
  3390. if (hif_pci_default_link_up(tgt_info))
  3391. hif_vote_link_up(hif_hdl);
  3392. }
  3393. return QDF_STATUS_SUCCESS;
  3394. err_tgtstate:
  3395. hif_disable_pci(sc);
  3396. sc->pci_enabled = false;
  3397. hif_err("hif_disable_pci done");
  3398. return QDF_STATUS_E_ABORTED;
  3399. err_enable_pci:
  3400. if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
  3401. int delay_time;
  3402. hif_info("pci reprobe");
  3403. /* 10, 40, 90, 100, 100, ... */
  3404. delay_time = max(100, 10 * (probe_again * probe_again));
  3405. qdf_mdelay(delay_time);
  3406. goto again;
  3407. }
  3408. return qdf_status_from_os_return(ret);
  3409. }
  3410. /**
  3411. * hif_pci_irq_enable() - ce_irq_enable
  3412. * @scn: hif_softc
  3413. * @ce_id: ce_id
  3414. *
  3415. * Return: void
  3416. */
  3417. void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
  3418. {
  3419. uint32_t tmp = 1 << ce_id;
  3420. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  3421. qdf_spin_lock_irqsave(&sc->irq_lock);
  3422. scn->ce_irq_summary &= ~tmp;
  3423. if (scn->ce_irq_summary == 0) {
  3424. /* Enable Legacy PCI line interrupts */
  3425. if (LEGACY_INTERRUPTS(sc) &&
  3426. (scn->target_status != TARGET_STATUS_RESET) &&
  3427. (!qdf_atomic_read(&scn->link_suspended))) {
  3428. hif_write32_mb(scn, scn->mem +
  3429. (SOC_CORE_BASE_ADDRESS |
  3430. PCIE_INTR_ENABLE_ADDRESS),
  3431. HOST_GROUP0_MASK);
  3432. hif_read32_mb(scn, scn->mem +
  3433. (SOC_CORE_BASE_ADDRESS |
  3434. PCIE_INTR_ENABLE_ADDRESS));
  3435. }
  3436. }
  3437. if (scn->hif_init_done == true)
  3438. Q_TARGET_ACCESS_END(scn);
  3439. qdf_spin_unlock_irqrestore(&sc->irq_lock);
  3440. /* check for missed firmware crash */
  3441. hif_fw_interrupt_handler(0, scn);
  3442. }
  3443. /**
  3444. * hif_pci_irq_disable() - ce_irq_disable
  3445. * @scn: hif_softc
  3446. * @ce_id: ce_id
  3447. *
  3448. * only applicable to legacy copy engine...
  3449. *
  3450. * Return: void
  3451. */
  3452. void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
  3453. {
  3454. /* For Rome only need to wake up target */
  3455. /* target access is maintained until interrupts are re-enabled */
  3456. Q_TARGET_ACCESS_BEGIN(scn);
  3457. }
  3458. int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
  3459. {
  3460. struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
  3461. /* legacy case only has one irq */
  3462. return pci_scn->irq;
  3463. }
  3464. int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
  3465. {
  3466. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  3467. struct hif_target_info *tgt_info;
  3468. tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
  3469. if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
  3470. tgt_info->target_type == TARGET_TYPE_QCA6390 ||
  3471. tgt_info->target_type == TARGET_TYPE_QCA6490 ||
  3472. tgt_info->target_type == TARGET_TYPE_QCN7605 ||
  3473. tgt_info->target_type == TARGET_TYPE_QCA8074 ||
  3474. tgt_info->target_type == TARGET_TYPE_KIWI ||
  3475. tgt_info->target_type == TARGET_TYPE_MANGO ||
  3476. tgt_info->target_type == TARGET_TYPE_PEACH) {
  3477. /*
  3478. * Need to consider offset's memtype for QCA6290/QCA8074,
  3479. * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
  3480. * well initialized/defined.
  3481. */
  3482. return 0;
  3483. }
  3484. if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE)
  3485. || (offset + sizeof(unsigned int) <= sc->mem_len)) {
  3486. return 0;
  3487. }
  3488. hif_info("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)",
  3489. offset, (uint32_t)(offset + sizeof(unsigned int)),
  3490. sc->mem_len);
  3491. return -EINVAL;
  3492. }
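/*
 * Illustrative example (sizes are hypothetical): with DRAM_BASE_ADDRESS
 * 0x400000, DRAM_SIZE 0x200000 and sc->mem_len 0x80000, offset 0x500000
 * passes the DRAM-range check, offset 0x10000 passes the mem_len check,
 * and offset 0x700004 fails both, so it is logged and rejected with
 * -EINVAL.
 */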
  3493. /**
  3494. * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
  3495. * @scn: hif context
  3496. *
  3497. * Return: true if soc needs driver bmi otherwise false
  3498. */
  3499. bool hif_pci_needs_bmi(struct hif_softc *scn)
  3500. {
  3501. return !ce_srng_based(scn);
  3502. }
  3503. #ifdef FORCE_WAKE
  3504. #if defined(DEVICE_FORCE_WAKE_ENABLE) && !defined(CONFIG_PLD_PCIE_FW_SIM)
  3505. /*
  3506. * HIF_POLL_UMAC_WAKE: poll value indicating that the UMAC is powered up.
  3507. * Update the macro below once FW defines an official value.
  3508. */
  3509. #define HIF_POLL_UMAC_WAKE 0x2
  3510. static inline int hif_soc_wake_request(struct hif_opaque_softc *hif_handle)
  3511. {
  3512. uint32_t timeout, value;
  3513. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  3514. struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
  3515. qdf_spin_lock_bh(&pci_scn->force_wake_lock);
  3516. if ((qdf_atomic_inc_return(&scn->active_wake_req_cnt) > 1)) {
  3517. qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
  3518. return 0;
  3519. }
  3520. hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 1);
  3521. HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
  3522. /*
  3523. * do not reset the timeout
  3524. * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms
  3525. */
  3526. timeout = 0;
  3527. do {
  3528. value = hif_read32_mb(
  3529. scn, scn->mem +
  3530. PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
  3531. if (value == HIF_POLL_UMAC_WAKE)
  3532. break;
  3533. qdf_mdelay(FORCE_WAKE_DELAY_MS);
  3534. timeout += FORCE_WAKE_DELAY_MS;
  3535. } while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);
  3536. if (value != HIF_POLL_UMAC_WAKE) {
  3537. hif_err("force wake handshake failed, reg value = 0x%x",
  3538. value);
  3539. HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
  3540. qdf_atomic_dec(&scn->active_wake_req_cnt);
  3541. qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
  3542. return -ETIMEDOUT;
  3543. }
  3544. HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);
  3545. qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
  3546. return 0;
  3547. }
  3548. static inline void hif_soc_wake_release(struct hif_opaque_softc *hif_handle)
  3549. {
  3550. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  3551. struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
  3552. qdf_spin_lock_bh(&pci_scn->force_wake_lock);
  3553. if (!qdf_atomic_dec_and_test(&scn->active_wake_req_cnt)) {
  3554. qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
  3555. return;
  3556. }
  3557. /* Release umac force wake */
  3558. hif_write32_mb(scn, scn->mem + PCIE_REG_WAKE_UMAC_OFFSET, 0);
  3559. qdf_spin_unlock_bh(&pci_scn->force_wake_lock);
  3560. }
  3561. /**
  3562. * hif_force_wake_request(): Enable the force wake recipe
  3563. * @hif_handle: HIF handle
  3564. *
  3565. * Bring MHI to M0 state and force wake the UMAC by asserting the
  3566. * soc wake reg. Poll the scratch reg to check if its set to
  3567. * HIF_POLL_UMAC_WAKE. The polled value may return 0x1 in case UMAC
  3568. * is powered down.
  3569. *
  3570. * Return: 0 if handshake is successful or ETIMEDOUT in case of failure
  3571. */
  3572. int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
  3573. {
  3574. uint32_t timeout;
  3575. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  3576. struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
  3577. int ret, status = 0;
  3578. /* Prevent runtime PM or trigger resume firstly */
  3579. if (hif_rtpm_get(HIF_RTPM_GET_SYNC, HIF_RTPM_ID_FORCE_WAKE)) {
  3580. hif_err("runtime pm get failed");
  3581. return -EINVAL;
  3582. }
  3583. HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
  3584. if (qdf_in_interrupt())
  3585. timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
  3586. else
  3587. timeout = 0;
  3588. ret = pld_force_wake_request_sync(scn->qdf_dev->dev, timeout);
  3589. if (ret) {
  3590. hif_err("force wake request(timeout %u) send failed: %d",
  3591. timeout, ret);
  3592. HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
  3593. status = -EINVAL;
  3594. goto release_rtpm_ref;
  3595. }
  3596. /* If device's M1 state-change event races here, it can be ignored,
  3597. * as the device is expected to immediately move from M2 to M0
  3598. * without entering low power state.
  3599. */
  3600. if (!pld_is_device_awake(scn->qdf_dev->dev))
  3601. hif_info("state-change event races, ignore");
  3602. HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
  3603. ret = hif_soc_wake_request(hif_handle);
  3604. if (ret) {
  3605. hif_err("soc force wake failed: %d", ret);
  3606. status = ret;
  3607. goto release_mhi_wake;
  3608. }
  3609. return 0;
  3610. release_mhi_wake:
  3611. /* Release MHI force wake */
  3612. ret = pld_force_wake_release(scn->qdf_dev->dev);
  3613. if (ret) {
  3614. hif_err("pld force wake release failure");
  3615. HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
  3616. status = ret;
  3617. } else {
  3618. HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
  3619. }
  3620. release_rtpm_ref:
  3621. /* Release runtime PM force wake */
  3622. ret = hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_FORCE_WAKE);
  3623. if (ret) {
  3624. hif_err("runtime pm put failure: %d", ret);
  3625. return ret;
  3626. }
  3627. return status;
  3628. }
  3629. int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
  3630. {
  3631. int ret, status;
  3632. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  3633. struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
  3634. hif_soc_wake_release(hif_handle);
  3635. /* Release MHI force wake */
  3636. ret = pld_force_wake_release(scn->qdf_dev->dev);
  3637. if (ret) {
  3638. hif_err("pld force wake release failure");
  3639. HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
  3640. goto release_rtpm_ref;
  3641. }
  3642. HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
  3643. HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
  3644. release_rtpm_ref:
  3645. /* Release runtime PM force wake */
  3646. status = hif_rtpm_put(HIF_RTPM_PUT_ASYNC, HIF_RTPM_ID_FORCE_WAKE);
  3647. if (status) {
  3648. hif_err("runtime pm put failure: %d", status);
  3649. return status;
  3650. }
  3651. return ret;
  3652. }
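/*
 * Hedged usage sketch (assumed caller): the force wake pair is expected to
 * bracket direct register accesses that need the UMAC awake, e.g.
 *
 *   if (hif_force_wake_request(hif_hdl))
 *           return -EAGAIN;
 *   val = hif_read32_mb(scn, scn->mem + offset);
 *   hif_force_wake_release(hif_hdl);
 *
 * The request takes a runtime-PM vote, wakes MHI to M0 and asserts the SoC
 * wake register; the release drops them in reverse order.
 */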
  3653. #else /* DEVICE_FORCE_WAKE_ENABLE */
  3654. /**
  3655. * hif_force_wake_request() - Force wake without the PCIE scratch
  3656. * register write/read handshake
  3657. * Return: 0
  3658. */
  3659. int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
  3660. {
  3661. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  3662. struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
  3663. uint32_t timeout;
  3664. int ret;
  3665. HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
  3666. if (qdf_in_interrupt())
  3667. timeout = FORCE_WAKE_DELAY_TIMEOUT_MS * 1000;
  3668. else
  3669. timeout = 0;
  3670. ret = pld_force_wake_request_sync(scn->qdf_dev->dev, timeout);
  3671. if (ret) {
  3672. hif_err("force wake request(timeout %u) send failed: %d",
  3673. timeout, ret);
  3674. HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
  3675. return -EINVAL;
  3676. }
  3677. /* If device's M1 state-change event races here, it can be ignored,
  3678. * as the device is expected to immediately move from M2 to M0
  3679. * without entering low power state.
  3680. */
  3681. if (!pld_is_device_awake(scn->qdf_dev->dev))
  3682. hif_info("state-change event races, ignore");
  3683. HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);
  3684. return 0;
  3685. }
  3686. int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
  3687. {
  3688. int ret;
  3689. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  3690. struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
  3691. ret = pld_force_wake_release(scn->qdf_dev->dev);
  3692. if (ret) {
  3693. hif_err("force wake release failure");
  3694. HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
  3695. return ret;
  3696. }
  3697. HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
  3698. return 0;
  3699. }
  3700. #endif /* DEVICE_FORCE_WAKE_ENABLE */
  3701. void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
  3702. {
  3703. hif_debug("mhi_force_wake_request_vote: %d",
  3704. pci_handle->stats.mhi_force_wake_request_vote);
  3705. hif_debug("mhi_force_wake_failure: %d",
  3706. pci_handle->stats.mhi_force_wake_failure);
  3707. hif_debug("mhi_force_wake_success: %d",
  3708. pci_handle->stats.mhi_force_wake_success);
  3709. hif_debug("soc_force_wake_register_write_success: %d",
  3710. pci_handle->stats.soc_force_wake_register_write_success);
  3711. hif_debug("soc_force_wake_failure: %d",
  3712. pci_handle->stats.soc_force_wake_failure);
  3713. hif_debug("soc_force_wake_success: %d",
  3714. pci_handle->stats.soc_force_wake_success);
  3715. hif_debug("mhi_force_wake_release_failure: %d",
  3716. pci_handle->stats.mhi_force_wake_release_failure);
  3717. hif_debug("mhi_force_wake_release_success: %d",
  3718. pci_handle->stats.mhi_force_wake_release_success);
  3719. hif_debug("soc_force_wake_release_success: %d",
  3720. pci_handle->stats.soc_force_wake_release_success);
  3721. }
  3722. #endif /* FORCE_WAKE */
  3723. #ifdef FEATURE_HAL_DELAYED_REG_WRITE
  3724. int hif_prevent_link_low_power_states(struct hif_opaque_softc *hif)
  3725. {
  3726. return pld_prevent_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
  3727. }
  3728. void hif_allow_link_low_power_states(struct hif_opaque_softc *hif)
  3729. {
  3730. pld_allow_l1(HIF_GET_SOFTC(hif)->qdf_dev->dev);
  3731. }
  3732. #endif
  3733. #ifdef IPA_OPT_WIFI_DP
  3734. int hif_prevent_l1(struct hif_opaque_softc *hif)
  3735. {
  3736. struct hif_softc *hif_softc = (struct hif_softc *)hif;
  3737. int status;
  3738. status = hif_force_wake_request(hif);
  3739. if (status) {
  3740. hif_err("Force wake request error");
  3741. return status;
  3742. }
  3743. qdf_atomic_inc(&hif_softc->opt_wifi_dp_rtpm_cnt);
  3744. hif_info("opt_dp: pcie link up count %d",
  3745. qdf_atomic_read(&hif_softc->opt_wifi_dp_rtpm_cnt));
  3746. return status;
  3747. }
  3748. void hif_allow_l1(struct hif_opaque_softc *hif)
  3749. {
  3750. struct hif_softc *hif_softc = (struct hif_softc *)hif;
  3751. int status;
  3752. if (qdf_atomic_read(&hif_softc->opt_wifi_dp_rtpm_cnt) > 0) {
  3753. status = hif_force_wake_release(hif);
  3754. if (status) {
  3755. hif_err("Force wake release error");
  3756. return;
  3757. }
  3758. qdf_atomic_dec(&hif_softc->opt_wifi_dp_rtpm_cnt);
  3759. hif_info("opt_dp: pcie link down count %d",
  3760. qdf_atomic_read(&hif_softc->opt_wifi_dp_rtpm_cnt));
  3761. }
  3762. }
  3763. #endif
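/*
 * Hedged usage sketch (assumed IPA caller): the opt_wifi_dp path pairs these
 * calls so the PCIe link stays out of L1 while the shared DP rings are being
 * programmed:
 *
 *   if (hif_prevent_l1(hif_hdl))
 *           return;
 *   (program the IPA/WLAN shared rings)
 *   hif_allow_l1(hif_hdl);
 *
 * Every successful hif_prevent_l1() bumps opt_wifi_dp_rtpm_cnt and must be
 * balanced by one hif_allow_l1().
 */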