if_pci.c 124 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468446944704471447244734474447544764477447844794480448144824483448444854486448744884489449044914492449344944495449644974498449945004501450245034504450545064507450845094510451145124513451445154516451745184519452045214522452345244525452645274528452945304531453245334534453545364537453845394540454145424543454445454546454745484549455045514552455345544555455645574558455945604561456245634564456545664567456845694570457145724573457445754576457745784579458045814582458345844585458645874588458945904591459245934594459545964597459845994600460146024603460446054606460746084609461046114612461346144615461646174618461946204621462246234624462546264627462846294630463146324633463446354636463746384639464046414642464346444645464646474648464946504651465246534654465546564657465846594660466146624663466446654666466746684669467046714672467346744675467646774678467946804681468246834684468546864687468846894690469146924693469446954696469746984699470047014702470347044705470647074708470947104711471247134714471547164717471847194720472147224723472447254726472747284729473047314732473347344735473647374738
  1. /*
  2. * Copyright (c) 2013-2019 The Linux Foundation. All rights reserved.
  3. *
  4. * Permission to use, copy, modify, and/or distribute this software for
  5. * any purpose with or without fee is hereby granted, provided that the
  6. * above copyright notice and this permission notice appear in all
  7. * copies.
  8. *
  9. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
  10. * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
  11. * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
  12. * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
  13. * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
  14. * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
  15. * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
  16. * PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include <linux/pci.h>
  19. #include <linux/slab.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/if_arp.h>
  22. #ifdef CONFIG_PCI_MSM
  23. #include <linux/msm_pcie.h>
  24. #endif
  25. #include "hif_io32.h"
  26. #include "if_pci.h"
  27. #include "hif.h"
  28. #include "target_type.h"
  29. #include "hif_main.h"
  30. #include "ce_main.h"
  31. #include "ce_api.h"
  32. #include "ce_internal.h"
  33. #include "ce_reg.h"
  34. #include "ce_bmi.h"
  35. #include "regtable.h"
  36. #include "hif_hw_version.h"
  37. #include <linux/debugfs.h>
  38. #include <linux/seq_file.h>
  39. #include "qdf_status.h"
  40. #include "qdf_atomic.h"
  41. #include "pld_common.h"
  42. #include "mp_dev.h"
  43. #include "hif_debug.h"
  44. #include "if_pci_internal.h"
  45. #include "ce_tasklet.h"
  46. #include "targaddrs.h"
  47. #include "hif_exec.h"
  48. #include "pci_api.h"
  49. #include "ahb_api.h"
  50. /* Maximum ms timeout for host to wake up target */
  51. #define PCIE_WAKE_TIMEOUT 1000
  52. #define RAMDUMP_EVENT_TIMEOUT 2500
/* Setting SOC_GLOBAL_RESET during driver unload causes intermittent
 * PCIe data bus errors.
 * As a workaround for this issue, change the reset sequence to
 * use a Target CPU warm reset instead of SOC_GLOBAL_RESET.
 */
  58. #define CPU_WARM_RESET_WAR
  59. /*
  60. * Top-level interrupt handler for all PCI interrupts from a Target.
  61. * When a block of MSI interrupts is allocated, this top-level handler
  62. * is not used; instead, we directly call the correct sub-handler.
  63. */
/**
 * struct ce_irq_reg_table - pair of CE interrupt register values
 * @irq_enable: interrupt enable register value
 * @irq_status: interrupt status register value
 */
struct ce_irq_reg_table {
	uint32_t irq_enable;
	uint32_t irq_status;
};
#ifndef QCA_WIFI_3_0_ADRASTEA
/* Non-Adrastea targets have no Q6 interrupt routing: no-op stub. */
static inline void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
}
#else
/**
 * hif_pci_route_adrastea_interrupt() - route a pending interrupt to Q6
 * @sc: PCI HIF context
 *
 * Reads both Q6 enable/cause register pairs; if any enabled cause bit
 * is set, masks both Q6 enable registers and, when notice_send is set,
 * notifies the platform driver (pld_intr_notify_q6) to hand the
 * interrupt off to Q6.
 */
static void hif_pci_route_adrastea_interrupt(struct hif_pci_softc *sc)
{
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	unsigned int target_enable0, target_enable1;
	unsigned int target_cause0, target_cause1;

	target_enable0 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0);
	target_enable1 = hif_read32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1);
	target_cause0 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_0);
	target_cause1 = hif_read32_mb(sc, sc->mem + Q6_CAUSE_REGISTER_1);

	if ((target_enable0 & target_cause0) ||
	    (target_enable1 & target_cause1)) {
		/* Mask Q6 interrupts before notifying the platform driver */
		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_0, 0);
		hif_write32_mb(sc, sc->mem + Q6_ENABLE_REGISTER_1, 0);

		if (scn->notice_send)
			pld_intr_notify_q6(sc->dev);
	}
}
#endif
/**
 * pci_dispatch_interrupt() - dispatch pending CE interrupts to tasklets
 * @scn: hif context
 *
 * Reads the CE interrupt summary and invokes ce_dispatch_interrupt()
 * for every copy engine with a pending bit. If no CE interrupt is
 * pending, re-enables the host GROUP0 interrupt mask (unless the
 * target is in reset or the link is suspended) and returns.
 *
 * Return: N/A
 */
static void pci_dispatch_interrupt(struct hif_softc *scn)
{
	uint32_t intr_summary;
	int id;
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);

	/* Drop interrupts that arrive before HIF initialization is done */
	if (scn->hif_init_done != true)
		return;

	if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(scn);
	if (intr_summary == 0) {
		/* No CE interrupt pending: re-arm GROUP0 unless the
		 * target is resetting or the PCIe link is suspended.
		 */
		if ((scn->target_status != TARGET_STATUS_RESET) &&
		    (!qdf_atomic_read(&scn->link_suspended))) {
			hif_write32_mb(scn, scn->mem +
				       (SOC_CORE_BASE_ADDRESS |
					PCIE_INTR_ENABLE_ADDRESS),
				       HOST_GROUP0_MASK);
			/* read back to flush the posted write */
			hif_read32_mb(scn, scn->mem +
				      (SOC_CORE_BASE_ADDRESS |
				       PCIE_INTR_ENABLE_ADDRESS));
		}
		Q_TARGET_ACCESS_END(scn);
		return;
	}
	Q_TARGET_ACCESS_END(scn);

	scn->ce_irq_summary = intr_summary;
	/* Dispatch each pending CE; clear bits as they are handled so the
	 * loop can exit early once the summary is exhausted.
	 */
	for (id = 0; intr_summary && (id < scn->ce_count); id++) {
		if (intr_summary & (1 << id)) {
			intr_summary &= ~(1 << id);
			ce_dispatch_interrupt(id, &hif_state->tasklets[id]);
		}
	}
}
/**
 * hif_pci_legacy_ce_interrupt_handler() - top-level legacy/INTx PCI ISR
 * @irq: interrupt number that fired
 * @arg: opaque pointer to the hif_pci_softc registered with the IRQ
 *
 * For legacy (non-MSI) interrupts: clears the PCI line interrupt
 * (INTR_ENABLE must be zeroed before INTR_CLR for the clear to take
 * effect), verifies the SoC is still responsive, and checks the FW
 * indicator for a pending firmware (SSR) event. An SSR event is handed
 * to the tasklet; otherwise pending CE interrupts are dispatched.
 *
 * Return: IRQ_HANDLED in all cases
 */
irqreturn_t hif_pci_legacy_ce_interrupt_handler(int irq, void *arg)
{
	struct hif_pci_softc *sc = (struct hif_pci_softc *)arg;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
	volatile int tmp;	/* volatile: read is a bus flush, must not be elided */
	uint16_t val = 0;
	uint32_t bar0 = 0;
	uint32_t fw_indicator_address, fw_indicator;
	bool ssr_irq = false;
	unsigned int host_cause, host_enable;

	if (LEGACY_INTERRUPTS(sc)) {
		if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
			return IRQ_HANDLED;

		if (ADRASTEA_BU) {
			host_enable = hif_read32_mb(sc, sc->mem +
						    PCIE_INTR_ENABLE_ADDRESS);
			host_cause = hif_read32_mb(sc, sc->mem +
						   PCIE_INTR_CAUSE_ADDRESS);
			/* Interrupt not meant for the host: route it to Q6 */
			if (!(host_enable & host_cause)) {
				hif_pci_route_adrastea_interrupt(sc);
				return IRQ_HANDLED;
			}
		}

		/* Clear Legacy PCI line interrupts
		 * IMPORTANT: INTR_CLR register has to be set
		 * after INTR_ENABLE is set to 0,
		 * otherwise interrupt can not be really cleared
		 */
		hif_write32_mb(sc, sc->mem +
			       (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS), 0);
		hif_write32_mb(sc, sc->mem +
			       (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CLR_ADDRESS),
			       ADRASTEA_BU ?
			       (host_enable & host_cause) :
			       HOST_GROUP0_MASK);

		if (ADRASTEA_BU)
			hif_write32_mb(sc, sc->mem + 0x2f100c,
				       (host_cause >> 1));

		/* IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer
		 */
		if (!ADRASTEA_BU) {
			tmp =
				hif_read32_mb(sc, sc->mem +
					      (SOC_CORE_BASE_ADDRESS |
					       PCIE_INTR_ENABLE_ADDRESS));
			/* 0xdeadbeef: SoC/bus is wedged; dump PCI config
			 * space and key registers for post-mortem, then BUG.
			 */
			if (tmp == 0xdeadbeef) {
				HIF_ERROR("BUG(%s): SoC returns 0xdeadbeef!!",
					  __func__);
				pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
				HIF_ERROR("%s: PCI Vendor ID = 0x%04x",
					  __func__, val);
				pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
				HIF_ERROR("%s: PCI Device ID = 0x%04x",
					  __func__, val);
				pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
				HIF_ERROR("%s: PCI Command = 0x%04x", __func__,
					  val);
				pci_read_config_word(sc->pdev, PCI_STATUS, &val);
				HIF_ERROR("%s: PCI Status = 0x%04x", __func__,
					  val);
				pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0,
						      &bar0);
				HIF_ERROR("%s: PCI BAR0 = 0x%08x", __func__,
					  bar0);
				HIF_ERROR("%s: RTC_STATE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ RTC_STATE_ADDRESS));
				HIF_ERROR("%s: PCIE_SOC_WAKE_ADDRESS = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem +
							PCIE_LOCAL_BASE_ADDRESS
							+ PCIE_SOC_WAKE_ADDRESS));
				HIF_ERROR("%s: 0x80008 = 0x%08x, 0x8000c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80008),
					  hif_read32_mb(sc, sc->mem + 0x8000c));
				HIF_ERROR("%s: 0x80010 = 0x%08x, 0x80014 = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80010),
					  hif_read32_mb(sc, sc->mem + 0x80014));
				HIF_ERROR("%s: 0x80018 = 0x%08x, 0x8001c = 0x%08x",
					  __func__,
					  hif_read32_mb(sc, sc->mem + 0x80018),
					  hif_read32_mb(sc, sc->mem + 0x8001c));
				QDF_BUG(0);
			}

			PCI_CLR_CAUSE0_REGISTER(sc);
		}

		if (HAS_FW_INDICATOR) {
			/* ~0 indicates an unreadable register; only treat a
			 * valid read with FW_IND_EVENT_PENDING as an SSR IRQ.
			 */
			fw_indicator_address = hif_state->fw_indicator_address;
			fw_indicator = A_TARGET_READ(scn, fw_indicator_address);
			if ((fw_indicator != ~0) &&
			    (fw_indicator & FW_IND_EVENT_PENDING))
				ssr_irq = true;
		}

		if (Q_TARGET_ACCESS_END(scn) < 0)
			return IRQ_HANDLED;
	}
	/* TBDXXX: Add support for WMAC */

	if (ssr_irq) {
		/* Firmware event pending: defer to the interrupt tasklet */
		sc->irq_event = irq;
		qdf_atomic_set(&scn->tasklet_from_intr, 1);
		qdf_atomic_inc(&scn->active_tasklet_cnt);
		tasklet_schedule(&sc->intr_tq);
	} else {
		pci_dispatch_interrupt(scn);
	}

	return IRQ_HANDLED;
}
  244. bool hif_pci_targ_is_present(struct hif_softc *scn, void *__iomem *mem)
  245. {
  246. return 1; /* FIX THIS */
  247. }
  248. int hif_get_irq_num(struct hif_opaque_softc *scn, int *irq, uint32_t size)
  249. {
  250. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  251. int i = 0;
  252. if (!irq || !size) {
  253. return -EINVAL;
  254. }
  255. if (!sc->num_msi_intrs || sc->num_msi_intrs == 1) {
  256. irq[0] = sc->irq;
  257. return 1;
  258. }
  259. if (sc->num_msi_intrs > size) {
  260. qdf_print("Not enough space in irq buffer to return irqs");
  261. return -EINVAL;
  262. }
  263. for (i = 0; i < sc->num_msi_intrs; i++) {
  264. irq[i] = sc->irq + i + MSI_ASSIGN_CE_INITIAL;
  265. }
  266. return sc->num_msi_intrs;
  267. }
/**
 * hif_pci_cancel_deferred_target_sleep() - cancels the deferred target sleep
 * @scn: hif_softc
 *
 * Return: void
 */
#if CONFIG_ATH_PCIE_MAX_PERF == 0
void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
	struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
	A_target_id_t pci_addr = scn->mem;

	qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
	/*
	 * If the deferred sleep timer is running cancel it
	 * and put the soc into sleep.
	 */
	if (hif_state->fake_sleep == true) {
		qdf_timer_stop(&hif_state->sleep_timer);
		if (hif_state->verified_awake == false) {
			/* Target never confirmed awake: drop the wake
			 * request so the SoC can actually sleep.
			 */
			hif_write32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
				       PCIE_SOC_WAKE_ADDRESS,
				       PCIE_SOC_WAKE_RESET);
		}
		hif_state->fake_sleep = false;
	}
	qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
}
#else
/* With CONFIG_ATH_PCIE_MAX_PERF the target is kept awake, so there is
 * no deferred sleep to cancel: no-op stub.
 */
inline void hif_pci_cancel_deferred_target_sleep(struct hif_softc *scn)
{
}
#endif
/* Accessors for registers in the PCIe local register space: @addr is an
 * offset relative to PCIE_LOCAL_BASE_ADDRESS within the mapped BAR.
 */
#define A_PCIE_LOCAL_REG_READ(sc, mem, addr) \
	hif_read32_mb(sc, (char *)(mem) + \
		      PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr))

#define A_PCIE_LOCAL_REG_WRITE(sc, mem, addr, val) \
	hif_write32_mb(sc, ((char *)(mem) + \
			    PCIE_LOCAL_BASE_ADDRESS + (uint32_t)(addr)), (val))
#ifdef QCA_WIFI_3_0
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @hif_ctx: hif context
 * @mem: mapped target register base (unused)
 *
 * emulation never goes to sleep
 *
 * Return: true if target is awake
 */
static bool hif_targ_is_awake(struct hif_softc *hif_ctx, void *__iomem *mem)
{
	return true;
}
#else
/**
 * hif_targ_is_awake() - check to see if the target is awake
 * @scn: hif context
 * @mem: mapped target register base
 *
 * Return: true if the targets clocks are on
 */
static bool hif_targ_is_awake(struct hif_softc *scn, void *__iomem *mem)
{
	uint32_t val;

	/* During recovery the target may be unresponsive; report asleep */
	if (scn->recovery)
		return false;

	val = hif_read32_mb(scn, mem + PCIE_LOCAL_BASE_ADDRESS
			    + RTC_STATE_ADDRESS);

	/* Clocks are on only when the RTC state machine reports fully ON */
	return (RTC_STATE_V_GET(val) & RTC_STATE_V_ON) == RTC_STATE_V_ON;
}
#endif
#define ATH_PCI_RESET_WAIT_MAX 10 /* Ms */

/**
 * hif_pci_device_reset() - cold-reset the target via SOC_GLOBAL_RESET
 * @sc: PCI HIF context
 *
 * Wakes the target (mandatory before touching SOC_GLOBAL_RESET),
 * asserts the global reset bit, polls RTC_STATE for the cold-reset
 * indication, de-asserts the reset bit, polls for the indication to
 * clear, then releases the wake request.
 */
static void hif_pci_device_reset(struct hif_pci_softc *sc)
{
	void __iomem *mem = sc->mem;
	int i;
	uint32_t val;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);

	if (!scn->hostdef)
		return;

	/* NB: Don't check resetok here. This form of reset
	 * is integral to correct operation.
	 */
	if (!SOC_GLOBAL_RESET_ADDRESS)
		return;

	if (!mem)
		return;

	HIF_ERROR("%s: Reset Device", __func__);

	/*
	 * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
	 * writing WAKE_V, the Target may scribble over Host memory!
	 */
	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (hif_targ_is_awake(scn, mem))
			break;
		qdf_mdelay(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = A_PCIE_LOCAL_REG_READ(sc, mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;
		qdf_mdelay(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	A_PCIE_LOCAL_REG_WRITE(sc, mem, SOC_GLOBAL_RESET_ADDRESS, val);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!
		    (A_PCIE_LOCAL_REG_READ(sc, mem, RTC_STATE_ADDRESS) &
		     RTC_STATE_COLD_RESET_MASK))
			break;
		qdf_mdelay(1);
	}

	/* Done: release the wake request so the target may sleep again */
	A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_RESET);
}
/* CPU warm reset function
 * Steps:
 * 1. Disable all pending interrupts - so no pending interrupts on WARM reset
 * 2. Clear the FW_INDICATOR_ADDRESS - so the Target CPU initializes FW
 *    correctly on WARM reset
 * 3. Clear Target CPU LF timer interrupt
 * 4. Reset all CEs to clear any pending CE transactions
 * 5. Warm reset CPU
 */
  396. static void hif_pci_device_warm_reset(struct hif_pci_softc *sc)
  397. {
  398. void __iomem *mem = sc->mem;
  399. int i;
  400. uint32_t val;
  401. uint32_t fw_indicator;
  402. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  403. /* NB: Don't check resetok here. This form of reset is
  404. * integral to correct operation.
  405. */
  406. if (!mem)
  407. return;
  408. HIF_INFO_MED("%s: Target Warm Reset", __func__);
  409. /*
  410. * NB: If we try to write SOC_GLOBAL_RESET_ADDRESS without first
  411. * writing WAKE_V, the Target may scribble over Host memory!
  412. */
  413. A_PCIE_LOCAL_REG_WRITE(sc, mem, PCIE_SOC_WAKE_ADDRESS,
  414. PCIE_SOC_WAKE_V_MASK);
  415. for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
  416. if (hif_targ_is_awake(scn, mem))
  417. break;
  418. qdf_mdelay(1);
  419. }
  420. /*
  421. * Disable Pending interrupts
  422. */
  423. val =
  424. hif_read32_mb(sc, mem +
  425. (SOC_CORE_BASE_ADDRESS |
  426. PCIE_INTR_CAUSE_ADDRESS));
  427. HIF_INFO_MED("%s: Host Intr Cause reg 0x%x : value : 0x%x", __func__,
  428. (SOC_CORE_BASE_ADDRESS | PCIE_INTR_CAUSE_ADDRESS), val);
  429. /* Target CPU Intr Cause */
  430. val = hif_read32_mb(sc, mem +
  431. (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
  432. HIF_INFO_MED("%s: Target CPU Intr Cause 0x%x", __func__, val);
  433. val =
  434. hif_read32_mb(sc, mem +
  435. (SOC_CORE_BASE_ADDRESS |
  436. PCIE_INTR_ENABLE_ADDRESS));
  437. hif_write32_mb(sc, (mem +
  438. (SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS)), 0);
  439. hif_write32_mb(sc, (mem +
  440. (SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS)),
  441. HOST_GROUP0_MASK);
  442. qdf_mdelay(100);
  443. /* Clear FW_INDICATOR_ADDRESS */
  444. if (HAS_FW_INDICATOR) {
  445. fw_indicator = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
  446. hif_write32_mb(sc, mem + FW_INDICATOR_ADDRESS, 0);
  447. }
  448. /* Clear Target LF Timer interrupts */
  449. val =
  450. hif_read32_mb(sc, mem +
  451. (RTC_SOC_BASE_ADDRESS +
  452. SOC_LF_TIMER_CONTROL0_ADDRESS));
  453. HIF_INFO_MED("%s: addr 0x%x : 0x%x", __func__,
  454. (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS), val);
  455. val &= ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK;
  456. hif_write32_mb(sc, mem +
  457. (RTC_SOC_BASE_ADDRESS + SOC_LF_TIMER_CONTROL0_ADDRESS),
  458. val);
  459. /* Reset CE */
  460. val =
  461. hif_read32_mb(sc, mem +
  462. (RTC_SOC_BASE_ADDRESS |
  463. SOC_RESET_CONTROL_ADDRESS));
  464. val |= SOC_RESET_CONTROL_CE_RST_MASK;
  465. hif_write32_mb(sc, (mem +
  466. (RTC_SOC_BASE_ADDRESS | SOC_RESET_CONTROL_ADDRESS)),
  467. val);
  468. val =
  469. hif_read32_mb(sc, mem +
  470. (RTC_SOC_BASE_ADDRESS |
  471. SOC_RESET_CONTROL_ADDRESS));
  472. qdf_mdelay(10);
  473. /* CE unreset */
  474. val &= ~SOC_RESET_CONTROL_CE_RST_MASK;
  475. hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
  476. SOC_RESET_CONTROL_ADDRESS), val);
  477. val =
  478. hif_read32_mb(sc, mem +
  479. (RTC_SOC_BASE_ADDRESS |
  480. SOC_RESET_CONTROL_ADDRESS));
  481. qdf_mdelay(10);
  482. /* Read Target CPU Intr Cause */
  483. val = hif_read32_mb(sc, mem +
  484. (SOC_CORE_BASE_ADDRESS | CPU_INTR_ADDRESS));
  485. HIF_INFO_MED("%s: Target CPU Intr Cause after CE reset 0x%x",
  486. __func__, val);
  487. /* CPU warm RESET */
  488. val =
  489. hif_read32_mb(sc, mem +
  490. (RTC_SOC_BASE_ADDRESS |
  491. SOC_RESET_CONTROL_ADDRESS));
  492. val |= SOC_RESET_CONTROL_CPU_WARM_RST_MASK;
  493. hif_write32_mb(sc, mem + (RTC_SOC_BASE_ADDRESS |
  494. SOC_RESET_CONTROL_ADDRESS), val);
  495. val =
  496. hif_read32_mb(sc, mem +
  497. (RTC_SOC_BASE_ADDRESS |
  498. SOC_RESET_CONTROL_ADDRESS));
  499. HIF_INFO_MED("%s: RESET_CONTROL after cpu warm reset 0x%x",
  500. __func__, val);
  501. qdf_mdelay(100);
  502. HIF_INFO_MED("%s: Target Warm reset complete", __func__);
  503. }
  504. #ifndef QCA_WIFI_3_0
  505. /* only applicable to legacy ce */
  506. int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
  507. {
  508. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  509. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  510. void __iomem *mem = sc->mem;
  511. uint32_t val;
  512. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  513. return ATH_ISR_NOSCHED;
  514. val = hif_read32_mb(sc, mem + FW_INDICATOR_ADDRESS);
  515. if (Q_TARGET_ACCESS_END(scn) < 0)
  516. return ATH_ISR_SCHED;
  517. HIF_INFO_MED("%s: FW_INDICATOR register is 0x%x", __func__, val);
  518. if (val & FW_IND_HELPER)
  519. return 0;
  520. return 1;
  521. }
  522. #endif
  523. int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
  524. {
  525. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  526. uint16_t device_id = 0;
  527. uint32_t val;
  528. uint16_t timeout_count = 0;
  529. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  530. /* Check device ID from PCIe configuration space for link status */
  531. pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &device_id);
  532. if (device_id != sc->devid) {
  533. HIF_ERROR("%s: device ID does match (read 0x%x, expect 0x%x)",
  534. __func__, device_id, sc->devid);
  535. return -EACCES;
  536. }
  537. /* Check PCIe local register for bar/memory access */
  538. val = hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  539. RTC_STATE_ADDRESS);
  540. HIF_INFO_MED("%s: RTC_STATE_ADDRESS is %08x", __func__, val);
  541. /* Try to wake up taget if it sleeps */
  542. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  543. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  544. HIF_INFO_MED("%s: PCIE_SOC_WAKE_ADDRESS is %08x", __func__,
  545. hif_read32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  546. PCIE_SOC_WAKE_ADDRESS));
  547. /* Check if taget can be woken up */
  548. while (!hif_targ_is_awake(scn, sc->mem)) {
  549. if (timeout_count >= PCIE_WAKE_TIMEOUT) {
  550. HIF_ERROR("%s: wake up timeout, %08x, %08x",
  551. __func__,
  552. hif_read32_mb(sc, sc->mem +
  553. PCIE_LOCAL_BASE_ADDRESS +
  554. RTC_STATE_ADDRESS),
  555. hif_read32_mb(sc, sc->mem +
  556. PCIE_LOCAL_BASE_ADDRESS +
  557. PCIE_SOC_WAKE_ADDRESS));
  558. return -EACCES;
  559. }
  560. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  561. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  562. qdf_mdelay(100);
  563. timeout_count += 100;
  564. }
  565. /* Check Power register for SoC internal bus issues */
  566. val =
  567. hif_read32_mb(sc, sc->mem + RTC_SOC_BASE_ADDRESS +
  568. SOC_POWER_REG_OFFSET);
  569. HIF_INFO_MED("%s: Power register is %08x", __func__, val);
  570. return 0;
  571. }
/**
 * __hif_pci_dump_registers(): dump other PCI debug registers
 * @scn: struct hif_softc
 *
 * This function dumps pci debug registers. The parent function
 * dumps the copy engine registers before calling this function.
 *
 * Return: void
 */
static void __hif_pci_dump_registers(struct hif_softc *scn)
{
    struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
    void __iomem *mem = sc->mem;
    uint32_t val, i, j;
    uint32_t wrapper_idx[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
    uint32_t ce_base;

    if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
        return;

    /* DEBUG_INPUT_SEL_SRC = 0x6 */
    val =
        hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                      WLAN_DEBUG_INPUT_SEL_OFFSET);
    val &= ~WLAN_DEBUG_INPUT_SEL_SRC_MASK;
    val |= WLAN_DEBUG_INPUT_SEL_SRC_SET(0x6);
    hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
                   WLAN_DEBUG_INPUT_SEL_OFFSET, val);

    /* DEBUG_CONTROL_ENABLE = 0x1 */
    val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                        WLAN_DEBUG_CONTROL_OFFSET);
    val &= ~WLAN_DEBUG_CONTROL_ENABLE_MASK;
    val |= WLAN_DEBUG_CONTROL_ENABLE_SET(0x1);
    hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
                   WLAN_DEBUG_CONTROL_OFFSET, val);

    HIF_INFO_MED("%s: Debug: inputsel: %x dbgctrl: %x", __func__,
                 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                               WLAN_DEBUG_INPUT_SEL_OFFSET),
                 hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                               WLAN_DEBUG_CONTROL_OFFSET));

    HIF_INFO_MED("%s: Debug CE", __func__);
    /* Loop CE debug output */
    /* AMBA_DEBUG_BUS_SEL = 0xc */
    val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                        AMBA_DEBUG_BUS_OFFSET);
    val &= ~AMBA_DEBUG_BUS_SEL_MASK;
    val |= AMBA_DEBUG_BUS_SEL_SET(0xc);
    hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS + AMBA_DEBUG_BUS_OFFSET,
                   val);

    for (i = 0; i < sizeof(wrapper_idx) / sizeof(uint32_t); i++) {
        /* For (i=1,2,3,4,8,9) write CE_WRAPPER_DEBUG_SEL = i
         * NOTE(review): comment inherited from original; the table
         * actually iterates 1..9 - confirm intended selector set.
         */
        val = hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
                            CE_WRAPPER_DEBUG_OFFSET);
        val &= ~CE_WRAPPER_DEBUG_SEL_MASK;
        val |= CE_WRAPPER_DEBUG_SEL_SET(wrapper_idx[i]);
        hif_write32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
                       CE_WRAPPER_DEBUG_OFFSET, val);

        HIF_INFO_MED("%s: ce wrapper: %d amdbg: %x cewdbg: %x",
                     __func__, wrapper_idx[i],
                     hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                                   AMBA_DEBUG_BUS_OFFSET),
                     hif_read32_mb(sc, mem + CE_WRAPPER_BASE_ADDRESS +
                                   CE_WRAPPER_DEBUG_OFFSET));

        if (wrapper_idx[i] <= 7) {
            for (j = 0; j <= 5; j++) {
                ce_base = CE_BASE_ADDRESS(wrapper_idx[i]);
                /* For (j=0~5) write CE_DEBUG_SEL = j */
                val =
                    hif_read32_mb(sc, mem + ce_base +
                                  CE_DEBUG_OFFSET);
                val &= ~CE_DEBUG_SEL_MASK;
                val |= CE_DEBUG_SEL_SET(j);
                hif_write32_mb(sc, mem + ce_base +
                               CE_DEBUG_OFFSET, val);

                /* read (@gpio_athr_wlan_reg)
                 * WLAN_DEBUG_OUT_DATA
                 */
                val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS
                                    + WLAN_DEBUG_OUT_OFFSET);
                val = WLAN_DEBUG_OUT_DATA_GET(val);

                HIF_INFO_MED("%s: module%d: cedbg: %x out: %x",
                             __func__, j,
                             hif_read32_mb(sc, mem + ce_base +
                                           CE_DEBUG_OFFSET), val);
            }
        } else {
            /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
            val =
                hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                              WLAN_DEBUG_OUT_OFFSET);
            val = WLAN_DEBUG_OUT_DATA_GET(val);
            HIF_INFO_MED("%s: out: %x", __func__, val);
        }
    }

    HIF_INFO_MED("%s: Debug PCIe:", __func__);
    /* Loop PCIe debug output */
    /* Write AMBA_DEBUG_BUS_SEL = 0x1c */
    val = hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                        AMBA_DEBUG_BUS_OFFSET);
    val &= ~AMBA_DEBUG_BUS_SEL_MASK;
    val |= AMBA_DEBUG_BUS_SEL_SET(0x1c);
    hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
                   AMBA_DEBUG_BUS_OFFSET, val);

    /* NOTE(review): loop runs i=0..8 while the comment below says
     * 1~8; confirm whether selector 0 is meaningful.
     */
    for (i = 0; i <= 8; i++) {
        /* For (i=1~8) write AMBA_DEBUG_BUS_PCIE_DEBUG_SEL = i */
        val =
            hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                          AMBA_DEBUG_BUS_OFFSET);
        val &= ~AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_MASK;
        val |= AMBA_DEBUG_BUS_PCIE_DEBUG_SEL_SET(i);
        hif_write32_mb(sc, mem + GPIO_BASE_ADDRESS +
                       AMBA_DEBUG_BUS_OFFSET, val);

        /* read (@gpio_athr_wlan_reg) WLAN_DEBUG_OUT_DATA */
        val =
            hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                          WLAN_DEBUG_OUT_OFFSET);
        val = WLAN_DEBUG_OUT_DATA_GET(val);

        HIF_INFO_MED("%s: amdbg: %x out: %x %x", __func__,
                     hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                                   WLAN_DEBUG_OUT_OFFSET), val,
                     hif_read32_mb(sc, mem + GPIO_BASE_ADDRESS +
                                   WLAN_DEBUG_OUT_OFFSET));
    }

    Q_TARGET_ACCESS_END(scn);
}
  695. /**
  696. * hif_dump_registers(): dump bus debug registers
  697. * @scn: struct hif_opaque_softc
  698. *
  699. * This function dumps hif bus debug registers
  700. *
  701. * Return: 0 for success or error code
  702. */
  703. int hif_pci_dump_registers(struct hif_softc *hif_ctx)
  704. {
  705. int status;
  706. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  707. status = hif_dump_ce_registers(scn);
  708. if (status)
  709. HIF_ERROR("%s: Dump CE Registers Failed", __func__);
  710. /* dump non copy engine pci registers */
  711. __hif_pci_dump_registers(scn);
  712. return 0;
  713. }
  714. #ifdef HIF_CONFIG_SLUB_DEBUG_ON
/* worker thread to schedule wlan_tasklet in SLUB debug build */
static void reschedule_tasklet_work_handler(void *arg)
{
    /* arg is expected to be the hif_pci_softc the work was created for */
    struct hif_pci_softc *sc = arg;
    struct hif_softc *scn = HIF_GET_SOFTC(sc);

    /* Bail out if the context is gone or the driver never finished init */
    if (!scn) {
        HIF_ERROR("%s: hif_softc is NULL\n", __func__);
        return;
    }

    if (scn->hif_init_done == false) {
        HIF_ERROR("%s: wlan driver is unloaded", __func__);
        return;
    }

    /* Re-run the deferred interrupt tasklet */
    tasklet_schedule(&sc->intr_tq);
}
  730. /**
  731. * hif_init_reschedule_tasklet_work() - API to initialize reschedule tasklet
  732. * work
  733. * @sc: HIF PCI Context
  734. *
  735. * Return: void
  736. */
  737. static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc)
  738. {
  739. qdf_create_work(0, &sc->reschedule_tasklet_work,
  740. reschedule_tasklet_work_handler, NULL);
  741. }
  742. #else
  743. static void hif_init_reschedule_tasklet_work(struct hif_pci_softc *sc) { }
  744. #endif /* HIF_CONFIG_SLUB_DEBUG_ON */
  745. void wlan_tasklet(unsigned long data)
  746. {
  747. struct hif_pci_softc *sc = (struct hif_pci_softc *)data;
  748. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  749. if (scn->hif_init_done == false)
  750. goto end;
  751. if (qdf_atomic_read(&scn->link_suspended))
  752. goto end;
  753. if (!ADRASTEA_BU) {
  754. (irqreturn_t) hif_fw_interrupt_handler(sc->irq_event, scn);
  755. if (scn->target_status == TARGET_STATUS_RESET)
  756. goto end;
  757. }
  758. end:
  759. qdf_atomic_set(&scn->tasklet_from_intr, 0);
  760. qdf_atomic_dec(&scn->active_tasklet_cnt);
  761. }
  762. #ifdef FEATURE_RUNTIME_PM
  763. static const char *hif_pm_runtime_state_to_string(uint32_t state)
  764. {
  765. switch (state) {
  766. case HIF_PM_RUNTIME_STATE_NONE:
  767. return "INIT_STATE";
  768. case HIF_PM_RUNTIME_STATE_ON:
  769. return "ON";
  770. case HIF_PM_RUNTIME_STATE_RESUMING:
  771. return "RESUMING";
  772. case HIF_PM_RUNTIME_STATE_SUSPENDING:
  773. return "SUSPENDING";
  774. case HIF_PM_RUNTIME_STATE_SUSPENDED:
  775. return "SUSPENDED";
  776. default:
  777. return "INVALID STATE";
  778. }
  779. }
/* Emit one pm_stats counter as a "name: value" row in the debugfs dump */
#define HIF_PCI_RUNTIME_PM_STATS(_s, _sc, _name) \
    seq_printf(_s, "%30s: %u\n", #_name, _sc->pm_stats._name)
/**
 * hif_pci_runtime_pm_warn() - Runtime PM Debugging API
 * @sc: hif_pci_softc context
 * @msg: log message
 *
 * log runtime pm stats when something seems off.
 *
 * Return: void
 */
static void hif_pci_runtime_pm_warn(struct hif_pci_softc *sc, const char *msg)
{
    struct hif_pm_runtime_lock *ctx;

    /* Snapshot of usage count, pm state, and every pm_stats counter */
    HIF_ERROR("%s: usage_count: %d, pm_state: %s, prevent_suspend_cnt: %d",
              msg, atomic_read(&sc->dev->power.usage_count),
              hif_pm_runtime_state_to_string(
                  atomic_read(&sc->pm_state)),
              sc->prevent_suspend_cnt);
    HIF_ERROR("runtime_status: %d, runtime_error: %d, disable_depth: %d autosuspend_delay: %d",
              sc->dev->power.runtime_status,
              sc->dev->power.runtime_error,
              sc->dev->power.disable_depth,
              sc->dev->power.autosuspend_delay);
    HIF_ERROR("runtime_get: %u, runtime_put: %u, request_resume: %u",
              sc->pm_stats.runtime_get, sc->pm_stats.runtime_put,
              sc->pm_stats.request_resume);
    HIF_ERROR("allow_suspend: %u, prevent_suspend: %u",
              sc->pm_stats.allow_suspend,
              sc->pm_stats.prevent_suspend);
    HIF_ERROR("prevent_suspend_timeout: %u, allow_suspend_timeout: %u",
              sc->pm_stats.prevent_suspend_timeout,
              sc->pm_stats.allow_suspend_timeout);
    HIF_ERROR("Suspended: %u, resumed: %u count",
              sc->pm_stats.suspended,
              sc->pm_stats.resumed);
    HIF_ERROR("suspend_err: %u, runtime_get_err: %u",
              sc->pm_stats.suspend_err,
              sc->pm_stats.runtime_get_err);
    HIF_ERROR("Active Wakeup Sources preventing Runtime Suspend: ");
    /* NOTE(review): prevent_suspend_list is walked here without taking
     * runtime_lock (unlike the debugfs path) - presumably callers hold
     * it or this is best-effort debug output; confirm.
     */
    list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
        HIF_ERROR("source %s; timeout %d ms", ctx->name, ctx->timeout);
    }

    /* Deliberate stack trace so the anomaly is visible in the kernel log */
    WARN_ON(1);
}
/**
 * hif_pci_pm_runtime_debugfs_show(): show debug stats for runtimepm
 * @s: file to print to
 * @data: unused
 *
 * debugging tool added to the debug fs for displaying runtimepm stats
 *
 * Return: 0
 */
static int hif_pci_pm_runtime_debugfs_show(struct seq_file *s, void *data)
{
    struct hif_pci_softc *sc = s->private;
    /* Index order must match the HIF_PM_RUNTIME_STATE_* enum values */
    static const char * const autopm_state[] = {"NONE", "ON", "RESUMING",
        "SUSPENDING", "SUSPENDED"};
    unsigned int msecs_age;
    qdf_time_t usecs_age;
    int pm_state = atomic_read(&sc->pm_state);
    unsigned long timer_expires;
    struct hif_pm_runtime_lock *ctx;

    seq_printf(s, "%30s: %s\n", "Runtime PM state",
               autopm_state[pm_state]);
    seq_printf(s, "%30s: %pf\n", "Last Resume Caller",
               sc->pm_stats.last_resume_caller);
    seq_printf(s, "%30s: %pf\n", "Last Busy Marker",
               sc->pm_stats.last_busy_marker);

    /* Timestamps are in microseconds; print as seconds.microseconds */
    usecs_age = qdf_get_log_timestamp_usecs() -
        sc->pm_stats.last_busy_timestamp;
    seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Timestamp",
               sc->pm_stats.last_busy_timestamp / 1000000,
               sc->pm_stats.last_busy_timestamp % 1000000);
    seq_printf(s, "%30s: %lu.%06lus\n", "Last Busy Since",
               usecs_age / 1000000, usecs_age % 1000000);

    if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED) {
        msecs_age = jiffies_to_msecs(jiffies -
                                     sc->pm_stats.suspend_jiffies);
        seq_printf(s, "%30s: %d.%03ds\n", "Suspended Since",
                   msecs_age / 1000, msecs_age % 1000);
    }

    seq_printf(s, "%30s: %d\n", "PM Usage count",
               atomic_read(&sc->dev->power.usage_count));

    seq_printf(s, "%30s: %u\n", "prevent_suspend_cnt",
               sc->prevent_suspend_cnt);

    HIF_PCI_RUNTIME_PM_STATS(s, sc, suspended);
    HIF_PCI_RUNTIME_PM_STATS(s, sc, suspend_err);
    HIF_PCI_RUNTIME_PM_STATS(s, sc, resumed);
    HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get);
    HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_put);
    HIF_PCI_RUNTIME_PM_STATS(s, sc, request_resume);
    HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend);
    HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend);
    HIF_PCI_RUNTIME_PM_STATS(s, sc, prevent_suspend_timeout);
    HIF_PCI_RUNTIME_PM_STATS(s, sc, allow_suspend_timeout);
    HIF_PCI_RUNTIME_PM_STATS(s, sc, runtime_get_err);

    /* 0 means no prevent-suspend timeout is armed */
    timer_expires = sc->runtime_timer_expires;
    if (timer_expires > 0) {
        msecs_age = jiffies_to_msecs(timer_expires - jiffies);
        seq_printf(s, "%30s: %d.%03ds\n", "Prevent suspend timeout",
                   msecs_age / 1000, msecs_age % 1000);
    }

    /* The wakeup-source list is mutated elsewhere; hold runtime_lock
     * while walking it.
     */
    spin_lock_bh(&sc->runtime_lock);
    if (list_empty(&sc->prevent_suspend_list)) {
        spin_unlock_bh(&sc->runtime_lock);
        return 0;
    }

    seq_printf(s, "%30s: ", "Active Wakeup_Sources");
    list_for_each_entry(ctx, &sc->prevent_suspend_list, list) {
        seq_printf(s, "%s", ctx->name);
        if (ctx->timeout)
            seq_printf(s, "(%d ms)", ctx->timeout);
        seq_puts(s, " ");
    }
    seq_puts(s, "\n");
    spin_unlock_bh(&sc->runtime_lock);

    return 0;
}
  900. #undef HIF_PCI_RUNTIME_PM_STATS
/**
 * hif_pci_runtime_pm_open() - open the runtime pm stats debugfs file
 * @inode: inode of the debugfs entry; i_private holds the hif_pci_softc
 * @file: file being opened
 *
 * Return: linux error code of single_open.
 */
static int hif_pci_runtime_pm_open(struct inode *inode, struct file *file)
{
    return single_open(file, hif_pci_pm_runtime_debugfs_show,
                       inode->i_private);
}
/* File operations for the cnss_runtime_pm debugfs entry; read side is
 * driven by seq_file with hif_pci_pm_runtime_debugfs_show as the show op.
 */
static const struct file_operations hif_pci_runtime_pm_fops = {
    .owner = THIS_MODULE,
    .open = hif_pci_runtime_pm_open,
    .release = single_release,
    .read = seq_read,
    .llseek = seq_lseek,
};
/**
 * hif_runtime_pm_debugfs_create() - creates runtimepm debugfs entry
 * @sc: pci context
 *
 * creates a debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_create(struct hif_pci_softc *sc)
{
    /* 0400: root-readable only; sc is passed through as i_private */
    sc->pm_dentry = debugfs_create_file("cnss_runtime_pm",
                                        0400, NULL, sc,
                                        &hif_pci_runtime_pm_fops);
}
/**
 * hif_runtime_pm_debugfs_remove() - removes runtimepm debugfs entry
 * @sc: pci context
 *
 * removes the debugfs entry to debug the runtime pm feature.
 */
static void hif_runtime_pm_debugfs_remove(struct hif_pci_softc *sc)
{
    debugfs_remove(sc->pm_dentry);
}
/* Arm runtime PM on @dev: configure autosuspend with @delay ms, allow
 * runtime suspend, and drop the usage-count reference (put_noidle) that
 * the PCI core took at probe so the device may actually suspend.
 */
static void hif_runtime_init(struct device *dev, int delay)
{
    pm_runtime_set_autosuspend_delay(dev, delay);
    pm_runtime_use_autosuspend(dev);
    pm_runtime_allow(dev);
    /* Reset the inactivity clock before releasing the usage count */
    pm_runtime_mark_last_busy(dev);
    pm_runtime_put_noidle(dev);
    /* Child devices must not block this device's runtime suspend */
    pm_suspend_ignore_children(dev, true);
}
/* Undo hif_runtime_init(): retake the usage-count reference without
 * resuming and mark the device active so it stays powered after
 * runtime PM is torn down.
 */
static void hif_runtime_exit(struct device *dev)
{
    pm_runtime_get_noresume(dev);
    pm_runtime_set_active(dev);
}
static void hif_pm_runtime_lock_timeout_fn(void *data);

/**
 * hif_pm_runtime_start(): start the runtime pm
 * @sc: pci context
 *
 * After this call, runtime pm will be active. No-op when runtime PM is
 * disabled in the ini or when running in FTM/EPPING/monitor mode.
 */
static void hif_pm_runtime_start(struct hif_pci_softc *sc)
{
    struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
    uint32_t mode = hif_get_conparam(ol_sc);

    if (!ol_sc->hif_config.enable_runtime_pm) {
        HIF_INFO("%s: RUNTIME PM is disabled in ini\n", __func__);
        return;
    }

    if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
        mode == QDF_GLOBAL_MONITOR_MODE) {
        HIF_INFO("%s: RUNTIME PM is disabled for FTM/EPPING mode\n",
                 __func__);
        return;
    }

    /* Timer backing the prevent-suspend timeout feature.
     * NOTE(review): first argument (qdf device handle) is NULL -
     * presumably unused by qdf_timer_init; confirm.
     */
    qdf_timer_init(NULL, &sc->runtime_timer,
                   hif_pm_runtime_lock_timeout_fn,
                   sc, QDF_TIMER_TYPE_WAKE_APPS);

    HIF_INFO("%s: Enabling RUNTIME PM, Delay: %d ms", __func__,
             ol_sc->hif_config.runtime_pm_delay);

    hif_runtime_init(sc->dev, ol_sc->hif_config.runtime_pm_delay);
    qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_ON);
    hif_runtime_pm_debugfs_create(sc);
}
/**
 * hif_pm_runtime_stop(): stop runtime pm
 * @sc: pci context
 *
 * Turns off runtime pm and frees corresponding resources
 * that were acquired by hif_runtime_pm_start().
 */
static void hif_pm_runtime_stop(struct hif_pci_softc *sc)
{
    struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
    uint32_t mode = hif_get_conparam(ol_sc);

    /* Mirror the same early-outs as hif_pm_runtime_start() so stop is
     * a no-op whenever start was.
     */
    if (!ol_sc->hif_config.enable_runtime_pm)
        return;

    if (mode == QDF_GLOBAL_FTM_MODE || QDF_IS_EPPING_ENABLED(mode) ||
        mode == QDF_GLOBAL_MONITOR_MODE)
        return;

    hif_runtime_exit(sc->dev);
    hif_pm_runtime_resume(sc->dev);

    qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);

    hif_runtime_pm_debugfs_remove(sc);
    qdf_timer_free(&sc->runtime_timer);
    /* doesn't wait for pending traffic unlike cld-2.0 */
}
/**
 * hif_pm_runtime_open(): initialize runtime pm
 * @sc: pci data structure
 *
 * Early initialization: set up the lock, the pm_state atomic (starts in
 * NONE), the prevent-linkdown lock, and the empty wakeup-source list.
 */
static void hif_pm_runtime_open(struct hif_pci_softc *sc)
{
    spin_lock_init(&sc->runtime_lock);

    qdf_atomic_init(&sc->pm_state);
    qdf_runtime_lock_init(&sc->prevent_linkdown_lock);
    qdf_atomic_set(&sc->pm_state, HIF_PM_RUNTIME_STATE_NONE);
    INIT_LIST_HEAD(&sc->prevent_suspend_list);
}
/**
 * hif_pm_runtime_sanitize_on_exit(): sanitize the pm usage count and state
 * @sc: pci context
 *
 * Ensure we have only one vote against runtime suspend before closing
 * the runtime suspend feature.
 *
 * all gets by the wlan driver should have been returned
 * one vote should remain as part of cnss_runtime_exit
 *
 * needs to be revisited if we share the root complex.
 */
static void hif_pm_runtime_sanitize_on_exit(struct hif_pci_softc *sc)
{
    struct hif_pm_runtime_lock *ctx, *tmp;

    /* Exactly one vote is the expected clean state; anything else is
     * warned about (with a stack trace) before being repaired below.
     */
    if (atomic_read(&sc->dev->power.usage_count) != 1)
        hif_pci_runtime_pm_warn(sc, "Driver UnLoaded");
    else
        return;

    /* runtime_lock is dropped around each deinit because
     * hif_runtime_lock_deinit takes it itself; the _safe iterator
     * tolerates the current entry being unlinked while unlocked.
     * NOTE(review): presumably no other writer races this list during
     * unload - confirm.
     */
    spin_lock_bh(&sc->runtime_lock);
    list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
        spin_unlock_bh(&sc->runtime_lock);
        hif_runtime_lock_deinit(GET_HIF_OPAQUE_HDL(sc), ctx);
        spin_lock_bh(&sc->runtime_lock);
    }
    spin_unlock_bh(&sc->runtime_lock);

    /* ensure 1 and only 1 usage count so that when the wlan
     * driver is re-insmodded runtime pm won't be
     * disabled also ensures runtime pm doesn't get
     * broken on by being less than 1.
     */
    if (atomic_read(&sc->dev->power.usage_count) <= 0)
        atomic_set(&sc->dev->power.usage_count, 1);
    while (atomic_read(&sc->dev->power.usage_count) > 1)
        hif_pm_runtime_put_auto(sc->dev);
}
static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
                                          struct hif_pm_runtime_lock *lock);

/**
 * hif_pm_runtime_sanitize_on_ssr_exit() - Empty the suspend list on SSR
 * @sc: PCIe Context
 *
 * API is used to empty the runtime pm prevent suspend list.
 *
 * Return: void
 */
static void hif_pm_runtime_sanitize_on_ssr_exit(struct hif_pci_softc *sc)
{
    struct hif_pm_runtime_lock *ctx, *tmp;

    /* __hif_pm_runtime_allow_suspend unlinks the entry, so the _safe
     * iterator is required; the lock is held across the whole walk.
     */
    spin_lock_bh(&sc->runtime_lock);
    list_for_each_entry_safe(ctx, tmp, &sc->prevent_suspend_list, list) {
        __hif_pm_runtime_allow_suspend(sc, ctx);
    }
    spin_unlock_bh(&sc->runtime_lock);
}
/**
 * hif_pm_runtime_close(): close runtime pm
 * @sc: pci bus handle
 *
 * ensure runtime_pm is stopped before closing the driver
 */
static void hif_pm_runtime_close(struct hif_pci_softc *sc)
{
    struct hif_softc *scn = HIF_GET_SOFTC(sc);

    qdf_runtime_lock_deinit(&sc->prevent_linkdown_lock);
    /* NONE means hif_pm_runtime_start never ran (or stop already did) */
    if (qdf_atomic_read(&sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE)
        return;

    hif_pm_runtime_stop(sc);

    /* SSR recovery only flushes the wakeup-source list; a normal unload
     * also repairs the usage count.
     */
    hif_is_recovery_in_progress(scn) ?
        hif_pm_runtime_sanitize_on_ssr_exit(sc) :
        hif_pm_runtime_sanitize_on_exit(sc);
}
  1095. #else
/* Stubs used when FEATURE_RUNTIME_PM is not compiled in */
static void hif_pm_runtime_close(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_open(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_start(struct hif_pci_softc *sc) {}
static void hif_pm_runtime_stop(struct hif_pci_softc *sc) {}
  1100. #endif
/**
 * hif_disable_power_gating() - disable HW power gating
 * @hif_ctx: hif context
 *
 * disables pcie L1 power states
 */
static void hif_disable_power_gating(struct hif_opaque_softc *hif_ctx)
{
    struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
    struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

    if (!scn) {
        HIF_ERROR("%s: Could not disable ASPM scn is null",
                  __func__);
        return;
    }

    /* Disable ASPM when pkt log is enabled */
    /* NOTE(review): 0x80 is presumably the PCIe Link Control register
     * offset for this device; clearing the low byte drops the ASPM
     * control bits. The original value is saved in lcr_val for
     * hif_enable_power_gating() to restore - confirm offset against
     * the device's config space layout.
     */
    pci_read_config_dword(sc->pdev, 0x80, &sc->lcr_val);
    pci_write_config_dword(sc->pdev, 0x80, (sc->lcr_val & 0xffffff00));
}
  1120. /**
  1121. * hif_enable_power_gating() - enable HW power gating
  1122. * @hif_ctx: hif context
  1123. *
  1124. * enables pcie L1 power states
  1125. */
  1126. static void hif_enable_power_gating(struct hif_pci_softc *sc)
  1127. {
  1128. if (!sc) {
  1129. HIF_ERROR("%s: Could not disable ASPM scn is null",
  1130. __func__);
  1131. return;
  1132. }
  1133. /* Re-enable ASPM after firmware/OTP download is complete */
  1134. pci_write_config_dword(sc->pdev, 0x80, sc->lcr_val);
  1135. }
/**
 * hif_pci_enable_power_management() - enable power management
 * @hif_sc: hif context
 * @is_packet_log_enabled: ASPM stays disabled when pkt log is on
 *
 * Enables runtime pm, aspm(PCI.. hif_enable_power_gating) and re-enabling
 * soc-sleep after driver load (hif_pci_target_sleep_state_adjust).
 *
 * note: epping mode does not call this function as it does not
 * care about saving power.
 */
void hif_pci_enable_power_management(struct hif_softc *hif_sc,
                                     bool is_packet_log_enabled)
{
    struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_sc);
    uint32_t mode;

    if (!pci_ctx) {
        HIF_ERROR("%s, hif_ctx null", __func__);
        return;
    }

    mode = hif_get_conparam(hif_sc);
    if (mode == QDF_GLOBAL_FTM_MODE) {
        /* FTM mode: only ASPM is re-enabled; runtime PM stays off */
        HIF_INFO("%s: Enable power gating for FTM mode", __func__);
        hif_enable_power_gating(pci_ctx);
        return;
    }

    hif_pm_runtime_start(pci_ctx);

    /* ASPM is left disabled while packet logging is active */
    if (!is_packet_log_enabled)
        hif_enable_power_gating(pci_ctx);

    if (!CONFIG_ATH_PCIE_MAX_PERF &&
        CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD &&
        !ce_srng_based(hif_sc)) {
        /* allow sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature */
        if (hif_pci_target_sleep_state_adjust(hif_sc, true, false) < 0)
            HIF_ERROR("%s, failed to set target to sleep",
                      __func__);
    }
}
/**
 * hif_pci_disable_power_management() - disable power management
 * @hif_ctx: hif context
 *
 * Currently disables runtime pm. Should be updated to behave
 * if runtime pm is not started. Should be updated to take care
 * of aspm and soc sleep for driver load.
 */
void hif_pci_disable_power_management(struct hif_softc *hif_ctx)
{
    struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

    if (!pci_ctx) {
        HIF_ERROR("%s, hif_ctx null", __func__);
        return;
    }

    hif_pm_runtime_stop(pci_ctx);
}
/* Dump copy-engine and PCI bus statistics to the log */
void hif_pci_display_stats(struct hif_softc *hif_ctx)
{
    struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

    if (!pci_ctx) {
        HIF_ERROR("%s, hif_ctx null", __func__);
        return;
    }
    hif_display_ce_stats(&pci_ctx->ce_sc);

    hif_print_pci_stats(pci_ctx);
}
/* Reset the copy-engine statistics counters */
void hif_pci_clear_stats(struct hif_softc *hif_ctx)
{
    struct hif_pci_softc *pci_ctx = HIF_GET_PCI_SOFTC(hif_ctx);

    if (!pci_ctx) {
        HIF_ERROR("%s, hif_ctx null", __func__);
        return;
    }
    hif_clear_ce_stats(&pci_ctx->ce_sc);
}
  1209. #define ATH_PCI_PROBE_RETRY_MAX 3
/**
 * hif_pci_open(): open the PCI hif bus
 * @hif_ctx: hif context
 * @bus_type: bus type (recorded into the hif context)
 *
 * Sets up runtime PM bookkeeping and the irq lock, then delegates to
 * the common copy-engine open.
 *
 * Return: QDF_STATUS from hif_ce_open()
 */
QDF_STATUS hif_pci_open(struct hif_softc *hif_ctx, enum qdf_bus_type bus_type)
{
    struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);

    hif_ctx->bus_type = bus_type;
    hif_pm_runtime_open(sc);

    qdf_spinlock_create(&sc->irq_lock);

    return hif_ce_open(hif_ctx);
}
/**
 * hif_wake_target_cpu() - wake the target's cpu
 * @scn: hif context
 *
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static void hif_wake_target_cpu(struct hif_softc *scn)
{
    QDF_STATUS rv;
    uint32_t core_ctrl;
    struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);

    /* Read-modify-write CORE_CTRL via the diag window */
    rv = hif_diag_read_access(hif_hdl,
                              SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
                              &core_ctrl);
    QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
    /* A_INUM_FIRMWARE interrupt to Target CPU */
    core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

    rv = hif_diag_write_access(hif_hdl,
                               SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS,
                               core_ctrl);
    QDF_ASSERT(rv == QDF_STATUS_SUCCESS);
}
  1248. /**
  1249. * soc_wake_reset() - allow the target to go to sleep
  1250. * @scn: hif_softc
  1251. *
  1252. * Clear the force wake register. This is done by
  1253. * hif_sleep_entry and cancel defered timer sleep.
  1254. */
  1255. static void soc_wake_reset(struct hif_softc *scn)
  1256. {
  1257. hif_write32_mb(scn, scn->mem +
  1258. PCIE_LOCAL_BASE_ADDRESS +
  1259. PCIE_SOC_WAKE_ADDRESS,
  1260. PCIE_SOC_WAKE_RESET);
  1261. }
  1262. /**
  1263. * hif_sleep_entry() - gate target sleep
  1264. * @arg: hif context
  1265. *
  1266. * This function is the callback for the sleep timer.
  1267. * Check if last force awake critical section was at least
  1268. * HIF_MIN_SLEEP_INACTIVITY_TIME_MS time ago. if it was,
  1269. * allow the target to go to sleep and cancel the sleep timer.
  1270. * otherwise reschedule the sleep timer.
  1271. */
  1272. static void hif_sleep_entry(void *arg)
  1273. {
  1274. struct HIF_CE_state *hif_state = (struct HIF_CE_state *)arg;
  1275. struct hif_softc *scn = HIF_GET_SOFTC(hif_state);
  1276. uint32_t idle_ms;
  1277. if (scn->recovery)
  1278. return;
  1279. if (hif_is_driver_unloading(scn))
  1280. return;
  1281. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  1282. if (hif_state->fake_sleep) {
  1283. idle_ms = qdf_system_ticks_to_msecs(qdf_system_ticks()
  1284. - hif_state->sleep_ticks);
  1285. if (!hif_state->verified_awake &&
  1286. idle_ms >= HIF_MIN_SLEEP_INACTIVITY_TIME_MS) {
  1287. if (!qdf_atomic_read(&scn->link_suspended)) {
  1288. soc_wake_reset(scn);
  1289. hif_state->fake_sleep = false;
  1290. }
  1291. } else {
  1292. qdf_timer_stop(&hif_state->sleep_timer);
  1293. qdf_timer_start(&hif_state->sleep_timer,
  1294. HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
  1295. }
  1296. }
  1297. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  1298. }
  1299. #define HIF_HIA_MAX_POLL_LOOP 1000000
  1300. #define HIF_HIA_POLLING_DELAY_MS 10
#ifdef QCA_HIF_HIA_EXTND
/**
 * hif_set_hia_extnd() - extended host-interest-area setup
 * @scn: hif context
 *
 * Reads the chip revision for AR900B/QCA9984/QCA9888 targets and then
 * optionally pre-configures the target PLL (clock frac/int values and
 * desired CPU speed) from the module-parameter globals frac, intval,
 * ar900b_20_targ_clk and qca9888_20_targ_clk (defined elsewhere;
 * -1 means "not supplied").
 */
static void hif_set_hia_extnd(struct hif_softc *scn)
{
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
	uint32_t target_type = tgt_info->target_type;

	HIF_TRACE("%s: E", __func__);

	if ((target_type == TARGET_TYPE_AR900B) ||
	    target_type == TARGET_TYPE_QCA9984 ||
	    target_type == TARGET_TYPE_QCA9888) {
		/* CHIP revision is 8-11 bits of the CHIP_ID register 0xec
		 * in RTC space
		 */
		tgt_info->target_revision
			= CHIP_ID_REVISION_GET(hif_read32_mb(scn, scn->mem
					+ CHIP_ID_ADDRESS));
		qdf_print("chip_id 0x%x chip_revision 0x%x",
			  target_type, tgt_info->target_revision);
	}

	{
		uint32_t flag2_value = 0;
		uint32_t flag2_targ_addr =
			host_interest_item_address(target_type,
				offsetof(struct host_interest_s,
					 hi_skip_clock_init));

		/* Only set the clock override when all three clock module
		 * parameters were supplied by the user.
		 */
		if ((ar900b_20_targ_clk != -1) &&
		    (frac != -1) && (intval != -1)) {
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
					     &flag2_value);
			qdf_print("\n Setting clk_override");
			flag2_value |= CLOCK_OVERRIDE;
			hif_diag_write_access(hif_hdl, flag2_targ_addr,
					      flag2_value);
			qdf_print("\n CLOCK PLL val set %d", flag2_value);
		} else {
			qdf_print("\n CLOCK PLL skipped");
		}
	}

	if (target_type == TARGET_TYPE_AR900B
	    || target_type == TARGET_TYPE_QCA9984
	    || target_type == TARGET_TYPE_QCA9888) {
		/* for AR9980_2.0, 300 mhz clock is used, right now we assume
		 * this would be supplied through module parameters,
		 * if not supplied assumed default or same behavior as 1.0.
		 * Assume 1.0 clock can't be tuned, reset to defaults
		 */
		qdf_print(KERN_INFO
			  "%s: setting the target pll frac %x intval %x",
			  __func__, frac, intval);

		/* do not touch frac, and int val, let them be default -1,
		 * if desired, host can supply these through module params
		 */
		if (frac != -1 || intval != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr;

			flag2_targ_addr =
				host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
						 hi_clock_info));
			hif_diag_read_access(hif_hdl,
					     flag2_targ_addr, &flag2_value);
			qdf_print("\n ====> FRAC Val %x Address %x", frac,
				  flag2_value);
			/* NOTE(review): flag2_value (read from hi_clock_info)
			 * is used here as the write ADDRESS — hi_clock_info
			 * apparently holds a pointer to the clock-info block.
			 * Confirm against the target host_interest layout.
			 */
			hif_diag_write_access(hif_hdl, flag2_value, frac);
			qdf_print("\n INT Val %x Address %x",
				  intval, flag2_value + 4);
			hif_diag_write_access(hif_hdl,
					      flag2_value + 4, intval);
		} else {
			qdf_print(KERN_INFO
				  "%s: no frac provided, skipping pre-configuring PLL",
				  __func__);
		}

		/* for 2.0 write 300 mhz into hi_desired_cpu_speed_hz */
		if ((target_type == TARGET_TYPE_AR900B)
		    && (tgt_info->target_revision == AR900B_REV_2)
		    && ar900b_20_targ_clk != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr;

			flag2_targ_addr
				= host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
						 hi_desired_cpu_speed_hz));
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
					     &flag2_value);
			qdf_print("\n ==> hi_desired_cpu_speed_hz Address %x",
				  flag2_value);
			/* same indirection: value read above is the address */
			hif_diag_write_access(hif_hdl, flag2_value,
					      ar900b_20_targ_clk/*300000000u*/);
		} else if (target_type == TARGET_TYPE_QCA9888) {
			uint32_t flag2_targ_addr;

			/* NOTE(review): any value other than 200 MHz is forced
			 * to 300 MHz here — verify this is the intended policy
			 */
			if (200000000u != qca9888_20_targ_clk) {
				qca9888_20_targ_clk = 300000000u;
				/* Setting the target clock speed to 300 mhz */
			}

			flag2_targ_addr
				= host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
						 hi_desired_cpu_speed_hz));
			hif_diag_write_access(hif_hdl, flag2_targ_addr,
					      qca9888_20_targ_clk);
		} else {
			qdf_print("%s: targ_clk is not provided, skipping pre-configuring PLL",
				  __func__);
		}
	} else {
		/* non-AR900B-family targets: only the frac/int override */
		if (frac != -1 || intval != -1) {
			uint32_t flag2_value = 0;
			uint32_t flag2_targ_addr =
				host_interest_item_address(target_type,
					offsetof(struct host_interest_s,
						 hi_clock_info));
			hif_diag_read_access(hif_hdl, flag2_targ_addr,
					     &flag2_value);
			qdf_print("\n ====> FRAC Val %x Address %x", frac,
				  flag2_value);
			hif_diag_write_access(hif_hdl, flag2_value, frac);
			qdf_print("\n INT Val %x Address %x", intval,
				  flag2_value + 4);
			hif_diag_write_access(hif_hdl, flag2_value + 4,
					      intval);
		}
	}
}
#else

/* stub when the extended HIA feature is not compiled in */
static void hif_set_hia_extnd(struct hif_softc *scn)
{
}
#endif
  1429. /**
  1430. * hif_set_hia() - fill out the host interest area
  1431. * @scn: hif context
  1432. *
  1433. * This is replaced by hif_wlan_enable for integrated targets.
  1434. * This fills out the host interest area. The firmware will
  1435. * process these memory addresses when it is first brought out
  1436. * of reset.
  1437. *
  1438. * Return: 0 for success.
  1439. */
  1440. static int hif_set_hia(struct hif_softc *scn)
  1441. {
  1442. QDF_STATUS rv;
  1443. uint32_t interconnect_targ_addr = 0;
  1444. uint32_t pcie_state_targ_addr = 0;
  1445. uint32_t pipe_cfg_targ_addr = 0;
  1446. uint32_t svc_to_pipe_map = 0;
  1447. uint32_t pcie_config_flags = 0;
  1448. uint32_t flag2_value = 0;
  1449. uint32_t flag2_targ_addr = 0;
  1450. #ifdef QCA_WIFI_3_0
  1451. uint32_t host_interest_area = 0;
  1452. uint8_t i;
  1453. #else
  1454. uint32_t ealloc_value = 0;
  1455. uint32_t ealloc_targ_addr = 0;
  1456. uint8_t banks_switched = 1;
  1457. uint32_t chip_id;
  1458. #endif
  1459. uint32_t pipe_cfg_addr;
  1460. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(scn);
  1461. struct hif_target_info *tgt_info = hif_get_target_info_handle(hif_hdl);
  1462. uint32_t target_type = tgt_info->target_type;
  1463. uint32_t target_ce_config_sz, target_service_to_ce_map_sz;
  1464. static struct CE_pipe_config *target_ce_config;
  1465. struct service_to_pipe *target_service_to_ce_map;
  1466. HIF_TRACE("%s: E", __func__);
  1467. hif_get_target_ce_config(scn,
  1468. &target_ce_config, &target_ce_config_sz,
  1469. &target_service_to_ce_map,
  1470. &target_service_to_ce_map_sz,
  1471. NULL, NULL);
  1472. if (ADRASTEA_BU)
  1473. return QDF_STATUS_SUCCESS;
  1474. #ifdef QCA_WIFI_3_0
  1475. i = 0;
  1476. while (i < HIF_HIA_MAX_POLL_LOOP) {
  1477. host_interest_area = hif_read32_mb(scn, scn->mem +
  1478. A_SOC_CORE_SCRATCH_0_ADDRESS);
  1479. if ((host_interest_area & 0x01) == 0) {
  1480. qdf_mdelay(HIF_HIA_POLLING_DELAY_MS);
  1481. host_interest_area = 0;
  1482. i++;
  1483. if (i > HIF_HIA_MAX_POLL_LOOP && (i % 1000 == 0))
  1484. HIF_ERROR("%s: poll timeout(%d)", __func__, i);
  1485. } else {
  1486. host_interest_area &= (~0x01);
  1487. hif_write32_mb(scn, scn->mem + 0x113014, 0);
  1488. break;
  1489. }
  1490. }
  1491. if (i >= HIF_HIA_MAX_POLL_LOOP) {
  1492. HIF_ERROR("%s: hia polling timeout", __func__);
  1493. return -EIO;
  1494. }
  1495. if (host_interest_area == 0) {
  1496. HIF_ERROR("%s: host_interest_area = 0", __func__);
  1497. return -EIO;
  1498. }
  1499. interconnect_targ_addr = host_interest_area +
  1500. offsetof(struct host_interest_area_t,
  1501. hi_interconnect_state);
  1502. flag2_targ_addr = host_interest_area +
  1503. offsetof(struct host_interest_area_t, hi_option_flag2);
  1504. #else
  1505. interconnect_targ_addr = hif_hia_item_address(target_type,
  1506. offsetof(struct host_interest_s, hi_interconnect_state));
  1507. ealloc_targ_addr = hif_hia_item_address(target_type,
  1508. offsetof(struct host_interest_s, hi_early_alloc));
  1509. flag2_targ_addr = hif_hia_item_address(target_type,
  1510. offsetof(struct host_interest_s, hi_option_flag2));
  1511. #endif
  1512. /* Supply Target-side CE configuration */
  1513. rv = hif_diag_read_access(hif_hdl, interconnect_targ_addr,
  1514. &pcie_state_targ_addr);
  1515. if (rv != QDF_STATUS_SUCCESS) {
  1516. HIF_ERROR("%s: interconnect_targ_addr = 0x%0x, ret = %d",
  1517. __func__, interconnect_targ_addr, rv);
  1518. goto done;
  1519. }
  1520. if (pcie_state_targ_addr == 0) {
  1521. rv = QDF_STATUS_E_FAILURE;
  1522. HIF_ERROR("%s: pcie state addr is 0", __func__);
  1523. goto done;
  1524. }
  1525. pipe_cfg_addr = pcie_state_targ_addr +
  1526. offsetof(struct pcie_state_s,
  1527. pipe_cfg_addr);
  1528. rv = hif_diag_read_access(hif_hdl,
  1529. pipe_cfg_addr,
  1530. &pipe_cfg_targ_addr);
  1531. if (rv != QDF_STATUS_SUCCESS) {
  1532. HIF_ERROR("%s: pipe_cfg_addr = 0x%0x, ret = %d",
  1533. __func__, pipe_cfg_addr, rv);
  1534. goto done;
  1535. }
  1536. if (pipe_cfg_targ_addr == 0) {
  1537. rv = QDF_STATUS_E_FAILURE;
  1538. HIF_ERROR("%s: pipe cfg addr is 0", __func__);
  1539. goto done;
  1540. }
  1541. rv = hif_diag_write_mem(hif_hdl, pipe_cfg_targ_addr,
  1542. (uint8_t *) target_ce_config,
  1543. target_ce_config_sz);
  1544. if (rv != QDF_STATUS_SUCCESS) {
  1545. HIF_ERROR("%s: write pipe cfg (%d)", __func__, rv);
  1546. goto done;
  1547. }
  1548. rv = hif_diag_read_access(hif_hdl,
  1549. pcie_state_targ_addr +
  1550. offsetof(struct pcie_state_s,
  1551. svc_to_pipe_map),
  1552. &svc_to_pipe_map);
  1553. if (rv != QDF_STATUS_SUCCESS) {
  1554. HIF_ERROR("%s: get svc/pipe map (%d)", __func__, rv);
  1555. goto done;
  1556. }
  1557. if (svc_to_pipe_map == 0) {
  1558. rv = QDF_STATUS_E_FAILURE;
  1559. HIF_ERROR("%s: svc_to_pipe map is 0", __func__);
  1560. goto done;
  1561. }
  1562. rv = hif_diag_write_mem(hif_hdl,
  1563. svc_to_pipe_map,
  1564. (uint8_t *) target_service_to_ce_map,
  1565. target_service_to_ce_map_sz);
  1566. if (rv != QDF_STATUS_SUCCESS) {
  1567. HIF_ERROR("%s: write svc/pipe map (%d)", __func__, rv);
  1568. goto done;
  1569. }
  1570. rv = hif_diag_read_access(hif_hdl,
  1571. pcie_state_targ_addr +
  1572. offsetof(struct pcie_state_s,
  1573. config_flags),
  1574. &pcie_config_flags);
  1575. if (rv != QDF_STATUS_SUCCESS) {
  1576. HIF_ERROR("%s: get pcie config_flags (%d)", __func__, rv);
  1577. goto done;
  1578. }
  1579. #if (CONFIG_PCIE_ENABLE_L1_CLOCK_GATE)
  1580. pcie_config_flags |= PCIE_CONFIG_FLAG_ENABLE_L1;
  1581. #else
  1582. pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
  1583. #endif /* CONFIG_PCIE_ENABLE_L1_CLOCK_GATE */
  1584. pcie_config_flags |= PCIE_CONFIG_FLAG_CLK_SWITCH_WAIT;
  1585. #if (CONFIG_PCIE_ENABLE_AXI_CLK_GATE)
  1586. pcie_config_flags |= PCIE_CONFIG_FLAG_AXI_CLK_GATE;
  1587. #endif
  1588. rv = hif_diag_write_mem(hif_hdl,
  1589. pcie_state_targ_addr +
  1590. offsetof(struct pcie_state_s,
  1591. config_flags),
  1592. (uint8_t *) &pcie_config_flags,
  1593. sizeof(pcie_config_flags));
  1594. if (rv != QDF_STATUS_SUCCESS) {
  1595. HIF_ERROR("%s: write pcie config_flags (%d)", __func__, rv);
  1596. goto done;
  1597. }
  1598. #ifndef QCA_WIFI_3_0
  1599. /* configure early allocation */
  1600. ealloc_targ_addr = hif_hia_item_address(target_type,
  1601. offsetof(
  1602. struct host_interest_s,
  1603. hi_early_alloc));
  1604. rv = hif_diag_read_access(hif_hdl, ealloc_targ_addr,
  1605. &ealloc_value);
  1606. if (rv != QDF_STATUS_SUCCESS) {
  1607. HIF_ERROR("%s: get early alloc val (%d)", __func__, rv);
  1608. goto done;
  1609. }
  1610. /* 1 bank is switched to IRAM, except ROME 1.0 */
  1611. ealloc_value |=
  1612. ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
  1613. HI_EARLY_ALLOC_MAGIC_MASK);
  1614. rv = hif_diag_read_access(hif_hdl,
  1615. CHIP_ID_ADDRESS |
  1616. RTC_SOC_BASE_ADDRESS, &chip_id);
  1617. if (rv != QDF_STATUS_SUCCESS) {
  1618. HIF_ERROR("%s: get chip id val (%d)", __func__, rv);
  1619. goto done;
  1620. }
  1621. if (CHIP_ID_VERSION_GET(chip_id) == 0xD) {
  1622. tgt_info->target_revision = CHIP_ID_REVISION_GET(chip_id);
  1623. switch (CHIP_ID_REVISION_GET(chip_id)) {
  1624. case 0x2: /* ROME 1.3 */
  1625. /* 2 banks are switched to IRAM */
  1626. banks_switched = 2;
  1627. break;
  1628. case 0x4: /* ROME 2.1 */
  1629. case 0x5: /* ROME 2.2 */
  1630. banks_switched = 6;
  1631. break;
  1632. case 0x8: /* ROME 3.0 */
  1633. case 0x9: /* ROME 3.1 */
  1634. case 0xA: /* ROME 3.2 */
  1635. banks_switched = 9;
  1636. break;
  1637. case 0x0: /* ROME 1.0 */
  1638. case 0x1: /* ROME 1.1 */
  1639. default:
  1640. /* 3 banks are switched to IRAM */
  1641. banks_switched = 3;
  1642. break;
  1643. }
  1644. }
  1645. ealloc_value |=
  1646. ((banks_switched << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT)
  1647. & HI_EARLY_ALLOC_IRAM_BANKS_MASK);
  1648. rv = hif_diag_write_access(hif_hdl,
  1649. ealloc_targ_addr,
  1650. ealloc_value);
  1651. if (rv != QDF_STATUS_SUCCESS) {
  1652. HIF_ERROR("%s: set early alloc val (%d)", __func__, rv);
  1653. goto done;
  1654. }
  1655. #endif
  1656. if ((target_type == TARGET_TYPE_AR900B)
  1657. || (target_type == TARGET_TYPE_QCA9984)
  1658. || (target_type == TARGET_TYPE_QCA9888)
  1659. || (target_type == TARGET_TYPE_AR9888)) {
  1660. hif_set_hia_extnd(scn);
  1661. }
  1662. /* Tell Target to proceed with initialization */
  1663. flag2_targ_addr = hif_hia_item_address(target_type,
  1664. offsetof(
  1665. struct host_interest_s,
  1666. hi_option_flag2));
  1667. rv = hif_diag_read_access(hif_hdl, flag2_targ_addr,
  1668. &flag2_value);
  1669. if (rv != QDF_STATUS_SUCCESS) {
  1670. HIF_ERROR("%s: get option val (%d)", __func__, rv);
  1671. goto done;
  1672. }
  1673. flag2_value |= HI_OPTION_EARLY_CFG_DONE;
  1674. rv = hif_diag_write_access(hif_hdl, flag2_targ_addr,
  1675. flag2_value);
  1676. if (rv != QDF_STATUS_SUCCESS) {
  1677. HIF_ERROR("%s: set option val (%d)", __func__, rv);
  1678. goto done;
  1679. }
  1680. hif_wake_target_cpu(scn);
  1681. done:
  1682. return rv;
  1683. }
  1684. /**
  1685. * hif_bus_configure() - configure the pcie bus
  1686. * @hif_sc: pointer to the hif context.
  1687. *
  1688. * return: 0 for success. nonzero for failure.
  1689. */
  1690. int hif_pci_bus_configure(struct hif_softc *hif_sc)
  1691. {
  1692. int status = 0;
  1693. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(hif_sc);
  1694. struct hif_opaque_softc *hif_osc = GET_HIF_OPAQUE_HDL(hif_sc);
  1695. hif_ce_prepare_config(hif_sc);
  1696. /* initialize sleep state adjust variables */
  1697. hif_state->sleep_timer_init = true;
  1698. hif_state->keep_awake_count = 0;
  1699. hif_state->fake_sleep = false;
  1700. hif_state->sleep_ticks = 0;
  1701. qdf_timer_init(NULL, &hif_state->sleep_timer,
  1702. hif_sleep_entry, (void *)hif_state,
  1703. QDF_TIMER_TYPE_WAKE_APPS);
  1704. hif_state->sleep_timer_init = true;
  1705. status = hif_wlan_enable(hif_sc);
  1706. if (status) {
  1707. HIF_ERROR("%s: hif_wlan_enable error = %d",
  1708. __func__, status);
  1709. goto timer_free;
  1710. }
  1711. A_TARGET_ACCESS_LIKELY(hif_sc);
  1712. if ((CONFIG_ATH_PCIE_MAX_PERF ||
  1713. CONFIG_ATH_PCIE_AWAKE_WHILE_DRIVER_LOAD) &&
  1714. !ce_srng_based(hif_sc)) {
  1715. /*
  1716. * prevent sleep for PCIE_AWAKE_WHILE_DRIVER_LOAD feature
  1717. * prevent sleep when we want to keep firmware always awake
  1718. * note: when we want to keep firmware always awake,
  1719. * hif_target_sleep_state_adjust will point to a dummy
  1720. * function, and hif_pci_target_sleep_state_adjust must
  1721. * be called instead.
  1722. * note: bus type check is here because AHB bus is reusing
  1723. * hif_pci_bus_configure code.
  1724. */
  1725. if (hif_sc->bus_type == QDF_BUS_TYPE_PCI) {
  1726. if (hif_pci_target_sleep_state_adjust(hif_sc,
  1727. false, true) < 0) {
  1728. status = -EACCES;
  1729. goto disable_wlan;
  1730. }
  1731. }
  1732. }
  1733. /* todo: consider replacing this with an srng field */
  1734. if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
  1735. (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
  1736. (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
  1737. (hif_sc->bus_type == QDF_BUS_TYPE_AHB)) {
  1738. hif_sc->per_ce_irq = true;
  1739. }
  1740. status = hif_config_ce(hif_sc);
  1741. if (status)
  1742. goto disable_wlan;
  1743. /* QCA_WIFI_QCA8074_VP:Should not be executed on 8074 VP platform */
  1744. if (hif_needs_bmi(hif_osc)) {
  1745. status = hif_set_hia(hif_sc);
  1746. if (status)
  1747. goto unconfig_ce;
  1748. HIF_INFO_MED("%s: hif_set_hia done", __func__);
  1749. }
  1750. if (((hif_sc->target_info.target_type == TARGET_TYPE_QCA8074) ||
  1751. (hif_sc->target_info.target_type == TARGET_TYPE_QCA8074V2) ||
  1752. (hif_sc->target_info.target_type == TARGET_TYPE_QCA6018)) &&
  1753. (hif_sc->bus_type == QDF_BUS_TYPE_PCI))
  1754. HIF_INFO_MED("%s: Skip irq config for PCI based 8074 target",
  1755. __func__);
  1756. else {
  1757. status = hif_configure_irq(hif_sc);
  1758. if (status < 0)
  1759. goto unconfig_ce;
  1760. }
  1761. A_TARGET_ACCESS_UNLIKELY(hif_sc);
  1762. return status;
  1763. unconfig_ce:
  1764. hif_unconfig_ce(hif_sc);
  1765. disable_wlan:
  1766. A_TARGET_ACCESS_UNLIKELY(hif_sc);
  1767. hif_wlan_disable(hif_sc);
  1768. timer_free:
  1769. qdf_timer_stop(&hif_state->sleep_timer);
  1770. qdf_timer_free(&hif_state->sleep_timer);
  1771. hif_state->sleep_timer_init = false;
  1772. HIF_ERROR("%s: failed, status = %d", __func__, status);
  1773. return status;
  1774. }
  1775. /**
  1776. * hif_bus_close(): hif_bus_close
  1777. *
  1778. * Return: n/a
  1779. */
  1780. void hif_pci_close(struct hif_softc *hif_sc)
  1781. {
  1782. struct hif_pci_softc *hif_pci_sc = HIF_GET_PCI_SOFTC(hif_sc);
  1783. hif_pm_runtime_close(hif_pci_sc);
  1784. hif_ce_close(hif_sc);
  1785. }
  1786. #define BAR_NUM 0
/**
 * hif_enable_pci_nopld() - enable and map the PCI device (non-PLD path)
 * @sc: pci hif context
 * @pdev: pci device
 * @id: matched device id from the probe table
 *
 * Sanity-checks the device id, enables the device, reserves BAR 0,
 * sets the DMA masks, enables bus mastering and ioremaps BAR 0 into
 * sc->mem / ol_sc->mem. Resources are unwound in reverse order on
 * failure via the goto ladder.
 *
 * Return: 0 on success, -EIO on failure.
 */
static int hif_enable_pci_nopld(struct hif_pci_softc *sc,
				struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	void __iomem *mem;
	int ret = 0;
	uint16_t device_id = 0;
	struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);

	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != id->device) {
		HIF_ERROR(
		   "%s: dev id mismatch, config id = 0x%x, probing id = 0x%x",
		   __func__, device_id, id->device);
		/* pci link is down, so returning with error code */
		return -EIO;
	}

	/* FIXME: temp. commenting out assign_resource
	 * call for dev_attach to work on 2.6.38 kernel
	 */
#if (!defined(__LINUX_ARM_ARCH__))
	if (pci_assign_resource(pdev, BAR_NUM)) {
		HIF_ERROR("%s: pci_assign_resource error", __func__);
		return -EIO;
	}
#endif
	if (pci_enable_device(pdev)) {
		HIF_ERROR("%s: pci_enable_device error",
			  __func__);
		return -EIO;
	}
	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		HIF_ERROR("%s: PCI MMIO reservation error", __func__);
		ret = -EIO;
		goto err_region;
	}

#ifdef CONFIG_ARM_LPAE
	/* if CONFIG_ARM_LPAE is enabled, we have to set 64 bits mask
	 * for 32 bits device also.
	 */
	ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		HIF_ERROR("%s: Cannot enable 64-bit pci DMA", __func__);
		goto err_dma;
	}
	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (ret) {
		HIF_ERROR("%s: Cannot enable 64-bit DMA", __func__);
		goto err_dma;
	}
#else
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		HIF_ERROR("%s: Cannot enable 32-bit pci DMA", __func__);
		goto err_dma;
	}
	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		HIF_ERROR("%s: Cannot enable 32-bit consistent DMA!",
			  __func__);
		goto err_dma;
	}
#endif

	PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		HIF_ERROR("%s: PCI iomap error", __func__);
		ret = -EIO;
		goto err_iomap;
	}

	HIF_INFO("*****BAR is %pK\n", (void *)mem);

	sc->mem = mem;

	/* Hawkeye emulation specific change */
	if ((device_id == RUMIM2M_DEVICE_ID_NODE0) ||
	    (device_id == RUMIM2M_DEVICE_ID_NODE1) ||
	    (device_id == RUMIM2M_DEVICE_ID_NODE2) ||
	    (device_id == RUMIM2M_DEVICE_ID_NODE3) ||
	    (device_id == RUMIM2M_DEVICE_ID_NODE4) ||
	    (device_id == RUMIM2M_DEVICE_ID_NODE5)) {
		/* emulation platforms expose the target at a fixed offset
		 * inside the BAR
		 */
		mem = mem + 0x0c000000;
		sc->mem = mem;
		HIF_INFO("%s: Changing PCI mem base to %pK\n",
			 __func__, sc->mem);
	}

	sc->mem_len = pci_resource_len(pdev, BAR_NUM);
	ol_sc->mem = mem;
	ol_sc->mem_pa = pci_resource_start(pdev, BAR_NUM);
	sc->pci_enabled = true;
	return ret;

err_iomap:
	pci_clear_master(pdev);
err_dma:
	pci_release_region(pdev, BAR_NUM);
err_region:
	pci_disable_device(pdev);
	return ret;
}
  1888. static int hif_enable_pci_pld(struct hif_pci_softc *sc,
  1889. struct pci_dev *pdev,
  1890. const struct pci_device_id *id)
  1891. {
  1892. PCI_CFG_TO_DISABLE_L1SS_STATES(pdev, 0x188);
  1893. sc->pci_enabled = true;
  1894. return 0;
  1895. }
  1896. static void hif_pci_deinit_nopld(struct hif_pci_softc *sc)
  1897. {
  1898. pci_disable_msi(sc->pdev);
  1899. pci_iounmap(sc->pdev, sc->mem);
  1900. pci_clear_master(sc->pdev);
  1901. pci_release_region(sc->pdev, BAR_NUM);
  1902. pci_disable_device(sc->pdev);
  1903. }
/* No-op counterpart of hif_pci_deinit_nopld(); presumably the PLD layer
 * owns the PCI teardown in this configuration — confirm against pld docs.
 */
static void hif_pci_deinit_pld(struct hif_pci_softc *sc) {}
  1905. static void hif_disable_pci(struct hif_pci_softc *sc)
  1906. {
  1907. struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
  1908. if (!ol_sc) {
  1909. HIF_ERROR("%s: ol_sc = NULL", __func__);
  1910. return;
  1911. }
  1912. hif_pci_device_reset(sc);
  1913. sc->hif_pci_deinit(sc);
  1914. sc->mem = NULL;
  1915. ol_sc->mem = NULL;
  1916. }
  1917. static int hif_pci_probe_tgt_wakeup(struct hif_pci_softc *sc)
  1918. {
  1919. int ret = 0;
  1920. int targ_awake_limit = 500;
  1921. #ifndef QCA_WIFI_3_0
  1922. uint32_t fw_indicator;
  1923. #endif
  1924. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  1925. /*
  1926. * Verify that the Target was started cleanly.*
  1927. * The case where this is most likely is with an AUX-powered
  1928. * Target and a Host in WoW mode. If the Host crashes,
  1929. * loses power, or is restarted (without unloading the driver)
  1930. * then the Target is left (aux) powered and running. On a
  1931. * subsequent driver load, the Target is in an unexpected state.
  1932. * We try to catch that here in order to reset the Target and
  1933. * retry the probe.
  1934. */
  1935. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  1936. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
  1937. while (!hif_targ_is_awake(scn, sc->mem)) {
  1938. if (0 == targ_awake_limit) {
  1939. HIF_ERROR("%s: target awake timeout", __func__);
  1940. ret = -EAGAIN;
  1941. goto end;
  1942. }
  1943. qdf_mdelay(1);
  1944. targ_awake_limit--;
  1945. }
  1946. #if PCIE_BAR0_READY_CHECKING
  1947. {
  1948. int wait_limit = 200;
  1949. /* Synchronization point: wait the BAR0 is configured */
  1950. while (wait_limit-- &&
  1951. !(hif_read32_mb(sc, c->mem +
  1952. PCIE_LOCAL_BASE_ADDRESS +
  1953. PCIE_SOC_RDY_STATUS_ADDRESS)
  1954. & PCIE_SOC_RDY_STATUS_BAR_MASK)) {
  1955. qdf_mdelay(10);
  1956. }
  1957. if (wait_limit < 0) {
  1958. /* AR6320v1 doesn't support checking of BAR0
  1959. * configuration, takes one sec to wait BAR0 ready
  1960. */
  1961. HIF_INFO_MED("%s: AR6320v1 waits two sec for BAR0",
  1962. __func__);
  1963. }
  1964. }
  1965. #endif
  1966. #ifndef QCA_WIFI_3_0
  1967. fw_indicator = hif_read32_mb(sc, sc->mem + FW_INDICATOR_ADDRESS);
  1968. hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
  1969. PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
  1970. if (fw_indicator & FW_IND_INITIALIZED) {
  1971. HIF_ERROR("%s: Target is in an unknown state. EAGAIN",
  1972. __func__);
  1973. ret = -EAGAIN;
  1974. goto end;
  1975. }
  1976. #endif
  1977. end:
  1978. return ret;
  1979. }
/**
 * hif_pci_configure_legacy_irq() - set up legacy (line-based) interrupts
 * @sc: pci hif context
 *
 * Registers the shared legacy irq handler and tasklet, records the irq
 * as the wake irq, enables the legacy interrupt group in the target and
 * clears (or, for some targets, re-asserts) SOC wake.
 *
 * Return: 0 on success, request_irq() error code otherwise.
 */
static int hif_pci_configure_legacy_irq(struct hif_pci_softc *sc)
{
	int ret = 0;
	struct hif_softc *scn = HIF_GET_SOFTC(sc);
	uint32_t target_type = scn->target_info.target_type;

	HIF_TRACE("%s: E", __func__);

	/* legacy path: MSI is not supported here, or MSI setup failed */
	tasklet_init(&sc->intr_tq, wlan_tasklet, (unsigned long)sc);
	ret = request_irq(sc->pdev->irq,
			  hif_pci_legacy_ce_interrupt_handler, IRQF_SHARED,
			  "wlan_pci", sc);
	if (ret) {
		HIF_ERROR("%s: request_irq failed, ret = %d", __func__, ret);
		goto end;
	}
	scn->wake_irq = sc->pdev->irq;
	/* Use sc->irq instead of sc->pdev-irq
	 * platform_device pdev doesn't have an irq field
	 */
	sc->irq = sc->pdev->irq;
	/* Use Legacy PCI Interrupts */
	hif_write32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
		       PCIE_INTR_ENABLE_ADDRESS),
		       HOST_GROUP0_MASK);
	/* read back to flush/post the enable write before clearing wake */
	hif_read32_mb(sc, sc->mem + (SOC_CORE_BASE_ADDRESS |
		      PCIE_INTR_ENABLE_ADDRESS));
	hif_write32_mb(sc, sc->mem + PCIE_LOCAL_BASE_ADDRESS +
		       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);

	/* these targets are kept force-awake after irq setup —
	 * NOTE(review): rationale not visible here, confirm per-target
	 */
	if ((target_type == TARGET_TYPE_IPQ4019) ||
	    (target_type == TARGET_TYPE_AR900B)  ||
	    (target_type == TARGET_TYPE_QCA9984) ||
	    (target_type == TARGET_TYPE_AR9888) ||
	    (target_type == TARGET_TYPE_QCA9888) ||
	    (target_type == TARGET_TYPE_AR6320V1) ||
	    (target_type == TARGET_TYPE_AR6320V2) ||
	    (target_type == TARGET_TYPE_AR6320V3)) {
		hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
			       PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_V_MASK);
	}
end:
	QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_ERROR,
		  "%s: X, ret = %d", __func__, ret);
	return ret;
}
  2024. static int hif_ce_srng_msi_free_irq(struct hif_softc *scn)
  2025. {
  2026. int ret;
  2027. int ce_id, irq;
  2028. uint32_t msi_data_start;
  2029. uint32_t msi_data_count;
  2030. uint32_t msi_irq_start;
  2031. struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
  2032. ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
  2033. &msi_data_count, &msi_data_start,
  2034. &msi_irq_start);
  2035. if (ret)
  2036. return ret;
  2037. /* needs to match the ce_id -> irq data mapping
  2038. * used in the srng parameter configuration
  2039. */
  2040. for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
  2041. unsigned int msi_data;
  2042. if (!ce_sc->tasklets[ce_id].inited)
  2043. continue;
  2044. msi_data = (ce_id % msi_data_count) + msi_irq_start;
  2045. irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
  2046. hif_debug("%s: (ce_id %d, msi_data %d, irq %d)", __func__,
  2047. ce_id, msi_data, irq);
  2048. free_irq(irq, &ce_sc->tasklets[ce_id]);
  2049. }
  2050. return ret;
  2051. }
  2052. static void hif_pci_deconfigure_grp_irq(struct hif_softc *scn)
  2053. {
  2054. int i, j, irq;
  2055. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  2056. struct hif_exec_context *hif_ext_group;
  2057. for (i = 0; i < hif_state->hif_num_extgroup; i++) {
  2058. hif_ext_group = hif_state->hif_ext_group[i];
  2059. if (hif_ext_group->irq_requested) {
  2060. hif_ext_group->irq_requested = false;
  2061. for (j = 0; j < hif_ext_group->numirq; j++) {
  2062. irq = hif_ext_group->os_irq[j];
  2063. free_irq(irq, hif_ext_group);
  2064. }
  2065. hif_ext_group->numirq = 0;
  2066. }
  2067. }
  2068. }
  2069. /**
  2070. * hif_nointrs(): disable IRQ
  2071. *
  2072. * This function stops interrupt(s)
  2073. *
  2074. * @scn: struct hif_softc
  2075. *
  2076. * Return: none
  2077. */
  2078. void hif_pci_nointrs(struct hif_softc *scn)
  2079. {
  2080. int i, ret;
  2081. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2082. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  2083. ce_unregister_irq(hif_state, CE_ALL_BITMAP);
  2084. if (scn->request_irq_done == false)
  2085. return;
  2086. hif_pci_deconfigure_grp_irq(scn);
  2087. ret = hif_ce_srng_msi_free_irq(scn);
  2088. if (ret != -EINVAL) {
  2089. /* ce irqs freed in hif_ce_srng_msi_free_irq */
  2090. if (scn->wake_irq)
  2091. free_irq(scn->wake_irq, scn);
  2092. scn->wake_irq = 0;
  2093. } else if (sc->num_msi_intrs > 0) {
  2094. /* MSI interrupt(s) */
  2095. for (i = 0; i < sc->num_msi_intrs; i++)
  2096. free_irq(sc->irq + i, sc);
  2097. sc->num_msi_intrs = 0;
  2098. } else {
  2099. /* Legacy PCI line interrupt
  2100. * Use sc->irq instead of sc->pdev-irq
  2101. * platform_device pdev doesn't have an irq field
  2102. */
  2103. free_irq(sc->irq, sc);
  2104. }
  2105. scn->request_irq_done = false;
  2106. }
  2107. /**
  2108. * hif_disable_bus(): hif_disable_bus
  2109. *
  2110. * This function disables the bus
  2111. *
  2112. * @bdev: bus dev
  2113. *
  2114. * Return: none
  2115. */
  2116. void hif_pci_disable_bus(struct hif_softc *scn)
  2117. {
  2118. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2119. struct pci_dev *pdev;
  2120. void __iomem *mem;
  2121. struct hif_target_info *tgt_info = &scn->target_info;
  2122. /* Attach did not succeed, all resources have been
  2123. * freed in error handler
  2124. */
  2125. if (!sc)
  2126. return;
  2127. pdev = sc->pdev;
  2128. if (ADRASTEA_BU) {
  2129. hif_vote_link_down(GET_HIF_OPAQUE_HDL(scn));
  2130. hif_write32_mb(sc, sc->mem + PCIE_INTR_ENABLE_ADDRESS, 0);
  2131. hif_write32_mb(sc, sc->mem + PCIE_INTR_CLR_ADDRESS,
  2132. HOST_GROUP0_MASK);
  2133. }
  2134. #if defined(CPU_WARM_RESET_WAR)
  2135. /* Currently CPU warm reset sequence is tested only for AR9888_REV2
  2136. * Need to enable for AR9888_REV1 once CPU warm reset sequence is
  2137. * verified for AR9888_REV1
  2138. */
  2139. if ((tgt_info->target_version == AR9888_REV2_VERSION) ||
  2140. (tgt_info->target_version == AR9887_REV1_VERSION))
  2141. hif_pci_device_warm_reset(sc);
  2142. else
  2143. hif_pci_device_reset(sc);
  2144. #else
  2145. hif_pci_device_reset(sc);
  2146. #endif
  2147. mem = (void __iomem *)sc->mem;
  2148. if (mem) {
  2149. hif_dump_pipe_debug_count(scn);
  2150. if (scn->athdiag_procfs_inited) {
  2151. athdiag_procfs_remove();
  2152. scn->athdiag_procfs_inited = false;
  2153. }
  2154. sc->hif_pci_deinit(sc);
  2155. scn->mem = NULL;
  2156. }
  2157. HIF_INFO("%s: X", __func__);
  2158. }
  2159. #define OL_ATH_PCI_PM_CONTROL 0x44
  2160. #ifdef FEATURE_RUNTIME_PM
  2161. /**
  2162. * hif_runtime_prevent_linkdown() - prevent or allow a runtime pm from occurring
  2163. * @scn: hif context
  2164. * @flag: prevent linkdown if true otherwise allow
  2165. *
  2166. * this api should only be called as part of bus prevent linkdown
  2167. */
  2168. static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
  2169. {
  2170. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2171. if (flag)
  2172. qdf_runtime_pm_prevent_suspend(&sc->prevent_linkdown_lock);
  2173. else
  2174. qdf_runtime_pm_allow_suspend(&sc->prevent_linkdown_lock);
  2175. }
  2176. #else
  2177. static void hif_runtime_prevent_linkdown(struct hif_softc *scn, bool flag)
  2178. {
  2179. }
  2180. #endif
  2181. #if defined(CONFIG_PCI_MSM)
  2182. /**
  2183. * hif_bus_prevent_linkdown(): allow or permit linkdown
  2184. * @flag: true prevents linkdown, false allows
  2185. *
  2186. * Calls into the platform driver to vote against taking down the
  2187. * pcie link.
  2188. *
  2189. * Return: n/a
  2190. */
  2191. void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
  2192. {
  2193. int errno;
  2194. HIF_INFO("wlan: %s pcie power collapse", flag ? "disable" : "enable");
  2195. hif_runtime_prevent_linkdown(scn, flag);
  2196. errno = pld_wlan_pm_control(scn->qdf_dev->dev, flag);
  2197. if (errno)
  2198. HIF_ERROR("%s: Failed pld_wlan_pm_control; errno %d",
  2199. __func__, errno);
  2200. }
  2201. #else
  2202. void hif_pci_prevent_linkdown(struct hif_softc *scn, bool flag)
  2203. {
  2204. HIF_INFO("wlan: %s pcie power collapse", (flag ? "disable" : "enable"));
  2205. hif_runtime_prevent_linkdown(scn, flag);
  2206. }
  2207. #endif
  2208. /**
  2209. * hif_pci_bus_suspend(): prepare hif for suspend
  2210. *
  2211. * Return: Errno
  2212. */
  2213. int hif_pci_bus_suspend(struct hif_softc *scn)
  2214. {
  2215. hif_apps_irqs_disable(GET_HIF_OPAQUE_HDL(scn));
  2216. if (hif_drain_tasklets(scn)) {
  2217. hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
  2218. return -EBUSY;
  2219. }
  2220. /* Stop the HIF Sleep Timer */
  2221. hif_cancel_deferred_target_sleep(scn);
  2222. return 0;
  2223. }
  2224. /**
  2225. * __hif_check_link_status() - API to check if PCIe link is active/not
  2226. * @scn: HIF Context
  2227. *
  2228. * API reads the PCIe config space to verify if PCIe link training is
  2229. * successful or not.
  2230. *
  2231. * Return: Success/Failure
  2232. */
  2233. static int __hif_check_link_status(struct hif_softc *scn)
  2234. {
  2235. uint16_t dev_id = 0;
  2236. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2237. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  2238. if (!sc) {
  2239. HIF_ERROR("%s: HIF Bus Context is Invalid", __func__);
  2240. return -EINVAL;
  2241. }
  2242. pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &dev_id);
  2243. if (dev_id == sc->devid)
  2244. return 0;
  2245. HIF_ERROR("%s: Invalid PCIe Config Space; PCIe link down dev_id:0x%04x",
  2246. __func__, dev_id);
  2247. scn->recovery = true;
  2248. if (cbk && cbk->set_recovery_in_progress)
  2249. cbk->set_recovery_in_progress(cbk->context, true);
  2250. else
  2251. HIF_ERROR("%s: Driver Global Recovery is not set", __func__);
  2252. pld_is_pci_link_down(sc->dev);
  2253. return -EACCES;
  2254. }
  2255. /**
  2256. * hif_pci_bus_resume(): prepare hif for resume
  2257. *
  2258. * Return: Errno
  2259. */
  2260. int hif_pci_bus_resume(struct hif_softc *scn)
  2261. {
  2262. int errno;
  2263. errno = __hif_check_link_status(scn);
  2264. if (errno)
  2265. return errno;
  2266. hif_apps_irqs_enable(GET_HIF_OPAQUE_HDL(scn));
  2267. return 0;
  2268. }
  2269. /**
  2270. * hif_pci_bus_suspend_noirq() - ensure there are no pending transactions
  2271. * @scn: hif context
  2272. *
  2273. * Ensure that if we received the wakeup message before the irq
  2274. * was disabled that the message is pocessed before suspending.
  2275. *
  2276. * Return: -EBUSY if we fail to flush the tasklets.
  2277. */
  2278. int hif_pci_bus_suspend_noirq(struct hif_softc *scn)
  2279. {
  2280. if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
  2281. qdf_atomic_set(&scn->link_suspended, 1);
  2282. hif_apps_wake_irq_enable(GET_HIF_OPAQUE_HDL(scn));
  2283. return 0;
  2284. }
  2285. /**
  2286. * hif_pci_bus_resume_noirq() - ensure there are no pending transactions
  2287. * @scn: hif context
  2288. *
  2289. * Ensure that if we received the wakeup message before the irq
  2290. * was disabled that the message is pocessed before suspending.
  2291. *
  2292. * Return: -EBUSY if we fail to flush the tasklets.
  2293. */
  2294. int hif_pci_bus_resume_noirq(struct hif_softc *scn)
  2295. {
  2296. hif_apps_wake_irq_disable(GET_HIF_OPAQUE_HDL(scn));
  2297. if (hif_can_suspend_link(GET_HIF_OPAQUE_HDL(scn)))
  2298. qdf_atomic_set(&scn->link_suspended, 0);
  2299. return 0;
  2300. }
  2301. #ifdef FEATURE_RUNTIME_PM
  2302. /**
  2303. * __hif_runtime_pm_set_state(): utility function
  2304. * @state: state to set
  2305. *
  2306. * indexes into the runtime pm state and sets it.
  2307. */
  2308. static void __hif_runtime_pm_set_state(struct hif_softc *scn,
  2309. enum hif_pm_runtime_state state)
  2310. {
  2311. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2312. if (!sc) {
  2313. HIF_ERROR("%s: HIF_CTX not initialized",
  2314. __func__);
  2315. return;
  2316. }
  2317. qdf_atomic_set(&sc->pm_state, state);
  2318. }
  2319. /**
  2320. * hif_runtime_pm_set_state_on(): adjust runtime pm state
  2321. *
  2322. * Notify hif that a the runtime pm state should be on
  2323. */
  2324. static void hif_runtime_pm_set_state_on(struct hif_softc *scn)
  2325. {
  2326. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_ON);
  2327. }
  2328. /**
  2329. * hif_runtime_pm_set_state_resuming(): adjust runtime pm state
  2330. *
  2331. * Notify hif that a runtime pm resuming has started
  2332. */
  2333. static void hif_runtime_pm_set_state_resuming(struct hif_softc *scn)
  2334. {
  2335. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_RESUMING);
  2336. }
  2337. /**
  2338. * hif_runtime_pm_set_state_suspending(): adjust runtime pm state
  2339. *
  2340. * Notify hif that a runtime pm suspend has started
  2341. */
  2342. static void hif_runtime_pm_set_state_suspending(struct hif_softc *scn)
  2343. {
  2344. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDING);
  2345. }
  2346. /**
  2347. * hif_runtime_pm_set_state_suspended(): adjust runtime pm state
  2348. *
  2349. * Notify hif that a runtime suspend attempt has been completed successfully
  2350. */
  2351. static void hif_runtime_pm_set_state_suspended(struct hif_softc *scn)
  2352. {
  2353. __hif_runtime_pm_set_state(scn, HIF_PM_RUNTIME_STATE_SUSPENDED);
  2354. }
  2355. /**
  2356. * hif_log_runtime_suspend_success() - log a successful runtime suspend
  2357. */
  2358. static void hif_log_runtime_suspend_success(struct hif_softc *hif_ctx)
  2359. {
  2360. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  2361. if (!sc)
  2362. return;
  2363. sc->pm_stats.suspended++;
  2364. sc->pm_stats.suspend_jiffies = jiffies;
  2365. }
  2366. /**
  2367. * hif_log_runtime_suspend_failure() - log a failed runtime suspend
  2368. *
  2369. * log a failed runtime suspend
  2370. * mark last busy to prevent immediate runtime suspend
  2371. */
  2372. static void hif_log_runtime_suspend_failure(void *hif_ctx)
  2373. {
  2374. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  2375. if (!sc)
  2376. return;
  2377. sc->pm_stats.suspend_err++;
  2378. }
  2379. /**
  2380. * hif_log_runtime_resume_success() - log a successful runtime resume
  2381. *
  2382. * log a successful runtime resume
  2383. * mark last busy to prevent immediate runtime suspend
  2384. */
  2385. static void hif_log_runtime_resume_success(void *hif_ctx)
  2386. {
  2387. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  2388. if (!sc)
  2389. return;
  2390. sc->pm_stats.resumed++;
  2391. }
  2392. /**
  2393. * hif_process_runtime_suspend_failure() - bookkeeping of suspend failure
  2394. *
  2395. * Record the failure.
  2396. * mark last busy to delay a retry.
  2397. * adjust the runtime_pm state.
  2398. */
  2399. void hif_process_runtime_suspend_failure(struct hif_opaque_softc *hif_ctx)
  2400. {
  2401. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2402. hif_log_runtime_suspend_failure(hif_ctx);
  2403. hif_pm_runtime_mark_last_busy(hif_ctx);
  2404. hif_runtime_pm_set_state_on(scn);
  2405. }
  2406. /**
  2407. * hif_pre_runtime_suspend() - bookkeeping before beginning runtime suspend
  2408. *
  2409. * Makes sure that the pci link will be taken down by the suspend opperation.
  2410. * If the hif layer is configured to leave the bus on, runtime suspend will
  2411. * not save any power.
  2412. *
  2413. * Set the runtime suspend state to in progress.
  2414. *
  2415. * return -EINVAL if the bus won't go down. otherwise return 0
  2416. */
  2417. int hif_pre_runtime_suspend(struct hif_opaque_softc *hif_ctx)
  2418. {
  2419. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2420. if (!hif_can_suspend_link(hif_ctx)) {
  2421. HIF_ERROR("Runtime PM not supported for link up suspend");
  2422. return -EINVAL;
  2423. }
  2424. hif_runtime_pm_set_state_suspending(scn);
  2425. return 0;
  2426. }
  2427. /**
  2428. * hif_process_runtime_suspend_success() - bookkeeping of suspend success
  2429. *
  2430. * Record the success.
  2431. * adjust the runtime_pm state
  2432. */
  2433. void hif_process_runtime_suspend_success(struct hif_opaque_softc *hif_ctx)
  2434. {
  2435. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2436. hif_runtime_pm_set_state_suspended(scn);
  2437. hif_log_runtime_suspend_success(scn);
  2438. }
  2439. /**
  2440. * hif_pre_runtime_resume() - bookkeeping before beginning runtime resume
  2441. *
  2442. * update the runtime pm state.
  2443. */
  2444. void hif_pre_runtime_resume(struct hif_opaque_softc *hif_ctx)
  2445. {
  2446. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2447. hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
  2448. hif_runtime_pm_set_state_resuming(scn);
  2449. }
  2450. /**
  2451. * hif_process_runtime_resume_success() - bookkeeping after a runtime resume
  2452. *
  2453. * record the success.
  2454. * adjust the runtime_pm state
  2455. */
  2456. void hif_process_runtime_resume_success(struct hif_opaque_softc *hif_ctx)
  2457. {
  2458. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2459. hif_log_runtime_resume_success(hif_ctx);
  2460. hif_pm_runtime_mark_last_busy(hif_ctx);
  2461. hif_runtime_pm_set_state_on(scn);
  2462. }
  2463. /**
  2464. * hif_runtime_suspend() - do the bus suspend part of a runtime suspend
  2465. *
  2466. * Return: 0 for success and non-zero error code for failure
  2467. */
  2468. int hif_runtime_suspend(struct hif_opaque_softc *hif_ctx)
  2469. {
  2470. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  2471. int errno;
  2472. errno = hif_bus_suspend(hif_ctx);
  2473. if (errno) {
  2474. HIF_ERROR("%s: failed bus suspend: %d", __func__, errno);
  2475. return errno;
  2476. }
  2477. hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 1);
  2478. errno = hif_bus_suspend_noirq(hif_ctx);
  2479. if (errno) {
  2480. HIF_ERROR("%s: failed bus suspend noirq: %d", __func__, errno);
  2481. hif_pm_runtime_set_monitor_wake_intr(hif_ctx, 0);
  2482. goto bus_resume;
  2483. }
  2484. qdf_atomic_set(&sc->pm_dp_rx_busy, 0);
  2485. return 0;
  2486. bus_resume:
  2487. QDF_BUG(!hif_bus_resume(hif_ctx));
  2488. return errno;
  2489. }
  2490. /**
  2491. * hif_fastpath_resume() - resume fastpath for runtimepm
  2492. *
  2493. * ensure that the fastpath write index register is up to date
  2494. * since runtime pm may cause ce_send_fast to skip the register
  2495. * write.
  2496. *
  2497. * fastpath only applicable to legacy copy engine
  2498. */
  2499. void hif_fastpath_resume(struct hif_opaque_softc *hif_ctx)
  2500. {
  2501. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  2502. struct CE_state *ce_state;
  2503. if (!scn)
  2504. return;
  2505. if (scn->fastpath_mode_on) {
  2506. if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
  2507. return;
  2508. ce_state = scn->ce_id_to_state[CE_HTT_H2T_MSG];
  2509. qdf_spin_lock_bh(&ce_state->ce_index_lock);
  2510. /*war_ce_src_ring_write_idx_set */
  2511. CE_SRC_RING_WRITE_IDX_SET(scn, ce_state->ctrl_addr,
  2512. ce_state->src_ring->write_index);
  2513. qdf_spin_unlock_bh(&ce_state->ce_index_lock);
  2514. Q_TARGET_ACCESS_END(scn);
  2515. }
  2516. }
  2517. /**
  2518. * hif_runtime_resume() - do the bus resume part of a runtime resume
  2519. *
  2520. * Return: 0 for success and non-zero error code for failure
  2521. */
  2522. int hif_runtime_resume(struct hif_opaque_softc *hif_ctx)
  2523. {
  2524. QDF_BUG(!hif_bus_resume_noirq(hif_ctx));
  2525. QDF_BUG(!hif_bus_resume(hif_ctx));
  2526. return 0;
  2527. }
  2528. #endif /* #ifdef FEATURE_RUNTIME_PM */
#if CONFIG_PCIE_64BIT_MSI
/**
 * hif_free_msi_ctx() - free the 64-bit MSI magic DMA buffer
 * @scn: hif context
 *
 * Releases the coherent DMA allocation used for 64-bit MSI and clears
 * the bookkeeping fields.
 */
static void hif_free_msi_ctx(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = scn->hif_sc;
	struct hif_msi_info *info = &sc->msi_info;
	struct device *dev = scn->qdf_dev->dev;

	OS_FREE_CONSISTENT(dev, 4, info->magic, info->magic_dma,
			   OS_GET_DMA_MEM_CONTEXT(scn, dmacontext));
	info->magic = NULL;
	info->magic_dma = 0;
}
#else
/* no msi context to free when 64-bit MSI support is compiled out */
static void hif_free_msi_ctx(struct hif_softc *scn)
{
}
#endif
/**
 * hif_pci_disable_isr() - stop all interrupt processing for this device
 * @scn: hif context
 *
 * Kills exec contexts, frees irqs and msi context, then cancels any
 * tasklets that were already scheduled and resets their counters.
 */
void hif_pci_disable_isr(struct hif_softc *scn)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);

	hif_exec_kill(&scn->osc);
	hif_nointrs(scn);
	hif_free_msi_ctx(scn);
	/* Cancel the pending tasklet */
	ce_tasklet_kill(scn);
	tasklet_kill(&sc->intr_tq);
	qdf_atomic_set(&scn->active_tasklet_cnt, 0);
	qdf_atomic_set(&scn->active_grp_tasklet_cnt, 0);
}
/**
 * hif_pci_reset_soc() - reset the SoC
 * @hif_sc: hif context
 *
 * Issues a warm reset on targets where the warm reset sequence is
 * verified (AR9888_REV2 with CPU_WARM_RESET_WAR), a cold reset otherwise.
 */
void hif_pci_reset_soc(struct hif_softc *hif_sc)
{
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_sc);
	struct hif_opaque_softc *ol_sc = GET_HIF_OPAQUE_HDL(hif_sc);
	struct hif_target_info *tgt_info = hif_get_target_info_handle(ol_sc);

#if defined(CPU_WARM_RESET_WAR)
	/* Currently CPU warm reset sequence is tested only for AR9888_REV2
	 * Need to enable for AR9888_REV1 once CPU warm reset sequence is
	 * verified for AR9888_REV1
	 */
	if (tgt_info->target_version == AR9888_REV2_VERSION)
		hif_pci_device_warm_reset(sc);
	else
		hif_pci_device_reset(sc);
#else
	hif_pci_device_reset(sc);
#endif
}
  2576. #ifdef CONFIG_PCI_MSM
  2577. static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc)
  2578. {
  2579. msm_pcie_debug_info(sc->pdev, 13, 1, 0, 0, 0);
  2580. msm_pcie_debug_info(sc->pdev, 13, 2, 0, 0, 0);
  2581. }
  2582. #else
  2583. static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {};
  2584. #endif
  2585. /**
  2586. * hif_log_soc_wakeup_timeout() - API to log PCIe and SOC Info
  2587. * @sc: HIF PCIe Context
  2588. *
  2589. * API to log PCIe Config space and SOC info when SOC wakeup timeout happens
  2590. *
  2591. * Return: Failure to caller
  2592. */
  2593. static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
  2594. {
  2595. uint16_t val = 0;
  2596. uint32_t bar = 0;
  2597. struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
  2598. struct hif_softc *scn = HIF_GET_SOFTC(sc);
  2599. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
  2600. struct hif_config_info *cfg = hif_get_ini_handle(hif_hdl);
  2601. struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
  2602. A_target_id_t pci_addr = scn->mem;
  2603. HIF_ERROR("%s: keep_awake_count = %d",
  2604. __func__, hif_state->keep_awake_count);
  2605. pci_read_config_word(sc->pdev, PCI_VENDOR_ID, &val);
  2606. HIF_ERROR("%s: PCI Vendor ID = 0x%04x", __func__, val);
  2607. pci_read_config_word(sc->pdev, PCI_DEVICE_ID, &val);
  2608. HIF_ERROR("%s: PCI Device ID = 0x%04x", __func__, val);
  2609. pci_read_config_word(sc->pdev, PCI_COMMAND, &val);
  2610. HIF_ERROR("%s: PCI Command = 0x%04x", __func__, val);
  2611. pci_read_config_word(sc->pdev, PCI_STATUS, &val);
  2612. HIF_ERROR("%s: PCI Status = 0x%04x", __func__, val);
  2613. pci_read_config_dword(sc->pdev, PCI_BASE_ADDRESS_0, &bar);
  2614. HIF_ERROR("%s: PCI BAR 0 = 0x%08x", __func__, bar);
  2615. HIF_ERROR("%s: SOC_WAKE_ADDR 0%08x", __func__,
  2616. hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  2617. PCIE_SOC_WAKE_ADDRESS));
  2618. HIF_ERROR("%s: RTC_STATE_ADDR 0x%08x", __func__,
  2619. hif_read32_mb(scn, pci_addr + PCIE_LOCAL_BASE_ADDRESS +
  2620. RTC_STATE_ADDRESS));
  2621. HIF_ERROR("%s:error, wakeup target", __func__);
  2622. hif_msm_pcie_debug_info(sc);
  2623. if (!cfg->enable_self_recovery)
  2624. QDF_BUG(0);
  2625. scn->recovery = true;
  2626. if (cbk->set_recovery_in_progress)
  2627. cbk->set_recovery_in_progress(cbk->context, true);
  2628. pld_is_pci_link_down(sc->dev);
  2629. return -EACCES;
  2630. }
  2631. /*
  2632. * For now, we use simple on-demand sleep/wake.
  2633. * Some possible improvements:
  2634. * -Use the Host-destined A_INUM_PCIE_AWAKE interrupt rather than spin/delay
  2635. * (or perhaps spin/delay for a short while, then convert to sleep/interrupt)
  2636. * Careful, though, these functions may be used by
  2637. * interrupt handlers ("atomic")
  2638. * -Don't use host_reg_table for this code; instead use values directly
  2639. * -Use a separate timer to track activity and allow Target to sleep only
  2640. * if it hasn't done anything for a while; may even want to delay some
  2641. * processing for a short while in order to "batch" (e.g.) transmit
  2642. * requests with completion processing into "windows of up time". Costs
  2643. * some performance, but improves power utilization.
  2644. * -On some platforms, it might be possible to eliminate explicit
  2645. * sleep/wakeup. Instead, take a chance that each access works OK. If not,
  2646. * recover from the failure by forcing the Target awake.
  2647. * -Change keep_awake_count to an atomic_t in order to avoid spin lock
  2648. * overhead in some cases. Perhaps this makes more sense when
  2649. * CONFIG_ATH_PCIE_ACCESS_LIKELY is used and less sense when LIKELY is
  2650. * disabled.
  2651. * -It is possible to compile this code out and simply force the Target
  2652. * to remain awake. That would yield optimal performance at the cost of
  2653. * increased power. See CONFIG_ATH_PCIE_MAX_PERF.
  2654. *
  2655. * Note: parameter wait_for_it has meaning only when waking (when sleep_ok==0).
  2656. */
  2657. /**
  2658. * hif_target_sleep_state_adjust() - on-demand sleep/wake
  2659. * @scn: hif_softc pointer.
  2660. * @sleep_ok: bool
  2661. * @wait_for_it: bool
  2662. *
  2663. * Output the pipe error counts of each pipe to log file
  2664. *
  2665. * Return: int
  2666. */
  2667. int hif_pci_target_sleep_state_adjust(struct hif_softc *scn,
  2668. bool sleep_ok, bool wait_for_it)
  2669. {
  2670. struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
  2671. A_target_id_t pci_addr = scn->mem;
  2672. static int max_delay;
  2673. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  2674. static int debug;
  2675. if (scn->recovery)
  2676. return -EACCES;
  2677. if (qdf_atomic_read(&scn->link_suspended)) {
  2678. HIF_ERROR("%s:invalid access, PCIe link is down", __func__);
  2679. debug = true;
  2680. QDF_ASSERT(0);
  2681. return -EACCES;
  2682. }
  2683. if (debug) {
  2684. wait_for_it = true;
  2685. HIF_ERROR("%s: doing debug for invalid access, PCIe link is suspended",
  2686. __func__);
  2687. QDF_ASSERT(0);
  2688. }
  2689. if (sleep_ok) {
  2690. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  2691. hif_state->keep_awake_count--;
  2692. if (hif_state->keep_awake_count == 0) {
  2693. /* Allow sleep */
  2694. hif_state->verified_awake = false;
  2695. hif_state->sleep_ticks = qdf_system_ticks();
  2696. }
  2697. if (hif_state->fake_sleep == false) {
  2698. /* Set the Fake Sleep */
  2699. hif_state->fake_sleep = true;
  2700. /* Start the Sleep Timer */
  2701. qdf_timer_stop(&hif_state->sleep_timer);
  2702. qdf_timer_start(&hif_state->sleep_timer,
  2703. HIF_SLEEP_INACTIVITY_TIMER_PERIOD_MS);
  2704. }
  2705. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  2706. } else {
  2707. qdf_spin_lock_irqsave(&hif_state->keep_awake_lock);
  2708. if (hif_state->fake_sleep) {
  2709. hif_state->verified_awake = true;
  2710. } else {
  2711. if (hif_state->keep_awake_count == 0) {
  2712. /* Force AWAKE */
  2713. hif_write32_mb(sc, pci_addr +
  2714. PCIE_LOCAL_BASE_ADDRESS +
  2715. PCIE_SOC_WAKE_ADDRESS,
  2716. PCIE_SOC_WAKE_V_MASK);
  2717. }
  2718. }
  2719. hif_state->keep_awake_count++;
  2720. qdf_spin_unlock_irqrestore(&hif_state->keep_awake_lock);
  2721. if (wait_for_it && !hif_state->verified_awake) {
  2722. #define PCIE_SLEEP_ADJUST_TIMEOUT 8000 /* 8Ms */
  2723. int tot_delay = 0;
  2724. int curr_delay = 5;
  2725. for (;; ) {
  2726. if (hif_targ_is_awake(scn, pci_addr)) {
  2727. hif_state->verified_awake = true;
  2728. break;
  2729. }
  2730. if (!hif_pci_targ_is_present(scn, pci_addr))
  2731. break;
  2732. if (tot_delay > PCIE_SLEEP_ADJUST_TIMEOUT)
  2733. return hif_log_soc_wakeup_timeout(sc);
  2734. OS_DELAY(curr_delay);
  2735. tot_delay += curr_delay;
  2736. if (curr_delay < 50)
  2737. curr_delay += 5;
  2738. }
  2739. /*
  2740. * NB: If Target has to come out of Deep Sleep,
  2741. * this may take a few Msecs. Typically, though
  2742. * this delay should be <30us.
  2743. */
  2744. if (tot_delay > max_delay)
  2745. max_delay = tot_delay;
  2746. }
  2747. }
  2748. if (debug && hif_state->verified_awake) {
  2749. debug = 0;
  2750. HIF_ERROR("%s: INTR_ENABLE_REG = 0x%08x, INTR_CAUSE_REG = 0x%08x, CPU_INTR_REG = 0x%08x, INTR_CLR_REG = 0x%08x, CE_INTERRUPT_SUMMARY_REG = 0x%08x",
  2751. __func__,
  2752. hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
  2753. PCIE_INTR_ENABLE_ADDRESS),
  2754. hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
  2755. PCIE_INTR_CAUSE_ADDRESS),
  2756. hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
  2757. CPU_INTR_ADDRESS),
  2758. hif_read32_mb(sc, sc->mem + SOC_CORE_BASE_ADDRESS +
  2759. PCIE_INTR_CLR_ADDRESS),
  2760. hif_read32_mb(sc, sc->mem + CE_WRAPPER_BASE_ADDRESS +
  2761. CE_WRAPPER_INTERRUPT_SUMMARY_ADDRESS));
  2762. }
  2763. return 0;
  2764. }
  2765. #ifdef CONFIG_ATH_PCIE_ACCESS_DEBUG
  2766. uint32_t hif_target_read_checked(struct hif_softc *scn, uint32_t offset)
  2767. {
  2768. uint32_t value;
  2769. void *addr;
  2770. addr = scn->mem + offset;
  2771. value = hif_read32_mb(scn, addr);
  2772. {
  2773. unsigned long irq_flags;
  2774. int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
  2775. spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
  2776. pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
  2777. pcie_access_log[idx].is_write = false;
  2778. pcie_access_log[idx].addr = addr;
  2779. pcie_access_log[idx].value = value;
  2780. pcie_access_log_seqnum++;
  2781. spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
  2782. }
  2783. return value;
  2784. }
  2785. void
  2786. hif_target_write_checked(struct hif_softc *scn, uint32_t offset, uint32_t value)
  2787. {
  2788. void *addr;
  2789. addr = scn->mem + (offset);
  2790. hif_write32_mb(scn, addr, value);
  2791. {
  2792. unsigned long irq_flags;
  2793. int idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
  2794. spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
  2795. pcie_access_log[idx].seqnum = pcie_access_log_seqnum;
  2796. pcie_access_log[idx].is_write = true;
  2797. pcie_access_log[idx].addr = addr;
  2798. pcie_access_log[idx].value = value;
  2799. pcie_access_log_seqnum++;
  2800. spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
  2801. }
  2802. }
  2803. /**
  2804. * hif_target_dump_access_log() - dump access log
  2805. *
  2806. * dump access log
  2807. *
  2808. * Return: n/a
  2809. */
  2810. void hif_target_dump_access_log(void)
  2811. {
  2812. int idx, len, start_idx, cur_idx;
  2813. unsigned long irq_flags;
  2814. spin_lock_irqsave(&pcie_access_log_lock, irq_flags);
  2815. if (pcie_access_log_seqnum > PCIE_ACCESS_LOG_NUM) {
  2816. len = PCIE_ACCESS_LOG_NUM;
  2817. start_idx = pcie_access_log_seqnum % PCIE_ACCESS_LOG_NUM;
  2818. } else {
  2819. len = pcie_access_log_seqnum;
  2820. start_idx = 0;
  2821. }
  2822. for (idx = 0; idx < len; idx++) {
  2823. cur_idx = (start_idx + idx) % PCIE_ACCESS_LOG_NUM;
  2824. HIF_ERROR("%s: idx:%d sn:%u wr:%d addr:%pK val:%u.",
  2825. __func__, idx,
  2826. pcie_access_log[cur_idx].seqnum,
  2827. pcie_access_log[cur_idx].is_write,
  2828. pcie_access_log[cur_idx].addr,
  2829. pcie_access_log[cur_idx].value);
  2830. }
  2831. pcie_access_log_seqnum = 0;
  2832. spin_unlock_irqrestore(&pcie_access_log_lock, irq_flags);
  2833. }
  2834. #endif
  2835. #ifndef HIF_AHB
/* stub for builds without AHB support: must never be reached */
int hif_ahb_configure_legacy_irq(struct hif_pci_softc *sc)
{
	QDF_BUG(0);
	return -EINVAL;
}
/* stub for builds without AHB support: must never be reached */
int hif_ahb_configure_irq(struct hif_pci_softc *sc)
{
	QDF_BUG(0);
	return -EINVAL;
}
  2846. #endif
/**
 * hif_ce_interrupt_handler() - per-CE MSI interrupt handler
 * @irq: irq number (unused; one handler instance per CE)
 * @context: the CE's tasklet entry registered at request_irq time
 *
 * Return: irqreturn status from the CE dispatch
 */
static irqreturn_t hif_ce_interrupt_handler(int irq, void *context)
{
	struct ce_tasklet_entry *tasklet_entry = context;

	return ce_dispatch_interrupt(tasklet_entry->ce_id, tasklet_entry);
}
  2852. extern const char *ce_name[];
/* return the msi irq number previously recorded for @ce_id in
 * hif_ce_msi_configure_irq()
 */
static int hif_ce_msi_map_ce_to_irq(struct hif_softc *scn, int ce_id)
{
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);

	return pci_scn->ce_msi_irq_num[ce_id];
}
/* hif_ce_srng_msi_irq_disable() - disable the irq for msi
 * @hif_sc: hif context
 * @ce_id: which ce to disable copy complete interrupts for
 *
 * since MSI interrupts are not level based, the system can function
 * without disabling these interrupts. Interrupt mitigation can be
 * added here for better system performance.
 */
static void hif_ce_srng_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
{
	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}
/* re-enable the msi irq for @ce_id on a srng-based target */
static void hif_ce_srng_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
{
	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}
/* disable the msi irq for @ce_id on a legacy (non-srng) target */
static void hif_ce_legacy_msi_irq_disable(struct hif_softc *hif_sc, int ce_id)
{
	disable_irq_nosync(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}
/* re-enable the msi irq for @ce_id on a legacy (non-srng) target */
static void hif_ce_legacy_msi_irq_enable(struct hif_softc *hif_sc, int ce_id)
{
	enable_irq(hif_ce_msi_map_ce_to_irq(hif_sc, ce_id));
}
  2882. static int hif_ce_msi_configure_irq(struct hif_softc *scn)
  2883. {
  2884. int ret;
  2885. int ce_id, irq;
  2886. uint32_t msi_data_start;
  2887. uint32_t msi_data_count;
  2888. uint32_t msi_irq_start;
  2889. struct HIF_CE_state *ce_sc = HIF_GET_CE_STATE(scn);
  2890. struct hif_pci_softc *pci_sc = HIF_GET_PCI_SOFTC(scn);
  2891. /* do wake irq assignment */
  2892. ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "WAKE",
  2893. &msi_data_count, &msi_data_start,
  2894. &msi_irq_start);
  2895. if (ret)
  2896. return ret;
  2897. scn->wake_irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_irq_start);
  2898. ret = request_irq(scn->wake_irq, hif_wake_interrupt_handler,
  2899. IRQF_NO_SUSPEND, "wlan_wake_irq", scn);
  2900. if (ret)
  2901. return ret;
  2902. /* do ce irq assignments */
  2903. ret = pld_get_user_msi_assignment(scn->qdf_dev->dev, "CE",
  2904. &msi_data_count, &msi_data_start,
  2905. &msi_irq_start);
  2906. if (ret)
  2907. goto free_wake_irq;
  2908. if (ce_srng_based(scn)) {
  2909. scn->bus_ops.hif_irq_disable = &hif_ce_srng_msi_irq_disable;
  2910. scn->bus_ops.hif_irq_enable = &hif_ce_srng_msi_irq_enable;
  2911. } else {
  2912. scn->bus_ops.hif_irq_disable = &hif_ce_legacy_msi_irq_disable;
  2913. scn->bus_ops.hif_irq_enable = &hif_ce_legacy_msi_irq_enable;
  2914. }
  2915. scn->bus_ops.hif_map_ce_to_irq = &hif_ce_msi_map_ce_to_irq;
  2916. /* needs to match the ce_id -> irq data mapping
  2917. * used in the srng parameter configuration
  2918. */
  2919. for (ce_id = 0; ce_id < scn->ce_count; ce_id++) {
  2920. unsigned int msi_data = (ce_id % msi_data_count) +
  2921. msi_irq_start;
  2922. irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
  2923. HIF_DBG("%s: (ce_id %d, msi_data %d, irq %d tasklet %pK)",
  2924. __func__, ce_id, msi_data, irq,
  2925. &ce_sc->tasklets[ce_id]);
  2926. /* implies the ce is also initialized */
  2927. if (!ce_sc->tasklets[ce_id].inited)
  2928. continue;
  2929. pci_sc->ce_msi_irq_num[ce_id] = irq;
  2930. ret = request_irq(irq, hif_ce_interrupt_handler,
  2931. IRQF_SHARED,
  2932. ce_name[ce_id],
  2933. &ce_sc->tasklets[ce_id]);
  2934. if (ret)
  2935. goto free_irq;
  2936. }
  2937. return ret;
  2938. free_irq:
  2939. /* the request_irq for the last ce_id failed so skip it. */
  2940. while (ce_id > 0 && ce_id < scn->ce_count) {
  2941. unsigned int msi_data;
  2942. ce_id--;
  2943. msi_data = (ce_id % msi_data_count) + msi_irq_start;
  2944. irq = pld_get_msi_irq(scn->qdf_dev->dev, msi_data);
  2945. free_irq(irq, &ce_sc->tasklets[ce_id]);
  2946. }
  2947. free_wake_irq:
  2948. free_irq(scn->wake_irq, scn->qdf_dev->dev);
  2949. scn->wake_irq = 0;
  2950. return ret;
  2951. }
/* disable (without waiting for in-flight handlers) every irq of an
 * exec group
 */
static void hif_exec_grp_irq_disable(struct hif_exec_context *hif_ext_group)
{
	int i;

	for (i = 0; i < hif_ext_group->numirq; i++)
		disable_irq_nosync(hif_ext_group->os_irq[i]);
}
/* re-enable every irq of an exec group */
static void hif_exec_grp_irq_enable(struct hif_exec_context *hif_ext_group)
{
	int i;

	for (i = 0; i < hif_ext_group->numirq; i++)
		enable_irq(hif_ext_group->os_irq[i]);
}
  2964. /**
  2965. * hif_pci_get_irq_name() - get irqname
  2966. * This function gives irqnumber to irqname
  2967. * mapping.
  2968. *
  2969. * @irq_no: irq number
  2970. *
  2971. * Return: irq name
  2972. */
  2973. const char *hif_pci_get_irq_name(int irq_no)
  2974. {
  2975. return "pci-dummy";
  2976. }
  2977. int hif_pci_configure_grp_irq(struct hif_softc *scn,
  2978. struct hif_exec_context *hif_ext_group)
  2979. {
  2980. int ret = 0;
  2981. int irq = 0;
  2982. int j;
  2983. hif_ext_group->irq_enable = &hif_exec_grp_irq_enable;
  2984. hif_ext_group->irq_disable = &hif_exec_grp_irq_disable;
  2985. hif_ext_group->irq_name = &hif_pci_get_irq_name;
  2986. hif_ext_group->work_complete = &hif_dummy_grp_done;
  2987. for (j = 0; j < hif_ext_group->numirq; j++) {
  2988. irq = hif_ext_group->irq[j];
  2989. hif_info("request_irq = %d for grp %d",
  2990. irq, hif_ext_group->grp_id);
  2991. ret = request_irq(irq,
  2992. hif_ext_group_interrupt_handler,
  2993. IRQF_SHARED | IRQF_NO_SUSPEND,
  2994. "wlan_EXT_GRP",
  2995. hif_ext_group);
  2996. if (ret) {
  2997. HIF_ERROR("%s: request_irq failed ret = %d",
  2998. __func__, ret);
  2999. return -EFAULT;
  3000. }
  3001. hif_ext_group->os_irq[j] = irq;
  3002. }
  3003. hif_ext_group->irq_requested = true;
  3004. return 0;
  3005. }
  3006. /**
  3007. * hif_configure_irq() - configure interrupt
  3008. *
  3009. * This function configures interrupt(s)
  3010. *
  3011. * @sc: PCIe control struct
  3012. * @hif_hdl: struct HIF_CE_state
  3013. *
  3014. * Return: 0 - for success
  3015. */
  3016. int hif_configure_irq(struct hif_softc *scn)
  3017. {
  3018. int ret = 0;
  3019. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  3020. HIF_TRACE("%s: E", __func__);
  3021. if (hif_is_polled_mode_enabled(GET_HIF_OPAQUE_HDL(scn))) {
  3022. scn->request_irq_done = false;
  3023. return 0;
  3024. }
  3025. hif_init_reschedule_tasklet_work(sc);
  3026. ret = hif_ce_msi_configure_irq(scn);
  3027. if (ret == 0) {
  3028. goto end;
  3029. }
  3030. switch (scn->target_info.target_type) {
  3031. case TARGET_TYPE_IPQ4019:
  3032. ret = hif_ahb_configure_legacy_irq(sc);
  3033. break;
  3034. case TARGET_TYPE_QCA8074:
  3035. case TARGET_TYPE_QCA8074V2:
  3036. case TARGET_TYPE_QCA6018:
  3037. ret = hif_ahb_configure_irq(sc);
  3038. break;
  3039. default:
  3040. ret = hif_pci_configure_legacy_irq(sc);
  3041. break;
  3042. }
  3043. if (ret < 0) {
  3044. HIF_ERROR("%s: hif_pci_configure_legacy_irq error = %d",
  3045. __func__, ret);
  3046. return ret;
  3047. }
  3048. end:
  3049. scn->request_irq_done = true;
  3050. return 0;
  3051. }
  3052. /**
  3053. * hif_trigger_timer_irq() : Triggers interrupt on LF_Timer 0
  3054. * @scn: hif control structure
  3055. *
  3056. * Sets IRQ bit in LF Timer Status Address to awake peregrine/swift
  3057. * stuck at a polling loop in pcie_address_config in FW
  3058. *
  3059. * Return: none
  3060. */
  3061. static void hif_trigger_timer_irq(struct hif_softc *scn)
  3062. {
  3063. int tmp;
  3064. /* Trigger IRQ on Peregrine/Swift by setting
  3065. * IRQ Bit of LF_TIMER 0
  3066. */
  3067. tmp = hif_read32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
  3068. SOC_LF_TIMER_STATUS0_ADDRESS));
  3069. /* Set Raw IRQ Bit */
  3070. tmp |= 1;
  3071. /* SOC_LF_TIMER_STATUS0 */
  3072. hif_write32_mb(scn, scn->mem + (RTC_SOC_BASE_ADDRESS +
  3073. SOC_LF_TIMER_STATUS0_ADDRESS), tmp);
  3074. }
/**
 * hif_target_sync() : ensure the target is ready
 * @scn: hif control structure
 *
 * Informs fw that we plan to use legacy interrupts so that
 * it can begin booting. Ensures that the fw finishes booting
 * before continuing. Should be called before trying to write
 * to the targets other registers for the first time.
 *
 * Return: none
 */
static void hif_target_sync(struct hif_softc *scn)
{
	/* Enable FW + all-CE interrupts to announce legacy-interrupt use */
	hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
			    PCIE_INTR_ENABLE_ADDRESS),
			    PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	/* read to flush pcie write */
	(void)hif_read32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
			    PCIE_INTR_ENABLE_ADDRESS));

	/* Hold the SoC awake while we poll its registers below */
	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
			PCIE_SOC_WAKE_ADDRESS,
			PCIE_SOC_WAKE_V_MASK);
	/* unbounded busy-wait until the target reports awake */
	while (!hif_targ_is_awake(scn, scn->mem))
		;

	if (HAS_FW_INDICATOR) {
		int wait_limit = 500;	/* 500 polls x 10 ms = ~5 s */
		int fw_ind = 0;
		int retry_count = 0;
		uint32_t target_type = scn->target_info.target_type;
fw_retry:
		HIF_TRACE("%s: Loop checking FW signal", __func__);
		while (1) {
			fw_ind = hif_read32_mb(scn, scn->mem +
					FW_INDICATOR_ADDRESS);
			if (fw_ind & FW_IND_INITIALIZED)
				break;
			if (wait_limit-- < 0)
				break;
			/* re-assert the interrupt enables every iteration in
			 * case the earlier write was lost while the target
			 * was still coming up
			 */
			hif_write32_mb(scn, scn->mem + (SOC_CORE_BASE_ADDRESS |
				PCIE_INTR_ENABLE_ADDRESS),
				PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
			/* read to flush pcie write */
			(void)hif_read32_mb(scn, scn->mem +
				(SOC_CORE_BASE_ADDRESS | PCIE_INTR_ENABLE_ADDRESS));
			qdf_mdelay(10);
		}

		if (wait_limit < 0) {
			/* AR9888 can be stuck polling its LF timer; kick it
			 * with a timer IRQ and retry up to two more times
			 */
			if (target_type == TARGET_TYPE_AR9888 &&
			    retry_count++ < 2) {
				hif_trigger_timer_irq(scn);
				wait_limit = 500;
				goto fw_retry;
			}
			HIF_TRACE("%s: FW signal timed out",
					__func__);
			/* FW never initialized: unrecoverable, panic */
			qdf_assert_always(0);
		} else {
			HIF_TRACE("%s: Got FW signal, retries = %x",
					__func__, 500-wait_limit);
		}
	}
	/* FW is up: let the SoC sleep again */
	hif_write32_mb(scn, scn->mem + PCIE_LOCAL_BASE_ADDRESS +
			PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
  3139. static void hif_pci_get_soc_info_pld(struct hif_pci_softc *sc,
  3140. struct device *dev)
  3141. {
  3142. struct pld_soc_info info;
  3143. pld_get_soc_info(dev, &info);
  3144. sc->mem = info.v_addr;
  3145. sc->ce_sc.ol_sc.mem = info.v_addr;
  3146. sc->ce_sc.ol_sc.mem_pa = info.p_addr;
  3147. }
/* Intentional no-op for non-PLD targets; presumably sc->mem/mem_pa are
 * filled in elsewhere along the plain PCI enable path - not visible here.
 */
static void hif_pci_get_soc_info_nopld(struct hif_pci_softc *sc,
		struct device *dev)
{}
  3151. static bool hif_is_pld_based_target(struct hif_pci_softc *sc,
  3152. int device_id)
  3153. {
  3154. if (!pld_have_platform_driver_support(sc->dev))
  3155. return false;
  3156. switch (device_id) {
  3157. case QCA6290_DEVICE_ID:
  3158. case QCN9000_DEVICE_ID:
  3159. case QCA6290_EMULATION_DEVICE_ID:
  3160. case QCA6390_DEVICE_ID:
  3161. case QCA6490_DEVICE_ID:
  3162. case AR6320_DEVICE_ID:
  3163. case QCN7605_DEVICE_ID:
  3164. return true;
  3165. }
  3166. return false;
  3167. }
  3168. static void hif_pci_init_deinit_ops_attach(struct hif_pci_softc *sc,
  3169. int device_id)
  3170. {
  3171. if (hif_is_pld_based_target(sc, device_id)) {
  3172. sc->hif_enable_pci = hif_enable_pci_pld;
  3173. sc->hif_pci_deinit = hif_pci_deinit_pld;
  3174. sc->hif_pci_get_soc_info = hif_pci_get_soc_info_pld;
  3175. } else {
  3176. sc->hif_enable_pci = hif_enable_pci_nopld;
  3177. sc->hif_pci_deinit = hif_pci_deinit_nopld;
  3178. sc->hif_pci_get_soc_info = hif_pci_get_soc_info_nopld;
  3179. }
  3180. }
  3181. #ifdef HIF_REG_WINDOW_SUPPORT
  3182. static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
  3183. u32 target_type)
  3184. {
  3185. switch (target_type) {
  3186. case TARGET_TYPE_QCN7605:
  3187. sc->use_register_windowing = true;
  3188. qdf_spinlock_create(&sc->register_access_lock);
  3189. sc->register_window = 0;
  3190. break;
  3191. default:
  3192. sc->use_register_windowing = false;
  3193. }
  3194. }
  3195. #else
  3196. static void hif_pci_init_reg_windowing_support(struct hif_pci_softc *sc,
  3197. u32 target_type)
  3198. {
  3199. sc->use_register_windowing = false;
  3200. }
  3201. #endif
/**
 * hif_pci_enable_bus(): enable bus
 * @ol_sc: soft_sc struct
 * @dev: device pointer
 * @bdev: bus dev pointer
 * @bid: bus id pointer
 * @type: enum hif_enable_type such as HIF_ENABLE_TYPE_PROBE
 *
 * Enables the PCI device, derives hif/target type from the device and
 * revision ids, attaches the register tables and, for non-SRNG targets,
 * wakes the target and waits for FW to initialize.
 *
 * Return: QDF_STATUS
 */
QDF_STATUS hif_pci_enable_bus(struct hif_softc *ol_sc,
			      struct device *dev, void *bdev,
			      const struct hif_bus_id *bid,
			      enum hif_enable_type type)
{
	int ret = 0;
	uint32_t hif_type, target_type;
	struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(ol_sc);
	struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(ol_sc);
	uint16_t revision_id = 0;
	int probe_again = 0;
	struct pci_dev *pdev = bdev;
	const struct pci_device_id *id = (const struct pci_device_id *)bid;
	struct hif_target_info *tgt_info;

	/* NOTE(review): ol_sc already went through the conversion macros
	 * above; this late NULL check assumes they are pure casts - confirm.
	 */
	if (!ol_sc) {
		HIF_ERROR("%s: hif_ctx is NULL", __func__);
		return QDF_STATUS_E_NOMEM;
	}

	HIF_TRACE("%s: con_mode = 0x%x, device_id = 0x%x",
		  __func__, hif_get_conparam(ol_sc), id->device);

	sc->pdev = pdev;
	sc->dev = &pdev->dev;
	sc->devid = id->device;
	sc->cacheline_sz = dma_get_cache_alignment();
	tgt_info = hif_get_target_info_handle(hif_hdl);
	/* pick PLD or raw-PCI enable/deinit/soc-info helpers */
	hif_pci_init_deinit_ops_attach(sc, id->device);
	sc->hif_pci_get_soc_info(sc, dev);
again:
	ret = sc->hif_enable_pci(sc, pdev, id);
	if (ret < 0) {
		HIF_ERROR("%s: ERROR - hif_enable_pci error = %d",
		       __func__, ret);
		goto err_enable_pci;
	}
	HIF_TRACE("%s: hif_enable_pci done", __func__);

	/* Temporary FIX: disable ASPM on peregrine.
	 * Will be removed after the OTP is programmed
	 */
	hif_disable_power_gating(hif_hdl);

	device_disable_async_suspend(&pdev->dev);
	/* 0x08 is the PCI config-space revision-id offset */
	pci_read_config_word(pdev, 0x08, &revision_id);

	ret = hif_get_device_type(id->device, revision_id,
						&hif_type, &target_type);
	if (ret < 0) {
		HIF_ERROR("%s: invalid device id/revision_id", __func__);
		goto err_tgtstate;
	}
	HIF_TRACE("%s: hif_type = 0x%x, target_type = 0x%x",
		  __func__, hif_type, target_type);

	hif_register_tbl_attach(ol_sc, hif_type);
	hif_target_register_tbl_attach(ol_sc, target_type);

	hif_pci_init_reg_windowing_support(sc, target_type);

	tgt_info->target_type = target_type;

	if (ce_srng_based(ol_sc)) {
		HIF_TRACE("%s:Skip tgt_wake up for srng devices\n", __func__);
	} else {
		ret = hif_pci_probe_tgt_wakeup(sc);
		if (ret < 0) {
			HIF_ERROR("%s: ERROR - hif_pci_prob_wakeup error = %d",
					__func__, ret);
			/* -EAGAIN feeds the bounded reprobe loop below */
			if (ret == -EAGAIN)
				probe_again++;
			goto err_tgtstate;
		}
		HIF_TRACE("%s: hif_pci_probe_tgt_wakeup done", __func__);
	}

	if (!ol_sc->mem_pa) {
		HIF_ERROR("%s: ERROR - BAR0 uninitialized", __func__);
		ret = -EIO;
		goto err_tgtstate;
	}

	if (!ce_srng_based(ol_sc)) {
		hif_target_sync(ol_sc);

		if (ADRASTEA_BU)
			hif_vote_link_up(hif_hdl);
	}

	return 0;

err_tgtstate:
	hif_disable_pci(sc);
	sc->pci_enabled = false;
	HIF_ERROR("%s: error, hif_disable_pci done", __func__);
	return QDF_STATUS_E_ABORTED;

err_enable_pci:
	if (probe_again && (probe_again <= ATH_PCI_PROBE_RETRY_MAX)) {
		int delay_time;

		HIF_INFO("%s: pci reprobe", __func__);
		/* 10, 40, 90, 100, 100, ... */
		/* NOTE(review): max() makes the delay at least 100 ms; the
		 * 10/40/90 sequence above would require min() - confirm.
		 */
		delay_time = max(100, 10 * (probe_again * probe_again));
		qdf_mdelay(delay_time);
		goto again;
	}
	return ret;
}
  3307. /**
  3308. * hif_pci_irq_enable() - ce_irq_enable
  3309. * @scn: hif_softc
  3310. * @ce_id: ce_id
  3311. *
  3312. * Return: void
  3313. */
  3314. void hif_pci_irq_enable(struct hif_softc *scn, int ce_id)
  3315. {
  3316. uint32_t tmp = 1 << ce_id;
  3317. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  3318. qdf_spin_lock_irqsave(&sc->irq_lock);
  3319. scn->ce_irq_summary &= ~tmp;
  3320. if (scn->ce_irq_summary == 0) {
  3321. /* Enable Legacy PCI line interrupts */
  3322. if (LEGACY_INTERRUPTS(sc) &&
  3323. (scn->target_status != TARGET_STATUS_RESET) &&
  3324. (!qdf_atomic_read(&scn->link_suspended))) {
  3325. hif_write32_mb(scn, scn->mem +
  3326. (SOC_CORE_BASE_ADDRESS |
  3327. PCIE_INTR_ENABLE_ADDRESS),
  3328. HOST_GROUP0_MASK);
  3329. hif_read32_mb(scn, scn->mem +
  3330. (SOC_CORE_BASE_ADDRESS |
  3331. PCIE_INTR_ENABLE_ADDRESS));
  3332. }
  3333. }
  3334. if (scn->hif_init_done == true)
  3335. Q_TARGET_ACCESS_END(scn);
  3336. qdf_spin_unlock_irqrestore(&sc->irq_lock);
  3337. /* check for missed firmware crash */
  3338. hif_fw_interrupt_handler(0, scn);
  3339. }
/**
 * hif_pci_irq_disable() - ce_irq_disable
 * @scn: hif_softc
 * @ce_id: ce_id (unused here; the wake applies to the whole target)
 *
 * only applicable to legacy copy engine...
 *
 * Return: void
 */
void hif_pci_irq_disable(struct hif_softc *scn, int ce_id)
{
	/* For Rome only need to wake up target */
	/* target access is maintained until interrupts are re-enabled */
	Q_TARGET_ACCESS_BEGIN(scn);
}
  3355. #ifdef FEATURE_RUNTIME_PM
  3356. /**
  3357. * hif_pm_runtime_get_sync() - do a get operation with sync resume
  3358. *
  3359. * A get operation will prevent a runtime suspend until a corresponding
  3360. * put is done. Unlike hif_pm_runtime_get(), this API will do a sync
  3361. * resume instead of requesting a resume if it is runtime PM suspended
  3362. * so it can only be called in non-atomic context.
  3363. *
  3364. * @hif_ctx: pointer of HIF context
  3365. *
  3366. * Return: 0 if it is runtime PM resumed otherwise an error code.
  3367. */
  3368. int hif_pm_runtime_get_sync(struct hif_opaque_softc *hif_ctx)
  3369. {
  3370. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3371. int pm_state;
  3372. int ret;
  3373. if (!sc)
  3374. return -EINVAL;
  3375. pm_state = qdf_atomic_read(&sc->pm_state);
  3376. if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
  3377. pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
  3378. hif_info_high("Runtime PM resume is requested by %ps",
  3379. (void *)_RET_IP_);
  3380. sc->pm_stats.runtime_get++;
  3381. ret = pm_runtime_get_sync(sc->dev);
  3382. /* Get can return 1 if the device is already active, just return
  3383. * success in that case.
  3384. */
  3385. if (ret > 0)
  3386. ret = 0;
  3387. if (ret) {
  3388. sc->pm_stats.runtime_get_err++;
  3389. hif_err("Runtime PM Get Sync error in pm_state: %d, ret: %d",
  3390. qdf_atomic_read(&sc->pm_state), ret);
  3391. hif_pm_runtime_put(hif_ctx);
  3392. }
  3393. return ret;
  3394. }
  3395. /**
  3396. * hif_pm_runtime_put_sync_suspend() - do a put operation with sync suspend
  3397. *
  3398. * This API will do a runtime put operation followed by a sync suspend if usage
  3399. * count is 0 so it can only be called in non-atomic context.
  3400. *
  3401. * @hif_ctx: pointer of HIF context
  3402. *
  3403. * Return: 0 for success otherwise an error code
  3404. */
  3405. int hif_pm_runtime_put_sync_suspend(struct hif_opaque_softc *hif_ctx)
  3406. {
  3407. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3408. int usage_count, pm_state;
  3409. char *err = NULL;
  3410. if (!sc)
  3411. return -EINVAL;
  3412. usage_count = atomic_read(&sc->dev->power.usage_count);
  3413. if (usage_count == 1) {
  3414. pm_state = qdf_atomic_read(&sc->pm_state);
  3415. if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
  3416. err = "Ignore unexpected Put as runtime PM is disabled";
  3417. } else if (usage_count == 0) {
  3418. err = "Put without a Get Operation";
  3419. }
  3420. if (err) {
  3421. hif_pci_runtime_pm_warn(sc, err);
  3422. return -EINVAL;
  3423. }
  3424. sc->pm_stats.runtime_put++;
  3425. return pm_runtime_put_sync_suspend(sc->dev);
  3426. }
  3427. int hif_pm_runtime_request_resume(struct hif_opaque_softc *hif_ctx)
  3428. {
  3429. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3430. int pm_state;
  3431. if (!sc)
  3432. return -EINVAL;
  3433. pm_state = qdf_atomic_read(&sc->pm_state);
  3434. if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
  3435. pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING)
  3436. HIF_INFO("Runtime PM resume is requested by %ps",
  3437. (void *)_RET_IP_);
  3438. sc->pm_stats.request_resume++;
  3439. sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
  3440. return hif_pm_request_resume(sc->dev);
  3441. }
  3442. void hif_pm_runtime_mark_last_busy(struct hif_opaque_softc *hif_ctx)
  3443. {
  3444. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3445. if (!sc)
  3446. return;
  3447. sc->pm_stats.last_busy_marker = (void *)_RET_IP_;
  3448. sc->pm_stats.last_busy_timestamp = qdf_get_log_timestamp_usecs();
  3449. return pm_runtime_mark_last_busy(sc->dev);
  3450. }
  3451. void hif_pm_runtime_get_noresume(struct hif_opaque_softc *hif_ctx)
  3452. {
  3453. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3454. if (!sc)
  3455. return;
  3456. sc->pm_stats.runtime_get++;
  3457. pm_runtime_get_noresume(sc->dev);
  3458. }
  3459. /**
  3460. * hif_pm_runtime_get() - do a get opperation on the device
  3461. *
  3462. * A get opperation will prevent a runtime suspend until a
  3463. * corresponding put is done. This api should be used when sending
  3464. * data.
  3465. *
  3466. * CONTRARY TO THE REGULAR RUNTIME PM, WHEN THE BUS IS SUSPENDED,
  3467. * THIS API WILL ONLY REQUEST THE RESUME AND NOT TO A GET!!!
  3468. *
  3469. * return: success if the bus is up and a get has been issued
  3470. * otherwise an error code.
  3471. */
  3472. int hif_pm_runtime_get(struct hif_opaque_softc *hif_ctx)
  3473. {
  3474. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  3475. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3476. int ret;
  3477. int pm_state;
  3478. if (!scn) {
  3479. hif_err("Could not do runtime get, scn is null");
  3480. return -EFAULT;
  3481. }
  3482. pm_state = qdf_atomic_read(&sc->pm_state);
  3483. if (pm_state == HIF_PM_RUNTIME_STATE_ON ||
  3484. pm_state == HIF_PM_RUNTIME_STATE_NONE) {
  3485. sc->pm_stats.runtime_get++;
  3486. ret = __hif_pm_runtime_get(sc->dev);
  3487. /* Get can return 1 if the device is already active, just return
  3488. * success in that case
  3489. */
  3490. if (ret > 0)
  3491. ret = 0;
  3492. if (ret)
  3493. hif_pm_runtime_put(hif_ctx);
  3494. if (ret && ret != -EINPROGRESS) {
  3495. sc->pm_stats.runtime_get_err++;
  3496. hif_err("Runtime Get PM Error in pm_state:%d ret: %d",
  3497. qdf_atomic_read(&sc->pm_state), ret);
  3498. }
  3499. return ret;
  3500. }
  3501. if (pm_state == HIF_PM_RUNTIME_STATE_SUSPENDED ||
  3502. pm_state == HIF_PM_RUNTIME_STATE_SUSPENDING) {
  3503. hif_info_high("Runtime PM resume is requested by %ps",
  3504. (void *)_RET_IP_);
  3505. ret = -EAGAIN;
  3506. } else {
  3507. ret = -EBUSY;
  3508. }
  3509. sc->pm_stats.request_resume++;
  3510. sc->pm_stats.last_resume_caller = (void *)_RET_IP_;
  3511. hif_pm_request_resume(sc->dev);
  3512. return ret;
  3513. }
  3514. /**
  3515. * hif_pm_runtime_put() - do a put opperation on the device
  3516. *
  3517. * A put opperation will allow a runtime suspend after a corresponding
  3518. * get was done. This api should be used when sending data.
  3519. *
  3520. * This api will return a failure if runtime pm is stopped
  3521. * This api will return failure if it would decrement the usage count below 0.
  3522. *
  3523. * return: QDF_STATUS_SUCCESS if the put is performed
  3524. */
  3525. int hif_pm_runtime_put(struct hif_opaque_softc *hif_ctx)
  3526. {
  3527. struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
  3528. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3529. int pm_state, usage_count;
  3530. char *error = NULL;
  3531. if (!scn) {
  3532. HIF_ERROR("%s: Could not do runtime put, scn is null",
  3533. __func__);
  3534. return -EFAULT;
  3535. }
  3536. usage_count = atomic_read(&sc->dev->power.usage_count);
  3537. if (usage_count == 1) {
  3538. pm_state = qdf_atomic_read(&sc->pm_state);
  3539. if (pm_state == HIF_PM_RUNTIME_STATE_NONE)
  3540. error = "Ignoring unexpected put when runtime pm is disabled";
  3541. } else if (usage_count == 0) {
  3542. error = "PUT Without a Get Operation";
  3543. }
  3544. if (error) {
  3545. hif_pci_runtime_pm_warn(sc, error);
  3546. return -EINVAL;
  3547. }
  3548. sc->pm_stats.runtime_put++;
  3549. hif_pm_runtime_mark_last_busy(hif_ctx);
  3550. hif_pm_runtime_put_auto(sc->dev);
  3551. return 0;
  3552. }
/**
 * __hif_pm_runtime_prevent_suspend() - prevent runtime suspend for a protocol
 * reason
 * @hif_sc: pci context
 * @lock: runtime_pm lock being acquired
 *
 * Takes a runtime PM usage-count reference and links @lock into the
 * prevent-suspend list. Callers in this file invoke it with
 * hif_sc->runtime_lock held.
 *
 * Return 0 if successful.
 */
static int __hif_pm_runtime_prevent_suspend(struct hif_pci_softc
		*hif_sc, struct hif_pm_runtime_lock *lock)
{
	int ret = 0;
	/*
	 * We shouldn't be setting context->timeout to zero here when
	 * context is active as we will have a case where Timeout API's
	 * for the same context called back to back.
	 * eg: echo "1=T:10:T:20" > /d/cnss_runtime_pm
	 * Set context->timeout to zero in hif_pm_runtime_prevent_suspend
	 * API to ensure the timeout version is no more active and
	 * list entry of this context will be deleted during allow suspend.
	 */
	if (lock->active)
		return 0;
	ret = __hif_pm_runtime_get(hif_sc->dev);
	/**
	 * The ret can be -EINPROGRESS, if Runtime status is RPM_RESUMING or
	 * RPM_SUSPENDING. Any other negative value is an error.
	 * We shouldn't be do runtime_put here as in later point allow
	 * suspend gets called with the the context and there the usage count
	 * is decremented, so suspend will be prevented.
	 */
	if (ret < 0 && ret != -EINPROGRESS) {
		hif_sc->pm_stats.runtime_get_err++;
		hif_pci_runtime_pm_warn(hif_sc,
				"Prevent Suspend Runtime PM Error");
	}
	/* The lock is recorded as active even if the get above failed;
	 * the matching allow-suspend still drops the usage count.
	 */
	hif_sc->prevent_suspend_cnt++;
	lock->active = true;
	list_add_tail(&lock->list, &hif_sc->prevent_suspend_list);
	hif_sc->pm_stats.prevent_suspend++;
	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
		hif_pm_runtime_state_to_string(
			qdf_atomic_read(&hif_sc->pm_state)),
					ret);
	return ret;
}
/**
 * __hif_pm_runtime_allow_suspend() - release one prevent-suspend reference
 * @hif_sc: pci context
 * @lock: runtime_pm lock being released
 *
 * Unlinks @lock from the prevent-suspend list and drops the usage-count
 * reference taken by __hif_pm_runtime_prevent_suspend(). Callers in this
 * file invoke it with hif_sc->runtime_lock held.
 *
 * Return: 0 or the hif_pm_runtime_put_auto() result; -EINVAL when the
 * usage count shows no outstanding prevent.
 */
static int __hif_pm_runtime_allow_suspend(struct hif_pci_softc *hif_sc,
					    struct hif_pm_runtime_lock *lock)
{
	struct hif_opaque_softc *hif_ctx = GET_HIF_OPAQUE_HDL(hif_sc);
	int ret = 0;
	int usage_count;
	/* nothing held, or this particular lock never taken: no-op */
	if (hif_sc->prevent_suspend_cnt == 0)
		return ret;
	if (!lock->active)
		return ret;
	usage_count = atomic_read(&hif_sc->dev->power.usage_count);
	/*
	 * During Driver unload, platform driver increments the usage
	 * count to prevent any runtime suspend getting called.
	 * So during driver load in HIF_PM_RUNTIME_STATE_NONE state the
	 * usage_count should be one. Ideally this shouldn't happen as
	 * context->active should be active for allow suspend to happen
	 * Handling this case here to prevent any failures.
	 */
	if ((qdf_atomic_read(&hif_sc->pm_state) == HIF_PM_RUNTIME_STATE_NONE
				&& usage_count == 1) || usage_count == 0) {
		hif_pci_runtime_pm_warn(hif_sc,
				"Allow without a prevent suspend");
		return -EINVAL;
	}
	list_del(&lock->list);
	hif_sc->prevent_suspend_cnt--;
	lock->active = false;
	lock->timeout = 0;
	/* restart the autosuspend window before releasing the reference */
	hif_pm_runtime_mark_last_busy(hif_ctx);
	ret = hif_pm_runtime_put_auto(hif_sc->dev);
	HIF_ERROR("%s: in pm_state:%s ret: %d", __func__,
		hif_pm_runtime_state_to_string(
			qdf_atomic_read(&hif_sc->pm_state)),
					ret);
	hif_sc->pm_stats.allow_suspend++;
	return ret;
}
/**
 * hif_pm_runtime_lock_timeout_fn() - callback the runtime lock timeout
 * @data: callback data that is the pci context
 *
 * if runtime locks are acquired with a timeout, this function releases
 * the locks when the last runtime lock expires.
 *
 * dummy implementation until lock acquisition is implemented.
 */
static void hif_pm_runtime_lock_timeout_fn(void *data)
{
	struct hif_pci_softc *hif_sc = data;
	unsigned long timer_expires;
	struct hif_pm_runtime_lock *context, *temp;
	spin_lock_bh(&hif_sc->runtime_lock);
	timer_expires = hif_sc->runtime_timer_expires;
	/* Make sure we are not called too early, this should take care of
	 * following case
	 *
	 * CPU0                         CPU1 (timeout function)
	 * ----                         ----------------------
	 * spin_lock_irq
	 *                              timeout function called
	 *
	 * mod_timer()
	 *
	 * spin_unlock_irq
	 *                              spin_lock_irq
	 */
	if (timer_expires > 0 && !time_after(timer_expires, jiffies)) {
		hif_sc->runtime_timer_expires = 0;
		/* release every lock that was armed with a timeout; the
		 * _safe iterator is needed because allow_suspend unlinks
		 * the entry being visited
		 */
		list_for_each_entry_safe(context, temp,
				&hif_sc->prevent_suspend_list, list) {
			if (context->timeout) {
				__hif_pm_runtime_allow_suspend(hif_sc, context);
				hif_sc->pm_stats.allow_suspend_timeout++;
			}
		}
	}
	spin_unlock_bh(&hif_sc->runtime_lock);
}
  3678. int hif_pm_runtime_prevent_suspend(struct hif_opaque_softc *ol_sc,
  3679. struct hif_pm_runtime_lock *data)
  3680. {
  3681. struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
  3682. struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
  3683. struct hif_pm_runtime_lock *context = data;
  3684. if (!sc->hif_config.enable_runtime_pm)
  3685. return 0;
  3686. if (!context)
  3687. return -EINVAL;
  3688. if (in_irq())
  3689. WARN_ON(1);
  3690. spin_lock_bh(&hif_sc->runtime_lock);
  3691. context->timeout = 0;
  3692. __hif_pm_runtime_prevent_suspend(hif_sc, context);
  3693. spin_unlock_bh(&hif_sc->runtime_lock);
  3694. return 0;
  3695. }
  3696. int hif_pm_runtime_allow_suspend(struct hif_opaque_softc *ol_sc,
  3697. struct hif_pm_runtime_lock *data)
  3698. {
  3699. struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
  3700. struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(ol_sc);
  3701. struct hif_pm_runtime_lock *context = data;
  3702. if (!sc->hif_config.enable_runtime_pm)
  3703. return 0;
  3704. if (!context)
  3705. return -EINVAL;
  3706. if (in_irq())
  3707. WARN_ON(1);
  3708. spin_lock_bh(&hif_sc->runtime_lock);
  3709. __hif_pm_runtime_allow_suspend(hif_sc, context);
  3710. /* The list can be empty as well in cases where
  3711. * we have one context in the list and the allow
  3712. * suspend came before the timer expires and we delete
  3713. * context above from the list.
  3714. * When list is empty prevent_suspend count will be zero.
  3715. */
  3716. if (hif_sc->prevent_suspend_cnt == 0 &&
  3717. hif_sc->runtime_timer_expires > 0) {
  3718. qdf_timer_free(&hif_sc->runtime_timer);
  3719. hif_sc->runtime_timer_expires = 0;
  3720. }
  3721. spin_unlock_bh(&hif_sc->runtime_lock);
  3722. return 0;
  3723. }
/**
 * hif_pm_runtime_prevent_suspend_timeout() - Prevent runtime suspend timeout
 * @ol_sc: HIF context
 * @lock: which lock is being acquired
 * @delay: Timeout in milliseconds
 *
 * Prevent runtime suspend with a timeout after which runtime suspend would be
 * allowed. This API uses a single timer to allow the suspend and timer is
 * modified if the timeout is changed before timer fires.
 * If the timeout is less than autosuspend_delay then use mark_last_busy instead
 * of starting the timer.
 *
 * It is wise to try not to use this API and correct the design if possible.
 *
 * Return: 0 on success and negative error code on failure
 */
int hif_pm_runtime_prevent_suspend_timeout(struct hif_opaque_softc *ol_sc,
		struct hif_pm_runtime_lock *lock, unsigned int delay)
{
	struct hif_softc *sc = HIF_GET_SOFTC(ol_sc);
	struct hif_pci_softc *hif_sc = HIF_GET_PCI_SOFTC(sc);
	int ret = 0;
	unsigned long expires;
	struct hif_pm_runtime_lock *context = lock;
	if (hif_is_load_or_unload_in_progress(sc)) {
		HIF_ERROR("%s: Load/unload in progress, ignore!",
				__func__);
		return -EINVAL;
	}
	if (hif_is_recovery_in_progress(sc)) {
		HIF_ERROR("%s: LOGP in progress, ignore!", __func__);
		return -EINVAL;
	}
	if (!sc->hif_config.enable_runtime_pm)
		return 0;
	if (!context)
		return -EINVAL;
	if (in_irq())
		WARN_ON(1);
	/*
	 * Don't use internal timer if the timeout is less than auto suspend
	 * delay.
	 */
	if (delay <= hif_sc->dev->power.autosuspend_delay) {
		hif_pm_request_resume(hif_sc->dev);
		hif_pm_runtime_mark_last_busy(ol_sc);
		return ret;
	}
	expires = jiffies + msecs_to_jiffies(delay);
	/* 0 means "timer not armed" elsewhere; avoid it on wraparound */
	expires += !expires;
	spin_lock_bh(&hif_sc->runtime_lock);
	/* non-zero timeout tags this lock for the timer callback */
	context->timeout = delay;
	ret = __hif_pm_runtime_prevent_suspend(hif_sc, context);
	hif_sc->pm_stats.prevent_suspend_timeout++;
	/* Modify the timer only if new timeout is after already configured
	 * timeout
	 */
	if (time_after(expires, hif_sc->runtime_timer_expires)) {
		qdf_timer_mod(&hif_sc->runtime_timer, delay);
		hif_sc->runtime_timer_expires = expires;
	}
	spin_unlock_bh(&hif_sc->runtime_lock);
	HIF_ERROR("%s: pm_state: %s delay: %dms ret: %d\n", __func__,
		hif_pm_runtime_state_to_string(
			qdf_atomic_read(&hif_sc->pm_state)),
					delay, ret);
	return ret;
}
  3792. /**
  3793. * hif_runtime_lock_init() - API to initialize Runtime PM context
  3794. * @name: Context name
  3795. *
  3796. * This API initializes the Runtime PM context of the caller and
  3797. * return the pointer.
  3798. *
  3799. * Return: None
  3800. */
  3801. int hif_runtime_lock_init(qdf_runtime_lock_t *lock, const char *name)
  3802. {
  3803. struct hif_pm_runtime_lock *context;
  3804. HIF_INFO("Initializing Runtime PM wakelock %s", name);
  3805. context = qdf_mem_malloc(sizeof(*context));
  3806. if (!context)
  3807. return -ENOMEM;
  3808. context->name = name ? name : "Default";
  3809. lock->lock = context;
  3810. return 0;
  3811. }
  3812. /**
  3813. * hif_runtime_lock_deinit() - This API frees the runtime pm ctx
  3814. * @data: Runtime PM context
  3815. *
  3816. * Return: void
  3817. */
  3818. void hif_runtime_lock_deinit(struct hif_opaque_softc *hif_ctx,
  3819. struct hif_pm_runtime_lock *data)
  3820. {
  3821. struct hif_pm_runtime_lock *context = data;
  3822. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3823. if (!context) {
  3824. HIF_ERROR("Runtime PM wakelock context is NULL");
  3825. return;
  3826. }
  3827. HIF_INFO("Deinitializing Runtime PM wakelock %s", context->name);
  3828. /*
  3829. * Ensure to delete the context list entry and reduce the usage count
  3830. * before freeing the context if context is active.
  3831. */
  3832. if (sc) {
  3833. spin_lock_bh(&sc->runtime_lock);
  3834. __hif_pm_runtime_allow_suspend(sc, context);
  3835. spin_unlock_bh(&sc->runtime_lock);
  3836. }
  3837. qdf_mem_free(context);
  3838. }
  3839. /**
  3840. * hif_pm_runtime_is_suspended() - API to check if driver has runtime suspended
  3841. * @hif_ctx: HIF context
  3842. *
  3843. * Return: true for runtime suspended, otherwise false
  3844. */
  3845. bool hif_pm_runtime_is_suspended(struct hif_opaque_softc *hif_ctx)
  3846. {
  3847. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3848. return qdf_atomic_read(&sc->pm_state) ==
  3849. HIF_PM_RUNTIME_STATE_SUSPENDED;
  3850. }
  3851. /**
  3852. * hif_pm_runtime_get_monitor_wake_intr() - API to get monitor_wake_intr
  3853. * @hif_ctx: HIF context
  3854. *
  3855. * monitor_wake_intr variable can be used to indicate if driver expects wake
  3856. * MSI for runtime PM
  3857. *
  3858. * Return: monitor_wake_intr variable
  3859. */
  3860. int hif_pm_runtime_get_monitor_wake_intr(struct hif_opaque_softc *hif_ctx)
  3861. {
  3862. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3863. return qdf_atomic_read(&sc->monitor_wake_intr);
  3864. }
  3865. /**
  3866. * hif_pm_runtime_set_monitor_wake_intr() - API to set monitor_wake_intr
  3867. * @hif_ctx: HIF context
  3868. * @val: value to set
  3869. *
  3870. * monitor_wake_intr variable can be used to indicate if driver expects wake
  3871. * MSI for runtime PM
  3872. *
  3873. * Return: void
  3874. */
  3875. void hif_pm_runtime_set_monitor_wake_intr(struct hif_opaque_softc *hif_ctx,
  3876. int val)
  3877. {
  3878. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3879. qdf_atomic_set(&sc->monitor_wake_intr, val);
  3880. }
  3881. /**
  3882. * hif_pm_runtime_mark_dp_rx_busy() - Set last busy mark my data path
  3883. * @hif_ctx: HIF context
  3884. *
  3885. * Return: void
  3886. */
  3887. void hif_pm_runtime_mark_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
  3888. {
  3889. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3890. if (!sc)
  3891. return;
  3892. qdf_atomic_set(&sc->pm_dp_rx_busy, 1);
  3893. sc->dp_last_busy_timestamp = qdf_get_log_timestamp_usecs();
  3894. hif_pm_runtime_mark_last_busy(hif_ctx);
  3895. }
  3896. /**
  3897. * hif_pm_runtime_is_dp_rx_busy() - Check if last mark busy by dp rx
  3898. * @hif_ctx: HIF context
  3899. *
  3900. * Return: dp rx busy set value
  3901. */
  3902. int hif_pm_runtime_is_dp_rx_busy(struct hif_opaque_softc *hif_ctx)
  3903. {
  3904. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3905. if (!sc)
  3906. return 0;
  3907. return qdf_atomic_read(&sc->pm_dp_rx_busy);
  3908. }
  3909. /**
  3910. * hif_pm_runtime_get_dp_rx_busy_mark() - Get last busy by dp rx timestamp
  3911. * @hif_ctx: HIF context
  3912. *
  3913. * Return: timestamp of last mark busy by dp rx
  3914. */
  3915. qdf_time_t hif_pm_runtime_get_dp_rx_busy_mark(struct hif_opaque_softc *hif_ctx)
  3916. {
  3917. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(hif_ctx);
  3918. if (!sc)
  3919. return 0;
  3920. return sc->dp_last_busy_timestamp;
  3921. }
  3922. #endif /* FEATURE_RUNTIME_PM */
/**
 * hif_pci_legacy_map_ce_to_irq() - map a copy-engine id to its IRQ number
 * @scn: hif context
 * @ce_id: copy engine id (unused — legacy interrupts share one line)
 *
 * Return: the single legacy PCI IRQ number for all copy engines
 */
int hif_pci_legacy_map_ce_to_irq(struct hif_softc *scn, int ce_id)
{
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);

	/* legacy case only has one irq */
	return pci_scn->irq;
}
  3929. int hif_pci_addr_in_boundary(struct hif_softc *scn, uint32_t offset)
  3930. {
  3931. struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
  3932. struct hif_target_info *tgt_info;
  3933. tgt_info = hif_get_target_info_handle(GET_HIF_OPAQUE_HDL(scn));
  3934. if (tgt_info->target_type == TARGET_TYPE_QCA6290 ||
  3935. tgt_info->target_type == TARGET_TYPE_QCA6390 ||
  3936. tgt_info->target_type == TARGET_TYPE_QCA6490 ||
  3937. tgt_info->target_type == TARGET_TYPE_QCA8074) {
  3938. /*
  3939. * Need to consider offset's memtype for QCA6290/QCA8074,
  3940. * also mem_len and DRAM_BASE_ADDRESS/DRAM_SIZE need to be
  3941. * well initialized/defined.
  3942. */
  3943. return 0;
  3944. }
  3945. if ((offset >= DRAM_BASE_ADDRESS && offset <= DRAM_BASE_ADDRESS + DRAM_SIZE)
  3946. || (offset + sizeof(unsigned int) <= sc->mem_len)) {
  3947. return 0;
  3948. }
  3949. HIF_TRACE("Refusing to read memory at 0x%x - 0x%x (max 0x%zx)\n",
  3950. offset, (uint32_t)(offset + sizeof(unsigned int)),
  3951. sc->mem_len);
  3952. return -EINVAL;
  3953. }
  3954. /**
  3955. * hif_pci_needs_bmi() - return true if the soc needs bmi through the driver
  3956. * @scn: hif context
  3957. *
  3958. * Return: true if soc needs driver bmi otherwise false
  3959. */
  3960. bool hif_pci_needs_bmi(struct hif_softc *scn)
  3961. {
  3962. return !ce_srng_based(scn);
  3963. }
  3964. #ifdef FORCE_WAKE
/**
 * hif_force_wake_request() - wake the device via MHI, then via the SoC
 * @hif_handle: opaque HIF context (cast to struct hif_softc internally)
 *
 * Two-stage wake handshake:
 *  1. Ask the platform layer (MHI) to wake the link, then poll
 *     pld_is_device_awake() up to FORCE_WAKE_DELAY_TIMEOUT_MS.
 *  2. Clear the PCIe scratch register, assert the SoC wake register, and
 *     poll the scratch register until the target writes it non-zero
 *     (target-side handshake acknowledgement — presumed; confirm against
 *     firmware docs).
 *
 * The timeout counter deliberately carries over from stage 1 to stage 2 so
 * the combined wait stays bounded (see comment below).
 *
 * Return: 0 on success, -EINVAL if the MHI wake fails, -ETIMEDOUT if the
 *         SoC handshake never completes
 */
int hif_force_wake_request(struct hif_opaque_softc *hif_handle)
{
	uint32_t timeout = 0, value;
	struct hif_softc *scn = (struct hif_softc *)hif_handle;
	struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);

	/* Stage 1: MHI/platform wake request */
	if (pld_force_wake_request(scn->qdf_dev->dev)) {
		hif_err("force wake request send failed");
		return -EINVAL;
	}

	HIF_STATS_INC(pci_scn, mhi_force_wake_request_vote, 1);
	while (!pld_is_device_awake(scn->qdf_dev->dev) &&
	       timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS) {
		qdf_mdelay(FORCE_WAKE_DELAY_MS);
		timeout += FORCE_WAKE_DELAY_MS;
	}

	if (pld_is_device_awake(scn->qdf_dev->dev) <= 0) {
		hif_err("Unable to wake up mhi");
		HIF_STATS_INC(pci_scn, mhi_force_wake_failure, 1);
		return -EINVAL;
	}
	HIF_STATS_INC(pci_scn, mhi_force_wake_success, 1);

	/* Stage 2: clear the scratch register, then assert SoC wake */
	hif_write32_mb(scn,
		       scn->mem +
		       PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG,
		       0);

	hif_write32_mb(scn,
		       scn->mem +
		       PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
		       1);

	HIF_STATS_INC(pci_scn, soc_force_wake_register_write_success, 1);
	/*
	 * do not reset the timeout
	 * total_wake_time = MHI_WAKE_TIME + PCI_WAKE_TIME < 50 ms
	 */
	do {
		value =
		hif_read32_mb(scn,
			      scn->mem +
			      PCIE_SOC_PCIE_REG_PCIE_SCRATCH_0_SOC_PCIE_REG);
		if (value)
			break;
		qdf_mdelay(FORCE_WAKE_DELAY_MS);
		timeout += FORCE_WAKE_DELAY_MS;
	} while (timeout <= FORCE_WAKE_DELAY_TIMEOUT_MS);

	if (!value) {
		hif_err("failed handshake mechanism");
		HIF_STATS_INC(pci_scn, soc_force_wake_failure, 1);
		return -ETIMEDOUT;
	}

	HIF_STATS_INC(pci_scn, soc_force_wake_success, 1);

	return 0;
}
  4017. int hif_force_wake_release(struct hif_opaque_softc *hif_handle)
  4018. {
  4019. int ret;
  4020. struct hif_softc *scn = (struct hif_softc *)hif_handle;
  4021. struct hif_pci_softc *pci_scn = HIF_GET_PCI_SOFTC(scn);
  4022. ret = pld_force_wake_release(scn->qdf_dev->dev);
  4023. if (ret) {
  4024. hif_err("force wake release failure");
  4025. HIF_STATS_INC(pci_scn, mhi_force_wake_release_failure, 1);
  4026. return ret;
  4027. }
  4028. HIF_STATS_INC(pci_scn, mhi_force_wake_release_success, 1);
  4029. hif_write32_mb(scn,
  4030. scn->mem +
  4031. PCIE_PCIE_LOCAL_REG_PCIE_SOC_WAKE_PCIE_LOCAL_REG,
  4032. 0);
  4033. HIF_STATS_INC(pci_scn, soc_force_wake_release_success, 1);
  4034. return 0;
  4035. }
  4036. void hif_print_pci_stats(struct hif_pci_softc *pci_handle)
  4037. {
  4038. hif_debug("mhi_force_wake_request_vote: %d",
  4039. pci_handle->stats.mhi_force_wake_request_vote);
  4040. hif_debug("mhi_force_wake_failure: %d",
  4041. pci_handle->stats.mhi_force_wake_failure);
  4042. hif_debug("mhi_force_wake_success: %d",
  4043. pci_handle->stats.mhi_force_wake_success);
  4044. hif_debug("soc_force_wake_register_write_success: %d",
  4045. pci_handle->stats.soc_force_wake_register_write_success);
  4046. hif_debug("soc_force_wake_failure: %d",
  4047. pci_handle->stats.soc_force_wake_failure);
  4048. hif_debug("soc_force_wake_success: %d",
  4049. pci_handle->stats.soc_force_wake_success);
  4050. hif_debug("mhi_force_wake_release_failure: %d",
  4051. pci_handle->stats.mhi_force_wake_release_failure);
  4052. hif_debug("mhi_force_wake_release_success: %d",
  4053. pci_handle->stats.mhi_force_wake_release_success);
  4054. hif_debug("oc_force_wake_release_success: %d",
  4055. pci_handle->stats.soc_force_wake_release_success);
  4056. }
  4057. #endif /* FORCE_WAKE */