12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
78278827982808281828282838284828582868287828882898290829182928293829482958296829782988299830083018302830383048305830683078308830983108311831283138314831583168317831883198320832183228323832483258326832783288329833083318332833383348335833683378338833983408341834283438344834583468347834883498350835183528353835483558356835783588359836083618362836383648365836683678368836983708371837283738374837583768377837883798380838183828383838483858386838783888389839083918392839383948395839683978398839984008401840284038404840584068407840884098410841184128413841484158416841784188419842084218422842384248425842684278428842984308431843284338434843584368437843884398440844184428443844484458446844784488449845084518452845384548455845684578458845984608461846284638464846584668467846884698470847184728473847484758476847784788479848084818482848384848485848684878488848984908491849284938494849584968497849884998500850185028503850485058506850785088509851085118512851385148515851685178518851985208521852285238524852585268527852885298530853185328533853485358536853785388539854085418542854385448545854685478548854985508551855285538554855585568557855885598560856185628563856485658566856785688569857085718572857385748575857685778578857985808581858285838584858585868587858885898590859185928593859485958596859785988599860086018602860386048605860686078608860986108611861286138614861586168617861886198620862186228623862486258626862786288629863086318632863386348635863686378638863986408641864286438644864586468647864886498650865186528653865486558656865786588659866086618662866386648665866686678668866986708671867286738674867586768677867886798680868186828683868486858686868786888689869086918692869386948695869686978698869987008701870287038704870587068707870887098710871187128713871487158716871787188719872087218722872387248725872687278728872987308731873287338734873587368737873887398740874187428743874487458746874787488749875087518752875387548755875687578758875987608761876287638764876587668767876887698770877187728773877487758776877
78778877987808781878287838784878587868787878887898790879187928793879487958796879787988799880088018802880388048805880688078808880988108811881288138814881588168817881888198820882188228823882488258826882788288829883088318832883388348835883688378838883988408841884288438844884588468847884888498850885188528853885488558856885788588859886088618862886388648865886688678868886988708871887288738874887588768877887888798880888188828883888488858886888788888889889088918892889388948895889688978898889989008901890289038904890589068907890889098910891189128913891489158916891789188919892089218922892389248925892689278928892989308931893289338934893589368937893889398940894189428943894489458946894789488949895089518952895389548955895689578958895989608961896289638964896589668967896889698970897189728973897489758976897789788979898089818982898389848985898689878988898989908991899289938994899589968997899889999000900190029003900490059006900790089009901090119012901390149015901690179018901990209021902290239024902590269027902890299030903190329033903490359036903790389039904090419042904390449045904690479048904990509051905290539054905590569057905890599060906190629063906490659066906790689069907090719072907390749075907690779078907990809081908290839084908590869087908890899090909190929093909490959096909790989099910091019102910391049105910691079108910991109111911291139114911591169117911891199120912191229123912491259126912791289129913091319132913391349135913691379138913991409141914291439144914591469147914891499150915191529153915491559156915791589159916091619162916391649165916691679168916991709171917291739174917591769177917891799180918191829183918491859186918791889189919091919192919391949195919691979198919992009201920292039204920592069207920892099210921192129213921492159216921792189219922092219222922392249225922692279228922992309231923292339234923592369237923892399240924192429243924492459246924792489249925092519252925392549255925692579258925992609261926292639264926592669267926892699270927192729273927492759276927
79278927992809281928292839284928592869287928892899290929192929293929492959296929792989299930093019302930393049305930693079308930993109311931293139314931593169317931893199320932193229323932493259326932793289329933093319332933393349335933693379338933993409341934293439344934593469347934893499350935193529353935493559356935793589359936093619362936393649365936693679368936993709371937293739374937593769377937893799380938193829383938493859386938793889389939093919392939393949395939693979398939994009401940294039404940594069407940894099410941194129413941494159416941794189419942094219422942394249425942694279428942994309431943294339434943594369437943894399440944194429443944494459446944794489449945094519452945394549455945694579458945994609461946294639464946594669467946894699470947194729473947494759476947794789479948094819482948394849485948694879488948994909491949294939494949594969497949894999500950195029503950495059506950795089509951095119512951395149515951695179518951995209521952295239524952595269527952895299530953195329533953495359536953795389539954095419542954395449545954695479548954995509551955295539554955595569557955895599560956195629563956495659566956795689569957095719572957395749575957695779578957995809581958295839584958595869587958895899590959195929593959495959596959795989599960096019602960396049605960696079608960996109611961296139614961596169617961896199620962196229623962496259626962796289629963096319632963396349635963696379638963996409641964296439644964596469647964896499650965196529653965496559656965796589659966096619662966396649665966696679668966996709671967296739674967596769677967896799680968196829683968496859686968796889689969096919692969396949695969696979698969997009701970297039704970597069707970897099710971197129713971497159716971797189719972097219722972397249725972697279728972997309731973297339734973597369737973897399740974197429743974497459746974797489749975097519752975397549755975697579758975997609761976297639764976597669767976897699770977197729773977497759776977
79778977997809781978297839784978597869787978897899790979197929793979497959796979797989799980098019802980398049805980698079808980998109811981298139814981598169817981898199820982198229823982498259826982798289829983098319832983398349835983698379838983998409841984298439844984598469847984898499850985198529853985498559856985798589859986098619862986398649865986698679868986998709871987298739874987598769877987898799880988198829883988498859886988798889889989098919892989398949895989698979898989999009901990299039904990599069907990899099910991199129913991499159916991799189919992099219922992399249925992699279928992999309931993299339934993599369937993899399940994199429943994499459946994799489949995099519952995399549955995699579958995999609961996299639964996599669967996899699970997199729973997499759976997799789979998099819982998399849985998699879988998999909991999299939994999599969997999899991000010001100021000310004100051000610007100081000910010100111001210013100141001510016100171001810019100201002110022100231002410025100261002710028100291003010031100321003310034100351003610037100381003910040100411004210043100441004510046100471004810049100501005110052100531005410055100561005710058100591006010061100621006310064100651006610067100681006910070100711007210073100741007510076100771007810079100801008110082100831008410085100861008710088100891009010091100921009310094100951009610097100981009910100101011010210103101041010510106101071010810109101101011110112101131011410115101161011710118101191012010121101221012310124101251012610127101281012910130101311013210133101341013510136101371013810139101401014110142101431014410145101461014710148101491015010151101521015310154101551015610157101581015910160101611016210163101641016510166101671016810169101701017110172101731017410175101761017710178101791018010181101821018310184101851018610187101881018910190101911019210193101941019510196101971019810199102001020110202102031020410205102061020710208102091021010211102121021310214102151021610217102181021910220102211
02221022310224102251022610227102281022910230102311023210233102341023510236102371023810239102401024110242102431024410245102461024710248102491025010251102521025310254102551025610257102581025910260102611026210263102641026510266102671026810269102701027110272102731027410275102761027710278102791028010281102821028310284102851028610287102881028910290102911029210293102941029510296102971029810299103001030110302103031030410305103061030710308103091031010311103121031310314103151031610317103181031910320103211032210323103241032510326103271032810329103301033110332103331033410335103361033710338103391034010341103421034310344103451034610347103481034910350103511035210353103541035510356103571035810359103601036110362103631036410365103661036710368103691037010371103721037310374103751037610377103781037910380103811038210383103841038510386103871038810389103901039110392103931039410395103961039710398103991040010401104021040310404104051040610407104081040910410104111041210413104141041510416104171041810419104201042110422104231042410425104261042710428104291043010431104321043310434104351043610437104381043910440104411044210443104441044510446104471044810449104501045110452104531045410455104561045710458104591046010461104621046310464104651046610467104681046910470104711047210473104741047510476104771047810479104801048110482104831048410485104861048710488104891049010491104921049310494104951049610497104981049910500105011050210503105041050510506105071050810509105101051110512105131051410515105161051710518105191052010521105221052310524105251052610527105281052910530105311053210533105341053510536105371053810539105401054110542105431054410545105461054710548105491055010551105521055310554105551055610557105581055910560105611056210563105641056510566105671056810569105701057110572105731057410575105761057710578105791058010581105821058310584105851058610587105881058910590105911059210593105941059510596105971059810599106001060110602106031060410605106061060710608106091061010611106121061310614106151061610617106181061910620106211
06221062310624106251062610627106281062910630106311063210633106341063510636106371063810639106401064110642106431064410645106461064710648106491065010651106521065310654106551065610657106581065910660106611066210663106641066510666106671066810669106701067110672106731067410675106761067710678106791068010681106821068310684106851068610687106881068910690106911069210693106941069510696106971069810699107001070110702107031070410705107061070710708107091071010711107121071310714107151071610717107181071910720107211072210723107241072510726107271072810729107301073110732107331073410735107361073710738107391074010741107421074310744107451074610747107481074910750107511075210753107541075510756107571075810759107601076110762107631076410765107661076710768107691077010771107721077310774107751077610777107781077910780107811078210783107841078510786107871078810789107901079110792107931079410795107961079710798107991080010801108021080310804108051080610807108081080910810108111081210813108141081510816108171081810819108201082110822108231082410825108261082710828108291083010831108321083310834108351083610837108381083910840108411084210843108441084510846108471084810849108501085110852108531085410855108561085710858108591086010861108621086310864108651086610867108681086910870108711087210873108741087510876108771087810879108801088110882108831088410885108861088710888108891089010891108921089310894108951089610897108981089910900109011090210903109041090510906109071090810909109101091110912109131091410915109161091710918109191092010921109221092310924109251092610927109281092910930109311093210933109341093510936109371093810939109401094110942109431094410945109461094710948109491095010951109521095310954109551095610957109581095910960109611096210963109641096510966109671096810969109701097110972109731097410975109761097710978109791098010981109821098310984109851098610987109881098910990109911099210993109941099510996109971099810999110001100111002110031100411005110061100711008110091101011011110121101311014110151101611017110181101911020110211
10221102311024110251102611027110281102911030110311103211033110341103511036110371103811039110401104111042110431104411045110461104711048110491105011051110521105311054110551105611057110581105911060110611106211063110641106511066110671106811069110701107111072110731107411075110761107711078110791108011081110821108311084110851108611087110881108911090110911109211093110941109511096110971109811099111001110111102111031110411105111061110711108111091111011111111121111311114111151111611117111181111911120111211112211123111241112511126111271112811129111301113111132111331113411135111361113711138111391114011141111421114311144111451114611147111481114911150111511115211153111541115511156111571115811159111601116111162111631116411165111661116711168111691117011171111721117311174111751117611177111781117911180111811118211183111841118511186111871118811189111901119111192111931119411195111961119711198111991120011201112021120311204112051120611207112081120911210112111121211213112141121511216112171121811219112201122111222112231122411225112261122711228112291123011231112321123311234112351123611237112381123911240112411124211243112441124511246112471124811249112501125111252112531125411255112561125711258112591126011261112621126311264112651126611267112681126911270112711127211273112741127511276112771127811279112801128111282112831128411285112861128711288112891129011291112921129311294112951129611297112981129911300113011130211303113041130511306113071130811309113101131111312113131131411315113161131711318113191132011321113221132311324113251132611327113281132911330113311133211333113341133511336113371133811339113401134111342113431134411345113461134711348113491135011351113521135311354113551135611357113581135911360113611136211363113641136511366113671136811369113701137111372113731137411375113761137711378113791138011381113821138311384113851138611387113881138911390113911139211393113941139511396113971139811399114001140111402114031140411405114061140711408114091141011411114121141311414114151141611417114181141911420114211
14221142311424114251142611427114281142911430114311143211433114341143511436114371143811439114401144111442114431144411445114461144711448114491145011451114521145311454114551145611457114581145911460114611146211463114641146511466114671146811469114701147111472114731147411475114761147711478114791148011481114821148311484114851148611487114881148911490114911149211493114941149511496114971149811499115001150111502115031150411505115061150711508115091151011511115121151311514115151151611517115181151911520115211152211523115241152511526115271152811529115301153111532115331153411535115361153711538115391154011541115421154311544115451154611547115481154911550115511155211553115541155511556115571155811559115601156111562115631156411565115661156711568115691157011571115721157311574115751157611577115781157911580115811158211583115841158511586115871158811589115901159111592115931159411595115961159711598115991160011601116021160311604116051160611607116081160911610116111161211613116141161511616116171161811619116201162111622116231162411625116261162711628116291163011631116321163311634116351163611637116381163911640116411164211643116441164511646116471164811649116501165111652116531165411655116561165711658116591166011661116621166311664116651166611667116681166911670116711167211673116741167511676116771167811679116801168111682116831168411685116861168711688116891169011691116921169311694116951169611697116981169911700117011170211703117041170511706117071170811709117101171111712117131171411715117161171711718117191172011721117221172311724117251172611727117281172911730117311173211733117341173511736117371173811739117401174111742117431174411745117461174711748117491175011751117521175311754117551175611757117581175911760117611176211763117641176511766117671176811769117701177111772117731177411775117761177711778117791178011781117821178311784117851178611787117881178911790117911179211793117941179511796117971179811799118001180111802118031180411805118061180711808118091181011811118121181311814118151181611817118181181911820118211
18221182311824118251182611827118281182911830118311183211833118341183511836118371183811839118401184111842118431184411845118461184711848118491185011851118521185311854118551185611857118581185911860118611186211863118641186511866118671186811869118701187111872118731187411875118761187711878118791188011881118821188311884118851188611887118881188911890118911189211893118941189511896118971189811899119001190111902119031190411905119061190711908119091191011911119121191311914119151191611917119181191911920119211192211923119241192511926119271192811929119301193111932119331193411935119361193711938119391194011941119421194311944119451194611947119481194911950119511195211953119541195511956119571195811959119601196111962119631196411965119661196711968119691197011971119721197311974119751197611977119781197911980119811198211983119841198511986119871198811989119901199111992119931199411995119961199711998119991200012001120021200312004120051200612007120081200912010120111201212013120141201512016120171201812019120201202112022120231202412025120261202712028120291203012031120321203312034120351203612037120381203912040120411204212043120441204512046120471204812049120501205112052120531205412055120561205712058120591206012061120621206312064120651206612067120681206912070120711207212073120741207512076120771207812079120801208112082120831208412085120861208712088120891209012091120921209312094120951209612097120981209912100121011210212103121041210512106121071210812109121101211112112121131211412115121161211712118121191212012121121221212312124121251212612127121281212912130121311213212133121341213512136121371213812139121401214112142121431214412145121461214712148121491215012151121521215312154121551215612157121581215912160121611216212163121641216512166121671216812169121701217112172121731217412175121761217712178121791218012181121821218312184121851218612187121881218912190121911219212193121941219512196121971219812199122001220112202122031220412205122061220712208122091221012211122121221312214122151221612217122181221912220122211
22221222312224122251222612227122281222912230122311223212233122341223512236122371223812239122401224112242122431224412245122461224712248122491225012251122521225312254122551225612257122581225912260122611226212263122641226512266122671226812269122701227112272122731227412275122761227712278122791228012281122821228312284122851228612287122881228912290122911229212293122941229512296122971229812299123001230112302123031230412305123061230712308123091231012311123121231312314123151231612317123181231912320123211232212323123241232512326123271232812329123301233112332123331233412335123361233712338123391234012341123421234312344123451234612347123481234912350123511235212353123541235512356123571235812359123601236112362123631236412365123661236712368123691237012371123721237312374123751237612377123781237912380123811238212383123841238512386123871238812389123901239112392123931239412395123961239712398123991240012401124021240312404124051240612407124081240912410124111241212413124141241512416124171241812419124201242112422124231242412425124261242712428124291243012431124321243312434124351243612437124381243912440124411244212443124441244512446124471244812449124501245112452124531245412455124561245712458124591246012461124621246312464124651246612467124681246912470124711247212473124741247512476124771247812479124801248112482124831248412485124861248712488124891249012491124921249312494124951249612497124981249912500125011250212503125041250512506125071250812509125101251112512125131251412515125161251712518125191252012521125221252312524125251252612527125281252912530125311253212533125341253512536125371253812539125401254112542125431254412545125461254712548125491255012551125521255312554125551255612557125581255912560125611256212563125641256512566125671256812569125701257112572125731257412575125761257712578125791258012581125821258312584125851258612587125881258912590125911259212593125941259512596125971259812599126001260112602126031260412605126061260712608126091261012611126121261312614126151261612617126181261912620126211
26221262312624126251262612627126281262912630126311263212633126341263512636126371263812639126401264112642126431264412645126461264712648126491265012651126521265312654126551265612657126581265912660126611266212663126641266512666126671266812669126701267112672126731267412675126761267712678126791268012681126821268312684126851268612687126881268912690126911269212693126941269512696126971269812699127001270112702127031270412705127061270712708127091271012711127121271312714127151271612717127181271912720127211272212723127241272512726127271272812729127301273112732127331273412735127361273712738127391274012741127421274312744127451274612747127481274912750127511275212753127541275512756127571275812759127601276112762127631276412765127661276712768127691277012771127721277312774127751277612777127781277912780127811278212783127841278512786127871278812789127901279112792127931279412795127961279712798127991280012801128021280312804128051280612807128081280912810128111281212813128141281512816128171281812819128201282112822128231282412825128261282712828128291283012831128321283312834128351283612837128381283912840128411284212843128441284512846128471284812849128501285112852128531285412855128561285712858128591286012861128621286312864128651286612867128681286912870128711287212873 |
- // SPDX-License-Identifier: GPL-2.0-only
- /* Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. */
- #include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
- #include <dt-bindings/interconnect/qcom,icc.h>
- #include <linux/aer.h>
- #include <linux/bitops.h>
- #include <linux/clk.h>
- #include <linux/compiler.h>
- #include <linux/crc8.h>
- #include <linux/debugfs.h>
- #include <linux/delay.h>
- #include <linux/gpio.h>
- #include <linux/i2c.h>
- #include <linux/interconnect.h>
- #include <linux/interrupt.h>
- #include <linux/io.h>
- #include <linux/iopoll.h>
- #include <linux/ipc_logging.h>
- #include <linux/irq.h>
- #include <linux/irqdomain.h>
- #include <linux/jiffies.h>
- #include <linux/kernel.h>
- #include <linux/module.h>
- #include <linux/msm_pcie.h>
- #include <linux/of_device.h>
- #include <linux/of_gpio.h>
- #include <linux/of_pci.h>
- #include <linux/pci.h>
- #include <linux/platform_device.h>
- #include <linux/pm_wakeup.h>
- #include <linux/remoteproc/qcom_rproc.h>
- #include <linux/reset.h>
- #include <linux/regulator/consumer.h>
- #include <linux/rpmsg.h>
- #include <linux/seq_file.h>
- #include <linux/slab.h>
- #include <linux/types.h>
- #include <linux/uaccess.h>
- #include <linux/kfifo.h>
- #include <linux/clk/qcom.h>
- #include <soc/qcom/crm.h>
- #include <linux/pinctrl/qcom-pinctrl.h>
- #include <soc/qcom/pcie-pdc.h>
- #include <linux/random.h>
- #include "../pci.h"
- #ifdef CONFIG_SEC_PCIE
- #include <linux/samsung/bsp/sec_class.h>
- #include <linux/samsung/debug/sec_debug.h>
- #include <linux/samsung/debug/qcom/sec_qc_user_reset.h>
- #include <linux/mhi_misc.h>
- #endif
- #define PCIE_VENDOR_ID_QCOM (0x17cb)
- #define PCIE20_PARF_DBI_BASE_ADDR (0x350)
- #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE (0x358)
- #define PCIE_GEN3_PRESET_DEFAULT (0x55555555)
- #define PCIE_GEN3_SPCIE_CAP (0x0154)
- #define PCIE_GEN3_GEN2_CTRL (0x080c)
- #define PCIE_GEN3_RELATED (0x0890)
- #define PCIE_GEN3_RELATED_RATE_SHADOW_SEL_MASK (BIT(25) | BIT(24))
- /* 0 - Gen3, 1 - Gen4 */
- #define PCIE_GEN3_RELATED_RATE_SHADOW_SEL(x) ((x) - PCI_EXP_LNKCAP_SLS_8_0GB)
- #define PCIE_GEN3_EQ_CONTROL (0x08a8)
- #define PCIE_GEN3_EQ_PSET_REQ_VEC_MASK (GENMASK(23, 8))
- #define PCIE_GEN3_EQ_FB_MODE_DIR_CHANGE (0x08ac)
- #define PCIE_GEN3_EQ_FMDC_T_MIN_PHASE23_MASK (0x1f)
- #define PCIE_GEN3_MISC_CONTROL (0x08bc)
- #define PCIE_PL_16GT_CAP (0x168)
- #define PCIE20_PARF_SYS_CTRL (0x00)
- #define PCIE20_PARF_PM_CTRL (0x20)
- #define PCIE20_PARF_PM_STTS (0x24)
- #define PCIE20_PARF_PHY_CTRL (0x40)
- #define PCIE20_PARF_TEST_BUS (0xe4)
- #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL (0x174)
- #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT (0x1a8)
- #define PCIE20_PARF_LTSSM (0x1b0)
- #define LTSSM_EN BIT(8)
- #define SW_CLR_FLUSH_MODE BIT(10)
- #define FLUSH_MODE BIT(11)
- #define PCIE20_PARF_INT_ALL_STATUS (0x224)
- #define PCIE20_PARF_INT_ALL_CLEAR (0x228)
- #define PCIE20_PARF_INT_ALL_MASK (0x22c)
- #define PCIE20_PARF_STATUS (0x230)
- #define FLUSH_COMPLETED BIT(8)
- #define PCIE20_PARF_CFG_BITS_3 (0x2C4)
- #define PCIE20_PARF_DEVICE_TYPE (0x1000)
- #define PCIE20_PARF_BDF_TO_SID_TABLE_N (0x2000)
- #define PCIE20_PARF_BDF_TO_SID_CFG (0x2C00)
- #define PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER (0x180)
- #define PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER_RESET (BIT(31))
- #define PCIE20_PARF_DEBUG_INT_EN (0x190)
- #define PCIE20_PARF_DEBUG_INT_EN_L1SUB_TIMEOUT_BIT (BIT(0))
- #define PCIE20_PARF_INT_ALL_2_STATUS (0x500)
- #define PCIE20_PARF_INT_ALL_2_CLEAR (0x504)
- #define PCIE20_PARF_INT_ALL_2_MASK (0X508)
- #define MSM_PCIE_BW_MGT_INT_STATUS (BIT(25))
- #define PCIE20_PARF_DEBUG_CNT_IN_L0S (0xc10)
- #define PCIE20_PARF_DEBUG_CNT_IN_L1 (0xc0c)
- #define PCIE20_PARF_DEBUG_CNT_IN_L1SUB_L1 (0xc84)
- #define PCIE20_PARF_DEBUG_CNT_IN_L1SUB_L2 (0xc88)
- #define PCIE20_PARF_PM_STTS_1 (0x28)
- #define PM_STATE_L0 0
- #define PM_STATE_L0s 1
- #define PM_STATE_L1 2
- #define PM_STATE_L2 3
- /* Extract the link PM state field (bits 9:7) from PCIE20_PARF_PM_STTS.
-  * 'val' is parenthesized so expression arguments expand safely. */
- #define PCIE_LINK_PM_STATE(val) (((val) & (7 << 7)) >> 7)
- /* True when the extracted PM state equals PM_STATE_L2 (link in L2). */
- #define PCIE_LINK_IN_L2_STATE(val) ((PCIE_LINK_PM_STATE(val)) == PM_STATE_L2)
- #define PCIE20_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS (0x4D0)
- #define PCIE20_PARF_L1SS_SLEEP_MODE_HANDLER_CFG (0x4D4)
- #define PCIE20_PARF_CORE_ERRORS (0x3C0)
- #define PCIE20_LINK_DOWN_AXI_ECAM_BLOCK_STATUS (0x630)
- #define PCIE20_PARF_STATUS (0x230)
- #define PCIE20_PARF_CLKREQ_OVERRIDE (0x2b0)
- #define PCIE20_PARF_CLKREQ_IN_VALUE (BIT(3))
- #define PCIE20_PARF_CLKREQ_IN_ENABLE (BIT(1))
- #define PCIE20_ELBI_SYS_CTRL (0x04)
- #define PCIE20_ELBI_SYS_STTS (0x08)
- #define PCIE20_CAP (0x70)
- #define PCIE20_CAP_DEVCTRLSTATUS (PCIE20_CAP + 0x08)
- #define PCIE20_CAP_LINKCTRLSTATUS (PCIE20_CAP + 0x10)
- #define PCIE_CAP_DLL_ACTIVE BIT(29)
- #define PCIE20_COMMAND_STATUS (0x04)
- #define PCIE20_HEADER_TYPE (0x0c)
- #define PCIE20_BRIDGE_CTRL (0x3c)
- #define PCIE20_BRIDGE_CTRL_SBR (BIT(22))
- #define PCIE20_DEVICE_CONTROL_STATUS (0x78)
- #define PCIE20_DEVICE_CONTROL2_STATUS2 (0x98)
- #define PCIE20_PCI_MSI_CAP_ID_NEXT_CTRL_REG (0x50)
- #define PCIE20_PIPE_LOOPBACK_CONTROL (0x8b8)
- #define PCIE20_AUX_CLK_FREQ_REG (0xb40)
- #define PCIE20_ACK_F_ASPM_CTRL_REG (0x70c)
- #define PCIE20_LANE_SKEW_OFF (0x714)
- #define PCIE20_ACK_N_FTS (0xff00)
- #define PCIE20_PLR_IATU_VIEWPORT (0x900)
- #define PCIE20_PLR_IATU_CTRL1 (0x904)
- #define PCIE20_PLR_IATU_CTRL2 (0x908)
- #define PCIE20_PLR_IATU_LBAR (0x90c)
- #define PCIE20_PLR_IATU_UBAR (0x910)
- #define PCIE20_PLR_IATU_LAR (0x914)
- #define PCIE20_PLR_IATU_LTAR (0x918)
- #define PCIE20_PLR_IATU_UTAR (0x91c)
- #define PCIE_IATU_BASE(n) (n * 0x200)
- #define PCIE_IATU_CTRL1(n) (PCIE_IATU_BASE(n) + 0x00)
- #define PCIE_IATU_CTRL2(n) (PCIE_IATU_BASE(n) + 0x04)
- #define PCIE_IATU_LBAR(n) (PCIE_IATU_BASE(n) + 0x08)
- #define PCIE_IATU_UBAR(n) (PCIE_IATU_BASE(n) + 0x0c)
- #define PCIE_IATU_LAR(n) (PCIE_IATU_BASE(n) + 0x10)
- #define PCIE_IATU_LTAR(n) (PCIE_IATU_BASE(n) + 0x14)
- #define PCIE_IATU_UTAR(n) (PCIE_IATU_BASE(n) + 0x18)
- #define PCIE20_PORT_LINK_CTRL_REG (0x710)
- #define PCIE20_CTRL1_TYPE_CFG0 (0x04)
- #define PCIE20_CTRL1_TYPE_CFG1 (0x05)
- #define PCIE20_CAP_ID (0x10)
- #define L1SUB_CAP_ID (0x1e)
- #define PCIE_CAP_PTR_OFFSET (0x34)
- #define PCIE_EXT_CAP_OFFSET (0x100)
- #define PCIE20_AER_UNCORR_ERR_STATUS_REG (0x104)
- #define PCIE20_AER_CORR_ERR_STATUS_REG (0x110)
- #define PCIE20_AER_ROOT_ERR_STATUS_REG (0x130)
- #define PCIE20_AER_ERR_SRC_ID_REG (0x134)
- #define PCIE20_L1SUB_CONTROL1_REG (0x204)
- #define PCIE20_TX_P_FC_CREDIT_STATUS_OFF (0x730)
- #define PCIE20_TX_NP_FC_CREDIT_STATUS_OFF (0x734)
- #define PCIE20_TX_CPL_FC_CREDIT_STATUS_OFF (0x738)
- #define PCIE20_QUEUE_STATUS_OFF (0x73C)
- #ifdef CONFIG_SEC_PCIE_AER
- #define PCIE20_AER_CAP_REG 0x118
- #define PCIE20_AER_HEADER_LOG_REG 0x11C
- #define PCIE20_AER_TLPPREFIX_LOG_REG 0x138
- #define PCI_ERR_TLPPREFIX_LOG 0x38
- #endif
- #define RD (0)
- #define WR (1)
- #define MSM_PCIE_ERROR (-1)
- #define PERST_PROPAGATION_DELAY_US_MIN (1000)
- #define PERST_PROPAGATION_DELAY_US_MAX (1005)
- #define SWITCH_DELAY_MAX (20)
- #define REFCLK_STABILIZATION_DELAY_US_MIN (1000)
- #define REFCLK_STABILIZATION_DELAY_US_MAX (1005)
- #define LINK_UP_TIMEOUT_US_MIN (5000)
- #define LINK_UP_TIMEOUT_US_MAX (5101)
- #define LINK_UP_CHECK_MAX_COUNT (20)
- #define EP_UP_TIMEOUT_US_MIN (1000)
- #define EP_UP_TIMEOUT_US_MAX (1005)
- #define EP_UP_TIMEOUT_US (1000000)
- #define PHY_STABILIZATION_DELAY_US_MIN (995)
- #define PHY_STABILIZATION_DELAY_US_MAX (1005)
- #define MSM_PCIE_CRC8_POLYNOMIAL (BIT(2) | BIT(1) | BIT(0))
- #define GEN1_SPEED (0x1)
- #define GEN2_SPEED (0x2)
- #define GEN3_SPEED (0x3)
- #define LINK_WIDTH_X1 (0x1)
- #define LINK_WIDTH_X2 (0x3)
- #define LINK_WIDTH_MASK (0x3f)
- #define LINK_WIDTH_SHIFT (16)
- #define NUM_OF_LANES_MASK (0x1f)
- #define NUM_OF_LANES_SHIFT (8)
- #define MSM_PCIE_LTSSM_MASK (0x3f)
- /*
- * Allow selection of clkreq signal with PCIe controller
- * 1 - PCIe controller receives clk req from cesta
- * 0 - PCIe controller receives clk req from direct clk req gpio
- */
- #define PARF_CESTA_CLKREQ_SEL BIT(0)
- /* Override bit for sending timeout indication to cesta (debug purpose) */
- #define PARF_CESTA_L1SUB_TIMEOUT_OVERRIDE BIT(1)
- /* Override value for sending timeout indication to cesta (debug purpose) */
- #define PARF_CESTA_L1SUB_TIMEOUT_VALUE BIT(2)
- /* Enabling the l1ss timeout indication to cesta */
- #define PARF_CESTA_L1SUB_TIMEOUT_EXT_INT_EN BIT(3)
- /*
- * Enabling l1ss timeout indication to internal global int generation.
- * Legacy method (0 - no global interrupt for l1ss timeout,
- * 1 - global interrupt for l1ss timeout)
- */
- #define PARF_LEGACY_L1SUB_TIMEOUT_INT_EN BIT(31)
- #define MSM_PCIE_DRV_MAJOR_VERSION (1)
- #define MSM_PCIE_DRV_MINOR_VERSION (0)
- #define MSM_PCIE_DRV_SEQ_RESV (0xffff)
- #define IPC_TIMEOUT_MS (250)
- #define PHY_READY_TIMEOUT_COUNT (10)
- #define XMLH_LINK_UP (0x400)
- #define MAX_PROP_SIZE (32)
- #define MAX_RC_NAME_LEN (15)
- #define MSM_PCIE_MAX_VREG (6)
- #define MAX_RC_NUM (5)
- #define MAX_DEVICE_NUM (20)
- #define PCIE_TLP_RD_SIZE (0x5)
- #define PCIE_LOG_PAGES (50)
- #define PCIE_CONF_SPACE_DW (1024)
- #define PCIE_CLEAR (0xdeadbeef)
- #define PCIE_LINK_DOWN (0xffffffff)
- #define MSM_PCIE_MAX_RESET (5)
- #define MSM_PCIE_MAX_PIPE_RESET (1)
- #define MSM_PCIE_MAX_LINKDOWN_RESET (2)
- /* QPHY_POWER_DOWN_CONTROL */
- #define MSM_PCIE_PHY_SW_PWRDN BIT(0)
- #define MSM_PCIE_PHY_REFCLK_DRV_DSBL BIT(1)
- #define MSM_PCIE_PHY_SW_AUX_CLK_REQ (BIT(6) | BIT(7))
- #define MSM_PCIE_PHY_SW_AUX_CLK_REQ_VAL 0x2
- #define MSM_PCIE_EXT_CLKBUF_EN_MUX BIT(1)
- #define MSM_PCIE_EXT_CLKBUF_EN_MUX_VAL 0x1
- #define ICC_AVG_BW (500)
- #define ICC_PEAK_BW (800)
- /* Each tick is aux clk freq in MHz */
- #define L1SS_TIMEOUT_US_TO_TICKS(x, freq) (x * freq)
- #define L1SS_TIMEOUT_US (100000)
- #define L23_READY_POLL_TIMEOUT (100000)
- #define L1SS_POLL_INTERVAL_US (1000)
- #define L1SS_POLL_TIMEOUT_US (200000)
- #ifdef CONFIG_PHYS_ADDR_T_64BIT
- #define PCIE_UPPER_ADDR(addr) ((u32)((addr) >> 32))
- #else
- #define PCIE_UPPER_ADDR(addr) (0x0)
- #endif
- #define PCIE_LOWER_ADDR(addr) ((u32)((addr) & 0xffffffff))
- /* Retrieve the driver's private device struct stashed in bus->sysdata.
-  * 'bus' is parenthesized so expression arguments expand safely. */
- #define PCIE_BUS_PRIV_DATA(bus) \
- ((struct msm_pcie_dev_t *)((bus)->sysdata))
- /* Config Space Offsets */
- /* Pack bus and devfn into the upper bytes of a config-space address. */
- #define BDF_OFFSET(bus, devfn) \
- (((bus) << 24) | ((devfn) << 16))
- #define PCIE_DBG(dev, fmt, arg...) do { \
- if ((dev) && (dev)->ipc_log_long) \
- ipc_log_string((dev)->ipc_log_long, \
- "DBG1:%s: " fmt, __func__, ##arg); \
- if ((dev) && (dev)->ipc_log) \
- ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, ##arg); \
- } while (0)
- #define PCIE_DBG2(dev, fmt, arg...) do { \
- if ((dev) && (dev)->ipc_log) \
- ipc_log_string((dev)->ipc_log, "DBG2:%s: " fmt, \
- __func__, ##arg);\
- } while (0)
- #define PCIE_DBG3(dev, fmt, arg...) do { \
- if ((dev) && (dev)->ipc_log) \
- ipc_log_string((dev)->ipc_log, "DBG3:%s: " fmt, \
- __func__, ##arg);\
- } while (0)
- #define PCIE_DUMP(dev, fmt, arg...) do { \
- if ((dev) && (dev)->ipc_log_dump) \
- ipc_log_string((dev)->ipc_log_dump, \
- "DUMP:%s: " fmt, __func__, ##arg); \
- } while (0)
- #define PCIE_DBG_FS(dev, fmt, arg...) do { \
- if ((dev) && (dev)->ipc_log_dump) \
- ipc_log_string((dev)->ipc_log_dump, \
- "DBG_FS:%s: " fmt, __func__, ##arg); \
- pr_alert("%s: " fmt, __func__, ##arg); \
- } while (0)
- #define PCIE_INFO(dev, fmt, arg...) do { \
- if ((dev) && (dev)->ipc_log_long) \
- ipc_log_string((dev)->ipc_log_long, \
- "INFO:%s: " fmt, __func__, ##arg); \
- if ((dev) && (dev)->ipc_log) \
- ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, ##arg); \
- pr_info("%s: " fmt, __func__, ##arg); \
- } while (0)
- #define PCIE_ERR(dev, fmt, arg...) do { \
- if ((dev) && (dev)->ipc_log_long) \
- ipc_log_string((dev)->ipc_log_long, \
- "ERR:%s: " fmt, __func__, ##arg); \
- if ((dev) && (dev)->ipc_log) \
- ipc_log_string((dev)->ipc_log, "%s: " fmt, __func__, ##arg); \
- pr_err("%s: " fmt, __func__, arg); \
- } while (0)
- #define CHECK_NTN3_VERSION_MASK (0x000000FF)
- #define NTN3_CHIP_VERSION_1 (0x00000000)
- #ifdef CONFIG_SEC_PANIC_PCIE_ERR
- #define PCIE_SEC_DBG_ERR PCIE_ERR
- #else
- #define PCIE_SEC_DBG_ERR PCIE_DBG
- #endif
- enum msm_pcie_res {
- MSM_PCIE_RES_PARF,
- MSM_PCIE_RES_PHY,
- MSM_PCIE_RES_DM_CORE,
- MSM_PCIE_RES_ELBI,
- MSM_PCIE_RES_IATU,
- MSM_PCIE_RES_CONF,
- MSM_PCIE_RES_SM,
- MSM_PCIE_RES_MHI,
- MSM_PCIE_RES_TCSR,
- MSM_PCIE_RES_RUMI,
- MSM_PCIE_MAX_RES,
- };
- enum msm_pcie_irq {
- MSM_PCIE_INT_A,
- MSM_PCIE_INT_B,
- MSM_PCIE_INT_C,
- MSM_PCIE_INT_D,
- MSM_PCIE_INT_GLOBAL_INT,
- MSM_PCIE_MAX_IRQ,
- };
- enum msm_pcie_irq_event {
- MSM_PCIE_INT_EVT_LINK_DOWN = 1,
- MSM_PCIE_INT_EVT_BME,
- MSM_PCIE_INT_EVT_PM_TURNOFF,
- MSM_PCIE_INT_EVT_DEBUG,
- MSM_PCIE_INT_EVT_LTR,
- MSM_PCIE_INT_EVT_MHI_Q6,
- MSM_PCIE_INT_EVT_MHI_A7,
- MSM_PCIE_INT_EVT_DSTATE_CHANGE,
- MSM_PCIE_INT_EVT_L1SUB_TIMEOUT,
- MSM_PCIE_INT_EVT_MMIO_WRITE,
- MSM_PCIE_INT_EVT_CFG_WRITE,
- MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N,
- MSM_PCIE_INT_EVT_LINK_UP,
- MSM_PCIE_INT_EVT_AER_LEGACY,
- MSM_PCIE_INT_EVT_AER_ERR,
- MSM_PCIE_INT_EVT_PME_LEGACY,
- MSM_PCIE_INT_EVT_PLS_PME,
- MSM_PCIE_INT_EVT_INTD,
- MSM_PCIE_INT_EVT_INTC,
- MSM_PCIE_INT_EVT_INTB,
- MSM_PCIE_INT_EVT_INTA,
- MSM_PCIE_INT_EVT_EDMA,
- MSM_PCIE_INT_EVT_MSI_0,
- MSM_PCIE_INT_EVT_MSI_1,
- MSM_PCIE_INT_EVT_MSI_2,
- MSM_PCIE_INT_EVT_MSI_3,
- MSM_PCIE_INT_EVT_MSI_4,
- MSM_PCIE_INT_EVT_MSI_5,
- MSM_PCIE_INT_EVT_MSI_6,
- MSM_PCIE_INT_EVT_MSI_7,
- MSM_PCIE_INT_EVT_MAX = 30,
- };
- enum msm_pcie_gpio {
- MSM_PCIE_GPIO_PERST,
- MSM_PCIE_GPIO_WAKE,
- MSM_PCIE_GPIO_EP,
- MSM_PCIE_GPIO_CARD_PRESENCE_PIN,
- MSM_PCIE_MAX_GPIO
- };
- enum msm_pcie_link_status {
- MSM_PCIE_LINK_DEINIT,
- MSM_PCIE_LINK_ENABLED,
- MSM_PCIE_LINK_DISABLED,
- MSM_PCIE_LINK_DRV,
- MSM_PCIE_LINK_DOWN,
- };
- enum msm_pcie_boot_option {
- MSM_PCIE_NO_PROBE_ENUMERATION = BIT(0),
- MSM_PCIE_NO_WAKE_ENUMERATION = BIT(1)
- };
- enum msm_pcie_ltssm {
- MSM_PCIE_LTSSM_DETECT_QUIET = 0x00,
- MSM_PCIE_LTSSM_DETECT_ACT = 0x01,
- MSM_PCIE_LTSSM_POLL_ACTIVE = 0x02,
- MSM_PCIE_LTSSM_POLL_COMPLIANCE = 0x03,
- MSM_PCIE_LTSSM_POLL_CONFIG = 0x04,
- MSM_PCIE_LTSSM_PRE_DETECT_QUIET = 0x05,
- MSM_PCIE_LTSSM_DETECT_WAIT = 0x06,
- MSM_PCIE_LTSSM_CFG_LINKWD_START = 0x07,
- MSM_PCIE_LTSSM_CFG_LINKWD_ACEPT = 0x08,
- MSM_PCIE_LTSSM_CFG_LANENUM_WAIT = 0x09,
- MSM_PCIE_LTSSM_CFG_LANENUM_ACEPT = 0x0a,
- MSM_PCIE_LTSSM_CFG_COMPLETE = 0x0b,
- MSM_PCIE_LTSSM_CFG_IDLE = 0x0c,
- MSM_PCIE_LTSSM_RCVRY_LOCK = 0x0d,
- MSM_PCIE_LTSSM_RCVRY_SPEED = 0x0e,
- MSM_PCIE_LTSSM_RCVRY_RCVRCFG = 0x0f,
- MSM_PCIE_LTSSM_RCVRY_IDLE = 0x10,
- MSM_PCIE_LTSSM_RCVRY_EQ0 = 0x20,
- MSM_PCIE_LTSSM_RCVRY_EQ1 = 0x21,
- MSM_PCIE_LTSSM_RCVRY_EQ2 = 0x22,
- MSM_PCIE_LTSSM_RCVRY_EQ3 = 0x23,
- MSM_PCIE_LTSSM_L0 = 0x11,
- MSM_PCIE_LTSSM_L0S = 0x12,
- MSM_PCIE_LTSSM_L123_SEND_EIDLE = 0x13,
- MSM_PCIE_LTSSM_L1_IDLE = 0x14,
- MSM_PCIE_LTSSM_L2_IDLE = 0x15,
- MSM_PCIE_LTSSM_L2_WAKE = 0x16,
- MSM_PCIE_LTSSM_DISABLED_ENTRY = 0x17,
- MSM_PCIE_LTSSM_DISABLED_IDLE = 0x18,
- MSM_PCIE_LTSSM_DISABLED = 0x19,
- MSM_PCIE_LTSSM_LPBK_ENTRY = 0x1a,
- MSM_PCIE_LTSSM_LPBK_ACTIVE = 0x1b,
- MSM_PCIE_LTSSM_LPBK_EXIT = 0x1c,
- MSM_PCIE_LTSSM_LPBK_EXIT_TIMEOUT = 0x1d,
- MSM_PCIE_LTSSM_HOT_RESET_ENTRY = 0x1e,
- MSM_PCIE_LTSSM_HOT_RESET = 0x1f,
- };
- static const char * const msm_pcie_ltssm_str[] = {
- [MSM_PCIE_LTSSM_DETECT_QUIET] = "LTSSM_DETECT_QUIET",
- [MSM_PCIE_LTSSM_DETECT_ACT] = "LTSSM_DETECT_ACT",
- [MSM_PCIE_LTSSM_POLL_ACTIVE] = "LTSSM_POLL_ACTIVE",
- [MSM_PCIE_LTSSM_POLL_COMPLIANCE] = "LTSSM_POLL_COMPLIANCE",
- [MSM_PCIE_LTSSM_POLL_CONFIG] = "LTSSM_POLL_CONFIG",
- [MSM_PCIE_LTSSM_PRE_DETECT_QUIET] = "LTSSM_PRE_DETECT_QUIET",
- [MSM_PCIE_LTSSM_DETECT_WAIT] = "LTSSM_DETECT_WAIT",
- [MSM_PCIE_LTSSM_CFG_LINKWD_START] = "LTSSM_CFG_LINKWD_START",
- [MSM_PCIE_LTSSM_CFG_LINKWD_ACEPT] = "LTSSM_CFG_LINKWD_ACEPT",
- [MSM_PCIE_LTSSM_CFG_LANENUM_WAIT] = "LTSSM_CFG_LANENUM_WAIT",
- [MSM_PCIE_LTSSM_CFG_LANENUM_ACEPT] = "LTSSM_CFG_LANENUM_ACEPT",
- [MSM_PCIE_LTSSM_CFG_COMPLETE] = "LTSSM_CFG_COMPLETE",
- [MSM_PCIE_LTSSM_CFG_IDLE] = "LTSSM_CFG_IDLE",
- [MSM_PCIE_LTSSM_RCVRY_LOCK] = "LTSSM_RCVRY_LOCK",
- [MSM_PCIE_LTSSM_RCVRY_SPEED] = "LTSSM_RCVRY_SPEED",
- [MSM_PCIE_LTSSM_RCVRY_RCVRCFG] = "LTSSM_RCVRY_RCVRCFG",
- [MSM_PCIE_LTSSM_RCVRY_IDLE] = "LTSSM_RCVRY_IDLE",
- [MSM_PCIE_LTSSM_RCVRY_EQ0] = "LTSSM_RCVRY_EQ0",
- [MSM_PCIE_LTSSM_RCVRY_EQ1] = "LTSSM_RCVRY_EQ1",
- [MSM_PCIE_LTSSM_RCVRY_EQ2] = "LTSSM_RCVRY_EQ2",
- [MSM_PCIE_LTSSM_RCVRY_EQ3] = "LTSSM_RCVRY_EQ3",
- [MSM_PCIE_LTSSM_L0] = "LTSSM_L0",
- [MSM_PCIE_LTSSM_L0S] = "LTSSM_L0S",
- [MSM_PCIE_LTSSM_L123_SEND_EIDLE] = "LTSSM_L123_SEND_EIDLE",
- [MSM_PCIE_LTSSM_L1_IDLE] = "LTSSM_L1_IDLE",
- [MSM_PCIE_LTSSM_L2_IDLE] = "LTSSM_L2_IDLE",
- [MSM_PCIE_LTSSM_L2_WAKE] = "LTSSM_L2_WAKE",
- [MSM_PCIE_LTSSM_DISABLED_ENTRY] = "LTSSM_DISABLED_ENTRY",
- [MSM_PCIE_LTSSM_DISABLED_IDLE] = "LTSSM_DISABLED_IDLE",
- [MSM_PCIE_LTSSM_DISABLED] = "LTSSM_DISABLED",
- [MSM_PCIE_LTSSM_LPBK_ENTRY] = "LTSSM_LPBK_ENTRY",
- [MSM_PCIE_LTSSM_LPBK_ACTIVE] = "LTSSM_LPBK_ACTIVE",
- [MSM_PCIE_LTSSM_LPBK_EXIT] = "LTSSM_LPBK_EXIT",
- [MSM_PCIE_LTSSM_LPBK_EXIT_TIMEOUT] = "LTSSM_LPBK_EXIT_TIMEOUT",
- [MSM_PCIE_LTSSM_HOT_RESET_ENTRY] = "LTSSM_HOT_RESET_ENTRY",
- [MSM_PCIE_LTSSM_HOT_RESET] = "LTSSM_HOT_RESET",
- };
- #define TO_LTSSM_STR(state) ((state) >= ARRAY_SIZE(msm_pcie_ltssm_str) ? \
- "LTSSM_INVALID" : msm_pcie_ltssm_str[state])
- enum msm_pcie_debugfs_option {
- MSM_PCIE_OUTPUT_PCIE_INFO,
- MSM_PCIE_DISABLE_LINK,
- MSM_PCIE_ENABLE_LINK,
- MSM_PCIE_DISABLE_ENABLE_LINK,
- MSM_PCIE_DISABLE_L0S,
- MSM_PCIE_ENABLE_L0S,
- MSM_PCIE_DISABLE_L1,
- MSM_PCIE_ENABLE_L1,
- MSM_PCIE_DISABLE_L1SS,
- MSM_PCIE_ENABLE_L1SS,
- MSM_PCIE_ENUMERATION,
- MSM_PCIE_DEENUMERATION,
- MSM_PCIE_READ_PCIE_REGISTER,
- MSM_PCIE_WRITE_PCIE_REGISTER,
- MSM_PCIE_DUMP_PCIE_REGISTER_SPACE,
- MSM_PCIE_DISABLE_AER,
- MSM_PCIE_ENABLE_AER,
- MSM_PCIE_GPIO_STATUS,
- MSM_PCIE_ASSERT_PERST,
- MSM_PCIE_DEASSERT_PERST,
- MSM_PCIE_KEEP_RESOURCES_ON,
- MSM_PCIE_FORCE_GEN1,
- MSM_PCIE_FORCE_GEN2,
- MSM_PCIE_FORCE_GEN3,
- MSM_PCIE_TRIGGER_SBR,
- MSM_PCIE_REMOTE_LOOPBACK,
- MSM_PCIE_LOCAL_LOOPBACK,
- MSM_PCIE_MAX_DEBUGFS_OPTION
- };
- static const char * const
- msm_pcie_debugfs_option_desc[MSM_PCIE_MAX_DEBUGFS_OPTION] = {
- "OUTPUT PCIE INFO",
- "DISABLE LINK",
- "ENABLE LINK",
- "DISABLE AND ENABLE LINK",
- "DISABLE L0S",
- "ENABLE L0S",
- "DISABLE L1",
- "ENABLE L1",
- "DISABLE L1SS",
- "ENABLE L1SS",
- "ENUMERATE",
- "DE-ENUMERATE",
- "READ A PCIE REGISTER",
- "WRITE TO PCIE REGISTER",
- "DUMP PCIE REGISTER SPACE",
- "SET AER ENABLE FLAG",
- "CLEAR AER ENABLE FLAG",
- "OUTPUT PERST AND WAKE GPIO STATUS",
- "ASSERT PERST",
- "DE-ASSERT PERST",
- "SET KEEP_RESOURCES_ON FLAG",
- "SET MAXIMUM LINK SPEED TO GEN 1",
- "SET MAXIMUM LINK SPEED TO GEN 2",
- "SET MAXIMUM LINK SPEED TO GEN 3",
- "Trigger SBR",
- "PCIE REMOTE LOOPBACK",
- "PCIE LOCAL LOOPBACK",
- };
- /* gpio info structure */
- struct msm_pcie_gpio_info_t {
- char *name;
- uint32_t num;
- bool out;
- uint32_t on;
- uint32_t init;
- bool required;
- };
- /* voltage regulator info structrue */
- struct msm_pcie_vreg_info_t {
- struct regulator *hdl;
- char *name;
- uint32_t max_v;
- uint32_t min_v;
- uint32_t opt_mode;
- bool required;
- };
- /* reset info structure */
- struct msm_pcie_reset_info_t {
- struct reset_control *hdl;
- char *name;
- bool required;
- };
- /* clock info structure */
- struct msm_pcie_clk_info_t {
- struct clk *hdl;
- const char *name;
- u32 freq;
- /*
- * Suppressible clocks are not turned off during drv suspend.
- * These clocks will be automatically gated during XO shutdown.
- */
- bool suppressible;
- };
- /* resource info structure */
- struct msm_pcie_res_info_t {
- char *name;
- struct resource *resource;
- void __iomem *base;
- };
- /* irq info structrue */
- struct msm_pcie_irq_info_t {
- char *name;
- uint32_t num;
- };
- /* bandwidth info structure */
- struct msm_pcie_bw_scale_info_t {
- u32 cx_vreg_min;
- u32 mx_vreg_min;
- u32 rate_change_freq;
- };
- /* phy info structure */
- struct msm_pcie_phy_info_t {
- u32 offset;
- u32 val;
- u32 delay;
- };
- /* tcsr info structure */
- struct msm_pcie_tcsr_info_t {
- u32 offset;
- u32 val;
- };
- /* sid info structure */
- struct msm_pcie_sid_info_t {
- u16 bdf;
- u8 pcie_sid;
- u8 hash;
- u8 next_hash;
- u32 smmu_sid;
- u32 value;
- };
- /* PCIe device info structure */
- struct msm_pcie_device_info {
- struct list_head pcidev_node;
- struct pci_dev *dev;
- };
- /* DRV IPC command type */
- enum msm_pcie_drv_cmds {
- MSM_PCIE_DRV_CMD_ENABLE = 0xc0000000,
- MSM_PCIE_DRV_CMD_DISABLE = 0xc0000001,
- MSM_PCIE_DRV_CMD_ENABLE_L1SS_SLEEP = 0xc0000005,
- MSM_PCIE_DRV_CMD_DISABLE_L1SS_SLEEP = 0xc0000006,
- MSM_PCIE_DRV_CMD_DISABLE_PC = 0xc0000007,
- MSM_PCIE_DRV_CMD_ENABLE_PC = 0xc0000008,
- };
- /* DRV IPC message type */
- enum msm_pcie_drv_msg_id {
- MSM_PCIE_DRV_MSG_ID_ACK = 0xa,
- MSM_PCIE_DRV_MSG_ID_CMD = 0xc,
- MSM_PCIE_DRV_MSG_ID_EVT = 0xe,
- };
- /* DRV IPC header */
- struct __packed msm_pcie_drv_header {
- u16 major_ver;
- u16 minor_ver;
- u16 msg_id;
- u16 seq;
- u16 reply_seq;
- u16 payload_size;
- u32 dev_id;
- u8 reserved[8];
- };
- /* DRV IPC transfer ring element */
- struct __packed msm_pcie_drv_tre {
- u32 dword[4];
- };
- struct __packed msm_pcie_drv_msg {
- struct msm_pcie_drv_header hdr;
- struct msm_pcie_drv_tre pkt;
- };
- struct msm_pcie_drv_info {
- bool ep_connected; /* drv supports only one endpoint (no switch) */
- struct msm_pcie_drv_msg drv_enable; /* hand off payload */
- struct msm_pcie_drv_msg drv_disable; /* payload to request back */
- struct msm_pcie_drv_msg drv_enable_l1ss_sleep; /* enable l1ss sleep */
- struct msm_pcie_drv_msg drv_disable_l1ss_sleep; /* disable l1ss sleep */
- struct msm_pcie_drv_msg drv_enable_pc; /* enable drv pc */
- struct msm_pcie_drv_msg drv_disable_pc; /* disable drv pc */
- int dev_id;
- u16 seq;
- u16 reply_seq;
- u32 timeout_ms; /* IPC command timeout */
- struct completion completion;
- };
- /* For AER logging */
- #define AER_ERROR_SOURCES_MAX (128)
- #define AER_MAX_TYPEOF_COR_ERRS 16 /* as per PCI_ERR_COR_STATUS */
- #define AER_MAX_TYPEOF_UNCOR_ERRS 27 /* as per PCI_ERR_UNCOR_STATUS*/
- #define PCI_EXP_AER_FLAGS (PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
- PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)
- #define AER_MAX_MULTI_ERR_DEVICES 5 /* Not likely to have more */
- struct msm_aer_err_info {
- struct msm_pcie_dev_t *rdev;
- struct pci_dev *dev[AER_MAX_MULTI_ERR_DEVICES];
- int error_dev_num;
- unsigned int id:16;
- unsigned int severity:2; /* 0:NONFATAL | 1:FATAL | 2:COR */
- unsigned int __pad1:5;
- unsigned int multi_error_valid:1;
- unsigned int first_error:5;
- unsigned int __pad2:2;
- unsigned int tlp_header_valid:1;
- unsigned int status; /* COR/UNCOR Error Status */
- unsigned int mask; /* COR/UNCOR Error Mask */
- struct aer_header_log_regs tlp; /* TLP Header */
- u32 l1ss_ctl1; /* PCI_L1SS_CTL1 reg value */
- u16 lnksta; /* PCI_EXP_LNKSTA reg value */
- };
- struct aer_err_source {
- unsigned int status;
- unsigned int id;
- };
- /* AER stats for the device */
- struct aer_stats {
- /*
- * Fields for all AER capable devices. They indicate the errors
- * "as seen by this device". Note that this may mean that if an
- * end point is causing problems, the AER counters may increment
- * at its link partner (e.g. root port) because the errors will be
- * "seen" by the link partner and not he problematic end point
- * itself (which may report all counters as 0 as it never saw any
- * problems).
- */
- /* Counters for different type of correctable errors */
- u64 dev_cor_errs[AER_MAX_TYPEOF_COR_ERRS];
- /* Counters for different type of fatal uncorrectable errors */
- u64 dev_fatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
- /* Counters for different type of nonfatal uncorrectable errors */
- u64 dev_nonfatal_errs[AER_MAX_TYPEOF_UNCOR_ERRS];
- /* Total number of ERR_COR sent by this device */
- u64 dev_total_cor_errs;
- /* Total number of ERR_FATAL sent by this device */
- u64 dev_total_fatal_errs;
- /* Total number of ERR_NONFATAL sent by this device */
- u64 dev_total_nonfatal_errs;
- /*
- * Fields for Root ports & root complex event collectors only, these
- * indicate the total number of ERR_COR, ERR_FATAL, and ERR_NONFATAL
- * messages received by the root port / event collector, INCLUDING the
- * ones that are generated internally (by the rootport itself)
- */
- u64 rootport_total_cor_errs;
- u64 rootport_total_fatal_errs;
- u64 rootport_total_nonfatal_errs;
- };
- #define AER_LOG_TLP_MASKS (PCI_ERR_UNC_POISON_TLP| \
- PCI_ERR_UNC_ECRC| \
- PCI_ERR_UNC_UNSUP| \
- PCI_ERR_UNC_COMP_ABORT| \
- PCI_ERR_UNC_UNX_COMP| \
- PCI_ERR_UNC_MALF_TLP)
- #define ERR_COR_ID(d) ((d) & 0xffff)
- #define ERR_UNCOR_ID(d) ((d) >> 16)
- #define AER_AGENT_RECEIVER 0
- #define AER_AGENT_REQUESTER 1
- #define AER_AGENT_COMPLETER 2
- #define AER_AGENT_TRANSMITTER 3
- #define AER_AGENT_REQUESTER_MASK(t) (((t) == AER_CORRECTABLE) ? \
- 0 : (PCI_ERR_UNC_COMP_TIME|PCI_ERR_UNC_UNSUP))
- #define AER_AGENT_COMPLETER_MASK(t) (((t) == AER_CORRECTABLE) ? \
- 0 : PCI_ERR_UNC_COMP_ABORT)
- #define AER_AGENT_TRANSMITTER_MASK(t) (((t) == AER_CORRECTABLE) ? \
- (PCI_ERR_COR_REP_ROLL|PCI_ERR_COR_REP_TIMER) : 0)
- #define AER_GET_AGENT(t, e) \
- (((e) & AER_AGENT_COMPLETER_MASK(t)) ? AER_AGENT_COMPLETER : \
- ((e) & AER_AGENT_REQUESTER_MASK(t)) ? AER_AGENT_REQUESTER : \
- ((e) & AER_AGENT_TRANSMITTER_MASK(t)) ? AER_AGENT_TRANSMITTER : \
- AER_AGENT_RECEIVER)
- #define AER_PHYSICAL_LAYER_ERROR 0
- #define AER_DATA_LINK_LAYER_ERROR 1
- #define AER_TRANSACTION_LAYER_ERROR 2
- #define AER_PHYSICAL_LAYER_ERROR_MASK(t) (((t) == AER_CORRECTABLE) ? \
- PCI_ERR_COR_RCVR : 0)
- #define AER_DATA_LINK_LAYER_ERROR_MASK(t) (((t) == AER_CORRECTABLE) ? \
- (PCI_ERR_COR_BAD_TLP| \
- PCI_ERR_COR_BAD_DLLP| \
- PCI_ERR_COR_REP_ROLL| \
- PCI_ERR_COR_REP_TIMER) : PCI_ERR_UNC_DLP)
- #define AER_GET_LAYER_ERROR(t, e) \
- (((e) & AER_PHYSICAL_LAYER_ERROR_MASK(t)) ? AER_PHYSICAL_LAYER_ERROR : \
- ((e) & AER_DATA_LINK_LAYER_ERROR_MASK(t)) ? AER_DATA_LINK_LAYER_ERROR : \
- AER_TRANSACTION_LAYER_ERROR)
- /*
- * AER error strings
- */
- static const char * const aer_error_severity_string[] = {
- "Uncorrected (Non-Fatal)",
- "Uncorrected (Fatal)",
- "Corrected"
- };
- static const char * const aer_error_layer[] = {
- "Physical Layer",
- "Data Link Layer",
- "Transaction Layer"
- };
- static const char * const aer_correctable_error_string[] = {
- "RxErr", /* Bit Position 0 */
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- "BadTLP", /* Bit Position 6 */
- "BadDLLP", /* Bit Position 7 */
- "Rollover", /* Bit Position 8 */
- NULL,
- NULL,
- NULL,
- "Timeout", /* Bit Position 12 */
- "NonFatalErr", /* Bit Position 13 */
- "CorrIntErr", /* Bit Position 14 */
- "HeaderOF", /* Bit Position 15 */
- NULL, /* Bit Position 16 */
- NULL, /* Bit Position 17 */
- NULL, /* Bit Position 18 */
- NULL, /* Bit Position 19 */
- NULL, /* Bit Position 20 */
- NULL, /* Bit Position 21 */
- NULL, /* Bit Position 22 */
- NULL, /* Bit Position 23 */
- NULL, /* Bit Position 24 */
- NULL, /* Bit Position 25 */
- NULL, /* Bit Position 26 */
- NULL, /* Bit Position 27 */
- NULL, /* Bit Position 28 */
- NULL, /* Bit Position 29 */
- NULL, /* Bit Position 30 */
- NULL, /* Bit Position 31 */
- };
- static const char * const aer_uncorrectable_error_string[] = {
- "Undefined", /* Bit Position 0 */
- NULL,
- NULL,
- NULL,
- "DLP", /* Bit Position 4 */
- "SDES", /* Bit Position 5 */
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- "TLP", /* Bit Position 12 */
- "FCP", /* Bit Position 13 */
- "CmpltTO", /* Bit Position 14 */
- "CmpltAbrt", /* Bit Position 15 */
- "UnxCmplt", /* Bit Position 16 */
- "RxOF", /* Bit Position 17 */
- "MalfTLP", /* Bit Position 18 */
- "ECRC", /* Bit Position 19 */
- "UnsupReq", /* Bit Position 20 */
- "ACSViol", /* Bit Position 21 */
- "UncorrIntErr", /* Bit Position 22 */
- "BlockedTLP", /* Bit Position 23 */
- "AtomicOpBlocked", /* Bit Position 24 */
- "TLPBlockedErr", /* Bit Position 25 */
- "PoisonTLPBlocked", /* Bit Position 26 */
- NULL, /* Bit Position 27 */
- NULL, /* Bit Position 28 */
- NULL, /* Bit Position 29 */
- NULL, /* Bit Position 30 */
- NULL, /* Bit Position 31 */
- };
- static const char * const aer_agent_string[] = {
- "Receiver ID",
- "Requester ID",
- "Completer ID",
- "Transmitter ID"
- };
- /* PCIe SM register indexes as defined by module param*/
- enum msm_pcie_sm_regs {
- PCIE_SM_BASE,
- PCIE_SM_PWR_CTRL_OFFSET,
- PCIE_SM_PWR_MASK_OFFSET,
- PCIE_SM_PWR_INSTANCE_OFFSET,
- PCIE_SM_NUM_INSTANCES,
- MAX_PCIE_SM_REGS,
- };
- /*
- * This array contains the address of the PCIE_SM
- * PWR_CTRL, PWR_CTRL_MASK registers so that these
- * can be programmed for override. If the override
- * is not done then CX Power Collapse won't happen.
- *
- * Format of the array is <address of PCIE_SM reg base>,
- * <offset of PWR_CTRL register>, <offset of PWR_CTRL_MASK register>,
- * <offset of next PCIE instance>, <number of PCIE instances>.
- */
- static int pcie_sm_regs[MAX_PCIE_SM_REGS];
- static int count;
- module_param_array(pcie_sm_regs, int, &count, 0644);
- MODULE_PARM_DESC(pcie_sm_regs, "This is needed to override the PWR_CTRL/MASK regs");
- /* PCIe State Manager instructions info */
- struct msm_pcie_sm_info {
- u32 branch_offset;
- u32 start_offset;
- u32 *sm_seq;
- u32 *branch_seq;
- u32 *reg_dump;
- int sm_seq_len;
- int sm_branch_len;
- int reg_dump_len;
- };
- /* CESTA power state index */
- enum msm_pcie_cesta_pwr_idx {
- POWER_STATE_0,
- POWER_STATE_1,
- MAX_POWER_STATE,
- };
- /* CESTA perf level index */
- enum msm_pcie_cesta_perf_idx {
- PERF_LVL_D3COLD,
- PERF_LVL_L1SS,
- PERF_LVL_GEN1,
- PERF_LVL_GEN2,
- PERF_LVL_GEN3,
- PERF_LVL_GEN4,
- MAX_PERF_LVL,
- };
- /* CESTA curr perf ol to strings */
- static const char * const msm_pcie_cesta_curr_perf_lvl[] = {
- "D3 cold state",
- "L1ss sleep state",
- "Gen1 speed",
- "Gen2 speed",
- "Gen3 speed",
- "Gen4 speed",
- "Invalid state",
- };
- /* CESTA usage scenarios */
- enum msm_pcie_cesta_map_idx {
- D3COLD_STATE, // Move to D3 Cold state
- D0_STATE, // Move to D0 state
- DRV_STATE, // Move to DRV state
- MAX_MAP_IDX,
- };
- /* CESTA states debug info */
- static const char * const msm_pcie_cesta_states[] = {
- "D3 Cold state",
- "D0 state",
- "DRV state",
- "Invalid state",
- };
- /* CESTA Power state to Perf level mapping w.r.t CESTA usage scenarios */
- static u32 msm_pcie_cesta_map[MAX_MAP_IDX][MAX_POWER_STATE] = {
- {PERF_LVL_D3COLD, PERF_LVL_D3COLD},
- {MAX_PERF_LVL, MAX_PERF_LVL},
- {PERF_LVL_L1SS, MAX_PERF_LVL},
- };
- #if IS_ENABLED(CONFIG_I2C)
- struct pcie_i2c_reg_update {
- u32 offset;
- u32 val;
- };
- /* i2c control interface for a i2c client device */
- struct pcie_i2c_ctrl {
- struct i2c_client *client;
- /* client specific register info */
- u32 gpio_config_reg;
- u32 ep_reset_reg;
- u32 ep_reset_gpio_mask;
- u32 *dump_regs;
- u32 dump_reg_count;
- struct pcie_i2c_reg_update *reg_update;
- u32 reg_update_count;
- u32 version_reg;
- bool force_i2c_setting;
- bool ep_reset_postlinkup;
- struct pcie_i2c_reg_update *switch_reg_update;
- u32 switch_reg_update_count;
- /* client specific callbacks */
- int (*client_i2c_read)(struct i2c_client *client, u32 reg_addr,
- u32 *val);
- int (*client_i2c_write)(struct i2c_client *client, u32 reg_addr,
- u32 val);
- int (*client_i2c_reset)(struct pcie_i2c_ctrl *i2c_ctrl, bool reset);
- void (*client_i2c_dump_regs)(struct pcie_i2c_ctrl *i2c_ctrl);
- void (*client_i2c_de_emphasis_wa)(struct pcie_i2c_ctrl *i2c_ctrl);
- };
- enum i2c_client_id {
- I2C_CLIENT_ID_NTN3,
- I2C_CLIENT_ID_MAX,
- };
- struct i2c_driver_data {
- enum i2c_client_id client_id;
- };
- #endif
- /* msm pcie device structure */
- struct msm_pcie_dev_t {
- struct platform_device *pdev;
- struct pci_dev *dev;
- struct regulator *gdsc_core;
- struct regulator *gdsc_phy;
- struct msm_pcie_vreg_info_t vreg[MSM_PCIE_MAX_VREG];
- struct msm_pcie_gpio_info_t gpio[MSM_PCIE_MAX_GPIO];
- struct msm_pcie_res_info_t res[MSM_PCIE_MAX_RES];
- struct msm_pcie_irq_info_t irq[MSM_PCIE_MAX_IRQ];
- struct msm_pcie_reset_info_t reset[MSM_PCIE_MAX_RESET];
- struct msm_pcie_reset_info_t pipe_reset[MSM_PCIE_MAX_PIPE_RESET];
- struct msm_pcie_reset_info_t linkdown_reset[MSM_PCIE_MAX_LINKDOWN_RESET];
- unsigned int num_pipe_clk;
- struct msm_pcie_clk_info_t *pipe_clk;
- unsigned int num_clk;
- struct msm_pcie_clk_info_t *clk;
- void __iomem *parf;
- void __iomem *phy;
- void __iomem *elbi;
- void __iomem *iatu;
- void __iomem *dm_core;
- void __iomem *conf;
- void __iomem *mhi;
- void __iomem *tcsr;
- void __iomem *rumi;
- uint32_t axi_bar_start;
- uint32_t axi_bar_end;
- uint32_t wake_n;
- uint32_t vreg_n;
- uint32_t gpio_n;
- uint32_t parf_deemph;
- uint32_t parf_swing;
- struct msm_pcie_vreg_info_t *cx_vreg;
- struct msm_pcie_vreg_info_t *mx_vreg;
- struct msm_pcie_bw_scale_info_t *bw_scale;
- u32 bw_gen_max;
- u32 link_width_max;
- struct clk *rate_change_clk;
- struct clk *pipe_clk_mux;
- struct clk *pipe_clk_ext_src;
- struct clk *phy_aux_clk_mux;
- struct clk *phy_aux_clk_ext_src;
- struct clk *ref_clk_src;
- struct clk *ahb_clk;
- bool cfg_access;
- bool apss_based_l1ss_sleep;
- spinlock_t cfg_lock;
- unsigned long irqsave_flags;
- struct mutex enumerate_lock;
- struct mutex setup_lock;
- struct irq_domain *irq_domain;
- enum msm_pcie_link_status link_status;
- bool user_suspend;
- bool disable_pc;
- struct pci_saved_state *default_state;
- struct pci_saved_state *saved_state;
- struct wakeup_source *ws;
- struct icc_path *icc_path;
- /*
- * Gets set when debugfs based l1 enable/disable is used
- * Gets unset when pcie_enable() API is called.
- */
- bool debugfs_l1;
- bool l0s_supported;
- bool l1_supported;
- bool l1ss_supported;
- bool l1_1_pcipm_supported;
- bool l1_2_pcipm_supported;
- bool l1_1_aspm_supported;
- bool l1_2_aspm_supported;
- uint32_t l1_2_th_scale;
- uint32_t l1_2_th_value;
- #ifdef CONFIG_SEC_PCIE_L1SS
- u32 l1ss_ltr_max_snoop_latency;
- u32 l1ss_tpoweron;
- #endif
- bool common_clk_en;
- bool clk_power_manage_en;
- bool aux_clk_sync;
- bool aer_enable;
- uint32_t smmu_sid_base;
- uint32_t link_check_max_count;
- uint32_t target_link_speed;
- uint32_t dt_target_link_speed;
- uint32_t current_link_speed;
- uint32_t target_link_width;
- uint32_t current_link_width;
- uint32_t n_fts;
- uint32_t ep_latency;
- uint32_t switch_latency;
- uint32_t wr_halt_size;
- uint32_t slv_addr_space_size;
- uint32_t phy_status_offset;
- uint32_t phy_status_bit;
- uint32_t phy_power_down_offset;
- uint32_t phy_aux_clk_config1_offset;
- uint32_t phy_pll_clk_enable1_offset;
- uint32_t eq_pset_req_vec;
- uint32_t core_preset;
- uint32_t eq_fmdc_t_min_phase23;
- uint32_t cpl_timeout;
- uint32_t current_bdf;
- uint32_t perst_delay_us_min;
- uint32_t perst_delay_us_max;
- uint32_t tlp_rd_size;
- uint32_t aux_clk_freq;
- bool linkdown_panic;
- uint32_t boot_option;
- uint32_t link_speed_override;
- bool lpi_enable;
- bool linkdown_recovery_enable;
- bool gdsc_clk_drv_ss_nonvotable;
- uint32_t pcie_parf_cesta_config;
- uint32_t rc_idx;
- uint32_t phy_ver;
- bool drv_ready;
- bool enumerated;
- struct work_struct handle_wake_work;
- struct work_struct handle_sbr_work;
- struct mutex recovery_lock;
- spinlock_t irq_lock;
- struct mutex aspm_lock;
- int prevent_l1;
- ulong linkdown_counter;
- ulong link_turned_on_counter;
- ulong link_turned_off_counter;
- #ifdef CONFIG_SEC_PCIE_AER
- ulong aer_irq_counter;
- #endif
- uint64_t l23_rdy_poll_timeout;
- bool suspending;
- ulong wake_counter;
- struct list_head enum_ep_list;
- struct list_head susp_ep_list;
- u32 num_parf_testbus_sel;
- u32 phy_len;
- struct msm_pcie_phy_info_t *phy_sequence;
- u32 tcsr_len;
- struct msm_pcie_tcsr_info_t *tcsr_config;
- u32 sid_info_len;
- struct msm_pcie_sid_info_t *sid_info;
- bool bridge_found;
- struct list_head event_reg_list;
- spinlock_t evt_reg_list_lock;
- bool power_on;
- void *ipc_log;
- void *ipc_log_long;
- void *ipc_log_dump;
- bool use_pinctrl;
- struct pinctrl *pinctrl;
- struct pinctrl_state *pins_default;
- struct pinctrl_state *pins_sleep;
- bool config_recovery;
- struct work_struct link_recover_wq;
- struct msm_pcie_drv_info *drv_info;
- struct work_struct drv_enable_pc_work;
- struct work_struct drv_disable_pc_work;
- /* cache drv pc req from RC client, by default drv pc is enabled */
- int drv_disable_pc_vote;
- struct mutex drv_pc_lock;
- struct completion speed_change_completion;
- const char *drv_name;
- bool drv_supported;
- bool panic_genspeed_mismatch;
- DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
- bool aer_dump;
- bool panic_on_aer;
- struct aer_stats *aer_stats;
- void (*rumi_init)(struct msm_pcie_dev_t *pcie_dev);
- u32 *filtered_bdfs;
- u32 bdf_count;
- u32 phy_debug_reg_len;
- u32 *phy_debug_reg;
- u32 parf_debug_reg_len;
- u32 *parf_debug_reg;
- u32 dbi_debug_reg_len;
- u32 *dbi_debug_reg;
- /* CESTA related structs */
- /* Device handler when using the crm driver APIs */
- const struct device *crm_dev;
- /* Register space of pcie state manager */
- void __iomem *pcie_sm;
- /* pcie state manager instructions sequence info */
- struct msm_pcie_sm_info *sm_info;
- /* Need to configure the l1ss TO when using cesta */
- u32 l1ss_timeout_us;
- u32 l1ss_sleep_disable;
- u32 clkreq_gpio;
- struct pci_host_bridge *bridge;
- bool no_client_based_bw_voting;
- #if IS_ENABLED(CONFIG_I2C)
- struct pcie_i2c_ctrl i2c_ctrl;
- #endif
- #ifdef CONFIG_SEC_PCIE_L1SS
- struct mutex l1ss_ctrl_lock;
- u32 l1ss_disable_flag;
- bool pending_l1ss_ctrl;
- bool ep_config_accessible;
- bool use_ep_loaded;
- bool ep_loaded;
- #endif
- #ifdef CONFIG_SEC_PCIE
- struct workqueue_struct *pcie_error_wq;
- struct delayed_work pcie_error_dwork;
- u32 pcie_error_defer_ms;
- u32 first_pcie_error;
- ulong pcie_error;
- const char *esoc_name;
- /* ssr notification */
- struct notifier_block ssr_nb;
- void *ssr_notifier;
- bool esoc_crashed;
- bool esoc_powerup;
- bool ignore_pcie_error;
- int subpcb_det_upper_gpio;
- int subpcb_det_upper_gpio_level;
- int subpcb_det_lower_gpio;
- int subpcb_det_lower_gpio_level;
- #endif
- #ifdef CONFIG_SEC_PANIC_PCIE_ERR
- u32 allow_linkup_retry;
- u32 remained_linkup_retry;
- #endif
- };
- #ifdef CONFIG_SEC_PCIE
- enum pcie_error_t {
- PCIE_ERROR_NONE,
- PCIE_ERROR_CLK_FAIL,
- PCIE_ERROR_PHY_INIT,
- PCIE_ERROR_TRAINING_FAIL,
- PCIE_ERROR_LINK_SPEED_MISMATCH,
- PCIE_ERROR_LINK_FAIL,
- PCIE_ERROR_AER,
- PCIE_ERROR_LINKDOWN,
- PCIE_ERROR_L23_NOT_RECEIVED,
- };
- #endif
- struct msm_root_dev_t {
- struct msm_pcie_dev_t *pcie_dev;
- struct pci_dev *pci_dev;
- };
- static u32 msm_pcie_keep_resources_on;
- /* high prio WQ */
- static struct workqueue_struct *mpcie_wq;
- /* debugfs values */
- static u32 rc_sel = BIT(0);
- static u32 base_sel;
- static u32 wr_offset;
- static u32 wr_mask;
- static u32 wr_value;
- static u32 __maybe_unused corr_counter_limit = 5;
- /* CRC8 table for BDF to SID translation */
- static u8 msm_pcie_crc8_table[CRC8_TABLE_SIZE];
- /* PCIe driver state */
- static struct pcie_drv_sta {
- u32 rc_num;
- unsigned long rc_drv_enabled;
- struct msm_pcie_dev_t *msm_pcie_dev;
- struct rpmsg_device *rpdev;
- struct work_struct drv_connect; /* connect worker */
- struct mutex drv_lock;
- struct mutex rpmsg_lock;
- /* ssr notification */
- struct notifier_block nb;
- void *notifier;
- } pcie_drv;
- #define PCIE_RC_DRV_ENABLED(rc_idx) test_bit((rc_idx), &pcie_drv.rc_drv_enabled)
- /* msm pcie device data */
- static struct msm_pcie_dev_t msm_pcie_dev[MAX_RC_NUM];
- /* regulators */
- static struct msm_pcie_vreg_info_t msm_pcie_vreg_info[MSM_PCIE_MAX_VREG] = {
- {NULL, "vreg-3p3", 0, 0, 0, false},
- {NULL, "vreg-1p2", 1200000, 1200000, 18200, true},
- {NULL, "vreg-0p9", 1000000, 1000000, 40000, true},
- {NULL, "vreg-cx", 0, 0, 0, false},
- {NULL, "vreg-mx", 0, 0, 0, false},
- {NULL, "vreg-qref", 880000, 880000, 25700, false},
- };
- /* GPIOs */
- static struct msm_pcie_gpio_info_t msm_pcie_gpio_info[MSM_PCIE_MAX_GPIO] = {
- {"perst-gpio", 0, 1, 0, 0, 1},
- {"wake-gpio", 0, 0, 0, 0, 0},
- {"qcom,ep-gpio", 0, 1, 1, 0, 0},
- {"card-presence-pin", 0, 0, 0, 0, 0}
- };
- /* resets */
- static struct msm_pcie_reset_info_t
- msm_pcie_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_RESET] = {
- {
- {NULL, "pcie_0_core_reset", false},
- {NULL, "pcie_phy_reset", false},
- {NULL, "pcie_phy_com_reset", false},
- {NULL, "pcie_phy_nocsr_com_phy_reset", false},
- {NULL, "pcie_0_phy_reset", false}
- },
- {
- {NULL, "pcie_1_core_reset", false},
- {NULL, "pcie_phy_reset", false},
- {NULL, "pcie_phy_com_reset", false},
- {NULL, "pcie_phy_nocsr_com_phy_reset", false},
- {NULL, "pcie_1_phy_reset", false}
- },
- {
- {NULL, "pcie_2_core_reset", false},
- {NULL, "pcie_phy_reset", false},
- {NULL, "pcie_phy_com_reset", false},
- {NULL, "pcie_phy_nocsr_com_phy_reset", false},
- {NULL, "pcie_2_phy_reset", false}
- },
- {
- {NULL, "pcie_3_core_reset", false},
- {NULL, "pcie_phy_reset", false},
- {NULL, "pcie_phy_com_reset", false},
- {NULL, "pcie_phy_nocsr_com_phy_reset", false},
- {NULL, "pcie_3_phy_reset", false}
- },
- {
- {NULL, "pcie_4_core_reset", false},
- {NULL, "pcie_phy_reset", false},
- {NULL, "pcie_phy_com_reset", false},
- {NULL, "pcie_phy_nocsr_com_phy_reset", false},
- {NULL, "pcie_4_phy_reset", false}
- }
- };
- /* pipe reset */
- static struct msm_pcie_reset_info_t
- msm_pcie_pipe_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_PIPE_RESET] = {
- {
- {NULL, "pcie_0_phy_pipe_reset", false}
- },
- {
- {NULL, "pcie_1_phy_pipe_reset", false}
- },
- {
- {NULL, "pcie_2_phy_pipe_reset", false}
- },
- {
- {NULL, "pcie_3_phy_pipe_reset", false}
- },
- {
- {NULL, "pcie_4_phy_pipe_reset", false}
- }
- };
- /* linkdown recovery resets */
- static struct msm_pcie_reset_info_t
- msm_pcie_linkdown_reset_info[MAX_RC_NUM][MSM_PCIE_MAX_LINKDOWN_RESET] = {
- {
- {NULL, "pcie_0_link_down_reset", false},
- {NULL, "pcie_0_phy_nocsr_com_phy_reset", false},
- },
- {
- {NULL, "pcie_1_link_down_reset", false},
- {NULL, "pcie_1_phy_nocsr_com_phy_reset", false},
- },
- {
- {NULL, "pcie_2_link_down_reset", false},
- {NULL, "pcie_2_phy_nocsr_com_phy_reset", false},
- },
- {
- {NULL, "pcie_3_link_down_reset", false},
- {NULL, "pcie_3_phy_nocsr_com_phy_reset", false},
- },
- {
- {NULL, "pcie_4_link_down_reset", false},
- {NULL, "pcie_4_phy_nocsr_com_phy_reset", false},
- },
- };
- /* resources */
- static const struct msm_pcie_res_info_t msm_pcie_res_info[MSM_PCIE_MAX_RES] = {
- {"parf", NULL, NULL},
- {"phy", NULL, NULL},
- {"dm_core", NULL, NULL},
- {"elbi", NULL, NULL},
- {"iatu", NULL, NULL},
- {"conf", NULL, NULL},
- {"pcie_sm", NULL, NULL},
- {"mhi", NULL, NULL},
- {"tcsr", NULL, NULL},
- {"rumi", NULL, NULL}
- };
/* irqs */
/*
 * Interrupt table; the second field (irq number) is 0 until resolved
 * at probe.  NOTE(review): order presumably mirrors an MSM_PCIE_INT_*
 * enum elsewhere in the file — confirm before reordering.
 */
static const struct msm_pcie_irq_info_t msm_pcie_irq_info[MSM_PCIE_MAX_IRQ] = {
	{"int_a", 0},
	{"int_b", 0},
	{"int_c", 0},
	{"int_d", 0},
	{"int_global_int", 0}
};
/* Selects which register region msm_pcie_reg_copy() reads. */
enum msm_pcie_reg_dump_type_t {
	MSM_PCIE_DUMP_PARF_REG = 0x0,	/* PARF wrapper registers (dev->parf) */
	MSM_PCIE_DUMP_DBI_REG,		/* DWC DBI space (dev->dm_core) */
	MSM_PCIE_DUMP_PHY_REG,		/* PHY registers (dev->phy) */
};
- /* Rpmsg device functions */
- static int msm_pcie_drv_rpmsg_probe(struct rpmsg_device *rpdev);
- static void msm_pcie_drv_rpmsg_remove(struct rpmsg_device *rpdev);
- static int msm_pcie_drv_rpmsg_cb(struct rpmsg_device *rpdev, void *data,
- int len, void *priv, u32 src);
#ifdef CONFIG_SEC_PCIE
/*
 * Bump both the cumulative and the daily counter for one PCIe statistic
 * of root complex @rc_idx in the SEC AP-health record.  Multi-statement
 * macro wrapped in do/while(0) so it is safe in unbraced if/else bodies.
 * Callers must ensure p_health is non-NULL first.
 */
#define INCREASE_PCIE_VALUE(rc_idx, variable) \
do { \
	p_health->pcie[rc_idx].variable++; \
	p_health->daily_pcie[rc_idx].variable++; \
} while (0)

/* Cached pointer to the SEC AP-health record; lazily read on first use. */
static ap_health_t *p_health;
- static void update_phyinit_fail_count(int rc)
- {
- if (unlikely(!p_health))
- p_health = sec_qc_ap_health_data_read();
- if (p_health) {
- INCREASE_PCIE_VALUE(rc, phy_init_fail_cnt);
- sec_qc_ap_health_data_write(p_health);
- }
- return;
- }
- static void update_linkdown_count(int rc)
- {
- if (unlikely(!p_health))
- p_health = sec_qc_ap_health_data_read();
- if (p_health) {
- INCREASE_PCIE_VALUE(rc, link_down_cnt);
- sec_qc_ap_health_data_write(p_health);
- }
- return;
- }
- static int pcie_get_cap_list_reg(struct msm_pcie_dev_t *dev, void **found_addr)
- {
- u32 current_offset = readl_relaxed(dev->conf + PCIE_CAP_PTR_OFFSET) & 0xFF;
- u32 val;
- while (current_offset) {
- if (current_offset % 4) {
- PCIE_INFO(dev, "wrong align. %d", current_offset);
- return -1;
- }
- val = readl_relaxed(dev->conf + current_offset);
- if ((val & 0xff) == PCIE20_CAP_ID) {
- *found_addr = dev->conf + current_offset;
- return 0;
- }
- current_offset = (val >> 8) & 0xff;
- }
- PCIE_INFO(dev, "can't find cap reg. %d", current_offset);
- return 1;
- }
- static void pcie_get_cur_link_bw(u32 rc_idx, u32 *speed, u32 *width)
- {
- struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
- u32 val;
- u32 offset = PCIE20_CAP + PCI_EXP_LNKSTA;
- u32 shift = offset % 4;
- if (shift)
- offset = (offset >> 2) << 2;
- val = readl_relaxed(dev->dm_core + offset);
- if (speed)
- *speed = (val >> (shift * 8)) & PCI_EXP_LNKSTA_CLS;
- if (width)
- *width = ((val >> (shift * 8)) & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
- return;
- }
- static u32 pcie_get_target_linkspeed(u32 rc_idx, u32 bus)
- {
- struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
- u32 val, shift;
- u32 offset = PCI_EXP_LNKCTL2;
- void *base = NULL;
- if (bus) {
- if (pcie_get_cap_list_reg(dev, &base))
- return 0xFF;
- } else
- base = dev->dm_core + PCIE20_CAP;
- shift = (u32)(((unsigned long)base + offset) % 4UL);
- val = readl_relaxed(base + offset - shift);
- return (val >> (shift * 8)) & 0xf;
- }
- static u32 pcie_get_max_linkspeed(u32 rc_idx, u32 bus)
- {
- struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
- u32 val, shift;
- u32 offset = PCI_EXP_LNKCAP;
- void *base = NULL;
- if (bus) {
- if (pcie_get_cap_list_reg(dev, &base))
- return 0xFF;
- } else
- base = dev->dm_core + PCIE20_CAP;
- shift = (u32)(((unsigned long)base + offset) % 4UL);
- val = readl_relaxed(base + offset - shift);
- return (val >> (shift * 8)) & 0xf;
- }
- static int msm_pcie_esoc_ssr_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
- {
- struct msm_pcie_dev_t *dev = container_of(nb, struct msm_pcie_dev_t, ssr_nb);
- struct qcom_ssr_notify_data *nd = data;
- PCIE_ERR(dev, "PCIe RC%d: %s notifier (action %d%s)\n",
- dev->rc_idx, nd->name ? nd->name : "", action, nd->crashed ? ", crashed" : "");
- dev->esoc_crashed = nd->crashed;
- if (action == QCOM_SSR_AFTER_POWERUP)
- dev->esoc_powerup = true;
- else
- dev->esoc_powerup = false;
- return NOTIFY_OK;
- }
- static bool is_esoc_alive(struct msm_pcie_dev_t *dev)
- {
- if (!dev->esoc_name)
- return true;
- if (dev->esoc_crashed)
- return false;
- if (!dev->esoc_powerup)
- return false;
- return true;
- }
/*
 * Decide whether a PCIe error on @msm_dev should escalate to an oops.
 * Returns false (skip the oops) when:
 *  - the RC is configured to ignore PCIe errors,
 *  - the deferred error worker is already queued for this RC,
 *  - RC0 hosts a QC WLAN device whose MHI execution environment shows
 *    it is not in mission mode (PBL/RDDM/transition), or is invalid,
 *  - the external SoC (if any) is not alive (see is_esoc_alive()).
 */
static bool is_need_pcie_error_oops(struct pci_dev *dev, struct msm_pcie_dev_t *msm_dev)
{
	/* global opt-out: this RC never panics on PCIe errors */
	if (msm_dev->ignore_pcie_error)
		return false;

	/* error already being handled asynchronously — don't double-report */
	if (msm_dev->pcie_error_wq) {
		if (delayed_work_pending(&msm_dev->pcie_error_dwork)) {
			PCIE_ERR(msm_dev, "RC%d pcie_error_dwork is already triggered. skip oops.\n", msm_dev->rc_idx);
			return false;
		}
	}

	if (msm_dev->rc_idx == 0 && dev && dev->vendor == PCIE_VENDOR_ID_QCOM) {
		/* This is only to avoid "RC0 "PCIe RC0 Fail(L23)" panic where RC0 is used for QC wifi */
		/* QC wifi PBL doesn't handle PME_TURNOFF_MSG. */
		/* Caution : dev is expected as null except "RC0 "PCIe RC0 Fail(L23)" panic case */
		struct mhi_controller *mhi_cntrl;
		u32 domain = pci_domain_nr(dev->bus);
		u32 bus = dev->bus->number;
		u32 dev_id = dev->device;
		u32 slot = PCI_SLOT(dev->devfn);

		PCIE_ERR(msm_dev, "RC%d MHI EE Check - Domain %d Bus %d DevID %d Slot %d\n",
			msm_dev->rc_idx, domain, bus, dev_id, slot);
		mhi_cntrl = (struct mhi_controller *)dev->dev.driver_data;
		if (mhi_cntrl) {
			PCIE_ERR(msm_dev, "RC%d MHI EE is %d\n", msm_dev->rc_idx, mhi_cntrl->ee);
			/* reject EE values outside the valid enum range */
			if (mhi_cntrl->ee < MHI_EE_PBL || mhi_cntrl->ee >= MHI_EE_MAX) {
				PCIE_ERR(msm_dev, "RC%d skip oops. Invalid mhi_cntrl->ee value\n", msm_dev->rc_idx);
				return false;
			}
			switch (mhi_cntrl->ee) {
			case MHI_EE_PBL:
			case MHI_EE_RDDM:
			case MHI_EE_DISABLE_TRANSITION:
				/* EP not in mission mode; an oops would not help */
				PCIE_ERR(msm_dev, "RC%d skip oops.\n", msm_dev->rc_idx);
				return false;
			default:
				break;
			}
		}
	}

	/* finally, only oops when the external SoC (if present) is sane */
	return is_esoc_alive(msm_dev);
}
- #endif
#ifdef CONFIG_SEC_PCIE_L1SS
/* for l1ss ctrl */
/*
 * Per-requester L1ss enable/disable vote.  Each requester owns one bit
 * of the aggregate vote mask and may have a timeout after which its
 * vote is auto-released by the delayed work.
 */
struct l1ss_ctrl {
	const unsigned int id;		/* requester index (L1SS_*) */
	const u32 flag;			/* requester's bit in the vote mask */
	const char *name;		/* printable requester name */
	const unsigned long timeout;	/* auto-release delay, 0 = none —
					 * unit presumably seconds; TODO confirm
					 * against the dwork scheduling code */
	struct delayed_work dwork;	/* worker that releases the vote */
};

static struct l1ss_ctrl l1ss_ctrls[L1SS_MAX] = {
	{L1SS_SYSFS, BIT(L1SS_SYSFS), "SYSFS", 0, },
	{L1SS_MST, BIT(L1SS_MST), "MST", 120, },
	{L1SS_AUDIO, BIT(L1SS_AUDIO), "AUDIO", 0, },
};

/* workqueue that services the timeout dworks above */
struct workqueue_struct *l1ss_ctrl_wq;

static int sec_pcie_enable_ep_l1(struct pci_dev *pdev, void *data);
#endif
- static int msm_pcie_drv_send_rpmsg(struct msm_pcie_dev_t *pcie_dev,
- struct msm_pcie_drv_msg *msg);
- static void msm_pcie_config_sid(struct msm_pcie_dev_t *dev);
- static void msm_pcie_config_l0s_disable_all(struct msm_pcie_dev_t *dev,
- struct pci_bus *bus);
- static void msm_pcie_config_l1_disable_all(struct msm_pcie_dev_t *dev,
- struct pci_bus *bus);
- static void msm_pcie_config_l1ss_disable_all(struct msm_pcie_dev_t *dev,
- struct pci_bus *bus);
- static void msm_pcie_config_l0s_enable_all(struct msm_pcie_dev_t *dev);
- static void msm_pcie_config_l1_enable_all(struct msm_pcie_dev_t *dev);
- static void msm_pcie_config_l1ss_enable_all(struct msm_pcie_dev_t *dev);
- static void msm_pcie_check_l1ss_support_all(struct msm_pcie_dev_t *dev);
- static void msm_pcie_config_link_pm(struct msm_pcie_dev_t *dev, bool enable);
- static int msm_pcie_set_link_width(struct msm_pcie_dev_t *pcie_dev,
- u16 target_link_width);
- static void msm_pcie_disable(struct msm_pcie_dev_t *dev);
- static int msm_pcie_enable(struct msm_pcie_dev_t *dev);
- static u32 msm_pcie_reg_copy(struct msm_pcie_dev_t *dev,
- u8 *buf, u32 size, u8 reg_len,
- enum msm_pcie_reg_dump_type_t type)
- {
- u32 ret = 0, val, i;
- u32 *seq = NULL;
- u32 seq_len = 0;
- void __iomem *base;
- PCIE_DUMP(dev, "RC%d buf=0x%x size=%u, reg_len=%u\n",
- dev->rc_idx, buf, size, reg_len);
- if (type == MSM_PCIE_DUMP_PARF_REG) {
- seq = dev->parf_debug_reg;
- seq_len = dev->parf_debug_reg_len;
- base = dev->parf;
- } else if (type == MSM_PCIE_DUMP_DBI_REG) {
- seq = dev->dbi_debug_reg;
- seq_len = dev->dbi_debug_reg_len;
- base = dev->dm_core;
- } else if (type == MSM_PCIE_DUMP_PHY_REG) {
- seq = dev->phy_debug_reg;
- seq_len = dev->phy_debug_reg_len;
- base = dev->phy;
- } else {
- return ret;
- }
- if (seq) {
- i = seq_len;
- while (i && (ret + reg_len <= size)) {
- PCIE_DUMP(dev, "RC%d *seq:%u\n",
- dev->rc_idx, *seq);
- val = readl_relaxed(base + *seq);
- memcpy(buf, &val, reg_len);
- i--;
- buf += reg_len;
- ret += reg_len;
- seq++;
- }
- }
- return ret;
- }
- int msm_pcie_reg_dump(struct pci_dev *pci_dev, u8 *buff, u32 len)
- {
- struct pci_dev *root_pci_dev;
- struct msm_pcie_dev_t *pcie_dev;
- u32 offset = 0;
- if (!pci_dev)
- return -EINVAL;
- root_pci_dev = pcie_find_root_port(pci_dev);
- if (!root_pci_dev)
- return -ENODEV;
- pcie_dev = PCIE_BUS_PRIV_DATA(root_pci_dev->bus);
- if (!pcie_dev) {
- pr_err("PCIe: did not find RC for pci endpoint device.\n");
- return -ENODEV;
- }
- PCIE_DUMP(pcie_dev, "RC%d hang event dump buff=0x%x len=%u\n",
- pcie_dev->rc_idx, buff, len);
- if (pcie_dev->link_status == MSM_PCIE_LINK_DOWN) {
- pr_err("PCIe: the link is in down state\n");
- return -ENODEV;
- }
- if (pcie_dev->suspending) {
- pr_err("PCIe: the device is in suspend state\n");
- return -ENODEV;
- }
- offset = msm_pcie_reg_copy(pcie_dev, buff, len,
- 4, MSM_PCIE_DUMP_PARF_REG);
- buff += offset;
- len -= offset;
- /* check PHY status before dumping DBI registers */
- if (!(readl_relaxed(pcie_dev->phy + pcie_dev->phy_status_offset) &
- BIT(pcie_dev->phy_status_bit))) {
- PCIE_DUMP(pcie_dev, "RC%d Dump DBI registers\n",
- pcie_dev->rc_idx);
- offset = msm_pcie_reg_copy(pcie_dev, buff, len,
- 4, MSM_PCIE_DUMP_DBI_REG);
- } else {
- /* PHY status bit is set to 1 so dump 0's in dbi buffer space */
- PCIE_DUMP(pcie_dev, "RC%d PHY is off, skip DBI\n",
- pcie_dev->rc_idx);
- memset(buff, 0, pcie_dev->dbi_debug_reg_len * 4);
- offset = pcie_dev->dbi_debug_reg_len * 4;
- }
- buff += offset;
- len -= offset;
- msm_pcie_reg_copy(pcie_dev, buff, len,
- 1, MSM_PCIE_DUMP_PHY_REG);
- PCIE_DUMP(pcie_dev, "RC%d hang event Exit\n", pcie_dev->rc_idx);
- return 0;
- }
- EXPORT_SYMBOL(msm_pcie_reg_dump);
/*
 * Write @value to @base + @offset, then read it back so the posted
 * MMIO write is guaranteed to have reached the device before returning.
 */
static void msm_pcie_write_reg(void __iomem *base, u32 offset, u32 value)
{
	writel_relaxed(value, base + offset);

	/* ensure that changes propagated to the hardware */
	readl_relaxed(base + offset);
}
- static void msm_pcie_write_reg_field(void __iomem *base, u32 offset,
- const u32 mask, u32 val)
- {
- u32 shift = __ffs(mask);
- u32 tmp = readl_relaxed(base + offset);
- tmp &= ~mask; /* clear written bits */
- val = tmp | (val << shift);
- writel_relaxed(val, base + offset);
- /* ensure that changes propagated to the hardware */
- readl_relaxed(base + offset);
- }
- static void msm_pcie_clear_set_reg(void __iomem *base, u32 pos,
- u32 clear, u32 set)
- {
- u32 val;
- val = readl_relaxed(base + pos);
- val &= ~clear;
- val |= set;
- writel_relaxed(val, base + pos);
- /* ensure that changes propagated to the hardware */
- readl_relaxed(base + pos);
- }
- static void msm_pcie_config_clear_set_dword(struct pci_dev *pdev,
- int pos, u32 clear, u32 set)
- {
- u32 val;
- pci_read_config_dword(pdev, pos, &val);
- val &= ~clear;
- val |= set;
- pci_write_config_dword(pdev, pos, val);
- }
/*
 * msm_pcie_rumi_init - bring-up sequence for the RUMI (emulation) glue.
 * Puts the controller in RC mode and pulses its resets.  Offsets and
 * values are emulation-platform magic; the sleeps give the emulated
 * logic time to settle between steps.  Statement order is significant.
 */
static void msm_pcie_rumi_init(struct msm_pcie_dev_t *pcie_dev)
{
	u32 val;
	u32 reset_offs = 0x04;
	u32 phy_ctrl_offs = 0x40;

	PCIE_DBG(pcie_dev, "PCIe: RC%d: enter.\n", pcie_dev->rc_idx);

	/* configure PCIe to RC mode */
	msm_pcie_write_reg(pcie_dev->rumi, 0x54, 0x7c70);

	/* set bit 12 of the PHY control register — meaning not visible
	 * here; presumably holds the PHY quiescent during reset. TODO confirm.
	 */
	val = readl_relaxed(pcie_dev->rumi + phy_ctrl_offs) | 0x1000;
	msm_pcie_write_reg(pcie_dev->rumi, phy_ctrl_offs, val);
	usleep_range(10000, 10001);

	/* pulse the reset register: 0x800 -> all-ones -> 0x800 -> 0 */
	msm_pcie_write_reg(pcie_dev->rumi, reset_offs, 0x800);
	usleep_range(50000, 50001);
	msm_pcie_write_reg(pcie_dev->rumi, reset_offs, 0xFFFFFFFF);
	usleep_range(50000, 50001);
	msm_pcie_write_reg(pcie_dev->rumi, reset_offs, 0x800);
	usleep_range(50000, 50001);
	msm_pcie_write_reg(pcie_dev->rumi, reset_offs, 0);
	usleep_range(50000, 50001);

	/* clear bit 12, wait, then clear bit 0 of the PHY control register */
	val = readl_relaxed(pcie_dev->rumi + phy_ctrl_offs) & 0xFFFFEFFF;
	msm_pcie_write_reg(pcie_dev->rumi, phy_ctrl_offs, val);
	usleep_range(10000, 10001);
	val = readl_relaxed(pcie_dev->rumi + phy_ctrl_offs) & 0xFFFFFFFE;
	msm_pcie_write_reg(pcie_dev->rumi, phy_ctrl_offs, val);
}
- static void pcie_phy_dump(struct msm_pcie_dev_t *dev)
- {
- int i, size;
- size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
- for (i = 0; i < size; i += 32) {
- PCIE_DUMP(dev,
- "PCIe PHY of RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
- dev->rc_idx, i,
- readl_relaxed(dev->phy + i),
- readl_relaxed(dev->phy + (i + 4)),
- readl_relaxed(dev->phy + (i + 8)),
- readl_relaxed(dev->phy + (i + 12)),
- readl_relaxed(dev->phy + (i + 16)),
- readl_relaxed(dev->phy + (i + 20)),
- readl_relaxed(dev->phy + (i + 24)),
- readl_relaxed(dev->phy + (i + 28)));
- }
- }
- static void pcie_tcsr_init(struct msm_pcie_dev_t *dev)
- {
- int i;
- struct msm_pcie_tcsr_info_t *tcsr_cfg;
- i = dev->tcsr_len;
- tcsr_cfg = dev->tcsr_config;
- while (i--) {
- msm_pcie_write_reg(dev->tcsr,
- tcsr_cfg->offset,
- tcsr_cfg->val);
- tcsr_cfg++;
- }
- }
- static int msm_pcie_check_align(struct msm_pcie_dev_t *dev,
- u32 offset)
- {
- if (offset % 4) {
- PCIE_ERR(dev,
- "PCIe: RC%d: offset 0x%x is not correctly aligned\n",
- dev->rc_idx, offset);
- return MSM_PCIE_ERROR;
- }
- return 0;
- }
- static bool msm_pcie_dll_link_active(struct msm_pcie_dev_t *dev)
- {
- return (readl_relaxed(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS) &
- PCIE_CAP_DLL_ACTIVE);
- }
/*
 * Verify the link is really usable.  Checks, in order:
 *  - driver bookkeeping (@check_sw_stts): link_status must be ENABLED,
 *  - hardware: the DLL-active bit must be set,
 *  - optionally (@check_ep) that @pcidev's config space responds.
 * Each failing check logs and returns false; the order determines which
 * message is emitted, so it must not be rearranged casually.
 */
static bool msm_pcie_confirm_linkup(struct msm_pcie_dev_t *dev,
	bool check_sw_stts,
	bool check_ep,
	struct pci_dev *pcidev)
{
	if (check_sw_stts && (dev->link_status != MSM_PCIE_LINK_ENABLED)) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not enabled.\n",
			dev->rc_idx);
		return false;
	}

	if (!msm_pcie_dll_link_active(dev)) {
		PCIE_DBG(dev, "PCIe: The link of RC %d is not up.\n",
			dev->rc_idx);
		return false;
	}

	if (check_ep && !pci_device_is_present(pcidev)) {
		PCIE_ERR(dev,
			"PCIe: RC%d: Config space access failed for BDF 0x%04x\n",
			dev->rc_idx,
			PCI_DEVID(pcidev->bus->number, pcidev->devfn));
		return false;
	}

	return true;
}
- static void msm_pcie_write_mask(void __iomem *addr,
- uint32_t clear_mask, uint32_t set_mask)
- {
- uint32_t val;
- val = (readl_relaxed(addr) & ~clear_mask) | set_mask;
- writel_relaxed(val, addr);
- /* ensure data is written to hardware register */
- readl_relaxed(addr);
- }
/*
 * Log the PARF view of the controller: first walk every testbus select
 * value (restoring PARF_SYS_CTRL afterwards), then hex-dump the PARF
 * register file up to the BDF-to-SID table, 32 bytes per line.
 */
static void pcie_parf_dump(struct msm_pcie_dev_t *dev)
{
	int i;
	u32 original;

	PCIE_DUMP(dev, "PCIe: RC%d PARF testbus\n", dev->rc_idx);

	original = readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL);
	for (i = 0; i <= dev->num_parf_testbus_sel; i++) {
		/* testbus select field occupies bits [23:16] of SYS_CTRL */
		msm_pcie_write_mask(dev->parf + PCIE20_PARF_SYS_CTRL,
				0xFF0000, i << 16);
		PCIE_DUMP(dev,
			"RC%d: PARF_SYS_CTRL: 0%08x PARF_TEST_BUS: 0%08x\n",
			dev->rc_idx,
			readl_relaxed(dev->parf + PCIE20_PARF_SYS_CTRL),
			readl_relaxed(dev->parf + PCIE20_PARF_TEST_BUS));
	}

	/* restore SYS_CTRL to its pre-dump value */
	msm_pcie_write_reg(dev->parf, PCIE20_PARF_SYS_CTRL, original);

	PCIE_DUMP(dev, "PCIe: RC%d PARF register dump\n", dev->rc_idx);

	for (i = 0; i < PCIE20_PARF_BDF_TO_SID_TABLE_N; i += 32) {
		PCIE_DUMP(dev,
			"RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
			dev->rc_idx, i,
			readl_relaxed(dev->parf + i),
			readl_relaxed(dev->parf + (i + 4)),
			readl_relaxed(dev->parf + (i + 8)),
			readl_relaxed(dev->parf + (i + 12)),
			readl_relaxed(dev->parf + (i + 16)),
			readl_relaxed(dev->parf + (i + 20)),
			readl_relaxed(dev->parf + (i + 24)),
			readl_relaxed(dev->parf + (i + 28)));
	}
}
/*
 * Dump the CESTA state-manager registers listed in
 * dev->sm_info->reg_dump.  No-op when this RC has no state manager.
 */
static void pcie_sm_dump(struct msm_pcie_dev_t *dev)
{
	int i;
	u32 size;

	if (!dev->pcie_sm)
		return;

	PCIE_DUMP(dev, "PCIe: RC%d State Manager reg dump\n", dev->rc_idx);

	size = resource_size(dev->res[MSM_PCIE_RES_SM].resource);

	/* NOTE(review): the bound compares the table index i against the
	 * region size in bytes; it looks like reg_dump[i] (the offset being
	 * read) was meant to be range-checked instead — confirm intent.
	 */
	for (i = 0; i < dev->sm_info->reg_dump_len && i < size; i++) {
		PCIE_DUMP(dev,
			"RC%d: 0x%04x %08x\n",
			dev->rc_idx, dev->sm_info->reg_dump[i],
			readl_relaxed(dev->pcie_sm + dev->sm_info->reg_dump[i]));
	}
}
- static void pcie_crm_dump(struct msm_pcie_dev_t *dev)
- {
- int ret;
- if (!dev->pcie_sm)
- return;
- ret = crm_dump_regs("pcie_crm");
- if (ret)
- PCIE_DUMP(dev, "PCIe: RC%d Error dumping crm regs %d\n",
- dev->rc_idx, ret);
- }
- static void pcie_dm_core_dump(struct msm_pcie_dev_t *dev)
- {
- int i, size;
- PCIE_DUMP(dev, "PCIe: RC%d DBI/dm_core register dump\n", dev->rc_idx);
- size = resource_size(dev->res[MSM_PCIE_RES_DM_CORE].resource);
- for (i = 0; i < size; i += 32) {
- PCIE_DUMP(dev,
- "RC%d: 0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
- dev->rc_idx, i,
- readl_relaxed(dev->dm_core + i),
- readl_relaxed(dev->dm_core + (i + 4)),
- readl_relaxed(dev->dm_core + (i + 8)),
- readl_relaxed(dev->dm_core + (i + 12)),
- readl_relaxed(dev->dm_core + (i + 16)),
- readl_relaxed(dev->dm_core + (i + 20)),
- readl_relaxed(dev->dm_core + (i + 24)),
- readl_relaxed(dev->dm_core + (i + 28)));
- }
- }
/**
 * msm_pcie_loopback - configure RC in loopback mode and test loopback mode
 * @dev: root complex
 * @local: If true then use local (PIPE) loopback else use remote loopback
 *
 * Maps a 4K window at DBI+8MB as the traffic source, points iATU region
 * 1 at a DMA-coherent 4K destination buffer, enables loopback, writes
 * random bytes through the window and compares source vs destination.
 * Results are reported via PCIE_DBG_FS logs only; no return value.
 */
static void msm_pcie_loopback(struct msm_pcie_dev_t *dev, bool local)
{
	/* PCIe DBI base + 8MB as initial PCIe address to be translated to target address */
	phys_addr_t loopback_lbar_phy =
		dev->res[MSM_PCIE_RES_DM_CORE].resource->start + SZ_8M;
	u8 *src_vir_addr;
	void __iomem *iatu_base_vir;
	u32 dbi_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
	u32 iatu_base_phy, iatu_ctrl1_offset, iatu_ctrl2_offset, iatu_lbar_offset, iatu_ubar_offset,
		iatu_lar_offset, iatu_ltar_offset, iatu_utar_offset;
	/* todo: modify if want to use a different iATU region. Default is 1 */
	u32 iatu_n = 1;
	u32 type = 0x0;	/* CTRL1 type 0 = MEM transactions */
	dma_addr_t loopback_dst_addr;
	u8 *loopback_dst_vir;
	u32 ltar_addr_lo;
	bool loopback_test_fail = false;
	int i = 0;

	src_vir_addr = (u8 *)ioremap(loopback_lbar_phy, SZ_4K);
	if (!src_vir_addr) {
		PCIE_ERR(dev, "PCIe: RC%d: ioremap fails for loopback_lbar_phy\n", dev->rc_idx);
		return;
	}

	/*
	 * Use platform dev to get buffer. Doing so will
	 * require change in PCIe platform devicetree to have SMMU/IOMMU tied to
	 * this device and memory region created which can be accessed by PCIe controller.
	 * Refer to change change-id: I15333e3dbf6e67d59538a807ed9622ea10c56554
	 */
	PCIE_DBG_FS(dev, "PCIe: RC%d: Allocate 4K DDR memory and map LBAR.\n", dev->rc_idx);

	if (dma_set_mask_and_coherent(&dev->pdev->dev, DMA_BIT_MASK(64))) {
		PCIE_ERR(dev, "PCIe: RC%d: DMA set mask failed\n", dev->rc_idx);
		iounmap(src_vir_addr);
		return;
	}

	loopback_dst_vir = dma_alloc_coherent(&dev->pdev->dev, SZ_4K,
		&loopback_dst_addr, GFP_KERNEL);
	if (!loopback_dst_vir) {
		PCIE_DBG_FS(dev, "PCIe: RC%d: failed to dma_alloc_coherent.\n", dev->rc_idx);
		iounmap(src_vir_addr);
		return;
	}

	PCIE_DBG_FS(dev, "PCIe: RC%d: VIR DDR memory address: 0x%pK\n",
		dev->rc_idx, loopback_dst_vir);
	PCIE_DBG_FS(dev, "PCIe: RC%d: IOVA DDR memory address: %pad\n",
		dev->rc_idx, &loopback_dst_addr);

	ltar_addr_lo = lower_32_bits(loopback_dst_addr);
	/* need to 4K aligned */
	ltar_addr_lo = rounddown(ltar_addr_lo, SZ_4K);

	if (local) {
		PCIE_DBG_FS(dev, "PCIe: RC%d: Configure Local Loopback.\n", dev->rc_idx);

		/* Disable Gen3 equalization */
		msm_pcie_write_mask(dev->dm_core + PCIE_GEN3_RELATED,
			0, BIT(16));
		PCIE_DBG_FS(dev, "PCIe: RC%d: 0x%x: 0x%x\n",
			dev->rc_idx, dbi_base_addr + PCIE_GEN3_RELATED,
			readl_relaxed(dev->dm_core + PCIE_GEN3_RELATED));

		/* Enable pipe loopback */
		msm_pcie_write_mask(dev->dm_core + PCIE20_PIPE_LOOPBACK_CONTROL,
			0, BIT(31));
		PCIE_DBG_FS(dev, "PCIe: RC%d: 0x%x: 0x%x\n",
			dev->rc_idx, dbi_base_addr + PCIE20_PIPE_LOOPBACK_CONTROL,
			readl_relaxed(dev->dm_core + PCIE20_PIPE_LOOPBACK_CONTROL));
	} else {
		PCIE_DBG_FS(dev, "PCIe: RC%d: Configure remote Loopback.\n", dev->rc_idx);
	}

	/* Enable Loopback */
	msm_pcie_write_mask(dev->dm_core + PCIE20_PORT_LINK_CTRL_REG, 0, BIT(2));
	/* Set BME for RC */
	msm_pcie_write_mask(dev->dm_core + PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));
	PCIE_DBG_FS(dev, "PCIe: RC%d: 0x%x: 0x%x\n",
		dev->rc_idx, dbi_base_addr + PCIE20_PORT_LINK_CTRL_REG,
		readl_relaxed(dev->dm_core + PCIE20_PORT_LINK_CTRL_REG));

	iatu_base_vir = dev->iatu;
	iatu_base_phy = dev->res[MSM_PCIE_RES_IATU].resource->start;

	iatu_ctrl1_offset = PCIE_IATU_CTRL1(iatu_n);
	iatu_ctrl2_offset = PCIE_IATU_CTRL2(iatu_n);
	iatu_lbar_offset = PCIE_IATU_LBAR(iatu_n);
	iatu_ubar_offset = PCIE_IATU_UBAR(iatu_n);
	iatu_lar_offset = PCIE_IATU_LAR(iatu_n);
	iatu_ltar_offset = PCIE_IATU_LTAR(iatu_n);
	iatu_utar_offset = PCIE_IATU_UTAR(iatu_n);

	PCIE_DBG_FS(dev, "PCIe: RC%d: Setup iATU.\n", dev->rc_idx);

	/* Switch off region before changing it */
	msm_pcie_write_reg(iatu_base_vir, iatu_ctrl2_offset, 0);

	/* Setup for address matching */
	writel_relaxed(type, iatu_base_vir + iatu_ctrl1_offset);
	PCIE_DBG_FS(dev, "PCIe: RC%d: PCIE20_PLR_IATU_CTRL1:\t0x%x: 0x%x\n",
		dev->rc_idx, iatu_base_phy + iatu_ctrl1_offset,
		readl_relaxed(iatu_base_vir + iatu_ctrl1_offset));

	/* Program base address to be translated */
	writel_relaxed(loopback_lbar_phy, iatu_base_vir + iatu_lbar_offset);
	PCIE_DBG_FS(dev, "PCIe: RC%d: PCIE20_PLR_IATU_LBAR:\t0x%x: 0x%x\n",
		dev->rc_idx, iatu_base_phy + iatu_lbar_offset,
		readl_relaxed(iatu_base_vir + iatu_lbar_offset));

	writel_relaxed(0x0, iatu_base_vir + iatu_ubar_offset);
	PCIE_DBG_FS(dev, "PCIe: RC%d: PCIE20_PLR_IATU_UBAR:\t0x%x: 0x%x\n",
		dev->rc_idx, iatu_base_phy + iatu_ubar_offset,
		readl_relaxed(iatu_base_vir + iatu_ubar_offset));

	/* Program end address to be translated
	 * NOTE(review): marks an 8K-1 window while only 4K is mapped and
	 * allocated — confirm the extra 4K is intentional.
	 */
	writel_relaxed(loopback_lbar_phy + 0x1FFF, iatu_base_vir + iatu_lar_offset);
	PCIE_DBG_FS(dev, "PCIe: RC%d: PCIE20_PLR_IATU_LAR:\t0x%x: 0x%x\n",
		dev->rc_idx, iatu_base_phy + iatu_lar_offset,
		readl_relaxed(iatu_base_vir + iatu_lar_offset));

	/* Program base address of tranlated (new address) */
	writel_relaxed(ltar_addr_lo, iatu_base_vir + iatu_ltar_offset);
	PCIE_DBG_FS(dev, "PCIe: RC%d: PCIE20_PLR_IATU_LTAR:\t0x%x: 0x%x\n",
		dev->rc_idx, iatu_base_phy + iatu_ltar_offset,
		readl_relaxed(iatu_base_vir + iatu_ltar_offset));

	writel_relaxed(upper_32_bits(loopback_dst_addr), iatu_base_vir + iatu_utar_offset);
	PCIE_DBG_FS(dev, "PCIe: RC%d: PCIE20_PLR_IATU_UTAR:\t0x%x: 0x%x\n",
		dev->rc_idx, iatu_base_phy + iatu_utar_offset,
		readl_relaxed(iatu_base_vir + iatu_utar_offset));

	/* Enable this iATU region */
	writel_relaxed(BIT(31), iatu_base_vir + iatu_ctrl2_offset);
	PCIE_DBG_FS(dev, "PCIe: RC%d: PCIE20_PLR_IATU_CTRL2:\t0x%x: 0x%x\n",
		dev->rc_idx, iatu_base_phy + iatu_ctrl2_offset,
		readl_relaxed(iatu_base_vir + iatu_ctrl2_offset));

	PCIE_DBG_FS(dev, "PCIe RC%d: LTSSM_STATE: %s\n", dev->rc_idx,
		TO_LTSSM_STR((readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS) >> 12) & 0x3f));

	/* Fill the src buffer with random data */
	get_random_bytes(src_vir_addr, SZ_4K);
	usleep_range(100, 101);

	/* compare byte-for-byte what came back through the loop */
	for (i = 0; i < SZ_4K; i++) {
		if (src_vir_addr[i] != loopback_dst_vir[i]) {
			PCIE_DBG_FS(dev, "PCIe: RC%d: exp %x: got %x\n",
				dev->rc_idx, src_vir_addr[i], loopback_dst_vir[i]);
			loopback_test_fail = true;
		}
	}

	if (loopback_test_fail)
		PCIE_DBG_FS(dev, "PCIe: RC%d: %s Loopback Test failed\n",
			dev->rc_idx, local ? "Local" : "Remote");
	else
		PCIE_DBG_FS(dev, "PCIe: RC%d: %s Loopback Test Passed\n",
			dev->rc_idx, local ? "Local" : "Remote");

	iounmap(src_vir_addr);
	dma_free_coherent(&dev->pdev->dev, SZ_4K, loopback_dst_vir, loopback_dst_addr);
}
- static void msm_pcie_show_status(struct msm_pcie_dev_t *dev)
- {
- PCIE_DBG_FS(dev, "PCIe: RC%d is %s enumerated\n",
- dev->rc_idx, dev->enumerated ? "" : "not");
- PCIE_DBG_FS(dev, "PCIe: link is %s\n",
- (dev->link_status == MSM_PCIE_LINK_ENABLED)
- ? "enabled" : "disabled");
- PCIE_DBG_FS(dev, "cfg_access is %s allowed\n",
- dev->cfg_access ? "" : "not");
- PCIE_DBG_FS(dev, "use_pinctrl is %d\n",
- dev->use_pinctrl);
- PCIE_DBG_FS(dev, "aux_clk_freq is %d\n",
- dev->aux_clk_freq);
- PCIE_DBG_FS(dev, "user_suspend is %d\n",
- dev->user_suspend);
- PCIE_DBG_FS(dev, "num_parf_testbus_sel is 0x%x",
- dev->num_parf_testbus_sel);
- PCIE_DBG_FS(dev, "phy_len is %d",
- dev->phy_len);
- PCIE_DBG_FS(dev, "num_pipe_clk: %d\n", dev->num_pipe_clk);
- PCIE_DBG_FS(dev, "num_clk: %d\n", dev->num_clk);
- PCIE_DBG_FS(dev, "disable_pc is %d",
- dev->disable_pc);
- PCIE_DBG_FS(dev, "l0s_supported is %s supported\n",
- dev->l0s_supported ? "" : "not");
- PCIE_DBG_FS(dev, "l1_supported is %s supported\n",
- dev->l1_supported ? "" : "not");
- PCIE_DBG_FS(dev, "l1ss_supported is %s supported\n",
- dev->l1ss_supported ? "" : "not");
- PCIE_DBG_FS(dev, "l1_1_pcipm_supported is %s supported\n",
- dev->l1_1_pcipm_supported ? "" : "not");
- PCIE_DBG_FS(dev, "l1_2_pcipm_supported is %s supported\n",
- dev->l1_2_pcipm_supported ? "" : "not");
- PCIE_DBG_FS(dev, "l1_1_aspm_supported is %s supported\n",
- dev->l1_1_aspm_supported ? "" : "not");
- PCIE_DBG_FS(dev, "l1_2_aspm_supported is %s supported\n",
- dev->l1_2_aspm_supported ? "" : "not");
- PCIE_DBG_FS(dev, "l1_2_th_scale is %d\n",
- dev->l1_2_th_scale);
- PCIE_DBG_FS(dev, "l1_2_th_value is %d\n",
- dev->l1_2_th_value);
- PCIE_DBG_FS(dev, "common_clk_en is %d\n",
- dev->common_clk_en);
- PCIE_DBG_FS(dev, "clk_power_manage_en is %d\n",
- dev->clk_power_manage_en);
- PCIE_DBG_FS(dev, "aux_clk_sync is %d\n",
- dev->aux_clk_sync);
- PCIE_DBG_FS(dev, "AER is %s enable\n",
- dev->aer_enable ? "" : "not");
- PCIE_DBG_FS(dev, "boot_option is 0x%x\n",
- dev->boot_option);
- PCIE_DBG_FS(dev, "link_speed_override is 0x%x\n",
- dev->link_speed_override);
- PCIE_DBG_FS(dev, "phy_ver is %d\n",
- dev->phy_ver);
- PCIE_DBG_FS(dev, "drv_ready is %d\n",
- dev->drv_ready);
- PCIE_DBG_FS(dev, "linkdown_panic is %d\n",
- dev->linkdown_panic);
- PCIE_DBG_FS(dev, "the link is %s suspending\n",
- dev->suspending ? "" : "not");
- PCIE_DBG_FS(dev, "the power of RC is %s on\n",
- dev->power_on ? "" : "not");
- PCIE_DBG_FS(dev, "smmu_sid_base: 0x%x\n",
- dev->smmu_sid_base);
- PCIE_DBG_FS(dev, "n_fts: %d\n",
- dev->n_fts);
- PCIE_DBG_FS(dev, "ep_latency: %dms\n",
- dev->ep_latency);
- PCIE_DBG_FS(dev, "switch_latency: %dms\n",
- dev->switch_latency);
- PCIE_DBG_FS(dev, "wr_halt_size: 0x%x\n",
- dev->wr_halt_size);
- PCIE_DBG_FS(dev, "slv_addr_space_size: 0x%x\n",
- dev->slv_addr_space_size);
- PCIE_DBG_FS(dev, "phy_status_offset: 0x%x\n",
- dev->phy_status_offset);
- PCIE_DBG_FS(dev, "phy_status_bit: %u\n",
- dev->phy_status_bit);
- PCIE_DBG_FS(dev, "phy_power_down_offset: 0x%x\n",
- dev->phy_power_down_offset);
- PCIE_DBG_FS(dev, "eq_pset_req_vec: 0x%x\n",
- dev->eq_pset_req_vec);
- PCIE_DBG_FS(dev, "core_preset: 0x%x\n",
- dev->core_preset);
- PCIE_DBG_FS(dev, "eq_fmdc_t_min_phase23: 0x%x\n",
- dev->eq_fmdc_t_min_phase23);
- PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
- dev->cpl_timeout);
- PCIE_DBG_FS(dev, "current_bdf: 0x%x\n",
- dev->current_bdf);
- PCIE_DBG_FS(dev, "perst_delay_us_min: %dus\n",
- dev->perst_delay_us_min);
- PCIE_DBG_FS(dev, "perst_delay_us_max: %dus\n",
- dev->perst_delay_us_max);
- PCIE_DBG_FS(dev, "tlp_rd_size: 0x%x\n",
- dev->tlp_rd_size);
- PCIE_DBG_FS(dev, "linkdown_counter: %lu\n",
- dev->linkdown_counter);
- PCIE_DBG_FS(dev, "wake_counter: %lu\n",
- dev->wake_counter);
- PCIE_DBG_FS(dev, "link_check_max_count: %u\n",
- dev->link_check_max_count);
- PCIE_DBG_FS(dev, "prevent_l1: %d\n",
- dev->prevent_l1);
- PCIE_DBG_FS(dev, "target_link_speed: 0x%x\n",
- dev->target_link_speed);
- PCIE_DBG_FS(dev, "current_link_speed: 0x%x\n",
- dev->current_link_speed);
- PCIE_DBG_FS(dev, "target_link_width: %d\n",
- dev->target_link_width);
- PCIE_DBG_FS(dev, "current_link_width: %d\n",
- dev->current_link_width);
- PCIE_DBG_FS(dev, "link_width_max: %d\n",
- dev->link_width_max);
- PCIE_DBG_FS(dev, "link_turned_on_counter: %lu\n",
- dev->link_turned_on_counter);
- PCIE_DBG_FS(dev, "link_turned_off_counter: %lu\n",
- dev->link_turned_off_counter);
- PCIE_DBG_FS(dev, "l23_rdy_poll_timeout: %llu\n",
- dev->l23_rdy_poll_timeout);
- PCIE_DBG_FS(dev, "PCIe CESTA is %s\n",
- dev->pcie_sm ? "supported" : "not_supported");
- }
- static void msm_pcie_access_reg(struct msm_pcie_dev_t *dev, bool wr)
- {
- u32 base_sel_size = 0;
- phys_addr_t wr_register;
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: %s a PCIe register\n\n", dev->rc_idx,
- wr ? "writing" : "reading");
- if (!base_sel) {
- PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
- return;
- }
- if (((base_sel - 1) >= MSM_PCIE_MAX_RES) ||
- (!dev->res[base_sel - 1].resource)) {
- PCIE_DBG_FS(dev, "PCIe: RC%d Resource does not exist\n",
- dev->rc_idx);
- return;
- }
- PCIE_DBG_FS(dev, "base: %s: 0x%pK\nwr_offset: 0x%x\n",
- dev->res[base_sel - 1].name, dev->res[base_sel - 1].base,
- wr_offset);
- base_sel_size = resource_size(dev->res[base_sel - 1].resource);
- if (wr_offset > base_sel_size - 4 ||
- msm_pcie_check_align(dev, wr_offset)) {
- PCIE_DBG_FS(dev,
- "PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
- dev->rc_idx, wr_offset, base_sel_size - 4);
- } else {
- if (!wr) {
- wr_register =
- dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
- wr_register += wr_offset;
- PCIE_DBG_FS(dev,
- "PCIe: RC%d: register: 0x%pa value: 0x%x\n",
- dev->rc_idx, &wr_register,
- readl_relaxed(dev->res[base_sel - 1].base +
- wr_offset));
- return;
- }
- msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
- wr_offset, wr_mask, wr_value);
- }
- }
- static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
- u32 testcase)
- {
- int ret, i;
- u32 base_sel_size = 0;
- switch (testcase) {
- case MSM_PCIE_OUTPUT_PCIE_INFO:
- PCIE_DBG_FS(dev, "\n\nPCIe: Status for RC%d:\n",
- dev->rc_idx);
- msm_pcie_show_status(dev);
- break;
- case MSM_PCIE_DISABLE_LINK:
- PCIE_DBG_FS(dev,
- "\n\nPCIe: RC%d: disable link\n\n", dev->rc_idx);
- ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0, dev->dev, NULL,
- MSM_PCIE_CONFIG_FORCE_SUSP);
- if (ret)
- PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
- __func__);
- else
- PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n",
- __func__);
- break;
- case MSM_PCIE_ENABLE_LINK:
- PCIE_DBG_FS(dev,
- "\n\nPCIe: RC%d: enable link and recover config space\n\n",
- dev->rc_idx);
- ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0, dev->dev, NULL,
- 0);
- if (ret)
- PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
- __func__);
- break;
- case MSM_PCIE_DISABLE_ENABLE_LINK:
- PCIE_DBG_FS(dev,
- "\n\nPCIe: RC%d: disable and enable link then recover config space\n\n",
- dev->rc_idx);
- ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, 0, dev->dev, NULL,
- 0);
- if (ret)
- PCIE_DBG_FS(dev, "PCIe:%s:failed to disable link\n",
- __func__);
- else
- PCIE_DBG_FS(dev, "PCIe:%s:disabled link\n", __func__);
- ret = msm_pcie_pm_control(MSM_PCIE_RESUME, 0, dev->dev, NULL,
- 0);
- if (ret)
- PCIE_DBG_FS(dev, "PCIe:%s:failed to enable link\n",
- __func__);
- break;
- case MSM_PCIE_DISABLE_L0S:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L0s\n\n",
- dev->rc_idx);
- if (dev->link_status == MSM_PCIE_LINK_ENABLED)
- msm_pcie_config_l0s_disable_all(dev, dev->dev->bus);
- dev->l0s_supported = false;
- break;
- case MSM_PCIE_ENABLE_L0S:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L0s\n\n",
- dev->rc_idx);
- dev->l0s_supported = true;
- if (dev->link_status == MSM_PCIE_LINK_ENABLED)
- msm_pcie_config_l0s_enable_all(dev);
- break;
- case MSM_PCIE_DISABLE_L1:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1\n\n",
- dev->rc_idx);
- mutex_lock(&dev->aspm_lock);
- if (dev->link_status == MSM_PCIE_LINK_ENABLED)
- msm_pcie_config_l1_disable_all(dev, dev->dev->bus);
- dev->l1_supported = false;
- dev->debugfs_l1 = true;
- mutex_unlock(&dev->aspm_lock);
- break;
- case MSM_PCIE_ENABLE_L1:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1\n\n",
- dev->rc_idx);
- mutex_lock(&dev->aspm_lock);
- dev->l1_supported = true;
- dev->debugfs_l1 = true;
- if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
- /* enable l1 mode, clear bit 5 (REQ_NOT_ENTR_L1) */
- msm_pcie_write_mask(dev->parf +
- PCIE20_PARF_PM_CTRL, BIT(5), 0);
- msm_pcie_config_l1_enable_all(dev);
- }
- mutex_unlock(&dev->aspm_lock);
- break;
- case MSM_PCIE_DISABLE_L1SS:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: disable L1ss\n\n",
- dev->rc_idx);
- if (dev->link_status == MSM_PCIE_LINK_ENABLED)
- msm_pcie_config_l1ss_disable_all(dev, dev->dev->bus);
- dev->l1ss_supported = false;
- dev->l1_1_pcipm_supported = false;
- dev->l1_2_pcipm_supported = false;
- dev->l1_1_aspm_supported = false;
- dev->l1_2_aspm_supported = false;
- break;
- case MSM_PCIE_ENABLE_L1SS:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: enable L1ss\n\n",
- dev->rc_idx);
- dev->l1ss_supported = true;
- dev->l1_1_pcipm_supported = true;
- dev->l1_2_pcipm_supported = true;
- dev->l1_1_aspm_supported = true;
- dev->l1_2_aspm_supported = true;
- if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
- msm_pcie_check_l1ss_support_all(dev);
- msm_pcie_config_l1ss_enable_all(dev);
- }
- break;
- case MSM_PCIE_ENUMERATION:
- PCIE_DBG_FS(dev, "\n\nPCIe: attempting to enumerate RC%d\n\n",
- dev->rc_idx);
- if (dev->enumerated)
- PCIE_DBG_FS(dev, "PCIe: RC%d is already enumerated\n",
- dev->rc_idx);
- else {
- if (!msm_pcie_enumerate(dev->rc_idx))
- PCIE_DBG_FS(dev,
- "PCIe: RC%d is successfully enumerated\n",
- dev->rc_idx);
- else
- PCIE_DBG_FS(dev,
- "PCIe: RC%d enumeration failed\n",
- dev->rc_idx);
- }
- break;
- case MSM_PCIE_DEENUMERATION:
- PCIE_DBG_FS(dev, "\n\nPCIe: attempting to de enumerate RC%d\n\n",
- dev->rc_idx);
- if (!dev->enumerated)
- PCIE_DBG_FS(dev, "PCIe: RC%d is already de enumerated\n",
- dev->rc_idx);
- else {
- if (!msm_pcie_deenumerate(dev->rc_idx))
- PCIE_DBG_FS(dev,
- "PCIe: RC%d is successfully de enumerated\n",
- dev->rc_idx);
- else
- PCIE_DBG_FS(dev,
- "PCIe: RC%d de enumeration failed\n",
- dev->rc_idx);
- }
- break;
- case MSM_PCIE_READ_PCIE_REGISTER:
- msm_pcie_access_reg(dev, false);
- break;
- case MSM_PCIE_WRITE_PCIE_REGISTER:
- msm_pcie_access_reg(dev, true);
- break;
- case MSM_PCIE_DUMP_PCIE_REGISTER_SPACE:
- if (((base_sel - 1) >= MSM_PCIE_MAX_RES) ||
- (!dev->res[base_sel - 1].resource)) {
- PCIE_DBG_FS(dev, "PCIe: RC%d Resource does not exist\n",
- dev->rc_idx);
- break;
- }
- if (!base_sel) {
- PCIE_DBG_FS(dev, "Invalid base_sel: 0x%x\n", base_sel);
- break;
- } else if (base_sel - 1 == MSM_PCIE_RES_PARF) {
- pcie_parf_dump(dev);
- break;
- } else if (base_sel - 1 == MSM_PCIE_RES_PHY) {
- pcie_phy_dump(dev);
- break;
- } else if (base_sel - 1 == MSM_PCIE_RES_CONF) {
- base_sel_size = 0x1000;
- } else {
- base_sel_size = resource_size(
- dev->res[base_sel - 1].resource);
- }
- PCIE_DBG_FS(dev, "\n\nPCIe: Dumping %s Registers for RC%d\n\n",
- dev->res[base_sel - 1].name, dev->rc_idx);
- for (i = 0; i < base_sel_size; i += 32) {
- PCIE_DBG_FS(dev,
- "0x%04x %08x %08x %08x %08x %08x %08x %08x %08x\n",
- i, readl_relaxed(dev->res[base_sel - 1].base + i),
- readl_relaxed(dev->res[base_sel - 1].base + (i + 4)),
- readl_relaxed(dev->res[base_sel - 1].base + (i + 8)),
- readl_relaxed(dev->res[base_sel - 1].base + (i + 12)),
- readl_relaxed(dev->res[base_sel - 1].base + (i + 16)),
- readl_relaxed(dev->res[base_sel - 1].base + (i + 20)),
- readl_relaxed(dev->res[base_sel - 1].base + (i + 24)),
- readl_relaxed(dev->res[base_sel - 1].base + (i + 28)));
- }
- break;
- case MSM_PCIE_DISABLE_AER:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: clear AER enable flag\n\n",
- dev->rc_idx);
- dev->aer_enable = false;
- break;
- case MSM_PCIE_ENABLE_AER:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: set AER enable flag\n\n",
- dev->rc_idx);
- dev->aer_enable = true;
- break;
- case MSM_PCIE_GPIO_STATUS:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: PERST and WAKE status\n\n",
- dev->rc_idx);
- PCIE_DBG_FS(dev,
- "PCIe: RC%d: PERST: gpio%u value: %d\n",
- dev->rc_idx, dev->gpio[MSM_PCIE_GPIO_PERST].num,
- gpio_get_value(dev->gpio[MSM_PCIE_GPIO_PERST].num));
- PCIE_DBG_FS(dev,
- "PCIe: RC%d: WAKE: gpio%u value: %d\n",
- dev->rc_idx, dev->gpio[MSM_PCIE_GPIO_WAKE].num,
- gpio_get_value(dev->gpio[MSM_PCIE_GPIO_WAKE].num));
- break;
- case MSM_PCIE_ASSERT_PERST:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: assert PERST\n\n",
- dev->rc_idx);
- gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
- dev->gpio[MSM_PCIE_GPIO_PERST].on);
- usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
- break;
- case MSM_PCIE_DEASSERT_PERST:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: de-assert PERST\n\n",
- dev->rc_idx);
- gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
- 1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
- usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);
- break;
- case MSM_PCIE_KEEP_RESOURCES_ON:
- PCIE_DBG_FS(dev,
- "\n\nPCIe: RC%d: set keep resources on flag\n\n",
- dev->rc_idx);
- msm_pcie_keep_resources_on |= BIT(dev->rc_idx);
- break;
- case MSM_PCIE_FORCE_GEN1:
- PCIE_DBG_FS(dev,
- "\n\nPCIe: RC%d: set target speed to Gen 1\n\n",
- dev->rc_idx);
- dev->target_link_speed = GEN1_SPEED;
- break;
- case MSM_PCIE_FORCE_GEN2:
- PCIE_DBG_FS(dev,
- "\n\nPCIe: RC%d: set target speed to Gen 2\n\n",
- dev->rc_idx);
- dev->target_link_speed = GEN2_SPEED;
- break;
- case MSM_PCIE_FORCE_GEN3:
- PCIE_DBG_FS(dev,
- "\n\nPCIe: RC%d: set target speed to Gen 3\n\n",
- dev->rc_idx);
- dev->target_link_speed = GEN3_SPEED;
- break;
- case MSM_PCIE_TRIGGER_SBR:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: Trigger SBR\n\n",
- dev->rc_idx);
- if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
- msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL,
- 0, PCIE20_BRIDGE_CTRL_SBR);
- usleep_range(2000, 2001);
- msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL,
- PCIE20_BRIDGE_CTRL_SBR, 0);
- }
- break;
- case MSM_PCIE_REMOTE_LOOPBACK:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: Move to remote loopback mode\n\n",
- dev->rc_idx);
- if (!dev->enumerated) {
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: the link is not up yet\n\n",
- dev->rc_idx);
- break;
- }
- /* link needs to be in L0 for remote loopback */
- msm_pcie_config_l0s_disable_all(dev, dev->dev->bus);
- dev->l0s_supported = false;
- mutex_lock(&dev->aspm_lock);
- msm_pcie_config_l1_disable_all(dev, dev->dev->bus);
- dev->l1_supported = false;
- dev->debugfs_l1 = true;
- mutex_unlock(&dev->aspm_lock);
- msm_pcie_loopback(dev, false);
- break;
- case MSM_PCIE_LOCAL_LOOPBACK:
- PCIE_DBG_FS(dev, "\n\nPCIe: RC%d: Move to local loopback mode\n\n", dev->rc_idx);
- if (dev->enumerated) {
- /* As endpoint is already connected use remote loopback */
- PCIE_DBG_FS(dev,
- "\n\nPCIe: RC%d: EP is already enumerated, use remote loopback mode\n\n",
- dev->rc_idx);
- break;
- }
- /* keep resources on because we will fail to enable as ep is not connected */
- msm_pcie_keep_resources_on |= BIT(dev->rc_idx);
- /* Enable all the PCIe resources */
- if (!dev->enumerated)
- msm_pcie_enable(dev);
- msm_pcie_loopback(dev, true);
- break;
- default:
- PCIE_DBG_FS(dev, "Invalid testcase: %d.\n", testcase);
- break;
- }
- }
- int msm_pcie_debug_info(struct pci_dev *dev, u32 option, u32 base,
- u32 offset, u32 mask, u32 value)
- {
- int ret = 0;
- struct msm_pcie_dev_t *pdev = NULL;
- if (!dev) {
- pr_err("PCIe: the input pci dev is NULL.\n");
- return -ENODEV;
- }
- if (option == MSM_PCIE_READ_PCIE_REGISTER ||
- option == MSM_PCIE_WRITE_PCIE_REGISTER ||
- option == MSM_PCIE_DUMP_PCIE_REGISTER_SPACE) {
- if (!base || base >= MSM_PCIE_MAX_RES) {
- PCIE_DBG_FS(pdev, "Invalid base_sel: 0x%x\n", base);
- PCIE_DBG_FS(pdev,
- "PCIe: base_sel is still 0x%x\n", base_sel);
- return -EINVAL;
- }
- base_sel = base;
- PCIE_DBG_FS(pdev, "PCIe: base_sel is now 0x%x\n", base_sel);
- if (option == MSM_PCIE_READ_PCIE_REGISTER ||
- option == MSM_PCIE_WRITE_PCIE_REGISTER) {
- wr_offset = offset;
- wr_mask = mask;
- wr_value = value;
- PCIE_DBG_FS(pdev,
- "PCIe: wr_offset is now 0x%x\n", wr_offset);
- PCIE_DBG_FS(pdev,
- "PCIe: wr_mask is now 0x%x\n", wr_mask);
- PCIE_DBG_FS(pdev,
- "PCIe: wr_value is now 0x%x\n", wr_value);
- }
- }
- pdev = PCIE_BUS_PRIV_DATA(dev->bus);
- rc_sel = BIT(pdev->rc_idx);
- msm_pcie_sel_debug_testcase(pdev, option);
- return ret;
- }
- EXPORT_SYMBOL(msm_pcie_debug_info);
- #ifdef CONFIG_SYSFS
- static ssize_t link_check_max_count_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
- dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n",
- pcie_dev->link_check_max_count);
- }
- static ssize_t link_check_max_count_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
- dev_get_drvdata(dev);
- u32 val;
- if (kstrtou32(buf, 0, &val))
- return -EINVAL;
- pcie_dev->link_check_max_count = val;
- return count;
- }
- static DEVICE_ATTR_RW(link_check_max_count);
- static ssize_t enumerate_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
- dev_get_drvdata(dev);
- if (pcie_dev)
- msm_pcie_enumerate(pcie_dev->rc_idx);
- return count;
- }
- static DEVICE_ATTR_WO(enumerate);
- static ssize_t aspm_stat_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct msm_pcie_dev_t *pcie_dev = dev_get_drvdata(dev);
- if (!pcie_dev->mhi)
- return scnprintf(buf, PAGE_SIZE,
- "PCIe: RC%d: No dev or MHI space found\n",
- pcie_dev->rc_idx);
- mutex_lock(&pcie_dev->aspm_lock);
- if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED) {
- mutex_unlock(&pcie_dev->aspm_lock);
- return scnprintf(buf, PAGE_SIZE,
- "PCIe: RC%d: registers are not accessible\n",
- pcie_dev->rc_idx);
- }
- mutex_unlock(&pcie_dev->aspm_lock);
- return scnprintf(buf, PAGE_SIZE,
- "PCIe: RC%d: L0s: %u L1: %u L1.1: %u L1.2: %u\n",
- pcie_dev->rc_idx,
- readl_relaxed(pcie_dev->mhi +
- PCIE20_PARF_DEBUG_CNT_IN_L0S),
- readl_relaxed(pcie_dev->mhi +
- PCIE20_PARF_DEBUG_CNT_IN_L1),
- readl_relaxed(pcie_dev->mhi +
- PCIE20_PARF_DEBUG_CNT_IN_L1SUB_L1),
- readl_relaxed(pcie_dev->mhi +
- PCIE20_PARF_DEBUG_CNT_IN_L1SUB_L2));
- }
- static DEVICE_ATTR_RO(aspm_stat);
- static ssize_t l23_rdy_poll_timeout_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
- {
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
- dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%llu\n",
- pcie_dev->l23_rdy_poll_timeout);
- }
- static ssize_t l23_rdy_poll_timeout_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
- dev_get_drvdata(dev);
- u64 val;
- if (kstrtou64(buf, 0, &val))
- return -EINVAL;
- pcie_dev->l23_rdy_poll_timeout = val;
- PCIE_DBG(pcie_dev, "PCIe: RC%d: L23_Ready poll timeout: %llu\n",
- pcie_dev->rc_idx, pcie_dev->l23_rdy_poll_timeout);
- return count;
- }
- static DEVICE_ATTR_RW(l23_rdy_poll_timeout);
- static ssize_t boot_option_show(struct device *dev,
- struct device_attribute *attr, char *buf)
- {
- struct msm_pcie_dev_t *pcie_dev = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%x\n", pcie_dev->boot_option);
- }
- static ssize_t boot_option_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- u32 boot_option;
- struct msm_pcie_dev_t *pcie_dev = dev_get_drvdata(dev);
- if (kstrtou32(buf, 0, &boot_option))
- return -EINVAL;
- if (boot_option <= (BIT(0) | BIT(1))) {
- pcie_dev->boot_option = boot_option;
- PCIE_DBG(pcie_dev, "PCIe: RC%d: boot_option is now 0x%x\n",
- pcie_dev->rc_idx, pcie_dev->boot_option);
- } else {
- pr_err("PCIe: Invalid input for boot_option: 0x%x.\n",
- boot_option);
- }
- return count;
- }
- static DEVICE_ATTR_RW(boot_option);
- static ssize_t panic_on_aer_show(struct device *dev,
- struct device_attribute *attr, char *buf)
- {
- struct msm_pcie_dev_t *pcie_dev = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE, "%x\n", pcie_dev->panic_on_aer);
- }
- static ssize_t panic_on_aer_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- u32 panic_on_aer;
- struct msm_pcie_dev_t *pcie_dev = dev_get_drvdata(dev);
- if (kstrtou32(buf, 0, &panic_on_aer))
- return -EINVAL;
- pcie_dev->panic_on_aer = panic_on_aer;
- return count;
- }
- static DEVICE_ATTR_RW(panic_on_aer);
- static ssize_t link_speed_override_show(struct device *dev,
- struct device_attribute *attr, char *buf)
- {
- struct msm_pcie_dev_t *pcie_dev = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE,
- "PCIe: RC%d: link speed override is set to: 0x%x\n",
- pcie_dev->rc_idx, pcie_dev->link_speed_override);
- }
- static ssize_t link_speed_override_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- u32 link_speed_override;
- struct msm_pcie_dev_t *pcie_dev = dev_get_drvdata(dev);
- int ret;
- if (kstrtou32(buf, 0, &link_speed_override))
- return -EINVAL;
- /* Set target PCIe link speed as maximum device/link is capable of */
- ret = msm_pcie_set_target_link_speed(pcie_dev->rc_idx,
- link_speed_override, true);
- if (ret) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: Failed to override link speed: %d. %d\n",
- pcie_dev->rc_idx, link_speed_override, ret);
- } else {
- pcie_dev->link_speed_override = link_speed_override;
- PCIE_DBG(pcie_dev, "PCIe: RC%d: link speed override set to: %d\n",
- pcie_dev->rc_idx, link_speed_override);
- }
- return count;
- }
- static DEVICE_ATTR_RW(link_speed_override);
- static ssize_t sbr_link_recovery_show(struct device *dev,
- struct device_attribute *attr, char *buf)
- {
- struct msm_pcie_dev_t *pcie_dev = dev_get_drvdata(dev);
- return scnprintf(buf, PAGE_SIZE,
- "PCIe: RC%d: sbr_link_recovery is set to: 0x%x\n",
- pcie_dev->rc_idx, pcie_dev->linkdown_recovery_enable);
- }
- static ssize_t sbr_link_recovery_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
- {
- u32 linkdown_recovery_enable;
- struct msm_pcie_dev_t *pcie_dev = dev_get_drvdata(dev);
- if (kstrtou32(buf, 0, &linkdown_recovery_enable))
- return -EINVAL;
- if (pcie_dev->linkdown_reset[0].hdl && pcie_dev->linkdown_reset[1].hdl)
- pcie_dev->linkdown_recovery_enable = linkdown_recovery_enable;
- PCIE_DBG(pcie_dev, "PCIe: RC%d: sbr_link_recovery is set to: %d\n",
- pcie_dev->rc_idx, linkdown_recovery_enable);
- return count;
- }
- static DEVICE_ATTR_RW(sbr_link_recovery);
/* Debug/control attributes exported by the "debug" sysfs group below. */
static struct attribute *msm_pcie_debug_attrs[] = {
	&dev_attr_link_check_max_count.attr,
	&dev_attr_enumerate.attr,
	&dev_attr_aspm_stat.attr,
	&dev_attr_l23_rdy_poll_timeout.attr,
	&dev_attr_boot_option.attr,
	&dev_attr_panic_on_aer.attr,
	&dev_attr_link_speed_override.attr,
	&dev_attr_sbr_link_recovery.attr,
	NULL,
};

/* Named group: the attributes appear under a "debug" subdirectory. */
static const struct attribute_group msm_pcie_debug_attr_group = {
	.name = "debug",
	.attrs = msm_pcie_debug_attrs,
};
/* AER sysfs entries */

/*
 * aer_stats_dev_attr() - generate a sysfs show function for one per-device
 * AER counter array plus its running total.
 *
 * For each counter bit: print "<name> <count>" when the bit has a name in
 * @strings_array; otherwise print "<stats_array>_bit[<i>] <count>" but only
 * when the count is non-zero.  A final "TOTAL_<total_string> <total>" line
 * comes from @total_field.  Returns -ENODEV when aer_stats is not allocated.
 */
#define aer_stats_dev_attr(name, stats_array, strings_array, \
			   total_string, total_field) \
	static ssize_t \
	name##_show(struct device *dev, struct device_attribute *attr, \
		     char *buf) \
{ \
	unsigned int i; \
	u64 *stats; \
	struct msm_pcie_dev_t *pcie_dev = dev_get_drvdata(dev); \
	size_t len = 0; \
	\
	if (!pcie_dev->aer_stats) \
		return -ENODEV; \
	\
	stats = pcie_dev->aer_stats->stats_array; \
	\
	for (i = 0; i < ARRAY_SIZE(pcie_dev->aer_stats->stats_array); i++) {\
		if (strings_array[i]) \
			len += sysfs_emit_at(buf, len, "%s %llu\n", \
					     strings_array[i], \
					     stats[i]); \
		else if (stats[i]) \
			len += sysfs_emit_at(buf, len, \
					     #stats_array "_bit[%d] %llu\n",\
					     i, stats[i]); \
	} \
	len += sysfs_emit_at(buf, len, "TOTAL_%s %llu\n", total_string, \
			     pcie_dev->aer_stats->total_field); \
	return len; \
} \
static DEVICE_ATTR_RO(name)

/* Correctable, fatal and non-fatal per-device AER counter attributes. */
aer_stats_dev_attr(aer_dev_correctable, dev_cor_errs,
		   aer_correctable_error_string, "ERR_COR",
		   dev_total_cor_errs);
aer_stats_dev_attr(aer_dev_fatal, dev_fatal_errs,
		   aer_uncorrectable_error_string, "ERR_FATAL",
		   dev_total_fatal_errs);
aer_stats_dev_attr(aer_dev_nonfatal, dev_nonfatal_errs,
		   aer_uncorrectable_error_string, "ERR_NONFATAL",
		   dev_total_nonfatal_errs);
/*
 * aer_stats_rootport_attr() - generate a sysfs show function that prints a
 * single root-port AER total counter from aer_stats.  Returns -ENODEV when
 * aer_stats is not allocated.
 */
#define aer_stats_rootport_attr(name, field) \
	static ssize_t \
	name##_show(struct device *dev, struct device_attribute *attr, \
		     char *buf) \
{ \
	struct msm_pcie_dev_t *pcie_dev = dev_get_drvdata(dev); \
	\
	if (!pcie_dev->aer_stats) \
		return -ENODEV; \
	\
	return sysfs_emit(buf, "%llu\n", pcie_dev->aer_stats->field); \
} \
static DEVICE_ATTR_RO(name)

/* Root-port total counters for correctable/fatal/non-fatal errors. */
aer_stats_rootport_attr(aer_rootport_total_err_cor,
			rootport_total_cor_errs);
aer_stats_rootport_attr(aer_rootport_total_err_fatal,
			rootport_total_fatal_errs);
aer_stats_rootport_attr(aer_rootport_total_err_nonfatal,
			rootport_total_nonfatal_errs);
/* AER statistics attributes exported by the "aer_stats" sysfs group. */
static struct attribute *msm_aer_stats_attrs[] __ro_after_init = {
	&dev_attr_aer_dev_correctable.attr,
	&dev_attr_aer_dev_fatal.attr,
	&dev_attr_aer_dev_nonfatal.attr,
	&dev_attr_aer_rootport_total_err_cor.attr,
	&dev_attr_aer_rootport_total_err_fatal.attr,
	&dev_attr_aer_rootport_total_err_nonfatal.attr,
	NULL
};

/* Named group: the attributes appear under an "aer_stats" subdirectory. */
static const struct attribute_group msm_aer_stats_attr_group = {
	.name = "aer_stats",
	.attrs = msm_aer_stats_attrs,
};
- static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
- {
- int ret;
- ret = sysfs_create_group(&dev->pdev->dev.kobj,
- &msm_pcie_debug_attr_group);
- if (ret)
- PCIE_DBG_FS(dev,
- "RC%d: failed to create sysfs debug group\n",
- dev->rc_idx);
- ret = sysfs_create_group(&dev->pdev->dev.kobj,
- &msm_aer_stats_attr_group);
- if (ret)
- PCIE_DBG_FS(dev,
- "RC%d: failed to create sysfs debug group\n",
- dev->rc_idx);
- }
- static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
- {
- if (dev->pdev) {
- sysfs_remove_group(&dev->pdev->dev.kobj,
- &msm_pcie_debug_attr_group);
- sysfs_remove_group(&dev->pdev->dev.kobj,
- &msm_aer_stats_attr_group);
- }
- }
#else
/* Stub: sysfs support compiled out (CONFIG_SYSFS=n). */
static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
{
}
/* Stub: sysfs support compiled out (CONFIG_SYSFS=n). */
static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
{
}
#endif
- #ifdef CONFIG_DEBUG_FS
/* debugfs nodes created under the "pci-msm" directory. */
static struct dentry *dent_msm_pcie;
static struct dentry *dfile_rc_sel;
static struct dentry *dfile_case;
static struct dentry *dfile_base_sel;
static struct dentry *dfile_linkdown_panic;
static struct dentry *dfile_wr_offset;
static struct dentry *dfile_wr_mask;
static struct dentry *dfile_wr_value;
static struct dentry *dfile_boot_option;
static struct dentry *dfile_aer_enable;
static struct dentry *dfile_corr_counter_limit;

/* Highest valid rc_sel bitmask; initialized to (1 << MAX_RC_NUM) - 1. */
static u32 rc_sel_max;

#ifdef CONFIG_SEC_PCIE_DEV
#define MAX_MSG_LEN (80)
#define MAX_SEC_PHY_TEST_NUM (10)
static struct dentry *dent_sec;
static struct dentry *dfile_sec_phy_test;

/* One queued PHY register write: absolute address and value to write. */
typedef struct {
	u32 addr;
	u32 value;
} sec_phy_data_t;

/* Per-RC queue of PHY writes, applied by pcie_sec_phy_init(). */
static sec_phy_data_t sec_phy_data[MAX_RC_NUM][MAX_SEC_PHY_TEST_NUM];
- static void pcie_sec_phy_init(struct msm_pcie_dev_t *dev)
- {
- u32 start = dev->res[MSM_PCIE_RES_PHY].resource->start;
- u32 size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
- u32 end = start + size - 4;
- int i;
- PCIE_DBG(dev,
- "RC%d: %s enter\n", dev->rc_idx, __func__);
- for (i = 0; sec_phy_data[dev->rc_idx][i].addr >= start &&
- sec_phy_data[dev->rc_idx][i].addr <= end ; i++) {
- msm_pcie_write_reg(dev->phy, sec_phy_data[dev->rc_idx][i].addr - start,
- sec_phy_data[dev->rc_idx][i].value);
- pr_info("PCIE SEC: write 0x%08x <- 0x%08x\n",
- sec_phy_data[dev->rc_idx][i].addr, sec_phy_data[dev->rc_idx][i].value);
- }
- }
- static void pcie_sec_dump(struct msm_pcie_dev_t *dev)
- {
- int i;
- u32 size;
- u32 start = dev->res[MSM_PCIE_RES_PHY].resource->start;
- size = resource_size(dev->res[MSM_PCIE_RES_PHY].resource);
- if (size)
- PCIE_DUMP(dev,
- "------------ PCIe PHY of RC%d PHY DUMP ------------\n",
- dev->rc_idx);
- for (i = 0; i < size; i += 32) {
- PCIE_DUMP(dev,
- "PCIe PHY of RC%d 0x%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
- dev->rc_idx, i + start,
- readl_relaxed(dev->phy + i),
- readl_relaxed(dev->phy + (i + 4)),
- readl_relaxed(dev->phy + (i + 8)),
- readl_relaxed(dev->phy + (i + 12)),
- readl_relaxed(dev->phy + (i + 16)),
- readl_relaxed(dev->phy + (i + 20)),
- readl_relaxed(dev->phy + (i + 24)),
- readl_relaxed(dev->phy + (i + 28)));
- }
- }
/*
 * debugfs write handler for sec/phy_test.
 *
 * Input format: "<op> <rc> <args>" where <op> is:
 *   w/W <addr-hex> <value-hex>  queue a PHY write for RC <rc>
 *   e/E a|A                     erase the whole queue for RC <rc>
 *   e/E <idx>                   erase entry <idx> and compact the queue
 * Queued writes are later applied by pcie_sec_phy_init().
 */
static ssize_t pcie_sec_phy_write(struct file *file,
				const char __user *buf,
				size_t count, loff_t *ppos)
{
	unsigned long ret;
	char str[MAX_MSG_LEN];
	char opt[MAX_MSG_LEN];
	char *pos;
	int sec_phy_cur_erase_idx = -1, rc;
	size_t i;
	/* Per-RC count of queued entries; persists across calls. */
	static int sec_phy_cur_wr_idx[MAX_RC_NUM] = {0,};

	memset(str, 0, sizeof(str));
	memset(opt, 0, sizeof(opt));

	ret = copy_from_user(str, buf, sizeof(str)-1);
	if (ret || strlen(str) == 0) {
		return -EFAULT;
	}

	/* Strip the trailing newline from echo(1)-style input. */
	str[strlen(str) - 1] = '\0';

	pr_info("PCIE SEC: input(%s).\n", str);

	/* First character selects the operation (w/W/e/E). */
	if (sscanf(str, "%c", opt) != 1) {
		pr_err("PCIE SEC: first parameter (%c) is wrong.\n", opt[0]);
		return -EINVAL;
	}

	/* Second field: single-decimal-digit RC index followed by a space. */
	pos = &str[0] + 2;
	rc = pos[0] - '0';
	if (rc < 0 || rc > MAX_RC_NUM - 1 || pos[1] != ' ') {
		pr_err("PCIE SEC: 2nd parameter (%c) is wrong.\n", pos[0]);
		return -EINVAL;
	}
	pos = &pos[0] + 2;

	switch (opt[0]) {
	case 'w':
	case 'W':
		if (sec_phy_cur_wr_idx[rc] >= MAX_SEC_PHY_TEST_NUM) {
			pr_err("PCIE SEC: no space.\n");
			return -ENOSPC;
		}
		/* Advance to the first hex digit of the address field. */
		for (i = 0; i < strlen(pos); i++)
			if ((pos[i] >= '0' && pos[i] <= '9') ||
			    (pos[i] >= 'A' && pos[i] <= 'F') ||
			    (pos[i] >= 'a' && pos[i] <= 'f'))
				break;
		if (i == strlen(pos)) {
			pr_err("PCIE SEC: invalid addr.\n");
			return -EINVAL;
		}
		pos = pos + i;
		sec_phy_data[rc][sec_phy_cur_wr_idx[rc]].addr =
			(u32)simple_strtoul(pos, &pos, 16);
		/* Advance to the first hex digit of the value field. */
		for (i = 0; i < strlen(pos); i++)
			if ((pos[i] >= '0' && pos[i] <= '9') ||
			    (pos[i] >= 'A' && pos[i] <= 'F') ||
			    (pos[i] >= 'a' && pos[i] <= 'f'))
				break;
		if (i == strlen(pos)) {
			pr_err("PCIE SEC: invalid value.\n");
			return -EINVAL;
		}
		pos = pos + i;
		sec_phy_data[rc][sec_phy_cur_wr_idx[rc]].value =
			(u32)simple_strtoul(pos, 0, 16);
		pr_info("PCIE SEC: wr buff[%d] 0x%08x 0x%08x.\n",
			sec_phy_cur_wr_idx[rc],
			sec_phy_data[rc][sec_phy_cur_wr_idx[rc]].addr,
			sec_phy_data[rc][sec_phy_cur_wr_idx[rc]].value);
		sec_phy_cur_wr_idx[rc]++;
		break;
	case 'e':
	case 'E':
		/* 'a'/'A': wipe the whole queue for this RC. */
		if (pos[0] == 'a' || pos[0] == 'A') {
			pr_info("PCIE SEC: erase all buff\n");
			for (i = 0; i < MAX_SEC_PHY_TEST_NUM; i++) {
				sec_phy_data[rc][i].addr =
					sec_phy_data[rc][i].value = 0;
				sec_phy_cur_wr_idx[rc] = 0;
			}
			goto out;
		}
		ret = sscanf(pos, " %u", &sec_phy_cur_erase_idx);
		if (ret != 1)
			return -EINVAL;
		if (sec_phy_cur_erase_idx < 0 ||
		    sec_phy_cur_erase_idx >= MAX_SEC_PHY_TEST_NUM) {
			pr_err("PCIE SEC: erase idx is wrong.\n");
			return -EINVAL;
		}
		/* Clear the entry, then shift later entries down by one.
		 * NOTE(review): size_t i is compared against the signed
		 * sec_phy_cur_wr_idx[rc] here — confirm the count can
		 * never be negative at this point.
		 */
		sec_phy_data[rc][sec_phy_cur_erase_idx].addr = 0;
		sec_phy_data[rc][sec_phy_cur_erase_idx].value = 0;
		if (sec_phy_cur_erase_idx+1 < MAX_SEC_PHY_TEST_NUM) {
			for (i = sec_phy_cur_erase_idx+1;
			     i < sec_phy_cur_wr_idx[rc]; i++) {
				sec_phy_data[rc][i-1].addr = sec_phy_data[rc][i].addr;
				sec_phy_data[rc][i-1].value = sec_phy_data[rc][i].value;
				sec_phy_data[rc][i].addr = sec_phy_data[rc][i].value = 0;
			}
		}
		sec_phy_cur_wr_idx[rc]--;
		if (sec_phy_cur_wr_idx[rc] < 0)
			sec_phy_cur_wr_idx[rc] = 0;
		break;
	}
out:
	return count;
}
- static ssize_t pcie_sec_phy_read(struct file *file,
- char __user *buf,
- size_t count, loff_t *ppos)
- {
- char str[MAX_SEC_PHY_TEST_NUM*MAX_MSG_LEN] = {0,};
- int rc, i, offset;
- size_t cnt = 0, ret;
- loff_t lpos;
- if (*ppos != 0)
- return 0;
- for (rc = 0; rc < MAX_RC_NUM; rc++) {
- offset = 0;
- lpos = 0;
- offset += sprintf(str+offset, "++ RC%d ++\n", rc);
- for (i = 0; i < MAX_SEC_PHY_TEST_NUM; i++) {
- offset += sprintf(str+offset, "%02d %08x %08x\n",
- i, sec_phy_data[rc][i].addr, sec_phy_data[rc][i].value);
- }
- offset += sprintf(str+offset, "----- RC%d -----\n\n", rc);
- ret = simple_read_from_buffer(buf + cnt, offset, &lpos, str, sizeof(str));
- if (ret < 0)
- break;
- cnt += offset;
- }
- *ppos += cnt;
- return cnt;
- }
/* File operations for <debugfs>/pci-msm/sec/phy_test.
 * NOTE(review): not declared static — make it static if no other
 * translation unit references it.
 */
const struct file_operations pcie_sec_debug_phy_ops = {
	.write = pcie_sec_phy_write,
	.read = pcie_sec_phy_read,
};
- static void pcie_sec_debugfs_init(void)
- {
- if (!dent_msm_pcie) {
- pr_err("PCIE SEC: skip to create the folder for debug_fs.\n");
- return;
- }
- dent_sec = debugfs_create_dir("sec", dent_msm_pcie);
- if (IS_ERR(dent_sec)) {
- pr_err("PCIE SEC: fail to create the folder for debug_fs.\n");
- return;
- }
- dfile_sec_phy_test = debugfs_create_file("phy_test", 0644, dent_sec, 0,
- &pcie_sec_debug_phy_ops);
- if (!dfile_sec_phy_test || IS_ERR(dfile_sec_phy_test)) {
- pr_err("PCIE SEC: fail to create the file for debug_fs.\n");
- goto phy_err;
- }
- return;
- phy_err:
- debugfs_remove(dent_sec);
- }
/* Remove the SEC debugfs nodes created by pcie_sec_debugfs_init(). */
static void pcie_sec_debugfs_exit(void)
{
	debugfs_remove(dfile_sec_phy_test);
	debugfs_remove(dent_sec);
}
- #endif
- static int msm_pcie_debugfs_parse_input(const char __user *buf,
- size_t count, unsigned int *data)
- {
- unsigned long ret;
- char *str, *str_temp;
- str = kmalloc(count + 1, GFP_KERNEL);
- if (!str)
- return -ENOMEM;
- ret = copy_from_user(str, buf, count);
- if (ret) {
- kfree(str);
- return -EFAULT;
- }
- str[count] = 0;
- str_temp = str;
- ret = get_option(&str_temp, data);
- kfree(str);
- if (ret != 1)
- return -EINVAL;
- return 0;
- }
- static int msm_pcie_debugfs_case_show(struct seq_file *m, void *v)
- {
- int i;
- for (i = 0; i < MSM_PCIE_MAX_DEBUGFS_OPTION; i++)
- seq_printf(m, "\t%d:\t %s\n", i,
- msm_pcie_debugfs_option_desc[i]);
- return 0;
- }
- static int msm_pcie_debugfs_case_open(struct inode *inode, struct file *file)
- {
- return single_open(file, msm_pcie_debugfs_case_show, NULL);
- }
- static ssize_t msm_pcie_debugfs_case_select(struct file *file,
- const char __user *buf,
- size_t count, loff_t *ppos)
- {
- int i, ret;
- unsigned int testcase = 0;
- ret = msm_pcie_debugfs_parse_input(buf, count, &testcase);
- if (ret)
- return ret;
- pr_alert("PCIe: TEST: %d\n", testcase);
- for (i = 0; i < MAX_RC_NUM; i++) {
- if (rc_sel & BIT(i))
- msm_pcie_sel_debug_testcase(&msm_pcie_dev[i], testcase);
- }
- return count;
- }
- static const struct file_operations msm_pcie_debugfs_case_ops = {
- .open = msm_pcie_debugfs_case_open,
- .release = single_release,
- .read = seq_read,
- .write = msm_pcie_debugfs_case_select,
- };
- static int msm_pcie_debugfs_rc_select_show(struct seq_file *m, void *v)
- {
- int i;
- seq_printf(m, "Current rc_sel: %d which selects:\n", rc_sel);
- for (i = 0; i < MAX_RC_NUM; i++)
- if (rc_sel & BIT(i))
- seq_printf(m, "\tPCIe%d\n", i);
- return 0;
- }
- static int msm_pcie_debugfs_rc_select_open(struct inode *inode,
- struct file *file)
- {
- return single_open(file, msm_pcie_debugfs_rc_select_show, NULL);
- }
- static ssize_t msm_pcie_debugfs_rc_select(struct file *file,
- const char __user *buf,
- size_t count, loff_t *ppos)
- {
- int i, ret;
- u32 new_rc_sel = 0;
- ret = msm_pcie_debugfs_parse_input(buf, count, &new_rc_sel);
- if (ret)
- return ret;
- if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
- pr_alert("PCIe: invalid value for rc_sel: 0x%x\n", new_rc_sel);
- pr_alert("PCIe: rc_sel is still 0x%x\n", rc_sel ? rc_sel : 0x1);
- } else {
- rc_sel = new_rc_sel;
- pr_alert("PCIe: rc_sel is now: 0x%x\n", rc_sel);
- }
- pr_alert("PCIe: the following RC(s) will be tested:\n");
- for (i = 0; i < MAX_RC_NUM; i++)
- if (rc_sel & BIT(i))
- pr_alert("RC %d\n", i);
- return count;
- }
- static const struct file_operations msm_pcie_debugfs_rc_select_ops = {
- .open = msm_pcie_debugfs_rc_select_open,
- .release = single_release,
- .read = seq_read,
- .write = msm_pcie_debugfs_rc_select,
- };
- static int msm_pcie_debugfs_base_select_show(struct seq_file *m, void *v)
- {
- int i;
- seq_puts(m, "Options:\n");
- for (i = 0; i < MSM_PCIE_MAX_RES; i++)
- seq_printf(m, "\t%d: %s\n", i + 1, msm_pcie_res_info[i].name);
- seq_printf(m, "\nCurrent base_sel: %d: %s\n", base_sel, base_sel ?
- msm_pcie_res_info[base_sel - 1].name : "None");
- return 0;
- }
- static int msm_pcie_debugfs_base_select_open(struct inode *inode,
- struct file *file)
- {
- return single_open(file, msm_pcie_debugfs_base_select_show, NULL);
- }
- static ssize_t msm_pcie_debugfs_base_select(struct file *file,
- const char __user *buf,
- size_t count, loff_t *ppos)
- {
- int ret;
- u32 new_base_sel = 0;
- ret = msm_pcie_debugfs_parse_input(buf, count, &new_base_sel);
- if (ret)
- return ret;
- if (!new_base_sel || new_base_sel > MSM_PCIE_MAX_RES) {
- pr_alert("PCIe: invalid value for base_sel: 0x%x\n",
- new_base_sel);
- pr_alert("PCIe: base_sel is still 0x%x\n", base_sel);
- } else {
- base_sel = new_base_sel;
- pr_alert("PCIe: base_sel is now 0x%x\n", base_sel);
- pr_alert("%s\n", msm_pcie_res_info[base_sel - 1].name);
- }
- return count;
- }
- static const struct file_operations msm_pcie_debugfs_base_select_ops = {
- .open = msm_pcie_debugfs_base_select_open,
- .release = single_release,
- .read = seq_read,
- .write = msm_pcie_debugfs_base_select,
- };
- static ssize_t msm_pcie_debugfs_linkdown_panic(struct file *file,
- const char __user *buf,
- size_t count, loff_t *ppos)
- {
- int i, ret;
- u32 new_linkdown_panic = 0;
- ret = msm_pcie_debugfs_parse_input(buf, count, &new_linkdown_panic);
- if (ret)
- return ret;
- new_linkdown_panic = !!new_linkdown_panic;
- for (i = 0; i < MAX_RC_NUM; i++) {
- if (rc_sel & BIT(i)) {
- msm_pcie_dev[i].linkdown_panic =
- new_linkdown_panic;
- PCIE_DBG_FS(&msm_pcie_dev[i],
- "PCIe: RC%d: linkdown_panic is now %d\n",
- i, msm_pcie_dev[i].linkdown_panic);
- }
- }
- return count;
- }
- static const struct file_operations msm_pcie_debugfs_linkdown_panic_ops = {
- .write = msm_pcie_debugfs_linkdown_panic,
- };
- static int msm_pcie_debugfs_wr_offset_show(struct seq_file *m, void *v)
- {
- seq_printf(m, "0x%x\n", wr_offset);
- return 0;
- }
- static int msm_pcie_debugfs_wr_offset_open(struct inode *inode,
- struct file *file)
- {
- return single_open(file, msm_pcie_debugfs_wr_offset_show, NULL);
- }
- static ssize_t msm_pcie_debugfs_wr_offset(struct file *file,
- const char __user *buf,
- size_t count, loff_t *ppos)
- {
- int ret;
- wr_offset = 0;
- ret = msm_pcie_debugfs_parse_input(buf, count, &wr_offset);
- if (ret)
- return ret;
- pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
- return count;
- }
- static const struct file_operations msm_pcie_debugfs_wr_offset_ops = {
- .open = msm_pcie_debugfs_wr_offset_open,
- .release = single_release,
- .read = seq_read,
- .write = msm_pcie_debugfs_wr_offset,
- };
- static int msm_pcie_debugfs_wr_mask_show(struct seq_file *m, void *v)
- {
- seq_printf(m, "0x%x\n", wr_mask);
- return 0;
- }
- static int msm_pcie_debugfs_wr_mask_open(struct inode *inode, struct file *file)
- {
- return single_open(file, msm_pcie_debugfs_wr_mask_show, NULL);
- }
- static ssize_t msm_pcie_debugfs_wr_mask(struct file *file,
- const char __user *buf,
- size_t count, loff_t *ppos)
- {
- int ret;
- wr_mask = 0;
- ret = msm_pcie_debugfs_parse_input(buf, count, &wr_mask);
- if (ret)
- return ret;
- pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
- return count;
- }
- static const struct file_operations msm_pcie_debugfs_wr_mask_ops = {
- .open = msm_pcie_debugfs_wr_mask_open,
- .release = single_release,
- .read = seq_read,
- .write = msm_pcie_debugfs_wr_mask,
- };
- static int msm_pcie_debugfs_wr_value_show(struct seq_file *m, void *v)
- {
- seq_printf(m, "0x%x\n", wr_value);
- return 0;
- }
- static int msm_pcie_debugfs_wr_value_open(struct inode *inode,
- struct file *file)
- {
- return single_open(file, msm_pcie_debugfs_wr_value_show, NULL);
- }
- static ssize_t msm_pcie_debugfs_wr_value(struct file *file,
- const char __user *buf,
- size_t count, loff_t *ppos)
- {
- int ret;
- wr_value = 0;
- ret = msm_pcie_debugfs_parse_input(buf, count, &wr_value);
- if (ret)
- return ret;
- pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
- return count;
- }
- static const struct file_operations msm_pcie_debugfs_wr_value_ops = {
- .open = msm_pcie_debugfs_wr_value_open,
- .release = single_release,
- .read = seq_read,
- .write = msm_pcie_debugfs_wr_value,
- };
/*
 * Set the boot_option field on every root complex selected by rc_sel.
 * Only values with bits 0/1 set (i.e. <= 0x3) are accepted; anything
 * larger is rejected with an error log and the write is ignored.
 */
static ssize_t msm_pcie_debugfs_boot_option(struct file *file,
					    const char __user *buf,
					    size_t count, loff_t *ppos)
{
	int i, ret;
	u32 new_boot_option = 0;

	ret = msm_pcie_debugfs_parse_input(buf, count, &new_boot_option);
	if (ret)
		return ret;

	if (new_boot_option <= (BIT(0) | BIT(1))) {
		/* Apply to each RC whose bit is set in the rc_sel mask */
		for (i = 0; i < MAX_RC_NUM; i++) {
			if (rc_sel & BIT(i)) {
				msm_pcie_dev[i].boot_option = new_boot_option;
				PCIE_DBG_FS(&msm_pcie_dev[i],
					"PCIe: RC%d: boot_option is now 0x%x\n",
					i, msm_pcie_dev[i].boot_option);
			}
		}
	} else {
		pr_err("PCIe: Invalid input for boot_option: 0x%x.\n",
			new_boot_option);
	}

	return count;
}

static const struct file_operations msm_pcie_debugfs_boot_option_ops = {
	.write = msm_pcie_debugfs_boot_option,
};
/*
 * Enable/disable AER reporting on every root complex selected by rc_sel.
 * Any non-zero input enables; zero disables.  BIT(16) of the bridge
 * control register is set when AER is disabled and cleared when enabled
 * (NOTE(review): presumably SERR-enable masking — confirm against the
 * controller's PCIE20_BRIDGE_CTRL register description).
 */
static ssize_t msm_pcie_debugfs_aer_enable(struct file *file,
					   const char __user *buf,
					   size_t count, loff_t *ppos)
{
	int i, ret;
	u32 new_aer_enable = 0;

	ret = msm_pcie_debugfs_parse_input(buf, count, &new_aer_enable);
	if (ret)
		return ret;

	/* Normalize to a boolean 0/1 */
	new_aer_enable = !!new_aer_enable;

	for (i = 0; i < MAX_RC_NUM; i++) {
		if (rc_sel & BIT(i)) {
			msm_pcie_dev[i].aer_enable = new_aer_enable;
			PCIE_DBG_FS(&msm_pcie_dev[i],
				"PCIe: RC%d: aer_enable is now %d\n",
				i, msm_pcie_dev[i].aer_enable);

			msm_pcie_write_mask(msm_pcie_dev[i].dm_core +
					PCIE20_BRIDGE_CTRL,
					new_aer_enable ? 0 : BIT(16),
					new_aer_enable ? BIT(16) : 0);

			PCIE_DBG_FS(&msm_pcie_dev[i],
				"RC%d: PCIE20_BRIDGE_CTRL: 0x%x\n", i,
				readl_relaxed(msm_pcie_dev[i].dm_core +
					PCIE20_BRIDGE_CTRL));
		}
	}

	return count;
}

static const struct file_operations msm_pcie_debugfs_aer_enable_ops = {
	.write = msm_pcie_debugfs_aer_enable,
};
/*
 * Set the global limit on reported correctable-error counts.  The old
 * value is cleared first so a parse failure leaves the limit at 0.
 */
static ssize_t msm_pcie_debugfs_corr_counter_limit(struct file *file,
						   const char __user *buf,
						   size_t count, loff_t *ppos)
{
	int ret;

	corr_counter_limit = 0;

	ret = msm_pcie_debugfs_parse_input(buf, count, &corr_counter_limit);
	if (ret)
		return ret;

	pr_info("PCIe: corr_counter_limit is now %u\n", corr_counter_limit);

	return count;
}

static const struct file_operations msm_pcie_debugfs_corr_counter_limit_ops = {
	.write = msm_pcie_debugfs_corr_counter_limit,
};
/*
 * Create the "pci-msm" debugfs directory and its control files.  On any
 * file-creation failure the whole directory tree is removed so a partial
 * interface is never exposed.  Failure here is non-fatal to the driver.
 */
static void msm_pcie_debugfs_init(void)
{
	/* All RC-select bits valid: one bit per root complex */
	rc_sel_max = (0x1 << MAX_RC_NUM) - 1;
	wr_mask = 0xffffffff;

	dent_msm_pcie = debugfs_create_dir("pci-msm", NULL);
	if (IS_ERR(dent_msm_pcie)) {
		pr_err("PCIe: fail to create the folder for debug_fs.\n");
		return;
	}

	dfile_rc_sel = debugfs_create_file("rc_sel", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_debugfs_rc_select_ops);
	if (!dfile_rc_sel || IS_ERR(dfile_rc_sel)) {
		pr_err("PCIe: fail to create the file for debug_fs rc_sel.\n");
		goto err;
	}

	dfile_case = debugfs_create_file("case", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_debugfs_case_ops);
	if (!dfile_case || IS_ERR(dfile_case)) {
		pr_err("PCIe: fail to create the file for debug_fs case.\n");
		goto err;
	}

	dfile_base_sel = debugfs_create_file("base_sel", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_debugfs_base_select_ops);
	if (!dfile_base_sel || IS_ERR(dfile_base_sel)) {
		pr_err("PCIe: fail to create the file for debug_fs base_sel.\n");
		goto err;
	}

	dfile_linkdown_panic = debugfs_create_file("linkdown_panic", 0644,
					dent_msm_pcie, NULL,
					&msm_pcie_debugfs_linkdown_panic_ops);
	if (!dfile_linkdown_panic || IS_ERR(dfile_linkdown_panic)) {
		pr_err("PCIe: fail to create the file for debug_fs linkdown_panic.\n");
		goto err;
	}

	dfile_wr_offset = debugfs_create_file("wr_offset", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_debugfs_wr_offset_ops);
	if (!dfile_wr_offset || IS_ERR(dfile_wr_offset)) {
		pr_err("PCIe: fail to create the file for debug_fs wr_offset.\n");
		goto err;
	}

	dfile_wr_mask = debugfs_create_file("wr_mask", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_debugfs_wr_mask_ops);
	if (!dfile_wr_mask || IS_ERR(dfile_wr_mask)) {
		pr_err("PCIe: fail to create the file for debug_fs wr_mask.\n");
		goto err;
	}

	dfile_wr_value = debugfs_create_file("wr_value", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_debugfs_wr_value_ops);
	if (!dfile_wr_value || IS_ERR(dfile_wr_value)) {
		pr_err("PCIe: fail to create the file for debug_fs wr_value.\n");
		goto err;
	}

	dfile_boot_option = debugfs_create_file("boot_option", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_debugfs_boot_option_ops);
	if (!dfile_boot_option || IS_ERR(dfile_boot_option)) {
		pr_err("PCIe: fail to create the file for debug_fs boot_option.\n");
		goto err;
	}

	dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
					dent_msm_pcie, NULL,
					&msm_pcie_debugfs_aer_enable_ops);
	if (!dfile_aer_enable || IS_ERR(dfile_aer_enable)) {
		pr_err("PCIe: fail to create the file for debug_fs aer_enable.\n");
		goto err;
	}

	dfile_corr_counter_limit = debugfs_create_file("corr_counter_limit",
					0664, dent_msm_pcie, NULL,
					&msm_pcie_debugfs_corr_counter_limit_ops);
	if (!dfile_corr_counter_limit || IS_ERR(dfile_corr_counter_limit)) {
		pr_err("PCIe: fail to create the file for debug_fs corr_counter_limit.\n");
		goto err;
	}

#ifdef CONFIG_SEC_PCIE_DEV
	pcie_sec_debugfs_init();
#endif
	return;

err:
	/* Tear down everything created so far, including the directory */
	debugfs_remove_recursive(dent_msm_pcie);
}
/* Remove the whole "pci-msm" debugfs tree created by msm_pcie_debugfs_init(). */
static void msm_pcie_debugfs_exit(void)
{
	debugfs_remove_recursive(dent_msm_pcie);
#ifdef CONFIG_SEC_PCIE_DEV
	pcie_sec_debugfs_exit();
#endif
}
#else
#ifdef CONFIG_SEC_PCIE_DEV
/* No-op stubs: debugfs support is compiled out. */
static void pcie_sec_debugfs_init(void)
{
}

static void pcie_sec_debugfs_exit(void)
{
}
#endif

/* No-op stubs: debugfs support is compiled out. */
static void msm_pcie_debugfs_init(void)
{
}

static void msm_pcie_debugfs_exit(void)
{
}
#endif
- static int msm_pcie_is_link_up(struct msm_pcie_dev_t *dev)
- {
- return readl_relaxed(dev->dm_core +
- PCIE20_CAP_LINKCTRLSTATUS) & BIT(29);
- }
/*
 * Enable or disable the link bandwidth-management interrupt.  When
 * enabling, legacy INTx and MSI delivery paths are adjusted first so the
 * event is routed through the PARF interrupt block, then LBMIE is set in
 * the endpoint-visible link control register.  Disabling reverses both.
 */
static void msm_pcie_config_bandwidth_int(struct msm_pcie_dev_t *dev,
					  bool enable)
{
	struct pci_dev *pci_dev = dev->dev;

	if (enable) {
		/* Clear INT_EN and PCI_MSI_ENABLE to receive interrupts */
		msm_pcie_write_mask(dev->dm_core + PCIE20_COMMAND_STATUS,
				BIT(10), 0);
		msm_pcie_write_mask(dev->dm_core +
				PCIE20_PCI_MSI_CAP_ID_NEXT_CTRL_REG,
				BIT(16), 0);
		/* Unmask the BW management event in the PARF block */
		msm_pcie_write_reg_field(dev->parf, PCIE20_PARF_INT_ALL_2_MASK,
				MSM_PCIE_BW_MGT_INT_STATUS, 1);
		/* Set Link Bandwidth Management Interrupt Enable */
		msm_pcie_config_clear_set_dword(pci_dev,
				pci_dev->pcie_cap + PCI_EXP_LNKCTL,
				0, PCI_EXP_LNKCTL_LBMIE);
	} else {
		msm_pcie_write_reg_field(dev->parf, PCIE20_PARF_INT_ALL_2_MASK,
				MSM_PCIE_BW_MGT_INT_STATUS, 0);
		msm_pcie_config_clear_set_dword(pci_dev,
				pci_dev->pcie_cap + PCI_EXP_LNKCTL,
				PCI_EXP_LNKCTL_LBMIE, 0);
	}
}
/*
 * Acknowledge a pending bandwidth-management interrupt: first disable
 * LBMIE in the link control register, then clear the latched status bit
 * in the PARF interrupt block.
 */
static void msm_pcie_clear_bandwidth_int_status(struct msm_pcie_dev_t *dev)
{
	struct pci_dev *pci_dev = dev->dev;

	msm_pcie_config_clear_set_dword(pci_dev,
			pci_dev->pcie_cap + PCI_EXP_LNKCTL,
			PCI_EXP_LNKCTL_LBMIE, 0);

	msm_pcie_write_reg_field(dev->parf, PCIE20_PARF_INT_ALL_2_CLEAR,
			MSM_PCIE_BW_MGT_INT_STATUS, 1);
}
- static bool msm_pcie_check_ltssm_state(struct msm_pcie_dev_t *dev, u32 state)
- {
- u32 ltssm;
- ltssm = readl_relaxed(dev->parf + PCIE20_PARF_LTSSM) &
- MSM_PCIE_LTSSM_MASK;
- if (ltssm == state)
- return true;
- return false;
- }
- void msm_pcie_clk_dump(struct msm_pcie_dev_t *pcie_dev)
- {
- struct msm_pcie_clk_info_t *clk_info;
- int i;
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: Dump PCIe clocks\n",
- pcie_dev->rc_idx);
- clk_info = pcie_dev->clk;
- for (i = 0; i < pcie_dev->num_clk; i++, clk_info++) {
- if (clk_info->hdl)
- qcom_clk_dump(clk_info->hdl, NULL, 0);
- }
- clk_info = pcie_dev->pipe_clk;
- for (i = 0; i < pcie_dev->num_pipe_clk; i++, clk_info++) {
- if (clk_info->hdl)
- qcom_clk_dump(clk_info->hdl, NULL, 0);
- }
- }
/**
 * msm_pcie_iatu_config - configure outbound address translation region
 * @dev: root complex
 * @nr: region number
 * @type: target transaction type, see PCIE20_CTRL1_TYPE_xxx
 * @host_addr: - region start address on host
 * @host_end: - region end address (low 32 bit) on host,
 *	upper 32 bits are same as for @host_addr
 * @bdf: - bus:device:function
 */
static void msm_pcie_iatu_config(struct msm_pcie_dev_t *dev, int nr, u8 type,
				unsigned long host_addr, u32 host_end,
				u32 bdf)
{
	/* Newer controllers expose a dedicated iATU space; older ones use
	 * viewport-indexed registers inside the DBI (dm_core) space.
	 */
	void __iomem *iatu_base = dev->iatu ? dev->iatu : dev->dm_core;

	u32 iatu_viewport_offset;
	u32 iatu_ctrl1_offset;
	u32 iatu_ctrl2_offset;
	u32 iatu_lbar_offset;
	u32 iatu_ubar_offset;
	u32 iatu_lar_offset;
	u32 iatu_ltar_offset;
	u32 iatu_utar_offset;

	/* configure iATU only for endpoints */
	if (!bdf)
		return;

	if (dev->iatu) {
		/* Unrolled iATU space: per-region register offsets */
		iatu_viewport_offset = 0;
		iatu_ctrl1_offset = PCIE_IATU_CTRL1(nr);
		iatu_ctrl2_offset = PCIE_IATU_CTRL2(nr);
		iatu_lbar_offset = PCIE_IATU_LBAR(nr);
		iatu_ubar_offset = PCIE_IATU_UBAR(nr);
		iatu_lar_offset = PCIE_IATU_LAR(nr);
		iatu_ltar_offset = PCIE_IATU_LTAR(nr);
		iatu_utar_offset = PCIE_IATU_UTAR(nr);
	} else {
		/* Viewport-based access through the PLR registers */
		iatu_viewport_offset = PCIE20_PLR_IATU_VIEWPORT;
		iatu_ctrl1_offset = PCIE20_PLR_IATU_CTRL1;
		iatu_ctrl2_offset = PCIE20_PLR_IATU_CTRL2;
		iatu_lbar_offset = PCIE20_PLR_IATU_LBAR;
		iatu_ubar_offset = PCIE20_PLR_IATU_UBAR;
		iatu_lar_offset = PCIE20_PLR_IATU_LAR;
		iatu_ltar_offset = PCIE20_PLR_IATU_LTAR;
		iatu_utar_offset = PCIE20_PLR_IATU_UTAR;
	}

	/* select region */
	if (iatu_viewport_offset)
		msm_pcie_write_reg(iatu_base, iatu_viewport_offset, nr);

	/* switch off region before changing it */
	msm_pcie_write_reg(iatu_base, iatu_ctrl2_offset, 0);

	msm_pcie_write_reg(iatu_base, iatu_ctrl1_offset, type);
	msm_pcie_write_reg(iatu_base, iatu_lbar_offset,
				lower_32_bits(host_addr));
	msm_pcie_write_reg(iatu_base, iatu_ubar_offset,
				upper_32_bits(host_addr));
	msm_pcie_write_reg(iatu_base, iatu_lar_offset, host_end);
	/* Target address encodes the bus:device:function of the endpoint */
	msm_pcie_write_reg(iatu_base, iatu_ltar_offset, lower_32_bits(bdf));
	msm_pcie_write_reg(iatu_base, iatu_utar_offset, 0);

	/* BIT(31) re-enables the region with the new programming */
	msm_pcie_write_reg(iatu_base, iatu_ctrl2_offset, BIT(31));
}
/**
 * msm_pcie_cfg_bdf - configure for config access
 * @dev: root complex
 * @bus: PCI bus number
 * @devfn: PCI dev and function number
 *
 * Remap if required region 0 for config access of proper type
 * (CFG0 for bus 1, CFG1 for other buses)
 * Cache current device bdf for speed-up
 */
static void msm_pcie_cfg_bdf(struct msm_pcie_dev_t *dev, u8 bus, u8 devfn)
{
	struct resource *axi_conf = dev->res[MSM_PCIE_RES_CONF].resource;
	u32 bdf = BDF_OFFSET(bus, devfn);
	u8 type = bus == 1 ? PCIE20_CTRL1_TYPE_CFG0 : PCIE20_CTRL1_TYPE_CFG1;

	/* iATU already points at this device; nothing to do */
	if (dev->current_bdf == bdf)
		return;

	/* Map a 4K config window at the AXI config aperture to this BDF */
	msm_pcie_iatu_config(dev, 0, type,
			axi_conf->start,
			axi_conf->start + SZ_4K - 1,
			bdf);

	dev->current_bdf = bdf;
}
/*
 * Perform a PCI config-space read (@oper == RD) or write through the
 * controller.  Bus 0 accesses target the root complex's own DBI space;
 * other buses go through the remapped AXI config window.  All access is
 * serialized under dev->cfg_lock, and a number of guards reject the
 * access when the link or device state makes it unsafe.
 */
static int msm_pcie_oper_conf(struct pci_bus *bus, u32 devfn, int oper,
			      int where, int size, u32 *val)
{
	uint32_t word_offset, byte_offset, mask;
	uint32_t rd_val, wr_val;
	struct msm_pcie_dev_t *dev;
	void __iomem *config_base;
	bool rc = false;
	u32 rc_idx, *filtered_bdf;
	int rv = 0, i;
	u32 bdf = BDF_OFFSET(bus->number, devfn);

	dev = PCIE_BUS_PRIV_DATA(bus);
	if (!dev) {
		pr_err("PCIe: No device found for this bus.\n");
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto out;
	}

	rc_idx = dev->rc_idx;
	/* Bus 0 is the root complex itself */
	rc = (bus->number == 0);

	spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);

	/* Config access may be suspended (e.g. during link recovery) */
	if (!dev->cfg_access) {
		PCIE_DBG3(dev,
			"Access denied for RC%d %d:0x%02x + 0x%04x[%d]\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* The root complex only exists at devfn 0 */
	if (rc && (devfn != 0)) {
		PCIE_DBG3(dev, "RC%d invalid %s - bus %d devfn %d\n", rc_idx,
			(oper == RD) ? "rd" : "wr", bus->number, devfn);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
		PCIE_DBG3(dev,
			"Access to RC%d %d:0x%02x + 0x%04x[%d] is denied because link is down\n",
			rc_idx, bus->number, devfn, where, size);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* check if the link is up for endpoint */
	if (!rc && !msm_pcie_is_link_up(dev)) {
		PCIE_ERR(dev,
			"PCIe: RC%d %s fail, link down - bus %d devfn %d\n",
			rc_idx, (oper == RD) ? "rd" : "wr",
			bus->number, devfn);
		*val = ~0;
		rv = PCIBIOS_DEVICE_NOT_FOUND;
		goto unlock;
	}

	/* 32-bit BDF filtering */
	if (dev->bdf_count) {
		i = dev->bdf_count;
		filtered_bdf = dev->filtered_bdfs;
		while (i--) {
			if (*filtered_bdf == bdf) {
				/* Filtered devices read as all-ones */
				*val = ~0;
				goto unlock;
			}
			filtered_bdf++;
		}
	}

	/* Point the iATU config window at the target endpoint */
	if (!rc)
		msm_pcie_cfg_bdf(dev, bus->number, devfn);

	/* Split the access into an aligned 32-bit word + byte lane mask */
	word_offset = where & ~0x3;
	byte_offset = where & 0x3;
	mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset);

	config_base = rc ? dev->dm_core : dev->conf;

	/* Read-modify-write granularity is always one 32-bit word */
	rd_val = readl_relaxed(config_base + word_offset);

	if (oper == RD) {
		*val = ((rd_val & mask) >> (8 * byte_offset));
		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] -> 0x%08x; rd 0x%08x\n",
			rc_idx, bus->number, devfn, where, size, *val, rd_val);
	} else {
		wr_val = (rd_val & ~mask) |
			((*val << (8 * byte_offset)) & mask);

		/* NOTE(review): forces bits 16-17 on RC writes to offset
		 * 0x3c — presumably a controller quirk; confirm intent.
		 */
		if ((bus->number == 0) && (where == 0x3c))
			wr_val = wr_val | (3 << 16);

		msm_pcie_write_reg(config_base, word_offset, wr_val);

		PCIE_DBG3(dev,
			"RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
			rc_idx, bus->number, devfn, where, size,
			wr_val, rd_val, *val);
	}

	/* An all-ones read plus an all-ones read at offset 0 suggests the
	 * link dropped mid-access; kick off recovery if configured.
	 */
	if (rd_val == PCIE_LINK_DOWN &&
	   (readl_relaxed(config_base) == PCIE_LINK_DOWN)) {
		if (dev->config_recovery) {
			PCIE_ERR(dev,
				"RC%d link recovery schedule\n",
				rc_idx);
			dev->cfg_access = false;
			schedule_work(&dev->link_recover_wq);
		}
	}

unlock:
	spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
out:
	return rv;
}
/*
 * pci_ops read hook.  For the root complex's class/revision register the
 * class code is overridden to PCI-to-PCI bridge so the PCI core treats
 * the RC as a standard bridge.
 */
static int msm_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			    int size, u32 *val)
{
	int ret = msm_pcie_oper_conf(bus, devfn, RD, where, size, val);

	if ((bus->number == 0) && (where == PCI_CLASS_REVISION))
		*val = (*val & 0xff) | (PCI_CLASS_BRIDGE_PCI << 16);

	return ret;
}
/* pci_ops write hook: thin wrapper over the common config accessor. */
static int msm_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			    int where, int size, u32 val)
{
	return msm_pcie_oper_conf(bus, devfn, WR, where, size, &val);
}
/* Config-space accessors registered with the PCI core. */
static struct pci_ops msm_pcie_ops = {
	.read = msm_pcie_rd_conf,
	.write = msm_pcie_wr_conf,
};
/* This function will load the instruction sequence to pcie state manager */
static void msm_pcie_cesta_load_sm_seq(struct msm_pcie_dev_t *dev)
{
	int i = 0;
	struct msm_pcie_sm_info *sm_info = dev->sm_info;

	/* Remove the PWR_CTRL Overrides set for this pcie instance */
	msm_pcie_write_reg(dev->pcie_sm,
		pcie_sm_regs[PCIE_SM_PWR_CTRL_OFFSET] +
		(dev->rc_idx * pcie_sm_regs[PCIE_SM_PWR_INSTANCE_OFFSET]),
		0x0);

	/* Remove the PWR_CTRL_MASK Overrides set for this pcie instance */
	msm_pcie_write_reg(dev->pcie_sm,
		pcie_sm_regs[PCIE_SM_PWR_MASK_OFFSET] +
		(dev->rc_idx * pcie_sm_regs[PCIE_SM_PWR_INSTANCE_OFFSET]),
		0x0);

	/* Loading the pcie state manager sequence (one u32 per word slot) */
	for (i = 0; i < sm_info->sm_seq_len; i++) {
		PCIE_DBG(dev, "sm seq val 0x%x\n", sm_info->sm_seq[i]);
		msm_pcie_write_reg(dev->pcie_sm, 4*i, sm_info->sm_seq[i]);
	}

	/* Loading the pcie state manager branch sequence */
	for (i = 0; i < sm_info->sm_branch_len; i++) {
		PCIE_DBG(dev, "branch seq val 0x%x\n", sm_info->branch_seq[i]);
		msm_pcie_write_reg(dev->pcie_sm, sm_info->branch_offset + 4*i,
				sm_info->branch_seq[i]);
	}

	/* Enable the pcie state manager once the sequence is loaded */
	msm_pcie_write_reg_field(dev->pcie_sm, sm_info->start_offset,
			BIT(0), 1);
}
- /* This function will get the pcie state manager sequence from DT node */
- static int msm_pcie_cesta_get_sm_seq(struct msm_pcie_dev_t *dev)
- {
- int ret, size = 0;
- struct platform_device *pdev = dev->pdev;
- struct msm_pcie_sm_info *sm_info;
- of_get_property(pdev->dev.of_node, "qcom,pcie-sm-seq", &size);
- if (!size) {
- PCIE_DBG(dev,
- "PCIe: RC%d: state manager seq is not present in DT\n",
- dev->rc_idx);
- return -EIO;
- }
- sm_info = devm_kzalloc(&pdev->dev, sizeof(struct msm_pcie_sm_info),
- GFP_KERNEL);
- if (!sm_info)
- return -ENOMEM;
- sm_info->sm_seq_len = size / sizeof(u32);
- sm_info->sm_seq = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!sm_info->sm_seq)
- return -ENOMEM;
- ret = of_property_read_u32_array(pdev->dev.of_node,
- "qcom,pcie-sm-seq", sm_info->sm_seq,
- sm_info->sm_seq_len);
- if (ret)
- return -EIO;
- ret = of_property_read_u32(pdev->dev.of_node,
- "qcom,pcie-sm-branch-offset", &sm_info->branch_offset);
- if (ret)
- return -EIO;
- ret = of_property_read_u32(pdev->dev.of_node,
- "qcom,pcie-sm-start-offset", &sm_info->start_offset);
- if (ret)
- return -EIO;
- of_get_property(pdev->dev.of_node, "qcom,pcie-sm-branch-seq", &size);
- if (!size) {
- PCIE_DBG(dev,
- "PCIe: RC%d: sm branch seq is not present in DT\n",
- dev->rc_idx);
- return -EIO;
- }
- sm_info->sm_branch_len = size / sizeof(u32);
- sm_info->branch_seq = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!sm_info->branch_seq)
- return -ENOMEM;
- ret = of_property_read_u32_array(pdev->dev.of_node,
- "qcom,pcie-sm-branch-seq", sm_info->branch_seq,
- sm_info->sm_branch_len);
- if (ret)
- return -EIO;
- of_get_property(pdev->dev.of_node, "qcom,pcie-sm-debug", &size);
- if (!size) {
- PCIE_DBG(dev,
- "PCIe: RC%d: sm debugs regs are not present in DT\n",
- dev->rc_idx);
- goto out;
- }
- sm_info->reg_dump_len = size / sizeof(u32);
- sm_info->reg_dump = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!sm_info->reg_dump)
- return -ENOMEM;
- ret = of_property_read_u32_array(pdev->dev.of_node,
- "qcom,pcie-sm-debug", sm_info->reg_dump,
- sm_info->reg_dump_len);
- if (ret)
- sm_info->reg_dump_len = 0;
- out:
- dev->sm_info = sm_info;
- return 0;
- }
/*
 * Arm the l1ss sleep timeout so that pcie controller can send the
 * l1ss TO signal to pcie state manager and state manager can further
 * go into l1ss sleep state to turn off the resources.
 */
static void msm_pcie_cesta_enable_l1ss_to(struct msm_pcie_dev_t *dev)
{
	u32 val;

	/* Reset the timer first, then program reset-bit + tick count */
	msm_pcie_write_reg(dev->parf, PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER,
			PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER_RESET);

	/* Timeout is converted from usec to aux-clock ticks */
	val = PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER_RESET |
		L1SS_TIMEOUT_US_TO_TICKS(dev->l1ss_timeout_us,
					dev->aux_clk_freq);
	msm_pcie_write_reg(dev->parf, PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER,
			val);
}
/* Disable L1ss timeout timer by clearing the max-timer register. */
static void msm_pcie_cesta_disable_l1ss_to(struct msm_pcie_dev_t *dev)
{
	msm_pcie_write_reg(dev->parf, PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER, 0);
}
/*
 * Read the current perf operating level from the CESTA CRM driver and
 * return its human-readable name.  On read error or an out-of-range
 * value, the level is clamped to MAX_PERF_LVL so the lookup stays in
 * bounds.
 */
static const char *const msm_pcie_cesta_curr_perf_ol(struct msm_pcie_dev_t *dev)
{
	u32 ret;
	int res;

	res = crm_read_curr_perf_ol("pcie_crm", dev->rc_idx, &ret);
	if (res) {
		PCIE_ERR(dev, "PCIE: RC:%d Error getting curr_perf_ol %d\n",
			dev->rc_idx, res);
		ret = MAX_PERF_LVL;
	}

	if (ret > MAX_PERF_LVL)
		ret = MAX_PERF_LVL;

	return msm_pcie_cesta_curr_perf_lvl[ret];
}
/*
 * This function is used for configuring the CESTA power state
 * to the perf level mapping based on the Gen speed provided in
 * the argument
 */
static void msm_pcie_cesta_map_save(int gen_speed)
{
	/* Gen1 speed is equal to perf level 2 (offset by PERF_LVL_L1SS) */
	gen_speed += PERF_LVL_L1SS;

	msm_pcie_cesta_map[D0_STATE][POWER_STATE_0] = gen_speed;
	msm_pcie_cesta_map[D0_STATE][POWER_STATE_1] = gen_speed;
	msm_pcie_cesta_map[DRV_STATE][POWER_STATE_1] = gen_speed;
}
/*
 * Apply the cesta power state <--> perf ol mapping using the
 * crm driver APIs.
 *
 * No-op (returns 0) when the state manager is not present.  Writes one
 * perf-ol vote per HW power state, then commits them with
 * crm_write_pwr_states().  Returns the first CRM error encountered.
 */
static int msm_pcie_cesta_map_apply(struct msm_pcie_dev_t *dev, u32 cesta_st)
{
	int ret = 0;
	struct crm_cmd cmd;
	u32 pwr_st;

	if (!dev->pcie_sm)
		return 0;

	PCIE_DBG(dev, "Current perf ol is %s\n",
		msm_pcie_cesta_curr_perf_ol(dev));

	PCIE_DBG(dev, "Setting the scenario to %s and perf_idx %d\n",
		msm_pcie_cesta_states[cesta_st],
		msm_pcie_cesta_map[cesta_st][POWER_STATE_1]);

	/* One vote per hardware power state for this RC's resource */
	for (pwr_st = 0; pwr_st < MAX_POWER_STATE; pwr_st++) {
		cmd.pwr_state.hw = pwr_st;
		cmd.resource_idx = dev->rc_idx;
		cmd.data = msm_pcie_cesta_map[cesta_st][pwr_st];

		ret = crm_write_perf_ol(dev->crm_dev, CRM_HW_DRV, dev->rc_idx,
					&cmd);
		if (ret) {
			PCIE_DBG(dev, "PCIe: RC%d: pwr_st %d perf_ol %d\n",
				dev->rc_idx, pwr_st, ret);
			return ret;
		}
	}

	/* Commit the votes so they take effect at the HW level */
	ret = crm_write_pwr_states(dev->crm_dev, dev->rc_idx);
	if (ret) {
		PCIE_DBG(dev, "PCIe: RC%d: pwr_st %d pwr_states %d\n",
			dev->rc_idx, pwr_st, ret);
		return ret;
	}

	PCIE_DBG(dev, "New perf ol is %s\n",
		msm_pcie_cesta_curr_perf_ol(dev));

	return 0;
}
/*
 * This function will cause the entry into drv state by
 * configuring CESTA to drv state
 */
static void msm_pcie_cesta_enable_drv(struct msm_pcie_dev_t *dev,
				      bool enable_to)
{
	int ret;

	if (!dev->pcie_sm)
		return;

	/* Optionally arm the L1ss sleep timeout before handing off */
	if (enable_to)
		msm_pcie_cesta_enable_l1ss_to(dev);

	/*
	 * Use CLKREQ as wake up capable gpio so that when APPS
	 * is in sleep CESTA block can still get the CLKREQ
	 * assertion event.
	 */
	ret = msm_gpio_mpm_wake_set(dev->clkreq_gpio, true);
	if (ret)
		PCIE_ERR(dev, "Failed to make clkreq wakeup capable%d\n", ret);

	ret = pcie_pdc_cfg_irq(dev->clkreq_gpio, IRQ_TYPE_EDGE_FALLING, true);
	if (ret)
		PCIE_ERR(dev, "Failed to make clkreq pdc wakeup capable%d\n", ret);

	/* Use CESTA to manage the resources in DRV state */
	ret = msm_pcie_cesta_map_apply(dev, DRV_STATE);
	if (ret)
		PCIE_ERR(dev, "Failed to move to DRV state %d\n", ret);
}
/*
 * This function will configure CESTA to move to D0 state
 * from the drv state
 */
static void msm_pcie_cesta_disable_drv(struct msm_pcie_dev_t *dev)
{
	int ret;

	if (!dev->pcie_sm)
		return;

	/* Use CESTA to turn on the resources into D0 state from DRV state*/
	ret = msm_pcie_cesta_map_apply(dev, D0_STATE);
	if (ret)
		PCIE_ERR(dev, "Failed to move to D0 State %d\n", ret);

	/* Timeout no longer needed once we are back in D0 */
	msm_pcie_cesta_disable_l1ss_to(dev);

	/* Remove CLKREQ as wake up capable gpio */
	ret = msm_gpio_mpm_wake_set(dev->clkreq_gpio, false);
	if (ret)
		PCIE_ERR(dev, "Fail to remove clkreq wakeup capable%d\n", ret);

	ret = pcie_pdc_cfg_irq(dev->clkreq_gpio, IRQ_TYPE_EDGE_FALLING, false);
	if (ret)
		PCIE_ERR(dev, "Fail to remove clkreq pdc wakeup capable%d\n", ret);
}
- static int msm_pcie_gpio_init(struct msm_pcie_dev_t *dev)
- {
- int rc = 0, i;
- struct msm_pcie_gpio_info_t *info;
- PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
- for (i = 0; i < dev->gpio_n; i++) {
- info = &dev->gpio[i];
- if (!info->num)
- continue;
- rc = gpio_request(info->num, info->name);
- if (rc) {
- PCIE_ERR(dev, "PCIe: RC%d can't get gpio %s; %d\n",
- dev->rc_idx, info->name, rc);
- break;
- }
- if (info->out)
- rc = gpio_direction_output(info->num, info->init);
- else
- rc = gpio_direction_input(info->num);
- if (rc) {
- PCIE_ERR(dev,
- "PCIe: RC%d can't set direction for GPIO %s:%d\n",
- dev->rc_idx, info->name, rc);
- gpio_free(info->num);
- break;
- }
- }
- if (rc)
- while (i--)
- gpio_free(dev->gpio[i].num);
- return rc;
- }
- static void msm_pcie_gpio_deinit(struct msm_pcie_dev_t *dev)
- {
- int i;
- PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
- for (i = 0; i < dev->gpio_n; i++)
- gpio_free(dev->gpio[i].num);
- }
- static int msm_pcie_vreg_init(struct msm_pcie_dev_t *dev)
- {
- int i, rc = 0;
- struct regulator *vreg;
- struct msm_pcie_vreg_info_t *info;
- PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
- for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
- info = &dev->vreg[i];
- vreg = info->hdl;
- if (!vreg)
- continue;
- PCIE_DBG2(dev, "RC%d Vreg %s is being enabled\n",
- dev->rc_idx, info->name);
- if (info->max_v) {
- rc = regulator_set_voltage(vreg,
- info->min_v, info->max_v);
- if (rc) {
- PCIE_ERR(dev,
- "PCIe: RC%d can't set voltage for %s: %d\n",
- dev->rc_idx, info->name, rc);
- break;
- }
- }
- if (info->opt_mode) {
- rc = regulator_set_load(vreg, info->opt_mode);
- if (rc < 0) {
- PCIE_ERR(dev,
- "PCIe: RC%d can't set mode for %s: %d\n",
- dev->rc_idx, info->name, rc);
- break;
- }
- }
- rc = regulator_enable(vreg);
- if (rc) {
- PCIE_ERR(dev,
- "PCIe: RC%d can't enable regulator %s: %d\n",
- dev->rc_idx, info->name, rc);
- break;
- }
- }
- if (rc)
- while (i--) {
- struct regulator *hdl = dev->vreg[i].hdl;
- if (hdl) {
- regulator_disable(hdl);
- if (!strcmp(dev->vreg[i].name, "vreg-cx") ||
- !strcmp(dev->vreg[i].name, "vreg-mx")) {
- PCIE_DBG(dev,
- "RC%d: Removing %s vote.\n",
- dev->rc_idx,
- dev->vreg[i].name);
- regulator_set_voltage(hdl,
- RPMH_REGULATOR_LEVEL_RETENTION,
- RPMH_REGULATOR_LEVEL_MAX);
- }
- if (dev->vreg[i].opt_mode) {
- rc = regulator_set_load(hdl, 0);
- if (rc < 0)
- PCIE_ERR(dev,
- "PCIe: RC%d can't set mode for %s: %d\n",
- dev->rc_idx,
- dev->vreg[i].name, rc);
- }
- }
- }
- PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
- return rc;
- }
/*
 * Re-enable every analog voltage rail except "vreg-3p3".  Failures are
 * logged per-rail but not propagated (best-effort).
 */
static void msm_pcie_vreg_init_analog_rails(struct msm_pcie_dev_t *dev)
{
	int i, rc;

	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		if (dev->vreg[i].hdl) {
			/*
			 * Enable all the voltage regulators except the 3p3 regulator,
			 * as 3p3 is main power supply for some endpoints like NVMe.
			 */
			if (strcmp(dev->vreg[i].name, "vreg-3p3")) {
				PCIE_DBG(dev, "Vreg %s is being enabled\n",
					dev->vreg[i].name);
				rc = regulator_enable(dev->vreg[i].hdl);
				if (rc) {
					PCIE_ERR(dev,
						"PCIe: RC%d can't enable regulator %s: %d\n",
						dev->rc_idx, dev->vreg[i].name, rc);
				}
			}
		}
	}
}
/*
 * Disable every analog voltage rail except "vreg-3p3", in reverse order
 * of msm_pcie_vreg_init_analog_rails().
 */
static void msm_pcie_vreg_deinit_analog_rails(struct msm_pcie_dev_t *dev)
{
	int i;

	for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
		if (dev->vreg[i].hdl) {
			/*
			 * Disable all the voltage regulators except the 3p3 regulator,
			 * as 3p3 is main power supply for some endpoints like NVMe.
			 */
			if (strcmp(dev->vreg[i].name, "vreg-3p3")) {
				PCIE_DBG(dev, "Vreg %s is being disabled\n",
					dev->vreg[i].name);
				regulator_disable(dev->vreg[i].hdl);
			}
		}
	}
}
/*
 * Disable all regulators in reverse order of msm_pcie_vreg_init().
 * For the cx/mx rails the voltage vote is also dropped to retention
 * level, and any load vote set at init time is removed.
 */
static void msm_pcie_vreg_deinit(struct msm_pcie_dev_t *dev)
{
	int i, ret;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = MSM_PCIE_MAX_VREG - 1; i >= 0; i--) {
		if (dev->vreg[i].hdl) {
			PCIE_DBG(dev, "Vreg %s is being disabled\n",
				dev->vreg[i].name);
			regulator_disable(dev->vreg[i].hdl);

			if (!strcmp(dev->vreg[i].name, "vreg-cx") ||
				!strcmp(dev->vreg[i].name, "vreg-mx")) {
				PCIE_DBG(dev,
					"RC%d: Removing %s vote.\n",
					dev->rc_idx,
					dev->vreg[i].name);
				regulator_set_voltage(dev->vreg[i].hdl,
					RPMH_REGULATOR_LEVEL_RETENTION,
					RPMH_REGULATOR_LEVEL_MAX);
			}

			if (dev->vreg[i].opt_mode) {
				ret = regulator_set_load(dev->vreg[i].hdl, 0);
				if (ret < 0)
					PCIE_ERR(dev,
						"PCIe: RC%d can't set mode for %s: %d\n",
						dev->rc_idx, dev->vreg[i].name,
						ret);
			}
		}
	}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
}
- /* This function will initialize gdsc core and gdsc phy regulators */
- static int msm_pcie_gdsc_init(struct msm_pcie_dev_t *dev)
- {
- int rc = 0;
- PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
- if (dev->gdsc_core) {
- rc = regulator_enable(dev->gdsc_core);
- if (rc) {
- PCIE_ERR(dev,
- "PCIe: fail to enable GDSC-CORE for RC%d (%s)\n",
- dev->rc_idx, dev->pdev->name);
- return rc;
- }
- }
- if (dev->gdsc_phy) {
- rc = regulator_enable(dev->gdsc_phy);
- if (rc) {
- PCIE_ERR(dev,
- "PCIe: fail to enable GDSC-PHY for RC%d (%s)\n",
- dev->rc_idx, dev->pdev->name);
- return rc;
- }
- }
- PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
- return 0;
- }
/* This function will de-initialize gdsc core and gdsc phy regulators */
static int msm_pcie_gdsc_deinit(struct msm_pcie_dev_t *dev)
{
	int rc = 0;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	if (dev->gdsc_core) {
		rc = regulator_disable(dev->gdsc_core);
		if (rc) {
			PCIE_ERR(dev,
				"PCIe:RC%d fail to disable GDSC-CORE (%s)\n",
				dev->rc_idx, dev->pdev->name);
			return rc;
		}
	}

	if (dev->gdsc_phy) {
		rc = regulator_disable(dev->gdsc_phy);
		if (rc) {
			PCIE_ERR(dev,
				"PCIe:RC%d fail to disable GDSC-PHY (%s)\n",
				dev->rc_idx, dev->pdev->name);
			return rc;
		}
	}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return 0;
}
/*
 * Pulse (assert, wait ~1ms, de-assert) every controller/PHY reset line.
 * Individual failures are logged but iteration continues; the return
 * value is the result of the last reset operation attempted.
 */
static int msm_pcie_core_phy_reset(struct msm_pcie_dev_t *dev)
{
	int i, rc = 0;
	struct msm_pcie_reset_info_t *reset_info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
		reset_info = &dev->reset[i];
		if (reset_info->hdl) {
			rc = reset_control_assert(reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to assert reset for %s.\n",
					dev->rc_idx, reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully asserted reset for %s.\n",
					dev->rc_idx, reset_info->name);

			/* add a 1ms delay to ensure the reset is asserted */
			usleep_range(1000, 1005);

			rc = reset_control_deassert(reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to deassert reset for %s.\n",
					dev->rc_idx, reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully deasserted reset for %s.\n",
					dev->rc_idx, reset_info->name);
		}
	}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
/*
 * Place an interconnect bandwidth vote sized for the given link @speed
 * (gen, 1-5) and @width (lane count).  @speed == 0 votes for zero
 * bandwidth (link going down).  When CESTA manages the link,
 * @drv_state selects which icc power-state tags the vote applies to and
 * the vote is committed via crm_write_pwr_states().
 * Bandwidth values are per-lane (width multiplies them).
 */
static int msm_pcie_icc_vote(struct msm_pcie_dev_t *dev, u8 speed,
			     u8 width, bool drv_state)
{
	u32 bw;
	int rc = 0;
	u32 icc_tags;

	if (dev->icc_path) {
		icc_tags = drv_state ? QCOM_ICC_TAG_PWR_ST_1 :
				QCOM_ICC_TAG_PWR_ST_0 | QCOM_ICC_TAG_PWR_ST_1;

		/*
		 * This API icc_set_tag() call is needed when CESTA is enabled.
		 * Instead of pcie driver settiing up the icc bandwidth votes
		 * for different power states of CESTA, icc driver will take
		 * care of it when we call icc_set_tag API.
		 */
		if (dev->pcie_sm)
			icc_set_tag(dev->icc_path, icc_tags);

		/* Per-lane average bandwidth by link generation */
		switch (speed) {
		case 1:
			bw = 250000; /* avg bw / AB: 2.5 GBps, peak bw / IB: no vote */
			break;
		case 2:
			bw = 500000; /* avg bw / AB: 5 GBps, peak bw / IB: no vote */
			break;
		case 3:
			bw = 1000000; /* avg bw / AB: 8 GBps, peak bw / IB: no vote */
			break;
		case 4:
			bw = 2000000; /* avg bw / AB: 16 GBps, peak bw / IB: no vote */
			break;
		case 5:
			bw = 4000000; /* avg bw / AB: 32 GBps, peak bw / IB: no vote */
			break;
		default:
			bw = 0;
			break;
		}

		if (speed == 0) {
			/* Speed == 0 implies to vote for '0' bandwidth. */
			rc = icc_set_bw(dev->icc_path, 0, 0);
		} else {
			/*
			 * If there is no icc voting from the client driver then vote for icc
			 * bandwidth is based up on link speed and width or vote for average
			 * icc bandwidth.
			 */
			if (dev->no_client_based_bw_voting)
				rc = icc_set_bw(dev->icc_path, width * bw, 0);
			else
				rc = icc_set_bw(dev->icc_path, ICC_AVG_BW, ICC_PEAK_BW);
		}

		if (rc)
			PCIE_ERR(dev,
				"PCIe: RC%d: failed to put the ICC vote %d.\n",
				dev->rc_idx, rc);
		else
			PCIE_DBG(dev,
				"PCIe: RC%d: ICC vote successful\n",
				dev->rc_idx);

		/*
		 * When PCIe-CESTA is enabled, need to explicitly call
		 * crm_write_pwr_states() API so that the icc votes are
		 * reflected at the HW level.
		 */
		if (dev->pcie_sm) {
			rc = crm_write_pwr_states(dev->crm_dev, dev->rc_idx);
			if (rc) {
				PCIE_DBG(dev, "PCIe: RC%d: pwr_states %d\n",
					dev->rc_idx, rc);
			}
		}
	}

	return rc;
}
/*
 * Switch the pipe clock to its external source, vote minimal (Gen1 x1)
 * interconnect bandwidth, then set rates and enable every core clock.
 * On failure, clocks enabled so far are disabled and the pipe clock mux
 * is parked back on XO before gdsc-core goes down.
 */
static int msm_pcie_clk_init(struct msm_pcie_dev_t *dev)
{
	int i, rc = 0;
	struct msm_pcie_clk_info_t *info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	/* switch pipe clock source after gdsc-core is turned on */
	if (dev->pipe_clk_mux && dev->pipe_clk_ext_src)
		clk_set_parent(dev->pipe_clk_mux, dev->pipe_clk_ext_src);

	/* vote with GEN1x1 before link up */
	rc = msm_pcie_icc_vote(dev, GEN1_SPEED, LINK_WIDTH_X1, false);
	if (rc)
		return rc;

	for (i = 0; i < dev->num_clk; i++) {
		info = &dev->clk[i];

		if (!info->hdl)
			continue;

		/* Clocks with a DT-specified frequency get their rate set */
		if (info->freq) {
			rc = clk_set_rate(info->hdl, info->freq);
			if (rc) {
				PCIE_ERR(dev,
					"PCIe: RC%d can't set rate for clk %s: %d.\n",
					dev->rc_idx, info->name, rc);
				break;
			}

			PCIE_DBG2(dev,
				"PCIe: RC%d set rate for clk %s.\n",
				dev->rc_idx, info->name);
		}

		rc = clk_prepare_enable(info->hdl);
		if (rc) {
			PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s\n",
				dev->rc_idx, info->name);
			break;
		}

		PCIE_DBG2(dev, "enable clk %s for RC%d.\n", info->name,
			dev->rc_idx);
	}

	if (rc) {
		PCIE_DBG(dev, "RC%d disable clocks for error handling.\n",
			dev->rc_idx);
		/* Unwind the clocks enabled before the failure */
		while (i--) {
			struct clk *hdl = dev->clk[i].hdl;

			if (hdl)
				clk_disable_unprepare(hdl);
		}

		/* switch pipe clock mux to xo before turning off gdsc-core */
		if (dev->pipe_clk_mux && dev->ref_clk_src)
			clk_set_parent(dev->pipe_clk_mux, dev->ref_clk_src);
	}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
/*
 * Disable all core clocks, drop the interconnect vote, and park the PHY
 * aux / pipe clock muxes on the XO reference before GDSC power-down.
 */
static void msm_pcie_clk_deinit(struct msm_pcie_dev_t *dev)
{
	int i;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < dev->num_clk; i++)
		if (dev->clk[i].hdl)
			clk_disable_unprepare(dev->clk[i].hdl);

	/* Speed 0 == remove the bandwidth vote entirely */
	msm_pcie_icc_vote(dev, 0, 0, false);

	/* switch phy aux clock mux to xo before turning off gdsc-core */
	if (dev->phy_aux_clk_mux && dev->ref_clk_src)
		clk_set_parent(dev->phy_aux_clk_mux, dev->ref_clk_src);

	/* switch pipe clock mux to xo before turning off gdsc */
	if (dev->pipe_clk_mux && dev->ref_clk_src)
		clk_set_parent(dev->pipe_clk_mux, dev->ref_clk_src);

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
}
/* This function will assert, de-assert pipe reset signal */
/*
 * msm_pcie_pipe_reset - pulse every available pipe reset line.
 * @dev: PCIe root-complex device.
 *
 * For each populated entry in dev->pipe_reset[]: assert the reset, hold it
 * for ~1ms, then de-assert it. Failures are logged but do not stop the
 * loop; the return value is the result of the last reset operation
 * attempted.
 */
static int msm_pcie_pipe_reset(struct msm_pcie_dev_t *dev)
{
	int i, rc = 0;
	struct msm_pcie_reset_info_t *pipe_reset_info;

	PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

	for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
		pipe_reset_info = &dev->pipe_reset[i];
		if (pipe_reset_info->hdl) {
			rc = reset_control_assert(pipe_reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to assert pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully asserted pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);

			/* add a 1ms delay to ensure the reset is asserted */
			usleep_range(1000, 1005);

			rc = reset_control_deassert(
				pipe_reset_info->hdl);
			if (rc)
				PCIE_ERR(dev,
					"PCIe: RC%d failed to deassert pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
			else
				PCIE_DBG2(dev,
					"PCIe: RC%d successfully deasserted pipe reset for %s.\n",
					dev->rc_idx, pipe_reset_info->name);
		}
	}

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return rc;
}
- static int msm_pcie_pipe_clk_init(struct msm_pcie_dev_t *dev)
- {
- int i, rc = 0;
- struct msm_pcie_clk_info_t *info;
- PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
- for (i = 0; i < dev->num_pipe_clk; i++) {
- info = &dev->pipe_clk[i];
- if (!info->hdl)
- continue;
- if (info->freq) {
- rc = clk_set_rate(info->hdl, info->freq);
- if (rc) {
- PCIE_ERR(dev,
- "PCIe: RC%d can't set rate for clk %s: %d.\n",
- dev->rc_idx, info->name, rc);
- break;
- }
- PCIE_DBG2(dev,
- "PCIe: RC%d set rate for clk %s: %d.\n",
- dev->rc_idx, info->name, rc);
- }
- rc = clk_prepare_enable(info->hdl);
- if (rc) {
- PCIE_ERR(dev, "PCIe: RC%d failed to enable clk %s.\n",
- dev->rc_idx, info->name);
- break;
- }
- PCIE_DBG2(dev, "RC%d enabled pipe clk %s.\n", dev->rc_idx,
- info->name);
- }
- if (rc) {
- PCIE_DBG(dev, "RC%d disable pipe clocks for error handling.\n",
- dev->rc_idx);
- while (i--)
- if (dev->pipe_clk[i].hdl)
- clk_disable_unprepare(dev->pipe_clk[i].hdl);
- }
- PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
- return rc;
- }
- static void msm_pcie_pipe_clk_deinit(struct msm_pcie_dev_t *dev)
- {
- int i;
- PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);
- for (i = 0; i < dev->num_pipe_clk; i++)
- if (dev->pipe_clk[i].hdl)
- clk_disable_unprepare(
- dev->pipe_clk[i].hdl);
- PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
- }
- static bool pcie_phy_is_ready(struct msm_pcie_dev_t *dev)
- {
- /* There is no PHY status check in RUMI */
- if (dev->rumi)
- return true;
- if (readl_relaxed(dev->phy + dev->phy_status_offset) &
- BIT(dev->phy_status_bit))
- return false;
- else
- return true;
- }
- #ifdef CONFIG_SEC_PCIE_DEV
- static void pcie_sec_phy_init(struct msm_pcie_dev_t *dev);
- #endif
/*
 * pcie_phy_init - program the PHY init sequence and wait for PHY ready.
 * @dev: PCIe root-complex device.
 *
 * Plays back the DT-provided offset/value/delay table into the PHY block,
 * enables the pipe clocks, pulses the pipe resets, then polls the PHY
 * status bit up to PHY_READY_TIMEOUT_COUNT times.
 *
 * Returns 0 when the PHY reports ready, -ENODEV on timeout (after dumping
 * PHY registers).
 */
static int pcie_phy_init(struct msm_pcie_dev_t *dev)
{
	int i, ret;
	long retries = 0;
	struct msm_pcie_phy_info_t *phy_seq;
#ifdef CONFIG_SEC_PCIE
	/* per-RC retry statistics, preserved across calls */
	static int max_retries[MAX_RC_NUM];
	static long int total_enable_cnt[MAX_RC_NUM];
#endif

	PCIE_DBG(dev, "PCIe: RC%d: Initializing PHY\n", dev->rc_idx);

	/* play back the DT init table: write each register, honor delays */
	if (dev->phy_sequence) {
		i = dev->phy_len;
		phy_seq = dev->phy_sequence;
		while (i--) {
			msm_pcie_write_reg(dev->phy,
				phy_seq->offset,
				phy_seq->val);
			if (phy_seq->delay)
				usleep_range(phy_seq->delay,
					phy_seq->delay + 1);
			phy_seq++;
		}
	}

#ifdef CONFIG_SEC_PCIE_DEV
	pcie_sec_phy_init(dev);
#endif

	usleep_range(PHY_STABILIZATION_DELAY_US_MIN,
		PHY_STABILIZATION_DELAY_US_MAX);

	/* Enable the pipe clock */
	ret = msm_pcie_pipe_clk_init(dev);
	/* ensure that changes propagated to the hardware */
	wmb();

	/*
	 * NOTE(review): the return of msm_pcie_pipe_clk_init() above is
	 * overwritten here without being checked — presumably a pipe-clock
	 * failure is expected to surface as "PHY not ready" below; confirm
	 * this is intentional.
	 */
	/* Assert, De-assert the pipe reset */
	ret = msm_pcie_pipe_reset(dev);
	/* ensure that changes propagated to the hardware */
	wmb();

	PCIE_DBG(dev, "PCIe RC%d: waiting for phy ready...\n", dev->rc_idx);

	/* poll the PHY status bit until ready or timeout */
	do {
		if (pcie_phy_is_ready(dev))
			break;
		retries++;
		usleep_range(REFCLK_STABILIZATION_DELAY_US_MIN,
			REFCLK_STABILIZATION_DELAY_US_MAX);
	} while (retries < PHY_READY_TIMEOUT_COUNT);

#ifdef CONFIG_SEC_PCIE
	if (max_retries[dev->rc_idx] < retries)
		max_retries[dev->rc_idx] = retries;
	total_enable_cnt[dev->rc_idx]++;
	PCIE_ERR(dev, "RC%d: number of PHY retries:%ld(Max:%d, Total:%ld).\n",
		dev->rc_idx, retries, max_retries[dev->rc_idx], total_enable_cnt[dev->rc_idx]);
#else
	PCIE_DBG(dev, "PCIe: RC%d: number of PHY retries: %ld.\n", dev->rc_idx,
		retries);
#endif

	if (!pcie_phy_is_ready(dev)) {
		PCIE_ERR(dev, "PCIe PHY RC%d failed to come up!\n",
			dev->rc_idx);
#ifdef CONFIG_SEC_PCIE
		update_phyinit_fail_count(dev->rc_idx);
#endif
		pcie_phy_dump(dev);
		return -ENODEV;
	}

#ifdef CONFIG_SEC_PCIE_DEV
	pcie_sec_dump(dev);
#endif

	PCIE_INFO(dev, "PCIe RC%d PHY is ready!\n", dev->rc_idx);

	return 0;
}
- static u16 msm_pci_find_ext_capability(struct msm_pcie_dev_t *pci, u8 cap)
- {
- int pos = PCI_CFG_SPACE_SIZE;
- u32 header;
- int ttl;
- /* minimum 8 bytes per capability */
- ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
- header = readl_relaxed(pci->dm_core + pos);
- /*
- * If we have no capabilities, this is indicated by cap ID,
- * cap version and next pointer all being 0.
- */
- if (header == 0)
- return 0;
- while (ttl-- > 0) {
- if (PCI_EXT_CAP_ID(header) == cap && pos != 0)
- return pos;
- pos = PCI_EXT_CAP_NEXT(header);
- if (pos < PCI_CFG_SPACE_SIZE)
- break;
- header = readl_relaxed(pci->dm_core + pos);
- }
- return 0;
- }
/*
 * msm_pcie_config_core_preset - program GEN3/GEN4 lane equalization presets.
 * @pcie_dev: PCIe root-complex device.
 *
 * Reads the supported link speed and width from LNKCAP, then writes
 * pcie_dev->core_preset into the preset registers of the Secondary PCI
 * Express capability (GEN3) and the Physical Layer 16.0 GT/s capability
 * (GEN4), covering every supported lane. The RO-register write window is
 * opened via GEN3_MISC_CONTROL for the duration.
 */
static void msm_pcie_config_core_preset(struct msm_pcie_dev_t *pcie_dev)
{
	u32 supported_link_speed, supported_link_width;
	u16 cap_id_offset, offset;
	u32 val;
	int i;

	val = readl_relaxed(pcie_dev->dm_core + PCIE20_CAP + PCI_EXP_LNKCAP);
	supported_link_speed = val & PCI_EXP_LNKCAP_SLS;
	supported_link_width = (val & PCI_EXP_LNKCAP_MLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;

	/* enable write access to RO register */
	msm_pcie_write_mask(pcie_dev->dm_core + PCIE_GEN3_MISC_CONTROL, 0, BIT(0));

	/* Gen3 */
	if (supported_link_speed >= PCI_EXP_LNKCAP_SLS_8_0GB) {
		cap_id_offset = msm_pci_find_ext_capability(pcie_dev, PCI_EXT_CAP_ID_SECPCI);
		if (cap_id_offset == 0)
			return;

		/* GEN3 preset is at 0xC offset from Secondary PCI Express Extended Capability ID */
		offset = cap_id_offset + 0xC;
		msm_pcie_write_reg(pcie_dev->dm_core, offset, pcie_dev->core_preset);

		/*
		 * Each register provides preset hint for 2 lanes.
		 * If there are more than 2 lanes then programing remaining lanes.
		 */
		for (i = 2; i < supported_link_width; i = i+2) {
			offset += 0x4;
			msm_pcie_write_reg(pcie_dev->dm_core, offset, pcie_dev->core_preset);
		}
	}

	/* Gen4 */
	if (supported_link_speed >= PCI_EXP_LNKCAP_SLS_16_0GB) {
		cap_id_offset = msm_pci_find_ext_capability(pcie_dev, PCI_EXT_CAP_ID_PL_16GT);
		if (cap_id_offset == 0)
			return;

		/*
		 * GEN4 preset is at 0x20 offset from Physical Layer
		 * 16.0 GT/s Extended Capability ID
		 */
		offset = cap_id_offset + 0x20;
		msm_pcie_write_reg(pcie_dev->dm_core, offset, pcie_dev->core_preset);

		/*
		 * Each register provides preset hint for 4 lanes.
		 * If there are more than 4 lanes then programing remaining lanes.
		 */
		for (i = 4; i < supported_link_width; i = i+4) {
			offset += 0x4;
			msm_pcie_write_reg(pcie_dev->dm_core, offset, pcie_dev->core_preset);
		}
	}

	/*
	 * disable write access to RO register.
	 * NOTE(review): the early returns above skip this step, leaving the
	 * RO write window open when a capability is missing — confirm this
	 * is acceptable.
	 */
	msm_pcie_write_mask(pcie_dev->dm_core + PCIE_GEN3_MISC_CONTROL, BIT(0), 0);
}
- /* Controller settings related to PCIe PHY */
- static void msm_pcie_config_controller_phy(struct msm_pcie_dev_t *pcie_dev)
- {
- int i;
- u32 supported_link_speed =
- readl_relaxed(pcie_dev->dm_core + PCIE20_CAP + PCI_EXP_LNKCAP) &
- PCI_EXP_LNKCAP_SLS;
- /* settings apply to GEN3 and above */
- for (i = PCI_EXP_LNKCAP_SLS_8_0GB; i <= supported_link_speed; i++) {
- /* select which GEN speed to configure settings for */
- msm_pcie_write_reg_field(pcie_dev->dm_core, PCIE_GEN3_RELATED,
- PCIE_GEN3_RELATED_RATE_SHADOW_SEL_MASK,
- PCIE_GEN3_RELATED_RATE_SHADOW_SEL(i));
- msm_pcie_write_reg_field(pcie_dev->dm_core, PCIE_GEN3_EQ_CONTROL,
- PCIE_GEN3_EQ_PSET_REQ_VEC_MASK,
- pcie_dev->eq_pset_req_vec);
- /* GEN3_ZRXDC_NONCOMPL */
- msm_pcie_write_mask(pcie_dev->dm_core +
- PCIE_GEN3_RELATED, BIT(0), 0);
- msm_pcie_write_reg_field(pcie_dev->dm_core,
- PCIE_GEN3_EQ_FB_MODE_DIR_CHANGE,
- PCIE_GEN3_EQ_FMDC_T_MIN_PHASE23_MASK,
- pcie_dev->eq_fmdc_t_min_phase23);
- }
- }
/*
 * msm_pcie_config_controller - one-time controller programming after link-up
 * prerequisites: iATU region for bus 1, N_FTS, AUX clock frequency,
 * completion timeout, and AER enablement on the root port.
 */
static void msm_pcie_config_controller(struct msm_pcie_dev_t *dev)
{
	PCIE_DBG(dev, "RC%d\n", dev->rc_idx);

	/*
	 * program and enable address translation region 0 (device config
	 * address space); region type config;
	 * axi config address range to device config address range. Enable
	 * translation for bus 1 dev 0 fn 0.
	 */
	dev->current_bdf = 0; /* to force IATU re-config */
	msm_pcie_cfg_bdf(dev, 1, 0);

	/* configure N_FTS */
	PCIE_DBG2(dev, "Original PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));
	/* n_fts == 0: set BIT(15) instead of writing an N_FTS value */
	if (!dev->n_fts)
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
			0, BIT(15));
	else
		msm_pcie_write_mask(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG,
			PCIE20_ACK_N_FTS,
			dev->n_fts << 8);
	PCIE_DBG2(dev, "Updated PCIE20_ACK_F_ASPM_CTRL_REG:0x%x\n",
		readl_relaxed(dev->dm_core + PCIE20_ACK_F_ASPM_CTRL_REG));

	/* configure AUX clock frequency register for PCIe core */
	if (dev->aux_clk_freq)
		msm_pcie_write_reg(dev->dm_core, PCIE20_AUX_CLK_FREQ_REG, dev->aux_clk_freq);
	/* read back the value actually in effect */
	dev->aux_clk_freq = readl_relaxed(dev->dm_core +
			PCIE20_AUX_CLK_FREQ_REG);

	/* configure the completion timeout value for PCIe core */
	if (dev->cpl_timeout && dev->bridge_found)
		msm_pcie_write_reg_field(dev->dm_core,
			PCIE20_DEVICE_CONTROL2_STATUS2,
			0xf, dev->cpl_timeout);

	/* Enable AER on RC */
	if (dev->aer_enable) {
		/* SERR enable + fatal/non-fatal error reporting bits */
		msm_pcie_write_mask(dev->dm_core + PCIE20_BRIDGE_CTRL, 0,
			BIT(16)|BIT(17));
		msm_pcie_write_mask(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS, 0,
			BIT(3)|BIT(2)|BIT(1)|BIT(0));

		PCIE_DBG(dev, "RC's PCIE20_CAP_DEVCTRLSTATUS:0x%x\n",
			readl_relaxed(dev->dm_core + PCIE20_CAP_DEVCTRLSTATUS));
	}
}
- static int msm_pcie_get_clk(struct msm_pcie_dev_t *pcie_dev)
- {
- struct platform_device *pdev = pcie_dev->pdev;
- u32 *clk_freq = NULL, *clk_suppressible = NULL;
- int ret, i, total_num_clk;
- struct clk_bulk_data *bulk_clks;
- struct msm_pcie_clk_info_t *clk;
- /* get clocks */
- ret = devm_clk_bulk_get_all(&pdev->dev, &bulk_clks);
- if (ret <= 0) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: failed to get clocks: ret: %d\n",
- pcie_dev->rc_idx, ret);
- goto out;
- }
- total_num_clk = ret;
- ret = of_property_count_elems_of_size(pdev->dev.of_node,
- "clock-frequency",
- sizeof(*clk_freq));
- if (ret != total_num_clk) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: mismatch between number of clock and frequency entries: %d != %d\n",
- pcie_dev->rc_idx, total_num_clk, ret);
- return -EIO;
- }
- /* get clock frequency info */
- clk_freq = devm_kcalloc(&pdev->dev, total_num_clk, sizeof(*clk_freq),
- GFP_KERNEL);
- if (!clk_freq)
- return -ENOMEM;
- ret = of_property_read_u32_array(pdev->dev.of_node, "clock-frequency",
- clk_freq, total_num_clk);
- if (ret) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: failed to get clock frequencies: ret: %d\n",
- pcie_dev->rc_idx, ret);
- goto out;
- }
- ret = of_property_count_elems_of_size(pdev->dev.of_node,
- "clock-suppressible",
- sizeof(*clk_suppressible));
- if (ret != total_num_clk) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: mismatch between number of clock and suppressible entries: %d != %d\n",
- pcie_dev->rc_idx, total_num_clk, ret);
- return -EIO;
- }
- /* get clock suppressible info */
- clk_suppressible = devm_kcalloc(&pdev->dev, total_num_clk,
- sizeof(*clk_suppressible), GFP_KERNEL);
- if (!clk_suppressible)
- return -ENOMEM;
- ret = of_property_read_u32_array(pdev->dev.of_node,
- "clock-suppressible",
- clk_suppressible, total_num_clk);
- if (ret) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: failed to get clock suppressible info: ret: %d\n",
- pcie_dev->rc_idx, ret);
- goto out;
- }
- /* setup array of PCIe clock info */
- clk = devm_kcalloc(&pdev->dev, total_num_clk, sizeof(*clk), GFP_KERNEL);
- if (!clk)
- return -ENOMEM;
- /* Initially, pipe clk and clk both point to the beginning */
- pcie_dev->pipe_clk = pcie_dev->clk = clk;
- for (i = 0; i < total_num_clk; i++, clk++, bulk_clks++) {
- clk->name = bulk_clks->id;
- clk->hdl = bulk_clks->clk;
- clk->freq = *clk_freq++;
- clk->suppressible = *clk_suppressible++;
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: %s: frequency: %d: suppressible: %d\n",
- pcie_dev->rc_idx, clk->name, clk->freq,
- clk->suppressible);
- }
- /*
- * PCIe PIPE clock needs to be voted for independently from other PCIe
- * clocks. Assumption is that PCIe pipe clocks come first in the list
- * of clocks. The rest of the clocks will come after.
- */
- if (!strcmp(pcie_dev->clk->name, "pcie_pipe_clk")) {
- pcie_dev->num_pipe_clk++;
- pcie_dev->clk++;
- } else {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: could not find entry for pcie_pipe_clk\n",
- pcie_dev->rc_idx);
- /* Mask the error when PCIe resources are managed by CESTA */
- if (!pcie_dev->pcie_sm)
- goto out;
- }
- pcie_dev->num_clk = total_num_clk - pcie_dev->num_pipe_clk;
- pcie_dev->rate_change_clk = clk_get(&pdev->dev, "pcie_rate_change_clk");
- if (IS_ERR(pcie_dev->rate_change_clk)) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: pcie_rate_change_clk is not present\n",
- pcie_dev->rc_idx);
- pcie_dev->rate_change_clk = NULL;
- }
- pcie_dev->pipe_clk_mux = clk_get(&pdev->dev, "pcie_pipe_clk_mux");
- if (IS_ERR(pcie_dev->pipe_clk_mux)) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: pcie_pipe_clk_mux is not present\n",
- pcie_dev->rc_idx);
- pcie_dev->pipe_clk_mux = NULL;
- }
- pcie_dev->pipe_clk_ext_src = clk_get(&pdev->dev,
- "pcie_pipe_clk_ext_src");
- if (IS_ERR(pcie_dev->pipe_clk_ext_src)) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: pcie_pipe_clk_ext_src is not present\n",
- pcie_dev->rc_idx);
- pcie_dev->pipe_clk_ext_src = NULL;
- }
- pcie_dev->phy_aux_clk_mux = clk_get(&pdev->dev, "pcie_phy_aux_clk_mux");
- if (IS_ERR(pcie_dev->phy_aux_clk_mux))
- pcie_dev->phy_aux_clk_mux = NULL;
- pcie_dev->phy_aux_clk_ext_src = clk_get(&pdev->dev,
- "pcie_phy_aux_clk_ext_src");
- if (IS_ERR(pcie_dev->phy_aux_clk_ext_src))
- pcie_dev->phy_aux_clk_ext_src = NULL;
- pcie_dev->ref_clk_src = clk_get(&pdev->dev, "pcie_ref_clk_src");
- if (IS_ERR(pcie_dev->ref_clk_src)) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: pcie_ref_clk_src is not present\n",
- pcie_dev->rc_idx);
- pcie_dev->ref_clk_src = NULL;
- }
- pcie_dev->ahb_clk = clk_get(&pdev->dev, "pcie_cfg_ahb_clk");
- if (IS_ERR(pcie_dev->ahb_clk)) {
- pcie_dev->ahb_clk = NULL;
- PCIE_DBG(pcie_dev, "Clock ahb isn't available\n");
- }
- return 0;
- out:
- return -EIO;
- }
/*
 * msm_pcie_get_vreg - acquire all regulators and their DT voltage levels.
 * @pcie_dev: PCIe root-complex device.
 *
 * Each entry in pcie_dev->vreg[] is looked up as an optional regulator.
 * When present, its "qcom,<name>-voltage-level" triple is parsed as
 * <max_v min_v opt_mode> (assignment order below; confirm against the DT
 * binding). The cx/mx regulators are additionally cached for bandwidth
 * scaling. The core and PHY GDSCs are fetched last.
 *
 * Returns 0 on success or a negative errno (-EPROBE_DEFER is propagated).
 */
static int msm_pcie_get_vreg(struct msm_pcie_dev_t *pcie_dev)
{
	int i, len;
	struct platform_device *pdev = pcie_dev->pdev;
	const __be32 *prop;
	char prop_name[MAX_PROP_SIZE];

	for (i = 0; i < MSM_PCIE_MAX_VREG; i++) {
		struct msm_pcie_vreg_info_t *vreg_info = &pcie_dev->vreg[i];

		vreg_info->hdl = devm_regulator_get_optional(&pdev->dev,
						vreg_info->name);

		if (PTR_ERR(vreg_info->hdl) == -EPROBE_DEFER) {
			PCIE_DBG(pcie_dev, "EPROBE_DEFER for VReg:%s\n",
				vreg_info->name);
			return PTR_ERR(vreg_info->hdl);
		}

		if (IS_ERR(vreg_info->hdl)) {
			/* required vregs may still be absent under CESTA */
			if (vreg_info->required && !pcie_dev->pcie_sm) {
				PCIE_DBG(pcie_dev, "Vreg %s doesn't exist\n",
					vreg_info->name);
				return PTR_ERR(vreg_info->hdl);
			}

			PCIE_DBG(pcie_dev, "Optional Vreg %s doesn't exist\n",
				vreg_info->name);
			vreg_info->hdl = NULL;
		} else {
			pcie_dev->vreg_n++;

			scnprintf(prop_name, MAX_PROP_SIZE,
				"qcom,%s-voltage-level", vreg_info->name);
			prop = of_get_property(pdev->dev.of_node,
						prop_name, &len);
			if (!prop || (len != (3 * sizeof(__be32)))) {
				PCIE_DBG(pcie_dev, "%s %s property\n",
					prop ? "invalid format" :
					"no", prop_name);
			} else {
				/* DT triple order: max, min, opt_mode */
				vreg_info->max_v = be32_to_cpup(&prop[0]);
				vreg_info->min_v = be32_to_cpup(&prop[1]);
				vreg_info->opt_mode =
					be32_to_cpup(&prop[2]);
			}

			/* cache cx/mx for later bandwidth voltage scaling */
			if (!strcmp(vreg_info->name, "vreg-cx"))
				pcie_dev->cx_vreg = vreg_info;

			if (!strcmp(vreg_info->name, "vreg-mx"))
				pcie_dev->mx_vreg = vreg_info;
		}
	}

	pcie_dev->gdsc_core = devm_regulator_get(&pdev->dev, "gdsc-core-vdd");

	if (IS_ERR(pcie_dev->gdsc_core)) {
		PCIE_ERR(pcie_dev, "PCIe: RC%d: Failed to get %s GDSC-CORE:%ld\n",
			pcie_dev->rc_idx, pdev->name,
			PTR_ERR(pcie_dev->gdsc_core));
		if (PTR_ERR(pcie_dev->gdsc_core) == -EPROBE_DEFER)
			PCIE_DBG(pcie_dev, "PCIe: EPROBE_DEFER for %s GDSC-CORE\n",
				pdev->name);
		/* gdsc-core failure is only fatal without CESTA */
		if (!pcie_dev->pcie_sm)
			return PTR_ERR(pcie_dev->gdsc_core);
	}

	pcie_dev->gdsc_phy = devm_regulator_get(&pdev->dev, "gdsc-phy-vdd");

	if (IS_ERR(pcie_dev->gdsc_phy)) {
		PCIE_ERR(pcie_dev, "PCIe: RC%d: Failed to get %s GDSC-PHY:%ld\n",
			pcie_dev->rc_idx, pdev->name,
			PTR_ERR(pcie_dev->gdsc_phy));
		if (PTR_ERR(pcie_dev->gdsc_phy) == -EPROBE_DEFER) {
			PCIE_DBG(pcie_dev, "PCIe: EPROBE_DEFER for %s GDSC-PHY\n",
				pdev->name);
			return PTR_ERR(pcie_dev->gdsc_phy);
		}
	}

	return 0;
}
- static int msm_pcie_get_reset(struct msm_pcie_dev_t *pcie_dev)
- {
- int i;
- struct msm_pcie_reset_info_t *reset_info;
- for (i = 0; i < MSM_PCIE_MAX_RESET; i++) {
- reset_info = &pcie_dev->reset[i];
- reset_info->hdl = devm_reset_control_get(&pcie_dev->pdev->dev,
- reset_info->name);
- if (IS_ERR(reset_info->hdl)) {
- if (reset_info->required) {
- PCIE_DBG(pcie_dev,
- "Reset %s isn't available:%ld\n",
- reset_info->name,
- PTR_ERR(reset_info->hdl));
- return PTR_ERR(reset_info->hdl);
- }
- PCIE_DBG(pcie_dev, "Ignoring Reset %s\n",
- reset_info->name);
- reset_info->hdl = NULL;
- }
- }
- for (i = 0; i < MSM_PCIE_MAX_PIPE_RESET; i++) {
- reset_info = &pcie_dev->pipe_reset[i];
- reset_info->hdl = devm_reset_control_get(&pcie_dev->pdev->dev,
- reset_info->name);
- if (IS_ERR(reset_info->hdl)) {
- if (reset_info->required) {
- PCIE_DBG(pcie_dev,
- "Pipe Reset %s isn't available:%ld\n",
- reset_info->name,
- PTR_ERR(reset_info->hdl));
- return PTR_ERR(reset_info->hdl);
- }
- PCIE_DBG(pcie_dev, "Ignoring Pipe Reset %s\n",
- reset_info->name);
- reset_info->hdl = NULL;
- }
- }
- for (i = 0; i < MSM_PCIE_MAX_LINKDOWN_RESET; i++) {
- reset_info = &pcie_dev->linkdown_reset[i];
- reset_info->hdl = devm_reset_control_get(&pcie_dev->pdev->dev,
- reset_info->name);
- if (IS_ERR(reset_info->hdl)) {
- if (reset_info->required) {
- PCIE_DBG(pcie_dev,
- "Linkdown Reset %s isn't available:%ld\n",
- reset_info->name,
- PTR_ERR(reset_info->hdl));
- return PTR_ERR(reset_info->hdl);
- }
- PCIE_DBG(pcie_dev, "Ignoring Linkdown Reset %s\n",
- reset_info->name);
- reset_info->hdl = NULL;
- }
- }
- return 0;
- }
- static int msm_pcie_get_bw_scale(struct msm_pcie_dev_t *pcie_dev)
- {
- int size = 0;
- struct platform_device *pdev = pcie_dev->pdev;
- of_get_property(pdev->dev.of_node, "qcom,bw-scale", &size);
- if (size) {
- pcie_dev->bw_scale = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!pcie_dev->bw_scale)
- return -ENOMEM;
- of_property_read_u32_array(pdev->dev.of_node, "qcom,bw-scale",
- (u32 *)pcie_dev->bw_scale, size / sizeof(u32));
- pcie_dev->bw_gen_max = size / sizeof(*pcie_dev->bw_scale);
- } else {
- PCIE_DBG(pcie_dev, "RC%d: bandwidth scaling is not supported\n",
- pcie_dev->rc_idx);
- }
- return 0;
- }
#ifdef CONFIG_SEC_PCIE
/*
 * msm_pcie_get_phy_override - append "qcom,phy-sequence-override" to the
 * already-loaded PHY init sequence.
 * @pcie_dev: PCIe root-complex device (phy_sequence/phy_len already set).
 * @size:     byte size of the base "qcom,phy-sequence" table.
 *
 * Allocates a larger buffer holding base + override entries, copies the
 * base table in, then reads the override entries after it. On any failure
 * the original sequence pointer and length are restored and 0 is returned
 * — the override is strictly best-effort.
 */
static int msm_pcie_get_phy_override(struct msm_pcie_dev_t *pcie_dev, int size)
{
	int ret, size_ovr;
	struct platform_device *pdev = pcie_dev->pdev;
	struct msm_pcie_phy_info_t *old_sequence;
	const __be32 *prop;

	prop = of_get_property(pdev->dev.of_node, "qcom,phy-sequence-override", &size_ovr);
	if (!prop || !size_ovr) {
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: phy sequence override is not present in DT\n",
			pcie_dev->rc_idx);
		return 0;
	}

	/* keep the base sequence so we can roll back on any failure */
	old_sequence = pcie_dev->phy_sequence;

	pcie_dev->phy_sequence = devm_kzalloc(&pdev->dev, size + size_ovr, GFP_KERNEL);
	if (!pcie_dev->phy_sequence) {
		pcie_dev->phy_sequence = old_sequence;
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: phy sequence override devm_kzalloc fail\n",
			pcie_dev->rc_idx);
		return 0;
	}

	memcpy(pcie_dev->phy_sequence, old_sequence, size);

	pcie_dev->phy_len += size_ovr / ((unsigned int)sizeof(*pcie_dev->phy_sequence));

	/* read the override entries directly after the copied base table */
	ret = of_property_read_u32_array(pdev->dev.of_node,
				"qcom,phy-sequence-override",
				(unsigned int *)(pcie_dev->phy_sequence
				+ (size / ((unsigned int)sizeof(*pcie_dev->phy_sequence)))),
				size_ovr / sizeof(pcie_dev->phy_sequence->offset));
	if (ret) {
		/* roll back: free the merged buffer, restore base table */
		devm_kfree(&pdev->dev, pcie_dev->phy_sequence);
		pcie_dev->phy_sequence = old_sequence;
		pcie_dev->phy_len -= size_ovr / ((unsigned int)sizeof(*pcie_dev->phy_sequence));
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: phy sequence override is not loaded\n",
			pcie_dev->rc_idx);
		return 0;
	}

	devm_kfree(&pdev->dev, old_sequence);

	return 0;
}
#endif
- static int msm_pcie_get_phy(struct msm_pcie_dev_t *pcie_dev)
- {
- int ret, size = 0;
- struct platform_device *pdev = pcie_dev->pdev;
- of_get_property(pdev->dev.of_node, "qcom,phy-sequence", &size);
- if (!size) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: phy sequence is not present in DT\n",
- pcie_dev->rc_idx);
- return 0;
- }
- pcie_dev->phy_sequence = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!pcie_dev->phy_sequence)
- return -ENOMEM;
- pcie_dev->phy_len = size / sizeof(*pcie_dev->phy_sequence);
- ret = of_property_read_u32_array(pdev->dev.of_node,
- "qcom,phy-sequence",
- (unsigned int *)pcie_dev->phy_sequence,
- size / sizeof(pcie_dev->phy_sequence->offset));
- if (ret)
- return -EINVAL;
- #ifdef CONFIG_SEC_PCIE
- msm_pcie_get_phy_override(pcie_dev, size);
- #endif
- return 0;
- }
- static int msm_pcie_get_phy_status_reg(struct msm_pcie_dev_t *pcie_dev)
- {
- int ret, size = 0;
- struct platform_device *pdev = pcie_dev->pdev;
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: Enter\n",
- pcie_dev->rc_idx);
- of_get_property(pdev->dev.of_node, "qcom,phy-debug-reg", &size);
- if (!size) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: phy debug registers not present in DT\n",
- pcie_dev->rc_idx);
- pcie_dev->phy_debug_reg = NULL;
- return 0;
- }
- pcie_dev->phy_debug_reg = kmalloc(size, GFP_KERNEL);
- if (!pcie_dev->phy_debug_reg)
- return -ENOMEM;
- pcie_dev->phy_debug_reg_len = size / sizeof(*pcie_dev->phy_debug_reg);
- ret = of_property_read_u32_array(pdev->dev.of_node,
- "qcom,phy-debug-reg",
- (unsigned int *)pcie_dev->phy_debug_reg,
- size / sizeof(*pcie_dev->phy_debug_reg));
- if (ret) {
- kfree(pcie_dev->phy_debug_reg);
- pcie_dev->phy_debug_reg = NULL;
- return -EINVAL;
- }
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: no of phy dbg regs:%u size:%u\n",
- pcie_dev->rc_idx, size/sizeof(u32), size);
- return 0;
- }
- static int msm_pcie_get_parf_status_reg(struct msm_pcie_dev_t *pcie_dev)
- {
- int ret, size = 0;
- struct platform_device *pdev = pcie_dev->pdev;
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: Enter\n", pcie_dev->rc_idx);
- of_get_property(pdev->dev.of_node, "qcom,parf-debug-reg", &size);
- if (!size) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: parf debug registers not present in DT\n",
- pcie_dev->rc_idx);
- pcie_dev->parf_debug_reg = NULL;
- return 0;
- }
- pcie_dev->parf_debug_reg = kmalloc(size, GFP_KERNEL);
- if (!pcie_dev->parf_debug_reg)
- return -ENOMEM;
- pcie_dev->parf_debug_reg_len = size / sizeof(u32);
- ret = of_property_read_u32_array(pdev->dev.of_node,
- "qcom,parf-debug-reg",
- (unsigned int *)pcie_dev->parf_debug_reg,
- size / sizeof(u32));
- if (ret) {
- kfree(pcie_dev->parf_debug_reg);
- pcie_dev->parf_debug_reg = NULL;
- return -EINVAL;
- }
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: no of parf dbg regs:%u size:%u\n",
- pcie_dev->rc_idx, size/sizeof(u32), size);
- return 0;
- }
- static int msm_pcie_get_dbi_status_reg(struct msm_pcie_dev_t *pcie_dev)
- {
- int ret, size = 0;
- struct platform_device *pdev = pcie_dev->pdev;
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: Enter\n",
- pcie_dev->rc_idx);
- of_get_property(pdev->dev.of_node, "qcom,dbi-debug-reg", &size);
- if (!size) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: dbi debug registers not present in DT\n",
- pcie_dev->rc_idx);
- pcie_dev->dbi_debug_reg = NULL;
- return 0;
- }
- pcie_dev->dbi_debug_reg = kmalloc(size, GFP_KERNEL);
- if (!pcie_dev->dbi_debug_reg)
- return -ENOMEM;
- pcie_dev->dbi_debug_reg_len = size / sizeof(u32);
- ret = of_property_read_u32_array(pdev->dev.of_node,
- "qcom,dbi-debug-reg",
- (unsigned int *)pcie_dev->dbi_debug_reg,
- size / sizeof(u32));
- if (ret) {
- kfree(pcie_dev->dbi_debug_reg);
- pcie_dev->dbi_debug_reg = NULL;
- return -EINVAL;
- }
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: no of dbi dbg regs:%u size:%u\n",
- pcie_dev->rc_idx, size/sizeof(u32), size);
- return 0;
- }
- static int msm_pcie_get_iommu_map(struct msm_pcie_dev_t *pcie_dev)
- {
- /* iommu map structure */
- struct {
- u32 bdf;
- u32 phandle;
- u32 smmu_sid;
- u32 smmu_sid_len;
- } *map;
- struct platform_device *pdev = pcie_dev->pdev;
- int i, size = 0;
- of_get_property(pdev->dev.of_node, "iommu-map", &size);
- if (!size) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: iommu-map is not present in DT.\n",
- pcie_dev->rc_idx);
- return 0;
- }
- map = kzalloc(size, GFP_KERNEL);
- if (!map)
- return -ENOMEM;
- of_property_read_u32_array(pdev->dev.of_node,
- "iommu-map", (u32 *)map, size / sizeof(u32));
- pcie_dev->sid_info_len = size / (sizeof(*map));
- pcie_dev->sid_info = devm_kcalloc(&pdev->dev, pcie_dev->sid_info_len,
- sizeof(*pcie_dev->sid_info), GFP_KERNEL);
- if (!pcie_dev->sid_info) {
- kfree(map);
- return -ENOMEM;
- }
- for (i = 0; i < pcie_dev->sid_info_len; i++) {
- pcie_dev->sid_info[i].bdf = map[i].bdf;
- pcie_dev->sid_info[i].smmu_sid = map[i].smmu_sid;
- pcie_dev->sid_info[i].pcie_sid =
- pcie_dev->sid_info[i].smmu_sid -
- pcie_dev->smmu_sid_base;
- }
- kfree(map);
- return 0;
- }
- static int msm_pcie_get_gpio(struct msm_pcie_dev_t *pcie_dev)
- {
- int i, ret;
- pcie_dev->gpio_n = 0;
- for (i = 0; i < MSM_PCIE_MAX_GPIO; i++) {
- struct msm_pcie_gpio_info_t *gpio_info = &pcie_dev->gpio[i];
- ret = of_get_named_gpio(pcie_dev->pdev->dev.of_node,
- gpio_info->name, 0);
- if (ret >= 0) {
- gpio_info->num = ret;
- pcie_dev->gpio_n++;
- PCIE_DBG(pcie_dev, "GPIO num for %s is %d\n",
- gpio_info->name, gpio_info->num);
- } else {
- if (gpio_info->required) {
- PCIE_ERR(pcie_dev,
- "Could not get required GPIO %s\n",
- gpio_info->name);
- return ret;
- }
- PCIE_DBG(pcie_dev, "Could not get optional GPIO %s\n",
- gpio_info->name);
- }
- }
- pcie_dev->wake_n = 0;
- if (pcie_dev->gpio[MSM_PCIE_GPIO_WAKE].num)
- pcie_dev->wake_n =
- gpio_to_irq(pcie_dev->gpio[MSM_PCIE_GPIO_WAKE].num);
- return 0;
- }
- static int msm_pcie_get_reg(struct msm_pcie_dev_t *pcie_dev)
- {
- struct resource *res;
- struct msm_pcie_res_info_t *res_info;
- int i;
- for (i = 0; i < MSM_PCIE_MAX_RES; i++) {
- res_info = &pcie_dev->res[i];
- res = platform_get_resource_byname(pcie_dev->pdev,
- IORESOURCE_MEM, res_info->name);
- if (!res) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: no %s resource found.\n",
- pcie_dev->rc_idx, res_info->name);
- } else {
- PCIE_DBG(pcie_dev, "start addr for %s is %pa.\n",
- res_info->name, &res->start);
- res_info->base = devm_ioremap(&pcie_dev->pdev->dev,
- res->start, resource_size(res));
- if (!res_info->base) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: can't remap %s.\n",
- pcie_dev->rc_idx, res_info->name);
- return -ENOMEM;
- }
- res_info->resource = res;
- }
- }
- pcie_dev->parf = pcie_dev->res[MSM_PCIE_RES_PARF].base;
- pcie_dev->phy = pcie_dev->res[MSM_PCIE_RES_PHY].base;
- pcie_dev->elbi = pcie_dev->res[MSM_PCIE_RES_ELBI].base;
- pcie_dev->iatu = pcie_dev->res[MSM_PCIE_RES_IATU].base;
- pcie_dev->dm_core = pcie_dev->res[MSM_PCIE_RES_DM_CORE].base;
- pcie_dev->conf = pcie_dev->res[MSM_PCIE_RES_CONF].base;
- pcie_dev->pcie_sm = pcie_dev->res[MSM_PCIE_RES_SM].base;
- pcie_dev->mhi = pcie_dev->res[MSM_PCIE_RES_MHI].base;
- pcie_dev->tcsr = pcie_dev->res[MSM_PCIE_RES_TCSR].base;
- pcie_dev->rumi = pcie_dev->res[MSM_PCIE_RES_RUMI].base;
- return 0;
- }
- static int msm_pcie_get_tcsr_values(struct msm_pcie_dev_t *dev,
- struct platform_device *pdev)
- {
- int size = 0, ret = 0;
- of_get_property(pdev->dev.of_node, "qcom,tcsr", &size);
- if (!size) {
- PCIE_DBG(dev, "PCIe: RC%d: tcsr is not present in DT\n",
- dev->rc_idx);
- return 0;
- }
- dev->tcsr_config = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
- if (!dev->tcsr_config)
- return -ENOMEM;
- dev->tcsr_len = size / sizeof(*dev->tcsr_config);
- of_property_read_u32_array(pdev->dev.of_node,
- "qcom,tcsr",
- (unsigned int *)dev->tcsr_config,
- size / sizeof(dev->tcsr_config->offset));
- return ret;
- }
/*
 * msm_pcie_get_resources - acquire every probe-time resource for this RC:
 * register regions, ICC path, IRQs, TCSR/clock/regulator/reset tables,
 * bandwidth-scale and PHY sequences, iommu-map, GPIOs and the debug
 * register lists.
 * @dev:  PCIe root-complex device.
 * @pdev: platform device being probed.
 *
 * Sub-steps run in dependency order (registers first, since later steps
 * consult dev->rumi/dev->pcie_sm). Returns 0 or the first failure.
 */
static int msm_pcie_get_resources(struct msm_pcie_dev_t *dev,
				  struct platform_device *pdev)
{
	int i, ret = 0;
	int num;
	struct msm_pcie_irq_info_t *irq_info;

	PCIE_DBG(dev, "PCIe: RC%d: entry\n", dev->rc_idx);

	ret = msm_pcie_get_reg(dev);
	if (ret)
		return ret;

	dev->icc_path = of_icc_get(&pdev->dev, "icc_path");
	if (IS_ERR(dev->icc_path)) {
		ret = dev->icc_path ? PTR_ERR(dev->icc_path) : -EINVAL;
		PCIE_ERR(dev, "PCIe: RC%d: failed to get ICC path: %d\n",
			dev->rc_idx, ret);
		/* a missing ICC path is tolerated only on RUMI targets */
		if (!dev->rumi)
			return ret;
	}

	/* IRQs are optional: record the number when present, else just log */
	for (i = 0; i < MSM_PCIE_MAX_IRQ; i++) {
		irq_info = &dev->irq[i];

		num = platform_get_irq_byname(pdev, irq_info->name);
		if (num < 0) {
			PCIE_DBG(dev,
				"PCIe: RC%d: can't find IRQ # for %s. ret %d\n",
				dev->rc_idx, irq_info->name, num);
		} else {
			irq_info->num = num;
			PCIE_DBG(dev, "IRQ # for %s is %d.\n", irq_info->name,
				irq_info->num);
		}
	}

	ret = msm_pcie_get_tcsr_values(dev, pdev);
	if (ret)
		return ret;

	ret = msm_pcie_get_clk(dev);
	if (ret)
		return ret;

	ret = msm_pcie_get_vreg(dev);
	if (ret)
		return ret;

	ret = msm_pcie_get_reset(dev);
	if (ret)
		return ret;

	ret = msm_pcie_get_bw_scale(dev);
	if (ret)
		return ret;

	ret = msm_pcie_get_phy(dev);
	if (ret)
		return ret;

	ret = msm_pcie_get_iommu_map(dev);
	if (ret)
		return ret;

	ret = msm_pcie_get_gpio(dev);
	if (ret)
		return ret;

	ret = msm_pcie_get_parf_status_reg(dev);
	if (ret)
		return ret;

	ret = msm_pcie_get_dbi_status_reg(dev);
	if (ret)
		return ret;

	ret = msm_pcie_get_phy_status_reg(dev);
	if (ret)
		return ret;

	PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

	return 0;
}
- static void msm_pcie_release_resources(struct msm_pcie_dev_t *dev)
- {
- dev->parf = NULL;
- dev->elbi = NULL;
- dev->iatu = NULL;
- dev->dm_core = NULL;
- dev->conf = NULL;
- dev->pcie_sm = NULL;
- dev->mhi = NULL;
- dev->tcsr = NULL;
- dev->rumi = NULL;
- kfree(dev->parf_debug_reg);
- kfree(dev->dbi_debug_reg);
- kfree(dev->phy_debug_reg);
- dev->parf_debug_reg = NULL;
- dev->dbi_debug_reg = NULL;
- dev->phy_debug_reg = NULL;
- }
/*
 * msm_pcie_scale_link_bandwidth - adjust shared resources for a new speed.
 * @pcie_dev:          PCIe root-complex device.
 * @target_link_speed: PCI_EXP_LNKCTL2_TLS_* encoded target GEN speed.
 *
 * Two paths: with CESTA (pcie_sm set) the vote is placed through the
 * CESTA map and crm; otherwise the cx/mx regulator voltages and the
 * rate-change clock are programmed directly from the bw_scale table.
 * No-op when bandwidth scaling is unsupported or the speed is out of
 * range.
 */
static void msm_pcie_scale_link_bandwidth(struct msm_pcie_dev_t *pcie_dev,
					  u16 target_link_speed)
{
	struct msm_pcie_bw_scale_info_t *bw_scale;
	/* bw_scale[] is indexed from GEN1 (TLS 2.5GT/s) */
	u32 index = target_link_speed - PCI_EXP_LNKCTL2_TLS_2_5GT;
	int ret;

	if (!pcie_dev->bw_scale)
		return;

	if (index >= pcie_dev->bw_gen_max) {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: invalid target link speed: %d\n",
			pcie_dev->rc_idx, target_link_speed);
		return;
	}

	/* Use CESTA to scale the resources */
	if (pcie_dev->pcie_sm) {
		/* If CESTA already voted for required speed then bail out */
		if (target_link_speed + PERF_LVL_L1SS ==
				msm_pcie_cesta_map[D0_STATE][POWER_STATE_1])
			return;

		msm_pcie_cesta_map_save(target_link_speed);

		ret = msm_pcie_cesta_map_apply(pcie_dev, D0_STATE);
		if (ret)
			PCIE_ERR(pcie_dev, "Failed to move to D0 state %d\n",
				ret);
		return;
	}

	bw_scale = &pcie_dev->bw_scale[index];

	if (pcie_dev->cx_vreg)
		regulator_set_voltage(pcie_dev->cx_vreg->hdl,
				bw_scale->cx_vreg_min,
				pcie_dev->cx_vreg->max_v);

	if (pcie_dev->mx_vreg)
		regulator_set_voltage(pcie_dev->mx_vreg->hdl,
				bw_scale->mx_vreg_min,
				pcie_dev->mx_vreg->max_v);

	if (pcie_dev->rate_change_clk)
		clk_set_rate(pcie_dev->rate_change_clk,
				bw_scale->rate_change_freq);
}
- static int msm_pcie_link_train(struct msm_pcie_dev_t *dev)
- {
- int link_check_count = 0;
- uint32_t val, link_status;
- msm_pcie_write_reg_field(dev->dm_core,
- PCIE_GEN3_GEN2_CTRL, 0x1f00, 1);
- /* Controller settings related to PCIe PHY */
- msm_pcie_config_controller_phy(dev);
- /* configure PCIe preset */
- msm_pcie_config_core_preset(dev);
- if (dev->target_link_speed) {
- #ifdef CONFIG_SEC_PCIE
- if (dev->target_link_speed < GEN1_SPEED)
- dev->target_link_speed = GEN1_SPEED;
- if (dev->target_link_speed > GEN3_SPEED)
- dev->target_link_speed = GEN3_SPEED;
- #endif
- msm_pcie_write_reg_field(dev->dm_core,
- PCIE20_CAP + PCI_EXP_LNKCTL2,
- PCI_EXP_LNKCTL2_TLS, dev->target_link_speed);
- }
- /* set max tlp read size */
- msm_pcie_write_reg_field(dev->dm_core, PCIE20_DEVICE_CONTROL_STATUS,
- 0x7000, dev->tlp_rd_size);
- /* enable link training */
- msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, BIT(8));
- PCIE_DBG(dev, "%s", "check if link is up\n");
- /* Wait for up to 100ms for the link to come up */
- do {
- usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
- val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
- PCIE_INFO(dev, "PCIe RC%d: LTSSM_STATE: %s\n",
- dev->rc_idx, TO_LTSSM_STR((val >> 12) & 0x3f));
- } while ((!(val & XMLH_LINK_UP) || !msm_pcie_dll_link_active(dev))
- && (link_check_count++ < dev->link_check_max_count));
- if ((val & XMLH_LINK_UP) && msm_pcie_dll_link_active(dev)) {
- PCIE_DBG(dev, "Link is up after %d checkings\n",
- link_check_count);
- PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
- } else {
- #if IS_ENABLED(CONFIG_I2C)
- if (dev->i2c_ctrl.client && dev->i2c_ctrl.client_i2c_dump_regs)
- dev->i2c_ctrl.client_i2c_dump_regs(&dev->i2c_ctrl);
- #endif
- PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
- dev->rc_idx);
- gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
- dev->gpio[MSM_PCIE_GPIO_PERST].on);
- PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
- dev->rc_idx);
- return MSM_PCIE_ERROR;
- }
- link_status = readl_relaxed(dev->dm_core + PCIE20_CAP_LINKCTRLSTATUS);
- dev->current_link_speed = (link_status >> 16) & PCI_EXP_LNKSTA_CLS;
- dev->current_link_width = ((link_status >> 16) & PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT;
- PCIE_DBG(dev, "PCIe: RC%d: Link is up at Gen%dX%d\n",
- dev->rc_idx, dev->current_link_speed,
- dev->current_link_width);
- if ((!dev->enumerated) && dev->panic_genspeed_mismatch &&
- dev->target_link_speed &&
- dev->target_link_speed != dev->current_link_speed)
- panic("PCIe: RC%d: Gen-speed mismatch:%d, expected:%d\n",
- dev->rc_idx, dev->current_link_speed,
- dev->target_link_speed);
- /*
- * If the link up GEN speed is less than the max/default supported,
- * then scale the resources accordingly.
- */
- if (dev->bw_scale && dev->current_link_speed < dev->bw_gen_max) {
- u32 index;
- struct msm_pcie_bw_scale_info_t *bw_scale;
- index = dev->current_link_speed - PCI_EXP_LNKCTL2_TLS_2_5GT;
- if (index >= dev->bw_gen_max) {
- PCIE_ERR(dev,
- "PCIe: RC%d: unsupported gen speed: %d\n",
- dev->rc_idx, dev->current_link_speed);
- return 0;
- }
- bw_scale = &dev->bw_scale[index];
- msm_pcie_write_reg_field(dev->dm_core, PCIE20_CAP +
- PCI_EXP_LNKCTL2, PCI_EXP_LNKCTL2_TLS,
- dev->current_link_speed);
- msm_pcie_scale_link_bandwidth(dev, dev->current_link_speed);
- }
- return 0;
- }
/*
 * msm_pcie_check_ep_access - wait for endpoint config space to respond.
 * @dev: controller state
 * @ep_up_timeout: jiffies deadline for the endpoint to become accessible
 *
 * Polls the first dword of the endpoint's config space until it no longer
 * reads PCIE_LINK_DOWN or the deadline passes. On failure the link is
 * marked disabled and powered off and -ENODEV is returned.
 */
static int msm_pcie_check_ep_access(struct msm_pcie_dev_t *dev,
    unsigned long ep_up_timeout)
{
    int ret = 0;
#ifdef CONFIG_SEC_PCIE
    /* [0] = RC target speed, [1] = EP target speed, [2] = current speed */
    u32 link_speed[3] = {0,}, link_width = 0;
#endif

    /* check endpoint configuration space is accessible */
    while (time_before(jiffies, ep_up_timeout)) {
        if (readl_relaxed(dev->conf) != PCIE_LINK_DOWN)
            break;
        usleep_range(EP_UP_TIMEOUT_US_MIN, EP_UP_TIMEOUT_US_MAX);
    }

    if (readl_relaxed(dev->conf) != PCIE_LINK_DOWN) {
        PCIE_DBG(dev,
            "PCIe: RC%d: endpoint config space is accessible\n",
            dev->rc_idx);
#ifdef CONFIG_SEC_PCIE_L1SS
        dev->ep_config_accessible = true;
#endif
#ifdef CONFIG_SEC_PCIE
        /* log max/target/current link speeds for both link partners */
        PCIE_INFO(dev, "PCIe RC%d Max GEN%d, EP GEN%d\n",
            dev->rc_idx, pcie_get_max_linkspeed(dev->rc_idx, 0),
            pcie_get_max_linkspeed(dev->rc_idx, 1));
        link_speed[0] = pcie_get_target_linkspeed(dev->rc_idx, 0);
        link_speed[1] = pcie_get_target_linkspeed(dev->rc_idx, 1);
        PCIE_INFO(dev, "PCIe RC%d Target GEN%d, EP GEN%d\n",
            dev->rc_idx, link_speed[0], link_speed[1]);
        pcie_get_cur_link_bw(dev->rc_idx, &link_speed[2], &link_width);
        PCIE_INFO(dev, "PCIe RC%d Current GEN%d, %d lanes\n",
            dev->rc_idx, link_speed[2], link_width);
        /* current speed differing from the RC target is treated as error */
        if (link_speed[0] != link_speed[2]) {
            set_bit(PCIE_ERROR_LINK_SPEED_MISMATCH, &dev->pcie_error);
#ifdef CONFIG_SEC_PANIC_PCIE_ERR
            panic("PCIe: RC%d: link speed fail(GEN%d -> %d)\n",
                dev->rc_idx, link_speed[0], link_speed[2]);
#endif
        }
#ifdef CONFIG_SEC_PANIC_PCIE_ERR
        /* endpoint is up: re-arm the link-up retry budget */
        dev->remained_linkup_retry = dev->allow_linkup_retry;
#endif
#endif
    } else {
        PCIE_ERR(dev,
            "PCIe: RC%d: endpoint config space is not accessible\n",
            dev->rc_idx);
        /* give up: record the link as down and powered off */
        dev->link_status = MSM_PCIE_LINK_DISABLED;
        dev->power_on = false;
        dev->link_turned_off_counter++;
#ifdef CONFIG_SEC_PANIC_PCIE_ERR
        dev->remained_linkup_retry = 0;
#endif
#ifdef CONFIG_SEC_PCIE
        set_bit(PCIE_ERROR_LINK_FAIL, &dev->pcie_error);
#endif
        ret = -ENODEV;
    }

    return ret;
}
- #if IS_ENABLED(CONFIG_I2C)
- /* write 32-bit value to 24 bit register */
- static int ntn3_i2c_write(struct i2c_client *client, u32 reg_addr,
- u32 reg_val)
- {
- int ret;
- u8 msg_buf[7];
- struct i2c_msg msg;
- msg.addr = client->addr;
- msg.len = 7;
- msg.flags = 0;
- /* Big Endian for reg addr */
- msg_buf[0] = (u8)(reg_addr >> 16);
- msg_buf[1] = (u8)(reg_addr >> 8);
- msg_buf[2] = (u8)reg_addr;
- /* Little Endian for reg val */
- msg_buf[3] = (u8)(reg_val);
- msg_buf[4] = (u8)(reg_val >> 8);
- msg_buf[5] = (u8)(reg_val >> 16);
- msg_buf[6] = (u8)(reg_val >> 24);
- msg.buf = msg_buf;
- ret = i2c_transfer(client->adapter, &msg, 1);
- return ret == 1 ? 0 : ret;
- }
- /* read 32 bit value from 24 bit reg addr */
- static int ntn3_i2c_read(struct i2c_client *client, u32 reg_addr,
- u32 *reg_val)
- {
- int ret;
- u8 wr_data[3], rd_data[4];
- struct i2c_msg msg[2];
- msg[0].addr = client->addr;
- msg[0].len = 3;
- msg[0].flags = 0;
- // Big Endian for reg addr
- wr_data[0] = (u8)(reg_addr >> 16);
- wr_data[1] = (u8)(reg_addr >> 8);
- wr_data[2] = (u8)reg_addr;
- msg[0].buf = wr_data;
- msg[1].addr = client->addr;
- msg[1].len = 4;
- msg[1].flags = I2C_M_RD;
- msg[1].buf = rd_data;
- ret = i2c_transfer(client->adapter, &msg[0], 2);
- if (ret != 2)
- return ret;
- *reg_val = (rd_data[3] << 24) | (rd_data[2] << 16) | (rd_data[1] << 8) |
- rd_data[0];
- return 0;
- }
/*
 * ntn3_ep_reset_ctrl - drive the endpoint reset GPIOs behind the NTN3 switch.
 * @i2c_ctrl: embedded I2C control block (container gives the PCIe device)
 * @reset: true to assert (hold EPs in reset), false to release them
 *
 * Configures the NTN3 GPIOs as outputs, drives the reset lines low, and,
 * when @reset is false, toggles them back high (0 -> 1) to bring the
 * endpoints out of reset. Every write is followed by a read of the same
 * register to flush/confirm it. Read failures abort with an errno; write
 * return values are intentionally not checked (best effort, verified by
 * the read-back). Returns 0 on success, -EOPNOTSUPP when no I2C accessors
 * are installed.
 */
static int ntn3_ep_reset_ctrl(struct pcie_i2c_ctrl *i2c_ctrl, bool reset)
{
    int ret, rd_val;
    struct msm_pcie_dev_t *pcie_dev = container_of(i2c_ctrl,
                            struct msm_pcie_dev_t,
                            i2c_ctrl);

    if (!i2c_ctrl->client_i2c_write || !i2c_ctrl->client_i2c_read)
        return -EOPNOTSUPP;

    /* set NTN3 GPIO as output */
    ret = i2c_ctrl->client_i2c_read(i2c_ctrl->client,
            i2c_ctrl->gpio_config_reg, &rd_val);
    if (ret) {
        PCIE_DBG(pcie_dev,
            "PCIe: RC%d: gpio config reg read failed : %d\n",
            pcie_dev->rc_idx, ret);
        return ret;
    }

    /* clearing the mask bits configures those GPIOs as outputs */
    rd_val &= ~i2c_ctrl->ep_reset_gpio_mask;
    i2c_ctrl->client_i2c_write(i2c_ctrl->client, i2c_ctrl->gpio_config_reg,
        rd_val);

    /* read back to flush write - config gpio */
    ret = i2c_ctrl->client_i2c_read(i2c_ctrl->client,
            i2c_ctrl->gpio_config_reg, &rd_val);
    if (ret) {
        PCIE_DBG(pcie_dev,
            "PCIe: RC%d: gpio config reg read failed : %d\n",
            pcie_dev->rc_idx, ret);
        return ret;
    }

    ret = i2c_ctrl->client_i2c_read(i2c_ctrl->client,
            i2c_ctrl->ep_reset_reg, &rd_val);
    if (ret) {
        PCIE_DBG(pcie_dev,
            "PCIe: RC%d: ep_reset_gpio read failed : %d\n",
            pcie_dev->rc_idx, ret);
        return ret;
    }

    /* drive the reset lines low: assert reset on the endpoints */
    rd_val &= ~i2c_ctrl->ep_reset_gpio_mask;
    i2c_ctrl->client_i2c_write(i2c_ctrl->client, i2c_ctrl->ep_reset_reg,
        rd_val);

    /* read back to flush write - reset gpio */
    ret = i2c_ctrl->client_i2c_read(i2c_ctrl->client,
            i2c_ctrl->ep_reset_reg, &rd_val);
    if (ret) {
        PCIE_DBG(pcie_dev,
            "PCIe: RC%d: ep_reset_gpio read failed : %d\n",
            pcie_dev->rc_idx, ret);
        return ret;
    }

    /* ep reset done */
    if (reset)
        return 0;

    /* toggle (0 -> 1) reset gpios to bring eps out of reset */
    rd_val |= i2c_ctrl->ep_reset_gpio_mask;
    i2c_ctrl->client_i2c_write(i2c_ctrl->client, i2c_ctrl->ep_reset_reg,
        rd_val);

    /* read back to flush write - reset gpio */
    ret = i2c_ctrl->client_i2c_read(i2c_ctrl->client,
            i2c_ctrl->ep_reset_reg, &rd_val);
    if (ret) {
        PCIE_DBG(pcie_dev,
            "PCIe: RC%d: ep_reset_gpio read failed : %d\n",
            pcie_dev->rc_idx, ret);
        return ret;
    }

    return 0;
}
- static void ntn3_dump_regs(struct pcie_i2c_ctrl *i2c_ctrl)
- {
- int i, val;
- struct msm_pcie_dev_t *pcie_dev = container_of(i2c_ctrl,
- struct msm_pcie_dev_t,
- i2c_ctrl);
- if (!i2c_ctrl->client_i2c_read || !i2c_ctrl->dump_reg_count)
- return;
- PCIE_DUMP(pcie_dev, "PCIe: RC%d: NTN3 reg dumps\n", pcie_dev->rc_idx);
- for (i = 0; i < i2c_ctrl->dump_reg_count; i++) {
- i2c_ctrl->client_i2c_read(i2c_ctrl->client,
- i2c_ctrl->dump_regs[i], &val);
- PCIE_DUMP(pcie_dev, "PCIe: RC%d: reg: 0x%04x val: 0x%08x\n",
- pcie_dev->rc_idx, i2c_ctrl->dump_regs[i], val);
- }
- }
- static void ntn3_de_emphasis_wa(struct pcie_i2c_ctrl *i2c_ctrl)
- {
- int i, val, ret, rd_val;
- struct msm_pcie_dev_t *pcie_dev = container_of(i2c_ctrl,
- struct msm_pcie_dev_t,
- i2c_ctrl);
- ret = i2c_ctrl->client_i2c_read(i2c_ctrl->client,
- i2c_ctrl->version_reg, &rd_val);
- if (ret) {
- PCIE_DBG(pcie_dev, "PCIe: RC%d: gpio version reg read failed : %d\n",
- pcie_dev->rc_idx, ret);
- }
- i2c_ctrl->force_i2c_setting = of_property_read_bool(i2c_ctrl->client->dev.of_node,
- "force-i2c-setting");
- rd_val &= CHECK_NTN3_VERSION_MASK;
- PCIE_DBG(pcie_dev, "PCIe: RC%d: NTN3 Version reg:0x%x and force-i2c-setting is %s enabled",
- pcie_dev->rc_idx, rd_val, i2c_ctrl->force_i2c_setting ? "" : "not");
- if (rd_val == NTN3_CHIP_VERSION_1 || i2c_ctrl->force_i2c_setting) {
- PCIE_DBG(pcie_dev, "PCIe: RC%d: NTN3 reg update\n", pcie_dev->rc_idx);
- for (i = 0; i < i2c_ctrl->reg_update_count; i++) {
- i2c_ctrl->client_i2c_write(i2c_ctrl->client, i2c_ctrl->reg_update[i].offset,
- i2c_ctrl->reg_update[i].val);
- /*Read to make sure writes are completed*/
- i2c_ctrl->client_i2c_read(i2c_ctrl->client, i2c_ctrl->reg_update[i].offset,
- &val);
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: NTN3 reg off:0x%x wr_val:0x%x rd_val:0x%x\n",
- pcie_dev->rc_idx, i2c_ctrl->reg_update[i].offset,
- i2c_ctrl->reg_update[i].val, val);
- }
- }
- for (i = 0; i < i2c_ctrl->switch_reg_update_count; i++) {
- i2c_ctrl->client_i2c_write(i2c_ctrl->client, i2c_ctrl->switch_reg_update[i].offset,
- i2c_ctrl->switch_reg_update[i].val);
- /*Read to make sure writes are completed*/
- i2c_ctrl->client_i2c_read(i2c_ctrl->client, i2c_ctrl->switch_reg_update[i].offset,
- &val);
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: NTN3 reg off:0x%x wr_val:0x%x rd_val:0x%x\n",
- pcie_dev->rc_idx, i2c_ctrl->switch_reg_update[i].offset,
- i2c_ctrl->switch_reg_update[i].val, val);
- }
- }
- #endif
/*
 * msm_pcie_enable_link - configure the controller and bring up the link.
 * @dev: controller state
 *
 * Puts the controller in RC mode, programs PARF interrupt masks and
 * address-space settings, initializes TCSR and the PHY, releases PERST,
 * trains the link, and finally verifies endpoint config-space access.
 *
 * Return: 0 on success, or a negative/driver error code from PHY init,
 * link width setting, link training, or the endpoint access check.
 */
static int msm_pcie_enable_link(struct msm_pcie_dev_t *dev)
{
    int ret = 0;
    uint32_t val;
    unsigned long ep_up_timeout = 0;

    /* configure PCIe to RC mode */
    msm_pcie_write_reg(dev->parf, PCIE20_PARF_DEVICE_TYPE, 0x4);

    /* enable l1 mode, clear bit 5 (REQ_NOT_ENTR_L1) */
    if (dev->l1_supported)
        msm_pcie_write_mask(dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);

    /* enable PCIe clocks and resets */
    msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, BIT(0), 0);

    /* change DBI base address */
    msm_pcie_write_reg(dev->parf, PCIE20_PARF_DBI_BASE_ADDR, 0);

    msm_pcie_write_reg(dev->parf, PCIE20_PARF_SYS_CTRL, 0x365E);

    msm_pcie_write_mask(dev->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL,
        0, BIT(4));

    /* enable selected IRQ */
    msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);

    msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_MASK, 0,
        BIT(MSM_PCIE_INT_EVT_LINK_DOWN) |
        BIT(MSM_PCIE_INT_EVT_L1SUB_TIMEOUT) |
        BIT(MSM_PCIE_INT_EVT_AER_LEGACY) |
        BIT(MSM_PCIE_INT_EVT_AER_ERR) |
        BIT(MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N) |
        BIT(MSM_PCIE_INT_EVT_MSI_0) |
        BIT(MSM_PCIE_INT_EVT_MSI_1) |
        BIT(MSM_PCIE_INT_EVT_MSI_2) |
        BIT(MSM_PCIE_INT_EVT_MSI_3) |
        BIT(MSM_PCIE_INT_EVT_MSI_4) |
        BIT(MSM_PCIE_INT_EVT_MSI_5) |
        BIT(MSM_PCIE_INT_EVT_MSI_6) |
        BIT(MSM_PCIE_INT_EVT_MSI_7));

    PCIE_INFO(dev, "PCIe: RC%d: PCIE20_PARF_INT_ALL_MASK: 0x%x\n",
        dev->rc_idx,
        readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK));

    msm_pcie_write_reg(dev->parf, PCIE20_PARF_SLV_ADDR_SPACE_SIZE,
        dev->slv_addr_space_size);

    /* keep current halt size unless one was configured explicitly */
    val = dev->wr_halt_size ? dev->wr_halt_size :
        readl_relaxed(dev->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT);

    msm_pcie_write_reg(dev->parf, PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT,
        BIT(31) | val);

    /* init tcsr */
    if (dev->tcsr_config)
        pcie_tcsr_init(dev);

    /* init PCIe PHY */
    ret = pcie_phy_init(dev);
    if (ret) {
#ifdef CONFIG_SEC_PANIC_PCIE_ERR
        dev->remained_linkup_retry = 0;
#endif
#ifdef CONFIG_SEC_PCIE
        set_bit(PCIE_ERROR_PHY_INIT, &dev->pcie_error);
#endif
        return ret;
    }

    /* switch phy aux clock source from xo to phy aux clk */
    if (dev->phy_aux_clk_mux && dev->phy_aux_clk_ext_src)
        clk_set_parent(dev->phy_aux_clk_mux, dev->phy_aux_clk_ext_src);

    /* give the endpoint its configured settle time (ms) */
    usleep_range(dev->ep_latency * 1000, dev->ep_latency * 1000);

    if (dev->gpio[MSM_PCIE_GPIO_EP].num)
        gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
            dev->gpio[MSM_PCIE_GPIO_EP].on);

    /* read the maximum lane count from the link capabilities register */
    dev->link_width_max =
        (readl_relaxed(dev->dm_core + PCIE20_CAP + PCI_EXP_LNKCAP) &
        PCI_EXP_LNKCAP_MLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;
    PCIE_DBG(dev, "PCIe: RC%d: Maximum supported link width is %d\n",
        dev->rc_idx, dev->link_width_max);

    if (dev->target_link_width) {
        ret = msm_pcie_set_link_width(dev, dev->target_link_width <<
            PCI_EXP_LNKSTA_NLW_SHIFT);
        if (ret)
            return ret;
    }

    /* Disable override for fal10_veto logic to de-assert Qactive signal */
    msm_pcie_write_mask(dev->parf + PCIE20_PARF_CFG_BITS_3, BIT(0), 0);

    /**
     * configure LANE_SKEW_OFF BIT-5 and PARF_CFG_BITS_3 BIT-8 to support
     * dynamic link width upscaling.
     */
    msm_pcie_write_mask(dev->parf + PCIE20_PARF_CFG_BITS_3, 0, BIT(8));
    msm_pcie_write_mask(dev->dm_core + PCIE20_LANE_SKEW_OFF, 0, BIT(5));

    /* de-assert PCIe reset link to bring EP out of reset */
    PCIE_INFO(dev, "PCIe: Release the reset of endpoint of RC%d.\n",
        dev->rc_idx);
    gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
        1 - dev->gpio[MSM_PCIE_GPIO_PERST].on);
    usleep_range(dev->perst_delay_us_min, dev->perst_delay_us_max);

    ep_up_timeout = jiffies + usecs_to_jiffies(EP_UP_TIMEOUT_US);

#if IS_ENABLED(CONFIG_I2C)
    /* apply NTN3 switch workarounds before starting link training */
    if (dev->i2c_ctrl.client && dev->i2c_ctrl.client_i2c_de_emphasis_wa) {
        dev->i2c_ctrl.client_i2c_de_emphasis_wa(&dev->i2c_ctrl);
        msleep(20);
    }
#endif

    ret = msm_pcie_link_train(dev);
    if (ret) {
#ifdef CONFIG_SEC_PCIE
        set_bit(PCIE_ERROR_TRAINING_FAIL, &dev->pcie_error);
#endif
#ifdef CONFIG_SEC_PANIC_PCIE_ERR
        if (dev->remained_linkup_retry)
            dev->remained_linkup_retry--;
#endif
        return ret;
    }

    dev->link_status = MSM_PCIE_LINK_ENABLED;
    dev->power_on = true;
    dev->suspending = false;
    dev->link_turned_on_counter++;

    /* allow a downstream switch its configured settle time */
    if (dev->switch_latency) {
        PCIE_DBG(dev, "switch_latency: %dms\n",
            dev->switch_latency);
        if (dev->switch_latency <= SWITCH_DELAY_MAX)
            usleep_range(dev->switch_latency * 1000,
                dev->switch_latency * 1000);
        else
            msleep(dev->switch_latency);
    }

    msm_pcie_config_sid(dev);
    msm_pcie_config_controller(dev);

    ret = msm_pcie_check_ep_access(dev, ep_up_timeout);

    return ret;
}
- static int msm_pcie_enable_cesta(struct msm_pcie_dev_t *dev)
- {
- int ret = 0;
- if (dev->pcie_sm) {
- /*
- * Make sure that resources are scaled to link up in max
- * possible Gen speed and scale down the resources if link
- * up happens in lower speeds.
- */
- msm_pcie_cesta_map_save(dev->bw_gen_max);
- ret = msm_pcie_cesta_map_apply(dev, D0_STATE);
- if (ret)
- PCIE_ERR(dev, "Fail to go to D0 State %d\n", ret);
- }
- return ret;
- }
- static void msm_pcie_disable_cesta(struct msm_pcie_dev_t *dev)
- {
- int ret = 0;
- if (dev->pcie_sm) {
- ret = msm_pcie_cesta_map_apply(dev, D3COLD_STATE);
- if (ret)
- PCIE_ERR(dev, "Fail to move to D3 cold state %d\n",
- ret);
- }
- }
- static void msm_pcie_parf_cesta_config(struct msm_pcie_dev_t *dev)
- {
- u32 cesta_config_bits;
- /* Propagate l1ss timeout and clkreq signals to CESTA */
- if (dev->pcie_sm) {
- cesta_config_bits = PARF_CESTA_CLKREQ_SEL |
- PARF_CESTA_L1SUB_TIMEOUT_EXT_INT_EN |
- readl_relaxed(dev->parf + dev->pcie_parf_cesta_config);
- /* Set clkreq to be accessed by CESTA */
- msm_pcie_write_reg(dev->parf, dev->pcie_parf_cesta_config,
- cesta_config_bits);
- } else {
- /*
- * This is currently required only for platforms where clkreq
- * signal is routed to CESTA by default, CESTA is not enabled.
- */
- msm_pcie_write_reg_field(dev->parf,
- dev->pcie_parf_cesta_config,
- PARF_CESTA_CLKREQ_SEL, 0);
- }
- }
/*
 * msm_pcie_enable - full power-up path for one root complex.
 * @dev: controller state
 *
 * Under setup_lock: asserts PERST, then brings up regulators, GDSCs,
 * clocks, CESTA votes, controller/PHY resets, and finally the link via
 * msm_pcie_enable_link(). On failure each stage is unwound through the
 * goto chain (link_fail -> reset_fail -> clk_fail -> gdsc_fail).
 *
 * Return: 0 on success (or if already enabled), negative errno otherwise.
 */
static int msm_pcie_enable(struct msm_pcie_dev_t *dev)
{
    int ret = 0;

    PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

    dev->prevent_l1 = 0;
    dev->debugfs_l1 = false;

    mutex_lock(&dev->setup_lock);

    if (dev->link_status == MSM_PCIE_LINK_ENABLED) {
        PCIE_ERR(dev, "PCIe: the link of RC%d is already enabled\n",
            dev->rc_idx);
        goto out;
    }

    /* assert PCIe reset link to keep EP in reset */
    PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
        dev->rc_idx);
    gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
        dev->gpio[MSM_PCIE_GPIO_PERST].on);
    usleep_range(PERST_PROPAGATION_DELAY_US_MIN,
        PERST_PROPAGATION_DELAY_US_MAX);

    /* enable power */
    ret = msm_pcie_vreg_init(dev);
    if (ret) {
        PCIE_ERR(dev, "PCIe: failed to msm_pcie_vreg_init(). RC%d, ret=%d\n",
            dev->rc_idx, ret);
        goto out;
    }

    /* enable core, phy gdsc */
    ret = msm_pcie_gdsc_init(dev);
    if (ret) {
        PCIE_ERR(dev, "PCIe: failed to msm_pcie_gdsc_init(). RC%d, ret=%d\n",
            dev->rc_idx, ret);
        goto gdsc_fail;
    }

    /* enable clocks */
    ret = msm_pcie_clk_init(dev);
    /* ensure that changes propagated to the hardware */
    wmb();
    if (ret) {
#ifdef CONFIG_SEC_PCIE
        set_bit(PCIE_ERROR_CLK_FAIL, &dev->pcie_error);
#endif
        PCIE_ERR(dev, "PCIe: failed to msm_pcie_clk_init(). RC%d, ret=%d\n",
            dev->rc_idx, ret);
        goto clk_fail;
    }

    /* Use CESTA to turn on the resources */
    ret = msm_pcie_enable_cesta(dev);
    if (ret) {
        PCIE_ERR(dev, "PCIe: failed to msm_pcie_enable_cesta(). RC%d, ret=%d\n",
            dev->rc_idx, ret);
        goto reset_fail;
    }

    /* reset pcie controller and phy */
    ret = msm_pcie_core_phy_reset(dev);
    /* ensure that changes propagated to the hardware */
    wmb();
    if (ret) {
        PCIE_ERR(dev, "PCIe: failed to msm_pcie_core_phy_reset(). RC%d, ret=%d\n",
            dev->rc_idx, ret);
        goto reset_fail;
    }

    /* Configure clkreq, l1ss sleep timeout access to CESTA */
    if (dev->pcie_parf_cesta_config)
        msm_pcie_parf_cesta_config(dev);

    /* RUMI PCIe reset sequence */
    if (dev->rumi_init)
        dev->rumi_init(dev);

    ret = msm_pcie_enable_link(dev);
    if (ret) {
        PCIE_ERR(dev, "PCIe: failed to msm_pcie_enable_link(). RC%d, ret=%d\n",
            dev->rc_idx, ret);
        goto link_fail;
    }

    if (dev->no_client_based_bw_voting)
        msm_pcie_icc_vote(dev, dev->current_link_speed, dev->current_link_width, false);

    /* on re-enable after enumeration, restore MSI and link PM config */
    if (dev->enumerated) {
        if (!dev->lpi_enable)
            msm_msi_config(dev_get_msi_domain(&dev->dev->dev));
        msm_pcie_config_link_pm(dev, true);
    }

#if IS_ENABLED(CONFIG_I2C)
    /* Bring EP out of reset*/
    if (dev->i2c_ctrl.client && dev->i2c_ctrl.client_i2c_reset) {
        dev->i2c_ctrl.client_i2c_reset(&dev->i2c_ctrl, false);
        PCIE_DBG(dev,
            "PCIe: Bring EPs out of reset and then wait for link training.\n");
        msleep(200);
        PCIE_DBG(dev, "PCIe: Finish EPs link training wait.\n");
    }
#endif

    goto out;

link_fail:
#ifdef CONFIG_SEC_PANIC_PCIE_ERR
    /* retry budget exhausted: defer or panic depending on policy */
    if (!dev->remained_linkup_retry) {
        if (is_need_pcie_error_oops(NULL, dev)) {
            if (dev->pcie_error_wq) {
                if (dev->first_pcie_error == PCIE_ERROR_NONE) {
                    dev->first_pcie_error = PCIE_ERROR_LINK_FAIL;
                    PCIE_ERR(dev, "PCIe RC%d link fail. call delayed work.\n", dev->rc_idx);
                    queue_delayed_work(dev->pcie_error_wq,
                        &dev->pcie_error_dwork, msecs_to_jiffies(dev->pcie_error_defer_ms));
                } else {
                    PCIE_ERR(dev, "PCIe RC%d link fail.\n", dev->rc_idx);
                }
            } else {
                panic("PCIe RC%d link fail!\n", dev->rc_idx);
            }
        }
    } else {
        PCIE_ERR(dev, "PCIe RC%d link fail. Remained retry %d\n",
            dev->rc_idx, dev->remained_linkup_retry);
    }
#endif
    /* debug override: leave everything powered for post-mortem */
    if (msm_pcie_keep_resources_on & BIT(dev->rc_idx))
        goto out;

    if (dev->gpio[MSM_PCIE_GPIO_EP].num)
        gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
            1 - dev->gpio[MSM_PCIE_GPIO_EP].on);

    if (dev->phy_power_down_offset)
        msm_pcie_write_reg(dev->phy, dev->phy_power_down_offset, 0);

    /* Use CESTA to turn off the resources */
    msm_pcie_disable_cesta(dev);

    msm_pcie_pipe_clk_deinit(dev);

reset_fail:
    msm_pcie_clk_deinit(dev);
clk_fail:
#ifdef CONFIG_SEC_PANIC_PCIE_ERR
    if (test_bit(PCIE_ERROR_CLK_FAIL, &dev->pcie_error)
        && is_need_pcie_error_oops(NULL, dev))
        panic("PCIe RC%d clk fail!\n", dev->rc_idx);
#endif
    msm_pcie_gdsc_deinit(dev);
gdsc_fail:
    msm_pcie_vreg_deinit(dev);
out:
    mutex_unlock(&dev->setup_lock);

    PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

    return ret;
}
/*
 * msm_pcie_disable - full power-down path for one root complex.
 * @dev: controller state
 *
 * Under setup_lock: suspends MSI access, marks the link down, asserts
 * endpoint resets (I2C switch and PERST), powers down the PHY, drops
 * CESTA votes, and turns off clocks, GDSCs and regulators. No-op if the
 * controller is already powered down.
 */
static void msm_pcie_disable(struct msm_pcie_dev_t *dev)
{
    int ret;

    PCIE_DBG(dev, "RC%d: entry\n", dev->rc_idx);

    mutex_lock(&dev->setup_lock);

    if (!dev->power_on) {
        PCIE_DBG(dev,
            "PCIe: the link of RC%d is already power down.\n",
            dev->rc_idx);
        mutex_unlock(&dev->setup_lock);
        return;
    }

    /* suspend access to MSI register. resume access in msm_msi_config */
    if (!dev->lpi_enable)
        msm_msi_config_access(dev_get_msi_domain(&dev->dev->dev),
            false);

    dev->link_status = MSM_PCIE_LINK_DISABLED;
    dev->power_on = false;
    dev->link_turned_off_counter++;

#if IS_ENABLED(CONFIG_I2C)
    /* assert reset on eps */
    if (dev->i2c_ctrl.client && dev->i2c_ctrl.client_i2c_reset)
        dev->i2c_ctrl.client_i2c_reset(&dev->i2c_ctrl, true);
#endif

#ifdef CONFIG_SEC_PCIE_L1SS
    dev->ep_config_accessible = false;
#endif

    PCIE_INFO(dev, "PCIe: Assert the reset of endpoint of RC%d.\n",
        dev->rc_idx);

    gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
        dev->gpio[MSM_PCIE_GPIO_PERST].on);

    if (dev->phy_power_down_offset)
        msm_pcie_write_reg(dev->phy, dev->phy_power_down_offset, 0);

    msm_pcie_write_mask(dev->parf + PCIE20_PARF_PHY_CTRL, 0,
        BIT(0));

    /* Enable override for fal10_veto logic to assert Qactive signal.*/
    msm_pcie_write_mask(dev->parf + PCIE20_PARF_CFG_BITS_3, 0, BIT(0));

    /* Use CESTA to turn off the resources */
    if (dev->pcie_sm) {
        ret = msm_pcie_cesta_map_apply(dev, D3COLD_STATE);
        if (ret)
            PCIE_ERR(dev, "Failed to move to D3 cold state %d\n",
                ret);
    }

    msm_pcie_clk_deinit(dev);
    msm_pcie_gdsc_deinit(dev);
    msm_pcie_vreg_deinit(dev);
    msm_pcie_pipe_clk_deinit(dev);

    if (dev->gpio[MSM_PCIE_GPIO_EP].num)
        gpio_set_value(dev->gpio[MSM_PCIE_GPIO_EP].num,
            1 - dev->gpio[MSM_PCIE_GPIO_EP].on);

    mutex_unlock(&dev->setup_lock);

    PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);
}
/*
 * msm_pcie_config_device_info - per-device setup hook for pci_walk_bus().
 * @pcidev: device currently being visited
 * @pdev: opaque callback argument; actually the owning struct msm_pcie_dev_t
 *
 * Records endpoints in enum_ep_list, enables bus mastering on switch
 * upstream ports, and (when AER is enabled) hooks up error reporting.
 *
 * Return: 0 on success, -ENOMEM or a pci_enable_device() error otherwise.
 */
static int msm_pcie_config_device_info(struct pci_dev *pcidev, void *pdev)
{
    struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *) pdev;
    struct msm_pcie_device_info *dev_info;
    int ret;

    PCIE_DBG(pcie_dev,
        "PCI device found: vendor-id:0x%x device-id:0x%x\n",
        pcidev->vendor, pcidev->device);

    /* remember every endpoint for later per-EP handling */
    if (pci_pcie_type(pcidev) == PCI_EXP_TYPE_ENDPOINT) {
        dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
        if (!dev_info)
            return -ENOMEM;

        dev_info->dev = pcidev;
        list_add_tail(&dev_info->pcidev_node, &pcie_dev->enum_ep_list);
    }

    /* for upstream port of a switch */
    if (pci_pcie_type(pcidev) == PCI_EXP_TYPE_UPSTREAM) {
        ret = pci_enable_device(pcidev);
        if (ret) {
            PCIE_ERR(pcie_dev,
                "PCIe: BDF 0x%04x pci_enable_device failed\n",
                PCI_DEVID(pcidev->bus->number, pcidev->devfn));
            return ret;
        }
        pci_set_master(pcidev);
    }

    if (pcie_dev->aer_enable) {
        /* keep a handle to the root port's AER statistics */
        if (pci_pcie_type(pcidev) == PCI_EXP_TYPE_ROOT_PORT)
            pcie_dev->aer_stats = pcidev->aer_stats;

        if (pci_enable_pcie_error_reporting(pcidev))
            PCIE_ERR(pcie_dev,
                "PCIe: RC%d: PCIE error reporting unavailable on %02x:%02x:%01x\n",
                pcie_dev->rc_idx, pcidev->bus->number,
                PCI_SLOT(pcidev->devfn), PCI_FUNC(pcidev->devfn));
    }

    return 0;
}
/*
 * msm_pcie_config_sid - program the PARF BDF-to-SID translation table.
 * @dev: controller state
 *
 * Table entries are laid out as BDF [31:16] | SID [15:8] | NEXT [7:0] and
 * indexed by a CRC8 hash of the big-endian BDF. On the first call the
 * table is built, resolving hash collisions by linking each colliding
 * entry to the next free slot through the NEXT byte; later calls (after
 * enumeration) simply replay the values cached in dev->sid_info.
 * No-op when no sid_info table was provided.
 */
static void msm_pcie_config_sid(struct msm_pcie_dev_t *dev)
{
    void __iomem *bdf_to_sid_base = dev->parf +
        PCIE20_PARF_BDF_TO_SID_TABLE_N;
    int i;

    if (!dev->sid_info)
        return;

    /* clear BDF_TO_SID_BYPASS bit to enable BDF to SID translation */
    msm_pcie_write_mask(dev->parf + PCIE20_PARF_BDF_TO_SID_CFG, BIT(0), 0);

    /* Registers need to be zero out first */
    memset_io(bdf_to_sid_base, 0, CRC8_TABLE_SIZE * sizeof(u32));

    if (dev->enumerated) {
        /* re-enable path: replay the previously computed table */
        for (i = 0; i < dev->sid_info_len; i++)
            msm_pcie_write_reg(bdf_to_sid_base,
                dev->sid_info[i].hash * sizeof(u32),
                dev->sid_info[i].value);
        return;
    }

    /* initial setup for boot */
    for (i = 0; i < dev->sid_info_len; i++) {
        struct msm_pcie_sid_info_t *sid_info = &dev->sid_info[i];
        u32 val;
        u8 hash;
        __be16 bdf_be = cpu_to_be16(sid_info->bdf);

        hash = crc8(msm_pcie_crc8_table, (u8 *)&bdf_be, sizeof(bdf_be),
            0);

        val = readl_relaxed(bdf_to_sid_base + hash * sizeof(u32));

        /* if there is a collision, look for next available entry */
        while (val) {
            u8 current_hash = hash++;
            u8 next_mask = 0xff;

            /* if NEXT is NULL then update current entry */
            if (!(val & next_mask)) {
                int j;

                /* link the occupied slot to the slot we try next */
                val |= (u32)hash;
                msm_pcie_write_reg(bdf_to_sid_base,
                    current_hash * sizeof(u32), val);

                /* sid_info of current hash and update it */
                for (j = 0; j < dev->sid_info_len; j++) {
                    if (dev->sid_info[j].hash !=
                        current_hash)
                        continue;

                    dev->sid_info[j].next_hash = hash;
                    dev->sid_info[j].value = val;
                    break;
                }
            }

            val = readl_relaxed(bdf_to_sid_base +
                hash * sizeof(u32));
        }

        /* BDF [31:16] | SID [15:8] | NEXT [7:0] */
        val = sid_info->bdf << 16 | sid_info->pcie_sid << 8 | 0;
        msm_pcie_write_reg(bdf_to_sid_base, hash * sizeof(u32), val);

        /* cache the placement so re-enable can replay it */
        sid_info->hash = hash;
        sid_info->value = val;
    }
}
- int msm_pcie_enumerate(u32 rc_idx)
- {
- int ret = 0;
- struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
- struct pci_dev *pcidev = NULL;
- struct pci_host_bridge *bridge;
- bool found = false;
- u32 ids, vendor_id, device_id;
- LIST_HEAD(res);
- mutex_lock(&dev->enumerate_lock);
- PCIE_DBG(dev, "Enumerate RC%d\n", rc_idx);
- if (!dev->drv_ready) {
- PCIE_DBG(dev,
- "PCIe: RC%d: has not been successfully probed yet\n",
- rc_idx);
- ret = -EPROBE_DEFER;
- goto out;
- }
- if (dev->enumerated) {
- PCIE_ERR(dev, "PCIe: RC%d: has already been enumerated.\n",
- dev->rc_idx);
- goto out;
- }
- ret = msm_pcie_enable(dev);
- if (ret) {
- PCIE_ERR(dev, "PCIe: RC%d: failed to enable\n", dev->rc_idx);
- goto out;
- }
- dev->cfg_access = true;
- /* kick start ARM PCI configuration framework */
- ids = readl_relaxed(dev->dm_core);
- vendor_id = ids & 0xffff;
- device_id = (ids & 0xffff0000) >> 16;
- PCIE_DBG(dev, "PCIe: RC%d: vendor-id:0x%x device_id:0x%x\n",
- dev->rc_idx, vendor_id, device_id);
- if (!dev->bridge) {
- bridge = devm_pci_alloc_host_bridge(&dev->pdev->dev, sizeof(*dev));
- if (!bridge) {
- PCIE_ERR(dev, "PCIe: RC%d: bridge allocation failed\n", dev->rc_idx);
- ret = -ENOMEM;
- goto out;
- }
- dev->bridge = bridge;
- if (!dev->lpi_enable) {
- ret = msm_msi_init(&dev->pdev->dev);
- if (ret) {
- PCIE_ERR(dev,
- "PCIe: failed to initialize msi for RC%d: %d\n",
- dev->rc_idx, ret);
- goto out;
- }
- }
- } else {
- bridge = dev->bridge;
- if (!dev->lpi_enable)
- msm_msi_config_access(dev_get_msi_domain(&dev->dev->dev), true);
- }
- bridge->sysdata = dev;
- bridge->ops = &msm_pcie_ops;
- pci_host_probe(bridge);
- dev->enumerated = true;
- #ifdef CONFIG_SEC_PCIE
- if (dev->esoc_name) {
- dev->ssr_notifier = qcom_register_ssr_notifier(dev->esoc_name, &dev->ssr_nb);
- if (IS_ERR(dev->ssr_notifier)) {
- PCIE_ERR(dev, "PCIe: RC%d: %s: failed to register ssr notifier\n",
- dev->rc_idx, dev->esoc_name);
- dev->ssr_notifier = NULL;
- }
- dev->ssr_nb.notifier_call = msm_pcie_esoc_ssr_notifier;
- }
- #endif
- schedule_work(&pcie_drv.drv_connect);
- msm_pcie_write_mask(dev->dm_core +
- PCIE20_COMMAND_STATUS, 0, BIT(2)|BIT(1));
- if (dev->cpl_timeout && dev->bridge_found)
- msm_pcie_write_reg_field(dev->dm_core,
- PCIE20_DEVICE_CONTROL2_STATUS2, 0xf, dev->cpl_timeout);
- do {
- pcidev = pci_get_device(vendor_id, device_id, pcidev);
- if (pcidev && (dev == (struct msm_pcie_dev_t *)
- PCIE_BUS_PRIV_DATA(pcidev->bus))) {
- dev->dev = pcidev;
- found = true;
- }
- } while (!found && pcidev);
- if (!pcidev) {
- PCIE_ERR(dev, "PCIe: RC%d: Did not find PCI device.\n",
- dev->rc_idx);
- ret = -ENODEV;
- goto out;
- }
- pci_walk_bus(dev->dev->bus, msm_pcie_config_device_info, dev);
- msm_pcie_check_l1ss_support_all(dev);
- msm_pcie_config_link_pm(dev, true);
- pci_save_state(pcidev);
- dev->default_state = pci_store_saved_state(pcidev);
- if (dev->boot_option & MSM_PCIE_NO_PROBE_ENUMERATION)
- dev_pm_syscore_device(&pcidev->dev, true);
- out:
- mutex_unlock(&dev->enumerate_lock);
- return ret;
- }
- EXPORT_SYMBOL(msm_pcie_enumerate);
/*
 * msm_pcie_deenumerate - tear down root complex @rc_idx's PCI bus.
 * @rc_idx: index into msm_pcie_dev[]
 *
 * Under enumerate_lock: cancels pending recovery work, revokes config
 * access, removes the root bus, masks all controller interrupts, and
 * powers the link down via msm_pcie_disable().
 *
 * Return: 0 (also when the RC was never enumerated).
 */
int msm_pcie_deenumerate(u32 rc_idx)
{
    struct msm_pcie_dev_t *dev = &msm_pcie_dev[rc_idx];
    struct pci_host_bridge *bridge = dev->bridge;

    mutex_lock(&dev->enumerate_lock);

    PCIE_DBG(dev, "RC%d: Entry\n", dev->rc_idx);

    if (!dev->enumerated) {
        PCIE_DBG(dev, "RC%d:device is not enumerated\n", dev->rc_idx);
        mutex_unlock(&dev->enumerate_lock);
        return 0;
    }

    if (dev->config_recovery) {
        PCIE_DBG(dev, "RC%d: cancel link_recover_wq\n", dev->rc_idx);
        cancel_work_sync(&dev->link_recover_wq);
    }

    /* block config accesses before the bus goes away */
    spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);
    dev->cfg_access = false;
    spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);

    pci_stop_root_bus(bridge->bus);
    pci_remove_root_bus(bridge->bus);

    /* Mask all the interrupts */
    msm_pcie_write_reg(dev->parf, PCIE20_PARF_INT_ALL_MASK, 0);

    msm_pcie_disable(dev);

    dev->enumerated = false;

    mutex_unlock(&dev->enumerate_lock);

    PCIE_DBG(dev, "RC%d: exit\n", dev->rc_idx);

    return 0;
}
EXPORT_SYMBOL_GPL(msm_pcie_deenumerate);
/*
 * msm_pcie_notify_client - deliver an event to the first registered client.
 * @dev: controller state
 * @event: MSM_PCIE_EVENT_* being reported
 *
 * Walks event_reg_list under evt_reg_list_lock and invokes the first
 * matching callback (the loop breaks after one match). The spinlock is
 * dropped around the callback — see the inline comment. For a linkdown
 * event a client registered with MSM_PCIE_CONFIG_NO_RECOVERY takes over
 * recovery, which is recorded in dev->user_suspend.
 *
 * Return: true if a client callback was invoked.
 */
static bool msm_pcie_notify_client(struct msm_pcie_dev_t *dev,
    enum msm_pcie_event event)
{
    struct msm_pcie_register_event *reg_itr, *temp;
    struct msm_pcie_notify *notify;
    struct msm_pcie_notify client_notify;
    unsigned long flags;
    bool notified = false;

    spin_lock_irqsave(&dev->evt_reg_list_lock, flags);
    list_for_each_entry_safe(reg_itr, temp, &dev->event_reg_list, node) {
        if ((reg_itr->events & event) && reg_itr->callback) {
            /* hand the client a stack copy, not the list entry */
            notify = &reg_itr->notify;
            client_notify.event = event;
            client_notify.user = reg_itr->user;
            client_notify.data = notify->data;
            client_notify.options = notify->options;
            PCIE_DUMP(dev, "PCIe: callback RC%d for event %d\n",
                dev->rc_idx, event);
            /* Release spinlock before notifying client driver
             * and acquire it once done because once host notifies
             * client driver with an event, client can schedule an
             * recovery in same context before returning and
             * expects an new event which could cause an race
             * condition if spinlock is acquired.
             */
            spin_unlock_irqrestore(&dev->evt_reg_list_lock, flags);
            reg_itr->callback(&client_notify);
            notified = true;
            spin_lock_irqsave(&dev->evt_reg_list_lock, flags);
            if ((reg_itr->options & MSM_PCIE_CONFIG_NO_RECOVERY) &&
                (event == MSM_PCIE_EVENT_LINKDOWN)) {
                dev->user_suspend = true;
                PCIE_DBG(dev,
                    "PCIe: Client of RC%d will recover the link later.\n",
                    dev->rc_idx);
            }
            break;
        }
    }
    spin_unlock_irqrestore(&dev->evt_reg_list_lock, flags);

    return notified;
}
/*
 * handle_sbr_func() - work handler that recovers the link after a bridge
 * flush / secondary-bus-reset event.
 *
 * Asserts every registered linkdown reset, waits 1ms, deasserts them in
 * reverse order, re-enables LTSSM link training, then polls up to ~100ms
 * for the link to come back up.  Runs in process context (scheduled from
 * handle_flush_irq()).
 */
static void handle_sbr_func(struct work_struct *work)
{
	int rc, i;
	u32 val, link_check_count = 0;
	struct msm_pcie_reset_info_t *reset_info;
	struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
			handle_sbr_work);

	PCIE_DBG(dev, "PCIe: SBR work for RC%d\n", dev->rc_idx);

	/* Assert all configured linkdown resets; entries without a handle
	 * are skipped.  Failures are logged but do not abort the sequence.
	 */
	for (i = 0; i < MSM_PCIE_MAX_LINKDOWN_RESET; i++) {
		reset_info = &dev->linkdown_reset[i];
		if (!reset_info->hdl)
			continue;

		rc = reset_control_assert(reset_info->hdl);
		if (rc)
			PCIE_ERR(dev,
				"PCIe: RC%d failed to assert reset for %s.\n",
				dev->rc_idx, reset_info->name);
		else
			PCIE_DBG2(dev,
				"PCIe: RC%d successfully asserted reset for %s.\n",
				dev->rc_idx, reset_info->name);
	}

	/* add a 1ms delay to ensure the reset is asserted */
	usleep_range(1000, 1005);

	/* Deassert in the reverse order of assertion. */
	for (i = MSM_PCIE_MAX_LINKDOWN_RESET - 1; i >= 0; i--) {
		reset_info = &dev->linkdown_reset[i];
		if (!reset_info->hdl)
			continue;

		rc = reset_control_deassert(reset_info->hdl);
		if (rc)
			PCIE_ERR(dev,
				"PCIe: RC%d failed to deassert reset for %s.\n",
				dev->rc_idx, reset_info->name);
		else
			PCIE_DBG2(dev,
				"PCIe: RC%d successfully deasserted reset for %s.\n",
				dev->rc_idx, reset_info->name);
	}

	PCIE_DBG(dev, "post reset ltssm:%x\n",
		readl_relaxed(dev->parf + PCIE20_PARF_LTSSM));

	/* enable link training */
	msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM, 0, LTSSM_EN);

	/* Wait for up to 100ms for the link to come up */
	do {
		val = readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
		PCIE_DBG(dev, "PCIe RC%d: LTSSM_STATE: %x %s\n",
			dev->rc_idx, val, TO_LTSSM_STR((val >> 12) & 0x3f));
		usleep_range(10000, 11000);
	} while ((!(val & XMLH_LINK_UP) ||
		!msm_pcie_confirm_linkup(dev, false, false, NULL))
		&& (link_check_count++ < 10));

	/* Re-check both the raw status bit and the confirmed link state
	 * before declaring success, since the loop may have timed out.
	 */
	if ((val & XMLH_LINK_UP) &&
	    msm_pcie_confirm_linkup(dev, false, false, NULL)) {
		dev->link_status = MSM_PCIE_LINK_ENABLED;
		PCIE_DBG(dev, "Link is up after %d checkings\n",
			link_check_count);
		PCIE_INFO(dev, "PCIe RC%d link initialized\n", dev->rc_idx);
	} else {
		PCIE_ERR(dev, "PCIe RC%d link initialization failed\n",
			dev->rc_idx);
	}
}
- static irqreturn_t handle_flush_irq(int irq, void *data)
- {
- struct msm_pcie_dev_t *dev = data;
- schedule_work(&dev->handle_sbr_work);
- return IRQ_HANDLED;
- }
- static void handle_wake_func(struct work_struct *work)
- {
- struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
- handle_wake_work);
- PCIE_DBG(dev, "PCIe: Wake work for RC%d\n", dev->rc_idx);
- mutex_lock(&dev->recovery_lock);
- if (dev->enumerated) {
- PCIE_ERR(dev,
- "PCIe: The enumeration for RC%d has already been done.\n",
- dev->rc_idx);
- goto out;
- }
- PCIE_DBG(dev,
- "PCIe: Start enumeration for RC%d upon the wake from endpoint.\n",
- dev->rc_idx);
- if (msm_pcie_enumerate(dev->rc_idx)) {
- PCIE_ERR(dev,
- "PCIe: failed to enable RC%d upon wake request from the device.\n",
- dev->rc_idx);
- goto out;
- }
- msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKUP);
- out:
- mutex_unlock(&dev->recovery_lock);
- }
- static void handle_link_recover(struct work_struct *work)
- {
- struct msm_pcie_dev_t *dev = container_of(work, struct msm_pcie_dev_t,
- link_recover_wq);
- PCIE_DBG(dev, "PCIe: link recover start for RC%d\n", dev->rc_idx);
- msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINK_RECOVER);
- }
- /* AER error handling */
- static void msm_pci_dev_aer_stats_incr(struct pci_dev *pdev,
- struct msm_aer_err_info *info)
- {
- unsigned long status = info->status & ~info->mask;
- int i, max = -1;
- u64 *counter = NULL;
- struct aer_stats *aer_stats = pdev->aer_stats;
- if (!aer_stats)
- return;
- switch (info->severity) {
- case AER_CORRECTABLE:
- aer_stats->dev_total_cor_errs++;
- counter = &aer_stats->dev_cor_errs[0];
- max = AER_MAX_TYPEOF_COR_ERRS;
- break;
- case AER_NONFATAL:
- aer_stats->dev_total_nonfatal_errs++;
- counter = &aer_stats->dev_nonfatal_errs[0];
- max = AER_MAX_TYPEOF_UNCOR_ERRS;
- break;
- case AER_FATAL:
- aer_stats->dev_total_fatal_errs++;
- counter = &aer_stats->dev_fatal_errs[0];
- max = AER_MAX_TYPEOF_UNCOR_ERRS;
- break;
- }
- for_each_set_bit(i, &status, max)
- counter[i]++;
- }
- static void msm_pci_rootport_aer_stats_incr(struct pci_dev *pdev,
- struct aer_err_source *e_src)
- {
- struct aer_stats *aer_stats = pdev->aer_stats;
- if (!aer_stats)
- return;
- if (e_src->status & PCI_ERR_ROOT_COR_RCV)
- aer_stats->rootport_total_cor_errs++;
- if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
- if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
- aer_stats->rootport_total_fatal_errs++;
- else
- aer_stats->rootport_total_nonfatal_errs++;
- }
- }
- static void msm_print_tlp_header(struct pci_dev *dev,
- struct msm_aer_err_info *info)
- {
- PCIE_DBG(info->rdev, "PCIe: RC%d: TLP Header: %08x %08x %08x %08x\n",
- info->rdev->rc_idx, info->tlp.dw0, info->tlp.dw1, info->tlp.dw2, info->tlp.dw3);
- }
- static void msm_aer_print_error_stats(struct pci_dev *dev,
- struct msm_aer_err_info *info)
- {
- const char * const *strings;
- unsigned long status = info->status & ~info->mask;
- const char *errmsg;
- int i;
- if (info->severity == AER_CORRECTABLE)
- strings = aer_correctable_error_string;
- else
- strings = aer_uncorrectable_error_string;
- for_each_set_bit(i, &status, 32) {
- errmsg = strings[i];
- if (!errmsg)
- errmsg = "Unknown Error Bit";
- PCIE_DBG(info->rdev, "PCIe: RC%d: [%2d] %-22s%s\n",
- info->rdev->rc_idx, i, errmsg,
- info->first_error == i ? " (First)" : "");
- }
- msm_pci_dev_aer_stats_incr(dev, info);
- }
/*
 * msm_aer_print_error() - log a decoded AER error for one device.
 *
 * When no status bits were recorded the source is inaccessible and only
 * the severity is logged.  Otherwise this prints the layer/agent decode,
 * raw status/mask, the cached L1SS CTL1 and link-status registers, the
 * per-bit error strings (via msm_aer_print_error_stats()) and, when
 * captured, the TLP header.
 */
void msm_aer_print_error(struct pci_dev *dev, struct msm_aer_err_info *info)
{
	int layer, agent;
	/* Requester ID of this device, for the "reported first" check. */
	int id = ((dev->bus->number << 8) | dev->devfn);

	if (!info->status) {
		PCIE_DBG(info->rdev,
			"PCIe: RC%d: PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
			info->rdev->rc_idx, aer_error_severity_string[info->severity]);
		goto out;
	}

	layer = AER_GET_LAYER_ERROR(info->severity, info->status);
	agent = AER_GET_AGENT(info->severity, info->status);

	PCIE_DBG(info->rdev, "PCIe: RC%d: PCIe Bus Error: severity=%s, type=%s, (%s)\n",
		info->rdev->rc_idx, aer_error_severity_string[info->severity],
		aer_error_layer[layer], aer_agent_string[agent]);

	PCIE_DBG(info->rdev, "PCIe: RC%d: device [%04x:%04x] error status/mask=%08x/%08x\n",
		info->rdev->rc_idx, dev->vendor, dev->device, info->status,
		info->mask);

	PCIE_DBG(info->rdev, "PCIe: RC%d: device [%04x:%04x] error l1ss_ctl1=%x lnkstat=%x\n",
		info->rdev->rc_idx, dev->vendor, dev->device, info->l1ss_ctl1,
		info->lnksta);

	msm_aer_print_error_stats(dev, info);

	if (info->tlp_header_valid)
		msm_print_tlp_header(dev, info);

out:
	/* Note when this agent matches the first-reported error source ID. */
	if (info->id && info->error_dev_num > 1 && info->id == id)
		PCIE_DBG(info->rdev, "PCIe: RC%d: Error of this Agent is reported first\n",
			info->rdev->rc_idx);
}
- static void msm_aer_print_port_info(struct pci_dev *dev,
- struct msm_aer_err_info *info)
- {
- u8 bus = info->id >> 8;
- u8 devfn = info->id & 0xff;
- PCIE_DBG(info->rdev, "PCIe: RC%d: %s%s error received: %04x:%02x:%02x.%d\n",
- info->rdev->rc_idx, info->multi_error_valid ? "Multiple " : "",
- aer_error_severity_string[info->severity],
- pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn));
- }
- /**
- * msm_add_error_device - list device to be handled
- * @e_info: pointer to error info
- * @dev: pointer to pci_dev to be added
- */
- static int msm_add_error_device(struct msm_aer_err_info *e_info,
- struct pci_dev *dev)
- {
- if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
- e_info->dev[e_info->error_dev_num] = pci_dev_get(dev);
- e_info->error_dev_num++;
- return 0;
- }
- return -ENOSPC;
- }
- /**
- * msm_is_error_source - check whether the device is source of reported error
- * @dev: pointer to pci_dev to be checked
- * @e_info: pointer to reported error info
- */
- static bool msm_is_error_source(struct pci_dev *dev,
- struct msm_aer_err_info *e_info)
- {
- int aer = dev->aer_cap;
- u32 status, mask;
- u16 reg16;
- /*
- * When bus id is equal to 0, it might be a bad id
- * reported by root port.
- */
- if ((PCI_BUS_NUM(e_info->id) != 0) &&
- !(dev->bus->bus_flags & PCI_BUS_FLAGS_NO_AERSID)) {
- /* Device ID match? */
- if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
- return true;
- /* Continue id comparing if there is no multiple error */
- if (!e_info->multi_error_valid)
- return false;
- }
- /*
- * When either
- * 1) bus id is equal to 0. Some ports might lose the bus
- * id of error source id;
- * 2) bus flag PCI_BUS_FLAGS_NO_AERSID is set
- * 3) There are multiple errors and prior ID comparing fails;
- * We check AER status registers to find possible reporter.
- */
- if (atomic_read(&dev->enable_cnt) == 0)
- return false;
- /* Check if AER is enabled */
- pcie_capability_read_word(dev, PCI_EXP_DEVCTL, ®16);
- if (!(reg16 & PCI_EXP_AER_FLAGS))
- return false;
- if (!aer)
- return false;
- /* Check if error is recorded */
- if (e_info->severity == AER_CORRECTABLE) {
- pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS, &status);
- pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK, &mask);
- } else {
- pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
- pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &mask);
- }
- if (status & ~mask)
- return true;
- return false;
- }
- static int msm_find_device_iter(struct pci_dev *dev, void *data)
- {
- struct msm_aer_err_info *e_info = (struct msm_aer_err_info *)data;
- if (msm_is_error_source(dev, e_info)) {
- /* List this device */
- if (msm_add_error_device(e_info, dev)) {
- /* We cannot handle more... Stop iteration */
- return 1;
- }
- /* If there is only a single error, stop iteration */
- if (!e_info->multi_error_valid)
- return 1;
- }
- return 0;
- }
- /**
- * msm_find_source_device - search through device hierarchy for source device
- * @parent: pointer to Root Port pci_dev data structure
- * @e_info: including detailed error information such like id
- *
- * Return true if found.
- *
- * Invoked by DPC when error is detected at the Root Port.
- * Caller of this function must set id, severity, and multi_error_valid of
- * struct msm_aer_err_info pointed by @e_info properly. This function must fill
- * e_info->error_dev_num and e_info->dev[], based on the given information.
- */
- static bool msm_find_source_device(struct pci_dev *parent,
- struct msm_aer_err_info *e_info)
- {
- struct pci_dev *dev = parent;
- int result;
- /* Must reset in this function */
- e_info->error_dev_num = 0;
- /* Is Root Port an agent that sends error message? */
- result = msm_find_device_iter(dev, e_info);
- if (result)
- return true;
- pci_walk_bus(parent->subordinate, msm_find_device_iter, e_info);
- if (!e_info->error_dev_num) {
- PCIE_DBG(e_info->rdev, "PCIe: RC%d: can't find device of ID%04x\n",
- e_info->rdev->rc_idx, e_info->id);
- return false;
- }
- return true;
- }
/**
 * msm_handle_error_source - handle logging error into an event log
 * @dev: pointer to pci_dev data structure of error source device
 * @info: comprehensive error information
 *
 * Invoked when an error being detected by Root Port.
 *
 * Dumps controller registers once per error burst, clears the recorded
 * AER/device status bits according to severity, and (depending on the
 * panic configuration) may panic the system.  Drops the device reference
 * taken in msm_add_error_device().
 */
static void msm_handle_error_source(struct pci_dev *dev,
				    struct msm_aer_err_info *info)
{
	int aer = dev->aer_cap;
	struct msm_pcie_dev_t *rdev = info->rdev;
	u32 status, sev;
#ifdef CONFIG_SEC_PCIE_AER
	/* Persists across calls: counts uncorrectable AER hits before panic. */
	static int aer_counter;
#endif

	if (!rdev->aer_dump && !rdev->suspending &&
	    rdev->link_status == MSM_PCIE_LINK_ENABLED) {
		/* Print the dumps only once */
		rdev->aer_dump = true;

		/* Correctable errors only warrant a dump if panic_on_aer. */
		if (info->severity == AER_CORRECTABLE &&
		    !rdev->panic_on_aer)
			goto skip;

		/* Disable dumping PCIe registers when we are in DRV suspend */
		spin_lock_irqsave(&rdev->cfg_lock, rdev->irqsave_flags);
		if (!rdev->cfg_access) {
			PCIE_DBG2(rdev,
				"PCIe: RC%d is currently in drv suspend.\n",
				rdev->rc_idx);
			spin_unlock_irqrestore(&rdev->cfg_lock, rdev->irqsave_flags);
			return;
		}
		pcie_parf_dump(rdev);
		pcie_dm_core_dump(rdev);
		pcie_phy_dump(rdev);
		pcie_sm_dump(rdev);
		pcie_crm_dump(rdev);
		spin_unlock_irqrestore(&rdev->cfg_lock, rdev->irqsave_flags);
skip:
		if (rdev->panic_on_aer)
			panic("AER error severity %d\n", info->severity);
	}

	if (info->severity == AER_CORRECTABLE) {
		/*
		 * Correctable error does not need software intervention.
		 * No need to go through error recovery process.
		 */
		if (aer)
			pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS,
					info->status);
		pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVSTA, 0,
						   PCI_EXP_DEVSTA_CED |
						   PCI_EXP_DEVSTA_NFED |
						   PCI_EXP_DEVSTA_FED);
	} else if (info->severity == AER_NONFATAL) {
		if (aer) {
			/* Clear status bits for ERR_NONFATAL errors only */
			pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
			pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_SEVER, &sev);
			status &= ~sev;
			if (status)
				pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, status);
		}
		pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVSTA, 0,
						   PCI_EXP_DEVSTA_CED |
						   PCI_EXP_DEVSTA_NFED |
						   PCI_EXP_DEVSTA_FED);
#ifdef CONFIG_SEC_PCIE_AER
		/* Defer the panic until the counter reaches the limit. */
		if (aer_counter >= corr_counter_limit)
			panic("AER error severity %d\n", info->severity);
		else {
			pr_err("AER error severity %d, aer_counter=%d\n", info->severity, aer_counter);
			aer_counter++;
		}
#endif
	} else {
		/* AER_FATAL */
#ifdef CONFIG_SEC_PCIE_AER
		if (aer_counter >= corr_counter_limit)
			panic("AER error severity %d\n", info->severity);
		else {
			pr_err("AER error severity %d, aer_counter=%d\n", info->severity, aer_counter);
			aer_counter++;
		}
#else
		panic("AER error severity %d\n", info->severity);
#endif
	}

	/* Drop the reference taken in msm_add_error_device(). */
	pci_dev_put(dev);
}
- /**
- * msm_aer_get_device_error_info - read error status from dev and store it to
- * info
- * @dev: pointer to the device expected to have a error record
- * @info: pointer to structure to store the error record
- *
- * Return 1 on success, 0 on error.
- *
- * Note that @info is reused among all error devices. Clear fields properly.
- */
- static int msm_aer_get_device_error_info(struct pci_dev *dev,
- struct msm_aer_err_info *info)
- {
- int type = pci_pcie_type(dev);
- int aer = dev->aer_cap;
- int temp;
- u32 l1ss_cap_id_offset;
- /* Must reset in this function */
- info->status = 0;
- info->tlp_header_valid = 0;
- /* The device might not support AER */
- if (!aer)
- return 0;
- l1ss_cap_id_offset = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss_cap_id_offset) {
- PCIE_DBG(info->rdev,
- "PCIe: RC%d: Could not read l1ss cap reg offset\n",
- info->rdev->rc_idx);
- return 0;
- }
- if (info->severity == AER_CORRECTABLE) {
- pci_read_config_dword(dev, aer + PCI_ERR_COR_STATUS,
- &info->status);
- pci_read_config_dword(dev, aer + PCI_ERR_COR_MASK,
- &info->mask);
- pci_read_config_dword(dev, l1ss_cap_id_offset + PCI_L1SS_CTL1,
- &info->l1ss_ctl1);
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA,
- &info->lnksta);
- if (!(info->status & ~info->mask))
- return 0;
- } else if (type == PCI_EXP_TYPE_ROOT_PORT ||
- type == PCI_EXP_TYPE_RC_EC ||
- type == PCI_EXP_TYPE_DOWNSTREAM ||
- info->severity == AER_NONFATAL) {
- /* Link is still healthy for IO reads */
- pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS,
- &info->status);
- pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK,
- &info->mask);
- pci_read_config_dword(dev, l1ss_cap_id_offset + PCI_L1SS_CTL1,
- &info->l1ss_ctl1);
- pcie_capability_read_word(dev, PCI_EXP_LNKSTA,
- &info->lnksta);
- if (!(info->status & ~info->mask))
- return 0;
- /* Get First Error Pointer */
- pci_read_config_dword(dev, aer + PCI_ERR_CAP, &temp);
- info->first_error = PCI_ERR_CAP_FEP(temp);
- if (info->status & AER_LOG_TLP_MASKS) {
- info->tlp_header_valid = 1;
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
- pci_read_config_dword(dev,
- aer + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
- }
- }
- return 1;
- }
- static inline void msm_aer_process_err_devices(struct msm_aer_err_info *e_info)
- {
- int i;
- /* Report all before handle them, not to lost records by reset etc. */
- for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (msm_aer_get_device_error_info(e_info->dev[i], e_info))
- msm_aer_print_error(e_info->dev[i], e_info);
- }
- for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
- if (msm_aer_get_device_error_info(e_info->dev[i], e_info))
- msm_handle_error_source(e_info->dev[i], e_info);
- }
- }
/*
 * msm_aer_isr_one_error() - process a single Root Error Status snapshot.
 *
 * Updates root-port statistics, then dispatches the correctable and the
 * uncorrectable branches independently (both may be set in one snapshot):
 * for each, the source device(s) are located and their errors reported
 * and handled.
 */
static void msm_aer_isr_one_error(struct msm_pcie_dev_t *dev,
				  struct aer_err_source *e_src)
{
	struct msm_aer_err_info e_info;

	e_info.rdev = dev;

	msm_pci_rootport_aer_stats_incr(dev->dev, e_src);

	/*
	 * There is a possibility that both correctable error and
	 * uncorrectable error being logged. Report correctable error first.
	 */
	if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
		e_info.id = ERR_COR_ID(e_src->id);
		e_info.severity = AER_CORRECTABLE;

		if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
			e_info.multi_error_valid = 1;
		else
			e_info.multi_error_valid = 0;
		msm_aer_print_port_info(dev->dev, &e_info);

		if (msm_find_source_device(dev->dev, &e_info))
			msm_aer_process_err_devices(&e_info);
	}

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		e_info.id = ERR_UNCOR_ID(e_src->id);

		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			e_info.severity = AER_FATAL;
		else
			e_info.severity = AER_NONFATAL;

		if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
			e_info.multi_error_valid = 1;
		else
			e_info.multi_error_valid = 0;

		msm_aer_print_port_info(dev->dev, &e_info);

		if (msm_find_source_device(dev->dev, &e_info))
			msm_aer_process_err_devices(&e_info);
	}
}
/*
 * handle_aer_irq() - threaded IRQ handler draining queued AER events.
 *
 * Events are pushed into aer_fifo by the hard-IRQ path in
 * handle_global_irq().  Each event is processed unless the controller is
 * in DRV suspend (cfg_access false), in which case the remaining events
 * are dropped.
 */
static irqreturn_t handle_aer_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	struct aer_err_source e_src;

#ifdef CONFIG_SEC_PCIE_AER
	dev->aer_irq_counter++;
#endif
	if (kfifo_is_empty(&dev->aer_fifo))
		return IRQ_NONE;

	while (kfifo_get(&dev->aer_fifo, &e_src)) {
		/* Not handling aer interrupts when we are in drv suspend */
		spin_lock_irqsave(&dev->cfg_lock, dev->irqsave_flags);
		if (!dev->cfg_access) {
			PCIE_DBG2(dev,
				"PCIe: RC%d is currently in drv suspend.\n",
				dev->rc_idx);
			spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
			goto done;
		}
		spin_unlock_irqrestore(&dev->cfg_lock, dev->irqsave_flags);
		msm_aer_isr_one_error(dev, &e_src);
	}
done:
#if defined(CONFIG_SEC_PANIC_PCIE_ERR) && defined(CONFIG_SEC_PCIE_AER)
	/* Optionally panic once the cumulative AER IRQ count hits the limit. */
	if (!dev->ignore_pcie_error && dev->aer_irq_counter >= corr_counter_limit) {
		panic("PCIe RC%d AER detect(%lu)!\n",
			dev->rc_idx, dev->aer_irq_counter);
	}
#endif
	return IRQ_HANDLED;
}
/*
 * handle_wake_irq() - WAKE# interrupt handler.
 *
 * If the bus is not yet enumerated (and wake-time enumeration is not
 * disabled via boot_option), schedules handle_wake_work to enumerate.
 * Otherwise pulses the wakeup source, optionally dumps registers for DRV
 * debug, and notifies clients with MSM_PCIE_EVENT_WAKEUP.
 */
static irqreturn_t handle_wake_irq(int irq, void *data)
{
	struct msm_pcie_dev_t *dev = data;
	unsigned long irqsave_flags;

	spin_lock_irqsave(&dev->irq_lock, irqsave_flags);

	dev->wake_counter++;
	PCIE_DBG(dev, "PCIe: No. %ld wake IRQ for RC%d\n",
		dev->wake_counter, dev->rc_idx);

	PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
		dev->rc_idx);

	if (!dev->enumerated && !(dev->boot_option &
		MSM_PCIE_NO_WAKE_ENUMERATION)) {
		PCIE_DBG(dev, "Start enumerating RC%d\n", dev->rc_idx);
		schedule_work(&dev->handle_wake_work);
	} else {
		PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
		/* Briefly hold the wakeup source so the event is seen by PM. */
		__pm_stay_awake(dev->ws);
		__pm_relax(dev->ws);

		if (dev->drv_supported && !dev->suspending &&
		    dev->link_status == MSM_PCIE_LINK_ENABLED) {
			pcie_phy_dump(dev);
			pcie_parf_dump(dev);
			pcie_dm_core_dump(dev);
			pcie_sm_dump(dev);
			pcie_crm_dump(dev);
		}

		msm_pcie_notify_client(dev, MSM_PCIE_EVENT_WAKEUP);
	}

	spin_unlock_irqrestore(&dev->irq_lock, irqsave_flags);

	return IRQ_HANDLED;
}
- /* Attempt to recover link, return 0 if success */
- static int msm_pcie_linkdown_recovery(struct msm_pcie_dev_t *dev)
- {
- u32 status = 0;
- u32 cnt = 100; /* 1msec timeout */
- PCIE_DUMP(dev, "PCIe:Linkdown IRQ for RC%d attempt recovery\n",
- dev->rc_idx);
- while (cnt--) {
- status = readl_relaxed(dev->parf + PCIE20_PARF_STATUS);
- if (status & FLUSH_COMPLETED) {
- PCIE_DBG(dev,
- "flush complete (%d), status:%x\n", cnt, status);
- break;
- }
- udelay(10);
- }
- if (!cnt) {
- PCIE_DBG(dev, "flush timeout, status:%x\n", status);
- return -ETIMEDOUT;
- }
- /* Clear flush and move core to reset mode */
- msm_pcie_write_mask(dev->parf + PCIE20_PARF_LTSSM,
- 0, SW_CLR_FLUSH_MODE);
- /* wait for flush mode to clear */
- cnt = 100; /* 1msec timeout */
- while (cnt--) {
- status = readl_relaxed(dev->parf + PCIE20_PARF_LTSSM);
- if (!(status & FLUSH_MODE)) {
- PCIE_DBG(dev, "flush mode clear:%d, %x\n", cnt, status);
- break;
- }
- udelay(10);
- }
- if (!cnt) {
- PCIE_DBG(dev, "flush-mode timeout, status:%x\n", status);
- return -ETIMEDOUT;
- }
- return 0;
- }
/*
 * msm_pcie_handle_linkdown() - linkdown processing after the IRQ filter.
 *
 * Marks the link down, optionally dumps registers, attempts flush-based
 * recovery when enabled, otherwise asserts PERST, then either panics
 * (linkdown_panic policy) or notifies clients with
 * MSM_PCIE_EVENT_LINKDOWN.
 *
 * NOTE(review): the register-dump section below pairs braces across
 * CONFIG_SEC_PCIE preprocessor blocks; the brace counts balance in both
 * configurations, but the code must be edited with both in mind.
 */
static void msm_pcie_handle_linkdown(struct msm_pcie_dev_t *dev)
{
	int ret;

	/* Already processed a linkdown for this episode; nothing to do. */
	if (dev->link_status == MSM_PCIE_LINK_DOWN)
		return;

#ifdef CONFIG_SEC_PCIE
	if (dev->linkdown_panic) {
		/* Defer the panic so logs can be collected first. */
		if (is_need_pcie_error_oops(NULL, dev) && dev->pcie_error_wq) {
			if (dev->first_pcie_error == PCIE_ERROR_NONE) {
				dev->first_pcie_error = PCIE_ERROR_LINKDOWN;
				queue_delayed_work(dev->pcie_error_wq,
					&dev->pcie_error_dwork, msecs_to_jiffies(dev->pcie_error_defer_ms));
			}
		}
	}

	if (!is_esoc_alive(dev)) {
		PCIE_ERR(dev, "PCIe RC%d linkdown caused by esoc crash.\n", dev->rc_idx);
	} else {
		set_bit(PCIE_ERROR_LINKDOWN, &dev->pcie_error);
		update_linkdown_count(dev->rc_idx);
	}
#endif

	dev->link_status = MSM_PCIE_LINK_DOWN;

	/* Linkdown is expected. As it must be due to card removal action. So return */
	if ((dev->gpio[MSM_PCIE_GPIO_CARD_PRESENCE_PIN].num) &&
		(gpio_get_value(dev->gpio[MSM_PCIE_GPIO_CARD_PRESENCE_PIN].num))) {
		PCIE_DUMP(dev, "Linkdown due to card removal\n");
		return;
	}

#ifdef CONFIG_SEC_PCIE
	if (sec_debug_is_enabled()) {
#endif
	if (!dev->suspending) {
		/* PCIe registers dump on link down */
		PCIE_DUMP(dev,
			"PCIe:Linkdown IRQ for RC%d Dumping PCIe registers\n",
			dev->rc_idx);
#ifdef CONFIG_SEC_PCIE
		if (dev->linkdown_panic) {
#endif
		pcie_phy_dump(dev);
		pcie_parf_dump(dev);
		pcie_dm_core_dump(dev);
		pcie_sm_dump(dev);
		pcie_crm_dump(dev);
	}
#ifdef CONFIG_SEC_PCIE
	}
	}
#endif

	/* Attempt link-down recovery instead of PERST if supported */
	if (dev->linkdown_recovery_enable) {
		ret = msm_pcie_linkdown_recovery(dev);
		/* Return without PERST assertion if success */
		if (!ret)
			return;
	}

	/* assert PERST */
	if (!(msm_pcie_keep_resources_on & BIT(dev->rc_idx)))
		gpio_set_value(dev->gpio[MSM_PCIE_GPIO_PERST].num,
				dev->gpio[MSM_PCIE_GPIO_PERST].on);

	PCIE_ERR(dev, "PCIe link is down for RC%d\n", dev->rc_idx);

	if (dev->linkdown_panic) {
#ifdef CONFIG_SEC_PCIE
		if (!dev->pcie_error_wq) {
			if (is_need_pcie_error_oops(NULL, dev))
				panic("PCIe RC%d : User has chosen to panic on linkdown\n", dev->rc_idx);
			else
				PCIE_ERR(dev, "PCIe RC%d linkdown caused by esoc crash.\n", dev->rc_idx);
		}
#else
		panic("User has chosen to panic on linkdown\n");
#endif
	}

	msm_pcie_notify_client(dev, MSM_PCIE_EVENT_LINKDOWN);
}
#ifdef CONFIG_SEC_PCIE
/*
 * sec_pcie_error_worker() - deferred-panic work item.
 *
 * Queued (with a delay) when a fatal PCIe error is first recorded so
 * that logs can be flushed before the panic fires.
 */
static void sec_pcie_error_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct msm_pcie_dev_t *dev
		= container_of(dwork, struct msm_pcie_dev_t, pcie_error_dwork);

	switch (dev->first_pcie_error) {
	case PCIE_ERROR_LINKDOWN:
		panic("PCIe RC%d : User has chosen to deferred panic on linkdown\n",
			dev->rc_idx);
		break;
	case PCIE_ERROR_LINK_FAIL:
		panic("PCIe RC%d link fail!\n", dev->rc_idx);
		break;
	default:
		/* No recorded error (or an unknown one): nothing to do. */
		break;
	}
}
#endif
- static irqreturn_t handle_linkdown_irq(int irq, void *data)
- {
- struct msm_pcie_dev_t *dev = data;
- dev->linkdown_counter++;
- PCIE_DBG(dev,
- "PCIe: No. %ld linkdown IRQ for RC%d.\n",
- dev->linkdown_counter, dev->rc_idx);
- if (!dev->enumerated || dev->link_status != MSM_PCIE_LINK_ENABLED)
- PCIE_DBG(dev,
- "PCIe:Linkdown IRQ for RC%d when the link is not enabled\n",
- dev->rc_idx);
- else if (dev->suspending)
- PCIE_DBG(dev,
- "PCIe:the link of RC%d is suspending.\n",
- dev->rc_idx);
- else
- msm_pcie_handle_linkdown(dev);
- return IRQ_HANDLED;
- }
/*
 * handle_global_irq() - hard-IRQ demultiplexer for the controller's
 * global interrupt.
 *
 * Reads and clears the PARF INT_ALL / INT_ALL_2 status, then dispatches
 * each set event bit: linkdown, L1SS timeout, AER (queued to aer_fifo;
 * returns IRQ_WAKE_THREAD so handle_aer_irq runs), bridge flush, and the
 * bandwidth-management/speed-change interrupt.  Everything runs under
 * irq_lock; suspended or DRV-suspended controllers are skipped.
 */
static irqreturn_t handle_global_irq(int irq, void *data)
{
	int i;
	struct msm_pcie_dev_t *dev = data;
	struct pci_dev *rp = dev->dev;
	int aer;
	unsigned long irqsave_flags;
	u32 status = 0, status2 = 0;
	irqreturn_t ret = IRQ_HANDLED;
	struct aer_err_source e_src = {};

	spin_lock_irqsave(&dev->irq_lock, irqsave_flags);

	if (dev->suspending) {
		PCIE_DBG2(dev,
			"PCIe: RC%d is currently suspending.\n",
			dev->rc_idx);
		goto done;
	}

	/* Not handling the interrupts when we are in drv suspend */
	if (!dev->cfg_access) {
		PCIE_DBG2(dev,
			"PCIe: RC%d is currently in drv suspend.\n",
			dev->rc_idx);
		goto done;
	}

	/* Latch only the enabled (unmasked) events, then ack them. */
	status = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_STATUS) &
		readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_MASK);

	msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0, status);

	status2 = readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_2_STATUS) &
		readl_relaxed(dev->parf + PCIE20_PARF_INT_ALL_2_MASK);

	msm_pcie_write_mask(dev->parf + PCIE20_PARF_INT_ALL_2_CLEAR, 0, status2);

	PCIE_DUMP(dev, "RC%d: Global IRQ %d received: 0x%x status2: 0x%x\n",
		dev->rc_idx, irq, status, status2);

	for (i = 0; i <= MSM_PCIE_INT_EVT_MAX; i++) {
		if (status & BIT(i)) {
			switch (i) {
			case MSM_PCIE_INT_EVT_LINK_DOWN:
				PCIE_DBG(dev,
					"PCIe: RC%d: handle linkdown event.\n",
					dev->rc_idx);
				handle_linkdown_irq(irq, data);
				break;
			case MSM_PCIE_INT_EVT_L1SUB_TIMEOUT:
				msm_pcie_notify_client(dev,
					MSM_PCIE_EVENT_L1SS_TIMEOUT);
				break;
			case MSM_PCIE_INT_EVT_AER_LEGACY:
			case MSM_PCIE_INT_EVT_AER_ERR:
				PCIE_DBG(dev,
					"PCIe: RC%d: AER event idx %d.\n",
					dev->rc_idx, i);
				if (!rp) {
					PCIE_DBG2(dev, "PCIe: RC%d pci_dev is not allocated.\n",
						dev->rc_idx);
					goto done;
				}
				aer = rp->aer_cap;
				/* Snapshot and ack the root error status. */
				pci_read_config_dword(rp,
					aer + PCI_ERR_ROOT_STATUS, &e_src.status);
				if (!(e_src.status &
					(PCI_ERR_ROOT_UNCOR_RCV|
					PCI_ERR_ROOT_COR_RCV))) {
					ret = IRQ_NONE;
					goto done;
				}
				pci_read_config_dword(rp,
					aer + PCI_ERR_ROOT_ERR_SRC, &e_src.id);
				pci_write_config_dword(rp,
					aer + PCI_ERR_ROOT_STATUS, e_src.status);
				/* Defer heavy processing to the IRQ thread. */
				if (kfifo_put(&dev->aer_fifo, e_src))
					ret = IRQ_WAKE_THREAD;
				break;
			case MSM_PCIE_INT_EVT_BRIDGE_FLUSH_N:
				PCIE_DBG(dev,
					"PCIe: RC%d: FLUSH event.\n",
					dev->rc_idx);
				handle_flush_irq(irq, data);
				break;
			default:
				PCIE_DUMP(dev,
					"PCIe: RC%d: Unexpected event %d is caught!\n",
					dev->rc_idx, i);
			}
		}
	}

	if (status2 & MSM_PCIE_BW_MGT_INT_STATUS) {
		/* Disable configuration for bandwidth interrupt */
		msm_pcie_config_bandwidth_int(dev, false);

		/* Clear bandwidth interrupt status */
		msm_pcie_clear_bandwidth_int_status(dev);

		PCIE_DBG(dev,
			"PCIe: RC%d: Speed change interrupt received.\n",
			dev->rc_idx);

		complete(&dev->speed_change_completion);
	}

done:
	spin_unlock_irqrestore(&dev->irq_lock, irqsave_flags);
	return ret;
}
- static int32_t msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
- {
- int rc;
- struct device *pdev = &dev->pdev->dev;
- PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
- dev->ws = wakeup_source_register(pdev, dev_name(pdev));
- if (!dev->ws) {
- PCIE_ERR(dev,
- "PCIe: RC%d: failed to register wakeup source\n",
- dev->rc_idx);
- return -ENOMEM;
- }
- if (dev->irq[MSM_PCIE_INT_GLOBAL_INT].num) {
- rc = devm_request_threaded_irq(pdev,
- dev->irq[MSM_PCIE_INT_GLOBAL_INT].num,
- handle_global_irq,
- handle_aer_irq,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- dev->irq[MSM_PCIE_INT_GLOBAL_INT].name,
- dev);
- if (rc) {
- PCIE_ERR(dev,
- "PCIe: RC%d: Unable to request global_int interrupt: %d\n",
- dev->rc_idx,
- dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
- return rc;
- }
- }
- /* register handler for PCIE_WAKE_N interrupt line */
- if (dev->wake_n) {
- rc = devm_request_irq(pdev,
- dev->wake_n, handle_wake_irq,
- IRQF_TRIGGER_FALLING, "msm_pcie_wake", dev);
- if (rc) {
- PCIE_ERR(dev,
- "PCIe: RC%d: Unable to request wake interrupt\n",
- dev->rc_idx);
- return rc;
- }
- INIT_WORK(&dev->handle_wake_work, handle_wake_func);
- INIT_WORK(&dev->handle_sbr_work, handle_sbr_func);
- rc = enable_irq_wake(dev->wake_n);
- if (rc) {
- PCIE_ERR(dev,
- "PCIe: RC%d: Unable to enable wake interrupt\n",
- dev->rc_idx);
- return rc;
- }
- }
- return 0;
- }
- static void msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
- {
- PCIE_DBG(dev, "RC%d\n", dev->rc_idx);
- wakeup_source_unregister(dev->ws);
- if (dev->wake_n)
- disable_irq(dev->wake_n);
- }
- static int msm_pcie_check_l0s_support(struct pci_dev *pdev, void *dev)
- {
- struct pci_dev *parent = pdev->bus->self;
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
- u32 val;
- /* check parent supports L0s */
- if (parent) {
- pci_read_config_dword(parent, parent->pcie_cap + PCI_EXP_LNKCAP,
- &val);
- val = (val & BIT(10));
- if (!val) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: Parent PCI device %02x:%02x.%01x does not support L0s\n",
- pcie_dev->rc_idx, parent->bus->number,
- PCI_SLOT(parent->devfn),
- PCI_FUNC(parent->devfn));
- pcie_dev->l0s_supported = false;
- return 0;
- }
- }
- pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCAP, &val);
- if (!(val & BIT(10))) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: PCI device %02x:%02x.%01x does not support L0s\n",
- pcie_dev->rc_idx, pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
- pcie_dev->l0s_supported = false;
- }
- return 0;
- }
- static bool msm_pcie_check_l1_support(struct pci_dev *pdev,
- struct msm_pcie_dev_t *pcie_dev)
- {
- struct pci_dev *parent = pdev->bus->self;
- u32 val;
- /* check parent supports L1 */
- if (parent) {
- u32 val2;
- pci_read_config_dword(parent, parent->pcie_cap + PCI_EXP_LNKCAP,
- &val);
- pci_read_config_dword(parent, parent->pcie_cap + PCI_EXP_LNKCTL,
- &val2);
- val = (val & BIT(11)) && (val2 & PCI_EXP_LNKCTL_ASPM_L1);
- if (!val) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: Parent PCI device %02x:%02x.%01x does not support L1\n",
- pcie_dev->rc_idx, parent->bus->number,
- PCI_SLOT(parent->devfn),
- PCI_FUNC(parent->devfn));
- return false;
- }
- }
- pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCAP, &val);
- if (!(val & BIT(11))) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: PCI device %02x:%02x.%01x does not support L1\n",
- pcie_dev->rc_idx, pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
- return false;
- }
- return true;
- }
- static int msm_pcie_check_l1ss_support(struct pci_dev *pdev, void *dev)
- {
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
- u32 val;
- u32 l1ss_cap_id_offset, l1ss_cap_offset, l1ss_ctl1_offset;
- if (!pcie_dev->l1ss_supported)
- return -ENXIO;
- l1ss_cap_id_offset = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss_cap_id_offset) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: PCI device %02x:%02x.%01x could not find L1ss capability register\n",
- pcie_dev->rc_idx, pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
- pcie_dev->l1ss_supported = false;
- return -ENXIO;
- }
- l1ss_cap_offset = l1ss_cap_id_offset + PCI_L1SS_CAP;
- l1ss_ctl1_offset = l1ss_cap_id_offset + PCI_L1SS_CTL1;
- pci_read_config_dword(pdev, l1ss_cap_offset, &val);
- pcie_dev->l1_1_pcipm_supported &= !!(val & (PCI_L1SS_CAP_PCIPM_L1_1));
- pcie_dev->l1_2_pcipm_supported &= !!(val & (PCI_L1SS_CAP_PCIPM_L1_2));
- pcie_dev->l1_1_aspm_supported &= !!(val & (PCI_L1SS_CAP_ASPM_L1_1));
- pcie_dev->l1_2_aspm_supported &= !!(val & (PCI_L1SS_CAP_ASPM_L1_2));
- if (!pcie_dev->l1_1_pcipm_supported &&
- !pcie_dev->l1_2_pcipm_supported &&
- !pcie_dev->l1_1_aspm_supported &&
- !pcie_dev->l1_2_aspm_supported) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: PCI device %02x:%02x.%01x does not support any L1ss\n",
- pcie_dev->rc_idx, pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
- pcie_dev->l1ss_supported = false;
- return -ENXIO;
- }
- return 0;
- }
- static int msm_pcie_config_common_clock_enable(struct pci_dev *pdev,
- void *dev)
- {
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
- PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device %02x:%02x.%01x\n",
- pcie_dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
- msm_pcie_config_clear_set_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCTL,
- 0, PCI_EXP_LNKCTL_CCC);
- return 0;
- }
- static void msm_pcie_config_common_clock_enable_all(struct msm_pcie_dev_t *dev)
- {
- if (dev->common_clk_en)
- pci_walk_bus(dev->dev->bus,
- msm_pcie_config_common_clock_enable, dev);
- }
- static int msm_pcie_config_clock_power_management_enable(struct pci_dev *pdev,
- void *dev)
- {
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
- u32 val;
- /* enable only for upstream ports */
- if (pci_is_root_bus(pdev->bus))
- return 0;
- PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device %02x:%02x.%01x\n",
- pcie_dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
- pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCAP, &val);
- if (val & PCI_EXP_LNKCAP_CLKPM)
- msm_pcie_config_clear_set_dword(pdev,
- pdev->pcie_cap + PCI_EXP_LNKCTL, 0,
- PCI_EXP_LNKCTL_CLKREQ_EN);
- else
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: PCI device %02x:%02x.%01x does not support clock power management\n",
- pcie_dev->rc_idx, pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
- return 0;
- }
- static void msm_pcie_config_clock_power_management_enable_all(
- struct msm_pcie_dev_t *dev)
- {
- if (dev->clk_power_manage_en)
- pci_walk_bus(dev->dev->bus,
- msm_pcie_config_clock_power_management_enable, dev);
- }
- static void msm_pcie_config_l0s(struct msm_pcie_dev_t *dev,
- struct pci_dev *pdev, bool enable)
- {
- u32 lnkctl_offset = pdev->pcie_cap + PCI_EXP_LNKCTL;
- PCIE_DBG(dev, "PCIe: RC%d: PCI device %02x:%02x.%01x %s\n",
- dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), enable ? "enable" : "disable");
- if (enable) {
- msm_pcie_config_clear_set_dword(pdev, lnkctl_offset, 0,
- PCI_EXP_LNKCTL_ASPM_L0S);
- } else {
- msm_pcie_config_clear_set_dword(pdev, lnkctl_offset,
- PCI_EXP_LNKCTL_ASPM_L0S, 0);
- }
- }
- static void msm_pcie_config_l0s_disable_all(struct msm_pcie_dev_t *dev,
- struct pci_bus *bus)
- {
- struct pci_dev *pdev;
- if (!dev->l0s_supported)
- return;
- list_for_each_entry(pdev, &bus->devices, bus_list) {
- struct pci_bus *child;
- child = pdev->subordinate;
- if (child)
- msm_pcie_config_l0s_disable_all(dev, child);
- msm_pcie_config_l0s(dev, pdev, false);
- }
- }
- static int msm_pcie_config_l0s_enable(struct pci_dev *pdev, void *dev)
- {
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
- if (!pcie_dev->l0s_supported)
- return 0;
- msm_pcie_config_l0s(pcie_dev, pdev, true);
- return 0;
- }
- static void msm_pcie_config_l0s_enable_all(struct msm_pcie_dev_t *dev)
- {
- if (dev->l0s_supported)
- pci_walk_bus(dev->dev->bus, msm_pcie_check_l0s_support, dev);
- if (dev->l0s_supported)
- pci_walk_bus(dev->dev->bus, msm_pcie_config_l0s_enable, dev);
- }
- static void msm_pcie_config_l1(struct msm_pcie_dev_t *dev,
- struct pci_dev *pdev, bool enable)
- {
- u32 lnkctl_offset = pdev->pcie_cap + PCI_EXP_LNKCTL;
- int ret;
- PCIE_DBG(dev, "PCIe: RC%d: PCI device %02x:%02x.%01x %s\n",
- dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), enable ? "enable" : "disable");
- if (enable) {
- ret = msm_pcie_check_l1_support(pdev, dev);
- if (!ret)
- return;
- msm_pcie_config_clear_set_dword(pdev, lnkctl_offset, 0,
- PCI_EXP_LNKCTL_ASPM_L1);
- } else {
- msm_pcie_config_clear_set_dword(pdev, lnkctl_offset,
- PCI_EXP_LNKCTL_ASPM_L1, 0);
- }
- }
- static void msm_pcie_config_l1_disable_all(struct msm_pcie_dev_t *dev,
- struct pci_bus *bus)
- {
- struct pci_dev *pdev;
- if (!dev->l1_supported)
- return;
- list_for_each_entry(pdev, &bus->devices, bus_list) {
- struct pci_bus *child;
- child = pdev->subordinate;
- if (child)
- msm_pcie_config_l1_disable_all(dev, child);
- msm_pcie_config_l1(dev, pdev, false);
- }
- }
- static int msm_pcie_config_l1_enable(struct pci_dev *pdev, void *dev)
- {
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
- msm_pcie_config_l1(pcie_dev, pdev, true);
- return 0;
- }
- static void msm_pcie_config_l1_enable_all(struct msm_pcie_dev_t *dev)
- {
- #ifdef CONFIG_SEC_PCIE_L1SS
- /* make sure caller has setup_lock */
- if (dev->prevent_l1) {
- PCIE_INFO(dev, "PCIe: RC%d: skip. reason - prevent_l1(%d)\n",
- dev->rc_idx, dev->prevent_l1);
- return;
- }
- #endif
- if (dev->l1_supported)
- pci_walk_bus(dev->dev->bus, msm_pcie_config_l1_enable, dev);
- }
/*
 * msm_pcie_config_l1ss - enable or disable the L1 substates (L1.1/L1.2,
 * ASPM and PCI-PM flavours) on one device.
 *
 * On enable, LTR is turned on in Device Control 2 before the substate
 * bits are set in L1SS Control 1; on disable the same two registers are
 * cleared in the same order. On the root port only, the PARF "AUX clock
 * sync" bit is also adjusted unless dev->aux_clk_sync is set.
 *
 * Under CONFIG_SEC_PCIE_L1SS this additionally programs the LTR max
 * snoop latency and L1SS Control 2 (Tpoweron) from DT-provided values,
 * and refuses to touch non-root devices that lack an LTR capability.
 *
 * Silently returns if the device has no L1SS extended capability.
 */
static void msm_pcie_config_l1ss(struct msm_pcie_dev_t *dev,
				struct pci_dev *pdev, bool enable)
{
	u32 val, val2;
	u32 l1ss_cap_id_offset, l1ss_ctl1_offset;
	u32 devctl2_offset = pdev->pcie_cap + PCI_EXP_DEVCTL2;
#ifdef CONFIG_SEC_PCIE_L1SS
	u32 l1ss_ctl2_offset, l1ss_ltr_cap_id_offset;
	u32 bus_num = pdev->bus->number;
#endif

	PCIE_DBG(dev, "PCIe: RC%d: PCI device %02x:%02x.%01x %s\n",
		dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
		PCI_FUNC(pdev->devfn), enable ? "enable" : "disable");

	l1ss_cap_id_offset = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
	if (!l1ss_cap_id_offset) {
		PCIE_DBG(dev,
			"PCIe: RC%d: PCI device %02x:%02x.%01x could not find L1ss capability register\n",
			dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
			PCI_FUNC(pdev->devfn));
		return;
	}

	l1ss_ctl1_offset = l1ss_cap_id_offset + PCI_L1SS_CTL1;
#ifdef CONFIG_SEC_PCIE_L1SS
	l1ss_ctl2_offset = l1ss_cap_id_offset + PCI_L1SS_CTL2;
	/* L1.2 needs LTR; bail out on non-root devices without the capability. */
	l1ss_ltr_cap_id_offset =
			pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
	if (!pci_is_root_bus(pdev->bus) && !l1ss_ltr_cap_id_offset) {
		PCIE_DBG(dev,
			"PCIe: RC%d:%d: PCI device does not support LTR\n",
			dev->rc_idx, bus_num);
		return;
	}
#endif

	/* Enable the AUX Clock and the Core Clk to be synchronous for L1ss */
	if (pci_is_root_bus(pdev->bus) && !dev->aux_clk_sync) {
		if (enable)
			msm_pcie_write_mask(dev->parf +
				PCIE20_PARF_SYS_CTRL, BIT(3), 0);
		else
			msm_pcie_write_mask(dev->parf +
				PCIE20_PARF_SYS_CTRL, 0, BIT(3));
	}

	if (enable) {
#ifdef CONFIG_SEC_PCIE_L1SS
		/* Program DT-supplied LTR snoop latency and Tpoweron first. */
		if (!pci_is_root_bus(pdev->bus)
			&& dev->l1ss_ltr_max_snoop_latency
			&& l1ss_ltr_cap_id_offset) {
			msm_pcie_config_clear_set_dword(pdev,
				l1ss_ltr_cap_id_offset + PCI_LTR_MAX_SNOOP_LAT,
				0, dev->l1ss_ltr_max_snoop_latency);
		}

		if (dev->l1ss_tpoweron) {
			msm_pcie_config_clear_set_dword(pdev,
				l1ss_ctl2_offset, 0, dev->l1ss_tpoweron);
		}
#endif
		/* LTR must be enabled before the substates themselves. */
		msm_pcie_config_clear_set_dword(pdev, devctl2_offset, 0,
			PCI_EXP_DEVCTL2_LTR_EN);

		/* Only set the substates the whole hierarchy supports. */
		msm_pcie_config_clear_set_dword(pdev, l1ss_ctl1_offset, 0,
			(dev->l1_1_pcipm_supported ?
				PCI_L1SS_CTL1_PCIPM_L1_1 : 0) |
			(dev->l1_2_pcipm_supported ?
				PCI_L1SS_CTL1_PCIPM_L1_2 : 0) |
			(dev->l1_1_aspm_supported ?
				PCI_L1SS_CTL1_ASPM_L1_1 : 0) |
			(dev->l1_2_aspm_supported ?
				PCI_L1SS_CTL1_ASPM_L1_2 : 0));
	} else {
		msm_pcie_config_clear_set_dword(pdev, devctl2_offset,
			PCI_EXP_DEVCTL2_LTR_EN, 0);

		msm_pcie_config_clear_set_dword(pdev, l1ss_ctl1_offset,
			PCI_L1SS_CTL1_PCIPM_L1_1 | PCI_L1SS_CTL1_PCIPM_L1_2 |
			PCI_L1SS_CTL1_ASPM_L1_1 | PCI_L1SS_CTL1_ASPM_L1_2, 0);
	}

	/* Read back and log the programmed state for debugging. */
	pci_read_config_dword(pdev, l1ss_ctl1_offset, &val);
	PCIE_DBG2(dev, "PCIe: RC%d: L1SUB_CONTROL1:0x%x\n", dev->rc_idx, val);

#ifdef CONFIG_SEC_PCIE_L1SS
	pci_read_config_dword(pdev, l1ss_ctl2_offset, &val);
	PCIE_DBG2(dev, "PCIe: RC%d:%d: L1SUB_CONTROL2:0x%x\n",
		dev->rc_idx, bus_num, val);
#endif

	pci_read_config_dword(pdev, devctl2_offset, &val2);
	PCIE_DBG2(dev, "PCIe: RC%d: DEVICE_CONTROL2_STATUS2::0x%x\n",
		dev->rc_idx, val2);
#ifdef CONFIG_SEC_PCIE_L1SS
	if (l1ss_ltr_cap_id_offset) {
		pci_read_config_dword(pdev,
			l1ss_ltr_cap_id_offset + PCI_LTR_MAX_SNOOP_LAT, &val2);
		PCIE_DBG2(dev, "PCIe: RC%d:%d: LTR_MAX_SNOOP_LAT:0x%x\n",
			dev->rc_idx, bus_num, val2);
	}
#endif
}
- static int msm_pcie_config_l1ss_disable(struct pci_dev *pdev, void *dev)
- {
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
- msm_pcie_config_l1ss(pcie_dev, pdev, false);
- return 0;
- }
- static void msm_pcie_config_l1ss_disable_all(struct msm_pcie_dev_t *dev,
- struct pci_bus *bus)
- {
- struct pci_dev *pdev;
- if (!dev->l1ss_supported)
- return;
- list_for_each_entry(pdev, &bus->devices, bus_list) {
- struct pci_bus *child;
- child = pdev->subordinate;
- if (child)
- msm_pcie_config_l1ss_disable_all(dev, child);
- msm_pcie_config_l1ss_disable(pdev, dev);
- }
- }
/*
 * msm_pcie_config_l1_2_threshold - pci_walk_bus() callback that programs
 * the LTR L1.2 threshold (scale and value) into one device's L1SS
 * Control 1 register, using the DT-provided l1_2_th_scale/l1_2_th_value.
 *
 * Always returns 0 so the bus walk continues even when the device has no
 * L1SS capability.
 */
static int msm_pcie_config_l1_2_threshold(struct pci_dev *pdev, void *dev)
{
	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
	u32 l1ss_cap_id_offset, l1ss_ctl1_offset;
	/* Bit positions of the TH_SCALE and TH_VALUE fields in L1SS CTL1. */
	u32 l1_2_th_scale_shift = 29;
	u32 l1_2_th_value_shift = 16;

	/* A zero threshold value means no LTR threshold was configured; skip. */
	if (!pcie_dev->l1_2_th_value)
		return 0;

	PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI device %02x:%02x.%01x\n",
		pcie_dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
		PCI_FUNC(pdev->devfn));

	l1ss_cap_id_offset = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
	if (!l1ss_cap_id_offset) {
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: PCI device %02x:%02x.%01x could not find L1ss capability register\n",
			pcie_dev->rc_idx, pdev->bus->number,
			PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
		return 0;
	}

	l1ss_ctl1_offset = l1ss_cap_id_offset + PCI_L1SS_CTL1;

	/* Mask the shifted DT values so they cannot spill outside their fields. */
	msm_pcie_config_clear_set_dword(pdev, l1ss_ctl1_offset, 0,
		(PCI_L1SS_CTL1_LTR_L12_TH_SCALE &
		(pcie_dev->l1_2_th_scale << l1_2_th_scale_shift)) |
		(PCI_L1SS_CTL1_LTR_L12_TH_VALUE &
		(pcie_dev->l1_2_th_value << l1_2_th_value_shift)));

	return 0;
}
- static int msm_pcie_config_l1ss_enable(struct pci_dev *pdev, void *dev)
- {
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)dev;
- msm_pcie_config_l1ss(pcie_dev, pdev, true);
- return 0;
- }
- static void msm_pcie_config_l1ss_enable_all(struct msm_pcie_dev_t *dev)
- {
- if (dev->l1ss_supported) {
- pci_walk_bus(dev->dev->bus, msm_pcie_config_l1_2_threshold,
- dev);
- pci_walk_bus(dev->dev->bus, msm_pcie_config_l1ss_enable, dev);
- }
- }
/*
 * msm_pcie_config_link_pm - enable or disable all link power-management
 * features for the whole hierarchy under this RC.
 *
 * Enable order: common clock, clock PM, L1ss, L1, L0s.
 * Disable order: L0s, L1, L1ss (common clock / clock PM are left as-is).
 *
 * Under CONFIG_SEC_PCIE_L1SS an L1ss-disable request that arrived while
 * the link was down (pending_l1ss_ctrl + l1ss_disable_flag) is honoured
 * here instead of enabling L1ss; the pending flag is then cleared under
 * l1ss_ctrl_lock. Note the if/else braces are completed inside the
 * #ifdef blocks, so the preprocessor output stays balanced either way.
 */
static void msm_pcie_config_link_pm(struct msm_pcie_dev_t *dev, bool enable)
{
	struct pci_bus *bus = dev->dev->bus;

	if (enable) {
		msm_pcie_config_common_clock_enable_all(dev);
		msm_pcie_config_clock_power_management_enable_all(dev);
#ifdef CONFIG_SEC_PCIE_L1SS
		mutex_lock(&dev->l1ss_ctrl_lock);
		if (dev->pending_l1ss_ctrl && dev->l1ss_disable_flag) {
			msm_pcie_config_l1ss_disable_all(dev, bus);
		} else {
#endif
			msm_pcie_config_l1ss_enable_all(dev);
#ifdef CONFIG_SEC_PCIE_L1SS
		}
		dev->pending_l1ss_ctrl = false;
		mutex_unlock(&dev->l1ss_ctrl_lock);
#endif
		msm_pcie_config_l1_enable_all(dev);
		msm_pcie_config_l0s_enable_all(dev);
	} else {
		msm_pcie_config_l0s_disable_all(dev, bus);
		msm_pcie_config_l1_disable_all(dev, bus);
		msm_pcie_config_l1ss_disable_all(dev, bus);
	}
}
- static void msm_pcie_check_l1ss_support_all(struct msm_pcie_dev_t *dev)
- {
- pci_walk_bus(dev->dev->bus, msm_pcie_check_l1ss_support, dev);
- }
- static void msm_pcie_setup_drv_msg(struct msm_pcie_drv_msg *msg, u32 dev_id,
- enum msm_pcie_drv_cmds cmd)
- {
- struct msm_pcie_drv_tre *pkt = &msg->pkt;
- struct msm_pcie_drv_header *hdr = &msg->hdr;
- hdr->major_ver = MSM_PCIE_DRV_MAJOR_VERSION;
- hdr->minor_ver = MSM_PCIE_DRV_MINOR_VERSION;
- hdr->msg_id = MSM_PCIE_DRV_MSG_ID_CMD;
- hdr->payload_size = sizeof(*pkt);
- hdr->dev_id = dev_id;
- pkt->dword[0] = cmd;
- pkt->dword[1] = hdr->dev_id;
- }
- static int msm_pcie_setup_drv(struct msm_pcie_dev_t *pcie_dev,
- struct device_node *of_node)
- {
- struct msm_pcie_drv_info *drv_info;
- drv_info = devm_kzalloc(&pcie_dev->pdev->dev, sizeof(*drv_info),
- GFP_KERNEL);
- if (!drv_info)
- return -ENOMEM;
- drv_info->dev_id = pcie_dev->rc_idx;
- msm_pcie_setup_drv_msg(&drv_info->drv_enable, drv_info->dev_id,
- MSM_PCIE_DRV_CMD_ENABLE);
- msm_pcie_setup_drv_msg(&drv_info->drv_disable, drv_info->dev_id,
- MSM_PCIE_DRV_CMD_DISABLE);
- msm_pcie_setup_drv_msg(&drv_info->drv_enable_l1ss_sleep,
- drv_info->dev_id,
- MSM_PCIE_DRV_CMD_ENABLE_L1SS_SLEEP);
- drv_info->drv_enable_l1ss_sleep.pkt.dword[2] =
- pcie_dev->l1ss_timeout_us / 1000;
- msm_pcie_setup_drv_msg(&drv_info->drv_disable_l1ss_sleep,
- drv_info->dev_id,
- MSM_PCIE_DRV_CMD_DISABLE_L1SS_SLEEP);
- msm_pcie_setup_drv_msg(&drv_info->drv_enable_pc, drv_info->dev_id,
- MSM_PCIE_DRV_CMD_ENABLE_PC);
- msm_pcie_setup_drv_msg(&drv_info->drv_disable_pc, drv_info->dev_id,
- MSM_PCIE_DRV_CMD_DISABLE_PC);
- init_completion(&drv_info->completion);
- drv_info->timeout_ms = IPC_TIMEOUT_MS;
- pcie_dev->drv_info = drv_info;
- return 0;
- }
/* rpmsg channel name this driver binds to for DRV IPC. */
static struct rpmsg_device_id msm_pcie_drv_rpmsg_match_table[] = {
	{ .name = "pcie_drv" },
	{},
};

/* rpmsg driver glue: probe/remove/callback handle the DRV IPC channel. */
static struct rpmsg_driver msm_pcie_drv_rpmsg_driver = {
	.id_table = msm_pcie_drv_rpmsg_match_table,
	.probe = msm_pcie_drv_rpmsg_probe,
	.remove = msm_pcie_drv_rpmsg_remove,
	.callback = msm_pcie_drv_rpmsg_cb,
	.drv = {
		.name = "pci-msm-drv",
	},
};
- static void msm_pcie_drv_cesta_connect_worker(struct work_struct *work)
- {
- struct pcie_drv_sta *pcie_drv = container_of(work, struct pcie_drv_sta,
- drv_connect);
- struct msm_pcie_dev_t *pcie_itr = pcie_drv->msm_pcie_dev;
- int i;
- for (i = 0; i < MAX_RC_NUM; i++, pcie_itr++) {
- if (!pcie_itr->pcie_sm)
- continue;
- msm_pcie_notify_client(pcie_itr,
- MSM_PCIE_EVENT_DRV_CONNECT);
- }
- }
- #if IS_ENABLED(CONFIG_I2C)
- static int msm_pcie_i2c_ctrl_init(struct msm_pcie_dev_t *pcie_dev)
- {
- int ret = 0, size;
- struct device_node *of_node, *i2c_client_node;
- struct device *dev = &pcie_dev->pdev->dev;
- struct pcie_i2c_ctrl *i2c_ctrl = &pcie_dev->i2c_ctrl;
- of_node = of_parse_phandle(dev->of_node, "pcie-i2c-phandle", 0);
- if (!of_node) {
- PCIE_DBG(pcie_dev, "PCIe: RC%d: No i2c phandle found\n",
- pcie_dev->rc_idx);
- return 0;
- } else {
- if (!i2c_ctrl->client) {
- PCIE_DBG(pcie_dev, "PCIe: RC%d: No i2c probe yet\n",
- pcie_dev->rc_idx);
- return -EPROBE_DEFER;
- }
- }
- i2c_client_node = i2c_ctrl->client->dev.of_node;
- if (!i2c_client_node) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: No i2c slave node phandle found\n",
- pcie_dev->rc_idx);
- goto err;
- }
- of_property_read_u32(i2c_client_node, "gpio-config-reg",
- &i2c_ctrl->gpio_config_reg);
- of_property_read_u32(i2c_client_node, "ep-reset-reg",
- &i2c_ctrl->ep_reset_reg);
- of_property_read_u32(i2c_client_node, "ep-reset-gpio-mask",
- &i2c_ctrl->ep_reset_gpio_mask);
- of_property_read_u32(i2c_client_node, "version-reg",
- &i2c_ctrl->version_reg);
- i2c_ctrl->force_i2c_setting = of_property_read_bool(i2c_client_node,
- "force-i2c-setting");
- i2c_ctrl->ep_reset_postlinkup = of_property_read_bool(i2c_client_node,
- "ep_reset_postlinkup");
- of_get_property(i2c_client_node, "dump-regs", &size);
- if (size) {
- i2c_ctrl->dump_regs = devm_kzalloc(dev, size, GFP_KERNEL);
- if (!i2c_ctrl->dump_regs) {
- ret = -ENOMEM;
- goto err;
- }
- i2c_ctrl->dump_reg_count = size / sizeof(*i2c_ctrl->dump_regs);
- ret = of_property_read_u32_array(i2c_client_node, "dump-regs",
- i2c_ctrl->dump_regs,
- i2c_ctrl->dump_reg_count);
- if (ret)
- i2c_ctrl->dump_reg_count = 0;
- }
- of_get_property(i2c_client_node, "reg_update", &size);
- if (size) {
- i2c_ctrl->reg_update = devm_kzalloc(dev, size, GFP_KERNEL);
- if (!i2c_ctrl->reg_update) {
- ret = -ENOMEM;
- goto err;
- }
- i2c_ctrl->reg_update_count = size / sizeof(*i2c_ctrl->reg_update);
- ret = of_property_read_u32_array(i2c_client_node,
- "reg_update",
- (unsigned int *)i2c_ctrl->reg_update,
- size/sizeof(i2c_ctrl->reg_update->offset));
- if (ret)
- i2c_ctrl->reg_update_count = 0;
- }
- of_get_property(i2c_client_node, "switch_reg_update", &size);
- if (size) {
- i2c_ctrl->switch_reg_update = devm_kzalloc(dev, size, GFP_KERNEL);
- if (!i2c_ctrl->switch_reg_update) {
- ret = -ENOMEM;
- goto err;
- }
- i2c_ctrl->switch_reg_update_count = size / sizeof(*i2c_ctrl->switch_reg_update);
- ret = of_property_read_u32_array(i2c_client_node,
- "switch_reg_update",
- (unsigned int *)i2c_ctrl->switch_reg_update,
- size/sizeof(i2c_ctrl->switch_reg_update->offset));
- if (ret)
- i2c_ctrl->switch_reg_update_count = 0;
- }
- return 0;
- err:
- of_node_put(of_node);
- return ret;
- }
- #endif
/*
 * msm_pcie_read_dt - populate @pcie_dev from device-tree properties.
 *
 * Reads all per-RC configuration out of @of_node / @pdev: link PM support
 * flags (L0s/L1/L1ss), target link speed/width, PHY register offsets,
 * delays and timeouts, DRV support, plus SEC-specific board options.
 * Most properties are optional: failed of_property_read_*() calls are
 * ignored and leave the field at its prior (zero or explicit default)
 * value.
 */
static void msm_pcie_read_dt(struct msm_pcie_dev_t *pcie_dev, int rc_idx,
			struct platform_device *pdev,
			struct device_node *of_node)
{
	int ret = 0;

	/*
	 * NOTE(review): msm_pcie_probe() performs this same prologue
	 * (pcie_drv.rc_num++ and the pcie_dev/rc_idx/pdev/link_status
	 * assignments) immediately before calling this function, so
	 * rc_num is incremented twice per probe — confirm which call
	 * site should own this initialization.
	 */
	pcie_drv.rc_num++;
	pcie_dev = &msm_pcie_dev[rc_idx];
	pcie_dev->rc_idx = rc_idx;
	pcie_dev->pdev = pdev;
	pcie_dev->link_status = MSM_PCIE_LINK_DEINIT;

	PCIE_DBG(pcie_dev, "PCIe: RC index is %d.\n", pcie_dev->rc_idx);

	/* Link PM support flags; DT uses negated "qcom,no-*" properties. */
	pcie_dev->l0s_supported = !of_property_read_bool(of_node,
				"qcom,no-l0s-supported");
	PCIE_DBG(pcie_dev, "L0s is %s supported.\n", pcie_dev->l0s_supported ?
		"" : "not");
	pcie_dev->l1_supported = !of_property_read_bool(of_node,
				"qcom,no-l1-supported");
	PCIE_DBG(pcie_dev, "L1 is %s supported.\n", pcie_dev->l1_supported ?
		"" : "not");
	pcie_dev->l1ss_supported = !of_property_read_bool(of_node,
				"qcom,no-l1ss-supported");
	PCIE_DBG(pcie_dev, "L1ss is %s supported.\n", pcie_dev->l1ss_supported ?
		"" : "not");
	/* Individual substates start from the overall L1ss setting and are
	 * narrowed later by msm_pcie_check_l1ss_support().
	 */
	pcie_dev->l1_1_aspm_supported = pcie_dev->l1ss_supported;
	pcie_dev->l1_2_aspm_supported = pcie_dev->l1ss_supported;
	pcie_dev->l1_1_pcipm_supported = pcie_dev->l1ss_supported;
	pcie_dev->l1_2_pcipm_supported = pcie_dev->l1ss_supported;

	pcie_dev->apss_based_l1ss_sleep = of_property_read_bool(of_node,
				"qcom,apss-based-l1ss-sleep");

	pcie_dev->no_client_based_bw_voting = of_property_read_bool(of_node,
				"qcom,no-client-based-bw-voting");

	/* LTR L1.2 threshold programmed by msm_pcie_config_l1_2_threshold(). */
	of_property_read_u32(of_node, "qcom,l1-2-th-scale",
				&pcie_dev->l1_2_th_scale);
	of_property_read_u32(of_node, "qcom,l1-2-th-value",
				&pcie_dev->l1_2_th_value);
	PCIE_DBG(pcie_dev, "PCIe: RC%d: L1.2 threshold scale: %d value: %d.\n",
		pcie_dev->rc_idx, pcie_dev->l1_2_th_scale,
		pcie_dev->l1_2_th_value);

#ifdef CONFIG_SEC_PCIE_L1SS
	/* Optional SEC overrides for LTR snoop latency and Tpoweron. */
	ret = of_property_read_u32((&pdev->dev)->of_node,
				"l1ss-ltr-max-snoop-latency",
				&pcie_dev->l1ss_ltr_max_snoop_latency);
	if (ret) {
		PCIE_DBG(pcie_dev,
			"RC%d: l1ss-ltr-max-snoop-latency is not found.\n", pcie_dev->rc_idx);
	} else {
		PCIE_DBG(pcie_dev,
			"RC%d: l1ss-ltr-max-snoop-latency = 0x%x\n", pcie_dev->rc_idx,
			pcie_dev->l1ss_ltr_max_snoop_latency);
	}

	ret = of_property_read_u32((&pdev->dev)->of_node,
				"l1ss-tpoweron",
				&pcie_dev->l1ss_tpoweron);
	if (ret) {
		PCIE_DBG(pcie_dev,
			"RC%d: l1ss-tpoweron is not found.\n", pcie_dev->rc_idx);
	} else {
		PCIE_DBG(pcie_dev,
			"RC%d: l1ss-tpoweron = 0x%x\n", pcie_dev->rc_idx,
			pcie_dev->l1ss_tpoweron);
	}

	pcie_dev->use_ep_loaded = false;
	pcie_dev->ep_loaded = false;
#endif

#ifdef CONFIG_SEC_PCIE
	pcie_dev->ignore_pcie_error =
		of_property_read_bool((&pdev->dev)->of_node,
				"ignore-pcie-error");
	PCIE_DBG(pcie_dev, "RC%d ignore-pcie-error is %s set.\n",
		pcie_dev->rc_idx, pcie_dev->ignore_pcie_error ? "" : "not");

	ret = of_property_read_string(of_node, "esoc-name",
				&pcie_dev->esoc_name);
	if (ret)
		pcie_dev->esoc_name = NULL;
	PCIE_DBG(pcie_dev, "RC%d esoc-name is %s\n",
		pcie_dev->rc_idx, pcie_dev->esoc_name ? pcie_dev->esoc_name : "NULL");

	/* Sub-PCB detect GPIOs; -1 when the board does not wire them. */
	ret = of_get_named_gpio(of_node, "subpcb-det-upper-gpio", 0);
	if (ret >= 0) {
		pcie_dev->subpcb_det_upper_gpio = ret;
		PCIE_DBG(pcie_dev, "RC%d subpcb_det_upper_gpio:%d\n",
			pcie_dev->rc_idx, pcie_dev->subpcb_det_upper_gpio);
		pcie_dev->subpcb_det_upper_gpio_level = gpio_get_value(
			pcie_dev->subpcb_det_upper_gpio);
	} else {
		pcie_dev->subpcb_det_upper_gpio = -1;
		pcie_dev->subpcb_det_upper_gpio_level = 0;
	}

	ret = of_get_named_gpio(of_node, "subpcb-det-lower-gpio", 0);
	if (ret >= 0) {
		pcie_dev->subpcb_det_lower_gpio = ret;
		PCIE_DBG(pcie_dev, "RC%d subpcb_det_lower_gpio:%d\n",
			pcie_dev->rc_idx, pcie_dev->subpcb_det_lower_gpio);
		pcie_dev->subpcb_det_lower_gpio_level = gpio_get_value(
			pcie_dev->subpcb_det_lower_gpio);
	} else {
		pcie_dev->subpcb_det_lower_gpio = -1;
		pcie_dev->subpcb_det_lower_gpio_level = 0;
	}

	PCIE_ERR(pcie_dev, "RC%d subpcb_det_upper_gpio is %s\n", pcie_dev->rc_idx,
		pcie_dev->subpcb_det_upper_gpio_level ? "High" : "Low");
	PCIE_ERR(pcie_dev, "RC%d subpcb_det_lower_gpio is %s\n", pcie_dev->rc_idx,
		pcie_dev->subpcb_det_lower_gpio_level ? "High" : "Low");
#endif

	pcie_dev->common_clk_en = of_property_read_bool(of_node,
				"qcom,common-clk-en");
	PCIE_DBG(pcie_dev, "Common clock is %s enabled.\n",
		pcie_dev->common_clk_en ? "" : "not");
	pcie_dev->clk_power_manage_en = of_property_read_bool(of_node,
				"qcom,clk-power-manage-en");
	PCIE_DBG(pcie_dev, "Clock power management is %s enabled.\n",
		pcie_dev->clk_power_manage_en ? "" : "not");
	pcie_dev->aux_clk_sync = !of_property_read_bool(of_node,
				"qcom,no-aux-clk-sync");
	PCIE_DBG(pcie_dev, "AUX clock is %s synchronous to Core clock.\n",
		pcie_dev->aux_clk_sync ? "" : "not");

	of_property_read_u32(of_node, "qcom,smmu-sid-base",
				&pcie_dev->smmu_sid_base);
	PCIE_DBG(pcie_dev, "RC%d: qcom,smmu-sid-base: 0x%x.\n",
		pcie_dev->rc_idx, pcie_dev->smmu_sid_base);

	of_property_read_u32(of_node, "qcom,boot-option",
				&pcie_dev->boot_option);
	PCIE_DBG(pcie_dev, "PCIe: RC%d boot option is 0x%x.\n",
		pcie_dev->rc_idx, pcie_dev->boot_option);

	of_property_read_u32(of_node, "qcom,pcie-phy-ver",
				&pcie_dev->phy_ver);
	PCIE_DBG(pcie_dev, "RC%d: pcie-phy-ver: %d.\n", pcie_dev->rc_idx,
		pcie_dev->phy_ver);

	/* Link-training poll count; DT may override the default. */
	pcie_dev->link_check_max_count = LINK_UP_CHECK_MAX_COUNT;
	of_property_read_u32(pdev->dev.of_node,
				"qcom,link-check-max-count",
				&pcie_dev->link_check_max_count);
	PCIE_DBG(pcie_dev, "PCIe: RC%d: link-check-max-count: %u.\n",
		pcie_dev->rc_idx, pcie_dev->link_check_max_count);

	of_property_read_u32(of_node, "qcom,target-link-speed",
				&pcie_dev->dt_target_link_speed);
	PCIE_DBG(pcie_dev, "PCIe: RC%d: target-link-speed: 0x%x.\n",
		pcie_dev->rc_idx, pcie_dev->dt_target_link_speed);
	pcie_dev->target_link_speed = pcie_dev->dt_target_link_speed;

#ifdef CONFIG_SEC_PCIE
	/* Cap the link to Gen1 on boards identified by the sub-PCB GPIOs. */
	if (pcie_dev->subpcb_det_upper_gpio_level &&
		!pcie_dev->subpcb_det_lower_gpio_level) {
		pcie_dev->target_link_speed = GEN1_SPEED;
		PCIE_INFO(pcie_dev,
			"PCIe: RC%d: force target link speed: %d\n",
			pcie_dev->rc_idx, pcie_dev->target_link_speed);
	}
#endif

	msm_pcie_dev[rc_idx].target_link_width = 0;
	of_property_read_u32(of_node, "qcom,target-link-width",
				&pcie_dev->target_link_width);
	PCIE_DBG(pcie_dev, "PCIe: RC%d: target-link-width: %d.\n",
		pcie_dev->rc_idx, pcie_dev->target_link_width);

	of_property_read_u32(of_node, "qcom,n-fts", &pcie_dev->n_fts);
	PCIE_DBG(pcie_dev, "n-fts: 0x%x.\n", pcie_dev->n_fts);

	of_property_read_u32(of_node, "qcom,ep-latency",
				&pcie_dev->ep_latency);
	PCIE_DBG(pcie_dev, "RC%d: ep-latency: %ums.\n", pcie_dev->rc_idx,
		pcie_dev->ep_latency);

	of_property_read_u32(of_node, "qcom,switch-latency",
				&pcie_dev->switch_latency);
	PCIE_DBG(pcie_dev, "RC%d: switch-latency: %ums.\n", pcie_dev->rc_idx,
		pcie_dev->switch_latency);

	ret = of_property_read_u32(of_node, "qcom,wr-halt-size",
				&pcie_dev->wr_halt_size);
	if (ret)
		PCIE_DBG(pcie_dev,
			"RC%d: wr-halt-size not specified in dt. Use default value.\n",
			pcie_dev->rc_idx);
	else
		PCIE_DBG(pcie_dev, "RC%d: wr-halt-size: 0x%x.\n",
			pcie_dev->rc_idx, pcie_dev->wr_halt_size);

	pcie_dev->gdsc_clk_drv_ss_nonvotable = of_property_read_bool(of_node,
				"qcom,gdsc-clk-drv-ss-nonvotable");
	PCIE_DBG(pcie_dev, "Gdsc clk is %s votable during drv hand over.\n",
		pcie_dev->gdsc_clk_drv_ss_nonvotable ? "not" : "");

	pcie_dev->slv_addr_space_size = SZ_16M;
	of_property_read_u32(of_node, "qcom,slv-addr-space-size",
				&pcie_dev->slv_addr_space_size);
	PCIE_DBG(pcie_dev, "RC%d: slv-addr-space-size: 0x%x.\n",
		pcie_dev->rc_idx, pcie_dev->slv_addr_space_size);

	of_property_read_u32(of_node, "qcom,num-parf-testbus-sel",
				&pcie_dev->num_parf_testbus_sel);
	PCIE_DBG(pcie_dev, "RC%d: num-parf-testbus-sel: 0x%x.\n",
		pcie_dev->rc_idx, pcie_dev->num_parf_testbus_sel);

	/* PHY register offsets/bits; zero when DT omits them. */
	of_property_read_u32(of_node, "qcom,phy-status-offset",
				&pcie_dev->phy_status_offset);
	PCIE_DBG(pcie_dev, "RC%d: phy-status-offset: 0x%x.\n", pcie_dev->rc_idx,
		pcie_dev->phy_status_offset);

	of_property_read_u32(pdev->dev.of_node, "qcom,phy-status-bit",
				&pcie_dev->phy_status_bit);
	PCIE_DBG(pcie_dev, "RC%d: phy-status-bit: %u.\n", pcie_dev->rc_idx,
		pcie_dev->phy_status_bit);

	of_property_read_u32(of_node, "qcom,phy-power-down-offset",
				&pcie_dev->phy_power_down_offset);
	PCIE_DBG(pcie_dev, "RC%d: phy-power-down-offset: 0x%x.\n",
		pcie_dev->rc_idx, pcie_dev->phy_power_down_offset);

	of_property_read_u32(of_node, "qcom,phy-aux-clk-config1-offset",
				&pcie_dev->phy_aux_clk_config1_offset);
	PCIE_DBG(pcie_dev, "RC%d: phy-aux-clk-config1-offset: 0x%x.\n",
		pcie_dev->rc_idx, pcie_dev->phy_aux_clk_config1_offset);

	of_property_read_u32(of_node, "qcom,phy-pll-clk-enable1-offset",
				&pcie_dev->phy_pll_clk_enable1_offset);
	PCIE_DBG(pcie_dev, "RC%d: phy-pll-clk-enable1-offset: 0x%x.\n",
		pcie_dev->rc_idx, pcie_dev->phy_pll_clk_enable1_offset);

	of_property_read_u32(pdev->dev.of_node,
				"qcom,eq-pset-req-vec",
				&pcie_dev->eq_pset_req_vec);
	PCIE_DBG(pcie_dev, "RC%d: eq-pset-req-vec: 0x%x.\n",
		pcie_dev->rc_idx, pcie_dev->eq_pset_req_vec);

	pcie_dev->core_preset = PCIE_GEN3_PRESET_DEFAULT;
	of_property_read_u32(pdev->dev.of_node,
				"qcom,core-preset",
				&pcie_dev->core_preset);
	PCIE_DBG(pcie_dev, "RC%d: core-preset: 0x%x.\n",
		pcie_dev->rc_idx, pcie_dev->core_preset);

	of_property_read_u32(pdev->dev.of_node,
				"qcom,eq-fmdc-t-min-phase23",
				&pcie_dev->eq_fmdc_t_min_phase23);
	PCIE_DBG(pcie_dev, "RC%d: qcom,eq-fmdc-t-min-phase23: 0x%x.\n",
		pcie_dev->rc_idx, pcie_dev->eq_fmdc_t_min_phase23);

	of_property_read_u32(of_node, "qcom,cpl-timeout",
				&pcie_dev->cpl_timeout);
	PCIE_DBG(pcie_dev, "RC%d: cpl-timeout: 0x%x.\n",
		pcie_dev->rc_idx, pcie_dev->cpl_timeout);

	/* PERST propagation delay window, overridable per board. */
	pcie_dev->perst_delay_us_min = PERST_PROPAGATION_DELAY_US_MIN;
	pcie_dev->perst_delay_us_max = PERST_PROPAGATION_DELAY_US_MAX;
	of_property_read_u32(of_node, "qcom,perst-delay-us-min",
				&pcie_dev->perst_delay_us_min);
	of_property_read_u32(of_node, "qcom,perst-delay-us-max",
				&pcie_dev->perst_delay_us_max);
	PCIE_DBG(pcie_dev,
		"RC%d: perst-delay-us-min: %dus. perst-delay-us-max: %dus.\n",
		pcie_dev->rc_idx, pcie_dev->perst_delay_us_min,
		pcie_dev->perst_delay_us_max);

	pcie_dev->tlp_rd_size = PCIE_TLP_RD_SIZE;
	of_property_read_u32(of_node, "qcom,tlp-rd-size",
				&pcie_dev->tlp_rd_size);
	PCIE_DBG(pcie_dev, "RC%d: tlp-rd-size: 0x%x.\n", pcie_dev->rc_idx,
		pcie_dev->tlp_rd_size);

	ret = of_property_read_u32(of_node, "qcom,aux-clk-freq",
				&pcie_dev->aux_clk_freq);
	if (ret)
		PCIE_DBG(pcie_dev, "RC%d: using default aux clock frequency.\n",
			pcie_dev->rc_idx);
	else
		PCIE_DBG(pcie_dev, "RC%d: aux clock frequency: %d.\n",
			pcie_dev->rc_idx, pcie_dev->aux_clk_freq);

#ifdef CONFIG_SEC_PCIE_AER
	pcie_dev->aer_irq_counter = 0;
#endif
#ifdef CONFIG_SEC_PANIC_PCIE_ERR
	/* SEC builds panic on link-down and may retry link-up a few times. */
	pcie_dev->linkdown_panic = true;

	ret = of_property_read_u32((&pdev->dev)->of_node,
				"allow-linkup-retry",
				&pcie_dev->allow_linkup_retry);
	if (ret) {
		PCIE_DBG(pcie_dev,
			"RC%d: allow-linkup-retry is not found.\n", pcie_dev->rc_idx);
	} else {
		PCIE_DBG(pcie_dev,
			"RC%d: allow-linkup-retry = 0x%x\n", pcie_dev->rc_idx,
			pcie_dev->allow_linkup_retry);
	}
	pcie_dev->remained_linkup_retry = pcie_dev->allow_linkup_retry;
#else
	pcie_dev->linkdown_panic = false;
#endif

	pcie_dev->aer_enable = true;

	/* Presence of an msi-map indicates LPI/ITS support. */
	if (!of_find_property(of_node, "msi-map", NULL)) {
		PCIE_DBG(pcie_dev, "RC%d: LPI not supported.\n",
			pcie_dev->rc_idx);
	} else {
		PCIE_DBG(pcie_dev, "RC%d: LPI supported.\n",
			pcie_dev->rc_idx);
		pcie_dev->lpi_enable = true;
	}

	ret = of_property_read_u32(of_node, "qcom,pcie-clkreq-offset",
				&pcie_dev->pcie_parf_cesta_config);
	if (ret)
		pcie_dev->pcie_parf_cesta_config = 0;

	pcie_dev->config_recovery = of_property_read_bool(of_node,
				"qcom,config-recovery");
	if (pcie_dev->config_recovery) {
		PCIE_DUMP(pcie_dev,
			"PCIe RC%d config space recovery enabled\n",
			pcie_dev->rc_idx);
		INIT_WORK(&pcie_dev->link_recover_wq, handle_link_recover);
	}

	of_property_read_u32(of_node, "qcom,l1ss-sleep-disable",
				&pcie_dev->l1ss_sleep_disable);

	ret = of_property_read_u32(of_node, "qcom,drv-l1ss-timeout-us",
				&pcie_dev->l1ss_timeout_us);
	if (ret)
		pcie_dev->l1ss_timeout_us = L1SS_TIMEOUT_US;
	PCIE_DBG(pcie_dev, "PCIe: RC%d: DRV L1ss timeout: %dus\n",
		pcie_dev->rc_idx, pcie_dev->l1ss_timeout_us);

	/* A drv-name property marks the RC as DRV capable. */
	ret = of_property_read_string(of_node, "qcom,drv-name",
				&pcie_dev->drv_name);
	if (!ret) {
		pcie_dev->drv_supported = true;

		ret = msm_pcie_setup_drv(pcie_dev, of_node);
		if (ret)
			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: DRV: failed to setup DRV: ret: %d\n",
				pcie_dev->rc_idx, ret);
	}

	pcie_dev->panic_genspeed_mismatch = of_property_read_bool(of_node,
				"qcom,panic-genspeed-mismatch");
}
- static int msm_pcie_cesta_init(struct msm_pcie_dev_t *pcie_dev,
- struct device_node *of_node)
- {
- int ret = 0;
- ret = of_property_read_u32(of_node, "qcom,pcie-clkreq-gpio",
- &pcie_dev->clkreq_gpio);
- if (ret) {
- PCIE_ERR(pcie_dev, "Couldn't find clkreq gpio %d\n",
- ret);
- return ret;
- }
- ret = msm_pcie_cesta_get_sm_seq(pcie_dev);
- if (ret)
- return ret;
- msm_pcie_cesta_load_sm_seq(pcie_dev);
- pcie_dev->crm_dev = crm_get_device("pcie_crm");
- if (IS_ERR(pcie_dev->crm_dev)) {
- PCIE_ERR(pcie_dev, "PCIe: RC%d: fail to get crm_dev\n",
- pcie_dev->rc_idx);
- return ret;
- }
- msm_pcie_cesta_map_save(pcie_dev->bw_gen_max);
- INIT_WORK(&pcie_drv.drv_connect,
- msm_pcie_drv_cesta_connect_worker);
- pcie_dev->drv_supported = true;
- return 0;
- }
- static void msm_pcie_get_pinctrl(struct msm_pcie_dev_t *pcie_dev,
- struct platform_device *pdev)
- {
- pcie_dev->pinctrl = devm_pinctrl_get(&pdev->dev);
- if (IS_ERR_OR_NULL(pcie_dev->pinctrl))
- PCIE_ERR(pcie_dev, "PCIe: RC%d failed to get pinctrl\n",
- pcie_dev->rc_idx);
- else
- pcie_dev->use_pinctrl = true;
- if (pcie_dev->use_pinctrl) {
- pcie_dev->pins_default = pinctrl_lookup_state(pcie_dev->pinctrl,
- "default");
- if (IS_ERR(pcie_dev->pins_default)) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d could not get pinctrl default state\n",
- pcie_dev->rc_idx);
- pcie_dev->pins_default = NULL;
- }
- pcie_dev->pins_sleep = pinctrl_lookup_state(pcie_dev->pinctrl,
- "sleep");
- if (IS_ERR(pcie_dev->pins_sleep)) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d could not get pinctrl sleep state\n",
- pcie_dev->rc_idx);
- pcie_dev->pins_sleep = NULL;
- }
- }
- }
- static int msm_pcie_probe(struct platform_device *pdev)
- {
- int ret = 0;
- int rc_idx = -1, size;
- struct msm_pcie_dev_t *pcie_dev;
- struct device_node *of_node;
- #ifdef CONFIG_SEC_PCIE
- char rc_name[MAX_RC_NAME_LEN];
- #endif
- dev_info(&pdev->dev, "PCIe: %s\n", __func__);
- mutex_lock(&pcie_drv.drv_lock);
- of_node = pdev->dev.of_node;
- ret = of_property_read_u32(of_node, "cell-index", &rc_idx);
- if (ret) {
- dev_err(&pdev->dev, "PCIe: %s: Did not find RC index\n",
- __func__);
- goto out;
- }
- if (rc_idx >= MAX_RC_NUM)
- goto out;
- pcie_drv.rc_num++;
- pcie_dev = &msm_pcie_dev[rc_idx];
- pcie_dev->rc_idx = rc_idx;
- pcie_dev->pdev = pdev;
- pcie_dev->link_status = MSM_PCIE_LINK_DEINIT;
- PCIE_DBG(pcie_dev, "PCIe: RC index is %d.\n", pcie_dev->rc_idx);
- msm_pcie_read_dt(pcie_dev, rc_idx, pdev, of_node);
-
- #ifdef CONFIG_SEC_PCIE
- snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", pcie_dev->rc_idx);
- pcie_dev->ipc_log =
- ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
- if (pcie_dev->ipc_log == NULL)
- pr_err("%s: unable to create IPC log context for %s\n",
- __func__, rc_name);
- else
- PCIE_DBG(pcie_dev,
- "PCIe IPC logging is enable for RC%d\n",
- pcie_dev->rc_idx);
- snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", pcie_dev->rc_idx);
- pcie_dev->ipc_log_long =
- ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
- if (pcie_dev->ipc_log_long == NULL)
- pr_err("%s: unable to create IPC log context for %s\n",
- __func__, rc_name);
- else
- PCIE_DBG(pcie_dev,
- "PCIe IPC logging %s is enable for RC%d\n",
- rc_name, pcie_dev->rc_idx);
- snprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", pcie_dev->rc_idx);
- pcie_dev->ipc_log_dump =
- ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
- if (pcie_dev->ipc_log_dump == NULL)
- pr_err("%s: unable to create IPC log context for %s\n",
- __func__, rc_name);
- else
- PCIE_DBG(pcie_dev,
- "PCIe IPC logging %s is enable for RC%d\n",
- rc_name, pcie_dev->rc_idx);
- #endif
- #ifdef CONFIG_SEC_PCIE
- if (pcie_dev->rc_idx == 0) {
- pcie_dev->pcie_error_wq
- = create_singlethread_workqueue("pcie_error_wq0");
- INIT_DELAYED_WORK(&pcie_dev->pcie_error_dwork, sec_pcie_error_worker);
- pcie_dev->pcie_error_defer_ms = 15000;
- } else if (pcie_dev->rc_idx == 1) {
- pcie_dev->pcie_error_wq
- = create_singlethread_workqueue("pcie_error_wq1");
- INIT_DELAYED_WORK(&pcie_dev->pcie_error_dwork, sec_pcie_error_worker);
- pcie_dev->pcie_error_defer_ms = 15000;
- } else if (pcie_dev->rc_idx == 2) {
- pcie_dev->pcie_error_wq
- = create_singlethread_workqueue("pcie_error_wq2");
- INIT_DELAYED_WORK(&pcie_dev->pcie_error_dwork, sec_pcie_error_worker);
- pcie_dev->pcie_error_defer_ms = 15000;
- }
- #endif
- memcpy(pcie_dev->vreg, msm_pcie_vreg_info, sizeof(msm_pcie_vreg_info));
- memcpy(pcie_dev->gpio, msm_pcie_gpio_info, sizeof(msm_pcie_gpio_info));
- memcpy(pcie_dev->res, msm_pcie_res_info, sizeof(msm_pcie_res_info));
- memcpy(pcie_dev->irq, msm_pcie_irq_info, sizeof(msm_pcie_irq_info));
- memcpy(pcie_dev->reset, msm_pcie_reset_info[rc_idx],
- sizeof(msm_pcie_reset_info[rc_idx]));
- memcpy(pcie_dev->pipe_reset, msm_pcie_pipe_reset_info[rc_idx],
- sizeof(msm_pcie_pipe_reset_info[rc_idx]));
- memcpy(pcie_dev->linkdown_reset, msm_pcie_linkdown_reset_info[rc_idx],
- sizeof(msm_pcie_linkdown_reset_info[rc_idx]));
- init_completion(&pcie_dev->speed_change_completion);
- dev_set_drvdata(&pdev->dev, pcie_dev);
- #if IS_ENABLED(CONFIG_I2C)
- ret = msm_pcie_i2c_ctrl_init(pcie_dev);
- if (ret)
- goto decrease_rc_num;
- #endif
- ret = msm_pcie_get_resources(pcie_dev, pcie_dev->pdev);
- if (ret)
- goto decrease_rc_num;
- if (pcie_dev->rumi)
- pcie_dev->rumi_init = msm_pcie_rumi_init;
- if (pcie_dev->pcie_sm) {
- PCIE_DBG(pcie_dev, "pcie CESTA is supported\n");
- ret = msm_pcie_cesta_init(pcie_dev, of_node);
- if (ret)
- goto decrease_rc_num;
- } else {
- ret = register_rpmsg_driver(&msm_pcie_drv_rpmsg_driver);
- if (ret && ret != -EBUSY)
- PCIE_ERR(pcie_dev,
- "PCIe %d: DRV: rpmsg register fail: ret: %d\n",
- pcie_dev->rc_idx, ret);
- }
- msm_pcie_get_pinctrl(pcie_dev, pdev);
- #ifdef CONFIG_SEC_PCIE
- if (pcie_dev->subpcb_det_upper_gpio_level
- && pcie_dev->subpcb_det_lower_gpio_level) {
- PCIE_INFO(pcie_dev, "RC%d subpcb is not connected.\n", pcie_dev->rc_idx);
- if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep) {
- PCIE_INFO(pcie_dev, "RC%d pinctrl to sleep\n", pcie_dev->rc_idx);
- pinctrl_select_state(pcie_dev->pinctrl, pcie_dev->pins_sleep);
- }
- PCIE_INFO(pcie_dev, "RC%d set ignore_pcie_error\n", pcie_dev->rc_idx);
- pcie_dev->ignore_pcie_error = true;
- }
- #endif
- ret = msm_pcie_gpio_init(pcie_dev);
- if (ret) {
- msm_pcie_release_resources(pcie_dev);
- goto decrease_rc_num;
- }
- ret = msm_pcie_irq_init(pcie_dev);
- if (ret) {
- msm_pcie_release_resources(pcie_dev);
- msm_pcie_gpio_deinit(pcie_dev);
- goto decrease_rc_num;
- }
- INIT_KFIFO(pcie_dev->aer_fifo);
- msm_pcie_sysfs_init(pcie_dev);
- pcie_dev->drv_ready = true;
- of_get_property(pdev->dev.of_node, "qcom,filtered-bdfs", &size);
- if (size) {
- pcie_dev->filtered_bdfs = devm_kzalloc(&pdev->dev, size,
- GFP_KERNEL);
- if (!pcie_dev->filtered_bdfs) {
- mutex_unlock(&pcie_drv.drv_lock);
- return -ENOMEM;
- }
- pcie_dev->bdf_count = size / sizeof(*pcie_dev->filtered_bdfs);
- ret = of_property_read_u32_array(pdev->dev.of_node,
- "qcom,filtered-bdfs",
- pcie_dev->filtered_bdfs,
- pcie_dev->bdf_count);
- if (ret)
- pcie_dev->bdf_count = 0;
- }
- if (pcie_dev->boot_option & MSM_PCIE_NO_PROBE_ENUMERATION) {
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d will be enumerated by client or endpoint.\n",
- pcie_dev->rc_idx);
- mutex_unlock(&pcie_drv.drv_lock);
- return 0;
- }
- ret = msm_pcie_enumerate(rc_idx);
- if (ret)
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d is not enabled during bootup; it will be enumerated upon client request.\n",
- pcie_dev->rc_idx);
- else
- PCIE_ERR(pcie_dev, "RC%d is enabled in bootup\n",
- pcie_dev->rc_idx);
- PCIE_DBG(pcie_dev, "PCIe probed %s\n", dev_name(&pdev->dev));
- mutex_unlock(&pcie_drv.drv_lock);
- return 0;
- decrease_rc_num:
- #ifdef CONFIG_SEC_PCIE
- if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep) {
- pinctrl_select_state(pcie_dev->pinctrl,
- pcie_dev->pins_sleep);
- }
- #endif
- pcie_drv.rc_num--;
- PCIE_ERR(pcie_dev, "PCIe: RC%d: Driver probe failed. ret: %d\n",
- pcie_dev->rc_idx, ret);
- out:
- if (rc_idx < 0 || rc_idx >= MAX_RC_NUM)
- pr_err("PCIe: Invalid RC index %d. Driver probe failed\n",
- rc_idx);
- mutex_unlock(&pcie_drv.drv_lock);
- return ret;
- }
- static int msm_pcie_remove(struct platform_device *pdev)
- {
- int ret = 0;
- int rc_idx;
- struct msm_pcie_device_info *dev_info, *temp;
- mutex_lock(&pcie_drv.drv_lock);
- ret = of_property_read_u32((&pdev->dev)->of_node,
- "cell-index", &rc_idx);
- if (ret) {
- pr_err("%s: Did not find RC index.\n", __func__);
- goto out;
- } else {
- pcie_drv.rc_num--;
- dev_info(&pdev->dev, "PCIe: RC%d: being removed\n", rc_idx);
- }
- if (msm_pcie_dev[rc_idx].saved_state)
- pci_load_and_free_saved_state(msm_pcie_dev[rc_idx].dev,
- &msm_pcie_dev[rc_idx].saved_state);
- if (msm_pcie_dev[rc_idx].default_state)
- pci_load_and_free_saved_state(msm_pcie_dev[rc_idx].dev,
- &msm_pcie_dev[rc_idx].default_state);
- /* Use CESTA to turn off the resources */
- if (msm_pcie_dev[rc_idx].pcie_sm)
- msm_pcie_cesta_map_apply(&msm_pcie_dev[rc_idx], D3COLD_STATE);
- msm_pcie_irq_deinit(&msm_pcie_dev[rc_idx]);
- msm_pcie_vreg_deinit(&msm_pcie_dev[rc_idx]);
- msm_pcie_clk_deinit(&msm_pcie_dev[rc_idx]);
- msm_pcie_gdsc_deinit(&msm_pcie_dev[rc_idx]);
- msm_pcie_gpio_deinit(&msm_pcie_dev[rc_idx]);
- msm_pcie_release_resources(&msm_pcie_dev[rc_idx]);
- #ifdef CONFIG_SEC_PCIE
- if (msm_pcie_dev[rc_idx].ssr_notifier) {
- ret = qcom_unregister_ssr_notifier(msm_pcie_dev[rc_idx].ssr_notifier,
- &msm_pcie_dev[rc_idx].ssr_nb);
- if (ret) {
- dev_info(&pdev->dev, "PCIe: RC%d: %s: error %d unregistering notifier\n",
- rc_idx, msm_pcie_dev[rc_idx].esoc_name, ret);
- msm_pcie_dev[rc_idx].ssr_notifier = NULL;
- }
- }
- #endif
- list_for_each_entry_safe(dev_info, temp,
- &msm_pcie_dev[rc_idx].enum_ep_list,
- pcidev_node) {
- list_del(&dev_info->pcidev_node);
- kfree(dev_info);
- }
- list_for_each_entry_safe(dev_info, temp,
- &msm_pcie_dev[rc_idx].susp_ep_list,
- pcidev_node) {
- list_del(&dev_info->pcidev_node);
- kfree(dev_info);
- }
- out:
- mutex_unlock(&pcie_drv.drv_lock);
- return ret;
- }
/*
 * msm_pcie_link_retrain - trigger link retraining and wait for completion.
 *
 * Arms the bandwidth-change interrupt, sets the Retrain Link bit in the
 * root port's Link Control register, and waits up to 150ms for the
 * bandwidth interrupt to signal speed_change_completion. On timeout it
 * falls back to polling the LBMS status bit directly.
 *
 * Returns 0 on success, -EIO when the link failed to retrain.
 */
static int msm_pcie_link_retrain(struct msm_pcie_dev_t *pcie_dev,
				 struct pci_dev *pci_dev)
{
	u32 cnt_max = 150; /* 150ms timeout */
	u32 link_status;
	/*
	 * LBMS lives in Link Status, the upper 16 bits of the 32-bit word
	 * read at (pcie_cap + PCI_EXP_LNKCTL). The shift count reuses
	 * PCI_EXP_LNKCTL (0x10 == 16) as "16 bits up" — it works, but only
	 * because the register offset happens to equal the bit distance.
	 */
	u32 link_status_lbms_mask = PCI_EXP_LNKSTA_LBMS << PCI_EXP_LNKCTL;
	int ret, status;

	/* Enable configuration for bandwidth interrupt */
	msm_pcie_config_bandwidth_int(pcie_dev, true);

	reinit_completion(&pcie_dev->speed_change_completion);

	/* link retrain */
	msm_pcie_config_clear_set_dword(pci_dev,
		pci_dev->pcie_cap + PCI_EXP_LNKCTL,
		0, PCI_EXP_LNKCTL_RL);

	ret = wait_for_completion_timeout(&pcie_dev->speed_change_completion,
		msecs_to_jiffies(cnt_max));
	if (!ret) {
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: Bandwidth int: completion timeout\n",
			pcie_dev->rc_idx);

		/* poll to check if link train is done */
		if (!(readl_relaxed(pcie_dev->dm_core + pci_dev->pcie_cap +
			PCI_EXP_LNKCTL) & link_status_lbms_mask)) {
			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: failed to retrain\n",
				pcie_dev->rc_idx);
			return -EIO;
		}
		status = (readl_relaxed(pcie_dev->dm_core + pci_dev->pcie_cap +
			PCI_EXP_LNKCTL) & link_status_lbms_mask);
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: Status set 0x%x\n",
			pcie_dev->rc_idx, status);
	}

	/* Refresh cached speed/width from the (LNKCTL | LNKSTA<<16) dword. */
	link_status = readl_relaxed(pcie_dev->dm_core +
		PCIE20_CAP_LINKCTRLSTATUS);
	pcie_dev->current_link_speed = (link_status >> 16) & PCI_EXP_LNKSTA_CLS;
	pcie_dev->current_link_width = ((link_status >> 16) & PCI_EXP_LNKSTA_NLW) >>
		PCI_EXP_LNKSTA_NLW_SHIFT;

	return 0;
}
/*
 * msm_pcie_set_link_width - program the controller for a new link width.
 *
 * @target_link_width: requested width encoded as PCI_EXP_LNKSTA_NLW_X1/X2.
 *
 * Writes the width into PORT_LINK_CTRL and GEN2_CTRL, then (with the RO
 * write-enable bit toggled) rewrites the Maximum Link Width field of the
 * Link Capabilities register and refreshes the cached link_width_max.
 *
 * Returns 0 on success, -EINVAL for an unsupported width.
 *
 * NOTE(review): the range check below validates pcie_dev->target_link_width
 * (a device field) rather than the target_link_width parameter actually
 * being applied — confirm this is intentional against the callers.
 */
static int msm_pcie_set_link_width(struct msm_pcie_dev_t *pcie_dev,
				   u16 target_link_width)
{
	u16 link_width;

	if (pcie_dev->target_link_width &&
	    (pcie_dev->target_link_width > pcie_dev->link_width_max))
		goto invalid_link_width;

	/* Translate the LNKSTA encoding into the controller's encoding. */
	switch (target_link_width) {
	case PCI_EXP_LNKSTA_NLW_X1:
		link_width = LINK_WIDTH_X1;
		break;
	case PCI_EXP_LNKSTA_NLW_X2:
		link_width = LINK_WIDTH_X2;
		break;
	default:
		goto invalid_link_width;
	}

	msm_pcie_write_reg_field(pcie_dev->dm_core,
				 PCIE20_PORT_LINK_CTRL_REG,
				 LINK_WIDTH_MASK << LINK_WIDTH_SHIFT,
				 link_width);

	/* Set NUM_OF_LANES in GEN2_CTRL_OFF */
	msm_pcie_write_reg_field(pcie_dev->dm_core,
				 PCIE_GEN3_GEN2_CTRL,
				 NUM_OF_LANES_MASK << NUM_OF_LANES_SHIFT,
				 link_width);

	/* enable write access to RO register */
	msm_pcie_write_mask(pcie_dev->dm_core + PCIE_GEN3_MISC_CONTROL, 0,
			    BIT(0));

	/* Set Maximum link width as current width */
	msm_pcie_write_reg_field(pcie_dev->dm_core, PCIE20_CAP + PCI_EXP_LNKCAP,
				 PCI_EXP_LNKCAP_MLW, link_width);

	/* disable write access to RO register */
	msm_pcie_write_mask(pcie_dev->dm_core + PCIE_GEN3_MISC_CONTROL, BIT(0),
			    0);

	/* Re-read the capability so the cached max matches the hardware. */
	pcie_dev->link_width_max =
		(readl_relaxed(pcie_dev->dm_core + PCIE20_CAP + PCI_EXP_LNKCAP) &
		 PCI_EXP_LNKCAP_MLW) >> PCI_EXP_LNKSTA_NLW_SHIFT;

	PCIE_DBG(pcie_dev,
		 "PCIe: RC%d: updated maximum link width supported to: %d\n",
		 pcie_dev->rc_idx, pcie_dev->link_width_max);

	return 0;

invalid_link_width:
	PCIE_ERR(pcie_dev,
		 "PCIe: RC%d: unsupported link width request: %d, Max: %d\n",
		 pcie_dev->rc_idx,
		 target_link_width >> PCI_EXP_LNKSTA_NLW_SHIFT,
		 pcie_dev->link_width_max);
	return -EINVAL;
}
/*
 * msm_pcie_dsp_link_control - enable/disable the DSP<->EP secondary link.
 *
 * @pci_dev: a device below a downstream switch port (DSP) under this RC.
 * @link_enable: true to clear Link Disable and wait for training, false
 *               to set Link Disable.
 *
 * Operates on the downstream port (pci_dev->bus->self) via its Link
 * Control register. On enable, polls Link Status for up to
 * LINK_UP_CHECK_MAX_COUNT iterations; if the port advertises Data Link
 * Layer Link Active Reporting (DLLLARC) the DLLLA bit must also be set,
 * otherwise only Link Training clear is required.
 *
 * Returns 0 on success (or when there is nothing to do: RC powered off,
 * or no downstream switch port), MSM_PCIE_ERROR when training times out.
 */
int msm_pcie_dsp_link_control(struct pci_dev *pci_dev,
			      bool link_enable)
{
	int ret = 0;
	struct pci_dev *dsp_dev = NULL;
	u16 link_control = 0;
	u16 link_status = 0;
	u32 link_capability = 0;
	int link_check_count = 0;
	bool link_trained = false;
	struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(pci_dev->bus);

	if (!pcie_dev->power_on)
		return 0;

	/* The parent must be a downstream switch port for this to apply. */
	dsp_dev = pci_dev->bus->self;
	if (pci_pcie_type(dsp_dev) != PCI_EXP_TYPE_DOWNSTREAM) {
		PCIE_DBG(pcie_dev,
			 "PCIe: RC%d: no DSP<->EP link under this RC\n",
			 pcie_dev->rc_idx);
		return 0;
	}

	pci_read_config_dword(dsp_dev, dsp_dev->pcie_cap + PCI_EXP_LNKCAP,
			      &link_capability);
	pci_read_config_word(dsp_dev, dsp_dev->pcie_cap + PCI_EXP_LNKCTL,
			     &link_control);

	if (link_enable) {
		/* Clear Link Disable, then wait for the link to train. */
		link_control &= ~PCI_EXP_LNKCTL_LD;
		pci_write_config_word(dsp_dev,
				      dsp_dev->pcie_cap + PCI_EXP_LNKCTL,
				      link_control);
		PCIE_DBG(pcie_dev,
			 "PCIe: RC%d: DSP<->EP Link is enabled\n",
			 pcie_dev->rc_idx);

		/* Wait for up to 100ms for the link to come up */
		do {
			usleep_range(LINK_UP_TIMEOUT_US_MIN,
				     LINK_UP_TIMEOUT_US_MAX);
			pci_read_config_word(dsp_dev,
					     dsp_dev->pcie_cap + PCI_EXP_LNKSTA,
					     &link_status);
			if (link_capability & PCI_EXP_LNKCAP_DLLLARC)
				link_trained = (!(link_status &
						PCI_EXP_LNKSTA_LT)) &&
						(link_status &
						PCI_EXP_LNKSTA_DLLLA);
			else
				link_trained = !(link_status &
						 PCI_EXP_LNKSTA_LT);

			if (link_trained)
				break;
		} while (link_check_count++ < LINK_UP_CHECK_MAX_COUNT);

		if (link_trained) {
			PCIE_DBG(pcie_dev,
				 "PCIe: RC%d: DSP<->EP link status: 0x%04x\n",
				 pcie_dev->rc_idx, link_status);
			PCIE_DBG(pcie_dev,
				 "PCIe: RC%d: DSP<->EP Link is up after %d checkings\n",
				 pcie_dev->rc_idx, link_check_count);
		} else {
			PCIE_DBG(pcie_dev, "DSP<->EP link initialization failed\n");
			ret = MSM_PCIE_ERROR;
		}
	} else {
		/* Set Link Disable; no wait is required for link-down. */
		link_control |= PCI_EXP_LNKCTL_LD;
		pci_write_config_word(dsp_dev,
				      dsp_dev->pcie_cap + PCI_EXP_LNKCTL,
				      link_control);
		PCIE_DBG(pcie_dev,
			 "PCIe: RC%d: DSP<->EP Link is disabled\n",
			 pcie_dev->rc_idx);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(msm_pcie_dsp_link_control);
/*
 * msm_pcie_allow_l1 - drop one reference on the L1-inhibit count and
 * re-enable ASPM L1 when the count reaches zero.
 *
 * Counterpart of msm_pcie_prevent_l1(). Serialized by aspm_lock. No-ops
 * when debugfs has pinned L1 off, when L1 is unsupported, or when the
 * link is owned by DRV. Unbalanced calls (count going negative) are
 * logged but not corrected.
 */
void msm_pcie_allow_l1(struct pci_dev *pci_dev)
{
	struct pci_dev *root_pci_dev;
	struct msm_pcie_dev_t *pcie_dev;
#ifdef CONFIG_SEC_PCIE_L1SS
	u32 ltssm, val = ~0;
	struct pci_dev *ep_pci_dev = NULL;
#endif

	root_pci_dev = pcie_find_root_port(pci_dev);
	if (!root_pci_dev) {
		pr_info("[%d] skip %s root_pci_dev : null\n", current->pid, __func__);
		return;
	}

	pcie_dev = PCIE_BUS_PRIV_DATA(root_pci_dev->bus);

	mutex_lock(&pcie_dev->aspm_lock);

	if (pcie_dev->debugfs_l1) {
		PCIE_DBG2(pcie_dev,
			  "PCIe: RC%d: debugfs_l1 is set so no-op\n",
			  pcie_dev->rc_idx);
		mutex_unlock(&pcie_dev->aspm_lock);
		return;
	}

	if (!pcie_dev->l1_supported) {
		PCIE_DBG2(pcie_dev,
			  "[%d] PCIe: RC%d: %02x:%02x.%01x: l1 not supported\n",
			  current->pid, pcie_dev->rc_idx, pci_dev->bus->number,
			  PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
		mutex_unlock(&pcie_dev->aspm_lock);
		return;
	}

	/* Reject the allow_l1 call if we are already in drv state */
	if (pcie_dev->link_status == MSM_PCIE_LINK_DRV) {
		PCIE_DBG2(pcie_dev, "PCIe: RC%d: %02x:%02x.%01x: Error\n",
			  pcie_dev->rc_idx, pci_dev->bus->number,
			  PCI_SLOT(pci_dev->devfn),
			  PCI_FUNC(pci_dev->devfn));
		mutex_unlock(&pcie_dev->aspm_lock);
		return;
	}

	/* Decrement the inhibit refcount; going negative is a caller bug. */
	if (unlikely(--pcie_dev->prevent_l1 < 0))
		PCIE_ERR(pcie_dev,
			 "[%d] PCIe: RC%d: %02x:%02x.%01x: unbalanced prevent_l1: %d < 0\n",
			 current->pid, pcie_dev->rc_idx, pci_dev->bus->number,
			 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn),
			 pcie_dev->prevent_l1);

	/* Other holders remain: leave L1 disabled. */
	if (pcie_dev->prevent_l1) {
		PCIE_DBG(pcie_dev, "[%d] PCIe: RC%d: %02x:%02x.%01x: exit, prevent_l1(%d)\n",
			 current->pid, pcie_dev->rc_idx, pci_dev->bus->number,
			 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn), pcie_dev->prevent_l1);
		mutex_unlock(&pcie_dev->aspm_lock);
		return;
	}

#ifdef CONFIG_SEC_PCIE_L1SS
	/* Snapshot the LTSSM state and the first EP's LNKCTL for later. */
	ltssm = readl_relaxed(pcie_dev->parf + PCIE20_PARF_LTSSM) & MSM_PCIE_LTSSM_MASK;
	if ((ltssm != MSM_PCIE_LTSSM_L0) && (ltssm != MSM_PCIE_LTSSM_L0S)) {
		PCIE_INFO(pcie_dev, "PCIe RC%d: LTSSM_STATE: %s\n",
			  pcie_dev->rc_idx, TO_LTSSM_STR(ltssm));
	} else {
		if (!list_empty(&root_pci_dev->subordinate->devices))
			ep_pci_dev = list_entry(root_pci_dev->subordinate->devices.next,
						struct pci_dev, bus_list);
		if (ep_pci_dev) {
			pci_read_config_dword(ep_pci_dev,
					      ep_pci_dev->pcie_cap + PCI_EXP_LNKCTL, &val);
		}
	}
#endif

	/* Clear BIT(5) of PARF_PM_CTRL — presumably the "block L1 entry"
	 * control (prevent_l1 sets the same bit); confirm against the HPG. */
	msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL, BIT(5), 0);

	/* enable L1 */
	msm_pcie_write_mask(pcie_dev->dm_core +
			    (root_pci_dev->pcie_cap + PCI_EXP_LNKCTL),
			    0, PCI_EXP_LNKCTL_ASPM_L1);

#ifdef CONFIG_SEC_PCIE_L1SS
	/* If the EP did not have ASPM L1 enabled, re-enable it there too. */
	if (!(val & PCI_EXP_LNKCTL_ASPM_L1))
		sec_pcie_enable_ep_l1(ep_pci_dev, pcie_dev);
#endif

	PCIE_DBG2(pcie_dev, "[%d] PCIe: RC%d: %02x:%02x.%01x: exit\n",
		  current->pid, pcie_dev->rc_idx, pci_dev->bus->number,
		  PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
	mutex_unlock(&pcie_dev->aspm_lock);
}
EXPORT_SYMBOL(msm_pcie_allow_l1);
/*
 * msm_pcie_prevent_l1 - take one reference on the L1-inhibit count; the
 * first holder disables ASPM L1 and waits for the link to settle in
 * L0/L0s.
 *
 * Counterpart of msm_pcie_allow_l1(); every successful call must be
 * balanced by one allow_l1 call. Serialized by aspm_lock.
 *
 * Returns 0 on success (including the no-op cases: debugfs pin, L1
 * unsupported, or an additional nested reference), -ENODEV with no root
 * port, -EINVAL while the link is DRV-owned, -EIO when the link never
 * reaches L0/L0s within the poll budget (registers are dumped and the
 * reference is dropped again via allow_l1).
 */
int msm_pcie_prevent_l1(struct pci_dev *pci_dev)
{
	struct pci_dev *root_pci_dev;
	struct msm_pcie_dev_t *pcie_dev;
	u32 cnt = 0;
	u32 cnt_max = 1000; /* 100ms timeout */
	int ret = 0;

	root_pci_dev = pcie_find_root_port(pci_dev);
	if (!root_pci_dev) {
		pr_info("[%d] skip %s root_pci_dev : null\n", current->pid, __func__);
		return -ENODEV;
	}

	pcie_dev = PCIE_BUS_PRIV_DATA(root_pci_dev->bus);

	/* disable L1 */
	mutex_lock(&pcie_dev->aspm_lock);

	if (pcie_dev->debugfs_l1) {
		PCIE_DBG2(pcie_dev,
			  "PCIe: RC%d: debugfs_l1 is set so no-op\n",
			  pcie_dev->rc_idx);
		mutex_unlock(&pcie_dev->aspm_lock);
		return 0;
	}

	if (!pcie_dev->l1_supported) {
		PCIE_DBG2(pcie_dev,
			  "[%d] PCIe: RC%d: %02x:%02x.%01x: L1 not supported\n",
			  current->pid, pcie_dev->rc_idx, pci_dev->bus->number,
			  PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
		mutex_unlock(&pcie_dev->aspm_lock);
		return 0;
	}

	/* Reject the prevent_l1 call if we are already in drv state */
	if (pcie_dev->link_status == MSM_PCIE_LINK_DRV) {
		ret = -EINVAL;
		PCIE_DBG2(pcie_dev, "PCIe: RC%d: %02x:%02x.%01x:ret %d exit\n",
			  pcie_dev->rc_idx, pci_dev->bus->number,
			  PCI_SLOT(pci_dev->devfn),
			  PCI_FUNC(pci_dev->devfn), ret);
		mutex_unlock(&pcie_dev->aspm_lock);
		goto out;
	}

	/* Nested call: L1 is already disabled, just bump the refcount. */
	if (pcie_dev->prevent_l1++) {
		PCIE_DBG(pcie_dev, "[%d] PCIe: RC%d: exit - prevent_l1(%d)\n",
			 current->pid, pcie_dev->rc_idx, pcie_dev->prevent_l1);
		mutex_unlock(&pcie_dev->aspm_lock);
		return 0;
	}

	/* First holder: clear ASPM L1 at the root port, then set BIT(5) of
	 * PARF_PM_CTRL (allow_l1 clears the same bit). */
	msm_pcie_write_mask(pcie_dev->dm_core +
			    (root_pci_dev->pcie_cap + PCI_EXP_LNKCTL),
			    PCI_EXP_LNKCTL_ASPM_L1, 0);
	msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_PM_CTRL, 0, BIT(5));

	/* confirm link is in L0/L0s */
	while (!msm_pcie_check_ltssm_state(pcie_dev, MSM_PCIE_LTSSM_L0) &&
	       !msm_pcie_check_ltssm_state(pcie_dev, MSM_PCIE_LTSSM_L0S)) {
		if (unlikely(cnt++ >= cnt_max)) {
			PCIE_ERR(pcie_dev,
				 "[%d] PCIe: RC%d: %02x:%02x.%01x: failed to transition to L0\n",
				 current->pid, pcie_dev->rc_idx, pci_dev->bus->number,
				 PCI_SLOT(pci_dev->devfn),
				 PCI_FUNC(pci_dev->devfn));
			PCIE_ERR(pcie_dev,
				 "PCIe: RC%d: dump PCIe registers\n",
				 pcie_dev->rc_idx);
			msm_pcie_clk_dump(pcie_dev);
			pcie_parf_dump(pcie_dev);
			pcie_dm_core_dump(pcie_dev);
			pcie_phy_dump(pcie_dev);
			pcie_sm_dump(pcie_dev);
			pcie_crm_dump(pcie_dev);
			ret = -EIO;
			goto err;
		}
		usleep_range(100, 105);
	}

	PCIE_DBG2(pcie_dev, "[%d] PCIe: RC%d: %02x:%02x.%01x: exit\n",
		  current->pid, pcie_dev->rc_idx, pci_dev->bus->number,
		  PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
	mutex_unlock(&pcie_dev->aspm_lock);
	return 0;

err:
	PCIE_ERR(pcie_dev, "[%d] PCIe: RC%d: %02x:%02x.%01x: err exit\n",
		 current->pid, pcie_dev->rc_idx, pci_dev->bus->number,
		 PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
	/* Unlock first: allow_l1 re-takes aspm_lock to drop our reference. */
	mutex_unlock(&pcie_dev->aspm_lock);
	msm_pcie_allow_l1(pci_dev);
out:
	return ret;
}
EXPORT_SYMBOL(msm_pcie_prevent_l1);
- static int msm_pcie_read_devid_all(struct pci_dev *pdev, void *dev)
- {
- u16 device_id;
- pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
- return 0;
- }
/*
 * msm_pcie_poll_for_l0_from_l0s - busy-wait until the LTSSM reports L0.
 *
 * Only relevant when L0s is supported; each poll iteration walks the bus
 * performing config reads (msm_pcie_read_devid_all), which generates link
 * traffic while waiting.
 *
 * NOTE(review): there is no iteration limit — if the link never reaches
 * L0 this loops forever; consider whether a bounded poll is needed.
 */
static void msm_pcie_poll_for_l0_from_l0s(struct msm_pcie_dev_t *dev)
{
	if (!dev->l0s_supported)
		return;

	while (!msm_pcie_check_ltssm_state(dev, MSM_PCIE_LTSSM_L0))
		pci_walk_bus(dev->dev->bus, msm_pcie_read_devid_all, dev);
}
/*
 * msm_pcie_set_target_link_speed - set the ceiling GEN speed for an RC.
 *
 * @rc_idx: root complex index.
 * @target_link_speed: requested GEN speed; 0 resets to the default
 *                     (DT-specified speed if present, else HW maximum).
 * @force: bypass the DT ceiling and allow up to the HW maximum.
 *
 * Returns 0 on success, -EINVAL for an invalid index/speed,
 * -EPROBE_DEFER when the RC has not finished probing.
 *
 * On CONFIG_SEC_PCIE_KEEP_LINKBW builds this always returns -EINVAL
 * right after the probe check, deliberately keeping the current link
 * speed; everything below that #ifdef is then unreachable.
 */
int msm_pcie_set_target_link_speed(u32 rc_idx, u32 target_link_speed,
				   bool force)
{
	struct msm_pcie_dev_t *pcie_dev;

	if (rc_idx >= MAX_RC_NUM) {
		pr_err("PCIe: invalid rc index %u\n", rc_idx);
		return -EINVAL;
	}

	pcie_dev = &msm_pcie_dev[rc_idx];

	if (!pcie_dev->drv_ready) {
		PCIE_DBG(pcie_dev,
			 "PCIe: RC%d: has not been successfully probed yet\n",
			 pcie_dev->rc_idx);
		return -EPROBE_DEFER;
	}

	/*
	 * Reject the request if it exceeds what PCIe RC is capable or if
	 * it's greater than what was specified in DT (if present)
	 */
#ifdef CONFIG_SEC_PCIE_KEEP_LINKBW
	PCIE_DBG(pcie_dev, "PCIe: RC%d: keep current link speed\n", pcie_dev->rc_idx);
	return -EINVAL;
#endif
	if (target_link_speed > pcie_dev->bw_gen_max ||
	    (pcie_dev->dt_target_link_speed && !force &&
	     target_link_speed > pcie_dev->dt_target_link_speed)) {
		PCIE_DBG(pcie_dev,
			 "PCIe: RC%d: invalid target link speed: %d\n",
			 pcie_dev->rc_idx, target_link_speed);
		return -EINVAL;
	}

	pcie_dev->target_link_speed = target_link_speed;

	/*
	 * The request 0 will reset maximum GEN speed to default. Default will
	 * be devicetree specified GEN speed if present else it will be whatever
	 * the PCIe root complex is capable of.
	 */
	if (!target_link_speed) {
		pcie_dev->target_link_speed = pcie_dev->dt_target_link_speed ?
			pcie_dev->dt_target_link_speed : pcie_dev->bw_gen_max;
		if (force)
			pcie_dev->target_link_speed = pcie_dev->bw_gen_max;
	}

	PCIE_DBG(pcie_dev, "PCIe: RC%d: target_link_speed is now: 0x%x.\n",
		 pcie_dev->rc_idx, pcie_dev->target_link_speed);

	return 0;
}
EXPORT_SYMBOL(msm_pcie_set_target_link_speed);
/**
 * msm_pcie_set_link_bandwidth() - will perform only dynamic GEN speed request
 * @target_link_speed: input the target link speed
 * @target_link_width: currently this API does not support dynamic link width change
 *
 * Sequence: program LNKCTL2 target speed, take a prevent_l1 reference
 * (gen switch requires L0), disable L0s, scale resources up before
 * retraining when increasing speed (and down after, when decreasing),
 * retrain, verify the resulting speed, then refresh the ICC vote and
 * release L0s/L1.
 *
 * Returns 0 on success or when no change is needed; -EINVAL/-ENODEV for
 * bad arguments; -EIO when retraining fails or the speed did not change.
 * On CONFIG_SEC_PCIE_KEEP_LINKBW builds it always returns -EINVAL,
 * keeping the current bandwidth (the rest is unreachable).
 */
int msm_pcie_set_link_bandwidth(struct pci_dev *pci_dev, u16 target_link_speed,
				u16 target_link_width)
{
	struct pci_dev *root_pci_dev;
	struct msm_pcie_dev_t *pcie_dev;
	u16 link_status;
	u16 current_link_speed;
	u16 current_link_width;
	bool set_link_speed = true;
	int ret;

	if (!pci_dev) {
		pr_info("[%d] skip %s pci_dev : null\n", current->pid, __func__);
		return -EINVAL;
	}

	root_pci_dev = pcie_find_root_port(pci_dev);
	if (!root_pci_dev)
		return -ENODEV;

	pcie_dev = PCIE_BUS_PRIV_DATA(root_pci_dev->bus);

	PCIE_DBG(pcie_dev, "[%d] PCIe: RC%d: enter\n", current->pid, pcie_dev->rc_idx);

#ifdef CONFIG_SEC_PCIE_KEEP_LINKBW
	PCIE_DBG(pcie_dev, "PCIe: RC%d: keep current link bandwidth\n", pcie_dev->rc_idx);
	return -EINVAL;
#endif

	if (target_link_speed > pcie_dev->bw_gen_max ||
	    (pcie_dev->target_link_speed &&
	     target_link_speed > pcie_dev->target_link_speed)) {
		PCIE_DBG(pcie_dev,
			 "PCIe: RC%d: invalid target link speed: %d\n",
			 pcie_dev->rc_idx, target_link_speed);
		return -EINVAL;
	}

	pcie_capability_read_word(root_pci_dev, PCI_EXP_LNKSTA, &link_status);

	current_link_speed = link_status & PCI_EXP_LNKSTA_CLS;
	current_link_width = link_status & PCI_EXP_LNKSTA_NLW;

	if (target_link_speed == current_link_speed)
		set_link_speed = false;
	else
		PCIE_DBG(pcie_dev,
			 "PCIe: RC%d: switching from Gen%d to Gen%d\n",
			 pcie_dev->rc_idx, current_link_speed,
			 target_link_speed);

	if (!set_link_speed) {
		PCIE_DBG(pcie_dev, "[%d] PCIe: RC%d: exit - nothing to change\n",
			 current->pid, pcie_dev->rc_idx);
		return 0;
	}

	PCIE_DBG(pcie_dev,
		 "PCIe: RC%d: current link width:%d max link width:%d\n",
		 pcie_dev->rc_idx,
		 current_link_width >> PCI_EXP_LNKSTA_NLW_SHIFT,
		 pcie_dev->link_width_max);

	/* Program the Target Link Speed field before retraining. */
	if (set_link_speed)
		msm_pcie_config_clear_set_dword(root_pci_dev,
			root_pci_dev->pcie_cap +
			PCI_EXP_LNKCTL2,
			PCI_EXP_LNKCTL2_TLS,
			target_link_speed);

	/* need to be in L0 for gen switch */
	ret = msm_pcie_prevent_l1(root_pci_dev);
	if (ret) {
		PCIE_ERR(pcie_dev, "[%d] PCIe: RC%d: exit - msm_pcie_prevent_l1(%d)\n",
			 current->pid, pcie_dev->rc_idx, ret);
		return ret;
	}

	msm_pcie_config_l0s_disable_all(pcie_dev, root_pci_dev->bus);

	/* in case link is already in L0s bring link back to L0 */
	msm_pcie_poll_for_l0_from_l0s(pcie_dev);

	/* Speed-up: raise clocks/votes before the switch. */
	if (target_link_speed > current_link_speed)
		msm_pcie_scale_link_bandwidth(pcie_dev, target_link_speed);

	ret = msm_pcie_link_retrain(pcie_dev, root_pci_dev);
	if (ret) {
		PCIE_ERR(pcie_dev, "[%d] PCIe: RC%d: exit - msm_pcie_link_retrain(%d)\n",
			 current->pid, pcie_dev->rc_idx, ret);
		goto out;
	}

	if (pcie_dev->current_link_speed != target_link_speed) {
#ifdef CONFIG_SEC_PCIE
		set_bit(PCIE_ERROR_LINK_SPEED_MISMATCH, &pcie_dev->pcie_error);
#endif
		/*
		 * NOTE(review): link_status here is the value read before
		 * retraining, so the "current" speed/width logged below is
		 * stale — confirm whether a re-read was intended.
		 */
		PCIE_ERR(pcie_dev,
			 "PCIe: RC%d: failed to switch bandwidth: target speed: %d"
			 " / current speed: %d width: %d\n",
			 pcie_dev->rc_idx, target_link_speed,
			 link_status & PCI_EXP_LNKSTA_CLS, link_status & PCI_EXP_LNKSTA_NLW);
#ifdef CONFIG_SEC_PANIC_PCIE_ERR
		panic("PCIe: RC%d: Link BW fail. target GEN%d, current GEN%d,%dL\n",
		      pcie_dev->rc_idx,
		      target_link_speed,
		      link_status & PCI_EXP_LNKSTA_CLS, link_status & PCI_EXP_LNKSTA_NLW);
#endif
		ret = -EIO;
		goto out;
	}
#ifdef CONFIG_SEC_PCIE
	else {
		PCIE_INFO(pcie_dev,
			  "PCIe: RC%d: switched bandwidth: target speed: %d\n",
			  pcie_dev->rc_idx, target_link_speed);
	}
#endif

	/* Speed-down: lower clocks/votes only after the switch succeeded. */
	if (target_link_speed < current_link_speed)
		msm_pcie_scale_link_bandwidth(pcie_dev, target_link_speed);

	msm_pcie_icc_vote(pcie_dev,
			  pcie_dev->current_link_speed, pcie_dev->current_link_width, false);

	PCIE_DBG(pcie_dev, "PCIe: RC%d: successfully switched link bandwidth\n",
		 pcie_dev->rc_idx);

out:
	if (ret) {
		/* Dump registers incase of the bandwidth switch failure */
		pcie_parf_dump(pcie_dev);
		pcie_dm_core_dump(pcie_dev);
		pcie_phy_dump(pcie_dev);
		pcie_sm_dump(pcie_dev);
		pcie_crm_dump(pcie_dev);
	}
	/* Always release L0s and the prevent_l1 reference taken above. */
	msm_pcie_config_l0s_enable_all(pcie_dev);
	msm_pcie_allow_l1(root_pci_dev);

	PCIE_DBG(pcie_dev, "[%d] PCIe: RC%d: exit\n", current->pid, pcie_dev->rc_idx);
	return ret;
}
EXPORT_SYMBOL(msm_pcie_set_link_bandwidth);
/*
 * msm_pcie_pm_suspend_noirq - NOIRQ-phase system suspend handler.
 *
 * Only acts when the RC is enumerated, powered on, and configured for
 * APSS-based L1ss sleep. Waits for the link to settle in L1ss (polling
 * BIT(8) of PARF_PM_STTS); if it does not, the function bails out
 * successfully and leaves the link up. Otherwise it marks the device
 * suspended, blocks config/MSI access, overrides CLKREQ so the EP cannot
 * pull the link back up while clocks are off, then tears down clocks,
 * votes, PHY and rails in a strict order (registers are touched only
 * while the AHB clock is still running).
 *
 * Returns 0, or the ICC-vote error code via the out label.
 */
static int __maybe_unused msm_pcie_pm_suspend_noirq(struct device *dev)
{
	u32 val;
	int ret_l1ss, i, rc;
	unsigned long irqsave_flags;
	char ahb_clk[MAX_PROP_SIZE];
	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
		dev_get_drvdata(dev);

	PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);

	/* Name used to skip the AHB clock in the bulk-disable loop below. */
	scnprintf(ahb_clk, MAX_PROP_SIZE, "pcie_cfg_ahb_clk");

	mutex_lock(&pcie_dev->recovery_lock);

	if (pcie_dev->enumerated && pcie_dev->power_on &&
	    pcie_dev->apss_based_l1ss_sleep) {

		/* Wait till link settle's in L1ss */
		ret_l1ss = readl_poll_timeout((pcie_dev->parf
			+ PCIE20_PARF_PM_STTS), val, (val & BIT(8)), L1SS_POLL_INTERVAL_US,
			L1SS_POLL_TIMEOUT_US);
		if (!ret_l1ss) {
			PCIE_DBG(pcie_dev, "RC%d: Link is in L1ss\n",
				 pcie_dev->rc_idx);
		} else {
			/* Not in L1ss: skip the teardown but report success. */
			PCIE_INFO(pcie_dev, "RC%d: Link is not in L1ss\n",
				  pcie_dev->rc_idx);
			mutex_unlock(&pcie_dev->recovery_lock);
			PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
			return 0;
		}

		/* Keep the device in power off state */
		pcie_dev->power_on = false;

		/* Set flag to indicate client has suspended */
		pcie_dev->user_suspend = true;

		/* Set flag to indicate device has suspended */
		spin_lock_irqsave(&pcie_dev->irq_lock, irqsave_flags);
		pcie_dev->suspending = true;
		spin_unlock_irqrestore(&pcie_dev->irq_lock, irqsave_flags);

		/* Restrict access to config space */
		spin_lock_irqsave(&pcie_dev->cfg_lock,
				  pcie_dev->irqsave_flags);
		pcie_dev->cfg_access = false;
		spin_unlock_irqrestore(&pcie_dev->cfg_lock,
				       pcie_dev->irqsave_flags);

		/* suspend access to MSI register. resume access in resume */
		if (!pcie_dev->lpi_enable)
			msm_msi_config_access(dev_get_msi_domain(&pcie_dev->dev->dev),
					      false);

		/*
		 * When GDSC is turned off, it will reset controller and it can assert
		 * clk-req GPIO. With assertion of CLKREQ gpio, endpoint tries to bring
		 * link back to L0, but since all clocks are turned off on host, this
		 * can result in link down.
		 *
		 * So, release the control of CLKREQ gpio from controller by overriding it.
		 */
		msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_CLKREQ_OVERRIDE,
				   PCIE20_PARF_CLKREQ_IN_ENABLE | PCIE20_PARF_CLKREQ_IN_VALUE);

		if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
			pinctrl_select_state(pcie_dev->pinctrl,
					     pcie_dev->pins_sleep);

		/* Disable every core clock except the AHB (config) clock. */
		for (i = 0; i < pcie_dev->num_clk; i++)
			if (pcie_dev->clk[i].hdl && strcmp(pcie_dev->clk[i].name, ahb_clk))
				clk_disable_unprepare(pcie_dev->clk[i].hdl);

		rc = msm_pcie_icc_vote(pcie_dev, 0, 0, false);
		if (rc)
			goto out;

		/* switch phy aux clock mux to xo before turning off gdsc-core */
		if (pcie_dev->phy_aux_clk_mux && pcie_dev->ref_clk_src)
			clk_set_parent(pcie_dev->phy_aux_clk_mux, pcie_dev->ref_clk_src);

		/* switch pipe clock mux to xo before turning off gdsc */
		if (pcie_dev->pipe_clk_mux && pcie_dev->ref_clk_src)
			clk_set_parent(pcie_dev->pipe_clk_mux, pcie_dev->ref_clk_src);

		/* Disable the pipe clock*/
		msm_pcie_pipe_clk_deinit(pcie_dev);

		/* Shut off FLL */
		if (pcie_dev->phy_aux_clk_config1_offset)
			msm_pcie_write_reg_field(pcie_dev->phy,
				pcie_dev->phy_aux_clk_config1_offset,
				MSM_PCIE_PHY_SW_AUX_CLK_REQ,
				MSM_PCIE_PHY_SW_AUX_CLK_REQ_VAL);

		/* Enable ext clk buf en to eliminate VDDA lekeage path*/
		if (pcie_dev->phy_pll_clk_enable1_offset)
			msm_pcie_write_reg_field(pcie_dev->phy,
				pcie_dev->phy_pll_clk_enable1_offset,
				MSM_PCIE_EXT_CLKBUF_EN_MUX,
				MSM_PCIE_EXT_CLKBUF_EN_MUX_VAL);

		/* park the PCIe PHY in power down mode */
		if (pcie_dev->phy_power_down_offset)
			msm_pcie_write_reg(pcie_dev->phy, pcie_dev->phy_power_down_offset, 0);

		/* Turn off AHB clk as there won't be any more register access */
		clk_disable_unprepare(pcie_dev->ahb_clk);

		/* disable the controller GDSC*/
		regulator_disable(pcie_dev->gdsc_core);

		/* Disable the voltage regulators*/
		msm_pcie_vreg_deinit_analog_rails(pcie_dev);
	}
	mutex_unlock(&pcie_dev->recovery_lock);

	PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
	return 0;

out:
	mutex_unlock(&pcie_dev->recovery_lock);
	return rc;
}
- static int __maybe_unused msm_pcie_pm_resume_noirq(struct device *dev)
- {
- int i, rc;
- unsigned long irqsave_flags;
- char ahb_clk[MAX_PROP_SIZE];
- struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
- dev_get_drvdata(dev);
- PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);
- scnprintf(ahb_clk, MAX_PROP_SIZE, "pcie_cfg_ahb_clk");
- mutex_lock(&pcie_dev->recovery_lock);
- if (pcie_dev->enumerated && !pcie_dev->power_on &&
- pcie_dev->apss_based_l1ss_sleep) {
- /* Enable the voltage regulators*/
- msm_pcie_vreg_init_analog_rails(pcie_dev);
- /* Enable GDSC core */
- rc = regulator_enable(pcie_dev->gdsc_core);
- if (rc) {
- PCIE_ERR(pcie_dev, "PCIe: fail to enable GDSC-CORE for RC%d (%s)\n",
- pcie_dev->rc_idx, pcie_dev->pdev->name);
- msm_pcie_vreg_deinit_analog_rails(pcie_dev);
- mutex_unlock(&pcie_dev->recovery_lock);
- return rc;
- }
- /* Turn on ahb clock first as its needed for register access */
- clk_prepare_enable(pcie_dev->ahb_clk);
- /* switch pipe clock source after gdsc-core is turned on */
- if (pcie_dev->pipe_clk_mux && pcie_dev->pipe_clk_ext_src)
- clk_set_parent(pcie_dev->pipe_clk_mux, pcie_dev->pipe_clk_ext_src);
- if (pcie_dev->phy_pll_clk_enable1_offset)
- msm_pcie_clear_set_reg(pcie_dev->phy, pcie_dev->phy_pll_clk_enable1_offset,
- MSM_PCIE_EXT_CLKBUF_EN_MUX, 0x0);
- if (pcie_dev->phy_aux_clk_config1_offset)
- msm_pcie_clear_set_reg(pcie_dev->phy, pcie_dev->phy_aux_clk_config1_offset,
- MSM_PCIE_PHY_SW_AUX_CLK_REQ, 0x0);
- /* Bring back PCIe PHY from power down */
- if (pcie_dev->phy_power_down_offset)
- msm_pcie_write_reg(pcie_dev->phy, pcie_dev->phy_power_down_offset,
- MSM_PCIE_PHY_SW_PWRDN | MSM_PCIE_PHY_REFCLK_DRV_DSBL);
- rc = msm_pcie_icc_vote(pcie_dev, pcie_dev->current_link_speed,
- pcie_dev->current_link_width, false);
- if (rc)
- goto out;
- for (i = 0; i < pcie_dev->num_clk; i++) {
- if (pcie_dev->clk[i].hdl && strcmp(pcie_dev->clk[i].name, ahb_clk)) {
- rc = clk_prepare_enable(pcie_dev->clk[i].hdl);
- if (rc)
- PCIE_ERR(pcie_dev, "PCIe: RC%d failed to enable clk %s\n",
- pcie_dev->rc_idx, pcie_dev->clk[i].name);
- else
- PCIE_DBG2(pcie_dev, "enable clk %s for RC%d.\n",
- pcie_dev->clk[i].name, pcie_dev->rc_idx);
- }
- }
- /* Enable pipe clocks */
- for (i = 0; i < pcie_dev->num_pipe_clk; i++)
- if (pcie_dev->pipe_clk[i].hdl)
- clk_prepare_enable(pcie_dev->pipe_clk[i].hdl);
- /* switch phy aux clock source from xo to phy aux clk */
- if (pcie_dev->phy_aux_clk_mux && pcie_dev->phy_aux_clk_ext_src)
- clk_set_parent(pcie_dev->phy_aux_clk_mux, pcie_dev->phy_aux_clk_ext_src);
- /* Disable the clkreq override functionality */
- msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_CLKREQ_OVERRIDE, 0x0);
- if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
- pinctrl_select_state(pcie_dev->pinctrl,
- pcie_dev->pins_default);
- /* Keep the device in power on state */
- pcie_dev->power_on = true;
- /* Clear flag to indicate client has resumed */
- pcie_dev->user_suspend = false;
- /* Clear flag to indicate device has resumed */
- spin_lock_irqsave(&pcie_dev->irq_lock, irqsave_flags);
- pcie_dev->suspending = false;
- spin_unlock_irqrestore(&pcie_dev->irq_lock, irqsave_flags);
- /* Allow access to config space */
- spin_lock_irqsave(&pcie_dev->cfg_lock,
- pcie_dev->irqsave_flags);
- pcie_dev->cfg_access = true;
- spin_unlock_irqrestore(&pcie_dev->cfg_lock,
- pcie_dev->irqsave_flags);
- /* resume access to MSI register as link is resumed */
- if (!pcie_dev->lpi_enable)
- msm_msi_config_access(dev_get_msi_domain(&pcie_dev->dev->dev),
- true);
- }
- mutex_unlock(&pcie_dev->recovery_lock);
- PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
- return 0;
- out:
- if (pcie_dev->pipe_clk_ext_src && pcie_dev->pipe_clk_mux)
- clk_set_parent(pcie_dev->pipe_clk_ext_src, pcie_dev->pipe_clk_mux);
- regulator_disable(pcie_dev->gdsc_core);
- msm_pcie_vreg_deinit_analog_rails(pcie_dev);
- mutex_unlock(&pcie_dev->recovery_lock);
- return rc;
- }
- /* System-sleep PM ops: only the noirq phase is handled so the link is
-  * torn down/restored after client drivers have quiesced.
-  */
- static const struct dev_pm_ops qcom_pcie_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(msm_pcie_pm_suspend_noirq, msm_pcie_pm_resume_noirq)
- };
- /* PCI (bus-level) probe for the Qualcomm root ports matched by
-  * msm_pci_device_id: allocates a per-root-device wrapper, attaches it as
-  * drvdata, and widens the DMA mask to 64 bits.
-  * Returns 0 on success or a negative errno.
-  */
- static int msm_pci_probe(struct pci_dev *pci_dev,
- const struct pci_device_id *device_id)
- {
- int ret;
- struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(pci_dev->bus);
- struct msm_root_dev_t *root_dev;
- PCIE_DBG(pcie_dev, "PCIe: RC%d: PCI Probe\n", pcie_dev->rc_idx);
- /* Without a DT node there is no platform wiring to bind against. */
- if (!pci_dev->dev.of_node)
- return -ENODEV;
- root_dev = devm_kzalloc(&pci_dev->dev, sizeof(*root_dev), GFP_KERNEL);
- if (!root_dev)
- return -ENOMEM;
- root_dev->pcie_dev = pcie_dev;
- root_dev->pci_dev = pci_dev;
- dev_set_drvdata(&pci_dev->dev, root_dev);
- ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
- if (ret) {
- PCIE_ERR(pcie_dev, "DMA set mask failed (%d)\n", ret);
- return ret;
- }
- return 0;
- }
- /* Qualcomm (vendor 0x17cb) root-port device IDs handled by msm_pci_probe. */
- static struct pci_device_id msm_pci_device_id[] = {
- {PCI_DEVICE(0x17cb, 0x0108)},
- {PCI_DEVICE(0x17cb, 0x010b)},
- {PCI_DEVICE(0x17cb, 0x010c)},
- {0},
- };
- /* PCI driver bound to the root complex itself (no remove: devm-managed). */
- static struct pci_driver msm_pci_driver = {
- .name = "pci-msm-rc",
- .id_table = msm_pci_device_id,
- .probe = msm_pci_probe,
- };
- /* OF match table for the platform (controller) driver below. */
- static const struct of_device_id msm_pcie_match[] = {
- { .compatible = "qcom,pci-msm", },
- {}
- };
- static struct platform_driver msm_pcie_driver = {
- .probe = msm_pcie_probe,
- .remove = msm_pcie_remove,
- .driver = {
- .name = "pci-msm",
- .pm = &qcom_pcie_pm_ops,
- .of_match_table = msm_pcie_match,
- },
- };
- #ifdef CONFIG_SEC_PCIE_L1SS
- /* sec class device exposing the pcie_l1ss_ctrl sysfs attribute. */
- static struct device *sec_pcie_dev;
- /* Mark that this RC's L1ss gating should honor the EP driver state flag. */
- void sec_pcie_set_use_ep_loaded(struct pci_dev *dev)
- {
- /* fixme : check caller */
- struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
- pcie_dev->use_ep_loaded = true;
- }
- EXPORT_SYMBOL(sec_pcie_set_use_ep_loaded);
- /* Record whether the endpoint's client driver is currently loaded. */
- void sec_pcie_set_ep_driver_loaded(struct pci_dev *dev, bool is_loaded)
- {
- /* fixme : check caller */
- struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
- pcie_dev->ep_loaded = is_loaded;
- }
- EXPORT_SYMBOL(sec_pcie_set_ep_driver_loaded);
- /* True when the EP driver is up, or when the up-check is not in use. */
- static bool sec_pcie_check_ep_driver_up(struct msm_pcie_dev_t *dev)
- {
- return dev->use_ep_loaded ? dev->ep_loaded : true;
- }
- /* pci_walk_bus-style callback: set the ASPM L1 enable bit in LNKCTL of a
-  * leaf endpoint. Devices with a subordinate bus or sitting on the root bus
-  * are rejected with -ENODEV.
-  */
- static int sec_pcie_enable_ep_l1(struct pci_dev *pdev, void *data)
- {
- struct msm_pcie_dev_t *dev = data;
- if (pdev->subordinate || !pdev->bus->parent) {
- PCIE_INFO(dev, "PCIe: RC%d: PCI device %02x:%02x.%01x is not EP\n",
- dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
- return -ENODEV;
- }
- /* clear=0, set=L1: read-modify-write of the Link Control register */
- msm_pcie_config_clear_set_dword(pdev,
- pdev->pcie_cap + PCI_EXP_LNKCTL, 0,
- PCI_EXP_LNKCTL_ASPM_L1);
- PCIE_DBG2(dev, "PCIe: RC%d: PCI device %02x:%02x.%01x L1 enabled\n",
- dev->rc_idx, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
- return 0;
- }
- /* sysfs show handler for pcie_l1ss_ctrl: dumps LTSSM state and, walking
-  * down from the RC (first child of each subordinate bus only), the
-  * ASPM/L1ss capability and control registers of each device.
-  * NOTE(review): hard-wired to msm_pcie_dev[0]; other RCs are not reported.
-  * NOTE(review): config reads happen under irq_lock (a spinlock) — assumes
-  * config accessors here do not sleep; confirm against their definitions.
-  */
- static ssize_t sec_show_l1ss_stat(struct device *in_dev,
- struct device_attribute *attr, char *buf)
- {
- struct msm_pcie_dev_t *dev = &msm_pcie_dev[0];
- struct pci_dev *pdev = dev->dev;
- unsigned long irqsave_flags;
- bool l1_1_cap_support, l1_2_cap_support;
- u32 val;
- u32 l1ss_cap_id_offset, l1ss_cap_offset;
- u32 l1ss_ctl1_offset, l1ss_ctl2_offset, l1ss_ltr_cap_id_offset;
- int count = 0;
- u32 ltssm;
- mutex_lock(&dev->setup_lock);
- spin_lock_irqsave(&dev->irq_lock, irqsave_flags);
- if (!sec_pcie_check_ep_driver_up(dev)) {
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "RC%d: EP driver is not up.\n", dev->rc_idx);
- goto out;
- }
- if (dev->link_status != MSM_PCIE_LINK_ENABLED) {
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "RC%d link is not enabled.\n", dev->rc_idx);
- goto out;
- }
- if (dev->suspending) {
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "RC%d is suspending.\n", dev->rc_idx);
- goto out;
- }
- ltssm = readl_relaxed(dev->parf + PCIE20_PARF_LTSSM) & MSM_PCIE_LTSSM_MASK;
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "LTSSM : %s\n", TO_LTSSM_STR(ltssm));
- while (pdev) {
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "%s\n", dev_name(&pdev->dev));
- /* LNKCAP bits 10/11: ASPM L0s / L1 support */
- pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCAP, &val);
- if (!(val & BIT(10)))
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "\tL0s - Not Support\n");
- if (!(val & BIT(11)))
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "\tL1 - Not Support\n");
-
- pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_LNKCTL, &val);
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "\tL0s %s\n", val & PCI_EXP_LNKCTL_ASPM_L0S ? "E" : "D");
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "\tL1 %s\n", val & PCI_EXP_LNKCTL_ASPM_L1 ? "E" : "D");
- l1ss_cap_id_offset = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!l1ss_cap_id_offset) {
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "\tnot found L1ss capability register\n");
- goto out;
- }
- l1ss_cap_offset = l1ss_cap_id_offset + PCI_L1SS_CAP;
- l1ss_ctl1_offset = l1ss_cap_id_offset + PCI_L1SS_CTL1;
- l1ss_ctl2_offset = l1ss_cap_id_offset + PCI_L1SS_CTL2;
- pci_read_config_dword(pdev, l1ss_cap_offset, &val);
- l1_1_cap_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_1));
- l1_2_cap_support = !!(val & (PCI_L1SS_CAP_ASPM_L1_2));
- if (!l1_1_cap_support && !l1_2_cap_support) {
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "\tnot support L1ss\n");
- goto out;
- }
- pci_read_config_dword(pdev, l1ss_ctl1_offset, &val);
- /* low nibble = all four L1SS enable bits; "E" only when all set */
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "\tL1ss %s (L1SS_CTL1:0x%08x)\n",
- (val & 0xf) == 0xf ? "E" : "D", val);
- pci_read_config_dword(pdev, l1ss_ctl2_offset, &val);
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "\t (L1SS_CTL2:0x%08x)\n", val);
- pci_read_config_dword(pdev, pdev->pcie_cap + PCI_EXP_DEVCTL2, &val);
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "\t (DEV_CTL2:0x%08x)\n", val);
- l1ss_ltr_cap_id_offset = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
- if (l1ss_ltr_cap_id_offset) {
- pci_read_config_dword(pdev,
- l1ss_ltr_cap_id_offset + PCI_LTR_MAX_SNOOP_LAT, &val);
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "\t (LTR_MAX_SNOOP_LAT:0x%08x)\n", val);
- }
- /* descend to the first device on the subordinate bus, if any */
- if (pdev->subordinate)
- pdev = list_entry(pdev->subordinate->devices.next,
- struct pci_dev, bus_list);
- else
- break;
- }
- out:
- spin_unlock_irqrestore(&dev->irq_lock, irqsave_flags);
- mutex_unlock(&dev->setup_lock);
- return (ssize_t)count;
- }
- /* Delayed-work handler: re-enables L1ss for the owning control slot after
-  * the timeout programmed in sec_pcie_l1ss_disable() expires.
-  */
- static void sec_pcie_l1ss_ctrl_worker(struct work_struct *work)
- {
- struct delayed_work *dwork = to_delayed_work(work);
- struct l1ss_ctrl *l1ss_ctrl =
- container_of(dwork, struct l1ss_ctrl, dwork);
- sec_pcie_l1ss_enable(l1ss_ctrl->id);
- }
- /* Clear one requester's L1ss-disable vote (ctrl_id indexes l1ss_ctrls[]).
-  * When the last vote drops, L1ss is re-enabled immediately — or deferred
-  * via pending_l1ss_ctrl if the link is suspending, EP config space is not
-  * accessible, or the EP driver is not up.
-  * Returns 0, or -EINVAL for an out-of-range ctrl_id.
-  * NOTE(review): operates on msm_pcie_dev[0] only.
-  */
- int sec_pcie_l1ss_enable(int ctrl_id)
- {
- struct msm_pcie_dev_t *dev = &msm_pcie_dev[0];
- struct pci_dev *pdev = dev->dev;
- int ret = 0;
- u32 prev_flag;
- unsigned long irqsave_flags;
- bool suspending;
- if (ctrl_id < 0 || ctrl_id >= L1SS_MAX) {
- PCIE_ERR(dev, "RC%d wrong id(%d) -> ignore\n", dev->rc_idx, ctrl_id);
- return -EINVAL;
- }
- mutex_lock(&dev->setup_lock);
- mutex_lock(&dev->l1ss_ctrl_lock);
- prev_flag = dev->l1ss_disable_flag;
- dev->l1ss_disable_flag &= ~l1ss_ctrls[ctrl_id].flag;
- PCIE_DBG(dev, "RC%d triggered by %d:%s (flag=0x%x)\n", dev->rc_idx,
- ctrl_id, l1ss_ctrls[ctrl_id].name,
- dev->l1ss_disable_flag);
- /* only act on the 1 -> 0 transition of the aggregate vote mask */
- if (prev_flag && !dev->l1ss_disable_flag) {
- spin_lock_irqsave(&dev->irq_lock, irqsave_flags);
- suspending = dev->suspending;
- spin_unlock_irqrestore(&dev->irq_lock, irqsave_flags);
- if (suspending || !dev->ep_config_accessible) {
- PCIE_DBG(dev, "RC%d pending - suspending or not accessible\n", dev->rc_idx);
- dev->pending_l1ss_ctrl = true;
- } else if (!sec_pcie_check_ep_driver_up(dev)) {
- PCIE_DBG(dev, "RC%d pending - EP driver is not up\n", dev->rc_idx);
- dev->pending_l1ss_ctrl = true;
- } else {
- /* L1 must be off while the L1ss bits are reprogrammed */
- msm_pcie_config_l1_disable_all(dev, pdev->bus);
- msm_pcie_config_l1ss_enable_all(dev);
- msm_pcie_config_l1_enable_all(dev);
- }
- }
- mutex_unlock(&dev->l1ss_ctrl_lock);
- mutex_unlock(&dev->setup_lock);
- return ret;
- }
- EXPORT_SYMBOL(sec_pcie_l1ss_enable);
- /* Add one requester's L1ss-disable vote. A nonzero per-slot timeout arms
-  * a delayed work that re-enables L1ss automatically. On the 0 -> nonzero
-  * transition the L1ss bits are cleared now, or deferred via
-  * pending_l1ss_ctrl under the same conditions as sec_pcie_l1ss_enable().
-  * Returns 0, or -EINVAL for an out-of-range ctrl_id.
-  */
- int sec_pcie_l1ss_disable(int ctrl_id)
- {
- struct msm_pcie_dev_t *dev = &msm_pcie_dev[0];
- struct pci_dev *pdev = dev->dev;
- int ret = 0;
- u32 prev_flag;
- unsigned long irqsave_flags;
- bool suspending;
- if (ctrl_id < 0 || ctrl_id >= L1SS_MAX) {
- PCIE_ERR(dev, "RC%d wrong id(%d) -> ignore\n", dev->rc_idx, ctrl_id);
- return -EINVAL;
- }
- /* stop a stale auto-re-enable before taking the locks it also takes */
- cancel_delayed_work_sync(&l1ss_ctrls[ctrl_id].dwork);
- mutex_lock(&dev->setup_lock);
- mutex_lock(&dev->l1ss_ctrl_lock);
- prev_flag = dev->l1ss_disable_flag;
- dev->l1ss_disable_flag |= l1ss_ctrls[ctrl_id].flag;
- PCIE_DBG(dev, "RC%d triggered by %d:%s (flag=0x%x)\n", dev->rc_idx,
- ctrl_id, l1ss_ctrls[ctrl_id].name,
- dev->l1ss_disable_flag);
- if (l1ss_ctrls[ctrl_id].timeout) {
- queue_delayed_work(l1ss_ctrl_wq,
- &l1ss_ctrls[ctrl_id].dwork,
- l1ss_ctrls[ctrl_id].timeout * HZ);
- }
- if (!prev_flag && dev->l1ss_disable_flag) {
- spin_lock_irqsave(&dev->irq_lock, irqsave_flags);
- suspending = dev->suspending;
- spin_unlock_irqrestore(&dev->irq_lock, irqsave_flags);
- if (suspending || !dev->ep_config_accessible) {
- PCIE_DBG(dev, "RC%d pending - suspending or not accessible\n", dev->rc_idx);
- dev->pending_l1ss_ctrl = true;
- } else if (!sec_pcie_check_ep_driver_up(dev)) {
- PCIE_DBG(dev, "RC%d pending - EP driver is not up\n", dev->rc_idx);
- dev->pending_l1ss_ctrl = true;
- } else {
- msm_pcie_config_l1_disable_all(dev, pdev->bus);
- msm_pcie_config_l1ss_disable_all(dev, pdev->bus);
- msm_pcie_config_l1_enable_all(dev);
- }
- }
- mutex_unlock(&dev->l1ss_ctrl_lock);
- mutex_unlock(&dev->setup_lock);
- return ret;
- }
- EXPORT_SYMBOL(sec_pcie_l1ss_disable);
- /* sysfs store handler for pcie_l1ss_ctrl: nonzero input enables L1ss via
-  * the L1SS_SYSFS vote slot, zero disables it. Always consumes the whole
-  * write (errors are only logged).
-  */
- static ssize_t sec_write_l1ss_stat(struct device *in_dev,
- struct device_attribute *attr, const char *buf, size_t count)
- {
- unsigned int enable;
- int ret;
- ret = kstrtouint(buf, 0, &enable);
- if (ret) {
- pr_err("%s : Fail get enable(%s)\n", __func__, buf);
- return count;
- }
- if (enable)
- ret = sec_pcie_l1ss_enable(L1SS_SYSFS);
- else
- ret = sec_pcie_l1ss_disable(L1SS_SYSFS);
- if (ret)
- pr_err("%s: Fail sec_pcie_l1ss_%s(%d)\n", __func__,
- enable ? "enable" : "disable", ret);
- return count;
- }
- /* rw for owner/group, read-only for others */
- static DEVICE_ATTR(pcie_l1ss_ctrl, 0664, sec_show_l1ss_stat, sec_write_l1ss_stat);
- #endif
- /* rpmsg probe: record the channel under rpmsg_lock, then kick the
-  * drv_connect work which notifies clients that DRV is available.
-  */
- static int msm_pcie_drv_rpmsg_probe(struct rpmsg_device *rpdev)
- {
- mutex_lock(&pcie_drv.rpmsg_lock);
- pcie_drv.rpdev = rpdev;
- dev_set_drvdata(&rpdev->dev, &pcie_drv);
- mutex_unlock(&pcie_drv.rpmsg_lock);
- /* start drv connection */
- schedule_work(&pcie_drv.drv_connect);
- return 0;
- }
- /* Broadcast a DRV event to every RC that has drv_info (i.e. supports DRV
-  * and has been probed) and currently has an EP connected. On
-  * DRV_DISCONNECT the connection flag is cleared and any queued power
-  * collapse enable/disable work is cancelled under drv_pc_lock.
-  */
- static void msm_pcie_drv_notify_client(struct pcie_drv_sta *pcie_drv,
- enum msm_pcie_event event)
- {
- struct msm_pcie_dev_t *pcie_dev = pcie_drv->msm_pcie_dev;
- int i;
- for (i = 0; i < MAX_RC_NUM; i++, pcie_dev++) {
- struct msm_pcie_drv_info *drv_info = pcie_dev->drv_info;
- PCIE_DBG(pcie_dev, "PCIe: RC%d: event %d received\n",
- pcie_dev->rc_idx, event);
- /* does not support DRV or has not been probed yet */
- if (!drv_info)
- continue;
- if (drv_info->ep_connected) {
- msm_pcie_notify_client(pcie_dev, event);
- if (event & MSM_PCIE_EVENT_DRV_DISCONNECT) {
- mutex_lock(&pcie_dev->drv_pc_lock);
- drv_info->ep_connected = false;
- cancel_work_sync(&pcie_dev->drv_disable_pc_work);
- cancel_work_sync(&pcie_dev->drv_enable_pc_work);
- mutex_unlock(&pcie_dev->drv_pc_lock);
- }
- }
- }
- }
- /* rpmsg remove: drop the channel and DRV votes, flush a possibly-queued
-  * connect work, tell clients DRV disconnected, and unregister the SSR
-  * notifier if one was registered by the connect worker.
-  */
- static void msm_pcie_drv_rpmsg_remove(struct rpmsg_device *rpdev)
- {
- int ret;
- struct pcie_drv_sta *pcie_drv = dev_get_drvdata(&rpdev->dev);
- struct msm_pcie_dev_t *pcie_dev = pcie_drv->msm_pcie_dev;
- mutex_lock(&pcie_drv->rpmsg_lock);
- pcie_drv->rc_drv_enabled = 0;
- pcie_drv->rpdev = NULL;
- mutex_unlock(&pcie_drv->rpmsg_lock);
- /* connect work checks rpdev; flush so it cannot run after us */
- flush_work(&pcie_drv->drv_connect);
- msm_pcie_drv_notify_client(pcie_drv, MSM_PCIE_EVENT_DRV_DISCONNECT);
- if (!pcie_drv->notifier)
- return;
- ret = qcom_unregister_ssr_notifier(pcie_drv->notifier, &pcie_drv->nb);
- if (ret)
- PCIE_ERR(pcie_dev, "PCIe: RC%d: DRV: error %d unregistering notifier\n",
- pcie_dev->rc_idx, ret);
- pcie_drv->notifier = NULL;
- }
- /* rpmsg receive callback. The payload is a stream of msm_pcie_drv_header
-  * records (plus per-message payload); each is validated (length, dev_id,
-  * drv_info present) before dispatch. Only MSM_PCIE_DRV_MSG_ID_ACK is
-  * handled: its u32 status and reply_seq are checked and the waiter on
-  * drv_info->completion is released. Returns 0 or a negative errno on the
-  * first malformed record.
-  */
- static int msm_pcie_drv_rpmsg_cb(struct rpmsg_device *rpdev, void *data,
- int len, void *priv, u32 src)
- {
- struct pcie_drv_sta *pcie_drv = dev_get_drvdata(&rpdev->dev);
- struct msm_pcie_dev_t *pcie_dev;
- struct msm_pcie_drv_header *drv_header;
- struct msm_pcie_drv_info *drv_info;
- while (len) {
- if (len < sizeof(*drv_header)) {
- pr_err("PCIe: DRV: invalid header length: %d\n",
- len);
- return -EINVAL;
- }
- drv_header = data;
- data += sizeof(*drv_header);
- len -= sizeof(*drv_header);
- if (drv_header->dev_id >= MAX_RC_NUM) {
- pr_err("PCIe: DRV: invalid device id: %d\n",
- drv_header->dev_id);
- return -EINVAL;
- }
- pcie_dev = pcie_drv->msm_pcie_dev + drv_header->dev_id;
- drv_info = pcie_dev->drv_info;
- if (!drv_info) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: DRV: no device info found\n",
- pcie_dev->rc_idx);
- return -ENODEV;
- }
- switch (drv_header->msg_id) {
- case MSM_PCIE_DRV_MSG_ID_ACK:
- {
- u32 *status;
- size_t status_size = sizeof(*status);
- if (drv_header->payload_size != status_size) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: DRV: invalid payload size: %d\n",
- pcie_dev->rc_idx,
- drv_header->payload_size);
- return -EINVAL;
- }
- if (len < status_size) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: DRV: invalid status length: %d\n",
- pcie_dev->rc_idx, len);
- return -EINVAL;
- }
- status = data;
- data += status_size;
- len -= status_size;
- /* ack must match the request we are currently waiting on */
- if (drv_header->reply_seq != drv_info->reply_seq) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: DRV: incorrect reply seq: %d: expected seq: %d\n",
- pcie_dev->rc_idx,
- drv_header->reply_seq,
- drv_info->reply_seq);
- return -EINVAL;
- }
- if (*status) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: DRV: invalid status\n",
- pcie_dev->rc_idx);
- return -EINVAL;
- }
- complete(&drv_info->completion);
- break;
- }
- default:
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: DRV: unsupported command: 0x%x\n",
- pcie_dev->rc_idx, drv_header->msg_id);
- return -EINVAL;
- }
- }
- return 0;
- }
- /* SSR (subsystem restart) notifier: before the remote subsystem shuts
-  * down, drop all DRV votes, forget the rpmsg channel and wake clients so
-  * they do not keep relying on DRV hand-off.
-  * NOTE(review): rc_drv_enabled/rpdev are cleared here without
-  * rpmsg_lock, unlike msm_pcie_drv_rpmsg_remove() — confirm intentional.
-  */
- static int msm_pcie_ssr_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
- {
- struct pcie_drv_sta *pcie_drv = container_of(nb, struct pcie_drv_sta,
- nb);
- if (action == QCOM_SSR_BEFORE_SHUTDOWN) {
- pcie_drv->rc_drv_enabled = 0;
- pcie_drv->rpdev = NULL;
- msm_pcie_drv_notify_client(pcie_drv, MSM_PCIE_EVENT_WAKEUP);
- }
- return NOTIFY_OK;
- }
- /* Work item: ask the DRV subsystem (via rpmsg) to disable power collapse. */
- static void msm_pcie_drv_disable_pc(struct work_struct *w)
- {
- struct msm_pcie_dev_t *pcie_dev = container_of(w, struct msm_pcie_dev_t,
- drv_disable_pc_work);
- msm_pcie_drv_send_rpmsg(pcie_dev, &pcie_dev->drv_info->drv_disable_pc);
- }
- /* Work item: ask the DRV subsystem (via rpmsg) to re-enable power collapse. */
- static void msm_pcie_drv_enable_pc(struct work_struct *w)
- {
- struct msm_pcie_dev_t *pcie_dev = container_of(w, struct msm_pcie_dev_t,
- drv_enable_pc_work);
- msm_pcie_drv_send_rpmsg(pcie_dev, &pcie_dev->drv_info->drv_enable_pc);
- }
- /* Work item run after the rpmsg channel appears: offer DRV_CONNECT to
-  * each DRV-capable RC; for accepting clients, mark the EP connected and
-  * replay any standing disable-PC vote. Finally register the early SSR
-  * notifier for the DRV subsystem named by msm_pcie_dev[0].drv_name.
-  */
- static void msm_pcie_drv_connect_worker(struct work_struct *work)
- {
- struct pcie_drv_sta *pcie_drv = container_of(work, struct pcie_drv_sta,
- drv_connect);
- struct msm_pcie_dev_t *pcie_itr, *pcie_dev = pcie_drv->msm_pcie_dev;
- int i;
- /* rpmsg probe hasn't happened yet */
- if (!pcie_drv->rpdev)
- return;
- pcie_itr = pcie_dev;
- for (i = 0; i < MAX_RC_NUM; i++, pcie_itr++) {
- struct msm_pcie_drv_info *drv_info = pcie_itr->drv_info;
- /* does not support DRV or has not been probed yet */
- if (!drv_info || drv_info->ep_connected)
- continue;
- if (!msm_pcie_notify_client(pcie_itr,
- MSM_PCIE_EVENT_DRV_CONNECT))
- continue;
- mutex_lock(&pcie_itr->drv_pc_lock);
- drv_info->ep_connected = true;
- /* a disable-PC request made before connect is delivered now */
- if (pcie_itr->drv_disable_pc_vote)
- queue_work(mpcie_wq, &pcie_itr->drv_disable_pc_work);
- mutex_unlock(&pcie_itr->drv_pc_lock);
- }
- if (!pcie_dev->drv_name)
- return;
- pcie_drv->notifier = qcom_register_early_ssr_notifier(pcie_dev->drv_name, &pcie_drv->nb);
- if (IS_ERR(pcie_drv->notifier)) {
- PCIE_ERR(pcie_dev, "PCIe: RC%d: DRV: failed to register ssr notifier\n",
- pcie_dev->rc_idx);
- pcie_drv->notifier = NULL;
- }
- }
- #if IS_ENABLED(CONFIG_I2C)
- /* Match data identifying the NTN3 PCIe switch behind the i2c control bus. */
- static const struct i2c_driver_data ntn3_data = {
- .client_id = I2C_CLIENT_ID_NTN3,
- };
- static const struct of_device_id of_i2c_id_table[] = {
- { .compatible = "qcom,pcie-i2c-ntn3", .data = &ntn3_data },
- {}
- };
- MODULE_DEVICE_TABLE(of, of_i2c_id_table);
- /* I2C probe for the external PCIe switch controller: resolves the OF
-  * match data, reads the "rc-index" DT property and wires the NTN3 i2c
-  * accessors into msm_pcie_dev[rc_index].i2c_ctrl.
-  * Returns 0 on success or a negative errno.
-  */
- static int pcie_i2c_ctrl_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
- {
- int rc_index = -EINVAL;
- enum i2c_client_id client_id;
- struct pcie_i2c_ctrl *i2c_ctrl;
- const struct of_device_id *match;
- struct i2c_driver_data *data;
- if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
- dev_err(&client->dev, "I2C functionality not supported\n");
- return -EIO;
- }
- /* Fix: fail when there is no OF node; previously client_id was read
-  * uninitialized below in that case (undefined behavior).
-  */
- if (!client->dev.of_node)
- return -ENODEV;
- match = of_match_device(of_match_ptr(of_i2c_id_table),
- &client->dev);
- if (!match) {
- dev_err(&client->dev, "Error: No device match found\n");
- return -ENODEV;
- }
- data = (struct i2c_driver_data *)match->data;
- client_id = data->client_id;
- of_property_read_u32(client->dev.of_node, "rc-index", &rc_index);
- dev_info(&client->dev, "%s: PCIe rc-index: 0x%X\n", __func__, rc_index);
- /* Fix: also reject negative values — rc_index stays -EINVAL when the
-  * "rc-index" property is absent, which previously passed the upper
-  * bound check and indexed msm_pcie_dev[] out of bounds.
-  */
- if (rc_index < 0 || rc_index >= MAX_RC_NUM) {
- dev_err(&client->dev, "invalid RC index %d\n", rc_index);
- return -EINVAL;
- }
- if (client_id == I2C_CLIENT_ID_NTN3) {
- i2c_ctrl = &msm_pcie_dev[rc_index].i2c_ctrl;
- i2c_ctrl->client_i2c_read = ntn3_i2c_read;
- i2c_ctrl->client_i2c_write = ntn3_i2c_write;
- i2c_ctrl->client_i2c_reset = ntn3_ep_reset_ctrl;
- i2c_ctrl->client_i2c_dump_regs = ntn3_dump_regs;
- i2c_ctrl->client_i2c_de_emphasis_wa = ntn3_de_emphasis_wa;
- i2c_ctrl->client = client;
- } else {
- dev_err(&client->dev, "invalid client id %d\n", client_id);
- }
- return 0;
- }
- /* I2C driver for the switch controller; registered from pcie_init(). */
- static struct i2c_driver pcie_i2c_ctrl_driver = {
- .driver = {
- .name = "pcie-i2c-ctrl",
- .of_match_table = of_match_ptr(of_i2c_id_table),
- },
- .probe = pcie_i2c_ctrl_probe,
- };
- #endif
- /* Module init: set up per-RC state (locks, lists, work items, optional IPC
-  * logging), register the i2c helper driver, poke the PCIe SM power
-  * registers when the module-parameter array is fully populated, then
-  * register the PCI bus driver, ordered workqueue and platform driver.
-  */
- static int __init pcie_init(void)
- {
- int ret = 0, i;
- #ifndef CONFIG_SEC_PCIE
- char rc_name[MAX_RC_NAME_LEN];
- #endif
- void __iomem *reg_addr;
- pr_debug("pcie:%s.\n", __func__);
- pcie_drv.rc_num = 0;
- mutex_init(&pcie_drv.drv_lock);
- mutex_init(&pcie_drv.rpmsg_lock);
- for (i = 0; i < MAX_RC_NUM; i++) {
- #ifndef CONFIG_SEC_PCIE
- /* three IPC log contexts per RC: short, long and register dump */
- scnprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-short", i);
- msm_pcie_dev[i].ipc_log =
- ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
- if (msm_pcie_dev[i].ipc_log == NULL)
- pr_err("%s: unable to create IPC log context for %s\n",
- __func__, rc_name);
- else
- PCIE_DBG(&msm_pcie_dev[i],
- "PCIe IPC logging is enable for RC%d\n",
- i);
- scnprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-long", i);
- msm_pcie_dev[i].ipc_log_long =
- ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
- if (msm_pcie_dev[i].ipc_log_long == NULL)
- pr_err("%s: unable to create IPC log context for %s\n",
- __func__, rc_name);
- else
- PCIE_DBG(&msm_pcie_dev[i],
- "PCIe IPC logging %s is enable for RC%d\n",
- rc_name, i);
- scnprintf(rc_name, MAX_RC_NAME_LEN, "pcie%d-dump", i);
- msm_pcie_dev[i].ipc_log_dump =
- ipc_log_context_create(PCIE_LOG_PAGES, rc_name, 0);
- if (msm_pcie_dev[i].ipc_log_dump == NULL)
- pr_err("%s: unable to create IPC log context for %s\n",
- __func__, rc_name);
- else
- PCIE_DBG(&msm_pcie_dev[i],
- "PCIe IPC logging %s is enable for RC%d\n",
- rc_name, i);
- #endif
- spin_lock_init(&msm_pcie_dev[i].cfg_lock);
- spin_lock_init(&msm_pcie_dev[i].evt_reg_list_lock);
- msm_pcie_dev[i].cfg_access = true;
- mutex_init(&msm_pcie_dev[i].enumerate_lock);
- mutex_init(&msm_pcie_dev[i].setup_lock);
- mutex_init(&msm_pcie_dev[i].recovery_lock);
- mutex_init(&msm_pcie_dev[i].aspm_lock);
- mutex_init(&msm_pcie_dev[i].drv_pc_lock);
- spin_lock_init(&msm_pcie_dev[i].irq_lock);
- msm_pcie_dev[i].drv_ready = false;
- msm_pcie_dev[i].l23_rdy_poll_timeout = L23_READY_POLL_TIMEOUT;
- INIT_WORK(&msm_pcie_dev[i].drv_disable_pc_work,
- msm_pcie_drv_disable_pc);
- INIT_WORK(&msm_pcie_dev[i].drv_enable_pc_work,
- msm_pcie_drv_enable_pc);
- INIT_LIST_HEAD(&msm_pcie_dev[i].enum_ep_list);
- INIT_LIST_HEAD(&msm_pcie_dev[i].susp_ep_list);
- INIT_LIST_HEAD(&msm_pcie_dev[i].event_reg_list);
- #ifdef CONFIG_SEC_PCIE_L1SS
- mutex_init(&msm_pcie_dev[i].l1ss_ctrl_lock);
- msm_pcie_dev[i].l1ss_disable_flag = 0;
- msm_pcie_dev[i].pending_l1ss_ctrl = false;
- msm_pcie_dev[i].ep_config_accessible = false;
- #endif
- }
- #if IS_ENABLED(CONFIG_I2C)
- /* best effort: PCIe itself still works without the i2c helper */
- ret = i2c_add_driver(&pcie_i2c_ctrl_driver);
- if (ret != 0)
- pr_err("Failed to add i2c ctrl driver: %d\n", ret);
- #endif
- crc8_populate_msb(msm_pcie_crc8_table, MSM_PCIE_CRC8_POLYNOMIAL);
- msm_pcie_debugfs_init();
- /* 'count' is presumably the module-param element count for
-  * pcie_sm_regs[] — only touch the SM registers when fully specified.
-  */
- if (count == MAX_PCIE_SM_REGS) {
- for (i = 0; i < pcie_sm_regs[PCIE_SM_NUM_INSTANCES]; i++) {
- reg_addr = ioremap(pcie_sm_regs[PCIE_SM_BASE] +
- pcie_sm_regs[PCIE_SM_PWR_CTRL_OFFSET] +
- (i * pcie_sm_regs[PCIE_SM_PWR_INSTANCE_OFFSET]), 4);
- msm_pcie_write_reg(reg_addr, 0x0, 0x1);
- iounmap(reg_addr);
- reg_addr = ioremap(pcie_sm_regs[PCIE_SM_BASE] +
- pcie_sm_regs[PCIE_SM_PWR_MASK_OFFSET] +
- (i * pcie_sm_regs[PCIE_SM_PWR_INSTANCE_OFFSET]), 4);
- msm_pcie_write_reg(reg_addr, 0x0, 0x1);
- iounmap(reg_addr);
- }
- }
- ret = pci_register_driver(&msm_pci_driver);
- if (ret)
- return ret;
- mpcie_wq = alloc_ordered_workqueue("mpcie_wq",
- WQ_MEM_RECLAIM | WQ_HIGHPRI);
- if (!mpcie_wq)
- return -ENOMEM;
- pcie_drv.nb.notifier_call = msm_pcie_ssr_notifier;
- INIT_WORK(&pcie_drv.drv_connect, msm_pcie_drv_connect_worker);
- pcie_drv.msm_pcie_dev = msm_pcie_dev;
- #ifdef CONFIG_SEC_PCIE_L1SS
- l1ss_ctrl_wq = create_singlethread_workqueue("pcie_l1ss_ctrl_wq");
- for (i = 0; i < L1SS_MAX; i++)
- INIT_DELAYED_WORK(&l1ss_ctrls[i].dwork,
- sec_pcie_l1ss_ctrl_worker);
- sec_pcie_dev = sec_device_create(NULL, "pcie-wifi");
- if (IS_ERR(sec_pcie_dev)) {
- pr_err("%s: Failed to create pcie device\n", __func__);
- goto sec_device_err;
- }
- if (device_create_file(sec_pcie_dev, &dev_attr_pcie_l1ss_ctrl) < 0) {
- pr_err("%s: Failed to create pcie_l1ss_ctrl\n", __func__);
- goto sec_device_err;
- }
- /* NOTE(review): label sits directly after the gotos, so sec device
-  * failures are deliberately non-fatal fall-throughs.
-  */
- sec_device_err:
- #endif
- ret = platform_driver_register(&msm_pcie_driver);
- if (ret)
- destroy_workqueue(mpcie_wq);
- return ret;
- }
- /* Module exit: unwind pcie_init() — i2c driver, workqueue, platform
-  * driver, debugfs and per-RC sysfs.
-  */
- static void __exit pcie_exit(void)
- {
- int i;
- pr_info("PCIe: %s\n", __func__);
- #if IS_ENABLED(CONFIG_I2C)
- i2c_del_driver(&pcie_i2c_ctrl_driver);
- #endif
- if (mpcie_wq)
- destroy_workqueue(mpcie_wq);
- platform_driver_unregister(&msm_pcie_driver);
- msm_pcie_debugfs_exit();
- for (i = 0; i < MAX_RC_NUM; i++)
- msm_pcie_sysfs_exit(&msm_pcie_dev[i]);
- }
- /* init runs late in subsys phase so clock/regulator providers are ready */
- subsys_initcall_sync(pcie_init);
- module_exit(pcie_exit);
- /* RC do not represent the right class; set it to PCI_CLASS_BRIDGE_PCI */
- static void msm_pcie_fixup_early(struct pci_dev *dev)
- {
- struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
- PCIE_DBG(pcie_dev, "hdr_type %d\n", dev->hdr_type);
- /* keep the low byte (prog-if), replace base class/subclass */
- if (pci_is_root_bus(dev->bus))
- dev->class = (dev->class & 0xff) | (PCI_CLASS_BRIDGE_PCI << 8);
- }
- DECLARE_PCI_FIXUP_EARLY(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
- msm_pcie_fixup_early);
- /* Disable the L1ss-timeout debug interrupt and stop its AHB-clock timer. */
- static void __msm_pcie_l1ss_timeout_disable(struct msm_pcie_dev_t *pcie_dev)
- {
- msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_DEBUG_INT_EN,
- PCIE20_PARF_DEBUG_INT_EN_L1SUB_TIMEOUT_BIT, 0);
- msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER,
- 0);
- }
- /* Arm the L1ss-timeout debug interrupt: reset the AHB-clock max timer,
-  * clear any latched event, enable the interrupt, then program the timer
-  * with the tick count for L1SS_TIMEOUT_US at the aux clock frequency
-  * (keeping the reset bit set, as the written value shows).
-  */
- static void __msm_pcie_l1ss_timeout_enable(struct msm_pcie_dev_t *pcie_dev)
- {
- u32 val = 0;
- msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER,
- PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER_RESET);
- msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_INT_ALL_CLEAR, 0,
- BIT(MSM_PCIE_INT_EVT_L1SUB_TIMEOUT));
- msm_pcie_write_mask(pcie_dev->parf + PCIE20_PARF_DEBUG_INT_EN, 0,
- PCIE20_PARF_DEBUG_INT_EN_L1SUB_TIMEOUT_BIT);
- val = PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER_RESET |
- L1SS_TIMEOUT_US_TO_TICKS(L1SS_TIMEOUT_US,
- pcie_dev->aux_clk_freq);
- msm_pcie_write_reg(pcie_dev->parf, PCIE20_PARF_L1SUB_AHB_CLK_MAX_TIMER,
- val);
- }
- /* Suspend the PCIe link */
- /* Saves config space (when the link is confirmed up), blocks further
-  * config access, sends PME_Turn_Off via ELBI and polls PARF_PM_STTS_1
-  * for L23_Ready, then powers the port down via msm_pcie_disable().
-  * Returns 0 or a negative errno from the save/load steps.
-  */
- static int msm_pcie_pm_suspend(struct pci_dev *dev,
- void *user, void *data, u32 options)
- {
- int ret = 0;
- u32 val = 0;
- int ret_l23;
- unsigned long irqsave_flags;
- struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
- #ifdef CONFIG_SEC_PCIE
- u32 ltssm_pre = 0, ltssm_post = 0, rc_linkup = 0, ep_linkup = 0;
- #endif
- PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);
- #if defined(CONFIG_SEC_PANIC_PCIE_ERR) && defined(CONFIG_SEC_PCIE_AER)
- /* outstanding AER interrupts are treated as fatal unless ignored */
- if (!pcie_dev->ignore_pcie_error && pcie_dev->aer_irq_counter) {
- panic("PCIe RC%d AER detect(%lu)!\n",
- pcie_dev->rc_idx, pcie_dev->aer_irq_counter);
- }
- #endif
- spin_lock_irqsave(&pcie_dev->irq_lock, irqsave_flags);
- pcie_dev->suspending = true;
- spin_unlock_irqrestore(&pcie_dev->irq_lock, irqsave_flags);
- if (pcie_dev->config_recovery) {
- if (work_pending(&pcie_dev->link_recover_wq)) {
- PCIE_DBG(pcie_dev,
- "RC%d: cancel link_recover_wq at pm suspend\n",
- pcie_dev->rc_idx);
- cancel_work_sync(&pcie_dev->link_recover_wq);
- }
- }
- if (!pcie_dev->power_on) {
- PCIE_DBG(pcie_dev,
- "PCIe: power of RC%d has been turned off.\n",
- pcie_dev->rc_idx);
- return ret;
- }
- /* NOTE(review): dev was already dereferenced above (dev->bus), so this
-  * null check is dead; kept as-is.
-  */
- if (dev) {
- #ifdef CONFIG_SEC_PCIE
- PCIE_DBG(pcie_dev,
- "dev->bus->number = %d dev->bus->primary = %d\n",
- dev->bus->number, dev->bus->primary);
- rc_linkup = msm_pcie_confirm_linkup(pcie_dev, true, false, dev);
- if (rc_linkup) {
- /* reading the EP's vendor/device ID probes link health */
- ep_linkup = readl_relaxed(pcie_dev->conf);
- PCIE_DBG(pcie_dev,
- "PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
- pcie_dev->rc_idx, ep_linkup);
- if (ep_linkup == PCIE_LINK_DOWN) {
- PCIE_ERR(pcie_dev,
- "PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n",
- pcie_dev->rc_idx, pcie_dev->rc_idx, ep_linkup);
- }
- ret = pci_save_state(dev);
- }
- #else
- if (msm_pcie_confirm_linkup(pcie_dev, true, true, dev)) {
- PCIE_DBG(pcie_dev, "PCIe: RC%d: save config space\n",
- pcie_dev->rc_idx);
- ret = pci_save_state(dev);
- if (ret) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: fail to save state:%d.\n",
- pcie_dev->rc_idx, ret);
- pcie_dev->suspending = false;
- return ret;
- }
- }
- #endif
- /* NOTE(review): this else binds across the #ifdef to whichever
-  * linkup-if was compiled in; fragile but intentional.
-  */
- else {
- kfree(pcie_dev->saved_state);
- pcie_dev->saved_state = NULL;
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: load default config space\n",
- pcie_dev->rc_idx);
- ret = pci_load_saved_state(dev, pcie_dev->default_state);
- if (ret) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: fail to load default state:%d.\n",
- pcie_dev->rc_idx, ret);
- pcie_dev->suspending = false;
- return ret;
- }
- }
- PCIE_DBG(pcie_dev, "PCIe: RC%d: store saved state\n",
- pcie_dev->rc_idx);
- pcie_dev->saved_state = pci_store_saved_state(dev);
- }
- spin_lock_irqsave(&pcie_dev->cfg_lock,
- pcie_dev->irqsave_flags);
- pcie_dev->cfg_access = false;
- spin_unlock_irqrestore(&pcie_dev->cfg_lock,
- pcie_dev->irqsave_flags);
- #ifdef CONFIG_SEC_PCIE
- ltssm_pre = readl_relaxed(pcie_dev->parf + PCIE20_PARF_LTSSM);
- PCIE_INFO(pcie_dev, "PCIe RC%d: PARF LTSSM_STATE: %s\n",
- pcie_dev->rc_idx, TO_LTSSM_STR(ltssm_pre & 0x3f));
- rc_linkup = (u32)msm_pcie_confirm_linkup(pcie_dev, true, false, dev);
- if (rc_linkup && (ep_linkup != PCIE_LINK_DOWN)) {
- ep_linkup = readl_relaxed(pcie_dev->conf);
- PCIE_DBG(pcie_dev,
- "PCIe: device ID and vender ID of EP of RC %d are 0x%x.\n",
- pcie_dev->rc_idx, ep_linkup);
- if (ep_linkup == PCIE_LINK_DOWN) {
- PCIE_ERR(pcie_dev,
- "PCIe: The link of RC %d is not really up; device ID and vender ID of EP of RC %d are 0x%x.\n",
- pcie_dev->rc_idx, pcie_dev->rc_idx, ep_linkup);
- }
- }
- #endif
- /* BIT(4) in ELBI_SYS_CTRL triggers PME_Turn_Off (see log below) */
- writel_relaxed(BIT(4), pcie_dev->elbi + PCIE20_ELBI_SYS_CTRL);
- wmb(); /* ensure changes propagated to the hardware */
- PCIE_INFO(pcie_dev, "RC%d: PME_TURNOFF_MSG is sent out\n",
- pcie_dev->rc_idx);
- ret_l23 = readl_poll_timeout((pcie_dev->parf + PCIE20_PARF_PM_STTS_1),
- val, PCIE_LINK_IN_L2_STATE(val),
- 9000, pcie_dev->l23_rdy_poll_timeout);
- #ifdef CONFIG_SEC_PCIE
- ltssm_post = readl_relaxed(pcie_dev->parf + PCIE20_PARF_LTSSM);
- PCIE_INFO(pcie_dev, "PCIe RC%d: PARF LTSSM_STATE: %s\n",
- pcie_dev->rc_idx, TO_LTSSM_STR(ltssm_post & 0x3f));
- #endif
- /* check L23_Ready */
- PCIE_DBG(pcie_dev, "RC%d: PCIE20_PARF_PM_STTS_1 is 0x%x.\n",
- pcie_dev->rc_idx,
- readl_relaxed(pcie_dev->parf + PCIE20_PARF_PM_STTS_1));
- if (!ret_l23)
- PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is received\n",
- pcie_dev->rc_idx);
- else {
- #ifdef CONFIG_SEC_PCIE
- PCIE_INFO(pcie_dev, "RC%d: PM_Enter_L23 is NOT received. RC(%s) EP(0x%08x) LTSSM(%s, %s)\n",
- pcie_dev->rc_idx, rc_linkup ? "OK" : "NG", ep_linkup, TO_LTSSM_STR(ltssm_pre & 0x3f), TO_LTSSM_STR(ltssm_post & 0x3f));
- #ifdef CONFIG_SEC_PANIC_PCIE_ERR
- if (is_need_pcie_error_oops((struct pci_dev *)user, pcie_dev))
- panic("PCIe RC%d Fail(L23). RC(%s) EP(0x%08x) LTSSM(%s, %s)\n",
- pcie_dev->rc_idx, rc_linkup ? "OK" : "NG", ep_linkup, TO_LTSSM_STR(ltssm_pre & 0x3f), TO_LTSSM_STR(ltssm_post & 0x3f));
- #endif
- #else
- PCIE_DBG(pcie_dev, "RC%d: PM_Enter_L23 is NOT received\n",
- pcie_dev->rc_idx);
- #endif
- }
- if (pcie_dev->use_pinctrl && pcie_dev->pins_sleep)
- pinctrl_select_state(pcie_dev->pinctrl,
- pcie_dev->pins_sleep);
- msm_pcie_disable(pcie_dev);
- PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
- return ret;
- }
- /* PCI suspend fixup for the root port: run msm_pcie_pm_suspend() under
-  * recovery_lock unless the link is not up, the device is not on the root
-  * bus, or a user vote (disable_pc) forbids power collapse.
-  */
- static void msm_pcie_fixup_suspend(struct pci_dev *dev)
- {
- int ret;
- struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
- PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
- if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED ||
- !pci_is_root_bus(dev->bus))
- return;
- spin_lock_irqsave(&pcie_dev->cfg_lock,
- pcie_dev->irqsave_flags);
- if (pcie_dev->disable_pc) {
- PCIE_DBG(pcie_dev,
- "RC%d: Skip suspend because of user request\n",
- pcie_dev->rc_idx);
- spin_unlock_irqrestore(&pcie_dev->cfg_lock,
- pcie_dev->irqsave_flags);
- return;
- }
- spin_unlock_irqrestore(&pcie_dev->cfg_lock,
- pcie_dev->irqsave_flags);
- mutex_lock(&pcie_dev->recovery_lock);
- ret = msm_pcie_pm_suspend(dev, NULL, NULL, 0);
- if (ret)
- PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in suspend:%d.\n",
- pcie_dev->rc_idx, ret);
- mutex_unlock(&pcie_dev->recovery_lock);
- }
- DECLARE_PCI_FIXUP_SUSPEND(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
- msm_pcie_fixup_suspend);
- /* Resume the PCIe link */
- /* Re-selects default pinctrl, re-opens config access, brings the link up
-  * via msm_pcie_enable(), restores saved config space and replays any
-  * pending L1/L1ss reconfiguration recorded while suspended.
-  * Returns 0 or the msm_pcie_enable() errno.
-  */
- static int msm_pcie_pm_resume(struct pci_dev *dev,
- void *user, void *data, u32 options)
- {
- int ret;
- #ifdef CONFIG_SEC_PCIE
- u32 val;
- #endif
- struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
- PCIE_DBG(pcie_dev, "RC%d: entry\n", pcie_dev->rc_idx);
- if (pcie_dev->use_pinctrl && pcie_dev->pins_default)
- pinctrl_select_state(pcie_dev->pinctrl,
- pcie_dev->pins_default);
- spin_lock_irqsave(&pcie_dev->cfg_lock,
- pcie_dev->irqsave_flags);
- pcie_dev->cfg_access = true;
- spin_unlock_irqrestore(&pcie_dev->cfg_lock,
- pcie_dev->irqsave_flags);
- ret = msm_pcie_enable(pcie_dev);
- if (ret) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d fail to enable PCIe link in resume.\n",
- pcie_dev->rc_idx);
- return ret;
- }
- pcie_dev->suspending = false;
- PCIE_DBG(pcie_dev,
- "dev->bus->number = %d dev->bus->primary = %d\n",
- dev->bus->number, dev->bus->primary);
- /* NOTE(review): dev was already dereferenced above, so this null
-  * check is dead; kept as-is.
-  */
- if (dev) {
- PCIE_DBG(pcie_dev, "RC%d: restore config space\n",
- pcie_dev->rc_idx);
- /*
- * Pci framework tries to read the pm_cap config register
- * during the system resume process and since our pcie
- * controller might not have the clocks/regulators on at
- * that time, framework will put the power_state as D3Cold.
- *
- * Since the power_state is D3Cold, pci_restore_state API
- * will not be able to write the MSI address to config space.
- * Thereby resulting in a smmu fault when trying to rise a
- * MSI for the AER.
- */
- pci_set_power_state(dev, PCI_D0);
- pci_load_and_free_saved_state(dev, &pcie_dev->saved_state);
- pci_restore_state(dev);
- #ifdef CONFIG_SEC_PCIE_L1SS
- /*
- * restore the configuratoins for l1/l1ss
- * which are set during PCIe suspend period
- */
- mutex_lock(&pcie_dev->l1ss_ctrl_lock);
- if (pcie_dev->pending_l1ss_ctrl) {
- msm_pcie_config_l1_disable_all(pcie_dev, dev->bus);
- if (pcie_dev->l1ss_disable_flag)
- msm_pcie_config_l1ss_disable_all(pcie_dev, dev->bus);
- else
- msm_pcie_config_l1ss_enable_all(pcie_dev);
- msm_pcie_config_l1_enable_all(pcie_dev);
- pcie_dev->pending_l1ss_ctrl = false;
- }
- mutex_unlock(&pcie_dev->l1ss_ctrl_lock);
- #endif
- }
- #ifdef CONFIG_SEC_PCIE
- /* offset 0x4 = PCI command/status; bit 2 is Bus Master Enable */
- val = readl_relaxed(pcie_dev->dm_core + 0x4);
- if (!(val & (1 << 2)))
- PCIE_ERR(pcie_dev, "RC%d: BME is not set. conf[4] = 0x%08x\n",
- pcie_dev->rc_idx, val);
- #endif
- PCIE_DBG(pcie_dev, "RC%d: exit\n", pcie_dev->rc_idx);
- return ret;
- }
- static void msm_pcie_fixup_resume(struct pci_dev *dev)
- {
- int ret;
- struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
- PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
- if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
- pcie_dev->user_suspend || !pci_is_root_bus(dev->bus))
- return;
- mutex_lock(&pcie_dev->recovery_lock);
- ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
- if (ret)
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d got failure in fixup resume:%d.\n",
- pcie_dev->rc_idx, ret);
- mutex_unlock(&pcie_dev->recovery_lock);
- }
- DECLARE_PCI_FIXUP_RESUME(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
- msm_pcie_fixup_resume);
- static void msm_pcie_fixup_resume_early(struct pci_dev *dev)
- {
- int ret;
- struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(dev->bus);
- PCIE_DBG(pcie_dev, "RC%d\n", pcie_dev->rc_idx);
- if ((pcie_dev->link_status != MSM_PCIE_LINK_DISABLED) ||
- pcie_dev->user_suspend || !pci_is_root_bus(dev->bus))
- return;
- mutex_lock(&pcie_dev->recovery_lock);
- ret = msm_pcie_pm_resume(dev, NULL, NULL, 0);
- if (ret)
- PCIE_ERR(pcie_dev, "PCIe: RC%d got failure in resume:%d.\n",
- pcie_dev->rc_idx, ret);
- mutex_unlock(&pcie_dev->recovery_lock);
- }
- DECLARE_PCI_FIXUP_RESUME_EARLY(PCIE_VENDOR_ID_QCOM, PCI_ANY_ID,
- msm_pcie_fixup_resume_early);
- static int msm_pcie_drv_send_rpmsg(struct msm_pcie_dev_t *pcie_dev,
- struct msm_pcie_drv_msg *msg)
- {
- struct msm_pcie_drv_info *drv_info = pcie_dev->drv_info;
- int ret, re_try = 20; /* sleep 5 ms per re-try */
- struct rpmsg_device *rpdev;
- /* This function becomes a dummy call when CESTA support is present */
- if (pcie_dev->pcie_sm)
- return 0;
- mutex_lock(&pcie_drv.rpmsg_lock);
- rpdev = pcie_drv.rpdev;
- if (!pcie_drv.rpdev) {
- ret = -EIO;
- goto out;
- }
- reinit_completion(&drv_info->completion);
- drv_info->reply_seq = drv_info->seq++;
- msg->hdr.seq = drv_info->reply_seq;
- if (unlikely(drv_info->seq == MSM_PCIE_DRV_SEQ_RESV))
- drv_info->seq = 0;
- PCIE_DBG(pcie_dev, "PCIe: RC%d: DRV: sending rpmsg: command: 0x%x\n",
- pcie_dev->rc_idx, msg->pkt.dword[0]);
- retry:
- ret = rpmsg_trysend(rpdev->ept, msg, sizeof(*msg));
- if (ret) {
- if (ret == -EBUSY && re_try) {
- usleep_range(5000, 5001);
- re_try--;
- goto retry;
- }
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: DRV: failed to send rpmsg, ret:%d\n",
- pcie_dev->rc_idx, ret);
- goto out;
- }
- ret = wait_for_completion_timeout(&drv_info->completion,
- msecs_to_jiffies(drv_info->timeout_ms));
- if (!ret) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: DRV: completion timeout for rpmsg\n",
- pcie_dev->rc_idx);
- ret = -ETIMEDOUT;
- goto out;
- }
- ret = 0;
- PCIE_DBG(pcie_dev, "PCIe: RC%d: DRV: rpmsg successfully sent\n",
- pcie_dev->rc_idx);
- out:
- mutex_unlock(&pcie_drv.rpmsg_lock);
- return ret;
- }
/*
 * msm_pcie_drv_resume() - take the link back from DRV (remote-subsystem
 * controlled) low-power state into normal APPS control.
 *
 * Re-enables rails, GDSC, ICC bandwidth and clocks, undoes CLKREQ
 * override muxing left over from DRV suspend, tells the DRV subsystem to
 * release the link, and restores link state bookkeeping.
 *
 * Lock order: recovery_lock -> setup_lock (-> aspm_lock), matching
 * msm_pcie_drv_suspend().  Returns 0 on success, or the ICC vote error.
 */
static int msm_pcie_drv_resume(struct msm_pcie_dev_t *pcie_dev)
{
	struct msm_pcie_drv_info *drv_info = pcie_dev->drv_info;
	struct msm_pcie_clk_info_t *clk_info;
	u32 clkreq_override_en = 0;
	int ret, i, rpmsg_ret = 0;
#ifdef CONFIG_SEC_PCIE
	u32 val;
	/* [0] = RC target speed, [1] = EP target speed, [2] = current speed */
	u32 link_speed[3] = {0,}, link_width = 0;
#endif
#ifdef CONFIG_SEC_PCIE_L1SS
	struct pci_dev *dev;
#endif

	mutex_lock(&pcie_dev->recovery_lock);
	mutex_lock(&pcie_dev->setup_lock);

	/* if DRV hand-off was done and DRV subsystem is powered up */
	if (PCIE_RC_DRV_ENABLED(pcie_dev->rc_idx) &&
	    !pcie_dev->l1ss_sleep_disable)
		rpmsg_ret = msm_pcie_drv_send_rpmsg(pcie_dev,
					&drv_info->drv_disable_l1ss_sleep);

	msm_pcie_vreg_init(pcie_dev);

	PCIE_DBG(pcie_dev, "PCIe: RC%d:enable gdsc-core\n", pcie_dev->rc_idx);
	if (pcie_dev->gdsc_core && !pcie_dev->gdsc_clk_drv_ss_nonvotable) {
		ret = regulator_enable(pcie_dev->gdsc_core);
		if (ret)
			/* Log only: resume continues best-effort without GDSC. */
			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: failed to enable GDSC: ret %d\n",
				pcie_dev->rc_idx, ret);
	}

	PCIE_DBG(pcie_dev, "PCIe: RC%d:set ICC path vote\n", pcie_dev->rc_idx);
	ret = msm_pcie_icc_vote(pcie_dev, pcie_dev->current_link_speed,
				pcie_dev->current_link_width, false);
	if (ret)
		goto out;

	PCIE_DBG(pcie_dev, "PCIe: RC%d:turn on unsuppressible clks\n",
		pcie_dev->rc_idx);

	/* turn on all unsuppressible clocks */
	clk_info = pcie_dev->clk;
	for (i = 0; i < pcie_dev->num_clk; i++, clk_info++) {
		if (clk_info->hdl && !clk_info->suppressible) {
			ret = clk_prepare_enable(clk_info->hdl);
			if (ret)
				PCIE_DBG(pcie_dev,
					"PCIe: RC%d:clk_prepare_enable failed for %s\n",
					pcie_dev->rc_idx, clk_info->name);
		}
	}
	PCIE_DBG(pcie_dev, "PCIe: RC%d:turn on unsuppressible clks Done.\n",
		pcie_dev->rc_idx);

	msm_pcie_cesta_disable_drv(pcie_dev);

	clkreq_override_en = readl_relaxed(pcie_dev->parf +
				PCIE20_PARF_CLKREQ_OVERRIDE) &
				PCIE20_PARF_CLKREQ_IN_ENABLE;
	if (clkreq_override_en)
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: CLKREQ Override detected\n",
			pcie_dev->rc_idx);

	/*
	 * if PCIe CLKREQ override is still enabled, then make sure PCIe PIPE
	 * clk source mux is set to PCIe PIPE CLK. Similarly set phy aux clk src
	 * to phy aux clk before enabling PCIe PIPE CLK and phy aux clk.
	 * APPS votes for mux was PCIe PIPE and phy aux clk before DRV suspend.
	 * In order to vote for PCIe PIPE and phy aux clk, need to first set mux
	 * to XO then PCIe PIPE and phy aux clk or else clock driver will
	 * short the request.
	 */
	if (clkreq_override_en) {
		if (pcie_dev->pipe_clk_mux) {
			if (pcie_dev->ref_clk_src) {
				PCIE_DBG(pcie_dev,
					"PCIe: RC%d: setting PCIe PIPE MUX to XO\n",
					pcie_dev->rc_idx);
				clk_set_parent(pcie_dev->pipe_clk_mux,
						pcie_dev->ref_clk_src);
			}

			if (pcie_dev->pipe_clk_ext_src) {
				PCIE_DBG(pcie_dev,
					"PCIe: RC%d: setting PCIe PIPE MUX to PCIe PIPE\n",
					pcie_dev->rc_idx);
				clk_set_parent(pcie_dev->pipe_clk_mux,
						pcie_dev->pipe_clk_ext_src);
			}
		}

		if (pcie_dev->phy_aux_clk_mux) {
			if (pcie_dev->ref_clk_src) {
				PCIE_DBG(pcie_dev,
					"PCIe: RC%d: setting PCIe phy aux MUX to XO\n",
					pcie_dev->rc_idx);
				clk_set_parent(pcie_dev->phy_aux_clk_mux,
						pcie_dev->ref_clk_src);
			}

			if (pcie_dev->phy_aux_clk_ext_src) {
				PCIE_DBG(pcie_dev,
					"PCIe: RC%d: setting PCIe phy aux MUX to phy aux clk\n",
					pcie_dev->rc_idx);
				clk_set_parent(pcie_dev->phy_aux_clk_mux,
						pcie_dev->phy_aux_clk_ext_src);
			}
		}
	}

	PCIE_DBG(pcie_dev, "PCIe: RC%d:turn on pipe clk\n",
		pcie_dev->rc_idx);
	clk_info = pcie_dev->pipe_clk;
	for (i = 0; i < pcie_dev->num_pipe_clk; i++, clk_info++) {
		if (clk_info->hdl && !clk_info->suppressible) {
			ret = clk_prepare_enable(clk_info->hdl);
			if (ret)
				PCIE_DBG(pcie_dev,
					"PCIe: RC%d:clk_prepare_enable failed for %s\n",
					pcie_dev->rc_idx, clk_info->name);
		}
	}
	PCIE_DBG(pcie_dev, "PCIe: RC%d:turn on pipe clk, Done\n",
		pcie_dev->rc_idx);

	if (clkreq_override_en) {
		/* remove CLKREQ override */
		msm_pcie_write_reg_field(pcie_dev->parf,
					PCIE20_PARF_CLKREQ_OVERRIDE,
					PCIE20_PARF_CLKREQ_IN_ENABLE, 0);
		msm_pcie_write_reg_field(pcie_dev->parf,
					PCIE20_PARF_CLKREQ_OVERRIDE,
					PCIE20_PARF_CLKREQ_IN_VALUE, 0);
	}

	/* if DRV hand-off was done and DRV subsystem is powered up */
	if (PCIE_RC_DRV_ENABLED(pcie_dev->rc_idx) && !rpmsg_ret) {
		msm_pcie_drv_send_rpmsg(pcie_dev,
					&drv_info->drv_disable);
		clear_bit(pcie_dev->rc_idx, &pcie_drv.rc_drv_enabled);
	}

	/* scale CX and rate change based on current GEN speed */
	pcie_dev->current_link_speed = (readl_relaxed(pcie_dev->dm_core +
					PCIE20_CAP_LINKCTRLSTATUS) >> 16) &
					PCI_EXP_LNKSTA_CLS;

	msm_pcie_scale_link_bandwidth(pcie_dev, pcie_dev->current_link_speed);

	pcie_dev->user_suspend = false;
	spin_lock_irq(&pcie_dev->cfg_lock);
	pcie_dev->cfg_access = true;
	spin_unlock_irq(&pcie_dev->cfg_lock);
	mutex_lock(&pcie_dev->aspm_lock);
	pcie_dev->link_status = MSM_PCIE_LINK_ENABLED;
	mutex_unlock(&pcie_dev->aspm_lock);

	/* resume access to MSI register as link is resumed */
	if (!pcie_dev->lpi_enable)
		msm_msi_config_access(dev_get_msi_domain(&pcie_dev->dev->dev),
					true);

	if (!pcie_dev->pcie_sm)
		enable_irq(pcie_dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);

#ifdef CONFIG_SEC_PCIE
	val = readl_relaxed(pcie_dev->parf + PCIE20_PARF_LTSSM);
	PCIE_INFO(pcie_dev, "PCIe RC%d: LTSSM_STATE: %s\n",
		pcie_dev->rc_idx, TO_LTSSM_STR(val & 0x3f));

	/* All-ones read from EP config space means the link is down. */
	if (readl_relaxed(pcie_dev->conf) != PCIE_LINK_DOWN) {
		link_speed[0] = pcie_get_target_linkspeed(pcie_dev->rc_idx, 0);
		link_speed[1] = pcie_get_target_linkspeed(pcie_dev->rc_idx, 1);
		PCIE_INFO(pcie_dev, "PCIe RC%d Target GEN%d, EP GEN%d\n",
			pcie_dev->rc_idx, link_speed[0], link_speed[1]);
		pcie_get_cur_link_bw(pcie_dev->rc_idx, &link_speed[2], &link_width);
		PCIE_INFO(pcie_dev, "PCIe RC%d Current GEN%d, %d lanes\n",
			pcie_dev->rc_idx, link_speed[2], link_width);
		if (link_speed[0] != link_speed[2]) {
			set_bit(PCIE_ERROR_LINK_SPEED_MISMATCH, &pcie_dev->pcie_error);
#ifdef CONFIG_SEC_PANIC_PCIE_ERR
			panic("PCIe: RC%d: link speed fail(GEN%d -> %d)\n",
				pcie_dev->rc_idx, link_speed[0], link_speed[2]);
#endif
		}
	} else {
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: endpoint config space is not accessible\n",
			pcie_dev->rc_idx);
	}
#endif

#ifdef CONFIG_SEC_PCIE_L1SS
	/*
	 * restore the configurations for l1/l1ss
	 * which are set during PCIe suspend period
	 */
	mutex_lock(&pcie_dev->l1ss_ctrl_lock);
	dev = pcie_dev->dev;
	if (pcie_dev->pending_l1ss_ctrl) {
		msm_pcie_config_l1_disable_all(pcie_dev, dev->bus);
		if (pcie_dev->l1ss_disable_flag)
			msm_pcie_config_l1ss_disable_all(pcie_dev, dev->bus);
		else
			msm_pcie_config_l1ss_enable_all(pcie_dev);
		msm_pcie_config_l1_enable_all(pcie_dev);
		pcie_dev->pending_l1ss_ctrl = false;
	}
	mutex_unlock(&pcie_dev->l1ss_ctrl_lock);

	pcie_dev->ep_config_accessible = true;
#endif

	mutex_unlock(&pcie_dev->setup_lock);
	mutex_unlock(&pcie_dev->recovery_lock);

	return 0;
out:
	mutex_unlock(&pcie_dev->setup_lock);
	mutex_unlock(&pcie_dev->recovery_lock);

	return ret;
}
- static int msm_pcie_drv_suspend(struct msm_pcie_dev_t *pcie_dev,
- u32 options)
- {
- struct msm_pcie_drv_info *drv_info = pcie_dev->drv_info;
- struct msm_pcie_clk_info_t *clk_info;
- int ret, i;
- unsigned long irqsave_flags, cfg_irqsave_flags;
- u32 ab = 0, ib = 0;
- /* If CESTA is available then drv is always supported */
- if (!pcie_dev->pcie_sm && !drv_info->ep_connected) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: DRV: client requests to DRV suspend while not connected\n",
- pcie_dev->rc_idx);
- return -EINVAL;
- }
- #ifdef CONFIG_SEC_PANIC_PCIE_ERR
- if (!pcie_dev->ignore_pcie_error && pcie_dev->aer_irq_counter) {
- panic("PCIe RC%d AER detect(%lu)!\n",
- pcie_dev->rc_idx, pcie_dev->aer_irq_counter);
- }
- #endif
- mutex_lock(&pcie_dev->recovery_lock);
- /* disable global irq - no more linkdown/aer detection */
- if (!pcie_dev->pcie_sm)
- disable_irq(pcie_dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
- ret = msm_pcie_drv_send_rpmsg(pcie_dev, &drv_info->drv_enable);
- if (ret) {
- ret = -EBUSY;
- goto out;
- }
- /* suspend access to MSI register. resume access in drv_resume */
- if (!pcie_dev->lpi_enable)
- msm_msi_config_access(dev_get_msi_domain(&pcie_dev->dev->dev),
- false);
- pcie_dev->user_suspend = true;
- set_bit(pcie_dev->rc_idx, &pcie_drv.rc_drv_enabled);
- spin_lock_irqsave(&pcie_dev->irq_lock, irqsave_flags);
- spin_lock_irqsave(&pcie_dev->cfg_lock, cfg_irqsave_flags);
- pcie_dev->cfg_access = false;
- spin_unlock_irqrestore(&pcie_dev->cfg_lock, cfg_irqsave_flags);
- spin_unlock_irqrestore(&pcie_dev->irq_lock, irqsave_flags);
- mutex_lock(&pcie_dev->setup_lock);
- mutex_lock(&pcie_dev->aspm_lock);
- pcie_dev->link_status = MSM_PCIE_LINK_DRV;
- mutex_unlock(&pcie_dev->aspm_lock);
- #ifdef CONFIG_SEC_PCIE_L1SS
- pcie_dev->ep_config_accessible = false;
- #endif
- PCIE_ERR(pcie_dev, "PCIe: RC%d: prevnet_l1=%d LTSSM_STATE=:%s\n",
- pcie_dev->rc_idx,
- pcie_dev->prevent_l1,
- TO_LTSSM_STR(readl_relaxed(pcie_dev->parf + PCIE20_PARF_LTSSM) & 0x3f));
- if (pcie_dev->pcie_sm) {
- msm_pcie_cesta_enable_drv(pcie_dev,
- !(options & MSM_PCIE_CONFIG_NO_L1SS_TO));
- ab = ICC_AVG_BW;
- ib = ICC_PEAK_BW;
- }
- /* turn off all unsuppressible clocks */
- clk_info = pcie_dev->pipe_clk;
- for (i = 0; i < pcie_dev->num_pipe_clk; i++, clk_info++)
- if (clk_info->hdl && !clk_info->suppressible)
- clk_disable_unprepare(clk_info->hdl);
- clk_info = pcie_dev->clk;
- for (i = 0; i < pcie_dev->num_clk; i++, clk_info++)
- if (clk_info->hdl && !clk_info->suppressible)
- clk_disable_unprepare(clk_info->hdl);
- /* enable L1ss sleep if client allows it */
- if (!pcie_dev->l1ss_sleep_disable &&
- !(options & MSM_PCIE_CONFIG_NO_L1SS_TO))
- msm_pcie_drv_send_rpmsg(pcie_dev,
- &drv_info->drv_enable_l1ss_sleep);
- if (pcie_dev->pcie_sm)
- ret = msm_pcie_icc_vote(pcie_dev, pcie_dev->current_link_speed,
- pcie_dev->current_link_width, true);
- else
- ret = msm_pcie_icc_vote(pcie_dev, 0, 0, true);
- if (ret) {
- mutex_unlock(&pcie_dev->setup_lock);
- mutex_unlock(&pcie_dev->recovery_lock);
- return ret;
- }
- if (pcie_dev->gdsc_core && !pcie_dev->gdsc_clk_drv_ss_nonvotable)
- regulator_disable(pcie_dev->gdsc_core);
- msm_pcie_vreg_deinit(pcie_dev);
- mutex_unlock(&pcie_dev->setup_lock);
- mutex_unlock(&pcie_dev->recovery_lock);
- return 0;
- out:
- if (!pcie_dev->pcie_sm)
- enable_irq(pcie_dev->irq[MSM_PCIE_INT_GLOBAL_INT].num);
- mutex_unlock(&pcie_dev->recovery_lock);
- return ret;
- }
/*
 * msm_pcie_pm_control() - client-facing power-management entry point.
 * @pm_opt: requested operation (suspend/resume, DRV variants, PC control,
 * linkdown handling).
 * @busnr: endpoint bus number (used for logging only).
 * @user: the client's struct pci_dev; must be non-NULL.
 * @data: opaque client data forwarded to the suspend/resume handlers.
 * @options: MSM_PCIE_CONFIG_* flags (e.g. FORCE_SUSP, NO_DRV_PC).
 *
 * The RC tracks its endpoints on two lists: enum_ep_list (active) and
 * susp_ep_list (suspended).  A link suspend only proceeds once the last
 * active endpoint asks for it (or FORCE_SUSP is set); resume moves the
 * caller back to the active list.
 *
 * Returns 0 on success, a negative errno, -EPROBE_DEFER before probe
 * completes, or MSM_PCIE_ERROR for the DISABLE_PC-while-suspending race.
 */
int msm_pcie_pm_control(enum msm_pcie_pm_opt pm_opt, u32 busnr, void *user,
			void *data, u32 options)
{
	int ret = 0;
	struct pci_dev *dev;
	unsigned long flags;
	struct msm_pcie_dev_t *pcie_dev;
	struct msm_pcie_device_info *dev_info_itr, *temp, *dev_info = NULL;
	struct pci_dev *pcidev;
	bool force_rc_suspend = !!(options & MSM_PCIE_CONFIG_FORCE_SUSP);

	if (!user) {
		pr_err("PCIe: endpoint device is NULL\n");
		ret = -ENODEV;
		goto out;
	}

	pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)user)->bus);
	if (pcie_dev) {
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: pm_opt:%d;busnr:%d;options:%d\n",
			pcie_dev->rc_idx, pm_opt, busnr, options);
	} else {
		pr_err(
			"PCIe: did not find RC for pci endpoint device.\n"
			);
		ret = -ENODEV;
		goto out;
	}

	dev = pcie_dev->dev;
	pcidev = (struct pci_dev *)user;

	if (!pcie_dev->drv_ready) {
		PCIE_ERR(pcie_dev,
			"RC%d has not been successfully probed yet\n",
			pcie_dev->rc_idx);
		return -EPROBE_DEFER;
	}

	switch (pm_opt) {
	case MSM_PCIE_DRV_SUSPEND:
		PCIE_DBG(pcie_dev,
			"PCIe: RC%d: DRV: user requests for DRV suspend\n",
			pcie_dev->rc_idx);

		/* make sure disable pc is done before enabling drv */
		flush_work(&pcie_dev->drv_disable_pc_work);

		ret = msm_pcie_drv_suspend(pcie_dev, options);
		break;
	case MSM_PCIE_SUSPEND:
		PCIE_DBG(pcie_dev,
			"User of RC%d requests to suspend the link\n",
			pcie_dev->rc_idx);
		if (pcie_dev->link_status != MSM_PCIE_LINK_ENABLED)
			PCIE_DBG(pcie_dev,
				"PCIe: RC%d: requested to suspend when link is not enabled:%d.\n",
				pcie_dev->rc_idx, pcie_dev->link_status);

		if (!pcie_dev->power_on) {
			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: requested to suspend when link is powered down:%d.\n",
				pcie_dev->rc_idx, pcie_dev->link_status);
			break;
		}

		mutex_lock(&pcie_dev->recovery_lock);
		mutex_lock(&pcie_dev->enumerate_lock);
		/*
		 * Remove current user requesting for suspend from ep list and
		 * add it to suspend ep list. Reject susp if list is still not
		 * empty.
		 */
		list_for_each_entry_safe(dev_info_itr, temp,
					&pcie_dev->enum_ep_list, pcidev_node) {
			if (dev_info_itr->dev == pcidev) {
				list_del(&dev_info_itr->pcidev_node);
				dev_info = dev_info_itr;
				list_add_tail(&dev_info->pcidev_node,
						&pcie_dev->susp_ep_list);
				break;
			}
		}

		if (!dev_info)
			PCIE_DBG(pcie_dev,
				"PCIe: RC%d: ep BDF 0x%04x not in enum list\n",
				pcie_dev->rc_idx, PCI_DEVID(
						pcidev->bus->number,
						pcidev->devfn));

		/* Other endpoints still active: refuse unless forced. */
		if (!force_rc_suspend && !list_empty(&pcie_dev->enum_ep_list)) {
			PCIE_DBG(pcie_dev,
				"PCIe: RC%d: request to suspend the link is rejected\n",
				pcie_dev->rc_idx);
			mutex_unlock(&pcie_dev->enumerate_lock);
			mutex_unlock(&pcie_dev->recovery_lock);
			break;
		}

		pcie_dev->user_suspend = true;
		ret = msm_pcie_pm_suspend(dev, user, data, options);
		if (ret) {
			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: user failed to suspend the link.\n",
				pcie_dev->rc_idx);
			pcie_dev->user_suspend = false;
			/* Roll the endpoint back onto the active list. */
			if (dev_info) {
				list_del(&dev_info->pcidev_node);
				list_add_tail(&dev_info->pcidev_node,
						&pcie_dev->enum_ep_list);
			}
		}
		mutex_unlock(&pcie_dev->enumerate_lock);
		mutex_unlock(&pcie_dev->recovery_lock);
		break;
	case MSM_PCIE_RESUME:
		PCIE_DBG(pcie_dev,
			"User of RC%d requests to resume the link\n",
			pcie_dev->rc_idx);

		/* DRV resume */
		if (pcie_dev->link_status == MSM_PCIE_LINK_DRV) {
			ret = msm_pcie_drv_resume(pcie_dev);
			break;
		}

		mutex_lock(&pcie_dev->recovery_lock);
		/* when link was suspended and link resume is requested */
		mutex_lock(&pcie_dev->enumerate_lock);
		list_for_each_entry_safe(dev_info_itr, temp,
					&pcie_dev->susp_ep_list, pcidev_node) {
			if (dev_info_itr->dev == user) {
				list_del(&dev_info_itr->pcidev_node);
				dev_info = dev_info_itr;
				list_add_tail(&dev_info->pcidev_node,
						&pcie_dev->enum_ep_list);
				break;
			}
		}

		if (!dev_info) {
			PCIE_DBG(pcie_dev,
				"PCIe: RC%d: ep BDF 0x%04x not in susp list\n",
				pcie_dev->rc_idx, PCI_DEVID(
						pcidev->bus->number,
						pcidev->devfn));
		}
		mutex_unlock(&pcie_dev->enumerate_lock);

		/* Link already up (e.g. resumed by another EP): nothing to do. */
		if (pcie_dev->power_on) {
			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: requested to resume when link is already powered on.\n",
				pcie_dev->rc_idx);
			mutex_unlock(&pcie_dev->recovery_lock);
			break;
		}

		ret = msm_pcie_pm_resume(dev, user, data, options);
		if (ret) {
			PCIE_ERR(pcie_dev,
				"PCIe: RC%d: user failed to resume the link.\n",
				pcie_dev->rc_idx);
			/* Roll the endpoint back onto the suspended list. */
			mutex_lock(&pcie_dev->enumerate_lock);
			if (dev_info) {
				list_del(&dev_info->pcidev_node);
				list_add_tail(&dev_info->pcidev_node,
						&pcie_dev->susp_ep_list);
			}
			mutex_unlock(&pcie_dev->enumerate_lock);
		} else {
			PCIE_DBG(pcie_dev,
				"PCIe: RC%d: user succeeded to resume the link.\n",
				pcie_dev->rc_idx);
			pcie_dev->user_suspend = false;
		}

		mutex_unlock(&pcie_dev->recovery_lock);
		break;
	case MSM_PCIE_DISABLE_PC:
		PCIE_DBG(pcie_dev,
			"User of RC%d requests to keep the link always alive.\n",
			pcie_dev->rc_idx);
		spin_lock_irqsave(&pcie_dev->cfg_lock, pcie_dev->irqsave_flags);
		if (pcie_dev->suspending) {
			/* Too late - suspend already in flight. */
			PCIE_ERR(pcie_dev,
				"PCIe: RC%d Link has been suspended before request\n",
				pcie_dev->rc_idx);
			ret = MSM_PCIE_ERROR;
		} else {
			pcie_dev->disable_pc = true;
		}
		spin_unlock_irqrestore(&pcie_dev->cfg_lock,
					pcie_dev->irqsave_flags);
		break;
	case MSM_PCIE_ENABLE_PC:
		PCIE_DBG(pcie_dev,
			"User of RC%d cancels the request of alive link.\n",
			pcie_dev->rc_idx);
		spin_lock_irqsave(&pcie_dev->cfg_lock, pcie_dev->irqsave_flags);
		pcie_dev->disable_pc = false;
		spin_unlock_irqrestore(&pcie_dev->cfg_lock,
					pcie_dev->irqsave_flags);
		break;
	case MSM_PCIE_HANDLE_LINKDOWN:
		PCIE_DBG(pcie_dev,
			"User of RC%d requests handling link down.\n",
			pcie_dev->rc_idx);
		spin_lock_irqsave(&pcie_dev->irq_lock, flags);
		msm_pcie_handle_linkdown(pcie_dev);
		spin_unlock_irqrestore(&pcie_dev->irq_lock, flags);
		break;
	case MSM_PCIE_DRV_PC_CTRL:
		PCIE_DBG(pcie_dev,
			"User of RC%d requests handling drv pc options %u.\n",
			pcie_dev->rc_idx, options);

		/* Mask the DRV_PC_CTRL if CESTA is supported */
		if (pcie_dev->pcie_sm)
			break;

		mutex_lock(&pcie_dev->drv_pc_lock);
		pcie_dev->drv_disable_pc_vote =
				options & MSM_PCIE_CONFIG_NO_DRV_PC;

		if (!pcie_dev->drv_info || !pcie_dev->drv_info->ep_connected) {
			mutex_unlock(&pcie_dev->drv_pc_lock);
			break;
		}

		if (pcie_dev->drv_disable_pc_vote) {
			queue_work(mpcie_wq, &pcie_dev->drv_disable_pc_work);
		} else {
			queue_work(mpcie_wq, &pcie_dev->drv_enable_pc_work);

			/* make sure enable pc happens asap */
			flush_work(&pcie_dev->drv_enable_pc_work);
		}
		mutex_unlock(&pcie_dev->drv_pc_lock);
		break;
	default:
		PCIE_ERR(pcie_dev,
			"PCIe: RC%d: unsupported pm operation:%d.\n",
			pcie_dev->rc_idx, pm_opt);
		ret = -ENODEV;
		goto out;
	}

out:
	return ret;
}
EXPORT_SYMBOL(msm_pcie_pm_control);
- void msm_pcie_l1ss_timeout_disable(struct pci_dev *pci_dev)
- {
- struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(pci_dev->bus);
- __msm_pcie_l1ss_timeout_disable(pcie_dev);
- }
- EXPORT_SYMBOL(msm_pcie_l1ss_timeout_disable);
- void msm_pcie_l1ss_timeout_enable(struct pci_dev *pci_dev)
- {
- struct msm_pcie_dev_t *pcie_dev = PCIE_BUS_PRIV_DATA(pci_dev->bus);
- __msm_pcie_l1ss_timeout_enable(pcie_dev);
- }
- EXPORT_SYMBOL(msm_pcie_l1ss_timeout_enable);
- int msm_pcie_register_event(struct msm_pcie_register_event *reg)
- {
- int ret = 0;
- struct msm_pcie_dev_t *pcie_dev;
- struct msm_pcie_register_event *reg_itr, *temp;
- struct pci_dev *pcidev;
- unsigned long flags;
- if (!reg) {
- pr_err("PCIe: Event registration is NULL\n");
- return -ENODEV;
- }
- if (!reg->user) {
- pr_err("PCIe: User of event registration is NULL\n");
- return -ENODEV;
- }
- pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
- if (!pcie_dev) {
- pr_err("PCIe: did not find RC for pci endpoint device.\n");
- return -ENODEV;
- }
- pcidev = (struct pci_dev *)reg->user;
- spin_lock_irqsave(&pcie_dev->evt_reg_list_lock, flags);
- list_for_each_entry_safe(reg_itr, temp,
- &pcie_dev->event_reg_list, node) {
- if (reg_itr->user == reg->user) {
- PCIE_ERR(pcie_dev,
- "PCIe: RC%d: EP BDF 0x%4x already registered\n",
- pcie_dev->rc_idx,
- PCI_DEVID(pcidev->bus->number, pcidev->devfn));
- spin_unlock(&pcie_dev->evt_reg_list_lock);
- return -EEXIST;
- }
- }
- list_add_tail(®->node, &pcie_dev->event_reg_list);
- spin_unlock_irqrestore(&pcie_dev->evt_reg_list_lock, flags);
- if (pcie_dev->drv_supported)
- schedule_work(&pcie_drv.drv_connect);
- return ret;
- }
- EXPORT_SYMBOL(msm_pcie_register_event);
- int msm_pcie_deregister_event(struct msm_pcie_register_event *reg)
- {
- struct msm_pcie_dev_t *pcie_dev;
- struct pci_dev *pcidev;
- struct msm_pcie_register_event *reg_itr, *temp;
- unsigned long flags;
- if (!reg) {
- pr_err("PCIe: Event deregistration is NULL\n");
- return -ENODEV;
- }
- if (!reg->user) {
- pr_err("PCIe: User of event deregistration is NULL\n");
- return -ENODEV;
- }
- pcie_dev = PCIE_BUS_PRIV_DATA(((struct pci_dev *)reg->user)->bus);
- if (!pcie_dev) {
- PCIE_ERR(pcie_dev, "%s",
- "PCIe: did not find RC for pci endpoint device.\n");
- return -ENODEV;
- }
- pcidev = (struct pci_dev *)reg->user;
- spin_lock_irqsave(&pcie_dev->evt_reg_list_lock, flags);
- list_for_each_entry_safe(reg_itr, temp, &pcie_dev->event_reg_list,
- node) {
- if (reg_itr->user == reg->user) {
- list_del(®->node);
- spin_unlock_irqrestore(&pcie_dev->evt_reg_list_lock, flags);
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: Event deregistered for BDF 0x%04x\n",
- pcie_dev->rc_idx,
- PCI_DEVID(pcidev->bus->number, pcidev->devfn));
- return 0;
- }
- }
- spin_unlock_irqrestore(&pcie_dev->evt_reg_list_lock, flags);
- PCIE_DBG(pcie_dev,
- "PCIe: RC%d: Failed to deregister event for BDF 0x%04x\n",
- pcie_dev->rc_idx,
- PCI_DEVID(pcidev->bus->number, pcidev->devfn));
- return -EINVAL;
- }
- EXPORT_SYMBOL(msm_pcie_deregister_event);
- MODULE_DESCRIPTION("Qualcomm Technologies, Inc. PCIe RC driver");
- MODULE_LICENSE("GPL");
|