dsi_display.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
  4. */
  5. #define pr_fmt(fmt) "msm-dsi-display:[%s] " fmt, __func__
  6. #include <linux/list.h>
  7. #include <linux/of.h>
  8. #include <linux/of_gpio.h>
  9. #include <linux/err.h>
  10. #include "msm_drv.h"
  11. #include "sde_connector.h"
  12. #include "msm_mmu.h"
  13. #include "dsi_display.h"
  14. #include "dsi_panel.h"
  15. #include "dsi_ctrl.h"
  16. #include "dsi_ctrl_hw.h"
  17. #include "dsi_drm.h"
  18. #include "dsi_clk.h"
  19. #include "dsi_pwr.h"
  20. #include "sde_dbg.h"
  21. #include "dsi_parser.h"
  22. #define to_dsi_display(x) container_of(x, struct dsi_display, host)
  23. #define INT_BASE_10 10
  24. #define NO_OVERRIDE -1
  25. #define MISR_BUFF_SIZE 256
  26. #define ESD_MODE_STRING_MAX_LEN 256
  27. #define ESD_TRIGGER_STRING_MAX_LEN 10
  28. #define MAX_NAME_SIZE 64
  29. #define DSI_CLOCK_BITRATE_RADIX 10
  30. #define MAX_TE_SOURCE_ID 2
  31. static char dsi_display_primary[MAX_CMDLINE_PARAM_LEN];
  32. static char dsi_display_secondary[MAX_CMDLINE_PARAM_LEN];
  33. static struct dsi_display_boot_param boot_displays[MAX_DSI_ACTIVE_DISPLAY] = {
  34. {.boot_param = dsi_display_primary},
  35. {.boot_param = dsi_display_secondary},
  36. };
  37. static const struct of_device_id dsi_display_dt_match[] = {
  38. {.compatible = "qcom,dsi-display"},
  39. {}
  40. };
  41. static void dsi_display_mask_ctrl_error_interrupts(struct dsi_display *display,
  42. u32 mask, bool enable)
  43. {
  44. int i;
  45. struct dsi_display_ctrl *ctrl;
  46. if (!display)
  47. return;
  48. display_for_each_ctrl(i, display) {
  49. ctrl = &display->ctrl[i];
  50. if (!ctrl)
  51. continue;
  52. dsi_ctrl_mask_error_status_interrupts(ctrl->ctrl, mask, enable);
  53. }
  54. }
  55. static int dsi_display_config_clk_gating(struct dsi_display *display,
  56. bool enable)
  57. {
  58. int rc = 0, i = 0;
  59. struct dsi_display_ctrl *mctrl, *ctrl;
  60. if (!display) {
  61. pr_err("Invalid params\n");
  62. return -EINVAL;
  63. }
  64. mctrl = &display->ctrl[display->clk_master_idx];
  65. if (!mctrl) {
  66. pr_err("Invalid controller\n");
  67. return -EINVAL;
  68. }
  69. rc = dsi_ctrl_config_clk_gating(mctrl->ctrl, enable, PIXEL_CLK |
  70. DSI_PHY);
  71. if (rc) {
  72. pr_err("[%s] failed to %s clk gating, rc=%d\n",
  73. display->name, enable ? "enable" : "disable",
  74. rc);
  75. return rc;
  76. }
  77. display_for_each_ctrl(i, display) {
  78. ctrl = &display->ctrl[i];
  79. if (!ctrl->ctrl || (ctrl == mctrl))
  80. continue;
  81. /**
  82. * In Split DSI use case we should not enable clock gating on
  83. * DSI PHY1 to ensure no display artifacts are seen.
  84. */
  85. rc = dsi_ctrl_config_clk_gating(ctrl->ctrl, enable, PIXEL_CLK);
  86. if (rc) {
  87. pr_err("[%s] failed to %s pixel clk gating, rc=%d\n",
  88. display->name, enable ? "enable" : "disable",
  89. rc);
  90. return rc;
  91. }
  92. }
  93. return 0;
  94. }
  95. static void dsi_display_set_ctrl_esd_check_flag(struct dsi_display *display,
  96. bool enable)
  97. {
  98. int i;
  99. struct dsi_display_ctrl *ctrl;
  100. if (!display)
  101. return;
  102. display_for_each_ctrl(i, display) {
  103. ctrl = &display->ctrl[i];
  104. if (!ctrl)
  105. continue;
  106. ctrl->ctrl->esd_check_underway = enable;
  107. }
  108. }
  109. static void dsi_display_ctrl_irq_update(struct dsi_display *display, bool en)
  110. {
  111. int i;
  112. struct dsi_display_ctrl *ctrl;
  113. if (!display)
  114. return;
  115. display_for_each_ctrl(i, display) {
  116. ctrl = &display->ctrl[i];
  117. if (!ctrl)
  118. continue;
  119. dsi_ctrl_irq_update(ctrl->ctrl, en);
  120. }
  121. }
  122. void dsi_rect_intersect(const struct dsi_rect *r1,
  123. const struct dsi_rect *r2,
  124. struct dsi_rect *result)
  125. {
  126. int l, t, r, b;
  127. if (!r1 || !r2 || !result)
  128. return;
  129. l = max(r1->x, r2->x);
  130. t = max(r1->y, r2->y);
  131. r = min((r1->x + r1->w), (r2->x + r2->w));
  132. b = min((r1->y + r1->h), (r2->y + r2->h));
  133. if (r <= l || b <= t) {
  134. memset(result, 0, sizeof(*result));
  135. } else {
  136. result->x = l;
  137. result->y = t;
  138. result->w = r - l;
  139. result->h = b - t;
  140. }
  141. }
  142. int dsi_display_set_backlight(struct drm_connector *connector,
  143. void *display, u32 bl_lvl)
  144. {
  145. struct dsi_display *dsi_display = display;
  146. struct dsi_panel *panel;
  147. u32 bl_scale, bl_scale_sv;
  148. u64 bl_temp;
  149. int rc = 0;
  150. if (dsi_display == NULL || dsi_display->panel == NULL)
  151. return -EINVAL;
  152. panel = dsi_display->panel;
  153. mutex_lock(&panel->panel_lock);
  154. if (!dsi_panel_initialized(panel)) {
  155. rc = -EINVAL;
  156. goto error;
  157. }
  158. panel->bl_config.bl_level = bl_lvl;
  159. /* scale backlight */
  160. bl_scale = panel->bl_config.bl_scale;
  161. bl_temp = bl_lvl * bl_scale / MAX_BL_SCALE_LEVEL;
  162. bl_scale_sv = panel->bl_config.bl_scale_sv;
  163. bl_temp = (u32)bl_temp * bl_scale_sv / MAX_SV_BL_SCALE_LEVEL;
  164. pr_debug("bl_scale = %u, bl_scale_sv = %u, bl_lvl = %u\n",
  165. bl_scale, bl_scale_sv, (u32)bl_temp);
  166. rc = dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
  167. DSI_CORE_CLK, DSI_CLK_ON);
  168. if (rc) {
  169. pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
  170. dsi_display->name, rc);
  171. goto error;
  172. }
  173. rc = dsi_panel_set_backlight(panel, (u32)bl_temp);
  174. if (rc)
  175. pr_err("unable to set backlight\n");
  176. rc = dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
  177. DSI_CORE_CLK, DSI_CLK_OFF);
  178. if (rc) {
  179. pr_err("[%s] failed to disable DSI core clocks, rc=%d\n",
  180. dsi_display->name, rc);
  181. goto error;
  182. }
  183. error:
  184. mutex_unlock(&panel->panel_lock);
  185. return rc;
  186. }
  187. static int dsi_display_cmd_engine_enable(struct dsi_display *display)
  188. {
  189. int rc = 0;
  190. int i;
  191. struct dsi_display_ctrl *m_ctrl, *ctrl;
  192. m_ctrl = &display->ctrl[display->cmd_master_idx];
  193. mutex_lock(&m_ctrl->ctrl->ctrl_lock);
  194. if (display->cmd_engine_refcount > 0) {
  195. display->cmd_engine_refcount++;
  196. goto done;
  197. }
  198. rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
  199. if (rc) {
  200. pr_err("[%s] failed to enable cmd engine, rc=%d\n",
  201. display->name, rc);
  202. goto done;
  203. }
  204. display_for_each_ctrl(i, display) {
  205. ctrl = &display->ctrl[i];
  206. if (!ctrl->ctrl || (ctrl == m_ctrl))
  207. continue;
  208. rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
  209. DSI_CTRL_ENGINE_ON);
  210. if (rc) {
  211. pr_err("[%s] failed to enable cmd engine, rc=%d\n",
  212. display->name, rc);
  213. goto error_disable_master;
  214. }
  215. }
  216. display->cmd_engine_refcount++;
  217. goto done;
  218. error_disable_master:
  219. (void)dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
  220. done:
  221. mutex_unlock(&m_ctrl->ctrl->ctrl_lock);
  222. return rc;
  223. }
  224. static int dsi_display_cmd_engine_disable(struct dsi_display *display)
  225. {
  226. int rc = 0;
  227. int i;
  228. struct dsi_display_ctrl *m_ctrl, *ctrl;
  229. m_ctrl = &display->ctrl[display->cmd_master_idx];
  230. mutex_lock(&m_ctrl->ctrl->ctrl_lock);
  231. if (display->cmd_engine_refcount == 0) {
  232. pr_err("[%s] Invalid refcount\n", display->name);
  233. goto done;
  234. } else if (display->cmd_engine_refcount > 1) {
  235. display->cmd_engine_refcount--;
  236. goto done;
  237. }
  238. display_for_each_ctrl(i, display) {
  239. ctrl = &display->ctrl[i];
  240. if (!ctrl->ctrl || (ctrl == m_ctrl))
  241. continue;
  242. rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
  243. DSI_CTRL_ENGINE_OFF);
  244. if (rc)
  245. pr_err("[%s] failed to disable cmd engine, rc=%d\n",
  246. display->name, rc);
  247. }
  248. rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
  249. if (rc) {
  250. pr_err("[%s] failed to disable cmd engine, rc=%d\n",
  251. display->name, rc);
  252. goto error;
  253. }
  254. error:
  255. display->cmd_engine_refcount = 0;
  256. done:
  257. mutex_unlock(&m_ctrl->ctrl->ctrl_lock);
  258. return rc;
  259. }
  260. static void dsi_display_aspace_cb_locked(void *cb_data, bool is_detach)
  261. {
  262. struct dsi_display *display;
  263. struct dsi_display_ctrl *display_ctrl;
  264. int rc, cnt;
  265. if (!cb_data) {
  266. pr_err("aspace cb called with invalid cb_data\n");
  267. return;
  268. }
  269. display = (struct dsi_display *)cb_data;
  270. /*
  271. * acquire panel_lock to make sure no commands are in-progress
  272. * while detaching the non-secure context banks
  273. */
  274. dsi_panel_acquire_panel_lock(display->panel);
  275. if (is_detach) {
  276. /* invalidate the stored iova */
  277. display->cmd_buffer_iova = 0;
  278. /* return the virtual address mapping */
  279. msm_gem_put_vaddr(display->tx_cmd_buf);
  280. msm_gem_vunmap(display->tx_cmd_buf, OBJ_LOCK_NORMAL);
  281. } else {
  282. rc = msm_gem_get_iova(display->tx_cmd_buf,
  283. display->aspace, &(display->cmd_buffer_iova));
  284. if (rc) {
  285. pr_err("failed to get the iova rc %d\n", rc);
  286. goto end;
  287. }
  288. display->vaddr =
  289. (void *) msm_gem_get_vaddr(display->tx_cmd_buf);
  290. if (IS_ERR_OR_NULL(display->vaddr)) {
  291. pr_err("failed to get va rc %d\n", rc);
  292. goto end;
  293. }
  294. }
  295. display_for_each_ctrl(cnt, display) {
  296. display_ctrl = &display->ctrl[cnt];
  297. display_ctrl->ctrl->cmd_buffer_size = display->cmd_buffer_size;
  298. display_ctrl->ctrl->cmd_buffer_iova = display->cmd_buffer_iova;
  299. display_ctrl->ctrl->vaddr = display->vaddr;
  300. display_ctrl->ctrl->secure_mode = is_detach;
  301. }
  302. end:
  303. /* release panel_lock */
  304. dsi_panel_release_panel_lock(display->panel);
  305. }
  306. static irqreturn_t dsi_display_panel_te_irq_handler(int irq, void *data)
  307. {
  308. struct dsi_display *display = (struct dsi_display *)data;
  309. /*
  310. * This irq handler is used for sole purpose of identifying
  311. * ESD attacks on panel and we can safely assume IRQ_HANDLED
  312. * in case of display not being initialized yet
  313. */
  314. if (!display)
  315. return IRQ_HANDLED;
  316. SDE_EVT32(SDE_EVTLOG_FUNC_CASE1);
  317. complete_all(&display->esd_te_gate);
  318. return IRQ_HANDLED;
  319. }
  320. static void dsi_display_change_te_irq_status(struct dsi_display *display,
  321. bool enable)
  322. {
  323. if (!display) {
  324. pr_err("Invalid params\n");
  325. return;
  326. }
  327. /* Handle unbalanced irq enable/disable calls */
  328. if (enable && !display->is_te_irq_enabled) {
  329. enable_irq(gpio_to_irq(display->disp_te_gpio));
  330. display->is_te_irq_enabled = true;
  331. } else if (!enable && display->is_te_irq_enabled) {
  332. disable_irq(gpio_to_irq(display->disp_te_gpio));
  333. display->is_te_irq_enabled = false;
  334. }
  335. }
  336. static void dsi_display_register_te_irq(struct dsi_display *display)
  337. {
  338. int rc = 0;
  339. struct platform_device *pdev;
  340. struct device *dev;
  341. unsigned int te_irq;
  342. pdev = display->pdev;
  343. if (!pdev) {
  344. pr_err("invalid platform device\n");
  345. return;
  346. }
  347. dev = &pdev->dev;
  348. if (!dev) {
  349. pr_err("invalid device\n");
  350. return;
  351. }
  352. if (!gpio_is_valid(display->disp_te_gpio)) {
  353. rc = -EINVAL;
  354. goto error;
  355. }
  356. init_completion(&display->esd_te_gate);
  357. te_irq = gpio_to_irq(display->disp_te_gpio);
  358. /* Avoid deferred spurious irqs with disable_irq() */
  359. irq_set_status_flags(te_irq, IRQ_DISABLE_UNLAZY);
  360. rc = devm_request_irq(dev, te_irq, dsi_display_panel_te_irq_handler,
  361. IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
  362. "TE_GPIO", display);
  363. if (rc) {
  364. pr_err("TE request_irq failed for ESD rc:%d\n", rc);
  365. irq_clear_status_flags(te_irq, IRQ_DISABLE_UNLAZY);
  366. goto error;
  367. }
  368. disable_irq(te_irq);
  369. display->is_te_irq_enabled = false;
  370. return;
  371. error:
  372. /* disable the TE based ESD check */
  373. pr_warn("Unable to register for TE IRQ\n");
  374. if (display->panel->esd_config.status_mode == ESD_MODE_PANEL_TE)
  375. display->panel->esd_config.esd_enabled = false;
  376. }
  377. static bool dsi_display_is_te_based_esd(struct dsi_display *display)
  378. {
  379. u32 status_mode = 0;
  380. if (!display->panel) {
  381. pr_err("Invalid panel data\n");
  382. return false;
  383. }
  384. status_mode = display->panel->esd_config.status_mode;
  385. if (status_mode == ESD_MODE_PANEL_TE &&
  386. gpio_is_valid(display->disp_te_gpio))
  387. return true;
  388. return false;
  389. }
  390. /* Allocate memory for cmd dma tx buffer */
  391. static int dsi_host_alloc_cmd_tx_buffer(struct dsi_display *display)
  392. {
  393. int rc = 0, cnt = 0;
  394. struct dsi_display_ctrl *display_ctrl;
  395. display->tx_cmd_buf = msm_gem_new(display->drm_dev,
  396. SZ_4K,
  397. MSM_BO_UNCACHED);
  398. if ((display->tx_cmd_buf) == NULL) {
  399. pr_err("Failed to allocate cmd tx buf memory\n");
  400. rc = -ENOMEM;
  401. goto error;
  402. }
  403. display->cmd_buffer_size = SZ_4K;
  404. display->aspace = msm_gem_smmu_address_space_get(
  405. display->drm_dev, MSM_SMMU_DOMAIN_UNSECURE);
  406. if (!display->aspace) {
  407. pr_err("failed to get aspace\n");
  408. rc = -EINVAL;
  409. goto free_gem;
  410. }
  411. /* register to aspace */
  412. rc = msm_gem_address_space_register_cb(display->aspace,
  413. dsi_display_aspace_cb_locked, (void *)display);
  414. if (rc) {
  415. pr_err("failed to register callback %d\n", rc);
  416. goto free_gem;
  417. }
  418. rc = msm_gem_get_iova(display->tx_cmd_buf, display->aspace,
  419. &(display->cmd_buffer_iova));
  420. if (rc) {
  421. pr_err("failed to get the iova rc %d\n", rc);
  422. goto free_aspace_cb;
  423. }
  424. display->vaddr =
  425. (void *) msm_gem_get_vaddr(display->tx_cmd_buf);
  426. if (IS_ERR_OR_NULL(display->vaddr)) {
  427. pr_err("failed to get va rc %d\n", rc);
  428. rc = -EINVAL;
  429. goto put_iova;
  430. }
  431. display_for_each_ctrl(cnt, display) {
  432. display_ctrl = &display->ctrl[cnt];
  433. display_ctrl->ctrl->cmd_buffer_size = SZ_4K;
  434. display_ctrl->ctrl->cmd_buffer_iova =
  435. display->cmd_buffer_iova;
  436. display_ctrl->ctrl->vaddr = display->vaddr;
  437. display_ctrl->ctrl->tx_cmd_buf = display->tx_cmd_buf;
  438. }
  439. return rc;
  440. put_iova:
  441. msm_gem_put_iova(display->tx_cmd_buf, display->aspace);
  442. free_aspace_cb:
  443. msm_gem_address_space_unregister_cb(display->aspace,
  444. dsi_display_aspace_cb_locked, display);
  445. free_gem:
  446. mutex_lock(&display->drm_dev->struct_mutex);
  447. msm_gem_free_object(display->tx_cmd_buf);
  448. mutex_unlock(&display->drm_dev->struct_mutex);
  449. error:
  450. return rc;
  451. }
  452. static bool dsi_display_validate_reg_read(struct dsi_panel *panel)
  453. {
  454. int i, j = 0;
  455. int len = 0, *lenp;
  456. int group = 0, count = 0;
  457. struct drm_panel_esd_config *config;
  458. if (!panel)
  459. return false;
  460. config = &(panel->esd_config);
  461. lenp = config->status_valid_params ?: config->status_cmds_rlen;
  462. count = config->status_cmd.count;
  463. for (i = 0; i < count; i++)
  464. len += lenp[i];
  465. for (i = 0; i < len; i++)
  466. j += len;
  467. for (j = 0; j < config->groups; ++j) {
  468. for (i = 0; i < len; ++i) {
  469. if (config->return_buf[i] !=
  470. config->status_value[group + i])
  471. break;
  472. }
  473. if (i == len)
  474. return true;
  475. group += len;
  476. }
  477. return false;
  478. }
  479. static void dsi_display_parse_te_data(struct dsi_display *display)
  480. {
  481. struct platform_device *pdev;
  482. struct device *dev;
  483. int rc = 0;
  484. u32 val = 0;
  485. pdev = display->pdev;
  486. if (!pdev) {
  487. pr_err("Invalid platform device\n");
  488. return;
  489. }
  490. dev = &pdev->dev;
  491. if (!dev) {
  492. pr_err("Invalid device\n");
  493. return;
  494. }
  495. display->disp_te_gpio = of_get_named_gpio(dev->of_node,
  496. "qcom,platform-te-gpio", 0);
  497. if (display->fw)
  498. rc = dsi_parser_read_u32(display->parser_node,
  499. "qcom,panel-te-source", &val);
  500. else
  501. rc = of_property_read_u32(dev->of_node,
  502. "qcom,panel-te-source", &val);
  503. if (rc || (val > MAX_TE_SOURCE_ID)) {
  504. pr_err("invalid vsync source selection\n");
  505. val = 0;
  506. }
  507. display->te_source = val;
  508. }
  509. static int dsi_display_read_status(struct dsi_display_ctrl *ctrl,
  510. struct dsi_panel *panel)
  511. {
  512. int i, rc = 0, count = 0, start = 0, *lenp;
  513. struct drm_panel_esd_config *config;
  514. struct dsi_cmd_desc *cmds;
  515. u32 flags = 0;
  516. if (!panel || !ctrl || !ctrl->ctrl)
  517. return -EINVAL;
  518. /*
  519. * When DSI controller is not in initialized state, we do not want to
  520. * report a false ESD failure and hence we defer until the next read
  521. * happens.
  522. */
  523. if (!dsi_ctrl_validate_host_state(ctrl->ctrl))
  524. return 1;
  525. config = &(panel->esd_config);
  526. lenp = config->status_valid_params ?: config->status_cmds_rlen;
  527. count = config->status_cmd.count;
  528. cmds = config->status_cmd.cmds;
  529. flags |= (DSI_CTRL_CMD_FETCH_MEMORY | DSI_CTRL_CMD_READ |
  530. DSI_CTRL_CMD_CUSTOM_DMA_SCHED);
  531. for (i = 0; i < count; ++i) {
  532. memset(config->status_buf, 0x0, SZ_4K);
  533. if (cmds[i].last_command) {
  534. cmds[i].msg.flags |= MIPI_DSI_MSG_LASTCOMMAND;
  535. flags |= DSI_CTRL_CMD_LAST_COMMAND;
  536. }
  537. if (config->status_cmd.state == DSI_CMD_SET_STATE_LP)
  538. cmds[i].msg.flags |= MIPI_DSI_MSG_USE_LPM;
  539. cmds[i].msg.rx_buf = config->status_buf;
  540. cmds[i].msg.rx_len = config->status_cmds_rlen[i];
  541. rc = dsi_ctrl_cmd_transfer(ctrl->ctrl, &cmds[i].msg, flags);
  542. if (rc <= 0) {
  543. pr_err("rx cmd transfer failed rc=%d\n", rc);
  544. return rc;
  545. }
  546. memcpy(config->return_buf + start,
  547. config->status_buf, lenp[i]);
  548. start += lenp[i];
  549. }
  550. return rc;
  551. }
  552. static int dsi_display_validate_status(struct dsi_display_ctrl *ctrl,
  553. struct dsi_panel *panel)
  554. {
  555. int rc = 0;
  556. rc = dsi_display_read_status(ctrl, panel);
  557. if (rc <= 0) {
  558. goto exit;
  559. } else {
  560. /*
  561. * panel status read successfully.
  562. * check for validity of the data read back.
  563. */
  564. rc = dsi_display_validate_reg_read(panel);
  565. if (!rc) {
  566. rc = -EINVAL;
  567. goto exit;
  568. }
  569. }
  570. exit:
  571. return rc;
  572. }
  573. static int dsi_display_status_reg_read(struct dsi_display *display)
  574. {
  575. int rc = 0, i;
  576. struct dsi_display_ctrl *m_ctrl, *ctrl;
  577. pr_debug(" ++\n");
  578. m_ctrl = &display->ctrl[display->cmd_master_idx];
  579. if (display->tx_cmd_buf == NULL) {
  580. rc = dsi_host_alloc_cmd_tx_buffer(display);
  581. if (rc) {
  582. pr_err("failed to allocate cmd tx buffer memory\n");
  583. goto done;
  584. }
  585. }
  586. rc = dsi_display_cmd_engine_enable(display);
  587. if (rc) {
  588. pr_err("cmd engine enable failed\n");
  589. return -EPERM;
  590. }
  591. rc = dsi_display_validate_status(m_ctrl, display->panel);
  592. if (rc <= 0) {
  593. pr_err("[%s] read status failed on master, rc=%d\n",
  594. display->name, rc);
  595. goto exit;
  596. }
  597. if (!display->panel->sync_broadcast_en)
  598. goto exit;
  599. display_for_each_ctrl(i, display) {
  600. ctrl = &display->ctrl[i];
  601. if (ctrl == m_ctrl)
  602. continue;
  603. rc = dsi_display_validate_status(ctrl, display->panel);
  604. if (rc <= 0) {
  605. pr_err("[%s] read status failed on slave, rc=%d\n",
  606. display->name, rc);
  607. goto exit;
  608. }
  609. }
  610. exit:
  611. dsi_display_cmd_engine_disable(display);
  612. done:
  613. return rc;
  614. }
  615. static int dsi_display_status_bta_request(struct dsi_display *display)
  616. {
  617. int rc = 0;
  618. pr_debug(" ++\n");
  619. /* TODO: trigger SW BTA and wait for acknowledgment */
  620. return rc;
  621. }
  622. static int dsi_display_status_check_te(struct dsi_display *display)
  623. {
  624. int rc = 1;
  625. int const esd_te_timeout = msecs_to_jiffies(3*20);
  626. dsi_display_change_te_irq_status(display, true);
  627. reinit_completion(&display->esd_te_gate);
  628. if (!wait_for_completion_timeout(&display->esd_te_gate,
  629. esd_te_timeout)) {
  630. pr_err("TE check failed\n");
  631. rc = -EINVAL;
  632. }
  633. dsi_display_change_te_irq_status(display, false);
  634. return rc;
  635. }
  636. int dsi_display_check_status(struct drm_connector *connector, void *display,
  637. bool te_check_override)
  638. {
  639. struct dsi_display *dsi_display = display;
  640. struct dsi_panel *panel;
  641. u32 status_mode;
  642. int rc = 0x1;
  643. u32 mask;
  644. if (!dsi_display || !dsi_display->panel)
  645. return -EINVAL;
  646. panel = dsi_display->panel;
  647. dsi_panel_acquire_panel_lock(panel);
  648. if (!panel->panel_initialized) {
  649. pr_debug("Panel not initialized\n");
  650. goto release_panel_lock;
  651. }
  652. /* Prevent another ESD check when ESD recovery is underway */
  653. if (atomic_read(&panel->esd_recovery_pending))
  654. goto release_panel_lock;
  655. status_mode = panel->esd_config.status_mode;
  656. if (status_mode == ESD_MODE_SW_SIM_SUCCESS)
  657. goto release_panel_lock;
  658. if (status_mode == ESD_MODE_SW_SIM_FAILURE) {
  659. rc = -EINVAL;
  660. goto release_panel_lock;
  661. }
  662. SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
  663. if (te_check_override && gpio_is_valid(dsi_display->disp_te_gpio))
  664. status_mode = ESD_MODE_PANEL_TE;
  665. dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
  666. DSI_ALL_CLKS, DSI_CLK_ON);
  667. /* Mask error interrupts before attempting ESD read */
  668. mask = BIT(DSI_FIFO_OVERFLOW) | BIT(DSI_FIFO_UNDERFLOW);
  669. dsi_display_set_ctrl_esd_check_flag(dsi_display, true);
  670. dsi_display_mask_ctrl_error_interrupts(dsi_display, mask, true);
  671. if (status_mode == ESD_MODE_REG_READ) {
  672. rc = dsi_display_status_reg_read(dsi_display);
  673. } else if (status_mode == ESD_MODE_SW_BTA) {
  674. rc = dsi_display_status_bta_request(dsi_display);
  675. } else if (status_mode == ESD_MODE_PANEL_TE) {
  676. rc = dsi_display_status_check_te(dsi_display);
  677. } else {
  678. pr_warn("unsupported check status mode\n");
  679. panel->esd_config.esd_enabled = false;
  680. }
  681. /* Unmask error interrupts */
  682. if (rc > 0) {
  683. dsi_display_set_ctrl_esd_check_flag(dsi_display, false);
  684. dsi_display_mask_ctrl_error_interrupts(dsi_display, mask,
  685. false);
  686. } else {
  687. /* Handle Panel failures during display disable sequence */
  688. atomic_set(&panel->esd_recovery_pending, 1);
  689. }
  690. dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
  691. DSI_ALL_CLKS, DSI_CLK_OFF);
  692. release_panel_lock:
  693. dsi_panel_release_panel_lock(panel);
  694. SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
  695. return rc;
  696. }
  697. static int dsi_display_cmd_prepare(const char *cmd_buf, u32 cmd_buf_len,
  698. struct dsi_cmd_desc *cmd, u8 *payload, u32 payload_len)
  699. {
  700. int i;
  701. memset(cmd, 0x00, sizeof(*cmd));
  702. cmd->msg.type = cmd_buf[0];
  703. cmd->last_command = (cmd_buf[1] == 1);
  704. cmd->msg.channel = cmd_buf[2];
  705. cmd->msg.flags = cmd_buf[3];
  706. cmd->msg.ctrl = 0;
  707. cmd->post_wait_ms = cmd->msg.wait_ms = cmd_buf[4];
  708. cmd->msg.tx_len = ((cmd_buf[5] << 8) | (cmd_buf[6]));
  709. if (cmd->msg.tx_len > payload_len) {
  710. pr_err("Incorrect payload length tx_len %zu, payload_len %d\n",
  711. cmd->msg.tx_len, payload_len);
  712. return -EINVAL;
  713. }
  714. for (i = 0; i < cmd->msg.tx_len; i++)
  715. payload[i] = cmd_buf[7 + i];
  716. cmd->msg.tx_buf = payload;
  717. return 0;
  718. }
  719. static int dsi_display_ctrl_get_host_init_state(struct dsi_display *dsi_display,
  720. bool *state)
  721. {
  722. struct dsi_display_ctrl *ctrl;
  723. int i, rc = -EINVAL;
  724. display_for_each_ctrl(i, dsi_display) {
  725. ctrl = &dsi_display->ctrl[i];
  726. rc = dsi_ctrl_get_host_engine_init_state(ctrl->ctrl, state);
  727. if (rc)
  728. break;
  729. }
  730. return rc;
  731. }
  732. int dsi_display_cmd_transfer(struct drm_connector *connector,
  733. void *display, const char *cmd_buf,
  734. u32 cmd_buf_len)
  735. {
  736. struct dsi_display *dsi_display = display;
  737. struct dsi_cmd_desc cmd;
  738. u8 cmd_payload[MAX_CMD_PAYLOAD_SIZE];
  739. int rc = 0;
  740. bool state = false;
  741. if (!dsi_display || !cmd_buf) {
  742. pr_err("[DSI] invalid params\n");
  743. return -EINVAL;
  744. }
  745. pr_debug("[DSI] Display command transfer\n");
  746. rc = dsi_display_cmd_prepare(cmd_buf, cmd_buf_len,
  747. &cmd, cmd_payload, MAX_CMD_PAYLOAD_SIZE);
  748. if (rc) {
  749. pr_err("[DSI] command prepare failed. rc %d\n", rc);
  750. return rc;
  751. }
  752. mutex_lock(&dsi_display->display_lock);
  753. rc = dsi_display_ctrl_get_host_init_state(dsi_display, &state);
  754. /**
  755. * Handle scenario where a command transfer is initiated through
  756. * sysfs interface when device is in suspend state.
  757. */
  758. if (!rc && !state) {
  759. pr_warn_ratelimited("Command xfer attempted while device is in suspend state\n"
  760. );
  761. rc = -EPERM;
  762. goto end;
  763. }
  764. if (rc || !state) {
  765. pr_err("[DSI] Invalid host state %d rc %d\n",
  766. state, rc);
  767. rc = -EPERM;
  768. goto end;
  769. }
  770. rc = dsi_display->host.ops->transfer(&dsi_display->host,
  771. &cmd.msg);
  772. end:
  773. mutex_unlock(&dsi_display->display_lock);
  774. return rc;
  775. }
  776. static void _dsi_display_continuous_clk_ctrl(struct dsi_display *display,
  777. bool enable)
  778. {
  779. int i;
  780. struct dsi_display_ctrl *ctrl;
  781. if (!display || !display->panel->host_config.force_hs_clk_lane)
  782. return;
  783. display_for_each_ctrl(i, display) {
  784. ctrl = &display->ctrl[i];
  785. dsi_ctrl_set_continuous_clk(ctrl->ctrl, enable);
  786. }
  787. }
  788. int dsi_display_soft_reset(void *display)
  789. {
  790. struct dsi_display *dsi_display;
  791. struct dsi_display_ctrl *ctrl;
  792. int rc = 0;
  793. int i;
  794. if (!display)
  795. return -EINVAL;
  796. dsi_display = display;
  797. display_for_each_ctrl(i, dsi_display) {
  798. ctrl = &dsi_display->ctrl[i];
  799. rc = dsi_ctrl_soft_reset(ctrl->ctrl);
  800. if (rc) {
  801. pr_err("[%s] failed to soft reset host_%d, rc=%d\n",
  802. dsi_display->name, i, rc);
  803. break;
  804. }
  805. }
  806. return rc;
  807. }
  808. enum dsi_pixel_format dsi_display_get_dst_format(
  809. struct drm_connector *connector,
  810. void *display)
  811. {
  812. enum dsi_pixel_format format = DSI_PIXEL_FORMAT_MAX;
  813. struct dsi_display *dsi_display = (struct dsi_display *)display;
  814. if (!dsi_display || !dsi_display->panel) {
  815. pr_err("Invalid param(s) dsi_display %pK, panel %pK\n",
  816. dsi_display,
  817. ((dsi_display) ? dsi_display->panel : NULL));
  818. return format;
  819. }
  820. format = dsi_display->panel->host_config.dst_format;
  821. return format;
  822. }
  823. static void _dsi_display_setup_misr(struct dsi_display *display)
  824. {
  825. int i;
  826. display_for_each_ctrl(i, display) {
  827. dsi_ctrl_setup_misr(display->ctrl[i].ctrl,
  828. display->misr_enable,
  829. display->misr_frame_count);
  830. }
  831. }
  832. /**
  833. * dsi_display_get_cont_splash_status - Get continuous splash status.
  834. * @display: DSI display handle.
  835. *
  836. * Return: boolean to signify whether continuous splash is enabled.
  837. */
  838. static bool dsi_display_get_cont_splash_status(struct dsi_display *display)
  839. {
  840. u32 val = 0;
  841. int i;
  842. struct dsi_display_ctrl *ctrl;
  843. struct dsi_ctrl_hw *hw;
  844. display_for_each_ctrl(i, display) {
  845. ctrl = &(display->ctrl[i]);
  846. if (!ctrl || !ctrl->ctrl)
  847. continue;
  848. hw = &(ctrl->ctrl->hw);
  849. val = hw->ops.get_cont_splash_status(hw);
  850. if (!val)
  851. return false;
  852. }
  853. return true;
  854. }
  855. int dsi_display_set_power(struct drm_connector *connector,
  856. int power_mode, void *disp)
  857. {
  858. struct dsi_display *display = disp;
  859. int rc = 0;
  860. if (!display || !display->panel) {
  861. pr_err("invalid display/panel\n");
  862. return -EINVAL;
  863. }
  864. switch (power_mode) {
  865. case SDE_MODE_DPMS_LP1:
  866. rc = dsi_panel_set_lp1(display->panel);
  867. break;
  868. case SDE_MODE_DPMS_LP2:
  869. rc = dsi_panel_set_lp2(display->panel);
  870. break;
  871. default:
  872. rc = dsi_panel_set_nolp(display->panel);
  873. break;
  874. }
  875. return rc;
  876. }

static ssize_t debugfs_dump_info_read(struct file *file,
		char __user *user_buf,
		size_t user_len,
		loff_t *ppos)
{
	struct dsi_display *display = file->private_data;
	char *buf;
	u32 len = 0;
	int i;

	if (!display)
		return -ENODEV;

	if (*ppos)
		return 0;

	buf = kzalloc(SZ_4K, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	len += snprintf(buf + len, (SZ_4K - len), "name = %s\n", display->name);
	len += snprintf(buf + len, (SZ_4K - len),
			"\tResolution = %dx%d\n",
			display->config.video_timing.h_active,
			display->config.video_timing.v_active);

	display_for_each_ctrl(i, display) {
		len += snprintf(buf + len, (SZ_4K - len),
				"\tCTRL_%d:\n\t\tctrl = %s\n\t\tphy = %s\n",
				i, display->ctrl[i].ctrl->name,
				display->ctrl[i].phy->name);
	}

	len += snprintf(buf + len, (SZ_4K - len),
			"\tPanel = %s\n", display->panel->name);

	len += snprintf(buf + len, (SZ_4K - len),
			"\tClock master = %s\n",
			display->ctrl[display->clk_master_idx].ctrl->name);

	if (copy_to_user(user_buf, buf, len)) {
		kfree(buf);
		return -EFAULT;
	}

	*ppos += len;

	kfree(buf);
	return len;
}
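
/*
 * debugfs write handler for the misr_data node: parses "<enable> <frame_count>"
 * from user space, stores both values in the display, and programs MISR on
 * every controller with the DSI core clock temporarily enabled.
 */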
static ssize_t debugfs_misr_setup(struct file *file,
		const char __user *user_buf,
		size_t user_len,
		loff_t *ppos)
{
	struct dsi_display *display = file->private_data;
	char *buf;
	int rc = 0;
	size_t len;
	u32 enable, frame_count;

	if (!display)
		return -ENODEV;

	if (*ppos)
		return 0;

	buf = kzalloc(MISR_BUFF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* leave room for termination char */
	len = min_t(size_t, user_len, MISR_BUFF_SIZE - 1);
	if (copy_from_user(buf, user_buf, len)) {
		rc = -EINVAL;
		goto error;
	}

	buf[len] = '\0'; /* terminate the string */
	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2) {
		rc = -EINVAL;
		goto error;
	}

	display->misr_enable = enable;
	display->misr_frame_count = frame_count;

	mutex_lock(&display->display_lock);
	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
			DSI_CORE_CLK, DSI_CLK_ON);
	if (rc) {
		pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
		       display->name, rc);
		goto unlock;
	}

	_dsi_display_setup_misr(display);

	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
			DSI_CORE_CLK, DSI_CLK_OFF);
	if (rc) {
		pr_err("[%s] failed to disable DSI core clocks, rc=%d\n",
		       display->name, rc);
		goto unlock;
	}

	rc = user_len;
unlock:
	mutex_unlock(&display->display_lock);
error:
	kfree(buf);
	return rc;
}
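
/*
 * debugfs read handler for the misr_data node: collects the MISR signature
 * from each controller (with the DSI core clock held on) and prints one
 * "DSI_<n> MISR: 0x<val>" line per controller into the user buffer.
 */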
static ssize_t debugfs_misr_read(struct file *file,
		char __user *user_buf,
		size_t user_len,
		loff_t *ppos)
{
	struct dsi_display *display = file->private_data;
	char *buf;
	u32 len = 0;
	int rc = 0;
	struct dsi_ctrl *dsi_ctrl;
	int i;
	u32 misr;
	size_t max_len = min_t(size_t, user_len, MISR_BUFF_SIZE);

	if (!display)
		return -ENODEV;

	if (*ppos)
		return 0;

	buf = kzalloc(max_len, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(buf))
		return -ENOMEM;

	mutex_lock(&display->display_lock);
	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
			DSI_CORE_CLK, DSI_CLK_ON);
	if (rc) {
		pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
		       display->name, rc);
		goto error;
	}

	display_for_each_ctrl(i, display) {
		dsi_ctrl = display->ctrl[i].ctrl;
		misr = dsi_ctrl_collect_misr(display->ctrl[i].ctrl);

		len += snprintf((buf + len), max_len - len,
				"DSI_%d MISR: 0x%x\n", dsi_ctrl->cell_index, misr);
		if (len >= max_len)
			break;
	}

	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
			DSI_CORE_CLK, DSI_CLK_OFF);
	if (rc) {
		pr_err("[%s] failed to disable DSI core clocks, rc=%d\n",
		       display->name, rc);
		goto error;
	}

	if (copy_to_user(user_buf, buf, max_len)) {
		rc = -EFAULT;
		goto error;
	}

	*ppos += len;

error:
	mutex_unlock(&display->display_lock);
	kfree(buf);
	return len;
}

static ssize_t debugfs_esd_trigger_check(struct file *file,
		const char __user *user_buf,
		size_t user_len,
		loff_t *ppos)
{
	struct dsi_display *display = file->private_data;
	char *buf;
	int rc = 0;
	u32 esd_trigger;
	size_t len;

	if (!display)
		return -ENODEV;

	if (*ppos)
		return 0;

	if (user_len > sizeof(u32))
		return -EINVAL;

	if (!user_len || !user_buf)
		return -EINVAL;

	if (!display->panel ||
	    atomic_read(&display->panel->esd_recovery_pending))
		return user_len;

	buf = kzalloc(ESD_TRIGGER_STRING_MAX_LEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	len = min_t(size_t, user_len, ESD_TRIGGER_STRING_MAX_LEN - 1);
	if (copy_from_user(buf, user_buf, len)) {
		rc = -EINVAL;
		goto error;
	}

	buf[len] = '\0'; /* terminate the string */

	if (kstrtouint(buf, 10, &esd_trigger)) {
		rc = -EINVAL;
		goto error;
	}

	if (esd_trigger != 1) {
		rc = -EINVAL;
		goto error;
	}

	display->esd_trigger = esd_trigger;

	if (display->esd_trigger) {
		pr_info("ESD attack triggered by user\n");
		rc = dsi_panel_trigger_esd_attack(display->panel);
		if (rc) {
			pr_err("Failed to trigger ESD attack\n");
			goto error;
		}
	}

	rc = len;
error:
	kfree(buf);
	return rc;
}

static ssize_t debugfs_alter_esd_check_mode(struct file *file,
		const char __user *user_buf,
		size_t user_len,
		loff_t *ppos)
{
	struct dsi_display *display = file->private_data;
	struct drm_panel_esd_config *esd_config;
	char *buf;
	int rc = 0;
	size_t len;

	if (!display)
		return -ENODEV;

	if (*ppos)
		return 0;

	buf = kzalloc(ESD_MODE_STRING_MAX_LEN, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(buf))
		return -ENOMEM;

	len = min_t(size_t, user_len, ESD_MODE_STRING_MAX_LEN - 1);
	if (copy_from_user(buf, user_buf, len)) {
		rc = -EINVAL;
		goto error;
	}

	buf[len] = '\0'; /* terminate the string */
	if (!display->panel) {
		rc = -EINVAL;
		goto error;
	}

	esd_config = &display->panel->esd_config;
	if (!esd_config) {
		pr_err("Invalid panel esd config\n");
		rc = -EINVAL;
		goto error;
	}

	if (!esd_config->esd_enabled)
		goto error;

	if (!strcmp(buf, "te_signal_check\n")) {
		pr_info("ESD check is switched to TE mode by user\n");
		esd_config->status_mode = ESD_MODE_PANEL_TE;
		dsi_display_change_te_irq_status(display, true);
	}

	if (!strcmp(buf, "reg_read\n")) {
		pr_info("ESD check is switched to reg read by user\n");
		rc = dsi_panel_parse_esd_reg_read_configs(display->panel);
		if (rc) {
			pr_err("failed to alter esd check mode, rc=%d\n", rc);
			rc = user_len;
			goto error;
		}
		esd_config->status_mode = ESD_MODE_REG_READ;
		if (dsi_display_is_te_based_esd(display))
			dsi_display_change_te_irq_status(display, false);
	}

	if (!strcmp(buf, "esd_sw_sim_success\n"))
		esd_config->status_mode = ESD_MODE_SW_SIM_SUCCESS;

	if (!strcmp(buf, "esd_sw_sim_failure\n"))
		esd_config->status_mode = ESD_MODE_SW_SIM_FAILURE;

	rc = len;
error:
	kfree(buf);
	return rc;
}

static ssize_t debugfs_read_esd_check_mode(struct file *file,
		char __user *user_buf,
		size_t user_len,
		loff_t *ppos)
{
	struct dsi_display *display = file->private_data;
	struct drm_panel_esd_config *esd_config;
	char *buf;
	int rc = 0;
	size_t len;

	if (!display)
		return -ENODEV;

	if (*ppos)
		return 0;

	if (!display->panel) {
		pr_err("invalid panel data\n");
		return -EINVAL;
	}

	buf = kzalloc(ESD_MODE_STRING_MAX_LEN, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(buf))
		return -ENOMEM;

	esd_config = &display->panel->esd_config;
	if (!esd_config) {
		pr_err("Invalid panel esd config\n");
		rc = -EINVAL;
		goto error;
	}

	len = min_t(size_t, user_len, ESD_MODE_STRING_MAX_LEN - 1);
	if (!esd_config->esd_enabled) {
		rc = snprintf(buf, len, "ESD feature not enabled");
		goto output_mode;
	}

	switch (esd_config->status_mode) {
	case ESD_MODE_REG_READ:
		rc = snprintf(buf, len, "reg_read");
		break;
	case ESD_MODE_PANEL_TE:
		rc = snprintf(buf, len, "te_signal_check");
		break;
	case ESD_MODE_SW_SIM_FAILURE:
		rc = snprintf(buf, len, "esd_sw_sim_failure");
		break;
	case ESD_MODE_SW_SIM_SUCCESS:
		rc = snprintf(buf, len, "esd_sw_sim_success");
		break;
	default:
		rc = snprintf(buf, len, "invalid");
		break;
	}

output_mode:
	if (!rc) {
		rc = -EINVAL;
		goto error;
	}

	if (copy_to_user(user_buf, buf, len)) {
		rc = -EFAULT;
		goto error;
	}

	*ppos += len;

error:
	kfree(buf);
	return len;
}

static const struct file_operations dump_info_fops = {
	.open = simple_open,
	.read = debugfs_dump_info_read,
};

static const struct file_operations misr_data_fops = {
	.open = simple_open,
	.read = debugfs_misr_read,
	.write = debugfs_misr_setup,
};

static const struct file_operations esd_trigger_fops = {
	.open = simple_open,
	.write = debugfs_esd_trigger_check,
};

static const struct file_operations esd_check_mode_fops = {
	.open = simple_open,
	.write = debugfs_alter_esd_check_mode,
	.read = debugfs_read_esd_check_mode,
};

static int dsi_display_debugfs_init(struct dsi_display *display)
{
	int rc = 0;
	struct dentry *dir, *dump_file, *misr_data;
	char name[MAX_NAME_SIZE];
	int i;

	dir = debugfs_create_dir(display->name, NULL);
	if (IS_ERR_OR_NULL(dir)) {
		rc = PTR_ERR(dir);
		pr_err("[%s] debugfs create dir failed, rc = %d\n",
		       display->name, rc);
		goto error;
	}

	dump_file = debugfs_create_file("dump_info",
					0400,
					dir,
					display,
					&dump_info_fops);
	if (IS_ERR_OR_NULL(dump_file)) {
		rc = PTR_ERR(dump_file);
		pr_err("[%s] debugfs create dump info file failed, rc=%d\n",
		       display->name, rc);
		goto error_remove_dir;
	}

	dump_file = debugfs_create_file("esd_trigger",
					0644,
					dir,
					display,
					&esd_trigger_fops);
	if (IS_ERR_OR_NULL(dump_file)) {
		rc = PTR_ERR(dump_file);
		pr_err("[%s] debugfs for esd trigger file failed, rc=%d\n",
		       display->name, rc);
		goto error_remove_dir;
	}

	dump_file = debugfs_create_file("esd_check_mode",
					0644,
					dir,
					display,
					&esd_check_mode_fops);
	if (IS_ERR_OR_NULL(dump_file)) {
		rc = PTR_ERR(dump_file);
		pr_err("[%s] debugfs for esd check mode failed, rc=%d\n",
		       display->name, rc);
		goto error_remove_dir;
	}

	misr_data = debugfs_create_file("misr_data",
					0600,
					dir,
					display,
					&misr_data_fops);
	if (IS_ERR_OR_NULL(misr_data)) {
		rc = PTR_ERR(misr_data);
		pr_err("[%s] debugfs create misr data file failed, rc=%d\n",
		       display->name, rc);
		goto error_remove_dir;
	}

	display_for_each_ctrl(i, display) {
		struct msm_dsi_phy *phy = display->ctrl[i].phy;

		if (!phy || !phy->name)
			continue;

		snprintf(name, ARRAY_SIZE(name),
				"%s_allow_phy_power_off", phy->name);
		dump_file = debugfs_create_bool(name, 0600, dir,
				&phy->allow_phy_power_off);
		if (IS_ERR_OR_NULL(dump_file)) {
			rc = PTR_ERR(dump_file);
			pr_err("[%s] debugfs create %s failed, rc=%d\n",
			       display->name, name, rc);
			goto error_remove_dir;
		}

		snprintf(name, ARRAY_SIZE(name),
				"%s_regulator_min_datarate_bps", phy->name);
		dump_file = debugfs_create_u32(name, 0600, dir,
				&phy->regulator_min_datarate_bps);
		if (IS_ERR_OR_NULL(dump_file)) {
			rc = PTR_ERR(dump_file);
			pr_err("[%s] debugfs create %s failed, rc=%d\n",
			       display->name, name, rc);
			goto error_remove_dir;
		}
	}

	if (!debugfs_create_bool("ulps_feature_enable", 0600, dir,
			&display->panel->ulps_feature_enabled)) {
		pr_err("[%s] debugfs create ulps feature enable file failed\n",
		       display->name);
		goto error_remove_dir;
	}

	if (!debugfs_create_bool("ulps_suspend_feature_enable", 0600, dir,
			&display->panel->ulps_suspend_enabled)) {
		pr_err("[%s] debugfs create ulps-suspend feature enable file failed\n",
		       display->name);
		goto error_remove_dir;
	}

	if (!debugfs_create_bool("ulps_status", 0400, dir,
			&display->ulps_enabled)) {
		pr_err("[%s] debugfs create ulps status file failed\n",
		       display->name);
		goto error_remove_dir;
	}

	display->root = dir;
	dsi_parser_dbg_init(display->parser, dir);

	return rc;
error_remove_dir:
	debugfs_remove(dir);
error:
	return rc;
}

static int dsi_display_debugfs_deinit(struct dsi_display *display)
{
	debugfs_remove_recursive(display->root);

	return 0;
}
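
/*
 * For split-DSI panels driven by more than one controller, each controller
 * outputs only its share of the width, so the horizontal timing parameters
 * and the pixel clock of the mode are divided by the controller count before
 * being handed to an individual controller.
 */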
static void adjust_timing_by_ctrl_count(const struct dsi_display *display,
		struct dsi_display_mode *mode)
{
	if (display->ctrl_count > 1) {
		mode->timing.h_active /= display->ctrl_count;
		mode->timing.h_front_porch /= display->ctrl_count;
		mode->timing.h_sync_width /= display->ctrl_count;
		mode->timing.h_back_porch /= display->ctrl_count;
		mode->timing.h_skew /= display->ctrl_count;
		mode->pixel_clk_khz /= display->ctrl_count;
	}
}

static int dsi_display_is_ulps_req_valid(struct dsi_display *display,
		bool enable)
{
	/* TODO: make checks based on cont. splash */
	pr_debug("checking ulps req validity\n");

	if (atomic_read(&display->panel->esd_recovery_pending)) {
		pr_debug("%s: ESD recovery sequence underway\n", __func__);
		return false;
	}

	if (!dsi_panel_ulps_feature_enabled(display->panel) &&
	    !display->panel->ulps_suspend_enabled) {
		pr_debug("%s: ULPS feature is not enabled\n", __func__);
		return false;
	}

	if (!dsi_panel_initialized(display->panel) &&
	    !display->panel->ulps_suspend_enabled) {
		pr_debug("%s: panel not yet initialized\n", __func__);
		return false;
	}

	if (enable && display->ulps_enabled) {
		pr_debug("ULPS already enabled\n");
		return false;
	} else if (!enable && !display->ulps_enabled) {
		pr_debug("ULPS already disabled\n");
		return false;
	}

	/*
	 * No need to enter ULPS when transitioning from splash screen to
	 * boot animation since it is expected that the clocks would be turned
	 * right back on.
	 */
	if (enable && display->is_cont_splash_enabled)
		return false;

	return true;
}

/**
 * dsi_display_set_ulps() - set ULPS state for DSI lanes.
 * @dsi_display: DSI display handle.
 * @enable: enable/disable ULPS.
 *
 * ULPS can be enabled/disabled after DSI host engine is turned on.
 *
 * Return: error code.
 */
static int dsi_display_set_ulps(struct dsi_display *display, bool enable)
{
	int rc = 0;
	int i = 0;
	struct dsi_display_ctrl *m_ctrl, *ctrl;

	if (!display) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	if (!dsi_display_is_ulps_req_valid(display, enable)) {
		pr_debug("%s: skipping ULPS config, enable=%d\n",
			 __func__, enable);
		return 0;
	}

	m_ctrl = &display->ctrl[display->cmd_master_idx];
	/*
	 * ULPS entry-exit can be either through the DSI controller or
	 * the DSI PHY depending on hardware variation. For some chipsets,
	 * both controller version and phy version ulps entry-exit ops can
	 * be present. To handle such cases, send ulps request through PHY,
	 * if ulps request is handled in PHY, then no need to send request
	 * through controller.
	 */
	rc = dsi_phy_set_ulps(m_ctrl->phy, &display->config, enable,
			display->clamp_enabled);
	if (rc == DSI_PHY_ULPS_ERROR) {
		pr_err("Ulps PHY state change(%d) failed\n", enable);
		return -EINVAL;
	} else if (rc == DSI_PHY_ULPS_HANDLED) {
		display_for_each_ctrl(i, display) {
			ctrl = &display->ctrl[i];
			if (!ctrl->ctrl || (ctrl == m_ctrl))
				continue;

			rc = dsi_phy_set_ulps(ctrl->phy, &display->config,
					enable, display->clamp_enabled);
			if (rc == DSI_PHY_ULPS_ERROR) {
				pr_err("Ulps PHY state change(%d) failed\n",
				       enable);
				return -EINVAL;
			}
		}
	} else if (rc == DSI_PHY_ULPS_NOT_HANDLED) {
		rc = dsi_ctrl_set_ulps(m_ctrl->ctrl, enable);
		if (rc) {
			pr_err("Ulps controller state change(%d) failed\n",
			       enable);
			return rc;
		}

		display_for_each_ctrl(i, display) {
			ctrl = &display->ctrl[i];
			if (!ctrl->ctrl || (ctrl == m_ctrl))
				continue;

			rc = dsi_ctrl_set_ulps(ctrl->ctrl, enable);
			if (rc) {
				pr_err("Ulps controller state change(%d) failed\n",
				       enable);
				return rc;
			}
		}
	}

	display->ulps_enabled = enable;
	return 0;
}

/**
 * dsi_display_set_clamp() - set clamp state for DSI IO.
 * @dsi_display: DSI display handle.
 * @enable: enable/disable clamping.
 *
 * Return: error code.
 */
static int dsi_display_set_clamp(struct dsi_display *display, bool enable)
{
	int rc = 0;
	int i = 0;
	struct dsi_display_ctrl *m_ctrl, *ctrl;
	bool ulps_enabled = false;

	if (!display) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	m_ctrl = &display->ctrl[display->cmd_master_idx];
	ulps_enabled = display->ulps_enabled;

	/*
	 * Clamp control can be either through the DSI controller or
	 * the DSI PHY depending on hardware variation
	 */
	rc = dsi_ctrl_set_clamp_state(m_ctrl->ctrl, enable, ulps_enabled);
	if (rc) {
		pr_err("DSI ctrl clamp state change(%d) failed\n", enable);
		return rc;
	}

	rc = dsi_phy_set_clamp_state(m_ctrl->phy, enable);
	if (rc) {
		pr_err("DSI phy clamp state change(%d) failed\n", enable);
		return rc;
	}

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl || (ctrl == m_ctrl))
			continue;

		rc = dsi_ctrl_set_clamp_state(ctrl->ctrl, enable, ulps_enabled);
		if (rc) {
			pr_err("DSI Clamp state change(%d) failed\n", enable);
			return rc;
		}

		rc = dsi_phy_set_clamp_state(ctrl->phy, enable);
		if (rc) {
			pr_err("DSI phy clamp state change(%d) failed\n",
			       enable);
			return rc;
		}

		pr_debug("Clamps %s for ctrl%d\n",
			 enable ? "enabled" : "disabled", i);
	}

	display->clamp_enabled = enable;
	return 0;
}

/**
 * dsi_display_ctrl_setup() - setup DSI controller.
 * @dsi_display: DSI display handle.
 *
 * Return: error code.
 */
static int dsi_display_ctrl_setup(struct dsi_display *display)
{
	int rc = 0;
	int i = 0;
	struct dsi_display_ctrl *ctrl, *m_ctrl;

	if (!display) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	m_ctrl = &display->ctrl[display->cmd_master_idx];
	rc = dsi_ctrl_setup(m_ctrl->ctrl);
	if (rc) {
		pr_err("DSI controller setup failed\n");
		return rc;
	}

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl || (ctrl == m_ctrl))
			continue;

		rc = dsi_ctrl_setup(ctrl->ctrl);
		if (rc) {
			pr_err("DSI controller setup failed\n");
			return rc;
		}
	}

	return 0;
}

static int dsi_display_phy_enable(struct dsi_display *display);

/**
 * dsi_display_phy_idle_on() - enable DSI PHY while coming out of idle screen.
 * @dsi_display: DSI display handle.
 * @mmss_clamp: True if clamp is enabled.
 *
 * Return: error code.
 */
static int dsi_display_phy_idle_on(struct dsi_display *display,
		bool mmss_clamp)
{
	int rc = 0;
	int i = 0;
	struct dsi_display_ctrl *m_ctrl, *ctrl;

	if (!display) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	if (mmss_clamp && !display->phy_idle_power_off) {
		dsi_display_phy_enable(display);
		return 0;
	}

	m_ctrl = &display->ctrl[display->cmd_master_idx];
	rc = dsi_phy_idle_ctrl(m_ctrl->phy, true);
	if (rc) {
		pr_err("DSI PHY idle-on failed for master\n");
		return rc;
	}

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl || (ctrl == m_ctrl))
			continue;

		rc = dsi_phy_idle_ctrl(ctrl->phy, true);
		if (rc) {
			pr_err("DSI PHY idle-on failed\n");
			return rc;
		}
	}

	display->phy_idle_power_off = false;
	return 0;
}

/**
 * dsi_display_phy_idle_off() - disable DSI PHY while going to idle screen.
 * @dsi_display: DSI display handle.
 *
 * Return: error code.
 */
static int dsi_display_phy_idle_off(struct dsi_display *display)
{
	int rc = 0;
	int i = 0;
	struct dsi_display_ctrl *m_ctrl, *ctrl;

	if (!display) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	display_for_each_ctrl(i, display) {
		struct msm_dsi_phy *phy = display->ctrl[i].phy;

		if (!phy)
			continue;

		if (!phy->allow_phy_power_off) {
			pr_debug("phy doesn't support this feature\n");
			return 0;
		}
	}

	m_ctrl = &display->ctrl[display->cmd_master_idx];

	rc = dsi_phy_idle_ctrl(m_ctrl->phy, false);
	if (rc) {
		pr_err("[%s] failed to set PHY idle off for master, rc=%d\n",
		       display->name, rc);
		return rc;
	}

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl || (ctrl == m_ctrl))
			continue;

		rc = dsi_phy_idle_ctrl(ctrl->phy, false);
		if (rc) {
			pr_err("DSI PHY idle-off failed\n");
			return rc;
		}
	}

	display->phy_idle_power_off = true;
	return 0;
}

void dsi_display_enable_event(struct drm_connector *connector,
		struct dsi_display *display,
		uint32_t event_idx, struct dsi_event_cb_info *event_info,
		bool enable)
{
	uint32_t irq_status_idx = DSI_STATUS_INTERRUPT_COUNT;
	int i;

	if (!display) {
		pr_err("invalid display\n");
		return;
	}

	if (event_info)
		event_info->event_idx = event_idx;

	switch (event_idx) {
	case SDE_CONN_EVENT_VID_DONE:
		irq_status_idx = DSI_SINT_VIDEO_MODE_FRAME_DONE;
		break;
	case SDE_CONN_EVENT_CMD_DONE:
		irq_status_idx = DSI_SINT_CMD_FRAME_DONE;
		break;
	case SDE_CONN_EVENT_VID_FIFO_OVERFLOW:
	case SDE_CONN_EVENT_CMD_FIFO_UNDERFLOW:
		if (event_info) {
			display_for_each_ctrl(i, display)
				display->ctrl[i].ctrl->recovery_cb =
							*event_info;
		}
		break;
	default:
		/* nothing to do */
		pr_debug("[%s] unhandled event %d\n", display->name, event_idx);
		return;
	}

	if (enable) {
		display_for_each_ctrl(i, display)
			dsi_ctrl_enable_status_interrupt(
					display->ctrl[i].ctrl, irq_status_idx,
					event_info);
	} else {
		display_for_each_ctrl(i, display)
			dsi_ctrl_disable_status_interrupt(
					display->ctrl[i].ctrl, irq_status_idx);
	}
}

/**
 * dsi_config_host_engine_state_for_cont_splash() - update host engine state
 *                                                  during continuous splash.
 * @display: Handle to dsi display
 */
static void dsi_config_host_engine_state_for_cont_splash(
		struct dsi_display *display)
{
	int i;
	struct dsi_display_ctrl *ctrl;
	enum dsi_engine_state host_state = DSI_CTRL_ENGINE_ON;

	/* Sequence does not matter for split dsi usecases */
	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl)
			continue;

		dsi_ctrl_update_host_engine_state_for_cont_splash(ctrl->ctrl,
						host_state);
	}
}

static int dsi_display_ctrl_power_on(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *ctrl;

	/* Sequence does not matter for split dsi usecases */
	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl)
			continue;

		rc = dsi_ctrl_set_power_state(ctrl->ctrl,
				DSI_CTRL_POWER_VREG_ON);
		if (rc) {
			pr_err("[%s] Failed to set power state, rc=%d\n",
			       ctrl->ctrl->name, rc);
			goto error;
		}
	}

	return rc;
error:
	for (i = i - 1; i >= 0; i--) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl)
			continue;

		(void)dsi_ctrl_set_power_state(ctrl->ctrl,
				DSI_CTRL_POWER_VREG_OFF);
	}

	return rc;
}

static int dsi_display_ctrl_power_off(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *ctrl;

	/* Sequence does not matter for split dsi usecases */
	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl)
			continue;

		rc = dsi_ctrl_set_power_state(ctrl->ctrl,
				DSI_CTRL_POWER_VREG_OFF);
		if (rc) {
			pr_err("[%s] Failed to power off, rc=%d\n",
			       ctrl->ctrl->name, rc);
			goto error;
		}
	}
error:
	return rc;
}
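
/*
 * Parse the boot command line override string for this display: a ":swte"
 * token sets sw_te_using_wd, ":config<n>" overrides the topology/config
 * index and ":timing<n>" overrides the timing index. A missing or malformed
 * value resets the override(s) to NO_OVERRIDE.
 */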
static void dsi_display_parse_cmdline_topology(struct dsi_display *display,
		unsigned int display_type)
{
	char *boot_str = NULL;
	char *str = NULL;
	char *sw_te = NULL;
	unsigned long cmdline_topology = NO_OVERRIDE;
	unsigned long cmdline_timing = NO_OVERRIDE;

	if (display_type >= MAX_DSI_ACTIVE_DISPLAY) {
		pr_err("display_type=%d not supported\n", display_type);
		goto end;
	}

	if (display_type == DSI_PRIMARY)
		boot_str = dsi_display_primary;
	else
		boot_str = dsi_display_secondary;

	sw_te = strnstr(boot_str, ":swte", strlen(boot_str));
	if (sw_te)
		display->sw_te_using_wd = true;

	str = strnstr(boot_str, ":config", strlen(boot_str));
	if (!str)
		goto end;

	if (kstrtol(str + strlen(":config"), INT_BASE_10,
			(unsigned long *)&cmdline_topology)) {
		pr_err("invalid config index override: %s\n", boot_str);
		goto end;
	}

	str = strnstr(boot_str, ":timing", strlen(boot_str));
	if (!str)
		goto end;

	if (kstrtol(str + strlen(":timing"), INT_BASE_10,
			(unsigned long *)&cmdline_timing)) {
		pr_err("invalid timing index override: %s. resetting both timing and config\n",
		       boot_str);
		cmdline_topology = NO_OVERRIDE;
		goto end;
	}

	pr_debug("successfully parsed command line topology and timing\n");
end:
	display->cmdline_topology = cmdline_topology;
	display->cmdline_timing = cmdline_timing;
}

/**
 * dsi_display_parse_boot_display_selection() - Parse DSI boot display name
 *
 * Return: returns error status
 */
static int dsi_display_parse_boot_display_selection(void)
{
	char *pos = NULL;
	char disp_buf[MAX_CMDLINE_PARAM_LEN] = {'\0'};
	int i, j;

	for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
		strlcpy(disp_buf, boot_displays[i].boot_param,
			MAX_CMDLINE_PARAM_LEN);

		pos = strnstr(disp_buf, ":", MAX_CMDLINE_PARAM_LEN);

		/* Use ':' as a delimiter to retrieve the display name */
		if (!pos) {
			pr_debug("display name [%s] is not valid\n", disp_buf);
			continue;
		}

		for (j = 0; (disp_buf + j) < pos; j++)
			boot_displays[i].name[j] = *(disp_buf + j);

		boot_displays[i].name[j] = '\0';
		boot_displays[i].boot_disp_en = true;
	}

	return 0;
}

static int dsi_display_phy_power_on(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *ctrl;

	/* Sequence does not matter for split dsi usecases */
	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl)
			continue;

		rc = dsi_phy_set_power_state(ctrl->phy, true);
		if (rc) {
			pr_err("[%s] Failed to set power state, rc=%d\n",
			       ctrl->phy->name, rc);
			goto error;
		}
	}

	return rc;
error:
	for (i = i - 1; i >= 0; i--) {
		ctrl = &display->ctrl[i];
		if (!ctrl->phy)
			continue;

		(void)dsi_phy_set_power_state(ctrl->phy, false);
	}

	return rc;
}

static int dsi_display_phy_power_off(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *ctrl;

	/* Sequence does not matter for split dsi usecases */
	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->phy)
			continue;

		rc = dsi_phy_set_power_state(ctrl->phy, false);
		if (rc) {
			pr_err("[%s] Failed to power off, rc=%d\n",
			       ctrl->phy->name, rc);
			goto error;
		}
	}
error:
	return rc;
}

static int dsi_display_set_clk_src(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *m_ctrl, *ctrl;

	/*
	 * In case of split DSI usecases, the clock for master controller should
	 * be enabled before the other controller. Master controller in the
	 * clock context refers to the controller that sources the clock.
	 */
	m_ctrl = &display->ctrl[display->clk_master_idx];

	rc = dsi_ctrl_set_clock_source(m_ctrl->ctrl,
			&display->clock_info.src_clks);
	if (rc) {
		pr_err("[%s] failed to set source clocks for master, rc=%d\n",
		       display->name, rc);
		return rc;
	}

	/* Turn on rest of the controllers */
	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl || (ctrl == m_ctrl))
			continue;

		rc = dsi_ctrl_set_clock_source(ctrl->ctrl,
				&display->clock_info.src_clks);
		if (rc) {
			pr_err("[%s] failed to set source clocks, rc=%d\n",
			       display->name, rc);
			return rc;
		}
	}

	return 0;
}

static int dsi_display_phy_reset_config(struct dsi_display *display,
		bool enable)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *ctrl;

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		rc = dsi_ctrl_phy_reset_config(ctrl->ctrl, enable);
		if (rc) {
			pr_err("[%s] failed to %s phy reset, rc=%d\n",
			       display->name, enable ? "mask" : "unmask", rc);
			return rc;
		}
	}

	return 0;
}

static void dsi_display_toggle_resync_fifo(struct dsi_display *display)
{
	struct dsi_display_ctrl *ctrl;
	int i;

	if (!display)
		return;

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		dsi_phy_toggle_resync_fifo(ctrl->phy);
	}

	/*
	 * After retime buffer synchronization we need to turn off the
	 * clk_en_sel bit on each phy.
	 */
	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		dsi_phy_reset_clk_en_sel(ctrl->phy);
	}
}

static int dsi_display_ctrl_update(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *ctrl;

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		rc = dsi_ctrl_host_timing_update(ctrl->ctrl);
		if (rc) {
			pr_err("[%s] failed to update host_%d, rc=%d\n",
			       display->name, i, rc);
			goto error_host_deinit;
		}
	}

	return 0;
error_host_deinit:
	for (i = i - 1; i >= 0; i--) {
		ctrl = &display->ctrl[i];
		(void)dsi_ctrl_host_deinit(ctrl->ctrl);
	}

	return rc;
}

static int dsi_display_ctrl_init(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *ctrl;

	/* when ULPS suspend feature is enabled, we will keep the lanes in
	 * ULPS during suspend state and clamp DSI phy. Hence while resuming
	 * we will program DSI controller as part of core clock enable.
	 * After that we should not re-configure DSI controller again here for
	 * usecases where we are resuming from ulps suspend as it might put
	 * the HW in bad state.
	 */
	if (!display->panel->ulps_suspend_enabled || !display->ulps_enabled) {
		display_for_each_ctrl(i, display) {
			ctrl = &display->ctrl[i];
			rc = dsi_ctrl_host_init(ctrl->ctrl,
					display->is_cont_splash_enabled);
			if (rc) {
				pr_err("[%s] failed to init host_%d, rc=%d\n",
				       display->name, i, rc);
				goto error_host_deinit;
			}
		}
	} else {
		display_for_each_ctrl(i, display) {
			ctrl = &display->ctrl[i];
			rc = dsi_ctrl_update_host_init_state(ctrl->ctrl, true);
			if (rc)
				pr_debug("host init update failed rc=%d\n", rc);
		}
	}

	return rc;
error_host_deinit:
	for (i = i - 1; i >= 0; i--) {
		ctrl = &display->ctrl[i];
		(void)dsi_ctrl_host_deinit(ctrl->ctrl);
	}

	return rc;
}

static int dsi_display_ctrl_deinit(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *ctrl;

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		rc = dsi_ctrl_host_deinit(ctrl->ctrl);
		if (rc) {
			pr_err("[%s] failed to deinit host_%d, rc=%d\n",
			       display->name, i, rc);
		}
	}

	return rc;
}
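
/*
 * Turn on the host engine on the command master first and then on the
 * remaining controllers; if any slave fails, the master engine is turned
 * back off. Skipped entirely when continuous splash already left the host
 * engines running.
 */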
static int dsi_display_ctrl_host_enable(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *m_ctrl, *ctrl;

	/* Host engine states are already taken care for
	 * continuous splash case
	 */
	if (display->is_cont_splash_enabled) {
		pr_debug("cont splash enabled, host enable not required\n");
		return 0;
	}

	m_ctrl = &display->ctrl[display->cmd_master_idx];

	rc = dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
	if (rc) {
		pr_err("[%s] failed to enable host engine, rc=%d\n",
		       display->name, rc);
		goto error;
	}

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl || (ctrl == m_ctrl))
			continue;

		rc = dsi_ctrl_set_host_engine_state(ctrl->ctrl,
				DSI_CTRL_ENGINE_ON);
		if (rc) {
			pr_err("[%s] failed to enable slave host engine, rc=%d\n",
			       display->name, rc);
			goto error_disable_master;
		}
	}

	return rc;
error_disable_master:
	(void)dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
error:
	return rc;
}

static int dsi_display_ctrl_host_disable(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *m_ctrl, *ctrl;

	m_ctrl = &display->ctrl[display->cmd_master_idx];

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl || (ctrl == m_ctrl))
			continue;

		rc = dsi_ctrl_set_host_engine_state(ctrl->ctrl,
				DSI_CTRL_ENGINE_OFF);
		if (rc)
			pr_err("[%s] failed to disable host engine, rc=%d\n",
			       display->name, rc);
	}

	rc = dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
	if (rc) {
		pr_err("[%s] failed to disable host engine, rc=%d\n",
		       display->name, rc);
		goto error;
	}

error:
	return rc;
}

static int dsi_display_vid_engine_enable(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *m_ctrl, *ctrl;

	m_ctrl = &display->ctrl[display->video_master_idx];

	rc = dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
	if (rc) {
		pr_err("[%s] failed to enable vid engine, rc=%d\n",
		       display->name, rc);
		goto error;
	}

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl || (ctrl == m_ctrl))
			continue;

		rc = dsi_ctrl_set_vid_engine_state(ctrl->ctrl,
				DSI_CTRL_ENGINE_ON);
		if (rc) {
			pr_err("[%s] failed to enable vid engine, rc=%d\n",
			       display->name, rc);
			goto error_disable_master;
		}
	}

	return rc;
error_disable_master:
	(void)dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
error:
	return rc;
}

static int dsi_display_vid_engine_disable(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *m_ctrl, *ctrl;

	m_ctrl = &display->ctrl[display->video_master_idx];

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl || (ctrl == m_ctrl))
			continue;

		rc = dsi_ctrl_set_vid_engine_state(ctrl->ctrl,
				DSI_CTRL_ENGINE_OFF);
		if (rc)
			pr_err("[%s] failed to disable vid engine, rc=%d\n",
			       display->name, rc);
	}

	rc = dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
	if (rc)
		pr_err("[%s] failed to disable master vid engine, rc=%d\n",
		       display->name, rc);

	return rc;
}

static int dsi_display_phy_enable(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *m_ctrl, *ctrl;
	enum dsi_phy_pll_source m_src = DSI_PLL_SOURCE_STANDALONE;

	m_ctrl = &display->ctrl[display->clk_master_idx];
	if (display->ctrl_count > 1)
		m_src = DSI_PLL_SOURCE_NATIVE;

	rc = dsi_phy_enable(m_ctrl->phy,
			    &display->config,
			    m_src,
			    true,
			    display->is_cont_splash_enabled);
	if (rc) {
		pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
		       display->name, rc);
		goto error;
	}

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl || (ctrl == m_ctrl))
			continue;

		rc = dsi_phy_enable(ctrl->phy,
				    &display->config,
				    DSI_PLL_SOURCE_NON_NATIVE,
				    true,
				    display->is_cont_splash_enabled);
		if (rc) {
			pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
			       display->name, rc);
			goto error_disable_master;
		}
	}

	return rc;
error_disable_master:
	(void)dsi_phy_disable(m_ctrl->phy);
error:
	return rc;
}

static int dsi_display_phy_disable(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *m_ctrl, *ctrl;

	m_ctrl = &display->ctrl[display->clk_master_idx];

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl || (ctrl == m_ctrl))
			continue;

		rc = dsi_phy_disable(ctrl->phy);
		if (rc)
			pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
			       display->name, rc);
	}

	rc = dsi_phy_disable(m_ctrl->phy);
	if (rc)
		pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
		       display->name, rc);

	return rc;
}

static int dsi_display_wake_up(struct dsi_display *display)
{
	return 0;
}
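
/*
 * Broadcast a command to all controllers of a split-DSI panel: the command is
 * first queued on every controller with the trigger deferred (the command
 * master additionally carries the BROADCAST_MASTER flag), then the slave
 * controllers are triggered and the master is triggered last so all links
 * send in lockstep.
 */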
static int dsi_display_broadcast_cmd(struct dsi_display *display,
		const struct mipi_dsi_msg *msg)
{
	int rc = 0;
	u32 flags, m_flags;
	struct dsi_display_ctrl *ctrl, *m_ctrl;
	int i;

	m_flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_BROADCAST_MASTER |
		   DSI_CTRL_CMD_DEFER_TRIGGER | DSI_CTRL_CMD_FETCH_MEMORY);
	flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_DEFER_TRIGGER |
		 DSI_CTRL_CMD_FETCH_MEMORY);

	if ((msg->flags & MIPI_DSI_MSG_LASTCOMMAND)) {
		flags |= DSI_CTRL_CMD_LAST_COMMAND;
		m_flags |= DSI_CTRL_CMD_LAST_COMMAND;
	}

	/*
	 * 1. Setup commands in FIFO
	 * 2. Trigger commands
	 */
	m_ctrl = &display->ctrl[display->cmd_master_idx];
	rc = dsi_ctrl_cmd_transfer(m_ctrl->ctrl, msg, m_flags);
	if (rc) {
		pr_err("[%s] cmd transfer failed on master, rc=%d\n",
		       display->name, rc);
		goto error;
	}

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (ctrl == m_ctrl)
			continue;

		rc = dsi_ctrl_cmd_transfer(ctrl->ctrl, msg, flags);
		if (rc) {
			pr_err("[%s] cmd transfer failed, rc=%d\n",
			       display->name, rc);
			goto error;
		}

		rc = dsi_ctrl_cmd_tx_trigger(ctrl->ctrl, flags);
		if (rc) {
			pr_err("[%s] cmd trigger failed, rc=%d\n",
			       display->name, rc);
			goto error;
		}
	}

	rc = dsi_ctrl_cmd_tx_trigger(m_ctrl->ctrl, m_flags);
	if (rc) {
		pr_err("[%s] cmd trigger failed for master, rc=%d\n",
		       display->name, rc);
		goto error;
	}

error:
	return rc;
}

static int dsi_display_phy_sw_reset(struct dsi_display *display)
{
	int rc = 0;
	int i;
	struct dsi_display_ctrl *m_ctrl, *ctrl;

	/* For continuous splash use case ctrl states are updated
	 * separately and hence we do an early return
	 */
	if (display->is_cont_splash_enabled) {
		pr_debug("cont splash enabled, phy sw reset not required\n");
		return 0;
	}

	m_ctrl = &display->ctrl[display->cmd_master_idx];

	rc = dsi_ctrl_phy_sw_reset(m_ctrl->ctrl);
	if (rc) {
		pr_err("[%s] failed to reset phy, rc=%d\n", display->name, rc);
		goto error;
	}

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl->ctrl || (ctrl == m_ctrl))
			continue;

		rc = dsi_ctrl_phy_sw_reset(ctrl->ctrl);
		if (rc) {
			pr_err("[%s] failed to reset phy, rc=%d\n",
			       display->name, rc);
			goto error;
		}
	}

error:
	return rc;
}

static int dsi_host_attach(struct mipi_dsi_host *host,
			   struct mipi_dsi_device *dsi)
{
	return 0;
}

static int dsi_host_detach(struct mipi_dsi_host *host,
			   struct mipi_dsi_device *dsi)
{
	return 0;
}
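
/*
 * mipi_dsi_host transfer hook: turns on all DSI clocks, enables the command
 * engine, and then either broadcasts the message to every controller (split
 * DSI without MIPI_DSI_MSG_UNICAST) or sends it to the single controller
 * selected by the message. Clocks and the command engine are released again
 * on the way out, and the call is skipped while ESD recovery is pending.
 */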
static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
				 const struct mipi_dsi_msg *msg)
{
	struct dsi_display *display;
	int rc = 0, ret = 0;

	if (!host || !msg) {
		pr_err("Invalid params\n");
		return 0;
	}

	display = to_dsi_display(host);

	/* Avoid sending DCS commands when ESD recovery is pending */
	if (atomic_read(&display->panel->esd_recovery_pending)) {
		pr_debug("ESD recovery pending\n");
		return 0;
	}

	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
			DSI_ALL_CLKS, DSI_CLK_ON);
	if (rc) {
		pr_err("[%s] failed to enable all DSI clocks, rc=%d\n",
		       display->name, rc);
		goto error;
	}

	rc = dsi_display_wake_up(display);
	if (rc) {
		pr_err("[%s] failed to wake up display, rc=%d\n",
		       display->name, rc);
		goto error_disable_clks;
	}

	rc = dsi_display_cmd_engine_enable(display);
	if (rc) {
		pr_err("[%s] failed to enable cmd engine, rc=%d\n",
		       display->name, rc);
		goto error_disable_clks;
	}

	if (display->tx_cmd_buf == NULL) {
		rc = dsi_host_alloc_cmd_tx_buffer(display);
		if (rc) {
			pr_err("failed to allocate cmd tx buffer memory\n");
			goto error_disable_cmd_engine;
		}
	}

	if (display->ctrl_count > 1 && !(msg->flags & MIPI_DSI_MSG_UNICAST)) {
		rc = dsi_display_broadcast_cmd(display, msg);
		if (rc) {
			pr_err("[%s] cmd broadcast failed, rc=%d\n",
			       display->name, rc);
			goto error_disable_cmd_engine;
		}
	} else {
		int ctrl_idx = (msg->flags & MIPI_DSI_MSG_UNICAST) ?
				msg->ctrl : 0;

		rc = dsi_ctrl_cmd_transfer(display->ctrl[ctrl_idx].ctrl, msg,
				DSI_CTRL_CMD_FETCH_MEMORY);
		if (rc) {
			pr_err("[%s] cmd transfer failed, rc=%d\n",
			       display->name, rc);
			goto error_disable_cmd_engine;
		}
	}

error_disable_cmd_engine:
	ret = dsi_display_cmd_engine_disable(display);
	if (ret) {
		pr_err("[%s] failed to disable DSI cmd engine, rc=%d\n",
		       display->name, ret);
	}
error_disable_clks:
	ret = dsi_display_clk_ctrl(display->dsi_clk_handle,
			DSI_ALL_CLKS, DSI_CLK_OFF);
	if (ret) {
		pr_err("[%s] failed to disable all DSI clocks, rc=%d\n",
		       display->name, ret);
	}
error:
	return rc;
}

static struct mipi_dsi_host_ops dsi_host_ops = {
	.attach = dsi_host_attach,
	.detach = dsi_host_detach,
	.transfer = dsi_host_transfer,
};

static int dsi_display_mipi_host_init(struct dsi_display *display)
{
	int rc = 0;
	struct mipi_dsi_host *host = &display->host;

	host->dev = &display->pdev->dev;
	host->ops = &dsi_host_ops;

	rc = mipi_dsi_host_register(host);
	if (rc) {
		pr_err("[%s] failed to register mipi dsi host, rc=%d\n",
		       display->name, rc);
		goto error;
	}

error:
	return rc;
}

static int dsi_display_mipi_host_deinit(struct dsi_display *display)
{
	int rc = 0;
	struct mipi_dsi_host *host = &display->host;

	mipi_dsi_host_unregister(host);

	host->dev = NULL;
	host->ops = NULL;

	return rc;
}

static int dsi_display_clocks_deinit(struct dsi_display *display)
{
	int rc = 0;
	struct dsi_clk_link_set *src = &display->clock_info.src_clks;
	struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
	struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;

	if (src->byte_clk) {
		devm_clk_put(&display->pdev->dev, src->byte_clk);
		src->byte_clk = NULL;
	}

	if (src->pixel_clk) {
		devm_clk_put(&display->pdev->dev, src->pixel_clk);
		src->pixel_clk = NULL;
	}

	if (mux->byte_clk) {
		devm_clk_put(&display->pdev->dev, mux->byte_clk);
		mux->byte_clk = NULL;
	}

	if (mux->pixel_clk) {
		devm_clk_put(&display->pdev->dev, mux->pixel_clk);
		mux->pixel_clk = NULL;
	}

	if (shadow->byte_clk) {
		devm_clk_put(&display->pdev->dev, shadow->byte_clk);
		shadow->byte_clk = NULL;
	}

	if (shadow->pixel_clk) {
		devm_clk_put(&display->pdev->dev, shadow->pixel_clk);
		shadow->pixel_clk = NULL;
	}

	return rc;
}

static bool dsi_display_check_prefix(const char *clk_prefix,
				     const char *clk_name)
{
	return !!strnstr(clk_name, clk_prefix, strlen(clk_name));
}

static int dsi_display_get_clocks_count(struct dsi_display *display,
					char *dsi_clk_name)
{
	if (display->fw)
		return dsi_parser_count_strings(display->parser_node,
				dsi_clk_name);
	else
		return of_property_count_strings(display->panel_node,
				dsi_clk_name);
}

static void dsi_display_get_clock_name(struct dsi_display *display,
				       char *dsi_clk_name, int index,
				       const char **clk_name)
{
	if (display->fw)
		dsi_parser_read_string_index(display->parser_node,
				dsi_clk_name, index, clk_name);
	else
		of_property_read_string_index(display->panel_node,
				dsi_clk_name, index, clk_name);
}
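
/*
 * Look up the clock list for this display ("qcom,dsi-select-clocks" for the
 * primary display, "qcom,dsi-select-sec-clocks" for the secondary one) and
 * sort each named clock into the source, mux or shadow link set based on its
 * name prefix. Any lookup failure releases the clocks acquired so far.
 */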
static int dsi_display_clocks_init(struct dsi_display *display)
{
	int i, rc = 0, num_clk = 0;
	const char *clk_name;
	const char *src_byte = "src_byte", *src_pixel = "src_pixel";
	const char *mux_byte = "mux_byte", *mux_pixel = "mux_pixel";
	const char *shadow_byte = "shadow_byte", *shadow_pixel = "shadow_pixel";
	struct clk *dsi_clk;
	struct dsi_clk_link_set *src = &display->clock_info.src_clks;
	struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
	struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
	char *dsi_clock_name;

	if (!strcmp(display->display_type, "primary"))
		dsi_clock_name = "qcom,dsi-select-clocks";
	else
		dsi_clock_name = "qcom,dsi-select-sec-clocks";

	num_clk = dsi_display_get_clocks_count(display, dsi_clock_name);

	pr_debug("clk count=%d\n", num_clk);

	for (i = 0; i < num_clk; i++) {
		dsi_display_get_clock_name(display, dsi_clock_name, i,
				&clk_name);

		pr_debug("clock name:%s\n", clk_name);

		dsi_clk = devm_clk_get(&display->pdev->dev, clk_name);
		if (IS_ERR_OR_NULL(dsi_clk)) {
			rc = PTR_ERR(dsi_clk);
			pr_err("failed to get %s, rc=%d\n", clk_name, rc);
			goto error;
		}

		if (dsi_display_check_prefix(src_byte, clk_name)) {
			src->byte_clk = dsi_clk;
			continue;
		}

		if (dsi_display_check_prefix(src_pixel, clk_name)) {
			src->pixel_clk = dsi_clk;
			continue;
		}

		if (dsi_display_check_prefix(mux_byte, clk_name)) {
			mux->byte_clk = dsi_clk;
			continue;
		}

		if (dsi_display_check_prefix(mux_pixel, clk_name)) {
			mux->pixel_clk = dsi_clk;
			continue;
		}

		if (dsi_display_check_prefix(shadow_byte, clk_name)) {
			shadow->byte_clk = dsi_clk;
			continue;
		}

		if (dsi_display_check_prefix(shadow_pixel, clk_name)) {
			shadow->pixel_clk = dsi_clk;
			continue;
		}
	}

	return 0;
error:
	(void)dsi_display_clocks_deinit(display);
	return rc;
}
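
/*
 * Clock manager callback: routes a clock request from either the MDP client
 * or the DSI client to the matching clock handle and applies the requested
 * clock type/state through dsi_display_clk_ctrl().
 */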
static int dsi_display_clk_ctrl_cb(void *priv,
		struct dsi_clk_ctrl_info clk_state_info)
{
	int rc = 0;
	struct dsi_display *display = NULL;
	void *clk_handle = NULL;

	if (!priv) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	display = priv;

	if (clk_state_info.client == DSI_CLK_REQ_MDP_CLIENT) {
		clk_handle = display->mdp_clk_handle;
	} else if (clk_state_info.client == DSI_CLK_REQ_DSI_CLIENT) {
		clk_handle = display->dsi_clk_handle;
	} else {
		pr_err("invalid clk handle, return error\n");
		return -EINVAL;
	}

	/*
	 * TODO: Wait for CMD_MDP_DONE interrupt if MDP client tries
	 * to turn off DSI clocks.
	 */
	rc = dsi_display_clk_ctrl(clk_handle,
			clk_state_info.clk_type, clk_state_info.clk_state);
	if (rc) {
		pr_err("[%s] failed to %d DSI %d clocks, rc=%d\n",
		       display->name, clk_state_info.clk_state,
		       clk_state_info.clk_type, rc);
		return rc;
	}

	return 0;
}

static void dsi_display_ctrl_isr_configure(struct dsi_display *display, bool en)
{
	int i;
	struct dsi_display_ctrl *ctrl;

	if (!display)
		return;

	display_for_each_ctrl(i, display) {
		ctrl = &display->ctrl[i];
		if (!ctrl)
			continue;

		dsi_ctrl_isr_configure(ctrl->ctrl, en);
	}
}
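
/*
 * Callback invoked just before DSI clocks are turned off. When the low power
 * link clock goes down it disables the continuous clock (if forced) and,
 * where allowed, enters ULPS; when the high speed link clock goes down it
 * disables PHY clock gating; and when the core clock goes down it either
 * clamps the DSI pads (idle power collapse / ULPS-during-suspend) or exits
 * ULPS, then stops serving controller interrupts and caches the MISR values.
 */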
int dsi_pre_clkoff_cb(void *priv,
		enum dsi_clk_type clk,
		enum dsi_lclk_type l_type,
		enum dsi_clk_state new_state)
{
	int rc = 0, i;
	struct dsi_display *display = priv;
	struct dsi_display_ctrl *ctrl;

	if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF) &&
	    (l_type & DSI_LINK_LP_CLK)) {
		/*
		 * If continuous clock is enabled then disable it
		 * before entering into ULPS Mode.
		 */
		if (display->panel->host_config.force_hs_clk_lane)
			_dsi_display_continuous_clk_ctrl(display, false);

		/*
		 * If ULPS feature is enabled, enter ULPS first.
		 * However, when blanking the panel, we should enter ULPS
		 * only if ULPS during suspend feature is enabled.
		 */
		if (!dsi_panel_initialized(display->panel)) {
			if (display->panel->ulps_suspend_enabled)
				rc = dsi_display_set_ulps(display, true);
		} else if (dsi_panel_ulps_feature_enabled(display->panel)) {
			rc = dsi_display_set_ulps(display, true);
		}

		if (rc)
			pr_err("%s: failed to enable ulps, rc = %d\n",
			       __func__, rc);
	}

	if ((clk & DSI_LINK_CLK) && (new_state == DSI_CLK_OFF) &&
	    (l_type & DSI_LINK_HS_CLK)) {
		/*
		 * PHY clock gating should be disabled before the PLL and the
		 * branch clocks are turned off. Otherwise, it is possible that
		 * the clock RCGs may not be turned off correctly resulting
		 * in clock warnings.
		 */
		rc = dsi_display_config_clk_gating(display, false);
		if (rc)
			pr_err("[%s] failed to disable clk gating, rc=%d\n",
			       display->name, rc);
	}

	if ((clk & DSI_CORE_CLK) && (new_state == DSI_CLK_OFF)) {
		/*
		 * Enable DSI clamps only if entering idle power collapse or
		 * when ULPS during suspend is enabled.
		 */
		if (dsi_panel_initialized(display->panel) ||
		    display->panel->ulps_suspend_enabled) {
			dsi_display_phy_idle_off(display);
			rc = dsi_display_set_clamp(display, true);
			if (rc)
				pr_err("%s: Failed to enable dsi clamps. rc=%d\n",
				       __func__, rc);

			rc = dsi_display_phy_reset_config(display, false);
			if (rc)
				pr_err("%s: Failed to reset phy, rc=%d\n",
				       __func__, rc);
		} else {
			/* Make sure that controller is not in ULPS state when
			 * the DSI link is not active.
			 */
			rc = dsi_display_set_ulps(display, false);
			if (rc)
				pr_err("%s: failed to disable ulps. rc=%d\n",
				       __func__, rc);
		}

		/* dsi will not be able to serve irqs from here on */
		dsi_display_ctrl_irq_update(display, false);

		/* cache the MISR values */
		display_for_each_ctrl(i, display) {
			ctrl = &display->ctrl[i];
			if (!ctrl->ctrl)
				continue;
			dsi_ctrl_cache_misr(ctrl->ctrl);
		}
	}

	return rc;
}
  2619. int dsi_post_clkon_cb(void *priv,
  2620. enum dsi_clk_type clk,
  2621. enum dsi_lclk_type l_type,
  2622. enum dsi_clk_state curr_state)
  2623. {
  2624. int rc = 0;
  2625. struct dsi_display *display = priv;
  2626. bool mmss_clamp = false;
  2627. if ((clk & DSI_LINK_CLK) && (l_type & DSI_LINK_LP_CLK)) {
  2628. mmss_clamp = display->clamp_enabled;
  2629. /*
  2630. * controller setup is needed if coming out of idle
  2631. * power collapse with clamps enabled.
  2632. */
  2633. if (mmss_clamp)
  2634. dsi_display_ctrl_setup(display);
  2635. /*
  2636. * Phy setup is needed if coming out of idle
  2637. * power collapse with clamps enabled.
  2638. */
  2639. if (display->phy_idle_power_off || mmss_clamp)
  2640. dsi_display_phy_idle_on(display, mmss_clamp);
  2641. if (display->ulps_enabled && mmss_clamp) {
  2642. /*
  2643. * ULPS Entry Request. This is needed if the lanes were
  2644. * in ULPS prior to power collapse, since after
  2645. * power collapse and reset, the DSI controller resets
  2646. * back to idle state and not ULPS. This ulps entry
  2647. * request will transition the state of the DSI
  2648. * controller to ULPS which will match the state of the
  2649. * DSI phy. This needs to be done prior to disabling
  2650. * the DSI clamps.
  2651. *
  2652. * Also, reset the ulps flag so that ulps_config
  2653. * function would reconfigure the controller state to
  2654. * ULPS.
  2655. */
  2656. display->ulps_enabled = false;
  2657. rc = dsi_display_set_ulps(display, true);
  2658. if (rc) {
  2659. pr_err("%s: Failed to enter ULPS. rc=%d\n",
  2660. __func__, rc);
  2661. goto error;
  2662. }
  2663. }
  2664. rc = dsi_display_phy_reset_config(display, true);
  2665. if (rc) {
  2666. pr_err("%s: Failed to reset phy, rc=%d\n",
  2667. __func__, rc);
  2668. goto error;
  2669. }
  2670. rc = dsi_display_set_clamp(display, false);
  2671. if (rc) {
  2672. pr_err("%s: Failed to disable dsi clamps. rc=%d\n",
  2673. __func__, rc);
  2674. goto error;
  2675. }
  2676. }
  2677. if ((clk & DSI_LINK_CLK) && (l_type & DSI_LINK_HS_CLK)) {
  2678. /*
2679. * Toggle the resync FIFO every time the clock changes, except
  2680. * when cont-splash screen transition is going on.
  2681. * Toggling resync FIFO during cont splash transition
  2682. * can lead to blinks on the display.
  2683. */
  2684. if (!display->is_cont_splash_enabled)
  2685. dsi_display_toggle_resync_fifo(display);
  2686. if (display->ulps_enabled) {
  2687. rc = dsi_display_set_ulps(display, false);
  2688. if (rc) {
  2689. pr_err("%s: failed to disable ulps, rc= %d\n",
  2690. __func__, rc);
  2691. goto error;
  2692. }
  2693. }
  2694. if (display->panel->host_config.force_hs_clk_lane)
  2695. _dsi_display_continuous_clk_ctrl(display, true);
  2696. rc = dsi_display_config_clk_gating(display, true);
  2697. if (rc) {
  2698. pr_err("[%s] failed to enable clk gating %d\n",
  2699. display->name, rc);
  2700. goto error;
  2701. }
  2702. }
  2703. /* enable dsi to serve irqs */
  2704. if (clk & DSI_CORE_CLK)
  2705. dsi_display_ctrl_irq_update(display, true);
  2706. error:
  2707. return rc;
  2708. }
  2709. int dsi_post_clkoff_cb(void *priv,
  2710. enum dsi_clk_type clk_type,
  2711. enum dsi_lclk_type l_type,
  2712. enum dsi_clk_state curr_state)
  2713. {
  2714. int rc = 0;
  2715. struct dsi_display *display = priv;
  2716. if (!display) {
  2717. pr_err("%s: Invalid arg\n", __func__);
  2718. return -EINVAL;
  2719. }
  2720. if ((clk_type & DSI_CORE_CLK) &&
  2721. (curr_state == DSI_CLK_OFF)) {
  2722. rc = dsi_display_phy_power_off(display);
  2723. if (rc)
  2724. pr_err("[%s] failed to power off PHY, rc=%d\n",
  2725. display->name, rc);
  2726. rc = dsi_display_ctrl_power_off(display);
  2727. if (rc)
  2728. pr_err("[%s] failed to power DSI vregs, rc=%d\n",
  2729. display->name, rc);
  2730. }
  2731. return rc;
  2732. }
  2733. int dsi_pre_clkon_cb(void *priv,
  2734. enum dsi_clk_type clk_type,
  2735. enum dsi_lclk_type l_type,
  2736. enum dsi_clk_state new_state)
  2737. {
  2738. int rc = 0;
  2739. struct dsi_display *display = priv;
  2740. if (!display) {
  2741. pr_err("%s: invalid input\n", __func__);
  2742. return -EINVAL;
  2743. }
  2744. if ((clk_type & DSI_CORE_CLK) && (new_state == DSI_CLK_ON)) {
  2745. /*
  2746. * Enable DSI core power
  2747. * 1.> PANEL_PM are controlled as part of
2748. * panel_power_ctrl. Need not be handled here.
  2749. * 2.> CORE_PM are controlled by dsi clk manager.
  2750. * 3.> CTRL_PM need to be enabled/disabled
  2751. * only during unblank/blank. Their state should
  2752. * not be changed during static screen.
  2753. */
  2754. pr_debug("updating power states for ctrl and phy\n");
  2755. rc = dsi_display_ctrl_power_on(display);
  2756. if (rc) {
  2757. pr_err("[%s] failed to power on dsi controllers, rc=%d\n",
  2758. display->name, rc);
  2759. return rc;
  2760. }
  2761. rc = dsi_display_phy_power_on(display);
  2762. if (rc) {
  2763. pr_err("[%s] failed to power on dsi phy, rc = %d\n",
  2764. display->name, rc);
  2765. return rc;
  2766. }
  2767. pr_debug("%s: Enable DSI core power\n", __func__);
  2768. }
  2769. return rc;
  2770. }
  2771. static void __set_lane_map_v2(u8 *lane_map_v2,
  2772. enum dsi_phy_data_lanes lane0,
  2773. enum dsi_phy_data_lanes lane1,
  2774. enum dsi_phy_data_lanes lane2,
  2775. enum dsi_phy_data_lanes lane3)
  2776. {
  2777. lane_map_v2[DSI_LOGICAL_LANE_0] = lane0;
  2778. lane_map_v2[DSI_LOGICAL_LANE_1] = lane1;
  2779. lane_map_v2[DSI_LOGICAL_LANE_2] = lane2;
  2780. lane_map_v2[DSI_LOGICAL_LANE_3] = lane3;
  2781. }
  2782. static int dsi_display_parse_lane_map(struct dsi_display *display)
  2783. {
  2784. int rc = 0, i = 0;
  2785. const char *data;
  2786. u8 temp[DSI_LANE_MAX - 1];
  2787. if (!display) {
  2788. pr_err("invalid params\n");
  2789. return -EINVAL;
  2790. }
  2791. /* lane-map-v2 supersedes lane-map-v1 setting */
  2792. rc = of_property_read_u8_array(display->pdev->dev.of_node,
  2793. "qcom,lane-map-v2", temp, (DSI_LANE_MAX - 1));
  2794. if (!rc) {
  2795. for (i = DSI_LOGICAL_LANE_0; i < (DSI_LANE_MAX - 1); i++)
  2796. display->lane_map.lane_map_v2[i] = BIT(temp[i]);
  2797. return 0;
2798. } else if (rc != -EINVAL) {
  2799. pr_debug("Incorrect mapping, configure default\n");
  2800. goto set_default;
  2801. }
  2802. /* lane-map older version, for DSI controller version < 2.0 */
  2803. data = of_get_property(display->pdev->dev.of_node,
  2804. "qcom,lane-map", NULL);
  2805. if (!data)
  2806. goto set_default;
  2807. if (!strcmp(data, "lane_map_3012")) {
  2808. display->lane_map.lane_map_v1 = DSI_LANE_MAP_3012;
  2809. __set_lane_map_v2(display->lane_map.lane_map_v2,
  2810. DSI_PHYSICAL_LANE_1,
  2811. DSI_PHYSICAL_LANE_2,
  2812. DSI_PHYSICAL_LANE_3,
  2813. DSI_PHYSICAL_LANE_0);
  2814. } else if (!strcmp(data, "lane_map_2301")) {
  2815. display->lane_map.lane_map_v1 = DSI_LANE_MAP_2301;
  2816. __set_lane_map_v2(display->lane_map.lane_map_v2,
  2817. DSI_PHYSICAL_LANE_2,
  2818. DSI_PHYSICAL_LANE_3,
  2819. DSI_PHYSICAL_LANE_0,
  2820. DSI_PHYSICAL_LANE_1);
  2821. } else if (!strcmp(data, "lane_map_1230")) {
  2822. display->lane_map.lane_map_v1 = DSI_LANE_MAP_1230;
  2823. __set_lane_map_v2(display->lane_map.lane_map_v2,
  2824. DSI_PHYSICAL_LANE_3,
  2825. DSI_PHYSICAL_LANE_0,
  2826. DSI_PHYSICAL_LANE_1,
  2827. DSI_PHYSICAL_LANE_2);
  2828. } else if (!strcmp(data, "lane_map_0321")) {
  2829. display->lane_map.lane_map_v1 = DSI_LANE_MAP_0321;
  2830. __set_lane_map_v2(display->lane_map.lane_map_v2,
  2831. DSI_PHYSICAL_LANE_0,
  2832. DSI_PHYSICAL_LANE_3,
  2833. DSI_PHYSICAL_LANE_2,
  2834. DSI_PHYSICAL_LANE_1);
  2835. } else if (!strcmp(data, "lane_map_1032")) {
  2836. display->lane_map.lane_map_v1 = DSI_LANE_MAP_1032;
  2837. __set_lane_map_v2(display->lane_map.lane_map_v2,
  2838. DSI_PHYSICAL_LANE_1,
  2839. DSI_PHYSICAL_LANE_0,
  2840. DSI_PHYSICAL_LANE_3,
  2841. DSI_PHYSICAL_LANE_2);
  2842. } else if (!strcmp(data, "lane_map_2103")) {
  2843. display->lane_map.lane_map_v1 = DSI_LANE_MAP_2103;
  2844. __set_lane_map_v2(display->lane_map.lane_map_v2,
  2845. DSI_PHYSICAL_LANE_2,
  2846. DSI_PHYSICAL_LANE_1,
  2847. DSI_PHYSICAL_LANE_0,
  2848. DSI_PHYSICAL_LANE_3);
  2849. } else if (!strcmp(data, "lane_map_3210")) {
  2850. display->lane_map.lane_map_v1 = DSI_LANE_MAP_3210;
  2851. __set_lane_map_v2(display->lane_map.lane_map_v2,
  2852. DSI_PHYSICAL_LANE_3,
  2853. DSI_PHYSICAL_LANE_2,
  2854. DSI_PHYSICAL_LANE_1,
  2855. DSI_PHYSICAL_LANE_0);
  2856. } else {
  2857. pr_warn("%s: invalid lane map %s specified. defaulting to lane_map0123\n",
  2858. __func__, data);
  2859. goto set_default;
  2860. }
  2861. return 0;
  2862. set_default:
  2863. /* default lane mapping */
  2864. __set_lane_map_v2(display->lane_map.lane_map_v2, DSI_PHYSICAL_LANE_0,
  2865. DSI_PHYSICAL_LANE_1, DSI_PHYSICAL_LANE_2, DSI_PHYSICAL_LANE_3);
  2866. display->lane_map.lane_map_v1 = DSI_LANE_MAP_0123;
  2867. return 0;
  2868. }
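/*
 * Hypothetical device tree snippet for the two properties parsed above
 * (values and syntax are illustrative only, not taken from a real board
 * file):
 *
 *   qcom,lane-map-v2 = /bits/ 8 <3 0 1 2>;  // logical lane i -> physical lane temp[i]
 *   qcom,lane-map = "lane_map_3012";         // legacy property, DSI ctrl version < 2.0
 *
 * For lane-map-v2 each entry is converted with BIT(temp[i]) into the
 * dsi_phy_data_lanes mask stored in lane_map_v2[i]; the legacy string is
 * translated to the equivalent v2 mapping by __set_lane_map_v2().
 */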
  2869. static int dsi_display_get_phandle_index(
  2870. struct dsi_display *display,
  2871. const char *propname, int count, int index)
  2872. {
  2873. struct device_node *disp_node = display->panel_node;
  2874. u32 *val = NULL;
  2875. int rc = 0;
  2876. val = kcalloc(count, sizeof(*val), GFP_KERNEL);
  2877. if (ZERO_OR_NULL_PTR(val)) {
  2878. rc = -ENOMEM;
  2879. goto end;
  2880. }
  2881. if (index >= count)
  2882. goto end;
  2883. if (display->fw)
  2884. rc = dsi_parser_read_u32_array(display->parser_node,
  2885. propname, val, count);
  2886. else
  2887. rc = of_property_read_u32_array(disp_node, propname,
  2888. val, count);
  2889. if (rc)
  2890. goto end;
  2891. rc = val[index];
  2892. pr_debug("%s index=%d\n", propname, rc);
  2893. end:
  2894. kfree(val);
  2895. return rc;
  2896. }
  2897. static int dsi_display_get_phandle_count(struct dsi_display *display,
  2898. const char *propname)
  2899. {
  2900. if (display->fw)
  2901. return dsi_parser_count_u32_elems(display->parser_node,
  2902. propname);
  2903. else
  2904. return of_property_count_u32_elems(display->panel_node,
  2905. propname);
  2906. }
  2907. static int dsi_display_parse_dt(struct dsi_display *display)
  2908. {
  2909. int i, rc = 0;
  2910. u32 phy_count = 0;
  2911. struct device_node *of_node = display->pdev->dev.of_node;
  2912. char *dsi_ctrl_name, *dsi_phy_name;
  2913. if (!strcmp(display->display_type, "primary")) {
  2914. dsi_ctrl_name = "qcom,dsi-ctrl-num";
  2915. dsi_phy_name = "qcom,dsi-phy-num";
  2916. } else {
  2917. dsi_ctrl_name = "qcom,dsi-sec-ctrl-num";
  2918. dsi_phy_name = "qcom,dsi-sec-phy-num";
  2919. }
  2920. display->ctrl_count = dsi_display_get_phandle_count(display,
  2921. dsi_ctrl_name);
  2922. phy_count = dsi_display_get_phandle_count(display, dsi_phy_name);
  2923. pr_debug("ctrl count=%d, phy count=%d\n",
  2924. display->ctrl_count, phy_count);
  2925. if (!phy_count || !display->ctrl_count) {
  2926. pr_err("no ctrl/phys found\n");
  2927. rc = -ENODEV;
  2928. goto error;
  2929. }
  2930. if (phy_count != display->ctrl_count) {
  2931. pr_err("different ctrl and phy counts\n");
  2932. rc = -ENODEV;
  2933. goto error;
  2934. }
  2935. display_for_each_ctrl(i, display) {
  2936. struct dsi_display_ctrl *ctrl = &display->ctrl[i];
  2937. int index;
  2938. index = dsi_display_get_phandle_index(display, dsi_ctrl_name,
  2939. display->ctrl_count, i);
  2940. ctrl->ctrl_of_node = of_parse_phandle(of_node,
  2941. "qcom,dsi-ctrl", index);
  2942. of_node_put(ctrl->ctrl_of_node);
  2943. index = dsi_display_get_phandle_index(display, dsi_phy_name,
  2944. display->ctrl_count, i);
  2945. ctrl->phy_of_node = of_parse_phandle(of_node,
  2946. "qcom,dsi-phy", index);
  2947. of_node_put(ctrl->phy_of_node);
  2948. }
  2949. /* Parse TE data */
  2950. dsi_display_parse_te_data(display);
  2951. /* Parse all external bridges from port 0 */
  2952. display_for_each_ctrl(i, display) {
  2953. display->ext_bridge[i].node_of =
  2954. of_graph_get_remote_node(of_node, 0, i);
  2955. if (display->ext_bridge[i].node_of)
  2956. display->ext_bridge_cnt++;
  2957. else
  2958. break;
  2959. }
  2960. pr_debug("success\n");
  2961. error:
  2962. return rc;
  2963. }
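/*
 * Hypothetical device tree fragment for the phandle lists parsed above
 * (labels and indices are illustrative only):
 *
 *   qcom,dsi-ctrl-num = <0 1>;
 *   qcom,dsi-phy-num = <0 1>;
 *   qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
 *   qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
 *
 * The *-num properties select indices into the qcom,dsi-ctrl and
 * qcom,dsi-phy phandle lists; a secondary display can pick a different
 * subset through qcom,dsi-sec-ctrl-num / qcom,dsi-sec-phy-num.
 */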
  2964. static int dsi_display_res_init(struct dsi_display *display)
  2965. {
  2966. int rc = 0;
  2967. int i;
  2968. struct dsi_display_ctrl *ctrl;
  2969. display_for_each_ctrl(i, display) {
  2970. ctrl = &display->ctrl[i];
  2971. ctrl->ctrl = dsi_ctrl_get(ctrl->ctrl_of_node);
  2972. if (IS_ERR_OR_NULL(ctrl->ctrl)) {
  2973. rc = PTR_ERR(ctrl->ctrl);
  2974. pr_err("failed to get dsi controller, rc=%d\n", rc);
  2975. ctrl->ctrl = NULL;
  2976. goto error_ctrl_put;
  2977. }
  2978. ctrl->phy = dsi_phy_get(ctrl->phy_of_node);
  2979. if (IS_ERR_OR_NULL(ctrl->phy)) {
  2980. rc = PTR_ERR(ctrl->phy);
  2981. pr_err("failed to get phy controller, rc=%d\n", rc);
  2982. dsi_ctrl_put(ctrl->ctrl);
  2983. ctrl->phy = NULL;
  2984. goto error_ctrl_put;
  2985. }
  2986. }
  2987. display->panel = dsi_panel_get(&display->pdev->dev,
  2988. display->panel_node,
  2989. display->parser_node,
  2990. display->display_type,
  2991. display->cmdline_topology);
  2992. if (IS_ERR_OR_NULL(display->panel)) {
  2993. rc = PTR_ERR(display->panel);
  2994. pr_err("failed to get panel, rc=%d\n", rc);
  2995. display->panel = NULL;
  2996. goto error_ctrl_put;
  2997. }
  2998. rc = dsi_display_parse_lane_map(display);
  2999. if (rc) {
  3000. pr_err("Lane map not found, rc=%d\n", rc);
  3001. goto error_ctrl_put;
  3002. }
  3003. rc = dsi_display_clocks_init(display);
  3004. if (rc) {
  3005. pr_err("Failed to parse clock data, rc=%d\n", rc);
  3006. goto error_ctrl_put;
  3007. }
  3008. return 0;
  3009. error_ctrl_put:
  3010. for (i = i - 1; i >= 0; i--) {
  3011. ctrl = &display->ctrl[i];
  3012. dsi_ctrl_put(ctrl->ctrl);
  3013. dsi_phy_put(ctrl->phy);
  3014. }
  3015. return rc;
  3016. }
  3017. static int dsi_display_res_deinit(struct dsi_display *display)
  3018. {
  3019. int rc = 0;
  3020. int i;
  3021. struct dsi_display_ctrl *ctrl;
  3022. rc = dsi_display_clocks_deinit(display);
  3023. if (rc)
  3024. pr_err("clocks deinit failed, rc=%d\n", rc);
  3025. display_for_each_ctrl(i, display) {
  3026. ctrl = &display->ctrl[i];
  3027. dsi_phy_put(ctrl->phy);
  3028. dsi_ctrl_put(ctrl->ctrl);
  3029. }
  3030. if (display->panel)
  3031. dsi_panel_put(display->panel);
  3032. return rc;
  3033. }
  3034. static int dsi_display_validate_mode_set(struct dsi_display *display,
  3035. struct dsi_display_mode *mode,
  3036. u32 flags)
  3037. {
  3038. int rc = 0;
  3039. int i;
  3040. struct dsi_display_ctrl *ctrl;
  3041. /*
  3042. * To set a mode:
  3043. * 1. Controllers should be turned off.
  3044. * 2. Link clocks should be off.
  3045. * 3. Phy should be disabled.
  3046. */
  3047. display_for_each_ctrl(i, display) {
  3048. ctrl = &display->ctrl[i];
  3049. if ((ctrl->power_state > DSI_CTRL_POWER_VREG_ON) ||
  3050. (ctrl->phy_enabled)) {
  3051. rc = -EINVAL;
  3052. goto error;
  3053. }
  3054. }
  3055. error:
  3056. return rc;
  3057. }
  3058. static bool dsi_display_is_seamless_dfps_possible(
  3059. const struct dsi_display *display,
  3060. const struct dsi_display_mode *tgt,
  3061. const enum dsi_dfps_type dfps_type)
  3062. {
  3063. struct dsi_display_mode *cur;
  3064. if (!display || !tgt || !display->panel) {
  3065. pr_err("Invalid params\n");
  3066. return false;
  3067. }
  3068. cur = display->panel->cur_mode;
  3069. if (cur->timing.h_active != tgt->timing.h_active) {
  3070. pr_debug("timing.h_active differs %d %d\n",
  3071. cur->timing.h_active, tgt->timing.h_active);
  3072. return false;
  3073. }
  3074. if (cur->timing.h_back_porch != tgt->timing.h_back_porch) {
  3075. pr_debug("timing.h_back_porch differs %d %d\n",
  3076. cur->timing.h_back_porch,
  3077. tgt->timing.h_back_porch);
  3078. return false;
  3079. }
  3080. if (cur->timing.h_sync_width != tgt->timing.h_sync_width) {
  3081. pr_debug("timing.h_sync_width differs %d %d\n",
  3082. cur->timing.h_sync_width,
  3083. tgt->timing.h_sync_width);
  3084. return false;
  3085. }
  3086. if (cur->timing.h_front_porch != tgt->timing.h_front_porch) {
  3087. pr_debug("timing.h_front_porch differs %d %d\n",
  3088. cur->timing.h_front_porch,
  3089. tgt->timing.h_front_porch);
  3090. if (dfps_type != DSI_DFPS_IMMEDIATE_HFP)
  3091. return false;
  3092. }
  3093. if (cur->timing.h_skew != tgt->timing.h_skew) {
  3094. pr_debug("timing.h_skew differs %d %d\n",
  3095. cur->timing.h_skew,
  3096. tgt->timing.h_skew);
  3097. return false;
  3098. }
  3099. /* skip polarity comparison */
  3100. if (cur->timing.v_active != tgt->timing.v_active) {
  3101. pr_debug("timing.v_active differs %d %d\n",
  3102. cur->timing.v_active,
  3103. tgt->timing.v_active);
  3104. return false;
  3105. }
  3106. if (cur->timing.v_back_porch != tgt->timing.v_back_porch) {
  3107. pr_debug("timing.v_back_porch differs %d %d\n",
  3108. cur->timing.v_back_porch,
  3109. tgt->timing.v_back_porch);
  3110. return false;
  3111. }
  3112. if (cur->timing.v_sync_width != tgt->timing.v_sync_width) {
  3113. pr_debug("timing.v_sync_width differs %d %d\n",
  3114. cur->timing.v_sync_width,
  3115. tgt->timing.v_sync_width);
  3116. return false;
  3117. }
  3118. if (cur->timing.v_front_porch != tgt->timing.v_front_porch) {
  3119. pr_debug("timing.v_front_porch differs %d %d\n",
  3120. cur->timing.v_front_porch,
  3121. tgt->timing.v_front_porch);
  3122. if (dfps_type != DSI_DFPS_IMMEDIATE_VFP)
  3123. return false;
  3124. }
  3125. /* skip polarity comparison */
  3126. if (cur->timing.refresh_rate == tgt->timing.refresh_rate)
  3127. pr_debug("timing.refresh_rate identical %d %d\n",
  3128. cur->timing.refresh_rate,
  3129. tgt->timing.refresh_rate);
  3130. if (cur->pixel_clk_khz != tgt->pixel_clk_khz)
  3131. pr_debug("pixel_clk_khz differs %d %d\n",
  3132. cur->pixel_clk_khz, tgt->pixel_clk_khz);
  3133. if (cur->dsi_mode_flags != tgt->dsi_mode_flags)
  3134. pr_debug("flags differs %d %d\n",
  3135. cur->dsi_mode_flags, tgt->dsi_mode_flags);
  3136. return true;
  3137. }
  3138. static int dsi_display_dfps_update(struct dsi_display *display,
  3139. struct dsi_display_mode *dsi_mode)
  3140. {
  3141. struct dsi_mode_info *timing;
  3142. struct dsi_display_ctrl *m_ctrl, *ctrl;
  3143. struct dsi_display_mode *panel_mode;
  3144. struct dsi_dfps_capabilities dfps_caps;
  3145. int rc = 0;
  3146. int i = 0;
  3147. if (!display || !dsi_mode || !display->panel) {
  3148. pr_err("Invalid params\n");
  3149. return -EINVAL;
  3150. }
  3151. timing = &dsi_mode->timing;
  3152. dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
  3153. if (!dfps_caps.dfps_support) {
  3154. pr_err("dfps not supported\n");
  3155. return -ENOTSUPP;
  3156. }
  3157. if (dfps_caps.type == DSI_DFPS_IMMEDIATE_CLK) {
  3158. pr_err("dfps clock method not supported\n");
  3159. return -ENOTSUPP;
  3160. }
  3161. /* For split DSI, update the clock master first */
  3162. pr_debug("configuring seamless dynamic fps\n\n");
  3163. SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
  3164. m_ctrl = &display->ctrl[display->clk_master_idx];
  3165. rc = dsi_ctrl_async_timing_update(m_ctrl->ctrl, timing);
  3166. if (rc) {
  3167. pr_err("[%s] failed to dfps update host_%d, rc=%d\n",
  3168. display->name, i, rc);
  3169. goto error;
  3170. }
  3171. /* Update the rest of the controllers */
  3172. display_for_each_ctrl(i, display) {
  3173. ctrl = &display->ctrl[i];
  3174. if (!ctrl->ctrl || (ctrl == m_ctrl))
  3175. continue;
  3176. rc = dsi_ctrl_async_timing_update(ctrl->ctrl, timing);
  3177. if (rc) {
  3178. pr_err("[%s] failed to dfps update host_%d, rc=%d\n",
  3179. display->name, i, rc);
  3180. goto error;
  3181. }
  3182. }
  3183. panel_mode = display->panel->cur_mode;
  3184. memcpy(panel_mode, dsi_mode, sizeof(*panel_mode));
  3185. /*
  3186. * dsi_mode_flags flags are used to communicate with other drm driver
  3187. * components, and are transient. They aren't inherently part of the
  3188. * display panel's mode and shouldn't be saved into the cached currently
  3189. * active mode.
  3190. */
  3191. panel_mode->dsi_mode_flags = 0;
  3192. error:
  3193. SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
  3194. return rc;
  3195. }
  3196. static int dsi_display_dfps_calc_front_porch(
  3197. u32 old_fps,
  3198. u32 new_fps,
  3199. u32 a_total,
  3200. u32 b_total,
  3201. u32 b_fp,
  3202. u32 *b_fp_out)
  3203. {
  3204. s32 b_fp_new;
  3205. int add_porches, diff;
  3206. if (!b_fp_out) {
  3207. pr_err("Invalid params\n");
  3208. return -EINVAL;
  3209. }
  3210. if (!a_total || !new_fps) {
  3211. pr_err("Invalid pixel total or new fps in mode request\n");
  3212. return -EINVAL;
  3213. }
  3214. /*
  3215. * Keep clock, other porches constant, use new fps, calc front porch
  3216. * new_vtotal = old_vtotal * (old_fps / new_fps )
  3217. * new_vfp - old_vfp = new_vtotal - old_vtotal
  3218. * new_vfp = old_vfp + old_vtotal * ((old_fps - new_fps)/ new_fps)
  3219. */
  3220. diff = abs(old_fps - new_fps);
  3221. add_porches = mult_frac(b_total, diff, new_fps);
  3222. if (old_fps > new_fps)
  3223. b_fp_new = b_fp + add_porches;
  3224. else
  3225. b_fp_new = b_fp - add_porches;
  3226. pr_debug("fps %u a %u b %u b_fp %u new_fp %d\n",
  3227. new_fps, a_total, b_total, b_fp, b_fp_new);
  3228. if (b_fp_new < 0) {
  3229. pr_err("Invalid new_hfp calcluated%d\n", b_fp_new);
  3230. return -EINVAL;
  3231. }
3232. /*
  3233. * TODO: To differentiate from clock method when communicating to the
  3234. * other components, perhaps we should set clk here to original value
  3235. */
  3236. *b_fp_out = b_fp_new;
  3237. return 0;
  3238. }
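/*
 * Worked example of the front porch math above (illustrative numbers
 * only): dropping from old_fps = 60 to new_fps = 30 with
 * b_total (v_total) = 2000 and b_fp (v_front_porch) = 20:
 *
 *   diff        = |60 - 30|       = 30
 *   add_porches = 2000 * 30 / 30  = 2000
 *   b_fp_new    = 20 + 2000       = 2020  (old_fps > new_fps, porch grows)
 *
 * The new total becomes 2000 + 2000 = 4000 lines, i.e. old_vtotal *
 * (old_fps / new_fps), matching the relation stated in the comment.
 */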
  3239. /**
  3240. * dsi_display_get_dfps_timing() - Get the new dfps values.
  3241. * @display: DSI display handle.
  3242. * @adj_mode: Mode value structure to be changed.
  3243. * It contains old timing values and latest fps value.
  3244. * New timing values are updated based on new fps.
  3245. * @curr_refresh_rate: Current fps rate.
3246. * If zero, the current fps rate is taken from
  3247. * display->panel->cur_mode.
  3248. * Return: error code.
  3249. */
  3250. static int dsi_display_get_dfps_timing(struct dsi_display *display,
  3251. struct dsi_display_mode *adj_mode,
  3252. u32 curr_refresh_rate)
  3253. {
  3254. struct dsi_dfps_capabilities dfps_caps;
  3255. struct dsi_display_mode per_ctrl_mode;
  3256. struct dsi_mode_info *timing;
  3257. struct dsi_ctrl *m_ctrl;
  3258. int rc = 0;
  3259. if (!display || !adj_mode) {
  3260. pr_err("Invalid params\n");
  3261. return -EINVAL;
  3262. }
  3263. m_ctrl = display->ctrl[display->clk_master_idx].ctrl;
  3264. dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
  3265. if (!dfps_caps.dfps_support) {
  3266. pr_err("dfps not supported by panel\n");
  3267. return -EINVAL;
  3268. }
  3269. per_ctrl_mode = *adj_mode;
  3270. adjust_timing_by_ctrl_count(display, &per_ctrl_mode);
  3271. if (!curr_refresh_rate) {
  3272. if (!dsi_display_is_seamless_dfps_possible(display,
  3273. &per_ctrl_mode, dfps_caps.type)) {
  3274. pr_err("seamless dynamic fps not supported for mode\n");
  3275. return -EINVAL;
  3276. }
  3277. if (display->panel->cur_mode) {
  3278. curr_refresh_rate =
  3279. display->panel->cur_mode->timing.refresh_rate;
  3280. } else {
  3281. pr_err("cur_mode is not initialized\n");
  3282. return -EINVAL;
  3283. }
  3284. }
  3285. /* TODO: Remove this direct reference to the dsi_ctrl */
  3286. timing = &per_ctrl_mode.timing;
  3287. switch (dfps_caps.type) {
  3288. case DSI_DFPS_IMMEDIATE_VFP:
  3289. rc = dsi_display_dfps_calc_front_porch(
  3290. curr_refresh_rate,
  3291. timing->refresh_rate,
  3292. DSI_H_TOTAL_DSC(timing),
  3293. DSI_V_TOTAL(timing),
  3294. timing->v_front_porch,
  3295. &adj_mode->timing.v_front_porch);
  3296. break;
  3297. case DSI_DFPS_IMMEDIATE_HFP:
  3298. rc = dsi_display_dfps_calc_front_porch(
  3299. curr_refresh_rate,
  3300. timing->refresh_rate,
  3301. DSI_V_TOTAL(timing),
  3302. DSI_H_TOTAL_DSC(timing),
  3303. timing->h_front_porch,
  3304. &adj_mode->timing.h_front_porch);
  3305. if (!rc)
  3306. adj_mode->timing.h_front_porch *= display->ctrl_count;
  3307. break;
  3308. default:
  3309. pr_err("Unsupported DFPS mode %d\n", dfps_caps.type);
  3310. rc = -ENOTSUPP;
  3311. }
  3312. return rc;
  3313. }
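/*
 * Note on the DSI_DFPS_IMMEDIATE_HFP case above: the porch is computed on
 * the per-controller timing produced by adjust_timing_by_ctrl_count(), so
 * the result is multiplied back by ctrl_count before being stored in
 * adj_mode. Illustrative example: a computed per-controller HFP of 40 on a
 * dual-DSI (ctrl_count = 2) panel is written back as 80.
 */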
  3314. static bool dsi_display_validate_mode_seamless(struct dsi_display *display,
  3315. struct dsi_display_mode *adj_mode)
  3316. {
  3317. int rc = 0;
  3318. if (!display || !adj_mode) {
  3319. pr_err("Invalid params\n");
  3320. return false;
  3321. }
  3322. /* Currently the only seamless transition is dynamic fps */
  3323. rc = dsi_display_get_dfps_timing(display, adj_mode, 0);
  3324. if (rc) {
  3325. pr_debug("Dynamic FPS not supported for seamless\n");
  3326. } else {
  3327. pr_debug("Mode switch is seamless Dynamic FPS\n");
  3328. adj_mode->dsi_mode_flags |= DSI_MODE_FLAG_DFPS |
  3329. DSI_MODE_FLAG_VBLANK_PRE_MODESET;
  3330. }
  3331. return rc;
  3332. }
  3333. static int dsi_display_set_mode_sub(struct dsi_display *display,
  3334. struct dsi_display_mode *mode,
  3335. u32 flags)
  3336. {
  3337. int rc = 0;
  3338. int i;
  3339. struct dsi_display_ctrl *ctrl;
  3340. struct dsi_display_mode_priv_info *priv_info;
  3341. priv_info = mode->priv_info;
  3342. if (!priv_info) {
  3343. pr_err("[%s] failed to get private info of the display mode\n",
  3344. display->name);
  3345. return -EINVAL;
  3346. }
  3347. if (mode->dsi_mode_flags & DSI_MODE_FLAG_POMS) {
  3348. display->config.panel_mode = mode->panel_mode;
  3349. display->panel->panel_mode = mode->panel_mode;
  3350. }
  3351. rc = dsi_panel_get_host_cfg_for_mode(display->panel,
  3352. mode,
  3353. &display->config);
  3354. if (rc) {
  3355. pr_err("[%s] failed to get host config for mode, rc=%d\n",
  3356. display->name, rc);
  3357. goto error;
  3358. }
  3359. memcpy(&display->config.lane_map, &display->lane_map,
  3360. sizeof(display->lane_map));
  3361. if (mode->dsi_mode_flags &
  3362. (DSI_MODE_FLAG_DFPS | DSI_MODE_FLAG_VRR)) {
  3363. rc = dsi_display_dfps_update(display, mode);
  3364. if (rc) {
  3365. pr_err("[%s]DSI dfps update failed, rc=%d\n",
  3366. display->name, rc);
  3367. goto error;
  3368. }
  3369. }
  3370. display_for_each_ctrl(i, display) {
  3371. ctrl = &display->ctrl[i];
  3372. rc = dsi_ctrl_update_host_config(ctrl->ctrl, &display->config,
  3373. mode, mode->dsi_mode_flags,
  3374. display->dsi_clk_handle);
  3375. if (rc) {
  3376. pr_err("[%s] failed to update ctrl config, rc=%d\n",
  3377. display->name, rc);
  3378. goto error;
  3379. }
  3380. }
  3381. if (priv_info->phy_timing_len) {
  3382. display_for_each_ctrl(i, display) {
  3383. ctrl = &display->ctrl[i];
  3384. rc = dsi_phy_set_timing_params(ctrl->phy,
  3385. priv_info->phy_timing_val,
  3386. priv_info->phy_timing_len);
  3387. if (rc)
  3388. pr_err("failed to add DSI PHY timing params\n");
  3389. }
  3390. }
  3391. error:
  3392. return rc;
  3393. }
  3394. /**
  3395. * _dsi_display_dev_init - initializes the display device
  3396. * Initialization will acquire references to the resources required for the
  3397. * display hardware to function.
  3398. * @display: Handle to the display
  3399. * Returns: Zero on success
  3400. */
  3401. static int _dsi_display_dev_init(struct dsi_display *display)
  3402. {
  3403. int rc = 0;
  3404. if (!display) {
  3405. pr_err("invalid display\n");
  3406. return -EINVAL;
  3407. }
  3408. if (!display->panel_node)
  3409. return 0;
  3410. mutex_lock(&display->display_lock);
  3411. display->parser = dsi_parser_get(&display->pdev->dev);
  3412. if (display->fw && display->parser)
  3413. display->parser_node = dsi_parser_get_head_node(
  3414. display->parser, display->fw->data,
  3415. display->fw->size);
  3416. rc = dsi_display_parse_dt(display);
  3417. if (rc) {
  3418. pr_err("[%s] failed to parse dt, rc=%d\n", display->name, rc);
  3419. goto error;
  3420. }
  3421. rc = dsi_display_res_init(display);
  3422. if (rc) {
  3423. pr_err("[%s] failed to initialize resources, rc=%d\n",
  3424. display->name, rc);
  3425. goto error;
  3426. }
  3427. error:
  3428. mutex_unlock(&display->display_lock);
  3429. return rc;
  3430. }
  3431. /**
  3432. * _dsi_display_dev_deinit - deinitializes the display device
  3433. * All the resources acquired during device init will be released.
  3434. * @display: Handle to the display
  3435. * Returns: Zero on success
  3436. */
  3437. static int _dsi_display_dev_deinit(struct dsi_display *display)
  3438. {
  3439. int rc = 0;
  3440. if (!display) {
  3441. pr_err("invalid display\n");
  3442. return -EINVAL;
  3443. }
  3444. mutex_lock(&display->display_lock);
  3445. rc = dsi_display_res_deinit(display);
  3446. if (rc)
  3447. pr_err("[%s] failed to deinitialize resource, rc=%d\n",
  3448. display->name, rc);
  3449. mutex_unlock(&display->display_lock);
  3450. return rc;
  3451. }
  3452. /**
  3453. * dsi_display_cont_splash_config() - Initialize resources for continuous splash
  3454. * @dsi_display: Pointer to dsi display
  3455. * Returns: Zero on success
  3456. */
  3457. int dsi_display_cont_splash_config(void *dsi_display)
  3458. {
  3459. struct dsi_display *display = dsi_display;
  3460. int rc = 0;
  3461. /* Vote for gdsc required to read register address space */
  3462. if (!display) {
  3463. pr_err("invalid input display param\n");
  3464. return -EINVAL;
  3465. }
  3466. rc = pm_runtime_get_sync(display->drm_dev->dev);
  3467. if (rc < 0) {
  3468. pr_err("failed to vote gdsc for continuous splash, rc=%d\n",
  3469. rc);
  3470. return rc;
  3471. }
  3472. mutex_lock(&display->display_lock);
  3473. /* Verify whether continuous splash is enabled or not */
  3474. display->is_cont_splash_enabled =
  3475. dsi_display_get_cont_splash_status(display);
  3476. if (!display->is_cont_splash_enabled) {
  3477. pr_err("Continuous splash is not enabled\n");
  3478. goto splash_disabled;
  3479. }
  3480. /* Update splash status for clock manager */
  3481. dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
  3482. display->is_cont_splash_enabled);
  3483. /* Set up ctrl isr before enabling core clk */
  3484. dsi_display_ctrl_isr_configure(display, true);
  3485. /* Vote for Core clk and link clk. Votes on ctrl and phy
3486. * regulators are implicit from the pre-clk-on callback
  3487. */
  3488. rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
  3489. DSI_ALL_CLKS, DSI_CLK_ON);
  3490. if (rc) {
  3491. pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
  3492. display->name, rc);
  3493. goto clk_manager_update;
  3494. }
  3495. /* Vote on panel regulator will be removed during suspend path */
  3496. rc = dsi_pwr_enable_regulator(&display->panel->power_info, true);
  3497. if (rc) {
  3498. pr_err("[%s] failed to enable vregs, rc=%d\n",
  3499. display->panel->name, rc);
  3500. goto clks_disabled;
  3501. }
  3502. dsi_config_host_engine_state_for_cont_splash(display);
  3503. mutex_unlock(&display->display_lock);
  3504. /* Set the current brightness level */
  3505. dsi_panel_bl_handoff(display->panel);
  3506. return rc;
  3507. clks_disabled:
  3508. rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
  3509. DSI_ALL_CLKS, DSI_CLK_OFF);
  3510. clk_manager_update:
  3511. dsi_display_ctrl_isr_configure(display, false);
  3512. /* Update splash status for clock manager */
  3513. dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
  3514. false);
  3515. splash_disabled:
  3516. pm_runtime_put_sync(display->drm_dev->dev);
  3517. display->is_cont_splash_enabled = false;
  3518. mutex_unlock(&display->display_lock);
  3519. return rc;
  3520. }
  3521. /**
  3522. * dsi_display_splash_res_cleanup() - cleanup for continuous splash
  3523. * @display: Pointer to dsi display
  3524. * Returns: Zero on success
  3525. */
  3526. int dsi_display_splash_res_cleanup(struct dsi_display *display)
  3527. {
  3528. int rc = 0;
  3529. if (!display->is_cont_splash_enabled)
  3530. return 0;
  3531. rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
  3532. DSI_ALL_CLKS, DSI_CLK_OFF);
  3533. if (rc)
  3534. pr_err("[%s] failed to disable DSI link clocks, rc=%d\n",
  3535. display->name, rc);
  3536. pm_runtime_put_sync(display->drm_dev->dev);
  3537. display->is_cont_splash_enabled = false;
  3538. /* Update splash status for clock manager */
  3539. dsi_display_clk_mngr_update_splash_status(display->clk_mngr,
  3540. display->is_cont_splash_enabled);
  3541. return rc;
  3542. }
  3543. static int dsi_display_force_update_dsi_clk(struct dsi_display *display)
  3544. {
  3545. int rc = 0;
  3546. rc = dsi_display_link_clk_force_update_ctrl(display->dsi_clk_handle);
  3547. if (!rc) {
  3548. pr_info("dsi bit clk has been configured to %d\n",
  3549. display->cached_clk_rate);
  3550. atomic_set(&display->clkrate_change_pending, 0);
  3551. } else {
  3552. pr_err("Failed to configure dsi bit clock '%d'. rc = %d\n",
  3553. display->cached_clk_rate, rc);
  3554. }
  3555. return rc;
  3556. }
  3557. static int dsi_display_request_update_dsi_bitrate(struct dsi_display *display,
  3558. u32 bit_clk_rate)
  3559. {
  3560. int rc = 0;
  3561. int i;
  3562. pr_debug("%s:bit rate:%d\n", __func__, bit_clk_rate);
  3563. if (!display->panel) {
  3564. pr_err("Invalid params\n");
  3565. return -EINVAL;
  3566. }
  3567. if (bit_clk_rate == 0) {
  3568. pr_err("Invalid bit clock rate\n");
  3569. return -EINVAL;
  3570. }
  3571. display->config.bit_clk_rate_hz_override = bit_clk_rate;
  3572. display_for_each_ctrl(i, display) {
  3573. struct dsi_display_ctrl *dsi_disp_ctrl = &display->ctrl[i];
  3574. struct dsi_ctrl *ctrl = dsi_disp_ctrl->ctrl;
  3575. u32 num_of_lanes = 0;
  3576. u32 bpp = 3;
  3577. u64 bit_rate, pclk_rate, bit_rate_per_lane, byte_clk_rate;
  3578. struct dsi_host_common_cfg *host_cfg;
  3579. mutex_lock(&ctrl->ctrl_lock);
  3580. host_cfg = &display->panel->host_config;
  3581. if (host_cfg->data_lanes & DSI_DATA_LANE_0)
  3582. num_of_lanes++;
  3583. if (host_cfg->data_lanes & DSI_DATA_LANE_1)
  3584. num_of_lanes++;
  3585. if (host_cfg->data_lanes & DSI_DATA_LANE_2)
  3586. num_of_lanes++;
  3587. if (host_cfg->data_lanes & DSI_DATA_LANE_3)
  3588. num_of_lanes++;
  3589. if (num_of_lanes == 0) {
  3590. pr_err("Invalid lane count\n");
  3591. rc = -EINVAL;
  3592. goto error;
  3593. }
  3594. bit_rate = display->config.bit_clk_rate_hz_override *
  3595. num_of_lanes;
  3596. bit_rate_per_lane = bit_rate;
  3597. do_div(bit_rate_per_lane, num_of_lanes);
  3598. pclk_rate = bit_rate;
  3599. do_div(pclk_rate, (8 * bpp));
  3600. byte_clk_rate = bit_rate_per_lane;
  3601. do_div(byte_clk_rate, 8);
  3602. pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
  3603. bit_rate, bit_rate_per_lane);
  3604. pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
  3605. byte_clk_rate, pclk_rate);
  3606. ctrl->clk_freq.byte_clk_rate = byte_clk_rate;
  3607. ctrl->clk_freq.pix_clk_rate = pclk_rate;
  3608. rc = dsi_clk_set_link_frequencies(display->dsi_clk_handle,
  3609. ctrl->clk_freq, ctrl->cell_index);
  3610. if (rc) {
  3611. pr_err("Failed to update link frequencies\n");
  3612. goto error;
  3613. }
  3614. ctrl->host_config.bit_clk_rate_hz_override = bit_clk_rate;
  3615. error:
  3616. mutex_unlock(&ctrl->ctrl_lock);
  3617. /* TODO: recover ctrl->clk_freq in case of failure */
  3618. if (rc)
  3619. return rc;
  3620. }
  3621. return 0;
  3622. }
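/*
 * Worked example of the clock derivation above (illustrative numbers,
 * assuming 4 data lanes and bpp = 3 bytes per pixel as hard-coded above):
 *
 *   bit_clk_rate_hz_override = 600 MHz (per lane)
 *   bit_rate          = 600 MHz * 4       = 2.4 GHz
 *   bit_rate_per_lane = 2.4 GHz / 4       = 600 MHz
 *   pclk_rate         = 2.4 GHz / (8 * 3) = 100 MHz
 *   byte_clk_rate     = 600 MHz / 8       = 75 MHz
 */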
  3623. static ssize_t dynamic_dsi_clock_show(struct device *dev,
  3624. struct device_attribute *attr, char *buf)
  3625. {
  3626. int rc = 0;
  3627. struct dsi_display *display;
  3628. struct dsi_display_ctrl *m_ctrl;
  3629. struct dsi_ctrl *ctrl;
  3630. display = dev_get_drvdata(dev);
  3631. if (!display) {
  3632. pr_err("Invalid display\n");
  3633. return -EINVAL;
  3634. }
  3635. mutex_lock(&display->display_lock);
  3636. m_ctrl = &display->ctrl[display->cmd_master_idx];
  3637. ctrl = m_ctrl->ctrl;
  3638. if (ctrl)
  3639. display->cached_clk_rate = ctrl->clk_freq.byte_clk_rate
  3640. * 8;
  3641. rc = snprintf(buf, PAGE_SIZE, "%d\n", display->cached_clk_rate);
  3642. pr_debug("%s: read dsi clk rate %d\n", __func__,
  3643. display->cached_clk_rate);
  3644. mutex_unlock(&display->display_lock);
  3645. return rc;
  3646. }
  3647. static ssize_t dynamic_dsi_clock_store(struct device *dev,
  3648. struct device_attribute *attr, const char *buf, size_t count)
  3649. {
  3650. int rc = 0;
  3651. int clk_rate;
  3652. struct dsi_display *display;
  3653. display = dev_get_drvdata(dev);
  3654. if (!display) {
  3655. pr_err("Invalid display\n");
  3656. return -EINVAL;
  3657. }
  3658. rc = kstrtoint(buf, DSI_CLOCK_BITRATE_RADIX, &clk_rate);
  3659. if (rc) {
  3660. pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
  3661. return rc;
  3662. }
  3663. if (clk_rate <= 0) {
  3664. pr_err("%s: bitrate should be greater than 0\n", __func__);
  3665. return -EINVAL;
  3666. }
  3667. if (clk_rate == display->cached_clk_rate) {
  3668. pr_info("%s: ignore duplicated DSI clk setting\n", __func__);
  3669. return count;
  3670. }
  3671. pr_info("%s: bitrate param value: '%d'\n", __func__, clk_rate);
  3672. mutex_lock(&display->display_lock);
  3673. display->cached_clk_rate = clk_rate;
  3674. rc = dsi_display_request_update_dsi_bitrate(display, clk_rate);
  3675. if (!rc) {
  3676. pr_info("%s: bit clk is ready to be configured to '%d'\n",
  3677. __func__, clk_rate);
  3678. } else {
  3679. pr_err("%s: Failed to prepare to configure '%d'. rc = %d\n",
  3680. __func__, clk_rate, rc);
3681. /* Preparing the new rate failed, so clear the cached rate and pending flag. */
  3682. atomic_set(&display->clkrate_change_pending, 0);
  3683. display->cached_clk_rate = 0;
  3684. mutex_unlock(&display->display_lock);
  3685. return rc;
  3686. }
  3687. atomic_set(&display->clkrate_change_pending, 1);
  3688. mutex_unlock(&display->display_lock);
  3689. return count;
  3690. }
  3691. static DEVICE_ATTR_RW(dynamic_dsi_clock);
  3692. static struct attribute *dynamic_dsi_clock_fs_attrs[] = {
  3693. &dev_attr_dynamic_dsi_clock.attr,
  3694. NULL,
  3695. };
  3696. static struct attribute_group dynamic_dsi_clock_fs_attrs_group = {
  3697. .attrs = dynamic_dsi_clock_fs_attrs,
  3698. };
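/*
 * Usage sketch for the dynamic_dsi_clock node created below; the exact
 * sysfs path depends on the platform device name and is only an
 * assumption here:
 *
 *   # read back the cached bit clock (byte clock * 8) in Hz
 *   cat /sys/devices/platform/soc/.../dynamic_dsi_clock
 *
 *   # request a new bit clock rate; the value is only cached here and is
 *   # applied later through dsi_display_force_update_dsi_clk() once
 *   # clkrate_change_pending is observed
 *   echo 550000000 > /sys/devices/platform/soc/.../dynamic_dsi_clock
 */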
  3699. static int dsi_display_sysfs_init(struct dsi_display *display)
  3700. {
  3701. int rc = 0;
  3702. struct device *dev = &display->pdev->dev;
  3703. if (display->panel->panel_mode == DSI_OP_CMD_MODE)
  3704. rc = sysfs_create_group(&dev->kobj,
  3705. &dynamic_dsi_clock_fs_attrs_group);
  3706. return rc;
  3707. }
  3708. static int dsi_display_sysfs_deinit(struct dsi_display *display)
  3709. {
  3710. struct device *dev = &display->pdev->dev;
  3711. if (display->panel->panel_mode == DSI_OP_CMD_MODE)
  3712. sysfs_remove_group(&dev->kobj,
  3713. &dynamic_dsi_clock_fs_attrs_group);
  3714. return 0;
  3715. }
  3716. /**
  3717. * dsi_display_bind - bind dsi device with controlling device
  3718. * @dev: Pointer to base of platform device
  3719. * @master: Pointer to container of drm device
  3720. * @data: Pointer to private data
  3721. * Returns: Zero on success
  3722. */
  3723. static int dsi_display_bind(struct device *dev,
  3724. struct device *master,
  3725. void *data)
  3726. {
  3727. struct dsi_display_ctrl *display_ctrl;
  3728. struct drm_device *drm;
  3729. struct dsi_display *display;
  3730. struct dsi_clk_info info;
  3731. struct clk_ctrl_cb clk_cb;
  3732. void *handle = NULL;
  3733. struct platform_device *pdev = to_platform_device(dev);
  3734. char *client1 = "dsi_clk_client";
  3735. char *client2 = "mdp_event_client";
  3736. int i, rc = 0;
  3737. if (!dev || !pdev || !master) {
  3738. pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
  3739. dev, pdev, master);
  3740. return -EINVAL;
  3741. }
  3742. drm = dev_get_drvdata(master);
  3743. display = platform_get_drvdata(pdev);
  3744. if (!drm || !display) {
  3745. pr_err("invalid param(s), drm %pK, display %pK\n",
  3746. drm, display);
  3747. return -EINVAL;
  3748. }
  3749. if (!display->panel_node)
  3750. return 0;
  3751. if (!display->fw)
  3752. display->name = display->panel_node->name;
  3753. mutex_lock(&display->display_lock);
  3754. rc = dsi_display_debugfs_init(display);
  3755. if (rc) {
  3756. pr_err("[%s] debugfs init failed, rc=%d\n", display->name, rc);
  3757. goto error;
  3758. }
  3759. atomic_set(&display->clkrate_change_pending, 0);
  3760. display->cached_clk_rate = 0;
  3761. rc = dsi_display_sysfs_init(display);
  3762. if (rc) {
  3763. pr_err("[%s] sysfs init failed, rc=%d\n", display->name, rc);
  3764. goto error;
  3765. }
  3766. memset(&info, 0x0, sizeof(info));
  3767. display_for_each_ctrl(i, display) {
  3768. display_ctrl = &display->ctrl[i];
  3769. rc = dsi_ctrl_drv_init(display_ctrl->ctrl, display->root);
  3770. if (rc) {
  3771. pr_err("[%s] failed to initialize ctrl[%d], rc=%d\n",
  3772. display->name, i, rc);
  3773. goto error_ctrl_deinit;
  3774. }
  3775. display_ctrl->ctrl->horiz_index = i;
  3776. rc = dsi_phy_drv_init(display_ctrl->phy);
  3777. if (rc) {
  3778. pr_err("[%s] Failed to initialize phy[%d], rc=%d\n",
  3779. display->name, i, rc);
  3780. (void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
  3781. goto error_ctrl_deinit;
  3782. }
  3783. memcpy(&info.c_clks[i],
  3784. (&display_ctrl->ctrl->clk_info.core_clks),
  3785. sizeof(struct dsi_core_clk_info));
  3786. memcpy(&info.l_hs_clks[i],
  3787. (&display_ctrl->ctrl->clk_info.hs_link_clks),
  3788. sizeof(struct dsi_link_hs_clk_info));
  3789. memcpy(&info.l_lp_clks[i],
  3790. (&display_ctrl->ctrl->clk_info.lp_link_clks),
  3791. sizeof(struct dsi_link_lp_clk_info));
  3792. info.c_clks[i].drm = drm;
  3793. info.bus_handle[i] =
  3794. display_ctrl->ctrl->axi_bus_info.bus_handle;
  3795. info.ctrl_index[i] = display_ctrl->ctrl->cell_index;
  3796. }
  3797. info.pre_clkoff_cb = dsi_pre_clkoff_cb;
  3798. info.pre_clkon_cb = dsi_pre_clkon_cb;
  3799. info.post_clkoff_cb = dsi_post_clkoff_cb;
  3800. info.post_clkon_cb = dsi_post_clkon_cb;
  3801. info.priv_data = display;
  3802. info.master_ndx = display->clk_master_idx;
  3803. info.dsi_ctrl_count = display->ctrl_count;
  3804. snprintf(info.name, MAX_STRING_LEN,
  3805. "DSI_MNGR-%s", display->name);
  3806. display->clk_mngr = dsi_display_clk_mngr_register(&info);
  3807. if (IS_ERR_OR_NULL(display->clk_mngr)) {
  3808. rc = PTR_ERR(display->clk_mngr);
  3809. display->clk_mngr = NULL;
  3810. pr_err("dsi clock registration failed, rc = %d\n", rc);
  3811. goto error_ctrl_deinit;
  3812. }
  3813. handle = dsi_register_clk_handle(display->clk_mngr, client1);
  3814. if (IS_ERR_OR_NULL(handle)) {
  3815. rc = PTR_ERR(handle);
  3816. pr_err("failed to register %s client, rc = %d\n",
  3817. client1, rc);
  3818. goto error_clk_deinit;
  3819. } else {
  3820. display->dsi_clk_handle = handle;
  3821. }
  3822. handle = dsi_register_clk_handle(display->clk_mngr, client2);
  3823. if (IS_ERR_OR_NULL(handle)) {
  3824. rc = PTR_ERR(handle);
  3825. pr_err("failed to register %s client, rc = %d\n",
  3826. client2, rc);
  3827. goto error_clk_client_deinit;
  3828. } else {
  3829. display->mdp_clk_handle = handle;
  3830. }
  3831. clk_cb.priv = display;
  3832. clk_cb.dsi_clk_cb = dsi_display_clk_ctrl_cb;
  3833. display_for_each_ctrl(i, display) {
  3834. display_ctrl = &display->ctrl[i];
  3835. rc = dsi_ctrl_clk_cb_register(display_ctrl->ctrl, &clk_cb);
  3836. if (rc) {
  3837. pr_err("[%s] failed to register ctrl clk_cb[%d], rc=%d\n",
  3838. display->name, i, rc);
  3839. goto error_ctrl_deinit;
  3840. }
  3841. rc = dsi_phy_clk_cb_register(display_ctrl->phy, &clk_cb);
  3842. if (rc) {
  3843. pr_err("[%s] failed to register phy clk_cb[%d], rc=%d\n",
  3844. display->name, i, rc);
  3845. goto error_ctrl_deinit;
  3846. }
  3847. }
  3848. rc = dsi_display_mipi_host_init(display);
  3849. if (rc) {
  3850. pr_err("[%s] failed to initialize mipi host, rc=%d\n",
  3851. display->name, rc);
  3852. goto error_ctrl_deinit;
  3853. }
  3854. rc = dsi_panel_drv_init(display->panel, &display->host);
  3855. if (rc) {
  3856. if (rc != -EPROBE_DEFER)
  3857. pr_err("[%s] failed to initialize panel driver, rc=%d\n",
  3858. display->name, rc);
  3859. goto error_host_deinit;
  3860. }
  3861. pr_info("Successfully bind display panel '%s'\n", display->name);
  3862. display->drm_dev = drm;
  3863. display_for_each_ctrl(i, display) {
  3864. display_ctrl = &display->ctrl[i];
  3865. if (!display_ctrl->phy || !display_ctrl->ctrl)
  3866. continue;
  3867. rc = dsi_phy_set_clk_freq(display_ctrl->phy,
  3868. &display_ctrl->ctrl->clk_freq);
  3869. if (rc) {
  3870. pr_err("[%s] failed to set phy clk freq, rc=%d\n",
  3871. display->name, rc);
  3872. goto error;
  3873. }
  3874. }
  3875. /* register te irq handler */
  3876. dsi_display_register_te_irq(display);
  3877. goto error;
  3878. error_host_deinit:
  3879. (void)dsi_display_mipi_host_deinit(display);
  3880. error_clk_client_deinit:
  3881. (void)dsi_deregister_clk_handle(display->dsi_clk_handle);
  3882. error_clk_deinit:
  3883. (void)dsi_display_clk_mngr_deregister(display->clk_mngr);
  3884. error_ctrl_deinit:
  3885. for (i = i - 1; i >= 0; i--) {
  3886. display_ctrl = &display->ctrl[i];
  3887. (void)dsi_phy_drv_deinit(display_ctrl->phy);
  3888. (void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
  3889. }
  3890. (void)dsi_display_sysfs_deinit(display);
  3891. (void)dsi_display_debugfs_deinit(display);
  3892. error:
  3893. mutex_unlock(&display->display_lock);
  3894. return rc;
  3895. }
  3896. /**
  3897. * dsi_display_unbind - unbind dsi from controlling device
  3898. * @dev: Pointer to base of platform device
  3899. * @master: Pointer to container of drm device
  3900. * @data: Pointer to private data
  3901. */
  3902. static void dsi_display_unbind(struct device *dev,
  3903. struct device *master, void *data)
  3904. {
  3905. struct dsi_display_ctrl *display_ctrl;
  3906. struct dsi_display *display;
  3907. struct platform_device *pdev = to_platform_device(dev);
  3908. int i, rc = 0;
  3909. if (!dev || !pdev) {
  3910. pr_err("invalid param(s)\n");
  3911. return;
  3912. }
  3913. display = platform_get_drvdata(pdev);
  3914. if (!display) {
  3915. pr_err("invalid display\n");
  3916. return;
  3917. }
  3918. mutex_lock(&display->display_lock);
  3919. rc = dsi_panel_drv_deinit(display->panel);
  3920. if (rc)
  3921. pr_err("[%s] failed to deinit panel driver, rc=%d\n",
  3922. display->name, rc);
  3923. rc = dsi_display_mipi_host_deinit(display);
  3924. if (rc)
  3925. pr_err("[%s] failed to deinit mipi hosts, rc=%d\n",
  3926. display->name,
  3927. rc);
  3928. display_for_each_ctrl(i, display) {
  3929. display_ctrl = &display->ctrl[i];
  3930. rc = dsi_phy_drv_deinit(display_ctrl->phy);
  3931. if (rc)
  3932. pr_err("[%s] failed to deinit phy%d driver, rc=%d\n",
  3933. display->name, i, rc);
  3934. rc = dsi_ctrl_drv_deinit(display_ctrl->ctrl);
  3935. if (rc)
  3936. pr_err("[%s] failed to deinit ctrl%d driver, rc=%d\n",
  3937. display->name, i, rc);
  3938. }
  3939. atomic_set(&display->clkrate_change_pending, 0);
  3940. (void)dsi_display_sysfs_deinit(display);
  3941. (void)dsi_display_debugfs_deinit(display);
  3942. mutex_unlock(&display->display_lock);
  3943. }
  3944. static const struct component_ops dsi_display_comp_ops = {
  3945. .bind = dsi_display_bind,
  3946. .unbind = dsi_display_unbind,
  3947. };
  3948. static struct platform_driver dsi_display_driver = {
  3949. .probe = dsi_display_dev_probe,
  3950. .remove = dsi_display_dev_remove,
  3951. .driver = {
  3952. .name = "msm-dsi-display",
  3953. .of_match_table = dsi_display_dt_match,
  3954. .suppress_bind_attrs = true,
  3955. },
  3956. };
  3957. static int dsi_display_init(struct dsi_display *display)
  3958. {
  3959. int rc = 0;
  3960. struct platform_device *pdev = display->pdev;
  3961. mutex_init(&display->display_lock);
  3962. rc = _dsi_display_dev_init(display);
  3963. if (rc) {
  3964. pr_err("device init failed, rc=%d\n", rc);
  3965. goto end;
  3966. }
  3967. rc = component_add(&pdev->dev, &dsi_display_comp_ops);
  3968. if (rc)
  3969. pr_err("component add failed, rc=%d\n", rc);
  3970. pr_debug("component add success: %s\n", display->name);
  3971. end:
  3972. return rc;
  3973. }
  3974. static void dsi_display_firmware_display(const struct firmware *fw,
  3975. void *context)
  3976. {
  3977. struct dsi_display *display = context;
  3978. if (fw) {
  3979. pr_debug("reading data from firmware, size=%zd\n",
  3980. fw->size);
  3981. display->fw = fw;
  3982. display->name = "dsi_firmware_display";
  3983. }
  3984. if (dsi_display_init(display))
  3985. return;
  3986. pr_debug("success\n");
  3987. }
  3988. int dsi_display_dev_probe(struct platform_device *pdev)
  3989. {
  3990. struct dsi_display *display = NULL;
  3991. struct device_node *node = NULL, *panel_node = NULL, *mdp_node = NULL;
  3992. int rc = 0, index = DSI_PRIMARY;
  3993. bool firm_req = false;
  3994. struct dsi_display_boot_param *boot_disp;
  3995. if (!pdev || !pdev->dev.of_node) {
  3996. pr_err("pdev not found\n");
  3997. rc = -ENODEV;
  3998. goto end;
  3999. }
  4000. display = devm_kzalloc(&pdev->dev, sizeof(*display), GFP_KERNEL);
  4001. if (!display) {
  4002. rc = -ENOMEM;
  4003. goto end;
  4004. }
  4005. display->display_type = of_get_property(pdev->dev.of_node,
  4006. "label", NULL);
  4007. if (!display->display_type)
  4008. display->display_type = "primary";
  4009. if (!strcmp(display->display_type, "secondary"))
  4010. index = DSI_SECONDARY;
  4011. boot_disp = &boot_displays[index];
  4012. node = pdev->dev.of_node;
  4013. if (boot_disp->boot_disp_en) {
  4014. mdp_node = of_parse_phandle(node, "qcom,mdp", 0);
  4015. if (!mdp_node) {
  4016. pr_err("mdp_node not found\n");
  4017. rc = -ENODEV;
  4018. goto end;
  4019. }
4020. /* The panel name should be the same as the UEFI name index */
  4021. panel_node = of_find_node_by_name(mdp_node, boot_disp->name);
  4022. if (!panel_node)
  4023. pr_warn("panel_node %s not found\n", boot_disp->name);
  4024. } else {
  4025. panel_node = of_parse_phandle(node,
  4026. "qcom,dsi-default-panel", 0);
  4027. if (!panel_node)
  4028. pr_warn("default panel not found\n");
  4029. if (IS_ENABLED(CONFIG_DSI_PARSER))
  4030. firm_req = !request_firmware_nowait(
  4031. THIS_MODULE, 1, "dsi_prop",
  4032. &pdev->dev, GFP_KERNEL, display,
  4033. dsi_display_firmware_display);
  4034. }
  4035. boot_disp->node = pdev->dev.of_node;
  4036. boot_disp->disp = display;
  4037. display->panel_node = panel_node;
  4038. display->pdev = pdev;
  4039. display->boot_disp = boot_disp;
  4040. dsi_display_parse_cmdline_topology(display, index);
  4041. platform_set_drvdata(pdev, display);
  4042. /* initialize display in firmware callback */
  4043. if (!firm_req) {
  4044. rc = dsi_display_init(display);
  4045. if (rc)
  4046. goto end;
  4047. }
  4048. return 0;
  4049. end:
  4050. if (display)
  4051. devm_kfree(&pdev->dev, display);
  4052. return rc;
  4053. }
  4054. int dsi_display_dev_remove(struct platform_device *pdev)
  4055. {
  4056. int rc = 0;
  4057. struct dsi_display *display;
  4058. if (!pdev) {
  4059. pr_err("Invalid device\n");
  4060. return -EINVAL;
  4061. }
  4062. display = platform_get_drvdata(pdev);
  4063. /* decrement ref count */
  4064. of_node_put(display->panel_node);
  4065. (void)_dsi_display_dev_deinit(display);
  4066. platform_set_drvdata(pdev, NULL);
  4067. devm_kfree(&pdev->dev, display);
  4068. return rc;
  4069. }
  4070. int dsi_display_get_num_of_displays(void)
  4071. {
  4072. int i, count = 0;
  4073. for (i = 0; i < MAX_DSI_ACTIVE_DISPLAY; i++) {
  4074. struct dsi_display *display = boot_displays[i].disp;
  4075. if (display && display->panel_node)
  4076. count++;
  4077. }
  4078. return count;
  4079. }
  4080. int dsi_display_get_active_displays(void **display_array, u32 max_display_count)
  4081. {
  4082. int index = 0, count = 0;
  4083. if (!display_array || !max_display_count) {
  4084. pr_err("invalid params\n");
  4085. return 0;
  4086. }
  4087. for (index = 0; index < MAX_DSI_ACTIVE_DISPLAY; index++) {
  4088. struct dsi_display *display = boot_displays[index].disp;
  4089. if (display && display->panel_node)
  4090. display_array[count++] = display;
  4091. }
  4092. return count;
  4093. }
  4094. int dsi_display_drm_bridge_init(struct dsi_display *display,
  4095. struct drm_encoder *enc)
  4096. {
  4097. int rc = 0;
  4098. struct dsi_bridge *bridge;
  4099. struct msm_drm_private *priv = NULL;
  4100. if (!display || !display->drm_dev || !enc) {
  4101. pr_err("invalid param(s)\n");
  4102. return -EINVAL;
  4103. }
  4104. mutex_lock(&display->display_lock);
  4105. priv = display->drm_dev->dev_private;
  4106. if (!priv) {
  4107. pr_err("Private data is not present\n");
  4108. rc = -EINVAL;
  4109. goto error;
  4110. }
  4111. if (display->bridge) {
  4112. pr_err("display is already initialize\n");
  4113. goto error;
  4114. }
  4115. bridge = dsi_drm_bridge_init(display, display->drm_dev, enc);
  4116. if (IS_ERR_OR_NULL(bridge)) {
  4117. rc = PTR_ERR(bridge);
  4118. pr_err("[%s] brige init failed, %d\n", display->name, rc);
  4119. goto error;
  4120. }
  4121. display->bridge = bridge;
  4122. priv->bridges[priv->num_bridges++] = &bridge->base;
  4123. error:
  4124. mutex_unlock(&display->display_lock);
  4125. return rc;
  4126. }
  4127. int dsi_display_drm_bridge_deinit(struct dsi_display *display)
  4128. {
  4129. int rc = 0;
  4130. if (!display) {
  4131. pr_err("Invalid params\n");
  4132. return -EINVAL;
  4133. }
  4134. mutex_lock(&display->display_lock);
  4135. dsi_drm_bridge_cleanup(display->bridge);
  4136. display->bridge = NULL;
  4137. mutex_unlock(&display->display_lock);
  4138. return rc;
  4139. }
  4140. /* Hook functions to call external connector, pointer validation is
  4141. * done in dsi_display_drm_ext_bridge_init.
  4142. */
  4143. static enum drm_connector_status dsi_display_drm_ext_detect(
  4144. struct drm_connector *connector,
  4145. bool force,
  4146. void *disp)
  4147. {
  4148. struct dsi_display *display = disp;
  4149. return display->ext_conn->funcs->detect(display->ext_conn, force);
  4150. }
  4151. static int dsi_display_drm_ext_get_modes(
  4152. struct drm_connector *connector, void *disp)
  4153. {
  4154. struct dsi_display *display = disp;
  4155. struct drm_display_mode *pmode, *pt;
  4156. int count;
  4157. /* if there are modes defined in panel, ignore external modes */
  4158. if (display->panel->num_timing_nodes)
  4159. return dsi_connector_get_modes(connector, disp);
  4160. count = display->ext_conn->helper_private->get_modes(
  4161. display->ext_conn);
  4162. list_for_each_entry_safe(pmode, pt,
  4163. &display->ext_conn->probed_modes, head) {
  4164. list_move_tail(&pmode->head, &connector->probed_modes);
  4165. }
  4166. connector->display_info = display->ext_conn->display_info;
  4167. return count;
  4168. }
  4169. static enum drm_mode_status dsi_display_drm_ext_mode_valid(
  4170. struct drm_connector *connector,
  4171. struct drm_display_mode *mode,
  4172. void *disp)
  4173. {
  4174. struct dsi_display *display = disp;
  4175. enum drm_mode_status status;
  4176. /* always do internal mode_valid check */
  4177. status = dsi_conn_mode_valid(connector, mode, disp);
  4178. if (status != MODE_OK)
  4179. return status;
  4180. return display->ext_conn->helper_private->mode_valid(
  4181. display->ext_conn, mode);
  4182. }
  4183. static int dsi_display_drm_ext_atomic_check(struct drm_connector *connector,
  4184. void *disp,
  4185. struct drm_connector_state *c_state)
  4186. {
  4187. struct dsi_display *display = disp;
  4188. return display->ext_conn->helper_private->atomic_check(
  4189. display->ext_conn, c_state);
  4190. }
  4191. static int dsi_display_ext_get_info(struct drm_connector *connector,
  4192. struct msm_display_info *info, void *disp)
  4193. {
  4194. struct dsi_display *display;
  4195. int i;
  4196. if (!info || !disp) {
  4197. pr_err("invalid params\n");
  4198. return -EINVAL;
  4199. }
  4200. display = disp;
  4201. if (!display->panel) {
  4202. pr_err("invalid display panel\n");
  4203. return -EINVAL;
  4204. }
  4205. mutex_lock(&display->display_lock);
  4206. memset(info, 0, sizeof(struct msm_display_info));
  4207. info->intf_type = DRM_MODE_CONNECTOR_DSI;
  4208. info->num_of_h_tiles = display->ctrl_count;
  4209. for (i = 0; i < info->num_of_h_tiles; i++)
  4210. info->h_tile_instance[i] = display->ctrl[i].ctrl->cell_index;
  4211. info->is_connected = connector->status != connector_status_disconnected;
  4212. if (!strcmp(display->display_type, "primary"))
  4213. info->is_primary = true;
  4214. else
  4215. info->is_primary = false;
  4216. info->capabilities |= (MSM_DISPLAY_CAP_VID_MODE |
  4217. MSM_DISPLAY_CAP_EDID | MSM_DISPLAY_CAP_HOT_PLUG);
  4218. info->curr_panel_mode = MSM_DISPLAY_VIDEO_MODE;
  4219. mutex_unlock(&display->display_lock);
  4220. return 0;
  4221. }
  4222. static int dsi_display_ext_get_mode_info(struct drm_connector *connector,
  4223. const struct drm_display_mode *drm_mode,
  4224. struct msm_mode_info *mode_info,
  4225. u32 max_mixer_width, void *display)
  4226. {
  4227. struct msm_display_topology *topology;
  4228. if (!drm_mode || !mode_info)
  4229. return -EINVAL;
  4230. memset(mode_info, 0, sizeof(*mode_info));
  4231. mode_info->frame_rate = drm_mode->vrefresh;
  4232. mode_info->vtotal = drm_mode->vtotal;
  4233. topology = &mode_info->topology;
  4234. topology->num_lm = (max_mixer_width <= drm_mode->hdisplay) ? 2 : 1;
  4235. topology->num_enc = 0;
  4236. topology->num_intf = topology->num_lm;
  4237. mode_info->comp_info.comp_type = MSM_DISPLAY_COMPRESSION_NONE;
  4238. return 0;
  4239. }
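/*
 * Reverse-map a drm_bridge to its dsi_display_ext_bridge wrapper: walk the
 * SDE connector list, find the connector whose encoder owns this bridge,
 * then scan that display's ext_bridge array for the matching entry.
 */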
  4240. static struct dsi_display_ext_bridge *dsi_display_ext_get_bridge(
  4241. struct drm_bridge *bridge)
  4242. {
  4243. struct msm_drm_private *priv;
  4244. struct sde_kms *sde_kms;
  4245. struct list_head *connector_list;
  4246. struct drm_connector *conn_iter;
  4247. struct sde_connector *sde_conn;
  4248. struct dsi_display *display;
  4249. int i;
  4250. if (!bridge || !bridge->encoder) {
  4251. SDE_ERROR("invalid argument\n");
  4252. return NULL;
  4253. }
  4254. priv = bridge->dev->dev_private;
  4255. sde_kms = to_sde_kms(priv->kms);
  4256. connector_list = &sde_kms->dev->mode_config.connector_list;
  4257. list_for_each_entry(conn_iter, connector_list, head) {
  4258. sde_conn = to_sde_connector(conn_iter);
  4259. if (sde_conn->encoder == bridge->encoder) {
  4260. display = sde_conn->display;
  4261. for (i = 0; i < display->ctrl_count; i++) {
  4262. if (display->ext_bridge[i].bridge == bridge)
  4263. return &display->ext_bridge[i];
  4264. }
  4265. }
  4266. }
  4267. return NULL;
  4268. }
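/*
 * In split-DSI each controller drives only a slice of the panel, so the
 * horizontal timing fields and pixel clock handed to an external bridge
 * are divided by ctrl_count before the bridge sees them.
 */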
  4269. static void dsi_display_drm_ext_adjust_timing(
  4270. const struct dsi_display *display,
  4271. struct drm_display_mode *mode)
  4272. {
  4273. mode->hdisplay /= display->ctrl_count;
  4274. mode->hsync_start /= display->ctrl_count;
  4275. mode->hsync_end /= display->ctrl_count;
  4276. mode->htotal /= display->ctrl_count;
  4277. mode->hskew /= display->ctrl_count;
  4278. mode->clock /= display->ctrl_count;
  4279. }
  4280. static enum drm_mode_status dsi_display_drm_ext_bridge_mode_valid(
  4281. struct drm_bridge *bridge,
  4282. const struct drm_display_mode *mode)
  4283. {
  4284. struct dsi_display_ext_bridge *ext_bridge;
  4285. struct drm_display_mode tmp;
  4286. ext_bridge = dsi_display_ext_get_bridge(bridge);
  4287. if (!ext_bridge)
  4288. return MODE_ERROR;
  4289. tmp = *mode;
  4290. dsi_display_drm_ext_adjust_timing(ext_bridge->display, &tmp);
  4291. return ext_bridge->orig_funcs->mode_valid(bridge, &tmp);
  4292. }
  4293. static bool dsi_display_drm_ext_bridge_mode_fixup(
  4294. struct drm_bridge *bridge,
  4295. const struct drm_display_mode *mode,
  4296. struct drm_display_mode *adjusted_mode)
  4297. {
  4298. struct dsi_display_ext_bridge *ext_bridge;
  4299. struct drm_display_mode tmp;
  4300. ext_bridge = dsi_display_ext_get_bridge(bridge);
  4301. if (!ext_bridge)
  4302. return false;
  4303. tmp = *mode;
  4304. dsi_display_drm_ext_adjust_timing(ext_bridge->display, &tmp);
  4305. return ext_bridge->orig_funcs->mode_fixup(bridge, &tmp, &tmp);
  4306. }
  4307. static void dsi_display_drm_ext_bridge_mode_set(
  4308. struct drm_bridge *bridge,
  4309. struct drm_display_mode *mode,
  4310. struct drm_display_mode *adjusted_mode)
  4311. {
  4312. struct dsi_display_ext_bridge *ext_bridge;
  4313. struct drm_display_mode tmp;
  4314. ext_bridge = dsi_display_ext_get_bridge(bridge);
  4315. if (!ext_bridge)
  4316. return;
  4317. tmp = *mode;
  4318. dsi_display_drm_ext_adjust_timing(ext_bridge->display, &tmp);
  4319. ext_bridge->orig_funcs->mode_set(bridge, &tmp, &tmp);
  4320. }
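/*
 * mipi_dsi_host attach hook used with external bridges: translate the
 * bridge's mipi_dsi_device settings (lane count, pixel format and
 * video-mode flags) into the panel host/video config. Command mode is
 * not supported on this path.
 */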
  4321. static int dsi_host_ext_attach(struct mipi_dsi_host *host,
  4322. struct mipi_dsi_device *dsi)
  4323. {
  4324. struct dsi_display *display = to_dsi_display(host);
  4325. struct dsi_panel *panel;
  4326. if (!host || !dsi || !display->panel) {
  4327. pr_err("Invalid param\n");
  4328. return -EINVAL;
  4329. }
  4330. pr_debug("DSI[%s]: channel=%d, lanes=%d, format=%d, mode_flags=%lx\n",
  4331. dsi->name, dsi->channel, dsi->lanes,
  4332. dsi->format, dsi->mode_flags);
  4333. panel = display->panel;
  4334. panel->host_config.data_lanes = 0;
  4335. if (dsi->lanes > 0)
  4336. panel->host_config.data_lanes |= DSI_DATA_LANE_0;
  4337. if (dsi->lanes > 1)
  4338. panel->host_config.data_lanes |= DSI_DATA_LANE_1;
  4339. if (dsi->lanes > 2)
  4340. panel->host_config.data_lanes |= DSI_DATA_LANE_2;
  4341. if (dsi->lanes > 3)
  4342. panel->host_config.data_lanes |= DSI_DATA_LANE_3;
  4343. switch (dsi->format) {
  4344. case MIPI_DSI_FMT_RGB888:
  4345. panel->host_config.dst_format = DSI_PIXEL_FORMAT_RGB888;
  4346. break;
  4347. case MIPI_DSI_FMT_RGB666:
  4348. panel->host_config.dst_format = DSI_PIXEL_FORMAT_RGB666_LOOSE;
  4349. break;
  4350. case MIPI_DSI_FMT_RGB666_PACKED:
  4351. panel->host_config.dst_format = DSI_PIXEL_FORMAT_RGB666;
  4352. break;
  4353. case MIPI_DSI_FMT_RGB565:
  4354. default:
  4355. panel->host_config.dst_format = DSI_PIXEL_FORMAT_RGB565;
  4356. break;
  4357. }
  4358. if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO) {
  4359. panel->panel_mode = DSI_OP_VIDEO_MODE;
  4360. if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST)
  4361. panel->video_config.traffic_mode =
  4362. DSI_VIDEO_TRAFFIC_BURST_MODE;
  4363. else if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
  4364. panel->video_config.traffic_mode =
  4365. DSI_VIDEO_TRAFFIC_SYNC_PULSES;
  4366. else
  4367. panel->video_config.traffic_mode =
  4368. DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS;
  4369. panel->video_config.hsa_lp11_en =
  4370. dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSA;
  4371. panel->video_config.hbp_lp11_en =
  4372. dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HBP;
  4373. panel->video_config.hfp_lp11_en =
  4374. dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HFP;
  4375. panel->video_config.pulse_mode_hsa_he =
  4376. dsi->mode_flags & MIPI_DSI_MODE_VIDEO_HSE;
  4377. panel->video_config.bllp_lp11_en =
  4378. dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BLLP;
  4379. panel->video_config.eof_bllp_lp11_en =
  4380. dsi->mode_flags & MIPI_DSI_MODE_VIDEO_EOF_BLLP;
  4381. } else {
  4382. panel->panel_mode = DSI_OP_CMD_MODE;
  4383. pr_err("command mode not supported by ext bridge\n");
  4384. return -ENOTSUPP;
  4385. }
  4386. panel->bl_config.type = DSI_BACKLIGHT_UNKNOWN;
  4387. return 0;
  4388. }
  4389. static struct mipi_dsi_host_ops dsi_host_ext_ops = {
  4390. .attach = dsi_host_ext_attach,
  4391. .detach = dsi_host_detach,
  4392. .transfer = dsi_host_transfer,
  4393. };
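/*
 * Attach all external bridges described in DT behind the DSI bridge. For
 * split mode (more than one ext bridge) the mode_fixup/mode_valid/mode_set
 * ops are wrapped so timings can be halved per controller. The connector
 * created by the bridge during attach is pulled off the global connector
 * list and, when usable, its ops are hooked into the SDE connector.
 */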
  4394. int dsi_display_drm_ext_bridge_init(struct dsi_display *display,
  4395. struct drm_encoder *encoder, struct drm_connector *connector)
  4396. {
  4397. struct drm_device *drm = encoder->dev;
  4398. struct drm_bridge *bridge = encoder->bridge;
  4399. struct drm_bridge *ext_bridge;
  4400. struct drm_connector *ext_conn;
  4401. struct sde_connector *sde_conn = to_sde_connector(connector);
  4402. struct drm_bridge *prev_bridge = bridge;
  4403. int rc = 0, i;
  4404. for (i = 0; i < display->ext_bridge_cnt; i++) {
  4405. struct dsi_display_ext_bridge *ext_bridge_info =
  4406. &display->ext_bridge[i];
  4407. /* return if ext bridge is already initialized */
  4408. if (ext_bridge_info->bridge)
  4409. return 0;
  4410. ext_bridge = of_drm_find_bridge(ext_bridge_info->node_of);
  4411. if (IS_ERR_OR_NULL(ext_bridge)) {
  4412. rc = PTR_ERR(ext_bridge);
  4413. pr_err("failed to find ext bridge\n");
  4414. goto error;
  4415. }
  4416. /* override functions for mode adjustment */
  4417. if (display->ext_bridge_cnt > 1) {
  4418. ext_bridge_info->bridge_funcs = *ext_bridge->funcs;
  4419. if (ext_bridge->funcs->mode_fixup)
  4420. ext_bridge_info->bridge_funcs.mode_fixup =
  4421. dsi_display_drm_ext_bridge_mode_fixup;
  4422. if (ext_bridge->funcs->mode_valid)
  4423. ext_bridge_info->bridge_funcs.mode_valid =
  4424. dsi_display_drm_ext_bridge_mode_valid;
  4425. if (ext_bridge->funcs->mode_set)
  4426. ext_bridge_info->bridge_funcs.mode_set =
  4427. dsi_display_drm_ext_bridge_mode_set;
  4428. ext_bridge_info->orig_funcs = ext_bridge->funcs;
  4429. ext_bridge->funcs = &ext_bridge_info->bridge_funcs;
  4430. }
  4431. rc = drm_bridge_attach(encoder, ext_bridge, prev_bridge);
  4432. if (rc) {
  4433. pr_err("[%s] ext brige attach failed, %d\n",
  4434. display->name, rc);
  4435. goto error;
  4436. }
  4437. ext_bridge_info->display = display;
  4438. ext_bridge_info->bridge = ext_bridge;
  4439. prev_bridge = ext_bridge;
  4440. /* ext bridge will init its own connector during attach,
  4441. * we need to extract it out of the connector list
  4442. */
  4443. spin_lock_irq(&drm->mode_config.connector_list_lock);
  4444. ext_conn = list_last_entry(&drm->mode_config.connector_list,
  4445. struct drm_connector, head);
  4446. if (ext_conn && ext_conn != connector &&
  4447. ext_conn->encoder_ids[0] == bridge->encoder->base.id) {
  4448. list_del_init(&ext_conn->head);
  4449. display->ext_conn = ext_conn;
  4450. }
  4451. spin_unlock_irq(&drm->mode_config.connector_list_lock);
4452. /* if no valid external connector was created, or we are in split
4453. * mode, use the default settings from the panel defined in DT.
4454. */
  4455. if (!display->ext_conn ||
  4456. !display->ext_conn->funcs ||
  4457. !display->ext_conn->helper_private ||
  4458. display->ext_bridge_cnt > 1) {
  4459. display->ext_conn = NULL;
  4460. continue;
  4461. }
  4462. /* otherwise, hook up the functions to use external connector */
  4463. if (display->ext_conn->funcs->detect)
  4464. sde_conn->ops.detect = dsi_display_drm_ext_detect;
  4465. if (display->ext_conn->helper_private->get_modes)
  4466. sde_conn->ops.get_modes =
  4467. dsi_display_drm_ext_get_modes;
  4468. if (display->ext_conn->helper_private->mode_valid)
  4469. sde_conn->ops.mode_valid =
  4470. dsi_display_drm_ext_mode_valid;
  4471. if (display->ext_conn->helper_private->atomic_check)
  4472. sde_conn->ops.atomic_check =
  4473. dsi_display_drm_ext_atomic_check;
  4474. sde_conn->ops.get_info =
  4475. dsi_display_ext_get_info;
  4476. sde_conn->ops.get_mode_info =
  4477. dsi_display_ext_get_mode_info;
  4478. /* add support to attach/detach */
  4479. display->host.ops = &dsi_host_ext_ops;
  4480. }
  4481. return 0;
  4482. error:
  4483. return rc;
  4484. }
  4485. int dsi_display_get_info(struct drm_connector *connector,
  4486. struct msm_display_info *info, void *disp)
  4487. {
  4488. struct dsi_display *display;
  4489. struct dsi_panel_phy_props phy_props;
  4490. int i, rc;
  4491. if (!info || !disp) {
  4492. pr_err("invalid params\n");
  4493. return -EINVAL;
  4494. }
  4495. display = disp;
  4496. if (!display->panel) {
  4497. pr_err("invalid display panel\n");
  4498. return -EINVAL;
  4499. }
  4500. mutex_lock(&display->display_lock);
  4501. rc = dsi_panel_get_phy_props(display->panel, &phy_props);
  4502. if (rc) {
  4503. pr_err("[%s] failed to get panel phy props, rc=%d\n",
  4504. display->name, rc);
  4505. goto error;
  4506. }
  4507. memset(info, 0, sizeof(struct msm_display_info));
  4508. info->intf_type = DRM_MODE_CONNECTOR_DSI;
  4509. info->num_of_h_tiles = display->ctrl_count;
  4510. for (i = 0; i < info->num_of_h_tiles; i++)
  4511. info->h_tile_instance[i] = display->ctrl[i].ctrl->cell_index;
  4512. info->is_connected = true;
  4513. info->is_primary = false;
  4514. if (!strcmp(display->display_type, "primary"))
  4515. info->is_primary = true;
  4516. info->width_mm = phy_props.panel_width_mm;
  4517. info->height_mm = phy_props.panel_height_mm;
  4518. info->max_width = 1920;
  4519. info->max_height = 1080;
  4520. info->qsync_min_fps =
  4521. display->panel->qsync_min_fps;
  4522. switch (display->panel->panel_mode) {
  4523. case DSI_OP_VIDEO_MODE:
  4524. info->curr_panel_mode = MSM_DISPLAY_VIDEO_MODE;
  4525. info->capabilities |= MSM_DISPLAY_CAP_VID_MODE;
  4526. if (display->panel->panel_mode_switch_enabled)
  4527. info->capabilities |= MSM_DISPLAY_CAP_CMD_MODE;
  4528. break;
  4529. case DSI_OP_CMD_MODE:
  4530. info->curr_panel_mode = MSM_DISPLAY_CMD_MODE;
  4531. info->capabilities |= MSM_DISPLAY_CAP_CMD_MODE;
  4532. if (display->panel->panel_mode_switch_enabled)
  4533. info->capabilities |= MSM_DISPLAY_CAP_VID_MODE;
  4534. info->is_te_using_watchdog_timer =
  4535. display->panel->te_using_watchdog_timer |
  4536. display->sw_te_using_wd;
  4537. break;
  4538. default:
  4539. pr_err("unknwown dsi panel mode %d\n",
  4540. display->panel->panel_mode);
  4541. break;
  4542. }
  4543. if (display->panel->esd_config.esd_enabled)
  4544. info->capabilities |= MSM_DISPLAY_ESD_ENABLED;
  4545. info->te_source = display->te_source;
  4546. error:
  4547. mutex_unlock(&display->display_lock);
  4548. return rc;
  4549. }
  4550. static int dsi_display_get_mode_count_no_lock(struct dsi_display *display,
  4551. u32 *count)
  4552. {
  4553. struct dsi_dfps_capabilities dfps_caps;
  4554. int num_dfps_rates, rc = 0;
  4555. if (!display || !display->panel) {
  4556. pr_err("invalid display:%d panel:%d\n", display != NULL,
  4557. display ? display->panel != NULL : 0);
  4558. return -EINVAL;
  4559. }
  4560. *count = display->panel->num_timing_nodes;
  4561. rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
  4562. if (rc) {
  4563. pr_err("[%s] failed to get dfps caps from panel\n",
  4564. display->name);
  4565. return rc;
  4566. }
  4567. num_dfps_rates = !dfps_caps.dfps_support ? 1 :
  4568. dfps_caps.max_refresh_rate -
  4569. dfps_caps.min_refresh_rate + 1;
  4570. /* Inflate num_of_modes by fps in dfps */
  4571. *count = display->panel->num_timing_nodes * num_dfps_rates;
  4572. return 0;
  4573. }
  4574. int dsi_display_get_mode_count(struct dsi_display *display,
  4575. u32 *count)
  4576. {
  4577. int rc;
  4578. if (!display || !display->panel) {
  4579. pr_err("invalid display:%d panel:%d\n", display != NULL,
  4580. display ? display->panel != NULL : 0);
  4581. return -EINVAL;
  4582. }
  4583. mutex_lock(&display->display_lock);
  4584. rc = dsi_display_get_mode_count_no_lock(display, count);
  4585. mutex_unlock(&display->display_lock);
4586. return rc;
  4587. }
  4588. void dsi_display_put_mode(struct dsi_display *display,
  4589. struct dsi_display_mode *mode)
  4590. {
  4591. dsi_panel_put_mode(mode);
  4592. }
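/*
 * Build and cache the full mode list in display->modes: every panel timing
 * node is duplicated once per supported DFPS refresh rate, horizontal
 * timings and pixel clock are scaled up by ctrl_count for split-DSI, and
 * command-mode transfer times are recalculated. The cached list is reused
 * on subsequent calls until it is freed.
 */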
  4593. int dsi_display_get_modes(struct dsi_display *display,
  4594. struct dsi_display_mode **out_modes)
  4595. {
  4596. struct dsi_dfps_capabilities dfps_caps;
  4597. u32 num_dfps_rates, panel_mode_count, total_mode_count;
  4598. u32 mode_idx, array_idx = 0;
  4599. int i, rc = -EINVAL;
  4600. if (!display || !out_modes) {
  4601. pr_err("Invalid params\n");
  4602. return -EINVAL;
  4603. }
  4604. *out_modes = NULL;
  4605. mutex_lock(&display->display_lock);
  4606. if (display->modes)
  4607. goto exit;
  4608. rc = dsi_display_get_mode_count_no_lock(display, &total_mode_count);
  4609. if (rc)
  4610. goto error;
  4611. display->modes = kcalloc(total_mode_count, sizeof(*display->modes),
  4612. GFP_KERNEL);
  4613. if (!display->modes) {
  4614. rc = -ENOMEM;
  4615. goto error;
  4616. }
  4617. rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
  4618. if (rc) {
  4619. pr_err("[%s] failed to get dfps caps from panel\n",
  4620. display->name);
  4621. goto error;
  4622. }
  4623. num_dfps_rates = !dfps_caps.dfps_support ? 1 :
  4624. dfps_caps.max_refresh_rate -
  4625. dfps_caps.min_refresh_rate + 1;
  4626. panel_mode_count = display->panel->num_timing_nodes;
  4627. for (mode_idx = 0; mode_idx < panel_mode_count; mode_idx++) {
  4628. struct dsi_display_mode panel_mode;
  4629. int topology_override = NO_OVERRIDE;
  4630. if (display->cmdline_timing == mode_idx)
  4631. topology_override = display->cmdline_topology;
  4632. memset(&panel_mode, 0, sizeof(panel_mode));
  4633. rc = dsi_panel_get_mode(display->panel, mode_idx,
  4634. &panel_mode,
  4635. topology_override);
  4636. if (rc) {
  4637. pr_err("[%s] failed to get mode idx %d from panel\n",
  4638. display->name, mode_idx);
  4639. goto error;
  4640. }
  4641. /* Calculate dsi frame transfer time */
  4642. if (display->panel->panel_mode == DSI_OP_CMD_MODE) {
  4643. dsi_panel_calc_dsi_transfer_time(
  4644. &display->panel->host_config,
  4645. &panel_mode.timing);
  4646. panel_mode.priv_info->dsi_transfer_time_us =
  4647. panel_mode.timing.dsi_transfer_time_us;
  4648. panel_mode.priv_info->min_dsi_clk_hz =
  4649. panel_mode.timing.min_dsi_clk_hz;
  4650. panel_mode.priv_info->mdp_transfer_time_us =
  4651. panel_mode.priv_info->dsi_transfer_time_us;
  4652. panel_mode.timing.mdp_transfer_time_us =
  4653. panel_mode.timing.dsi_transfer_time_us;
  4654. }
  4655. if (display->ctrl_count > 1) { /* TODO: remove if */
  4656. panel_mode.timing.h_active *= display->ctrl_count;
  4657. panel_mode.timing.h_front_porch *= display->ctrl_count;
  4658. panel_mode.timing.h_sync_width *= display->ctrl_count;
  4659. panel_mode.timing.h_back_porch *= display->ctrl_count;
  4660. panel_mode.timing.h_skew *= display->ctrl_count;
  4661. panel_mode.pixel_clk_khz *= display->ctrl_count;
  4662. }
  4663. for (i = 0; i < num_dfps_rates; i++) {
  4664. struct dsi_display_mode *sub_mode =
  4665. &display->modes[array_idx];
  4666. u32 curr_refresh_rate;
  4667. if (!sub_mode) {
  4668. pr_err("invalid mode data\n");
  4669. rc = -EFAULT;
  4670. goto error;
  4671. }
  4672. memcpy(sub_mode, &panel_mode, sizeof(panel_mode));
  4673. if (dfps_caps.dfps_support) {
  4674. curr_refresh_rate =
  4675. sub_mode->timing.refresh_rate;
  4676. sub_mode->timing.refresh_rate =
  4677. dfps_caps.min_refresh_rate +
  4678. (i % num_dfps_rates);
  4679. dsi_display_get_dfps_timing(display,
  4680. sub_mode, curr_refresh_rate);
  4681. sub_mode->pixel_clk_khz =
  4682. (DSI_H_TOTAL_DSC(&sub_mode->timing) *
  4683. DSI_V_TOTAL(&sub_mode->timing) *
  4684. sub_mode->timing.refresh_rate) / 1000;
  4685. }
  4686. array_idx++;
  4687. }
  4688. }
  4689. exit:
  4690. *out_modes = display->modes;
  4691. rc = 0;
  4692. error:
4693. if (rc) {
4694. kfree(display->modes);
display->modes = NULL;
}
  4695. mutex_unlock(&display->display_lock);
  4696. return rc;
  4697. }
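/*
 * Return the vertical front porch of the cached mode that matches the
 * given h_active/v_active at the panel's current (or, with DFPS, maximum)
 * refresh rate. h_active is scaled by ctrl_count before matching.
 */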
  4698. int dsi_display_get_panel_vfp(void *dsi_display,
  4699. int h_active, int v_active)
  4700. {
  4701. int i, rc = 0;
  4702. u32 count, refresh_rate = 0;
  4703. struct dsi_dfps_capabilities dfps_caps;
  4704. struct dsi_display *display = (struct dsi_display *)dsi_display;
  4705. if (!display)
  4706. return -EINVAL;
  4707. rc = dsi_display_get_mode_count(display, &count);
  4708. if (rc)
  4709. return rc;
  4710. mutex_lock(&display->display_lock);
  4711. if (display->panel && display->panel->cur_mode)
  4712. refresh_rate = display->panel->cur_mode->timing.refresh_rate;
  4713. dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
  4714. if (dfps_caps.dfps_support)
  4715. refresh_rate = dfps_caps.max_refresh_rate;
  4716. if (!refresh_rate) {
  4717. mutex_unlock(&display->display_lock);
  4718. pr_err("Null Refresh Rate\n");
  4719. return -EINVAL;
  4720. }
  4721. h_active *= display->ctrl_count;
  4722. for (i = 0; i < count; i++) {
  4723. struct dsi_display_mode *m = &display->modes[i];
  4724. if (m && v_active == m->timing.v_active &&
  4725. h_active == m->timing.h_active &&
  4726. refresh_rate == m->timing.refresh_rate) {
  4727. rc = m->timing.v_front_porch;
  4728. break;
  4729. }
  4730. }
  4731. mutex_unlock(&display->display_lock);
  4732. return rc;
  4733. }
  4734. int dsi_display_get_default_lms(void *dsi_display, u32 *num_lm)
  4735. {
  4736. struct dsi_display *display = (struct dsi_display *)dsi_display;
  4737. u32 count, i;
  4738. int rc = 0;
  4739. *num_lm = 0;
  4740. rc = dsi_display_get_mode_count(display, &count);
  4741. if (rc)
  4742. return rc;
  4743. if (!display->modes) {
  4744. struct dsi_display_mode *m;
  4745. rc = dsi_display_get_modes(display, &m);
  4746. if (rc)
  4747. return rc;
  4748. }
  4749. mutex_lock(&display->display_lock);
  4750. for (i = 0; i < count; i++) {
  4751. struct dsi_display_mode *m = &display->modes[i];
  4752. *num_lm = max(m->priv_info->topology.num_lm, *num_lm);
  4753. }
  4754. mutex_unlock(&display->display_lock);
  4755. return rc;
  4756. }
  4757. int dsi_display_find_mode(struct dsi_display *display,
  4758. const struct dsi_display_mode *cmp,
  4759. struct dsi_display_mode **out_mode)
  4760. {
  4761. u32 count, i;
  4762. int rc;
  4763. if (!display || !out_mode)
  4764. return -EINVAL;
  4765. *out_mode = NULL;
  4766. rc = dsi_display_get_mode_count(display, &count);
  4767. if (rc)
  4768. return rc;
  4769. if (!display->modes) {
  4770. struct dsi_display_mode *m;
  4771. rc = dsi_display_get_modes(display, &m);
  4772. if (rc)
  4773. return rc;
  4774. }
  4775. mutex_lock(&display->display_lock);
  4776. for (i = 0; i < count; i++) {
  4777. struct dsi_display_mode *m = &display->modes[i];
  4778. if (cmp->timing.v_active == m->timing.v_active &&
  4779. cmp->timing.h_active == m->timing.h_active &&
  4780. cmp->timing.refresh_rate == m->timing.refresh_rate &&
  4781. cmp->panel_mode == m->panel_mode) {
  4782. *out_mode = m;
  4783. rc = 0;
  4784. break;
  4785. }
  4786. }
  4787. mutex_unlock(&display->display_lock);
  4788. if (!*out_mode) {
  4789. pr_err("[%s] failed to find mode for v_active %u h_active %u rate %u\n",
  4790. display->name, cmp->timing.v_active,
  4791. cmp->timing.h_active, cmp->timing.refresh_rate);
  4792. rc = -ENOENT;
  4793. }
  4794. return rc;
  4795. }
  4796. /**
4797. * dsi_display_validate_mode_vrr() - Validate the variable refresh (VRR) case.
  4798. * @display: DSI display handle.
  4799. * @cur_dsi_mode: Current DSI mode.
  4800. * @mode: Mode value structure to be validated.
  4801. * MSM_MODE_FLAG_SEAMLESS_VRR flag is set if there
  4802. * is change in fps but vactive and hactive are same.
  4803. * Return: error code.
  4804. */
  4805. int dsi_display_validate_mode_vrr(struct dsi_display *display,
  4806. struct dsi_display_mode *cur_dsi_mode,
  4807. struct dsi_display_mode *mode)
  4808. {
  4809. int rc = 0;
  4810. struct dsi_display_mode adj_mode, cur_mode;
  4811. struct dsi_dfps_capabilities dfps_caps;
  4812. u32 curr_refresh_rate;
  4813. if (!display || !mode) {
  4814. pr_err("Invalid params\n");
  4815. return -EINVAL;
  4816. }
  4817. if (!display->panel || !display->panel->cur_mode) {
  4818. pr_debug("Current panel mode not set\n");
  4819. return rc;
  4820. }
  4821. mutex_lock(&display->display_lock);
  4822. adj_mode = *mode;
  4823. cur_mode = *cur_dsi_mode;
  4824. if ((cur_mode.timing.refresh_rate != adj_mode.timing.refresh_rate) &&
  4825. (cur_mode.timing.v_active == adj_mode.timing.v_active) &&
  4826. (cur_mode.timing.h_active == adj_mode.timing.h_active)) {
  4827. curr_refresh_rate = cur_mode.timing.refresh_rate;
  4828. rc = dsi_panel_get_dfps_caps(display->panel, &dfps_caps);
  4829. if (rc) {
  4830. pr_err("[%s] failed to get dfps caps from panel\n",
  4831. display->name);
  4832. goto error;
  4833. }
  4834. cur_mode.timing.refresh_rate =
  4835. adj_mode.timing.refresh_rate;
  4836. rc = dsi_display_get_dfps_timing(display,
  4837. &cur_mode, curr_refresh_rate);
  4838. if (rc) {
  4839. pr_err("[%s] seamless vrr not possible rc=%d\n",
  4840. display->name, rc);
  4841. goto error;
  4842. }
  4843. switch (dfps_caps.type) {
  4844. /*
4845. * Ignore any round-off factors in porch calculation.
4846. * Worst case is set to 5.
  4847. */
  4848. case DSI_DFPS_IMMEDIATE_VFP:
  4849. if (abs(DSI_V_TOTAL(&cur_mode.timing) -
  4850. DSI_V_TOTAL(&adj_mode.timing)) > 5)
  4851. pr_err("Mismatch vfp fps:%d new:%d given:%d\n",
  4852. adj_mode.timing.refresh_rate,
  4853. cur_mode.timing.v_front_porch,
  4854. adj_mode.timing.v_front_porch);
  4855. break;
  4856. case DSI_DFPS_IMMEDIATE_HFP:
  4857. if (abs(DSI_H_TOTAL_DSC(&cur_mode.timing) -
  4858. DSI_H_TOTAL_DSC(&adj_mode.timing)) > 5)
  4859. pr_err("Mismatch hfp fps:%d new:%d given:%d\n",
  4860. adj_mode.timing.refresh_rate,
  4861. cur_mode.timing.h_front_porch,
  4862. adj_mode.timing.h_front_porch);
  4863. break;
  4864. default:
  4865. pr_err("Unsupported DFPS mode %d\n",
  4866. dfps_caps.type);
  4867. rc = -ENOTSUPP;
  4868. }
  4869. pr_debug("Mode switch is seamless variable refresh\n");
  4870. mode->dsi_mode_flags |= DSI_MODE_FLAG_VRR;
  4871. SDE_EVT32(curr_refresh_rate, adj_mode.timing.refresh_rate,
  4872. cur_mode.timing.h_front_porch,
  4873. adj_mode.timing.h_front_porch);
  4874. }
  4875. error:
  4876. mutex_unlock(&display->display_lock);
  4877. return rc;
  4878. }
  4879. int dsi_display_validate_mode(struct dsi_display *display,
  4880. struct dsi_display_mode *mode,
  4881. u32 flags)
  4882. {
  4883. int rc = 0;
  4884. int i;
  4885. struct dsi_display_ctrl *ctrl;
  4886. struct dsi_display_mode adj_mode;
  4887. if (!display || !mode) {
  4888. pr_err("Invalid params\n");
  4889. return -EINVAL;
  4890. }
  4891. mutex_lock(&display->display_lock);
  4892. adj_mode = *mode;
  4893. adjust_timing_by_ctrl_count(display, &adj_mode);
  4894. rc = dsi_panel_validate_mode(display->panel, &adj_mode);
  4895. if (rc) {
  4896. pr_err("[%s] panel mode validation failed, rc=%d\n",
  4897. display->name, rc);
  4898. goto error;
  4899. }
  4900. display_for_each_ctrl(i, display) {
  4901. ctrl = &display->ctrl[i];
  4902. rc = dsi_ctrl_validate_timing(ctrl->ctrl, &adj_mode.timing);
  4903. if (rc) {
  4904. pr_err("[%s] ctrl mode validation failed, rc=%d\n",
  4905. display->name, rc);
  4906. goto error;
  4907. }
  4908. rc = dsi_phy_validate_mode(ctrl->phy, &adj_mode.timing);
  4909. if (rc) {
  4910. pr_err("[%s] phy mode validation failed, rc=%d\n",
  4911. display->name, rc);
  4912. goto error;
  4913. }
  4914. }
  4915. if ((flags & DSI_VALIDATE_FLAG_ALLOW_ADJUST) &&
  4916. (mode->dsi_mode_flags & DSI_MODE_FLAG_SEAMLESS)) {
  4917. rc = dsi_display_validate_mode_seamless(display, mode);
  4918. if (rc) {
  4919. pr_err("[%s] seamless not possible rc=%d\n",
  4920. display->name, rc);
  4921. goto error;
  4922. }
  4923. }
  4924. error:
  4925. mutex_unlock(&display->display_lock);
  4926. return rc;
  4927. }
  4928. int dsi_display_set_mode(struct dsi_display *display,
  4929. struct dsi_display_mode *mode,
  4930. u32 flags)
  4931. {
  4932. int rc = 0;
  4933. struct dsi_display_mode adj_mode;
  4934. struct dsi_mode_info timing;
  4935. if (!display || !mode || !display->panel) {
  4936. pr_err("Invalid params\n");
  4937. return -EINVAL;
  4938. }
  4939. mutex_lock(&display->display_lock);
  4940. adj_mode = *mode;
  4941. timing = adj_mode.timing;
  4942. adjust_timing_by_ctrl_count(display, &adj_mode);
4943. /* For dynamic DSI setting, use specified clock rate */
  4944. if (display->cached_clk_rate > 0)
  4945. adj_mode.priv_info->clk_rate_hz = display->cached_clk_rate;
  4946. rc = dsi_display_validate_mode_set(display, &adj_mode, flags);
  4947. if (rc) {
  4948. pr_err("[%s] mode cannot be set\n", display->name);
  4949. goto error;
  4950. }
  4951. rc = dsi_display_set_mode_sub(display, &adj_mode, flags);
  4952. if (rc) {
  4953. pr_err("[%s] failed to set mode\n", display->name);
  4954. goto error;
  4955. }
  4956. if (!display->panel->cur_mode) {
  4957. display->panel->cur_mode =
  4958. kzalloc(sizeof(struct dsi_display_mode), GFP_KERNEL);
  4959. if (!display->panel->cur_mode) {
  4960. rc = -ENOMEM;
  4961. goto error;
  4962. }
  4963. }
  4964. pr_info("mdp_transfer_time_us=%d us\n",
  4965. adj_mode.priv_info->mdp_transfer_time_us);
  4966. pr_info("hactive= %d,vactive= %d,fps=%d",timing.h_active,
  4967. timing.v_active,timing.refresh_rate);
  4968. memcpy(display->panel->cur_mode, &adj_mode, sizeof(adj_mode));
  4969. error:
  4970. mutex_unlock(&display->display_lock);
  4971. return rc;
  4972. }
  4973. int dsi_display_set_tpg_state(struct dsi_display *display, bool enable)
  4974. {
  4975. int rc = 0;
  4976. int i;
  4977. struct dsi_display_ctrl *ctrl;
  4978. if (!display) {
  4979. pr_err("Invalid params\n");
  4980. return -EINVAL;
  4981. }
  4982. display_for_each_ctrl(i, display) {
  4983. ctrl = &display->ctrl[i];
  4984. rc = dsi_ctrl_set_tpg_state(ctrl->ctrl, enable);
  4985. if (rc) {
  4986. pr_err("[%s] failed to set tpg state for host_%d\n",
  4987. display->name, i);
  4988. goto error;
  4989. }
  4990. }
  4991. display->is_tpg_enabled = enable;
  4992. error:
  4993. return rc;
  4994. }
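/*
 * DMS pre-switch: turn on core clocks, reprogram the controllers for the
 * new mode, set the link clock source and turn the link clocks on,
 * unwinding each step on failure.
 */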
  4995. static int dsi_display_pre_switch(struct dsi_display *display)
  4996. {
  4997. int rc = 0;
  4998. rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
  4999. DSI_CORE_CLK, DSI_CLK_ON);
  5000. if (rc) {
  5001. pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
  5002. display->name, rc);
  5003. goto error;
  5004. }
  5005. rc = dsi_display_ctrl_update(display);
  5006. if (rc) {
  5007. pr_err("[%s] failed to update DSI controller, rc=%d\n",
  5008. display->name, rc);
  5009. goto error_ctrl_clk_off;
  5010. }
  5011. rc = dsi_display_set_clk_src(display);
  5012. if (rc) {
  5013. pr_err("[%s] failed to set DSI link clock source, rc=%d\n",
  5014. display->name, rc);
  5015. goto error_ctrl_deinit;
  5016. }
  5017. rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
  5018. DSI_LINK_CLK, DSI_CLK_ON);
  5019. if (rc) {
  5020. pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
  5021. display->name, rc);
  5022. goto error_ctrl_deinit;
  5023. }
  5024. goto error;
  5025. error_ctrl_deinit:
  5026. (void)dsi_display_ctrl_deinit(display);
  5027. error_ctrl_clk_off:
  5028. (void)dsi_display_clk_ctrl(display->dsi_clk_handle,
  5029. DSI_CORE_CLK, DSI_CLK_OFF);
  5030. error:
  5031. return rc;
  5032. }
  5033. static bool _dsi_display_validate_host_state(struct dsi_display *display)
  5034. {
  5035. int i;
  5036. struct dsi_display_ctrl *ctrl;
  5037. display_for_each_ctrl(i, display) {
  5038. ctrl = &display->ctrl[i];
  5039. if (!ctrl->ctrl)
  5040. continue;
  5041. if (!dsi_ctrl_validate_host_state(ctrl->ctrl))
  5042. return false;
  5043. }
  5044. return true;
  5045. }
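/*
 * Error recovery work handlers. A FIFO underflow is handled with a soft
 * reset; FIFO overflow and LP RX timeout (video mode only) additionally
 * reset the controllers and PHY lanes, ask SDE via the recovery callback
 * to wait for the active region, and re-enable the video engine. All of
 * them bail out early if ESD recovery is pending or the host state is
 * invalid.
 */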
  5046. static void dsi_display_handle_fifo_underflow(struct work_struct *work)
  5047. {
  5048. struct dsi_display *display = NULL;
  5049. display = container_of(work, struct dsi_display, fifo_underflow_work);
  5050. if (!display || !display->panel ||
  5051. atomic_read(&display->panel->esd_recovery_pending)) {
  5052. pr_debug("Invalid recovery use case\n");
  5053. return;
  5054. }
  5055. mutex_lock(&display->display_lock);
  5056. if (!_dsi_display_validate_host_state(display)) {
  5057. mutex_unlock(&display->display_lock);
  5058. return;
  5059. }
  5060. pr_debug("handle DSI FIFO underflow error\n");
  5061. dsi_display_clk_ctrl(display->dsi_clk_handle,
  5062. DSI_ALL_CLKS, DSI_CLK_ON);
  5063. dsi_display_soft_reset(display);
  5064. dsi_display_clk_ctrl(display->dsi_clk_handle,
  5065. DSI_ALL_CLKS, DSI_CLK_OFF);
  5066. mutex_unlock(&display->display_lock);
  5067. }
  5068. static void dsi_display_handle_fifo_overflow(struct work_struct *work)
  5069. {
  5070. struct dsi_display *display = NULL;
  5071. struct dsi_display_ctrl *ctrl;
  5072. int i, rc;
  5073. int mask = BIT(20); /* clock lane */
  5074. int (*cb_func)(void *event_usr_ptr,
  5075. uint32_t event_idx, uint32_t instance_idx,
  5076. uint32_t data0, uint32_t data1,
  5077. uint32_t data2, uint32_t data3);
  5078. void *data;
  5079. u32 version = 0;
  5080. display = container_of(work, struct dsi_display, fifo_overflow_work);
  5081. if (!display || !display->panel ||
  5082. (display->panel->panel_mode != DSI_OP_VIDEO_MODE) ||
  5083. atomic_read(&display->panel->esd_recovery_pending)) {
  5084. pr_debug("Invalid recovery use case\n");
  5085. return;
  5086. }
  5087. mutex_lock(&display->display_lock);
  5088. if (!_dsi_display_validate_host_state(display)) {
  5089. mutex_unlock(&display->display_lock);
  5090. return;
  5091. }
  5092. pr_debug("handle DSI FIFO overflow error\n");
  5093. dsi_display_clk_ctrl(display->dsi_clk_handle,
  5094. DSI_ALL_CLKS, DSI_CLK_ON);
  5095. /*
  5096. * below recovery sequence is not applicable to
  5097. * hw version 2.0.0, 2.1.0 and 2.2.0, so return early.
  5098. */
  5099. ctrl = &display->ctrl[display->clk_master_idx];
  5100. version = dsi_ctrl_get_hw_version(ctrl->ctrl);
  5101. if (!version || (version < 0x20020001))
  5102. goto end;
  5103. /* reset ctrl and lanes */
  5104. display_for_each_ctrl(i, display) {
  5105. ctrl = &display->ctrl[i];
  5106. rc = dsi_ctrl_reset(ctrl->ctrl, mask);
  5107. rc = dsi_phy_lane_reset(ctrl->phy);
  5108. }
  5109. /* wait for display line count to be in active area */
  5110. ctrl = &display->ctrl[display->clk_master_idx];
  5111. if (ctrl->ctrl->recovery_cb.event_cb) {
  5112. cb_func = ctrl->ctrl->recovery_cb.event_cb;
  5113. data = ctrl->ctrl->recovery_cb.event_usr_ptr;
  5114. rc = cb_func(data, SDE_CONN_EVENT_VID_FIFO_OVERFLOW,
  5115. display->clk_master_idx, 0, 0, 0, 0);
  5116. if (rc < 0) {
  5117. pr_debug("sde callback failed\n");
  5118. goto end;
  5119. }
  5120. }
  5121. /* Enable Video mode for DSI controller */
  5122. display_for_each_ctrl(i, display) {
  5123. ctrl = &display->ctrl[i];
  5124. dsi_ctrl_vid_engine_en(ctrl->ctrl, true);
  5125. }
  5126. /*
  5127. * Add sufficient delay to make sure
  5128. * pixel transmission has started
  5129. */
  5130. udelay(200);
  5131. end:
  5132. dsi_display_clk_ctrl(display->dsi_clk_handle,
  5133. DSI_ALL_CLKS, DSI_CLK_OFF);
  5134. mutex_unlock(&display->display_lock);
  5135. }
  5136. static void dsi_display_handle_lp_rx_timeout(struct work_struct *work)
  5137. {
  5138. struct dsi_display *display = NULL;
  5139. struct dsi_display_ctrl *ctrl;
  5140. int i, rc;
  5141. int mask = (BIT(20) | (0xF << 16)); /* clock lane and 4 data lane */
  5142. int (*cb_func)(void *event_usr_ptr,
  5143. uint32_t event_idx, uint32_t instance_idx,
  5144. uint32_t data0, uint32_t data1,
  5145. uint32_t data2, uint32_t data3);
  5146. void *data;
  5147. u32 version = 0;
  5148. display = container_of(work, struct dsi_display, lp_rx_timeout_work);
  5149. if (!display || !display->panel ||
  5150. (display->panel->panel_mode != DSI_OP_VIDEO_MODE) ||
  5151. atomic_read(&display->panel->esd_recovery_pending)) {
  5152. pr_debug("Invalid recovery use case\n");
  5153. return;
  5154. }
  5155. mutex_lock(&display->display_lock);
  5156. if (!_dsi_display_validate_host_state(display)) {
  5157. mutex_unlock(&display->display_lock);
  5158. return;
  5159. }
  5160. pr_debug("handle DSI LP RX Timeout error\n");
  5161. dsi_display_clk_ctrl(display->dsi_clk_handle,
  5162. DSI_ALL_CLKS, DSI_CLK_ON);
  5163. /*
  5164. * below recovery sequence is not applicable to
  5165. * hw version 2.0.0, 2.1.0 and 2.2.0, so return early.
  5166. */
  5167. ctrl = &display->ctrl[display->clk_master_idx];
  5168. version = dsi_ctrl_get_hw_version(ctrl->ctrl);
  5169. if (!version || (version < 0x20020001))
  5170. goto end;
  5171. /* reset ctrl and lanes */
  5172. display_for_each_ctrl(i, display) {
  5173. ctrl = &display->ctrl[i];
  5174. rc = dsi_ctrl_reset(ctrl->ctrl, mask);
  5175. rc = dsi_phy_lane_reset(ctrl->phy);
  5176. }
  5177. ctrl = &display->ctrl[display->clk_master_idx];
  5178. if (ctrl->ctrl->recovery_cb.event_cb) {
  5179. cb_func = ctrl->ctrl->recovery_cb.event_cb;
  5180. data = ctrl->ctrl->recovery_cb.event_usr_ptr;
  5181. rc = cb_func(data, SDE_CONN_EVENT_VID_FIFO_OVERFLOW,
  5182. display->clk_master_idx, 0, 0, 0, 0);
  5183. if (rc < 0) {
  5184. pr_debug("Target is in suspend/shutdown\n");
  5185. goto end;
  5186. }
  5187. }
  5188. /* Enable Video mode for DSI controller */
  5189. display_for_each_ctrl(i, display) {
  5190. ctrl = &display->ctrl[i];
  5191. dsi_ctrl_vid_engine_en(ctrl->ctrl, true);
  5192. }
  5193. /*
  5194. * Add sufficient delay to make sure
5195. * pixel transmission has started
  5196. */
  5197. udelay(200);
  5198. end:
  5199. dsi_display_clk_ctrl(display->dsi_clk_handle,
  5200. DSI_ALL_CLKS, DSI_CLK_OFF);
  5201. mutex_unlock(&display->display_lock);
  5202. }
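/* DSI error interrupt callback: queue the matching recovery work item. */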
  5203. static int dsi_display_cb_error_handler(void *data,
  5204. uint32_t event_idx, uint32_t instance_idx,
  5205. uint32_t data0, uint32_t data1,
  5206. uint32_t data2, uint32_t data3)
  5207. {
  5208. struct dsi_display *display = data;
  5209. if (!display || !(display->err_workq))
  5210. return -EINVAL;
  5211. switch (event_idx) {
  5212. case DSI_FIFO_UNDERFLOW:
  5213. queue_work(display->err_workq, &display->fifo_underflow_work);
  5214. break;
  5215. case DSI_FIFO_OVERFLOW:
  5216. queue_work(display->err_workq, &display->fifo_overflow_work);
  5217. break;
  5218. case DSI_LP_Rx_TIMEOUT:
  5219. queue_work(display->err_workq, &display->lp_rx_timeout_work);
  5220. break;
  5221. default:
  5222. pr_warn("unhandled error interrupt: %d\n", event_idx);
  5223. break;
  5224. }
  5225. return 0;
  5226. }
  5227. static void dsi_display_register_error_handler(struct dsi_display *display)
  5228. {
  5229. int i = 0;
  5230. struct dsi_display_ctrl *ctrl;
  5231. struct dsi_event_cb_info event_info;
  5232. if (!display)
  5233. return;
  5234. display->err_workq = create_singlethread_workqueue("dsi_err_workq");
  5235. if (!display->err_workq) {
  5236. pr_err("failed to create dsi workq!\n");
  5237. return;
  5238. }
  5239. INIT_WORK(&display->fifo_underflow_work,
  5240. dsi_display_handle_fifo_underflow);
  5241. INIT_WORK(&display->fifo_overflow_work,
  5242. dsi_display_handle_fifo_overflow);
  5243. INIT_WORK(&display->lp_rx_timeout_work,
  5244. dsi_display_handle_lp_rx_timeout);
  5245. memset(&event_info, 0, sizeof(event_info));
  5246. event_info.event_cb = dsi_display_cb_error_handler;
  5247. event_info.event_usr_ptr = display;
  5248. display_for_each_ctrl(i, display) {
  5249. ctrl = &display->ctrl[i];
  5250. ctrl->ctrl->irq_info.irq_err_cb = event_info;
  5251. }
  5252. }
  5253. static void dsi_display_unregister_error_handler(struct dsi_display *display)
  5254. {
  5255. int i = 0;
  5256. struct dsi_display_ctrl *ctrl;
  5257. if (!display)
  5258. return;
  5259. display_for_each_ctrl(i, display) {
  5260. ctrl = &display->ctrl[i];
  5261. memset(&ctrl->ctrl->irq_info.irq_err_cb,
  5262. 0, sizeof(struct dsi_event_cb_info));
  5263. }
  5264. if (display->err_workq) {
  5265. destroy_workqueue(display->err_workq);
  5266. display->err_workq = NULL;
  5267. }
  5268. }
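/*
 * Power-on sequence up to (but not including) enabling the panel: panel
 * pre-prepare (regulators), core clocks, PHY reset/enable, link clock
 * source, controller init, error-handler registration, host engine on,
 * link clocks on, then soft reset and panel prepare. Several of these
 * steps are skipped when continuous splash is active or for a POMS/DMS
 * switch.
 */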
  5269. int dsi_display_prepare(struct dsi_display *display)
  5270. {
  5271. int rc = 0;
  5272. struct dsi_display_mode *mode;
  5273. if (!display) {
  5274. pr_err("Invalid params\n");
  5275. return -EINVAL;
  5276. }
  5277. if (!display->panel->cur_mode) {
  5278. pr_err("no valid mode set for the display\n");
  5279. return -EINVAL;
  5280. }
  5281. SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
  5282. mutex_lock(&display->display_lock);
  5283. mode = display->panel->cur_mode;
  5284. dsi_display_set_ctrl_esd_check_flag(display, false);
  5285. /* Set up ctrl isr before enabling core clk */
  5286. dsi_display_ctrl_isr_configure(display, true);
  5287. if (mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) {
  5288. if (display->is_cont_splash_enabled) {
  5289. pr_err("DMS is not supposed to be set on first frame\n");
5290. rc = -EINVAL;
goto error;
  5291. }
  5292. /* update dsi ctrl for new mode */
  5293. rc = dsi_display_pre_switch(display);
  5294. if (rc)
  5295. pr_err("[%s] panel pre-prepare-res-switch failed, rc=%d\n",
  5296. display->name, rc);
  5297. goto error;
  5298. }
  5299. if (!(mode->dsi_mode_flags & DSI_MODE_FLAG_POMS) &&
  5300. (!display->is_cont_splash_enabled)) {
  5301. /*
  5302. * For continuous splash usecase we skip panel
5303. * pre-prepare since the regulator vote is already
5304. * taken care of in splash resource init
  5305. */
  5306. rc = dsi_panel_pre_prepare(display->panel);
  5307. if (rc) {
  5308. pr_err("[%s] panel pre-prepare failed, rc=%d\n",
  5309. display->name, rc);
  5310. goto error;
  5311. }
  5312. }
  5313. rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
  5314. DSI_CORE_CLK, DSI_CLK_ON);
  5315. if (rc) {
  5316. pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
  5317. display->name, rc);
  5318. goto error_panel_post_unprep;
  5319. }
  5320. /*
  5321. * If ULPS during suspend feature is enabled, then DSI PHY was
  5322. * left on during suspend. In this case, we do not need to reset/init
  5323. * PHY. This would have already been done when the CORE clocks are
  5324. * turned on. However, if cont splash is disabled, the first time DSI
  5325. * is powered on, phy init needs to be done unconditionally.
  5326. */
  5327. if (!display->panel->ulps_suspend_enabled || !display->ulps_enabled) {
  5328. rc = dsi_display_phy_sw_reset(display);
  5329. if (rc) {
  5330. pr_err("[%s] failed to reset phy, rc=%d\n",
  5331. display->name, rc);
  5332. goto error_ctrl_clk_off;
  5333. }
  5334. rc = dsi_display_phy_enable(display);
  5335. if (rc) {
  5336. pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
  5337. display->name, rc);
  5338. goto error_ctrl_clk_off;
  5339. }
  5340. }
  5341. rc = dsi_display_set_clk_src(display);
  5342. if (rc) {
  5343. pr_err("[%s] failed to set DSI link clock source, rc=%d\n",
  5344. display->name, rc);
  5345. goto error_phy_disable;
  5346. }
  5347. rc = dsi_display_ctrl_init(display);
  5348. if (rc) {
  5349. pr_err("[%s] failed to setup DSI controller, rc=%d\n",
  5350. display->name, rc);
  5351. goto error_phy_disable;
  5352. }
  5353. /* Set up DSI ERROR event callback */
  5354. dsi_display_register_error_handler(display);
  5355. rc = dsi_display_ctrl_host_enable(display);
  5356. if (rc) {
  5357. pr_err("[%s] failed to enable DSI host, rc=%d\n",
  5358. display->name, rc);
  5359. goto error_ctrl_deinit;
  5360. }
  5361. rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
  5362. DSI_LINK_CLK, DSI_CLK_ON);
  5363. if (rc) {
  5364. pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
  5365. display->name, rc);
  5366. goto error_host_engine_off;
  5367. }
  5368. if (!display->is_cont_splash_enabled) {
  5369. /*
  5370. * For continuous splash usecase, skip panel prepare and
5371. * ctrl reset since the panel and ctrl are already in the active
  5372. * state and panel on commands are not needed
  5373. */
  5374. rc = dsi_display_soft_reset(display);
  5375. if (rc) {
  5376. pr_err("[%s] failed soft reset, rc=%d\n",
  5377. display->name, rc);
  5378. goto error_ctrl_link_off;
  5379. }
  5380. if (!(mode->dsi_mode_flags & DSI_MODE_FLAG_POMS)) {
  5381. rc = dsi_panel_prepare(display->panel);
  5382. if (rc) {
  5383. pr_err("[%s] panel prepare failed, rc=%d\n",
  5384. display->name, rc);
  5385. goto error_ctrl_link_off;
  5386. }
  5387. }
  5388. }
  5389. goto error;
  5390. error_ctrl_link_off:
  5391. (void)dsi_display_clk_ctrl(display->dsi_clk_handle,
  5392. DSI_LINK_CLK, DSI_CLK_OFF);
  5393. error_host_engine_off:
  5394. (void)dsi_display_ctrl_host_disable(display);
  5395. error_ctrl_deinit:
  5396. (void)dsi_display_ctrl_deinit(display);
  5397. error_phy_disable:
  5398. (void)dsi_display_phy_disable(display);
  5399. error_ctrl_clk_off:
  5400. (void)dsi_display_clk_ctrl(display->dsi_clk_handle,
  5401. DSI_CORE_CLK, DSI_CLK_OFF);
  5402. error_panel_post_unprep:
  5403. (void)dsi_panel_post_unprepare(display->panel);
  5404. error:
  5405. mutex_unlock(&display->display_lock);
  5406. SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
  5407. return rc;
  5408. }
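/*
 * Clip the requested ROI against this controller's mode bounds and shift
 * it into controller-local coordinates; an empty ROI list means reset to
 * full resolution.
 */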
  5409. static int dsi_display_calc_ctrl_roi(const struct dsi_display *display,
  5410. const struct dsi_display_ctrl *ctrl,
  5411. const struct msm_roi_list *req_rois,
  5412. struct dsi_rect *out_roi)
  5413. {
  5414. const struct dsi_rect *bounds = &ctrl->ctrl->mode_bounds;
  5415. struct dsi_display_mode *cur_mode;
  5416. struct msm_roi_caps *roi_caps;
  5417. struct dsi_rect req_roi = { 0 };
  5418. int rc = 0;
  5419. cur_mode = display->panel->cur_mode;
  5420. if (!cur_mode)
  5421. return 0;
  5422. roi_caps = &cur_mode->priv_info->roi_caps;
  5423. if (req_rois->num_rects > roi_caps->num_roi) {
  5424. pr_err("request for %d rois greater than max %d\n",
  5425. req_rois->num_rects,
  5426. roi_caps->num_roi);
  5427. rc = -EINVAL;
  5428. goto exit;
  5429. }
5430. /*
  5431. * if no rois, user wants to reset back to full resolution
  5432. * note: h_active is already divided by ctrl_count
  5433. */
  5434. if (!req_rois->num_rects) {
  5435. *out_roi = *bounds;
  5436. goto exit;
  5437. }
  5438. /* intersect with the bounds */
  5439. req_roi.x = req_rois->roi[0].x1;
  5440. req_roi.y = req_rois->roi[0].y1;
  5441. req_roi.w = req_rois->roi[0].x2 - req_rois->roi[0].x1;
  5442. req_roi.h = req_rois->roi[0].y2 - req_rois->roi[0].y1;
  5443. dsi_rect_intersect(&req_roi, bounds, out_roi);
  5444. exit:
  5445. /* adjust the ctrl origin to be top left within the ctrl */
  5446. out_roi->x = out_roi->x - bounds->x;
  5447. pr_debug("ctrl%d:%d: req (%d,%d,%d,%d) bnd (%d,%d,%d,%d) out (%d,%d,%d,%d)\n",
  5448. ctrl->dsi_ctrl_idx, ctrl->ctrl->cell_index,
  5449. req_roi.x, req_roi.y, req_roi.w, req_roi.h,
  5450. bounds->x, bounds->y, bounds->w, bounds->h,
  5451. out_roi->x, out_roi->y, out_roi->w, out_roi->h);
  5452. return rc;
  5453. }
  5454. static int dsi_display_qsync(struct dsi_display *display, bool enable)
  5455. {
  5456. int i;
  5457. int rc = 0;
  5458. if (!display->panel->qsync_min_fps) {
  5459. pr_err("%s:ERROR: qsync set, but no fps\n", __func__);
  5460. return 0;
  5461. }
  5462. mutex_lock(&display->display_lock);
  5463. for (i = 0; i < display->ctrl_count; i++) {
  5464. if (enable) {
  5465. /* send the commands to enable qsync */
  5466. rc = dsi_panel_send_qsync_on_dcs(display->panel, i);
  5467. if (rc) {
  5468. pr_err("fail qsync ON cmds rc:%d\n", rc);
  5469. goto exit;
  5470. }
  5471. } else {
  5472. /* send the commands to enable qsync */
  5473. rc = dsi_panel_send_qsync_off_dcs(display->panel, i);
  5474. if (rc) {
  5475. pr_err("fail qsync OFF cmds rc:%d\n", rc);
  5476. goto exit;
  5477. }
  5478. }
  5479. dsi_ctrl_setup_avr(display->ctrl[i].ctrl, enable);
  5480. }
  5481. exit:
  5482. SDE_EVT32(enable, display->panel->qsync_min_fps, rc);
  5483. mutex_unlock(&display->display_lock);
  5484. return rc;
  5485. }
  5486. static int dsi_display_set_roi(struct dsi_display *display,
  5487. struct msm_roi_list *rois)
  5488. {
  5489. struct dsi_display_mode *cur_mode;
  5490. struct msm_roi_caps *roi_caps;
  5491. int rc = 0;
  5492. int i;
  5493. if (!display || !rois || !display->panel)
  5494. return -EINVAL;
  5495. cur_mode = display->panel->cur_mode;
  5496. if (!cur_mode)
  5497. return 0;
  5498. roi_caps = &cur_mode->priv_info->roi_caps;
  5499. if (!roi_caps->enabled)
  5500. return 0;
  5501. display_for_each_ctrl(i, display) {
  5502. struct dsi_display_ctrl *ctrl = &display->ctrl[i];
  5503. struct dsi_rect ctrl_roi;
  5504. bool changed = false;
  5505. rc = dsi_display_calc_ctrl_roi(display, ctrl, rois, &ctrl_roi);
  5506. if (rc) {
  5507. pr_err("dsi_display_calc_ctrl_roi failed rc %d\n", rc);
  5508. return rc;
  5509. }
  5510. rc = dsi_ctrl_set_roi(ctrl->ctrl, &ctrl_roi, &changed);
  5511. if (rc) {
  5512. pr_err("dsi_ctrl_set_roi failed rc %d\n", rc);
  5513. return rc;
  5514. }
  5515. if (!changed)
  5516. continue;
  5517. /* send the new roi to the panel via dcs commands */
  5518. rc = dsi_panel_send_roi_dcs(display->panel, i, &ctrl_roi);
  5519. if (rc) {
  5520. pr_err("dsi_panel_set_roi failed rc %d\n", rc);
  5521. return rc;
  5522. }
  5523. /* re-program the ctrl with the timing based on the new roi */
  5524. rc = dsi_ctrl_setup(ctrl->ctrl);
  5525. if (rc) {
  5526. pr_err("dsi_ctrl_setup failed rc %d\n", rc);
  5527. return rc;
  5528. }
  5529. }
  5530. return rc;
  5531. }
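/*
 * Per-frame pre-kickoff hook: set up MISR if enabled, apply any qsync
 * change, program the panel/controller ROIs and, if a dynamic DSI clock
 * update is pending, apply it while holding the panel lock once the
 * command engine has gone idle.
 */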
  5532. int dsi_display_pre_kickoff(struct drm_connector *connector,
  5533. struct dsi_display *display,
  5534. struct msm_display_kickoff_params *params)
  5535. {
  5536. int rc = 0;
  5537. int i;
  5538. bool enable;
  5539. /* check and setup MISR */
  5540. if (display->misr_enable)
  5541. _dsi_display_setup_misr(display);
  5542. if (params->qsync_update) {
  5543. enable = (params->qsync_mode > 0) ? true : false;
  5544. rc = dsi_display_qsync(display, enable);
  5545. if (rc)
  5546. pr_err("%s failed to send qsync commands\n",
  5547. __func__);
  5548. SDE_EVT32(params->qsync_mode, rc);
  5549. }
  5550. rc = dsi_display_set_roi(display, params->rois);
  5551. /* dynamic DSI clock setting */
  5552. if (atomic_read(&display->clkrate_change_pending)) {
  5553. mutex_lock(&display->display_lock);
  5554. /*
  5555. * acquire panel_lock to make sure no commands are in progress
  5556. */
  5557. dsi_panel_acquire_panel_lock(display->panel);
  5558. /*
  5559. * Wait for DSI command engine not to be busy sending data
  5560. * from display engine.
  5561. * If waiting fails, return "rc" instead of below "ret" so as
  5562. * not to impact DRM commit. The clock updating would be
  5563. * deferred to the next DRM commit.
  5564. */
  5565. display_for_each_ctrl(i, display) {
  5566. struct dsi_ctrl *ctrl = display->ctrl[i].ctrl;
  5567. int ret = 0;
  5568. ret = dsi_ctrl_wait_for_cmd_mode_mdp_idle(ctrl);
  5569. if (ret)
  5570. goto wait_failure;
  5571. }
  5572. /*
  5573. * Don't check the return value so as not to impact DRM commit
  5574. * when error occurs.
  5575. */
  5576. (void)dsi_display_force_update_dsi_clk(display);
  5577. wait_failure:
  5578. /* release panel_lock */
  5579. dsi_panel_release_panel_lock(display->panel);
  5580. mutex_unlock(&display->display_lock);
  5581. }
  5582. return rc;
  5583. }
  5584. int dsi_display_config_ctrl_for_cont_splash(struct dsi_display *display)
  5585. {
  5586. int rc = 0;
  5587. if (!display || !display->panel) {
  5588. pr_err("Invalid params\n");
  5589. return -EINVAL;
  5590. }
  5591. if (!display->panel->cur_mode) {
  5592. pr_err("no valid mode set for the display\n");
  5593. return -EINVAL;
  5594. }
  5595. if (!display->is_cont_splash_enabled)
  5596. return 0;
  5597. if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
  5598. rc = dsi_display_vid_engine_enable(display);
  5599. if (rc) {
  5600. pr_err("[%s]failed to enable DSI video engine, rc=%d\n",
  5601. display->name, rc);
  5602. goto error_out;
  5603. }
  5604. } else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
  5605. rc = dsi_display_cmd_engine_enable(display);
  5606. if (rc) {
  5607. pr_err("[%s]failed to enable DSI cmd engine, rc=%d\n",
  5608. display->name, rc);
  5609. goto error_out;
  5610. }
  5611. } else {
  5612. pr_err("[%s] Invalid configuration\n", display->name);
  5613. rc = -EINVAL;
  5614. }
  5615. error_out:
  5616. return rc;
  5617. }
  5618. int dsi_display_enable(struct dsi_display *display)
  5619. {
  5620. int rc = 0;
  5621. struct dsi_display_mode *mode;
  5622. if (!display || !display->panel) {
  5623. pr_err("Invalid params\n");
  5624. return -EINVAL;
  5625. }
  5626. if (!display->panel->cur_mode) {
  5627. pr_err("no valid mode set for the display\n");
  5628. return -EINVAL;
  5629. }
  5630. SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
  5631. /* Engine states and panel states are populated during splash
  5632. * resource init and hence we return early
  5633. */
  5634. if (display->is_cont_splash_enabled) {
  5635. dsi_display_config_ctrl_for_cont_splash(display);
  5636. rc = dsi_display_splash_res_cleanup(display);
  5637. if (rc) {
  5638. pr_err("Continuous splash res cleanup failed, rc=%d\n",
  5639. rc);
  5640. return -EINVAL;
  5641. }
  5642. display->panel->panel_initialized = true;
  5643. pr_debug("cont splash enabled, display enable not required\n");
  5644. return 0;
  5645. }
  5646. mutex_lock(&display->display_lock);
  5647. mode = display->panel->cur_mode;
  5648. if (mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) {
  5649. rc = dsi_panel_post_switch(display->panel);
  5650. if (rc) {
  5651. pr_err("[%s] failed to switch DSI panel mode, rc=%d\n",
  5652. display->name, rc);
  5653. goto error;
  5654. }
  5655. } else if (!(display->panel->cur_mode->dsi_mode_flags &
  5656. DSI_MODE_FLAG_POMS)){
  5657. rc = dsi_panel_enable(display->panel);
  5658. if (rc) {
  5659. pr_err("[%s] failed to enable DSI panel, rc=%d\n",
  5660. display->name, rc);
  5661. goto error;
  5662. }
  5663. }
  5664. if (mode->priv_info->dsc_enabled) {
  5665. mode->priv_info->dsc.pic_width *= display->ctrl_count;
  5666. rc = dsi_panel_update_pps(display->panel);
  5667. if (rc) {
  5668. pr_err("[%s] panel pps cmd update failed, rc=%d\n",
  5669. display->name, rc);
  5670. goto error;
  5671. }
  5672. }
  5673. if (mode->dsi_mode_flags & DSI_MODE_FLAG_DMS) {
  5674. rc = dsi_panel_switch(display->panel);
  5675. if (rc)
  5676. pr_err("[%s] failed to switch DSI panel mode, rc=%d\n",
  5677. display->name, rc);
  5678. goto error;
  5679. }
  5680. if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
  5681. pr_debug("%s:enable video timing eng\n", __func__);
  5682. rc = dsi_display_vid_engine_enable(display);
  5683. if (rc) {
  5684. pr_err("[%s]failed to enable DSI video engine, rc=%d\n",
  5685. display->name, rc);
  5686. goto error_disable_panel;
  5687. }
  5688. } else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
  5689. pr_debug("%s:enable command timing eng\n", __func__);
  5690. rc = dsi_display_cmd_engine_enable(display);
  5691. if (rc) {
  5692. pr_err("[%s]failed to enable DSI cmd engine, rc=%d\n",
  5693. display->name, rc);
  5694. goto error_disable_panel;
  5695. }
  5696. } else {
  5697. pr_err("[%s] Invalid configuration\n", display->name);
  5698. rc = -EINVAL;
  5699. goto error_disable_panel;
  5700. }
  5701. goto error;
  5702. error_disable_panel:
  5703. (void)dsi_panel_disable(display->panel);
  5704. error:
  5705. mutex_unlock(&display->display_lock);
  5706. SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
  5707. return rc;
  5708. }
  5709. int dsi_display_post_enable(struct dsi_display *display)
  5710. {
  5711. int rc = 0;
  5712. if (!display) {
  5713. pr_err("Invalid params\n");
  5714. return -EINVAL;
  5715. }
  5716. mutex_lock(&display->display_lock);
  5717. if (display->panel->cur_mode->dsi_mode_flags & DSI_MODE_FLAG_POMS) {
  5718. if (display->config.panel_mode == DSI_OP_CMD_MODE)
  5719. dsi_panel_mode_switch_to_cmd(display->panel);
  5720. if (display->config.panel_mode == DSI_OP_VIDEO_MODE)
  5721. dsi_panel_mode_switch_to_vid(display->panel);
  5722. } else {
  5723. rc = dsi_panel_post_enable(display->panel);
  5724. if (rc)
  5725. pr_err("[%s] panel post-enable failed, rc=%d\n",
  5726. display->name, rc);
  5727. }
  5728. /* remove the clk vote for CMD mode panels */
  5729. if (display->config.panel_mode == DSI_OP_CMD_MODE)
  5730. dsi_display_clk_ctrl(display->dsi_clk_handle,
  5731. DSI_ALL_CLKS, DSI_CLK_OFF);
  5732. mutex_unlock(&display->display_lock);
  5733. return rc;
  5734. }
int dsi_display_pre_disable(struct dsi_display *display)
{
	int rc = 0;

	if (!display) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	mutex_lock(&display->display_lock);

	/* enable the clk vote for CMD mode panels */
	if (display->config.panel_mode == DSI_OP_CMD_MODE)
		dsi_display_clk_ctrl(display->dsi_clk_handle,
			DSI_ALL_CLKS, DSI_CLK_ON);

	if (display->poms_pending) {
		if (display->config.panel_mode == DSI_OP_CMD_MODE)
			dsi_panel_pre_mode_switch_to_video(display->panel);

		if (display->config.panel_mode == DSI_OP_VIDEO_MODE)
			dsi_panel_pre_mode_switch_to_cmd(display->panel);
	} else {
		rc = dsi_panel_pre_disable(display->panel);
		if (rc)
			pr_err("[%s] panel pre-disable failed, rc=%d\n",
				display->name, rc);
	}

	mutex_unlock(&display->display_lock);
	return rc;
}

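/**
 * dsi_display_disable() - disable the DSI engines and panel
 * @display: Handle to dsi_display.
 *
 * Wakes the display out of any low power state, disables the video or
 * command engine depending on the panel operating mode and, unless a panel
 * operating mode switch is pending, disables the panel as well.
 *
 * Return: 0 on success, error code otherwise.
 */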
int dsi_display_disable(struct dsi_display *display)
{
	int rc = 0;

	if (!display) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
	mutex_lock(&display->display_lock);

	rc = dsi_display_wake_up(display);
	if (rc)
		pr_err("[%s] display wake up failed, rc=%d\n",
			display->name, rc);

	if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
		rc = dsi_display_vid_engine_disable(display);
		if (rc)
			pr_err("[%s]failed to disable DSI vid engine, rc=%d\n",
				display->name, rc);
	} else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
		rc = dsi_display_cmd_engine_disable(display);
		if (rc)
			pr_err("[%s]failed to disable DSI cmd engine, rc=%d\n",
				display->name, rc);
	} else {
		pr_err("[%s] Invalid configuration\n", display->name);
		rc = -EINVAL;
	}

	if (!display->poms_pending) {
		rc = dsi_panel_disable(display->panel);
		if (rc)
			pr_err("[%s] failed to disable DSI panel, rc=%d\n",
				display->name, rc);
	}

	mutex_unlock(&display->display_lock);
	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
	return rc;
}

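/**
 * dsi_display_update_pps() - cache an updated DSC PPS command for the panel
 * @pps_cmd: Buffer holding DSI_CMD_PPS_SIZE bytes of PPS payload.
 * @disp:    Opaque pointer to the dsi_display.
 *
 * The PPS bytes are copied into the panel's dsc_pps_cmd buffer under the
 * display lock; the command itself is sent later from the enable path via
 * dsi_panel_update_pps().
 *
 * Return: 0 on success, error code otherwise.
 */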
int dsi_display_update_pps(char *pps_cmd, void *disp)
{
	struct dsi_display *display;

	if (pps_cmd == NULL || disp == NULL) {
		pr_err("Invalid parameter\n");
		return -EINVAL;
	}

	display = disp;
	mutex_lock(&display->display_lock);

	memcpy(display->panel->dsc_pps_cmd, pps_cmd, DSI_CMD_PPS_SIZE);

	mutex_unlock(&display->display_lock);
	return 0;
}

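/**
 * dsi_display_unprepare() - power down the DSI host and panel
 * @display: Handle to dsi_display.
 *
 * Unprepares the panel (unless a panel operating mode switch is pending),
 * disables the DSI host and link clocks, deinitializes the controllers,
 * disables the PHY when ULPS-on-suspend is not enabled, drops the core
 * clocks, tears down the controller ISRs and finally unregisters the DSI
 * error handler.
 *
 * Return: 0 on success, error code otherwise.
 */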
int dsi_display_unprepare(struct dsi_display *display)
{
	int rc = 0;

	if (!display) {
		pr_err("Invalid params\n");
		return -EINVAL;
	}

	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);
	mutex_lock(&display->display_lock);

	rc = dsi_display_wake_up(display);
	if (rc)
		pr_err("[%s] display wake up failed, rc=%d\n",
			display->name, rc);

	if (!display->poms_pending) {
		rc = dsi_panel_unprepare(display->panel);
		if (rc)
			pr_err("[%s] panel unprepare failed, rc=%d\n",
				display->name, rc);
	}

	rc = dsi_display_ctrl_host_disable(display);
	if (rc)
		pr_err("[%s] failed to disable DSI host, rc=%d\n",
			display->name, rc);

	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
			DSI_LINK_CLK, DSI_CLK_OFF);
	if (rc)
		pr_err("[%s] failed to disable Link clocks, rc=%d\n",
			display->name, rc);

	rc = dsi_display_ctrl_deinit(display);
	if (rc)
		pr_err("[%s] failed to deinit controller, rc=%d\n",
			display->name, rc);

	if (!display->panel->ulps_suspend_enabled) {
		rc = dsi_display_phy_disable(display);
		if (rc)
			pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
				display->name, rc);
	}

	rc = dsi_display_clk_ctrl(display->dsi_clk_handle,
			DSI_CORE_CLK, DSI_CLK_OFF);
	if (rc)
		pr_err("[%s] failed to disable DSI clocks, rc=%d\n",
			display->name, rc);

	/* destroy DSI ISR setup */
	dsi_display_ctrl_isr_configure(display, false);

	if (!display->poms_pending) {
		rc = dsi_panel_post_unprepare(display->panel);
		if (rc)
			pr_err("[%s] panel post-unprepare failed, rc=%d\n",
				display->name, rc);
	}

	mutex_unlock(&display->display_lock);

	/* Free up DSI ERROR event callback */
	dsi_display_unregister_error_handler(display);

	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
	return rc;
}

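/*
 * Module init/exit. The PHY and controller drivers are registered before
 * the display platform driver and unregistered in the reverse order; the
 * boot-time display selection strings are parsed before the platform
 * driver is registered.
 */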
static int __init dsi_display_register(void)
{
	dsi_phy_drv_register();
	dsi_ctrl_drv_register();

	dsi_display_parse_boot_display_selection();

	return platform_driver_register(&dsi_display_driver);
}

static void __exit dsi_display_unregister(void)
{
	platform_driver_unregister(&dsi_display_driver);
	dsi_ctrl_drv_unregister();
	dsi_phy_drv_unregister();
}

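/*
 * Boot-time display selection parameters. msm_drm.dsi_display0 and
 * msm_drm.dsi_display1 each take a "<display node>:<configX>" string that
 * names the primary/secondary display device-tree node and a topology
 * configuration index; the strings land in dsi_display_primary and
 * dsi_display_secondary and are consumed at init time (see
 * dsi_display_parse_boot_display_selection()).
 */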
module_param_string(dsi_display0, dsi_display_primary, MAX_CMDLINE_PARAM_LEN,
	0600);
MODULE_PARM_DESC(dsi_display0,
	"msm_drm.dsi_display0=<display node>:<configX> where <display node> is the primary DSI display node name and <configX> selects the index X in that display's topology list");

module_param_string(dsi_display1, dsi_display_secondary, MAX_CMDLINE_PARAM_LEN,
	0600);
MODULE_PARM_DESC(dsi_display1,
	"msm_drm.dsi_display1=<display node>:<configX> where <display node> is the secondary DSI display node name and <configX> selects the index X in that display's topology list");

module_init(dsi_display_register);
module_exit(dsi_display_unregister);