dp_display.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021-2024, Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2017-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/component.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
#if !defined(CONFIG_SECDP)
#include <linux/usb/phy.h>
#endif
#include <linux/jiffies.h>
#include <linux/pm_qos.h>
#include <linux/ipc_logging.h>

#include "sde_connector.h"
#include "msm_drv.h"
#include "dp_hpd.h"
#include "dp_parser.h"
#include "dp_power.h"
#include "dp_catalog.h"
#include "dp_aux.h"
#include "dp_link.h"
#include "dp_panel.h"
#include "dp_ctrl.h"
#include "dp_audio.h"
#include "dp_display.h"
#include "sde_hdcp.h"
#include "dp_debug.h"
#include "dp_pll.h"
#include "sde_dbg.h"
#ifdef CONFIG_UML
#include "kunit_test/dp_kunit_macro.h"
#endif

#if defined(CONFIG_SECDP)
#include <linux/string.h>
#include <linux/reboot.h>
#include <linux/sec_displayport.h>
#include <linux/sched/clock.h>
#include <linux/bitmap.h>
#include <linux/sec_class.h>
#include "secdp.h"
#include "secdp_sysfs.h"
#if defined(CONFIG_SECDP_BIGDATA)
#include <linux/secdp_bigdata.h>
#endif

/*#undef CONFIG_SECDP_SWITCH*/
#if defined(CONFIG_SECDP_SWITCH)
#include <linux/switch.h>
static struct switch_dev switch_secdp_msg = {
	.name = "secdp_msg",
};
#endif

extern int dwc3_msm_set_dp_mode_for_ss(bool dp_connected);
#endif/*CONFIG_SECDP*/

#define DRM_DP_IPC_NUM_PAGES 10

#define DP_MST_DEBUG(fmt, ...) DP_DEBUG(fmt, ##__VA_ARGS__)
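
/*
 * dp->state helpers: log, test, set and clear display state bits. Every
 * transition is also recorded in the SDE event log.
 */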
#define dp_display_state_show(x) { \
	DP_ERR("%s: state (0x%x): %s\n", x, dp->state, \
		dp_display_state_name(dp->state)); \
	SDE_EVT32_EXTERNAL(dp->state); }

#define dp_display_state_warn(x) { \
	DP_WARN("%s: state (0x%x): %s\n", x, dp->state, \
		dp_display_state_name(dp->state)); \
	SDE_EVT32_EXTERNAL(dp->state); }

#define dp_display_state_log(x) { \
	DP_DEBUG("%s: state (0x%x): %s\n", x, dp->state, \
		dp_display_state_name(dp->state)); \
	SDE_EVT32_EXTERNAL(dp->state); }

#define dp_display_state_is(x) (dp->state & (x))
#define dp_display_state_add(x) { \
	(dp->state |= (x)); \
	dp_display_state_log("add "#x); }
#define dp_display_state_remove(x) { \
	(dp->state &= ~(x)); \
	dp_display_state_log("remove "#x); }

#define MAX_TMDS_CLOCK_HDMI_1_4 340000

enum dp_display_states {
	DP_STATE_DISCONNECTED = 0,
	DP_STATE_CONFIGURED = BIT(0),
	DP_STATE_INITIALIZED = BIT(1),
	DP_STATE_READY = BIT(2),
	DP_STATE_CONNECTED = BIT(3),
	DP_STATE_CONNECT_NOTIFIED = BIT(4),
	DP_STATE_DISCONNECT_NOTIFIED = BIT(5),
	DP_STATE_ENABLED = BIT(6),
	DP_STATE_SUSPENDED = BIT(7),
	DP_STATE_ABORTED = BIT(8),
	DP_STATE_HDCP_ABORTED = BIT(9),
	DP_STATE_SRC_PWRDN = BIT(10),
	DP_STATE_TUI_ACTIVE = BIT(11),
};
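
/*
 * Build a human-readable name for the given state bitmask. Uses a static
 * buffer, so the result is only valid until the next call (logging use only).
 */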
static char *dp_display_state_name(enum dp_display_states state)
{
	static char buf[SZ_1K];
	u32 len = 0;

	memset(buf, 0, SZ_1K);

	if (state & DP_STATE_CONFIGURED)
		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
			"CONFIGURED");
	if (state & DP_STATE_INITIALIZED)
		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
			"INITIALIZED");
	if (state & DP_STATE_READY)
		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
			"READY");
	if (state & DP_STATE_CONNECTED)
		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
			"CONNECTED");
	if (state & DP_STATE_CONNECT_NOTIFIED)
		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
			"CONNECT_NOTIFIED");
	if (state & DP_STATE_DISCONNECT_NOTIFIED)
		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
			"DISCONNECT_NOTIFIED");
	if (state & DP_STATE_ENABLED)
		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
			"ENABLED");
	if (state & DP_STATE_SUSPENDED)
		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
			"SUSPENDED");
	if (state & DP_STATE_ABORTED)
		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
			"ABORTED");
	if (state & DP_STATE_HDCP_ABORTED)
		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
			"HDCP_ABORTED");
	if (state & DP_STATE_SRC_PWRDN)
		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
			"SRC_PWRDN");
	if (state & DP_STATE_TUI_ACTIVE)
		len += scnprintf(buf + len, sizeof(buf) - len, "|%s|",
			"TUI_ACTIVE");

	if (!strlen(buf))
		return "DISCONNECTED";

	return buf;
}

static struct dp_display *g_dp_display;
#define HPD_STRING_SIZE 30

struct dp_hdcp_dev {
	void *fd;
	struct sde_hdcp_ops *ops;
	enum sde_hdcp_version ver;
};

struct dp_hdcp {
	void *data;
	struct sde_hdcp_ops *ops;
	u32 source_cap;
	struct dp_hdcp_dev dev[HDCP_VERSION_MAX];
};

struct dp_mst {
	bool mst_active;
	bool drm_registered;
	struct dp_mst_drm_cbs cbs;
};

struct dp_display_private {
	char *name;
	int irq;
	enum drm_connector_status cached_connector_status;
	enum dp_display_states state;
	enum dp_aux_switch_type switch_type;
	struct platform_device *pdev;
	struct device_node *aux_switch_node;
	bool aux_switch_ready;
	struct dp_aux_bridge *aux_bridge;
	struct dentry *root;
	struct completion notification_comp;
	struct completion attention_comp;
	struct dp_hpd *hpd;
	struct dp_parser *parser;
	struct dp_power *power;
	struct dp_catalog *catalog;
	struct dp_aux *aux;
	struct dp_link *link;
	struct dp_panel *panel;
	struct dp_ctrl *ctrl;
	struct dp_debug *debug;
	struct dp_pll *pll;
	struct dp_panel *active_panels[DP_STREAM_MAX];
	struct dp_hdcp hdcp;
	struct dp_hpd_cb hpd_cb;
	struct dp_display_mode mode;
	struct dp_display dp_display;
	struct msm_drm_private *priv;
	struct workqueue_struct *wq;
	struct delayed_work hdcp_cb_work;
	struct work_struct connect_work;
	struct work_struct attention_work;
	struct work_struct disconnect_work;
	struct mutex session_lock;
	struct mutex accounting_lock;
	bool hdcp_delayed_off;
	bool no_aux_switch;
	u32 active_stream_cnt;
	struct dp_mst mst;
	u32 tot_dsc_blks_in_use;
	u32 tot_lm_blks_in_use;
	bool process_hpd_connect;
	struct dev_pm_qos_request pm_qos_req[NR_CPUS];
	bool pm_qos_requested;
#if !defined(CONFIG_SECDP)
	struct notifier_block usb_nb;
#else
	struct secdp_misc sec;
#endif
};

static const struct of_device_id dp_dt_match[] = {
	{.compatible = "qcom,dp-display"},
	{}
};

#if defined(CONFIG_SECDP)
static void dp_audio_enable(struct dp_display_private *dp, bool enable);

#ifndef SECDP_USE_WAKELOCK
static void secdp_init_wakelock(struct dp_display_private *dp)
{
	do {} while (0);
}

static void secdp_destroy_wakelock(struct dp_display_private *dp)
{
	do {} while (0);
}

static void secdp_set_wakelock(struct dp_display_private *dp, bool en)
{
	do {} while (0);
}
#else
static void secdp_init_wakelock(struct dp_display_private *dp)
{
	dp->sec.ws = wakeup_source_register(&dp->pdev->dev, "secdp_ws");
}

static void secdp_destroy_wakelock(struct dp_display_private *dp)
{
	wakeup_source_unregister(dp->sec.ws);
}

static void secdp_set_wakelock(struct dp_display_private *dp, bool en)
{
	struct wakeup_source *ws = dp->sec.ws;
	bool active_before = ws->active;

	if (en)
		__pm_stay_awake(ws);
	else
		__pm_relax(ws);

	DP_DEBUG("en:%d, active:%d->%d\n", en, active_before, ws->active);
}
#endif

static int secdp_param_lpcharge;
module_param(secdp_param_lpcharge, int, 0444);

bool secdp_get_lpm_mode(struct secdp_misc *sec)
{
	if (!sec) {
		DP_INFO("SECDP is not ready yet\n");
		return false;
	}

	DP_INFO("lpcharge:%d\n", sec->lpm_booting);
	return sec->lpm_booting;
}
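
/*
 * Notify user space of a poor DP connection, either through the secdp_msg
 * switch device or a KOBJ_CHANGE uevent, and reset the DeX state.
 */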
static void secdp_send_poor_connection_event(struct dp_display_private *dp,
		bool edid_fail)
{
	DP_INFO("poor connection++ %d\n", edid_fail);

	if (!edid_fail)
		dp->link->poor_connection = true;

#if defined(CONFIG_SECDP_SWITCH)
	switch_set_state(&switch_secdp_msg, 1);
	switch_set_state(&switch_secdp_msg, 0);
#else
	{
		struct drm_device *dev = NULL;
		struct drm_connector *connector;
		char *envp[3];

		connector = dp->dp_display.base_connector;
		if (!connector) {
			DP_ERR("connector not set\n");
			return;
		}

		dev = connector->dev;
		envp[0] = "DEVPATH=/devices/virtual/switch/secdp_msg";
		envp[1] = "SWITCH_STATE=1";
		envp[2] = NULL;
		DP_DEBUG("[%s]:[%s]\n", envp[0], envp[1]);
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
	}
#endif
	dp->sec.dex.prev = dp->sec.dex.curr = DEX_DISABLED;
}

static void secdp_show_clk_status(struct dp_display_private *dp)
{
	struct dp_power *dp_power;
	bool core, link, stream0, stream1;

	if (!dp || !dp->power)
		return;

	dp_power = dp->power;
	core = dp_power->clk_status(dp_power, DP_CORE_PM);
	link = dp_power->clk_status(dp_power, DP_LINK_PM);
	stream0 = dp_power->clk_status(dp_power, DP_STREAM0_PM);
	stream1 = dp_power->clk_status(dp_power, DP_STREAM1_PM);
	DP_DEBUG("core:%d link:%d strm0:%d strm1:%d\n", core, link, stream0, stream1);
}

/** check whether DP is powered on */
bool secdp_get_power_status(void)
{
	struct dp_display_private *dp = container_of(g_dp_display,
			struct dp_display_private, dp_display);

	return dp_display_state_is(DP_STATE_ENABLED);
}

/** check whether the DP cable is connected */
bool secdp_get_cable_status(void)
{
	struct dp_display_private *dp = container_of(g_dp_display,
			struct dp_display_private, dp_display);

	return dp->sec.cable_connected;
}

/** check whether HPD high has been received */
int secdp_get_hpd_status(void)
{
	struct dp_display_private *dp = container_of(g_dp_display,
			struct dp_display_private, dp_display);

	return atomic_read(&dp->sec.hpd.val);
}

#define DP_HAL_INIT_TIME 30/*sec*/

/**
 * retval: seconds left to wait (DP_HAL_INIT_TIME - curr_time) if boot time
 *         has not yet passed DP_HAL_INIT_TIME
 * retval: 0 otherwise
 */
static int secdp_check_boot_time(void)
{
	int wait = 0;
	u64 curr_time;
	unsigned long nsec;

	curr_time = local_clock();
	nsec = do_div(curr_time, 1000000000);

	if ((unsigned long)curr_time < DP_HAL_INIT_TIME)
		wait = DP_HAL_INIT_TIME - (unsigned long)curr_time;

	DP_INFO("curr_time: %lu[s], wait: %d\n",
		(unsigned long)curr_time, wait);
	return wait;
}

/**
 * read the dongle's information
 */
int secdp_read_branch_revision(struct dp_display_private *dp)
{
	struct secdp_adapter *adapter;
	struct drm_dp_aux *drm_aux;
	char *ieee_oui, *devid_str, *fw_ver;
	int rlen = 0;

	if (!dp || !dp->aux || !dp->aux->drm_aux) {
		DP_ERR("invalid param\n");
		goto end;
	}

	drm_aux = dp->aux->drm_aux;
	adapter = &dp->sec.adapter;
	ieee_oui = adapter->ieee_oui;
	devid_str = adapter->devid_str;
	fw_ver = adapter->fw_ver;

	rlen = drm_dp_dpcd_read(drm_aux, DPCD_IEEE_OUI, ieee_oui, 3);
	if (rlen < 3) {
		DP_ERR("oui read fail:%d\n", rlen);
		goto end;
	}
	DP_INFO("oui:%02x%02x%02x\n", ieee_oui[0], ieee_oui[1], ieee_oui[2]);

	rlen = drm_dp_dpcd_read(drm_aux, DPCD_DEVID_STR, devid_str, 6);
	if (rlen < 6) {
		DP_ERR("devid read fail:%d\n", rlen);
		goto end;
	}
	print_hex_dump(KERN_DEBUG, "devid:",
		DUMP_PREFIX_NONE, 16, 1, devid_str, 6, true);
	secdp_logger_hex_dump(devid_str, "devid:", 6);

	rlen = drm_dp_dpcd_read(drm_aux, DPCD_BRANCH_HW_REV, fw_ver, LEN_BRANCH_REV);
	if (rlen < LEN_BRANCH_REV) {
		DP_ERR("fw_ver read fail:%d\n", rlen);
		goto end;
	}
	DP_INFO("branch revision: HW:0x%X, SW:0x%X,0x%X\n", fw_ver[0],
		fw_ver[1], fw_ver[2]);

#if defined(CONFIG_SECDP_BIGDATA)
	secdp_bigdata_save_item(BD_ADAPTER_HWID, fw_ver[0]);
	secdp_bigdata_save_item(BD_ADAPTER_FWVER, (fw_ver[1] << 8) | fw_ver[2]);
#endif

end:
	return rlen;
}

void secdp_clear_branch_info(struct dp_display_private *dp)
{
	int i;
	char *fw_ver;

	if (!dp)
		goto end;

	fw_ver = dp->sec.adapter.fw_ver;
	for (i = 0; i < LEN_BRANCH_REV; i++)
		fw_ver[i] = 0;

end:
	return;
}

/**
 * get the max DeX resolution of the current dongle/cable.
 * it is decided by secdp_check_adapter_type() at connection time.
 */
static enum dex_support_res_t secdp_get_dex_res(struct dp_display_private *dp)
{
	enum dex_support_res_t res = dp->sec.dex.res;

	if (dp->sec.dex.adapter_check_skip)
		res = DEX_RES_MAX;

	return res;
}

/**
 * check if DeX is running
 */
static bool secdp_check_dex_mode(struct dp_display_private *dp)
{
	bool mode = false;

	if (secdp_get_dex_res(dp) == DEX_RES_NOT_SUPPORT)
		goto end;

	if (dp->sec.dex.setting_ui == DEX_DISABLED &&
			dp->sec.dex.curr == DEX_DISABLED)
		goto end;

	mode = true;
end:
	return mode;
}

bool secdp_adapter_check_parade(struct secdp_misc *sec)
{
	struct secdp_adapter *adapter = &sec->adapter;

	if (adapter->ieee_oui[0] == 0x00 &&
			adapter->ieee_oui[1] == 0x1c &&
			adapter->ieee_oui[2] == 0xf8)
		return true;

	return false;
}

bool secdp_adapter_check_ps176(struct secdp_misc *sec)
{
	struct secdp_adapter *adapter = &sec->adapter;

	if (adapter->devid_str[0] == '1' &&
			adapter->devid_str[1] == '7' &&
			adapter->devid_str[2] == '6')
		return true;

	return false;
}

bool secdp_adapter_check_ps176_legacy(struct secdp_misc *sec)
{
	struct secdp_adapter *adapter = &sec->adapter;

	if (!secdp_adapter_check_parade(sec))
		return false;

	if (!secdp_adapter_check_ps176(sec))
		return false;

	if (adapter->fw_ver[1] != 0x07)
		return false;

	if (adapter->fw_ver[2] <= 0x40)
		return false;

	return true;
}

bool secdp_adapter_check_realtek(struct secdp_misc *sec)
{
	struct secdp_adapter *adapter = &sec->adapter;

	if (adapter->ieee_oui[0] == 0x00 &&
			adapter->ieee_oui[1] == 0xe0 &&
			adapter->ieee_oui[2] == 0x4c)
		return true;

	return false;
}

bool secdp_adapter_is_legacy(void)
{
	struct dp_display_private *dp = container_of(g_dp_display,
			struct dp_display_private, dp_display);
	struct secdp_misc *sec = &dp->sec;
	struct secdp_adapter *adapter = &dp->sec.adapter;
	bool rc = false;

	if (adapter->ss_legacy) {
		DP_INFO("ss_legacy\n");
		return true;
	}

	if (secdp_adapter_check_realtek(sec)) {
		DP_INFO("realtek_legacy\n");
		return true;
	}

	rc = secdp_adapter_check_ps176_legacy(sec);
	DP_INFO("ps176_legacy:%d\n", rc);
	return rc;
}

struct drm_connector *secdp_get_connector(void)
{
	struct dp_display *dp_disp = g_dp_display;
	struct drm_connector *connector = NULL;

	if (dp_disp)
		connector = dp_disp->base_connector;

	return connector;
}
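
/*
 * Reboot notifier: if a DP cable is connected, mark sec->reboot and give the
 * sink some time to settle before the system goes down.
 */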
static int secdp_reboot_cb(struct notifier_block *nb,
		unsigned long action, void *data)
{
	struct secdp_misc *sec = container_of(nb,
			struct secdp_misc, reboot_nb);

	if (IS_ERR_OR_NULL(sec)) {
		DP_ERR("dp is null!\n");
		goto end;
	}

	if (!secdp_get_cable_status()) {
		DP_DEBUG("cable is out\n");
		goto end;
	}

	DP_DEBUG("reboot:%d\n", sec->reboot);

	sec->reboot = true;
#ifndef SECDP_TEST_HDCP2P2_REAUTH
	msleep(300);
#endif

end:
	return NOTIFY_OK;
}

bool secdp_get_reboot_status(void)
{
	struct dp_display_private *dp;
	bool ret = false;

	if (!g_dp_display) {
		DP_DEBUG("dp display not initialized\n");
		goto end;
	}

	dp = container_of(g_dp_display, struct dp_display_private, dp_display);
	if (IS_ERR_OR_NULL(dp)) {
		DP_ERR("dp is null!\n");
		goto end;
	}

	DP_DEBUG("reboot:%d\n", dp->sec.reboot);

	ret = dp->sec.reboot;
#ifdef SECDP_TEST_HDCP2P2_REAUTH
	ret = false;
	DP_DEBUG("[SECDP_TEST_HDCP2P2_REAUTH]\n");
#endif

end:
	return ret;
}

/**
 * convert a VID/PID string to an unsigned int, interpreted as hexadecimal
 * @tok     [in] 4-byte character string
 * @result  [out] converted value
 */
static int _secdp_strtoint(char *tok, uint *result)
{
	int ret = 0, len;

	if (!tok || !result) {
		DP_ERR("invalid arg!\n");
		ret = -EINVAL;
		goto end;
	}

	len = strlen(tok);
	if (len == 5 && tok[len - 1] == 0xa/*LF*/) {
		/* continue since it's ended with line feed */
	} else if ((len == 4 && tok[len - 1] == 0xa/*LF*/) || (len != 4)) {
		DP_ERR("wrong! tok:%s, len:%d\n", tok, len);
		ret = -EINVAL;
		goto end;
	}

	ret = kstrtouint(tok, 16, result);
	if (ret) {
		DP_ERR("fail to convert %s! ret:%d\n", tok, ret);
		goto end;
	}

end:
	return ret;
}

#if defined(CONFIG_SECDP_DBG)
int secdp_debug_set_ssc(struct secdp_misc *sec, bool onoff)
{
	struct dp_display_private *dp = container_of(sec, struct dp_display_private, sec);

	dp->pll->ssc_en = onoff;
	DP_DEBUG("set ssc_en %d\n", dp->pll->ssc_en);
	return 0;
}

bool secdp_debug_get_ssc(struct secdp_misc *sec)
{
	struct dp_display_private *dp = container_of(sec, struct dp_display_private, sec);

	DP_DEBUG("get ssc_en %d\n", dp->pll->ssc_en);
	return dp->pll->ssc_en;
}

int secdp_show_hmd_dev(struct secdp_misc *sec, char *buf)
{
	struct secdp_sink_dev *hmd_list;
	int i, rc = 0;

	hmd_list = sec->hmd.list;
	if (!hmd_list) {
		DP_ERR("hmd_list is null!\n");
		rc = -ENOMEM;
		goto end;
	}

	for (i = 0; i < MAX_NUM_HMD; i++) {
		if (strlen(hmd_list[i].monitor_name) > 0) {
			if (buf) {
				rc += scnprintf(buf + rc, PAGE_SIZE - rc,
					"%s,0x%04x,0x%04x\n",
					hmd_list[i].monitor_name,
					hmd_list[i].ven_id,
					hmd_list[i].prod_id);
			}
		}
	}

end:
	return rc;
}
#endif/*CONFIG_SECDP_DBG*/

enum {
	DEX_HMD_MON = 0,	/* monitor name field */
	DEX_HMD_VID,		/* vid field */
	DEX_HMD_PID,		/* pid field */
	DEX_HMD_FIELD_MAX,
};
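
/*
 * Parse a comma-separated "monitor_name,VID,PID" list into sec->hmd.list.
 * The previous list is backed up first and restored if parsing fails.
 */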
int secdp_store_hmd_dev(struct secdp_misc *sec, char *str, size_t len, int num_hmd)
{
	struct secdp_sink_dev *hmd_list;
	struct secdp_sink_dev hmd_bak[MAX_NUM_HMD] = {0,};
	bool backup = false;
	char *tok;
	int i, j, ret = 0, rmdr;
	uint value;

	if (num_hmd <= 0 || num_hmd > MAX_NUM_HMD) {
		DP_ERR("invalid num_hmd! %d\n", num_hmd);
		ret = -EINVAL;
		goto end;
	}

	DP_INFO("%s,%lu,%d\n", str, len, num_hmd);

	hmd_list = sec->hmd.list;

	/* backup and reset */
	for (j = 0; j < MAX_NUM_HMD; j++) {
		memcpy(&hmd_bak[j], &hmd_list[j], sizeof(struct secdp_sink_dev));
		memset(&hmd_list[j], 0, sizeof(struct secdp_sink_dev));
	}
	backup = true;

	tok = strsep(&str, ",");
	i = 0, j = 0;
	while (tok != NULL && *tok != 0xa/*LF*/) {
		if (i >= num_hmd * DEX_HMD_FIELD_MAX) {
			DP_ERR("num of tok cannot exceed <%dx%d>!\n",
				num_hmd, DEX_HMD_FIELD_MAX);
			break;
		}
		if (j >= MAX_NUM_HMD) {
			DP_ERR("num of HMD cannot exceed %d!\n", MAX_NUM_HMD);
			break;
		}

		rmdr = i % DEX_HMD_FIELD_MAX;
		switch (rmdr) {
		case DEX_HMD_MON:
			strlcpy(hmd_list[j].monitor_name, tok, MON_NAME_LEN);
			break;
		case DEX_HMD_VID:
		case DEX_HMD_PID:
			ret = _secdp_strtoint(tok, &value);
			if (ret)
				goto end;

			if (rmdr == DEX_HMD_VID) {
				hmd_list[j].ven_id = value;
			} else {
				hmd_list[j].prod_id = value;
				j++;	/* move next */
			}
			break;
		}

		tok = strsep(&str, ",");
		i++;
	}

	for (j = 0; j < MAX_NUM_HMD; j++) {
		if (strlen(hmd_list[j].monitor_name) > 0)
			DP_INFO("%s,0x%04x,0x%04x\n",
				hmd_list[j].monitor_name,
				hmd_list[j].ven_id,
				hmd_list[j].prod_id);
	}

end:
	if (backup && ret) {
		DP_INFO("restore hmd list!\n");
		for (j = 0; j < MAX_NUM_HMD; j++) {
			memcpy(&hmd_list[j], &hmd_bak[j],
				sizeof(struct secdp_sink_dev));
		}
	}
	return ret;
}

/**
 * check whether the connected sink matches an HMD entry from hmd_list
 */
static bool _secdp_check_hmd_dev(struct dp_display_private *dp,
		const struct secdp_sink_dev *hmd)
{
	bool ret = false;

	if (!dp || !hmd) {
		DP_ERR("invalid args!\n");
		goto end;
	}

	if (dp->sec.adapter.ven_id != hmd->ven_id)
		goto end;

	if (dp->sec.adapter.prod_id != hmd->prod_id)
		goto end;

	if (strncmp(dp->panel->monitor_name, hmd->monitor_name,
			strlen(dp->panel->monitor_name)))
		goto end;

	ret = true;
end:
	return ret;
}

/**
 * check whether the connected sink is a predefined HMD (AR/VR) device
 * @name_to_search  string to search for; if NULL, check whether any HMD
 *                  device in the list is connected
 * @retval true if found, false otherwise
 */
bool secdp_check_hmd_dev(struct secdp_misc *sec, const char *name_to_search)
{
	struct dp_display_private *dp;
	struct secdp_hmd *hmd;
	struct secdp_sink_dev *hmd_list;
	int i, list_size;
	bool found = false;

	dp = container_of(sec, struct dp_display_private, sec);
	hmd = &dp->sec.hmd;
	hmd_list = hmd->list;

	mutex_lock(&hmd->lock);

	list_size = MAX_NUM_HMD;

	for (i = 0; i < list_size; i++) {
		if (name_to_search != NULL &&
				strncmp(name_to_search, hmd_list[i].monitor_name,
					strlen(name_to_search)))
			continue;

		found = _secdp_check_hmd_dev(dp, &hmd_list[i]);
		if (found)
			break;
	}

	if (found)
		DP_INFO("hmd <%s>\n", hmd_list[i].monitor_name);

	mutex_unlock(&hmd->lock);
	return found;
}

#define PRN_MODE_COUNT 3

static void secdp_mode_count_init(struct dp_display_private *dp)
{
	struct secdp_misc *sec = &dp->sec;

	secdp_logger_set_mode_max_count(PRN_MODE_COUNT);
	sec->mode_cnt = PRN_MODE_COUNT + 1;
}

static void secdp_mode_count_dec(struct dp_display_private *dp)
{
	struct secdp_misc *sec = &dp->sec;

	secdp_logger_dec_mode_count();
	if (sec->mode_cnt > 0)
		sec->mode_cnt--;
}

static bool secdp_mode_count_check(struct dp_display_private *dp)
{
	struct secdp_misc *sec = &dp->sec;

	return sec->mode_cnt ? true : false;
}
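
/*
 * Send an "hpd=<0|1>" uevent to the secdp device node. Skipped when RF TX
 * backoff is disabled or when the same state was already reported.
 */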
static int secdp_send_hpd_event(struct secdp_misc *sec, bool hpd)
{
	struct dp_display_private *dp;
	struct drm_device *dev;
	struct drm_connector *connector;
	u32 val = 0;
	char buf[HPD_STRING_SIZE] = { 0, };
	char *envp[3];
	int rc = 0;

	dp = container_of(sec, struct dp_display_private, sec);

	if (!dp->parser->rf_tx_backoff) {
		DP_DEBUG("RF TX backoff disabled\n");
		return 0;
	}

	connector = dp->dp_display.base_connector;
	if (!connector) {
		DP_ERR("connector not set\n");
		return 0;
	}

	if (hpd == sec->hpd.prev_evt) {
		DP_DEBUG("already sent %d\n", hpd);
		return 0;
	}

	if (!hpd) {
		val = 0;
	} else {
		val = 1;
	}

	snprintf(buf, HPD_STRING_SIZE, "hpd=%d", val);
	DP_INFO("[%s]\n", buf);

	dev = connector->dev;
	envp[0] = "DEVPATH=/devices/virtual/sec/secdp";
	envp[1] = buf;
	envp[2] = NULL;

	rc = kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
	if (!rc)
		sec->hpd.prev_evt = hpd;
	else
		DP_INFO("hpd uevent failed %d\n", rc);

	return rc;
}
#endif

static inline bool dp_display_is_hdcp_enabled(struct dp_display_private *dp)
{
	return dp->link->hdcp_status.hdcp_version && dp->hdcp.ops;
}
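
/*
 * Top-level DP interrupt handler: dispatches to the HPD block (LPHW type
 * only), the controller, the AUX channel and, when active, the HDCP module.
 */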
static irqreturn_t dp_display_irq(int irq, void *dev_id)
{
	struct dp_display_private *dp = dev_id;

	if (!dp) {
		DP_ERR("invalid data\n");
		return IRQ_NONE;
	}

	/* DP HPD isr */
	if (dp->hpd->type == DP_HPD_LPHW)
		dp->hpd->isr(dp->hpd);

	/* DP controller isr */
	dp->ctrl->isr(dp->ctrl);

	/* DP aux isr */
	dp->aux->isr(dp->aux);

	/* HDCP isr */
	if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->isr) {
		if (dp->hdcp.ops->isr(dp->hdcp.data))
			DP_ERR("dp_hdcp_isr failed\n");
	}

	return IRQ_HANDLED;
}

static bool dp_display_is_ds_bridge(struct dp_panel *panel)
{
	return (panel->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
		DP_DWN_STRM_PORT_PRESENT);
}

static bool dp_display_is_sink_count_zero(struct dp_display_private *dp)
{
	return dp_display_is_ds_bridge(dp->panel) &&
		(dp->link->sink_count.count == 0);
}

static bool dp_display_is_ready(struct dp_display_private *dp)
{
	return dp->hpd->hpd_high && dp_display_state_is(DP_STATE_CONNECTED) &&
		!dp_display_is_sink_count_zero(dp) &&
		dp->hpd->alt_mode_cfg_done;
}
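
/*
 * Enable or disable audio on every active stream whose panel reports audio
 * support. On enable, the current link bandwidth code and lane count are
 * handed to the audio module first.
 */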
static void dp_audio_enable(struct dp_display_private *dp, bool enable)
{
	struct dp_panel *dp_panel;
	int idx;

	for (idx = DP_STREAM_0; idx < DP_STREAM_MAX; idx++) {
		if (!dp->active_panels[idx])
			continue;
		dp_panel = dp->active_panels[idx];

		if (dp_panel->audio_supported) {
			if (enable) {
				dp_panel->audio->bw_code =
					dp->link->link_params.bw_code;
				dp_panel->audio->lane_count =
					dp->link->link_params.lane_count;
				dp_panel->audio->on(dp_panel->audio);
			} else {
				dp_panel->audio->off(dp_panel->audio, false);
			}
		}
	}
}
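
/*
 * Add or remove a resume-latency PM QoS vote on each CPU in the configured
 * qos_cpu_mask. No-op if the mask is empty or the vote already matches the
 * requested state.
 */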
static void dp_display_qos_request(struct dp_display_private *dp, bool add_vote)
{
	struct device *cpu_dev;
	int cpu = 0;
	struct cpumask *cpu_mask;
	u32 latency = dp->parser->qos_cpu_latency;
	unsigned long mask = dp->parser->qos_cpu_mask;

	if (!dp->parser->qos_cpu_mask || (dp->pm_qos_requested == add_vote))
		return;

	cpu_mask = to_cpumask(&mask);
	for_each_cpu(cpu, cpu_mask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__, cpu);
			continue;
		}

		if (add_vote)
			dev_pm_qos_add_request(cpu_dev, &dp->pm_qos_req[cpu],
				DEV_PM_QOS_RESUME_LATENCY, latency);
		else
			dev_pm_qos_remove_request(&dp->pm_qos_req[cpu]);
	}

	SDE_EVT32_EXTERNAL(add_vote, mask, latency);
	dp->pm_qos_requested = add_vote;
}
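
/* Refresh the debugfs HDCP status string; optionally reset state and version first. */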
static void dp_display_update_hdcp_status(struct dp_display_private *dp,
		bool reset)
{
	if (reset) {
		dp->link->hdcp_status.hdcp_state = HDCP_STATE_INACTIVE;
		dp->link->hdcp_status.hdcp_version = HDCP_VERSION_NONE;
	}

	memset(dp->debug->hdcp_status, 0, sizeof(dp->debug->hdcp_status));

	snprintf(dp->debug->hdcp_status, sizeof(dp->debug->hdcp_status),
		"%s: %s\ncaps: %d\n",
		sde_hdcp_version(dp->link->hdcp_status.hdcp_version),
		sde_hdcp_state_name(dp->link->hdcp_status.hdcp_state),
		dp->hdcp.source_cap);
}
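
/*
 * Select the HDCP module to use for this connection: walk down from HDCP 2.2
 * and pick the first version supported by both the source and the sink.
 */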
static void dp_display_update_hdcp_info(struct dp_display_private *dp)
{
	void *fd = NULL;
	struct dp_hdcp_dev *dev = NULL;
	struct sde_hdcp_ops *ops = NULL;
	int i = HDCP_VERSION_2P2;

	dp_display_update_hdcp_status(dp, true);

	dp->hdcp.data = NULL;
	dp->hdcp.ops = NULL;

	if (dp->debug->hdcp_disabled || dp->debug->sim_mode)
		return;

	while (i) {
		dev = &dp->hdcp.dev[i];
		ops = dev->ops;
		fd = dev->fd;
		i >>= 1;

		if (!(dp->hdcp.source_cap & dev->ver))
			continue;

		if (ops->sink_support(fd)) {
			dp->hdcp.data = fd;
			dp->hdcp.ops = ops;
			dp->link->hdcp_status.hdcp_version = dev->ver;
			break;
		}
	}

	DP_DEBUG("HDCP version supported: %s\n",
		sde_hdcp_version(dp->link->hdcp_status.hdcp_version));
}
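
/*
 * Query each registered HDCP module for source-side support and record the
 * capable versions in dp->hdcp.source_cap.
 */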
static void dp_display_check_source_hdcp_caps(struct dp_display_private *dp)
{
	int i;
	struct dp_hdcp_dev *hdcp_dev = dp->hdcp.dev;

	if (dp->debug->hdcp_disabled) {
		DP_DEBUG("hdcp disabled\n");
		return;
	}

	for (i = 0; i < HDCP_VERSION_MAX; i++) {
		struct dp_hdcp_dev *dev = &hdcp_dev[i];
		struct sde_hdcp_ops *ops = dev->ops;
		void *fd = dev->fd;

		if (!fd || !ops)
			continue;

		if (ops->set_mode && ops->set_mode(fd, dp->mst.mst_active))
			continue;

		if (!(dp->hdcp.source_cap & dev->ver) &&
				ops->feature_supported &&
				ops->feature_supported(fd))
			dp->hdcp.source_cap |= dev->ver;
	}

#if defined(CONFIG_SECDP_BIGDATA)
	secdp_bigdata_save_item(BD_HDCP_VER,
		(dp->hdcp.source_cap & HDCP_VERSION_2P2) ? "hdcp2" :
		((dp->hdcp.source_cap & HDCP_VERSION_1X) ? "hdcp1" : "X"));
#endif

	dp_display_update_hdcp_status(dp, false);
}
  953. static void dp_display_hdcp_register_streams(struct dp_display_private *dp)
  954. {
  955. int rc;
  956. size_t i;
  957. struct sde_hdcp_ops *ops = dp->hdcp.ops;
  958. void *data = dp->hdcp.data;
  959. if (dp_display_is_ready(dp) && dp->mst.mst_active && ops &&
  960. ops->register_streams){
  961. struct stream_info streams[DP_STREAM_MAX];
  962. int index = 0;
  963. DP_DEBUG("Registering all active panel streams with HDCP\n");
  964. for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
  965. if (!dp->active_panels[i])
  966. continue;
  967. streams[index].stream_id = i;
  968. streams[index].virtual_channel =
  969. dp->active_panels[i]->vcpi;
  970. index++;
  971. }
  972. if (index > 0) {
  973. rc = ops->register_streams(data, index, streams);
  974. if (rc)
  975. DP_ERR("failed to register streams. rc = %d\n",
  976. rc);
  977. }
  978. }
  979. }
  980. static void dp_display_hdcp_deregister_stream(struct dp_display_private *dp,
  981. enum dp_stream_id stream_id)
  982. {
  983. if (dp->hdcp.ops->deregister_streams && dp->active_panels[stream_id]) {
  984. struct stream_info stream = {stream_id,
  985. dp->active_panels[stream_id]->vcpi};
  986. DP_DEBUG("Deregistering stream within HDCP library\n");
  987. dp->hdcp.ops->deregister_streams(dp->hdcp.data, 1, &stream);
  988. }
  989. }
  990. static void dp_display_hdcp_process_delayed_off(struct dp_display_private *dp)
  991. {
  992. if (dp->hdcp_delayed_off) {
  993. if (dp->hdcp.ops && dp->hdcp.ops->off)
  994. dp->hdcp.ops->off(dp->hdcp.data);
  995. dp_display_update_hdcp_status(dp, true);
  996. dp->hdcp_delayed_off = false;
  997. }
  998. }
  999. static int dp_display_hdcp_process_sink_sync(struct dp_display_private *dp)
  1000. {
  1001. u8 sink_status = 0;
  1002. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);
  1003. if (dp->debug->hdcp_wait_sink_sync) {
  1004. drm_dp_dpcd_readb(dp->aux->drm_aux, DP_SINK_STATUS,
  1005. &sink_status);
  1006. sink_status &= (DP_RECEIVE_PORT_0_STATUS |
  1007. DP_RECEIVE_PORT_1_STATUS);
  1008. #if !defined(CONFIG_SECDP)
  1009. if (sink_status < 1) {
  1010. DP_DEBUG("Sink not synchronized. Queuing again then exiting\n");
  1011. queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
  1012. return -EAGAIN;
  1013. }
  1014. #else
  1015. if (sink_status < 1 && !secdp_get_link_train_status(dp->ctrl)) {
  1016. DP_INFO("hdcp retry: %d\n", dp->sec.hdcp.retry);
  1017. if (dp->sec.hdcp.retry >= MAX_CNT_HDCP_RETRY) {
  1018. DP_DEBUG("stop queueing!\n");
  1019. schedule_delayed_work(&dp->sec.poor_discon_work,
  1020. msecs_to_jiffies(10));
  1021. return -EAGAIN;
  1022. }
  1023. dp->sec.hdcp.retry++;
  1024. DP_DEBUG("Sink not synchronized. Queuing again then exiting\n");
  1025. queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
  1026. return -EAGAIN;
  1027. }
  1028. #endif
  1029. /*
  1030. * Some sinks need more time to stabilize after synchronization
  1031. * and before it can handle an HDCP authentication request.
  1032. * Adding the delay for better interoperability.
  1033. */
  1034. msleep(6000);
  1035. }
  1036. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT);
  1037. return 0;
  1038. }
  1039. static int dp_display_hdcp_start(struct dp_display_private *dp)
  1040. {
  1041. if (dp->link->hdcp_status.hdcp_state != HDCP_STATE_INACTIVE)
  1042. return -EINVAL;
  1043. dp_display_check_source_hdcp_caps(dp);
  1044. dp_display_update_hdcp_info(dp);
  1045. if (dp_display_is_hdcp_enabled(dp)) {
  1046. if (dp->hdcp.ops && dp->hdcp.ops->on &&
  1047. dp->hdcp.ops->on(dp->hdcp.data)) {
  1048. dp_display_update_hdcp_status(dp, true);
  1049. return 0;
  1050. }
  1051. } else {
  1052. dp_display_update_hdcp_status(dp, true);
  1053. return 0;
  1054. }
  1055. return -EINVAL;
  1056. }
  1057. static void dp_display_hdcp_print_auth_state(struct dp_display_private *dp)
  1058. {
  1059. u32 hdcp_auth_state;
  1060. int rc;
  1061. rc = dp->catalog->ctrl.read_hdcp_status(&dp->catalog->ctrl);
  1062. if (rc >= 0) {
  1063. hdcp_auth_state = (rc >> 20) & 0x3;
  1064. DP_DEBUG("hdcp auth state %d\n", hdcp_auth_state);
  1065. }
  1066. }
  1067. static void dp_display_hdcp_process_state(struct dp_display_private *dp)
  1068. {
  1069. struct dp_link_hdcp_status *status;
  1070. struct sde_hdcp_ops *ops;
  1071. void *data;
  1072. int rc = 0;
  1073. status = &dp->link->hdcp_status;
  1074. ops = dp->hdcp.ops;
  1075. data = dp->hdcp.data;
  1076. #if defined(CONFIG_SECDP)
  1077. if (secdp_get_reboot_status()) {
  1078. DP_INFO("shutdown\n");
  1079. return;
  1080. }
  1081. #endif
  1082. if (status->hdcp_state != HDCP_STATE_AUTHENTICATED &&
  1083. dp->debug->force_encryption && ops && ops->force_encryption)
  1084. ops->force_encryption(data, dp->debug->force_encryption);
  1085. if (status->hdcp_state == HDCP_STATE_AUTHENTICATED)
  1086. dp_display_qos_request(dp, false);
  1087. else
  1088. dp_display_qos_request(dp, true);
  1089. switch (status->hdcp_state) {
  1090. case HDCP_STATE_INACTIVE:
  1091. dp_display_hdcp_register_streams(dp);
  1092. #if defined(CONFIG_SECDP)
  1093. if (!dp->panel->tbox)
  1094. secdp_read_link_status(dp->link);
  1095. #endif
  1096. if (dp->hdcp.ops && dp->hdcp.ops->authenticate)
  1097. rc = dp->hdcp.ops->authenticate(data);
  1098. if (!rc)
  1099. status->hdcp_state = HDCP_STATE_AUTHENTICATING;
  1100. break;
  1101. case HDCP_STATE_AUTH_FAIL:
  1102. #if defined(CONFIG_SECDP_BIGDATA)
  1103. secdp_bigdata_inc_error_cnt(ERR_HDCP_AUTH);
  1104. #endif
  1105. if (dp_display_is_ready(dp) &&
  1106. dp_display_state_is(DP_STATE_ENABLED)) {
  1107. if (ops && ops->on && ops->on(data)) {
  1108. dp_display_update_hdcp_status(dp, true);
  1109. return;
  1110. }
  1111. dp_display_hdcp_register_streams(dp);
  1112. if (ops && ops->reauthenticate) {
  1113. rc = ops->reauthenticate(data);
  1114. if (rc)
  1115. DP_ERR("failed rc=%d\n", rc);
  1116. }
  1117. status->hdcp_state = HDCP_STATE_AUTHENTICATING;
  1118. } else {
  1119. DP_DEBUG("not reauthenticating, cable disconnected\n");
  1120. }
  1121. break;
  1122. default:
  1123. dp_display_hdcp_register_streams(dp);
  1124. break;
  1125. }
  1126. }
  1127. static void dp_display_abort_hdcp(struct dp_display_private *dp,
  1128. bool abort)
  1129. {
  1130. u8 i = HDCP_VERSION_2P2;
  1131. struct dp_hdcp_dev *dev = NULL;
  1132. while (i) {
  1133. dev = &dp->hdcp.dev[i];
  1134. i >>= 1;
  1135. if (!(dp->hdcp.source_cap & dev->ver))
  1136. continue;
  1137. dev->ops->abort(dev->fd, abort);
  1138. }
  1139. }
  1140. static void dp_display_hdcp_cb_work(struct work_struct *work)
  1141. {
  1142. struct dp_display_private *dp;
  1143. struct delayed_work *dw = to_delayed_work(work);
  1144. struct dp_link_hdcp_status *status;
  1145. int rc = 0;
  1146. dp = container_of(dw, struct dp_display_private, hdcp_cb_work);
  1147. if (!dp_display_state_is(DP_STATE_ENABLED | DP_STATE_CONNECTED) ||
  1148. dp_display_state_is(DP_STATE_ABORTED | DP_STATE_HDCP_ABORTED))
  1149. return;
  1150. if (dp_display_state_is(DP_STATE_SUSPENDED)) {
  1151. DP_DEBUG("System suspending. Delay HDCP operations\n");
  1152. queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
  1153. return;
  1154. }
  1155. dp_display_hdcp_process_delayed_off(dp);
  1156. rc = dp_display_hdcp_process_sink_sync(dp);
  1157. if (rc)
  1158. return;
  1159. rc = dp_display_hdcp_start(dp);
  1160. if (!rc)
  1161. return;
  1162. dp_display_hdcp_print_auth_state(dp);
  1163. status = &dp->link->hdcp_status;
  1164. DP_DEBUG("%s: %s\n", sde_hdcp_version(status->hdcp_version),
  1165. sde_hdcp_state_name(status->hdcp_state));
  1166. dp_display_update_hdcp_status(dp, false);
  1167. dp_display_hdcp_process_state(dp);
  1168. }
  1169. static void dp_display_notify_hdcp_status_cb(void *ptr,
  1170. enum sde_hdcp_state state)
  1171. {
  1172. struct dp_display_private *dp = ptr;
  1173. if (!dp) {
  1174. DP_ERR("invalid input\n");
  1175. return;
  1176. }
  1177. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY,
  1178. dp->link->hdcp_status.hdcp_state);
  1179. dp->link->hdcp_status.hdcp_state = state;
  1180. queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ/4);
  1181. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT,
  1182. dp->link->hdcp_status.hdcp_state);
  1183. }
  1184. static void dp_display_deinitialize_hdcp(struct dp_display_private *dp)
  1185. {
  1186. if (!dp) {
  1187. DP_ERR("invalid input\n");
  1188. return;
  1189. }
  1190. sde_hdcp_1x_deinit(dp->hdcp.dev[HDCP_VERSION_1X].fd);
  1191. sde_dp_hdcp2p2_deinit(dp->hdcp.dev[HDCP_VERSION_2P2].fd);
  1192. }
  1193. static int dp_display_initialize_hdcp(struct dp_display_private *dp)
  1194. {
  1195. struct sde_hdcp_init_data hdcp_init_data;
  1196. struct dp_parser *parser;
  1197. void *fd;
  1198. int rc = 0;
  1199. if (!dp) {
  1200. DP_ERR("invalid input\n");
  1201. return -EINVAL;
  1202. }
  1203. parser = dp->parser;
  1204. hdcp_init_data.client_id = HDCP_CLIENT_DP;
  1205. hdcp_init_data.drm_aux = dp->aux->drm_aux;
  1206. hdcp_init_data.cb_data = (void *)dp;
  1207. hdcp_init_data.workq = dp->wq;
  1208. hdcp_init_data.sec_access = true;
  1209. hdcp_init_data.notify_status = dp_display_notify_hdcp_status_cb;
  1210. hdcp_init_data.dp_ahb = &parser->get_io(parser, "dp_ahb")->io;
  1211. hdcp_init_data.dp_aux = &parser->get_io(parser, "dp_aux")->io;
  1212. hdcp_init_data.dp_link = &parser->get_io(parser, "dp_link")->io;
  1213. hdcp_init_data.dp_p0 = &parser->get_io(parser, "dp_p0")->io;
  1214. hdcp_init_data.hdcp_io = &parser->get_io(parser,
  1215. "hdcp_physical")->io;
  1216. hdcp_init_data.revision = &dp->panel->link_info.revision;
  1217. hdcp_init_data.msm_hdcp_dev = dp->parser->msm_hdcp_dev;
  1218. fd = sde_hdcp_1x_init(&hdcp_init_data);
  1219. if (IS_ERR_OR_NULL(fd)) {
  1220. DP_DEBUG("Error initializing HDCP 1.x\n");
  1221. return -EINVAL;
  1222. }
  1223. dp->hdcp.dev[HDCP_VERSION_1X].fd = fd;
  1224. dp->hdcp.dev[HDCP_VERSION_1X].ops = sde_hdcp_1x_get(fd);
  1225. dp->hdcp.dev[HDCP_VERSION_1X].ver = HDCP_VERSION_1X;
  1226. DP_INFO("HDCP 1.3 initialized\n");
  1227. fd = sde_dp_hdcp2p2_init(&hdcp_init_data);
  1228. if (IS_ERR_OR_NULL(fd)) {
  1229. DP_DEBUG("Error initializing HDCP 2.x\n");
  1230. rc = -EINVAL;
  1231. goto error;
  1232. }
  1233. dp->hdcp.dev[HDCP_VERSION_2P2].fd = fd;
  1234. dp->hdcp.dev[HDCP_VERSION_2P2].ops = sde_dp_hdcp2p2_get(fd);
  1235. dp->hdcp.dev[HDCP_VERSION_2P2].ver = HDCP_VERSION_2P2;
  1236. DP_INFO("HDCP 2.2 initialized\n");
  1237. return 0;
  1238. error:
  1239. sde_hdcp_1x_deinit(dp->hdcp.dev[HDCP_VERSION_1X].fd);
  1240. return rc;
  1241. }
  1242. static void dp_display_pause_audio(struct dp_display_private *dp, bool pause)
  1243. {
  1244. struct dp_panel *dp_panel;
  1245. int idx;
  1246. for (idx = DP_STREAM_0; idx < DP_STREAM_MAX; idx++) {
  1247. if (!dp->active_panels[idx])
  1248. continue;
  1249. dp_panel = dp->active_panels[idx];
  1250. if (dp_panel->audio_supported)
  1251. dp_panel->audio->tui_active = pause;
  1252. }
  1253. }
  1254. static int dp_display_pre_hw_release(void *data)
  1255. {
  1256. struct dp_display_private *dp;
  1257. struct dp_display *dp_display = data;
  1258. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);
  1259. if (!dp_display)
  1260. return -EINVAL;
  1261. dp = container_of(dp_display, struct dp_display_private, dp_display);
  1262. mutex_lock(&dp->session_lock);
  1263. dp_display_state_add(DP_STATE_TUI_ACTIVE);
  1264. cancel_work_sync(&dp->connect_work);
  1265. cancel_work_sync(&dp->attention_work);
  1266. cancel_work_sync(&dp->disconnect_work);
  1267. flush_workqueue(dp->wq);
  1268. dp_display_pause_audio(dp, true);
  1269. disable_irq(dp->irq);
  1270. mutex_unlock(&dp->session_lock);
  1271. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT);
  1272. return 0;
  1273. }
  1274. static int dp_display_post_hw_acquire(void *data)
  1275. {
  1276. struct dp_display_private *dp;
  1277. struct dp_display *dp_display = data;
  1278. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);
  1279. if (!dp_display)
  1280. return -EINVAL;
  1281. dp = container_of(dp_display, struct dp_display_private, dp_display);
  1282. mutex_lock(&dp->session_lock);
  1283. dp_display_state_remove(DP_STATE_TUI_ACTIVE);
  1284. dp_display_pause_audio(dp, false);
  1285. enable_irq(dp->irq);
  1286. mutex_unlock(&dp->session_lock);
  1287. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT);
  1288. return 0;
  1289. }
  1290. static int dp_display_bind(struct device *dev, struct device *master,
  1291. void *data)
  1292. {
  1293. int rc = 0;
  1294. struct dp_display_private *dp;
  1295. struct drm_device *drm;
  1296. struct platform_device *pdev = to_platform_device(dev);
  1297. struct msm_vm_ops vm_event_ops = {
  1298. .vm_pre_hw_release = dp_display_pre_hw_release,
  1299. .vm_post_hw_acquire = dp_display_post_hw_acquire,
  1300. };
  1301. if (!dev || !pdev || !master) {
  1302. DP_ERR("invalid param(s), dev %pK, pdev %pK, master %pK\n",
  1303. dev, pdev, master);
  1304. rc = -EINVAL;
  1305. goto end;
  1306. }
  1307. drm = dev_get_drvdata(master);
  1308. dp = platform_get_drvdata(pdev);
  1309. if (!drm || !dp) {
  1310. DP_ERR("invalid param(s), drm %pK, dp %pK\n",
  1311. drm, dp);
  1312. rc = -EINVAL;
  1313. goto end;
  1314. }
  1315. dp->dp_display.drm_dev = drm;
  1316. dp->priv = drm->dev_private;
  1317. msm_register_vm_event(master, dev, &vm_event_ops,
  1318. (void *)&dp->dp_display);
  1319. end:
  1320. return rc;
  1321. }
  1322. static void dp_display_unbind(struct device *dev, struct device *master,
  1323. void *data)
  1324. {
  1325. struct dp_display_private *dp;
  1326. struct platform_device *pdev = to_platform_device(dev);
  1327. if (!dev || !pdev) {
  1328. DP_ERR("invalid param(s)\n");
  1329. return;
  1330. }
  1331. dp = platform_get_drvdata(pdev);
  1332. if (!dp) {
  1333. DP_ERR("Invalid params\n");
  1334. return;
  1335. }
  1336. if (dp->power)
  1337. (void)dp->power->power_client_deinit(dp->power);
  1338. if (dp->aux)
  1339. (void)dp->aux->drm_aux_deregister(dp->aux);
  1340. dp_display_deinitialize_hdcp(dp);
  1341. }
  1342. static const struct component_ops dp_display_comp_ops = {
  1343. .bind = dp_display_bind,
  1344. .unbind = dp_display_unbind,
  1345. };
  1346. static bool dp_display_send_hpd_event(struct dp_display_private *dp)
  1347. {
  1348. struct drm_device *dev = NULL;
  1349. struct drm_connector *connector;
  1350. char name[HPD_STRING_SIZE], status[HPD_STRING_SIZE],
  1351. bpp[HPD_STRING_SIZE], pattern[HPD_STRING_SIZE];
  1352. char *envp[5];
  1353. struct dp_display *display;
  1354. int rc = 0;
  1355. connector = dp->dp_display.base_connector;
  1356. display = &dp->dp_display;
  1357. if (!connector) {
  1358. DP_ERR("connector not set\n");
  1359. return false;
  1360. }
  1361. connector->status = display->is_sst_connected ? connector_status_connected :
  1362. connector_status_disconnected;
  1363. if (dp->cached_connector_status == connector->status) {
  1364. DP_DEBUG("connector status (%d) unchanged, skipping uevent\n",
  1365. dp->cached_connector_status);
  1366. return false;
  1367. }
  1368. dp->cached_connector_status = connector->status;
  1369. dev = connector->dev;
  1370. if (dp->debug->skip_uevent) {
  1371. DP_INFO("skipping uevent\n");
  1372. return false;
  1373. }
  1374. #if defined(CONFIG_SECDP)
  1375. msleep(100);
  1376. atomic_set(&dp->sec.noti_status, 1);
  1377. secdp_mode_count_init(dp);
  1378. #endif
  1379. snprintf(name, HPD_STRING_SIZE, "name=%s", connector->name);
  1380. snprintf(status, HPD_STRING_SIZE, "status=%s",
  1381. drm_get_connector_status_name(connector->status));
  1382. snprintf(bpp, HPD_STRING_SIZE, "bpp=%d",
  1383. dp_link_bit_depth_to_bpp(
  1384. dp->link->test_video.test_bit_depth));
  1385. snprintf(pattern, HPD_STRING_SIZE, "pattern=%d",
  1386. dp->link->test_video.test_video_pattern);
  1387. DP_INFO("[%s]:[%s] [%s] [%s]\n", name, status, bpp, pattern);
  1388. envp[0] = name;
  1389. envp[1] = status;
  1390. envp[2] = bpp;
  1391. envp[3] = pattern;
  1392. envp[4] = NULL;
  1393. rc = kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
  1394. DP_INFO("uevent %s: %d\n", rc ? "failure" : "success", rc);
  1395. return true;
  1396. }
  1397. static int dp_display_send_hpd_notification(struct dp_display_private *dp, bool skip_wait)
  1398. {
  1399. int ret = 0;
  1400. bool hpd = !!dp_display_state_is(DP_STATE_CONNECTED);
  1401. #if defined(CONFIG_SECDP)
  1402. struct secdp_misc *sec = &dp->sec;
  1403. if (hpd && !secdp_get_cable_status()) {
  1404. DP_INFO("cable is out\n");
  1405. return -EIO;
  1406. }
  1407. DP_ENTER("\n");
  1408. mutex_lock(&sec->notify_lock);
  1409. #endif
  1410. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state, hpd);
  1411. /*
  1412. * Send the notification only if there is any change. This check is
  1413. * necessary since it is possible that the connect_work may or may not
  1414. * skip sending the notification in order to respond to a pending
  1415. * attention message. Attention work thread will always attempt to
  1416. * send the notification after successfully handling the attention
  1417. * message. This check here will avoid any unintended duplicate
  1418. * notifications.
  1419. */
  1420. if (dp_display_state_is(DP_STATE_CONNECT_NOTIFIED) && hpd) {
  1421. DP_DEBUG("connection notified already, skip notification\n");
  1422. goto skip_wait;
  1423. } else if (dp_display_state_is(DP_STATE_DISCONNECT_NOTIFIED) && !hpd) {
  1424. DP_DEBUG("disonnect notified already, skip notification\n");
  1425. goto skip_wait;
  1426. }
  1427. dp->aux->state |= DP_STATE_NOTIFICATION_SENT;
  1428. reinit_completion(&dp->notification_comp);
  1429. if (!dp->mst.mst_active) {
  1430. dp->dp_display.is_sst_connected = hpd;
  1431. if (!dp_display_send_hpd_event(dp))
  1432. goto skip_wait;
  1433. } else {
  1434. dp->dp_display.is_sst_connected = false;
  1435. if (!dp->mst.cbs.hpd)
  1436. goto skip_wait;
  1437. dp->mst.cbs.hpd(&dp->dp_display, hpd);
  1438. }
  1439. if (hpd) {
  1440. dp_display_state_add(DP_STATE_CONNECT_NOTIFIED);
  1441. dp_display_state_remove(DP_STATE_DISCONNECT_NOTIFIED);
  1442. } else {
  1443. dp_display_state_add(DP_STATE_DISCONNECT_NOTIFIED);
  1444. dp_display_state_remove(DP_STATE_CONNECT_NOTIFIED);
  1445. }
  1446. #if defined(CONFIG_SECDP)
  1447. if (!hpd && !dp_display_state_is(DP_STATE_ENABLED)) {
  1448. DP_INFO("DP is already off, no wait\n");
  1449. goto skip_wait;
  1450. }
  1451. #endif
  1452. /*
  1453. * Skip the wait if TUI is active considering that the user mode will
  1454. * not act on the notification until after the TUI session is over.
  1455. */
  1456. if (dp_display_state_is(DP_STATE_TUI_ACTIVE)) {
  1457. dp_display_state_log("[TUI is active, skipping wait]");
  1458. goto skip_wait;
  1459. }
  1460. if (skip_wait || (hpd && dp->mst.mst_active))
  1461. goto skip_wait;
  1462. if (!dp->mst.mst_active &&
  1463. (!!dp_display_state_is(DP_STATE_ENABLED) == hpd))
  1464. goto skip_wait;
  1465. #if !defined(CONFIG_SECDP)
  1466. // wait 2 seconds
  1467. if (wait_for_completion_timeout(&dp->notification_comp, HZ * 2))
  1468. #else
  1469. if (wait_for_completion_timeout(&dp->notification_comp, HZ * 17))
  1470. #endif
  1471. goto skip_wait;
  1472. //resend notification
  1473. if (dp->mst.mst_active)
  1474. dp->mst.cbs.hpd(&dp->dp_display, hpd);
  1475. else
  1476. dp_display_send_hpd_event(dp);
  1477. // wait another 3 seconds
  1478. if (!wait_for_completion_timeout(&dp->notification_comp, HZ * 3)) {
  1479. DP_WARN("%s timeout\n", hpd ? "connect" : "disconnect");
  1480. ret = -EINVAL;
  1481. }
  1482. skip_wait:
  1483. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, hpd, ret);
  1484. #if defined(CONFIG_SECDP)
  1485. DP_LEAVE("\n");
  1486. mutex_unlock(&sec->notify_lock);
  1487. #endif
  1488. return ret;
  1489. }
  1490. static void dp_display_update_mst_state(struct dp_display_private *dp,
  1491. bool state)
  1492. {
  1493. dp->mst.mst_active = state;
  1494. dp->panel->mst_state = state;
  1495. }
  1496. static void dp_display_mst_init(struct dp_display_private *dp)
  1497. {
  1498. bool is_mst_receiver;
  1499. const unsigned long clear_mstm_ctrl_timeout_us = 100000;
  1500. u8 old_mstm_ctrl;
  1501. int ret;
  1502. if (!dp->parser->has_mst || !dp->mst.drm_registered) {
  1503. DP_MST_DEBUG("mst not enabled. has_mst:%d, registered:%d\n",
  1504. dp->parser->has_mst, dp->mst.drm_registered);
  1505. return;
  1506. }
  1507. is_mst_receiver = dp->panel->read_mst_cap(dp->panel);
  1508. if (!is_mst_receiver) {
  1509. DP_MST_DEBUG("sink doesn't support mst\n");
  1510. return;
  1511. }
  1512. /* clear sink mst state */
  1513. drm_dp_dpcd_readb(dp->aux->drm_aux, DP_MSTM_CTRL, &old_mstm_ctrl);
  1514. drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0);
  1515. /* add extra delay if MST state is not cleared */
  1516. if (old_mstm_ctrl) {
  1517. DP_MST_DEBUG("MSTM_CTRL is not cleared, wait %luus\n",
  1518. clear_mstm_ctrl_timeout_us);
  1519. usleep_range(clear_mstm_ctrl_timeout_us,
  1520. clear_mstm_ctrl_timeout_us + 1000);
  1521. }
  1522. ret = drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL,
  1523. DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
  1524. if (ret < 0) {
  1525. DP_ERR("sink mst enablement failed\n");
  1526. return;
  1527. }
  1528. dp_display_update_mst_state(dp, true);
  1529. }
  1530. static void dp_display_set_mst_mgr_state(struct dp_display_private *dp,
  1531. bool state)
  1532. {
  1533. if (!dp->mst.mst_active)
  1534. return;
  1535. if (dp->mst.cbs.set_mgr_state)
  1536. dp->mst.cbs.set_mgr_state(&dp->dp_display, state);
  1537. DP_MST_DEBUG("mst_mgr_state: %d\n", state);
  1538. }
  1539. static int dp_display_host_init(struct dp_display_private *dp)
  1540. {
  1541. bool flip = false;
  1542. bool reset;
  1543. int rc = 0;
  1544. if (dp_display_state_is(DP_STATE_INITIALIZED)) {
  1545. dp_display_state_log("[already initialized]");
  1546. return rc;
  1547. }
  1548. if (dp->hpd->orientation == ORIENTATION_CC2)
  1549. flip = true;
  1550. reset = dp->debug->sim_mode ? false : !dp->hpd->multi_func;
  1551. rc = dp->power->init(dp->power, flip);
  1552. if (rc) {
  1553. DP_WARN("Power init failed.\n");
  1554. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_CASE1, dp->state);
  1555. return rc;
  1556. }
  1557. dp->hpd->host_init(dp->hpd, &dp->catalog->hpd);
  1558. rc = dp->ctrl->init(dp->ctrl, flip, reset);
  1559. if (rc) {
  1560. DP_WARN("Ctrl init Failed.\n");
  1561. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_CASE2, dp->state);
  1562. goto error_ctrl;
  1563. }
  1564. enable_irq(dp->irq);
  1565. dp_display_abort_hdcp(dp, false);
  1566. dp_display_state_add(DP_STATE_INITIALIZED);
  1567. /* log this as it results from user action of cable connection */
  1568. DP_INFO("[OK]\n");
  1569. return rc;
  1570. error_ctrl:
  1571. dp->hpd->host_deinit(dp->hpd, &dp->catalog->hpd);
  1572. dp->power->deinit(dp->power);
  1573. return rc;
  1574. }
  1575. static int dp_display_host_ready(struct dp_display_private *dp)
  1576. {
  1577. int rc = 0;
  1578. if (!dp_display_state_is(DP_STATE_INITIALIZED)) {
  1579. rc = dp_display_host_init(dp);
  1580. if (rc) {
  1581. dp_display_state_show("[not initialized]");
  1582. return rc;
  1583. }
  1584. }
  1585. if (dp_display_state_is(DP_STATE_READY)) {
  1586. dp_display_state_log("[already ready]");
  1587. return rc;
  1588. }
  1589. /*
  1590. * Reset the aborted state for AUX and CTRL modules. This will
  1591. * allow these modules to execute normally in response to the
  1592. * cable connection event.
  1593. *
  1594. * One corner case still exists. While the execution flow ensures
  1595. * that cable disconnection flushes all pending work items on the DP
  1596. * workqueue, and waits for the user module to clean up the DP
  1597. * connection session, it is possible that the system delays can
  1598. * lead to timeouts in the connect path. As a result, the actual
  1599. * connection callback from user modules can come in late and can
  1600. * race against a subsequent connection event here which would have
  1601. * reset the aborted flags. There is no clear solution for this since
  1602. * the connect/disconnect notifications do not currently have any
  1603. * sessions IDs.
  1604. */
  1605. dp->aux->abort(dp->aux, false);
  1606. dp->ctrl->abort(dp->ctrl, false);
  1607. dp->aux->init(dp->aux, dp->parser->aux_cfg);
  1608. dp->panel->init(dp->panel);
  1609. dp_display_state_add(DP_STATE_READY);
  1610. /* log this as it results from user action of cable connection */
  1611. DP_INFO("[OK]\n");
  1612. return rc;
  1613. }
  1614. static void dp_display_host_unready(struct dp_display_private *dp)
  1615. {
  1616. if (!dp_display_state_is(DP_STATE_INITIALIZED)) {
  1617. dp_display_state_warn("[not initialized]");
  1618. return;
  1619. }
  1620. if (!dp_display_state_is(DP_STATE_READY)) {
  1621. dp_display_state_show("[not ready]");
  1622. return;
  1623. }
  1624. dp_display_state_remove(DP_STATE_READY);
  1625. dp->aux->deinit(dp->aux);
  1626. /* log this as it results from user action of cable disconnection */
  1627. DP_INFO("[OK]\n");
  1628. }
  1629. static void dp_display_host_deinit(struct dp_display_private *dp)
  1630. {
  1631. if (dp->active_stream_cnt) {
  1632. SDE_EVT32_EXTERNAL(dp->state, dp->active_stream_cnt);
  1633. DP_DEBUG("active stream present\n");
  1634. return;
  1635. }
  1636. if (!dp_display_state_is(DP_STATE_INITIALIZED)) {
  1637. dp_display_state_show("[not initialized]");
  1638. return;
  1639. }
  1640. if (dp_display_state_is(DP_STATE_READY)) {
  1641. DP_DEBUG("dp deinit before unready\n");
  1642. dp_display_host_unready(dp);
  1643. }
  1644. dp_display_abort_hdcp(dp, true);
  1645. dp->ctrl->deinit(dp->ctrl);
  1646. dp->hpd->host_deinit(dp->hpd, &dp->catalog->hpd);
  1647. dp->power->deinit(dp->power);
  1648. disable_irq(dp->irq);
  1649. dp->aux->state = 0;
  1650. dp_display_state_remove(DP_STATE_INITIALIZED);
  1651. /* log this as it results from user action of cable dis-connection */
  1652. DP_INFO("[OK]\n");
  1653. }
  1654. static bool dp_display_hpd_irq_pending(struct dp_display_private *dp)
  1655. {
  1656. unsigned long wait_timeout_ms = 0;
  1657. unsigned long t_out = 0;
  1658. unsigned long wait_time = 0;
  1659. do {
  1660. /*
  1661. * If an IRQ HPD is pending, then do not send a connect notification.
  1662. * Once this work returns, the IRQ HPD would be processed and any
  1663. * required actions (such as link maintenance) would be done which
  1664. * will subsequently send the HPD notification. To keep things simple,
  1665. * do this only for SST use-cases. MST use cases require additional
  1666. * care in order to handle the side-band communications as well.
  1667. *
  1668. * One of the main motivations for this is DP LL 1.4 CTS use case
  1669. * where it is possible that we could get a test request right after
  1670. * a connection, and the strict timing requriements of the test can
  1671. * only be met if we do not wait for the e2e connection to be set up.
  1672. */
  1673. if (!dp->mst.mst_active && (work_busy(&dp->attention_work) == WORK_BUSY_PENDING)) {
  1674. SDE_EVT32_EXTERNAL(dp->state, 99, jiffies_to_msecs(t_out));
  1675. DP_DEBUG("Attention pending, skip HPD notification\n");
  1676. return true;
  1677. }
  1678. /*
  1679. * If no IRQ HPD, delay the HPD connect notification for
  1680. * MAX_CONNECT_NOTIFICATION_DELAY_MS to see if sink generates any IRQ HPDs
  1681. * after the HPD high. Wait for
  1682. * MAX_CONNECT_NOTIFICATION_DELAY_MS to make sure any IRQ HPD from test
  1683. * requests aren't missed.
  1684. */
  1685. reinit_completion(&dp->attention_comp);
  1686. wait_timeout_ms = min_t(unsigned long, dp->debug->connect_notification_delay_ms,
  1687. (unsigned long) MAX_CONNECT_NOTIFICATION_DELAY_MS - wait_time);
  1688. t_out = wait_for_completion_timeout(&dp->attention_comp,
  1689. msecs_to_jiffies(wait_timeout_ms));
  1690. wait_time += (t_out == 0) ? wait_timeout_ms : t_out;
  1691. } while ((wait_timeout_ms < wait_time) && (wait_time < MAX_CONNECT_NOTIFICATION_DELAY_MS));
  1692. DP_DEBUG("wait_timeout=%lu ms, time_waited=%lu ms\n", wait_timeout_ms, wait_time);
  1693. return false;
  1694. }
  1695. static int dp_display_process_hpd_high(struct dp_display_private *dp)
  1696. {
  1697. int rc = -EINVAL;
  1698. #if defined(CONFIG_SECDP)
  1699. bool core_off = false;
  1700. #endif
  1701. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  1702. mutex_lock(&dp->session_lock);
  1703. if (dp_display_state_is(DP_STATE_CONNECTED)) {
  1704. DP_DEBUG("dp already connected, skipping hpd high\n");
  1705. mutex_unlock(&dp->session_lock);
  1706. return -EISCONN;
  1707. }
  1708. dp_display_state_add(DP_STATE_CONNECTED);
  1709. dp->dp_display.max_pclk_khz = min(dp->parser->max_pclk_khz,
  1710. dp->debug->max_pclk_khz);
  1711. if (!dp->debug->sim_mode && !dp->no_aux_switch && !dp->parser->gpio_aux_switch
  1712. && dp->aux_switch_node && dp->aux->switch_configure) {
  1713. rc = dp->aux->switch_configure(dp->aux, true, dp->hpd->orientation);
  1714. if (rc)
  1715. goto err_state;
  1716. }
  1717. #if defined(CONFIG_SECDP)
  1718. secdp_set_wakelock(dp, true);
  1719. #endif
  1720. /*
  1721. * If dp video session is not restored from a previous session teardown
  1722. * by userspace, ensure the host_init is executed, in such a scenario,
  1723. * so that all the required DP resources are enabled.
  1724. *
  1725. * Below is one of the sequences of events which describe the above
  1726. * scenario:
  1727. * a. Source initiated power down resulting in host_deinit.
  1728. * b. Sink issues hpd low attention without physical cable disconnect.
  1729. * c. Source initiated power up sequence returns early because hpd is
  1730. * not high.
  1731. * d. Sink issues a hpd high attention event.
  1732. */
  1733. if (dp_display_state_is(DP_STATE_SRC_PWRDN) &&
  1734. dp_display_state_is(DP_STATE_CONFIGURED)) {
  1735. rc = dp_display_host_init(dp);
  1736. if (rc) {
  1737. DP_WARN("Host init Failed");
  1738. if (!dp_display_state_is(DP_STATE_SUSPENDED)) {
  1739. /*
  1740. * If not suspended no point of going forward if
  1741. * resource is not enabled.
  1742. */
  1743. dp_display_state_remove(DP_STATE_CONNECTED);
  1744. }
  1745. goto err_unlock;
  1746. }
  1747. /*
  1748. * If device is suspended and host_init fails, there is
  1749. * one more chance for host init to happen in prepare which
  1750. * is why DP_STATE_SRC_PWRDN is removed only at success.
  1751. */
  1752. dp_display_state_remove(DP_STATE_SRC_PWRDN);
  1753. }
  1754. rc = dp_display_host_ready(dp);
  1755. if (rc) {
  1756. dp_display_state_show("[ready failed]");
  1757. goto err_state;
  1758. }
  1759. dp->link->psm_config(dp->link, &dp->panel->link_info, false);
  1760. dp->debug->psm_enabled = false;
  1761. if (!dp->dp_display.base_connector)
  1762. goto err_unready;
  1763. rc = dp->panel->read_sink_caps(dp->panel,
  1764. dp->dp_display.base_connector, dp->hpd->multi_func);
  1765. #if defined(CONFIG_SECDP)
  1766. if (!secdp_get_hpd_status() || !secdp_get_cable_status()) {
  1767. DP_INFO("hpd_low or cable_lost or AUX failure: %d\n", rc);
  1768. rc = -EIO;
  1769. core_off = true;
  1770. goto err_mst;
  1771. }
  1772. #endif
  1773. /*
  1774. * ETIMEDOUT --> cable may have been removed
  1775. * ENOTCONN --> no downstream device connected
  1776. */
  1777. #if !defined(CONFIG_SECDP)
  1778. if (rc == -ETIMEDOUT || rc == -ENOTCONN)
  1779. goto err_unready;
  1780. #else
  1781. if (rc == -ENOTCONN)
  1782. goto err_unready;
  1783. if (rc == -ETIMEDOUT) {
  1784. core_off = true;
  1785. goto err_unready;
  1786. }
  1787. if (rc == -EINVAL) {
  1788. /* read EDID is corrupted or invalid, failsafe case */
  1789. secdp_send_poor_connection_event(dp, true);
  1790. }
  1791. dp->sec.dex.prev = secdp_check_dex_mode(dp);
  1792. DP_INFO("dex.ui:%d,dex.curr:%d\n",
  1793. dp->sec.dex.setting_ui, dp->sec.dex.curr);
  1794. secdp_read_branch_revision(dp);
  1795. dp->sec.hmd.exist = secdp_check_hmd_dev(&dp->sec, NULL);
  1796. #if defined(CONFIG_SECDP_BIGDATA)
  1797. if (dp->sec.dex.prev != DEX_DISABLED)
  1798. secdp_bigdata_save_item(BD_DP_MODE, "DEX");
  1799. else
  1800. secdp_bigdata_save_item(BD_DP_MODE, "MIRROR");
  1801. #endif
  1802. #endif/*CONFIG_SECDP*/
  1803. dp->link->process_request(dp->link);
  1804. dp->panel->handle_sink_request(dp->panel);
  1805. dp_display_mst_init(dp);
  1806. rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active,
  1807. dp->panel->fec_en, dp->panel->dsc_en, false);
  1808. if (rc) {
  1809. #if defined(CONFIG_SECDP)
  1810. core_off = true;
  1811. #endif
  1812. goto err_mst;
  1813. }
  1814. dp->process_hpd_connect = false;
  1815. dp_display_set_mst_mgr_state(dp, true);
  1816. mutex_unlock(&dp->session_lock);
  1817. #if defined(CONFIG_SECDP)
  1818. {
  1819. int wait = secdp_check_boot_time();
  1820. if (!rc && !dp_display_state_is(DP_STATE_ABORTED) && wait) {
  1821. DP_INFO("deferred HPD noti at boot time! wait: %d\n", wait);
  1822. schedule_delayed_work(&dp->sec.hpd.noti_work,
  1823. msecs_to_jiffies(wait * 1000));
  1824. dp->sec.hpd.noti_deferred = true;
  1825. return rc;
  1826. }
  1827. }
  1828. #endif
  1829. if (dp_display_hpd_irq_pending(dp))
  1830. goto end;
  1831. if (!rc && !dp_display_state_is(DP_STATE_ABORTED))
  1832. dp_display_send_hpd_notification(dp, false);
  1833. #if defined(CONFIG_SECDP)
  1834. if (rc || dp_display_state_is(DP_STATE_ABORTED))
  1835. secdp_set_wakelock(dp, false);
  1836. #endif
  1837. goto end;
  1838. err_mst:
  1839. dp_display_update_mst_state(dp, false);
  1840. err_unready:
  1841. dp_display_host_unready(dp);
  1842. err_state:
  1843. dp_display_state_remove(DP_STATE_CONNECTED);
  1844. err_unlock:
  1845. #if defined(CONFIG_SECDP)
  1846. secdp_send_hpd_event(&dp->sec, false);
  1847. secdp_set_wakelock(dp, false);
  1848. if (core_off) {
  1849. dp_display_host_deinit(dp);
  1850. secdp_send_poor_connection_event(dp, false);
  1851. }
  1852. #endif
  1853. mutex_unlock(&dp->session_lock);
  1854. end:
  1855. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, rc);
  1856. return rc;
  1857. }
  1858. static void dp_display_process_mst_hpd_low(struct dp_display_private *dp, bool skip_wait)
  1859. {
  1860. int rc = 0;
  1861. if (dp->mst.mst_active) {
  1862. DP_MST_DEBUG("mst_hpd_low work\n");
  1863. /*
  1864. * HPD unplug callflow:
  1865. * 1. send hpd unplug on base connector so usermode can disable
  1866. * all external displays.
  1867. * 2. unset mst state in the topology mgr so the branch device
  1868. * can be cleaned up.
  1869. */
  1870. if ((dp_display_state_is(DP_STATE_CONNECT_NOTIFIED) ||
  1871. dp_display_state_is(DP_STATE_ENABLED)))
  1872. rc = dp_display_send_hpd_notification(dp, skip_wait);
  1873. dp_display_set_mst_mgr_state(dp, false);
  1874. dp_display_update_mst_state(dp, false);
  1875. }
  1876. DP_MST_DEBUG("mst_hpd_low. mst_active:%d\n", dp->mst.mst_active);
  1877. }
  1878. static int dp_display_process_hpd_low(struct dp_display_private *dp, bool skip_wait)
  1879. {
  1880. int rc = 0;
  1881. dp_display_state_remove(DP_STATE_CONNECTED);
  1882. dp->process_hpd_connect = false;
  1883. #if defined(CONFIG_SECDP)
  1884. secdp_send_hpd_event(&dp->sec, false);
  1885. cancel_delayed_work(&dp->sec.hpd.noti_work);
  1886. cancel_delayed_work_sync(&dp->sec.hdcp.start_work);
  1887. cancel_delayed_work(&dp->sec.link_status_work);
  1888. cancel_delayed_work(&dp->sec.poor_discon_work);
  1889. secdp_link_backoff_stop();
  1890. #endif
  1891. dp_audio_enable(dp, false);
  1892. if (dp->mst.mst_active) {
  1893. dp_display_process_mst_hpd_low(dp, skip_wait);
  1894. } else {
  1895. if ((dp_display_state_is(DP_STATE_CONNECT_NOTIFIED) ||
  1896. dp_display_state_is(DP_STATE_ENABLED)))
  1897. rc = dp_display_send_hpd_notification(dp, skip_wait);
  1898. }
  1899. mutex_lock(&dp->session_lock);
  1900. if (!dp->active_stream_cnt)
  1901. dp->ctrl->off(dp->ctrl);
  1902. mutex_unlock(&dp->session_lock);
  1903. dp->panel->video_test = false;
  1904. #if defined(CONFIG_SECDP)
  1905. secdp_set_wakelock(dp, false);
  1906. #endif
  1907. return rc;
  1908. }
  1909. static int dp_display_aux_switch_callback(struct notifier_block *self,
  1910. unsigned long event, void *data)
  1911. {
  1912. return 0;
  1913. }
  1914. static int dp_display_init_aux_switch(struct dp_display_private *dp)
  1915. {
  1916. int rc = 0;
  1917. struct notifier_block nb;
  1918. const u32 max_retries = 50;
  1919. u32 retry;
  1920. if (dp->aux_switch_ready)
  1921. return rc;
  1922. if (!dp->aux->switch_register_notifier)
  1923. return rc;
  1924. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);
  1925. nb.notifier_call = dp_display_aux_switch_callback;
  1926. nb.priority = 0;
  1927. /*
  1928. * Iteratively wait for reg notifier which confirms that fsa driver is probed.
  1929. * Bootup DP with cable connected usecase can hit this scenario.
  1930. */
  1931. for (retry = 0; retry < max_retries; retry++) {
  1932. rc = dp->aux->switch_register_notifier(&nb, dp->aux_switch_node);
  1933. if (rc == 0) {
  1934. DP_DEBUG("registered notifier successfully\n");
  1935. dp->aux_switch_ready = true;
  1936. break;
  1937. } else {
  1938. DP_DEBUG("failed to register notifier retry=%d rc=%d\n", retry, rc);
  1939. msleep(100);
  1940. }
  1941. }
  1942. if (retry == max_retries) {
  1943. DP_WARN("Failed to register fsa notifier\n");
  1944. dp->aux_switch_ready = false;
  1945. return rc;
  1946. }
  1947. if (dp->aux->switch_unregister_notifier)
  1948. dp->aux->switch_unregister_notifier(&nb, dp->aux_switch_node);
  1949. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, rc);
  1950. return rc;
  1951. }
  1952. static int dp_display_usbpd_configure_cb(struct device *dev)
  1953. {
  1954. int rc = 0;
  1955. struct dp_display_private *dp;
  1956. if (!dev) {
  1957. DP_ERR("invalid dev\n");
  1958. return -EINVAL;
  1959. }
  1960. dp = dev_get_drvdata(dev);
  1961. if (!dp) {
  1962. DP_ERR("no driver data found\n");
  1963. return -ENODEV;
  1964. }
  1965. if (!dp->debug->sim_mode && !dp->no_aux_switch
  1966. && !dp->parser->gpio_aux_switch && dp->aux_switch_node && dp->aux->switch_configure) {
  1967. rc = dp_display_init_aux_switch(dp);
  1968. if (rc)
  1969. return rc;
  1970. rc = dp->aux->switch_configure(dp->aux, true, dp->hpd->orientation);
  1971. if (rc)
  1972. return rc;
  1973. }
  1974. mutex_lock(&dp->session_lock);
  1975. if (dp_display_state_is(DP_STATE_TUI_ACTIVE)) {
  1976. dp_display_state_log("[TUI is active]");
  1977. mutex_unlock(&dp->session_lock);
  1978. return 0;
  1979. }
  1980. dp_display_state_remove(DP_STATE_ABORTED);
  1981. dp_display_state_add(DP_STATE_CONFIGURED);
  1982. rc = dp_display_host_init(dp);
  1983. if (rc) {
  1984. DP_ERR("Host init Failed");
  1985. mutex_unlock(&dp->session_lock);
  1986. return rc;
  1987. }
  1988. /* check for hpd high */
  1989. if (dp->hpd->hpd_high)
  1990. queue_work(dp->wq, &dp->connect_work);
  1991. else
  1992. dp->process_hpd_connect = true;
  1993. mutex_unlock(&dp->session_lock);
  1994. return 0;
  1995. }
  1996. static void dp_display_clear_reservation(struct dp_display *dp, struct dp_panel *panel)
  1997. {
  1998. struct dp_display_private *dp_display;
  1999. if (!dp || !panel) {
  2000. DP_ERR("invalid params\n");
  2001. return;
  2002. }
  2003. dp_display = container_of(dp, struct dp_display_private, dp_display);
  2004. mutex_lock(&dp_display->accounting_lock);
  2005. dp_display->tot_lm_blks_in_use -= panel->max_lm;
  2006. panel->max_lm = 0;
  2007. if (!dp_display->active_stream_cnt)
  2008. dp_display->tot_lm_blks_in_use = 0;
  2009. mutex_unlock(&dp_display->accounting_lock);
  2010. }
  2011. static void dp_display_clear_dsc_resources(struct dp_display_private *dp,
  2012. struct dp_panel *panel)
  2013. {
  2014. dp->tot_dsc_blks_in_use -= panel->dsc_blks_in_use;
  2015. panel->dsc_blks_in_use = 0;
  2016. }
  2017. static int dp_display_get_mst_pbn_div(struct dp_display *dp_display)
  2018. {
  2019. struct dp_display_private *dp;
  2020. u32 link_rate, lane_count;
  2021. if (!dp_display) {
  2022. DP_ERR("invalid params\n");
  2023. return 0;
  2024. }
  2025. dp = container_of(dp_display, struct dp_display_private, dp_display);
  2026. link_rate = drm_dp_bw_code_to_link_rate(dp->link->link_params.bw_code);
  2027. lane_count = dp->link->link_params.lane_count;
  2028. return link_rate * lane_count / 54000;
  2029. }
  2030. static int dp_display_stream_pre_disable(struct dp_display_private *dp,
  2031. struct dp_panel *dp_panel)
  2032. {
  2033. if (!dp->active_stream_cnt) {
  2034. DP_WARN("streams already disabled cnt=%d\n",
  2035. dp->active_stream_cnt);
  2036. return 0;
  2037. }
  2038. dp->ctrl->stream_pre_off(dp->ctrl, dp_panel);
  2039. return 0;
  2040. }
  2041. static void dp_display_stream_disable(struct dp_display_private *dp,
  2042. struct dp_panel *dp_panel)
  2043. {
  2044. if (!dp->active_stream_cnt) {
  2045. DP_WARN("streams already disabled cnt=%d\n",
  2046. dp->active_stream_cnt);
  2047. return;
  2048. }
  2049. if (dp_panel->stream_id == DP_STREAM_MAX ||
  2050. !dp->active_panels[dp_panel->stream_id]) {
  2051. DP_ERR("panel is already disabled\n");
  2052. return;
  2053. }
  2054. dp_display_clear_dsc_resources(dp, dp_panel);
  2055. DP_DEBUG("stream_id=%d, active_stream_cnt=%d, tot_dsc_blks_in_use=%d\n",
  2056. dp_panel->stream_id, dp->active_stream_cnt,
  2057. dp->tot_dsc_blks_in_use);
  2058. dp->ctrl->stream_off(dp->ctrl, dp_panel);
  2059. dp->active_panels[dp_panel->stream_id] = NULL;
  2060. dp->active_stream_cnt--;
  2061. }
  2062. static void dp_display_clean(struct dp_display_private *dp, bool skip_wait)
  2063. {
  2064. int idx;
  2065. struct dp_panel *dp_panel;
  2066. struct dp_link_hdcp_status *status = &dp->link->hdcp_status;
  2067. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  2068. if (dp_display_state_is(DP_STATE_TUI_ACTIVE)) {
  2069. DP_WARN("TUI is active\n");
  2070. return;
  2071. }
  2072. #if defined(CONFIG_SECDP)
  2073. cancel_delayed_work(&dp->sec.hpd.noti_work);
  2074. cancel_delayed_work_sync(&dp->sec.hdcp.start_work);
  2075. cancel_delayed_work(&dp->sec.link_status_work);
  2076. cancel_delayed_work(&dp->sec.poor_discon_work);
  2077. secdp_link_backoff_stop();
  2078. #endif
  2079. if (dp_display_is_hdcp_enabled(dp) &&
  2080. status->hdcp_state != HDCP_STATE_INACTIVE) {
  2081. cancel_delayed_work_sync(&dp->hdcp_cb_work);
  2082. if (dp->hdcp.ops->off)
  2083. dp->hdcp.ops->off(dp->hdcp.data);
  2084. dp_display_update_hdcp_status(dp, true);
  2085. }
  2086. for (idx = DP_STREAM_0; idx < DP_STREAM_MAX; idx++) {
  2087. if (!dp->active_panels[idx])
  2088. continue;
  2089. dp_panel = dp->active_panels[idx];
  2090. if (dp_panel->audio_supported)
  2091. dp_panel->audio->off(dp_panel->audio, skip_wait);
  2092. if (!skip_wait)
  2093. dp_display_stream_pre_disable(dp, dp_panel);
  2094. dp_display_stream_disable(dp, dp_panel);
  2095. dp_display_clear_reservation(&dp->dp_display, dp_panel);
  2096. dp_panel->deinit(dp_panel, 0);
  2097. }
  2098. dp_display_state_remove(DP_STATE_ENABLED | DP_STATE_CONNECTED);
  2099. dp->ctrl->off(dp->ctrl);
  2100. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  2101. }
  2102. static int dp_display_handle_disconnect(struct dp_display_private *dp, bool skip_wait)
  2103. {
  2104. int rc;
  2105. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  2106. rc = dp_display_process_hpd_low(dp, skip_wait);
  2107. if (rc) {
  2108. /* cancel any pending request */
  2109. dp->ctrl->abort(dp->ctrl, true);
  2110. dp->aux->abort(dp->aux, true);
  2111. }
  2112. mutex_lock(&dp->session_lock);
  2113. if (dp_display_state_is(DP_STATE_ENABLED))
  2114. dp_display_clean(dp, skip_wait);
  2115. dp_display_host_unready(dp);
  2116. dp->tot_lm_blks_in_use = 0;
  2117. mutex_unlock(&dp->session_lock);
  2118. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  2119. return rc;
  2120. }
  2121. static void dp_display_disconnect_sync(struct dp_display_private *dp)
  2122. {
  2123. int disconnect_delay_ms;
  2124. #if defined(CONFIG_SECDP)
  2125. DP_ENTER("\n");
  2126. if (dp->link->poor_connection) {
  2127. secdp_send_poor_connection_event(dp, false);
  2128. dp->link->status_update_cnt = 0;
  2129. dp->sec.hdcp.retry = 0;
  2130. }
  2131. #endif
  2132. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  2133. /* cancel any pending request */
  2134. dp_display_state_add(DP_STATE_ABORTED);
  2135. dp->ctrl->abort(dp->ctrl, true);
  2136. dp->aux->abort(dp->aux, true);
  2137. /* wait for idle state */
  2138. cancel_work_sync(&dp->connect_work);
  2139. cancel_work_sync(&dp->attention_work);
  2140. cancel_work_sync(&dp->disconnect_work);
  2141. flush_workqueue(dp->wq);
  2142. /*
  2143. * Delay the teardown of the mainlink for better interop experience.
  2144. * It is possible that certain sinks can issue an HPD high immediately
  2145. * following an HPD low as soon as they detect the mainlink being
  2146. * turned off. This can sometimes result in the HPD low pulse getting
  2147. * lost with certain cable. This issue is commonly seen when running
  2148. * DP LL CTS test 4.2.1.3.
  2149. */
  2150. disconnect_delay_ms = min_t(u32, dp->debug->disconnect_delay_ms,
  2151. (u32) MAX_DISCONNECT_DELAY_MS);
  2152. DP_DEBUG("disconnect delay = %d ms\n", disconnect_delay_ms);
  2153. msleep(disconnect_delay_ms);
  2154. dp_display_handle_disconnect(dp, false);
  2155. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state,
  2156. disconnect_delay_ms);
  2157. }
  2158. static int dp_display_usbpd_disconnect_cb(struct device *dev)
  2159. {
  2160. int rc = 0;
  2161. struct dp_display_private *dp;
  2162. if (!dev) {
  2163. DP_ERR("invalid dev\n");
  2164. rc = -EINVAL;
  2165. goto end;
  2166. }
  2167. dp = dev_get_drvdata(dev);
  2168. if (!dp) {
  2169. DP_ERR("no driver data found\n");
  2170. rc = -ENODEV;
  2171. goto end;
  2172. }
  2173. #if defined(CONFIG_SECDP)
  2174. DP_ENTER("\n");
  2175. if (atomic_read(&dp->sec.noti_status)) {
  2176. reinit_completion(&dp->notification_comp);
  2177. DP_INFO("wait++, psm:%d\n", dp->debug->psm_enabled);
  2178. if (atomic_read(&dp->sec.noti_status) &&
  2179. !wait_for_completion_timeout(&dp->notification_comp, HZ * 5)) {
  2180. DP_ERR("notification_comp timeout!\n");
  2181. }
  2182. DP_INFO("wait--\n");
  2183. }
  2184. #endif
  2185. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state,
  2186. dp->debug->psm_enabled);
  2187. /* skip if a disconnect is already in progress */
  2188. if (dp_display_state_is(DP_STATE_ABORTED) &&
  2189. dp_display_state_is(DP_STATE_READY)) {
  2190. DP_DEBUG("disconnect already in progress\n");
  2191. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_CASE1, dp->state);
  2192. return 0;
  2193. }
  2194. if (dp->debug->psm_enabled && dp_display_state_is(DP_STATE_READY))
  2195. dp->link->psm_config(dp->link, &dp->panel->link_info, true);
  2196. dp->ctrl->abort(dp->ctrl, true);
  2197. dp->aux->abort(dp->aux, true);
  2198. if (!dp->debug->sim_mode && !dp->no_aux_switch
  2199. && !dp->parser->gpio_aux_switch && dp->aux->switch_configure)
  2200. dp->aux->switch_configure(dp->aux, false, ORIENTATION_NONE);
  2201. dp_display_disconnect_sync(dp);
  2202. mutex_lock(&dp->session_lock);
  2203. dp_display_host_deinit(dp);
  2204. dp_display_state_remove(DP_STATE_CONFIGURED);
  2205. mutex_unlock(&dp->session_lock);
  2206. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  2207. #if defined(CONFIG_SECDP)
  2208. /* unset should be here because it's set at above
  2209. * "dp_display_disconnect_sync()"
  2210. */
  2211. atomic_set(&dp->sec.noti_status, 0);
  2212. complete(&dp->sec.dp_off_comp);
  2213. DP_DEBUG("[usbpd_disconnect_cb] done\n");
  2214. #endif
  2215. end:
  2216. return rc;
  2217. }
  2218. static int dp_display_stream_enable(struct dp_display_private *dp,
  2219. struct dp_panel *dp_panel)
  2220. {
  2221. int rc = 0;
  2222. rc = dp->ctrl->stream_on(dp->ctrl, dp_panel);
  2223. if (dp->debug->tpg_pattern)
  2224. dp_panel->tpg_config(dp_panel, dp->debug->tpg_pattern);
  2225. if (!rc) {
  2226. dp->active_panels[dp_panel->stream_id] = dp_panel;
  2227. dp->active_stream_cnt++;
  2228. }
  2229. DP_DEBUG("dp active_stream_cnt:%d, tot_dsc_blks_in_use=%d\n",
  2230. dp->active_stream_cnt, dp->tot_dsc_blks_in_use);
  2231. return rc;
  2232. }
  2233. static void dp_display_mst_attention(struct dp_display_private *dp)
  2234. {
  2235. if (dp->mst.mst_active && dp->mst.cbs.hpd_irq)
  2236. dp->mst.cbs.hpd_irq(&dp->dp_display);
  2237. DP_MST_DEBUG("mst_attention_work. mst_active:%d\n", dp->mst.mst_active);
  2238. }
  2239. static void dp_display_attention_work(struct work_struct *work)
  2240. {
  2241. struct dp_display_private *dp = container_of(work,
  2242. struct dp_display_private, attention_work);
  2243. int rc = 0;
  2244. #if defined(CONFIG_SECDP)
  2245. if (!secdp_get_hpd_status() || !secdp_get_cable_status()) {
  2246. DP_INFO("hpd_low or cable_lost\n");
  2247. return;
  2248. }
  2249. DP_DEBUG("request:0x%x\n", dp->link->sink_request);
  2250. #endif
  2251. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  2252. mutex_lock(&dp->session_lock);
  2253. SDE_EVT32_EXTERNAL(dp->state);
  2254. if (dp_display_state_is(DP_STATE_ABORTED)) {
  2255. DP_INFO("Hpd off, not handling any attention\n");
  2256. mutex_unlock(&dp->session_lock);
  2257. goto exit;
  2258. }
  2259. if (!dp_display_state_is(DP_STATE_READY)) {
  2260. mutex_unlock(&dp->session_lock);
  2261. goto mst_attention;
  2262. }
  2263. if (dp->link->process_request(dp->link)) {
  2264. mutex_unlock(&dp->session_lock);
  2265. goto cp_irq;
  2266. }
  2267. mutex_unlock(&dp->session_lock);
  2268. SDE_EVT32_EXTERNAL(dp->state, dp->link->sink_request);
  2269. if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) {
  2270. SDE_EVT32_EXTERNAL(dp->state, DS_PORT_STATUS_CHANGED);
  2271. if (!dp->mst.mst_active) {
  2272. if (dp_display_is_sink_count_zero(dp)) {
  2273. dp_display_handle_disconnect(dp, false);
  2274. } else {
  2275. /*
  2276. * connect work should take care of sending
  2277. * the HPD notification.
  2278. */
  2279. queue_work(dp->wq, &dp->connect_work);
  2280. }
  2281. }
  2282. #if defined(CONFIG_SECDP)
  2283. /*add some delay to guarantee hpd event handling in framework*/
  2284. msleep(60);
  2285. #endif
  2286. goto mst_attention;
  2287. }
  2288. if (dp->link->sink_request & DP_TEST_LINK_VIDEO_PATTERN) {
  2289. SDE_EVT32_EXTERNAL(dp->state, DP_TEST_LINK_VIDEO_PATTERN);
  2290. dp_display_handle_disconnect(dp, false);
  2291. dp->panel->video_test = true;
  2292. /*
  2293. * connect work should take care of sending
  2294. * the HPD notification.
  2295. */
  2296. queue_work(dp->wq, &dp->connect_work);
  2297. goto mst_attention;
  2298. }
  2299. if (dp->link->sink_request & (DP_TEST_LINK_PHY_TEST_PATTERN |
  2300. DP_TEST_LINK_TRAINING | DP_LINK_STATUS_UPDATED)) {
  2301. mutex_lock(&dp->session_lock);
  2302. dp_audio_enable(dp, false);
  2303. if (dp->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
  2304. SDE_EVT32_EXTERNAL(dp->state,
  2305. DP_TEST_LINK_PHY_TEST_PATTERN);
  2306. dp->ctrl->process_phy_test_request(dp->ctrl);
  2307. }
  2308. if (dp->link->sink_request & DP_TEST_LINK_TRAINING) {
  2309. SDE_EVT32_EXTERNAL(dp->state, DP_TEST_LINK_TRAINING);
  2310. dp->link->send_test_response(dp->link);
  2311. rc = dp->ctrl->link_maintenance(dp->ctrl);
  2312. }
  2313. if (dp->link->sink_request & DP_LINK_STATUS_UPDATED) {
  2314. SDE_EVT32_EXTERNAL(dp->state, DP_LINK_STATUS_UPDATED);
  2315. rc = dp->ctrl->link_maintenance(dp->ctrl);
  2316. }
  2317. if (!rc)
  2318. dp_audio_enable(dp, true);
  2319. mutex_unlock(&dp->session_lock);
  2320. if (rc)
  2321. goto exit;
  2322. if (dp->link->sink_request & (DP_TEST_LINK_PHY_TEST_PATTERN |
  2323. DP_TEST_LINK_TRAINING))
  2324. goto mst_attention;
  2325. }
  2326. cp_irq:
  2327. if (dp_display_is_hdcp_enabled(dp) && dp->hdcp.ops->cp_irq)
  2328. dp->hdcp.ops->cp_irq(dp->hdcp.data);
  2329. if (!dp->mst.mst_active) {
  2330. #if defined(CONFIG_SECDP)
  2331. if (dp->sec.hpd.noti_deferred) {
  2332. DP_INFO("noti_deferred! skip noti\n");
  2333. goto exit;
  2334. }
  2335. #endif
  2336. /*
  2337. * It is possible that the connect_work skipped sending
  2338. * the HPD notification if the attention message was
  2339. * already pending. Send the notification here to
  2340. * account for that. It is possible that the test sequence
  2341. * can trigger an unplug after DP_LINK_STATUS_UPDATED, before
  2342. * starting the next test case. Make sure to check the HPD status.
  2343. */
  2344. if (!dp_display_state_is(DP_STATE_ABORTED))
  2345. dp_display_send_hpd_notification(dp, false);
  2346. }
  2347. mst_attention:
  2348. dp_display_mst_attention(dp);
  2349. exit:
  2350. #if defined(CONFIG_SECDP)
  2351. if (dp->link->status_update_cnt > 9 && !dp->link->poor_connection) {
  2352. dp->link->poor_connection = true;
  2353. dp->sec.dex.status = dp->sec.dex.prev = dp->sec.dex.curr = DEX_DISABLED;
  2354. schedule_delayed_work(&dp->sec.link_status_work,
  2355. msecs_to_jiffies(10));
  2356. }
  2357. #endif
  2358. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  2359. }
  2360. static int dp_display_usbpd_attention_cb(struct device *dev)
  2361. {
  2362. struct dp_display_private *dp;
  2363. if (!dev) {
  2364. DP_ERR("invalid dev\n");
  2365. return -EINVAL;
  2366. }
  2367. dp = dev_get_drvdata(dev);
  2368. if (!dp) {
  2369. DP_ERR("no driver data found\n");
  2370. return -ENODEV;
  2371. }
  2372. DP_DEBUG("hpd_irq:%d, hpd_high:%d, power_on:%d, is_connected:%d\n",
  2373. dp->hpd->hpd_irq, dp->hpd->hpd_high,
  2374. !!dp_display_state_is(DP_STATE_ENABLED),
  2375. !!dp_display_state_is(DP_STATE_CONNECTED));
  2376. SDE_EVT32_EXTERNAL(dp->state, dp->hpd->hpd_irq, dp->hpd->hpd_high,
  2377. !!dp_display_state_is(DP_STATE_ENABLED),
  2378. !!dp_display_state_is(DP_STATE_CONNECTED));
  2379. #if !defined(CONFIG_SECDP)
  2380. if (!dp->hpd->hpd_high) {
  2381. dp_display_disconnect_sync(dp);
  2382. return 0;
  2383. }
  2384. /*
  2385. * Ignore all the attention messages except HPD LOW when TUI is
  2386. * active, so user mode can be notified of the disconnect event. This
  2387. * allows user mode to tear down the control path after the TUI
  2388. * session is over. Ideally this should never happen, but on the off
  2389. * chance that there is a race condition in which there is a IRQ HPD
  2390. * during tear down of DP at TUI start then this check might help avoid
  2391. * a potential issue accessing registers in attention processing.
  2392. */
  2393. if (dp_display_state_is(DP_STATE_TUI_ACTIVE)) {
  2394. DP_WARN("TUI is active\n");
  2395. return 0;
  2396. }
  2397. if (dp->hpd->hpd_irq && dp_display_state_is(DP_STATE_READY)) {
  2398. queue_work(dp->wq, &dp->attention_work);
  2399. complete_all(&dp->attention_comp);
  2400. } else if (dp->process_hpd_connect ||
  2401. !dp_display_state_is(DP_STATE_CONNECTED)) {
  2402. dp_display_state_remove(DP_STATE_ABORTED);
  2403. queue_work(dp->wq, &dp->connect_work);
  2404. } else {
  2405. DP_DEBUG("ignored\n");
  2406. }
  2407. #endif
  2408. return 0;
  2409. }
  2410. #if defined(CONFIG_SECDP)
  2411. #ifdef SECDP_SELF_TEST
  2412. static void secdp_hdcp_start_work(struct work_struct *work);
  2413. static void secdp_self_test_hdcp_on(void)
  2414. {
  2415. DP_ENTER("\n");
  2416. secdp_hdcp_start_work(NULL);
  2417. }
  2418. static void secdp_self_test_hdcp_off(void)
  2419. {
  2420. struct dp_display_private *dp = container_of(g_dp_display,
  2421. struct dp_display_private, dp_display);
  2422. struct dp_link_hdcp_status *status;
  2423. DP_ENTER("\n");
  2424. if (secdp_get_cable_status() && dp_display_state_is(DP_STATE_ENABLED)) {
  2425. status = &dp->link->hdcp_status;
  2426. if (dp_display_is_hdcp_enabled(dp) &&
  2427. status->hdcp_state != HDCP_STATE_INACTIVE) {
  2428. cancel_delayed_work(&dp->hdcp_cb_work);
  2429. usleep_range(3000, 5000);
  2430. if (dp->hdcp.ops->off)
  2431. dp->hdcp.ops->off(dp->hdcp.data);
  2432. dp_display_update_hdcp_status(dp, true);
  2433. }
  2434. }
  2435. }
  2436. #endif
  2437. #if IS_ENABLED(CONFIG_PDIC_NOTIFIER)
2438. /** true if it's a DP_DISCONNECT event, false otherwise */
  2439. static bool secdp_is_disconnect(PD_NOTI_TYPEDEF *noti)
  2440. {
  2441. bool ret = false;
  2442. if (noti->id == PDIC_NOTIFY_ID_DP_CONNECT &&
  2443. noti->sub1 == PDIC_NOTIFY_DETACH)
  2444. ret = true;
  2445. return ret;
  2446. }
2447. /** true if it's an HPD_IRQ event, false otherwise */
  2448. static bool secdp_is_hpd_irq(PD_NOTI_TYPEDEF *noti)
  2449. {
  2450. bool ret = false;
  2451. if (noti->id == PDIC_NOTIFY_ID_DP_HPD &&
  2452. noti->sub1 == PDIC_NOTIFY_HIGH &&
  2453. noti->sub2 == PDIC_NOTIFY_IRQ)
  2454. ret = true;
  2455. return ret;
  2456. }
2457. /** true if it's an HPD_LOW event, false otherwise */
  2458. static bool secdp_is_hpd_low(PD_NOTI_TYPEDEF *noti)
  2459. {
  2460. bool ret = false;
  2461. if (noti->id == PDIC_NOTIFY_ID_DP_HPD &&
  2462. noti->sub1 == PDIC_NOTIFY_LOW)
  2463. ret = true;
  2464. return ret;
  2465. }
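/*
 * Dispatch a PDIC notification: DP_DISCONNECT goes to the disconnect
 * callback, HPD_LOW triggers a synchronous teardown (plus host deinit when
 * no streams remain active), HPD_IRQ queues attention_work (or is treated
 * as HPD_HIGH when the link is not yet connected, e.g. on a DeX reconnect),
 * and anything else that reaches the hpd_high path schedules connect_work.
 * Called with sec->attention_lock held in the PDIC notifier path.
 */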
  2466. static void secdp_process_attention(struct dp_display_private *dp,
  2467. PD_NOTI_TYPEDEF *noti)
  2468. {
  2469. struct secdp_misc *sec = NULL;
  2470. if (!noti || !dp)
  2471. goto end;
  2472. DP_DEBUG("sub1:%d sub2:%d sub3:%d\n", noti->sub1, noti->sub2, noti->sub3);
  2473. sec = &dp->sec;
  2474. sec->dex.reconnecting = false;
  2475. if (secdp_is_disconnect(noti)) {
  2476. cancel_delayed_work_sync(&sec->poor_discon_work);
  2477. dp_display_usbpd_disconnect_cb(&dp->pdev->dev);
  2478. goto end;
  2479. }
  2480. if (secdp_is_hpd_low(noti)) {
  2481. sec->dex.status = sec->dex.prev = sec->dex.curr = DEX_DISABLED;
  2482. secdp_clear_link_status_cnt(dp->link);
  2483. dp_display_disconnect_sync(dp);
  2484. DP_INFO("active_stream_cnt %d\n", dp->active_stream_cnt);
  2485. if (!dp->active_stream_cnt)
  2486. dp_display_host_deinit(dp);
  2487. goto end;
  2488. }
  2489. /*
  2490. * see "dp_display_usbpd_attention_cb" at sm8350 post-cs2
  2491. *
  2492. * Ignore all the attention messages except HPD LOW when TUI is
  2493. * active, so user mode can be notified of the disconnect event. This
  2494. * allows user mode to tear down the control path after the TUI
2495. * session is over. Ideally this should never happen, but on the off
2496. * chance that there is a race condition in which an IRQ HPD arrives
2497. * while DP is being torn down at TUI start, this check helps avoid
2498. * a potential issue with accessing registers during attention processing.
  2499. */
  2500. if (dp_display_state_is(DP_STATE_TUI_ACTIVE)) {
  2501. DP_WARN("TUI is active\n");
  2502. goto end;
  2503. }
  2504. if (secdp_is_hpd_irq(noti)) {
  2505. if (secdp_get_reboot_status()) {
  2506. DP_INFO("shutdown!\n");
  2507. goto end;
  2508. }
  2509. if (!secdp_get_cable_status()) {
  2510. DP_INFO("cable is out!\n");
  2511. goto end;
  2512. }
  2513. if (dp->link->poor_connection) {
  2514. DP_INFO("poor connection!\n");
  2515. goto end;
  2516. }
  2517. if (!dp_display_state_is(DP_STATE_ENABLED)) {
  2518. if (dp->mst.mst_active)
  2519. goto attention;
  2520. /* SECDP_ADT_SST */
  2521. if (dp->link->poor_connection) {
  2522. DP_INFO("poor connection!!\n");
  2523. goto end;
  2524. }
  2525. if (!dp_display_state_is(DP_STATE_CONNECTED)) {
2526. /* an AUX timeout can occur in the DeX
2527. * reconnect scenario, so re-init AUX here
2528. */
  2529. dp_display_host_unready(dp);
  2530. dp_display_host_deinit(dp);
  2531. usleep_range(5000, 6000);
  2532. DP_DEBUG("handle HPD_IRQ as HPD_HIGH!\n");
  2533. goto hpd_high;
  2534. }
  2535. }
  2536. attention:
2537. /* irq_hpd: do the same as dp_display_usbpd_attention_cb() */
  2538. queue_work(dp->wq, &dp->attention_work);
  2539. complete_all(&dp->attention_comp);
  2540. goto end;
  2541. }
  2542. hpd_high:
  2543. if (sec->extdisp_off) {
  2544. DP_INFO("extdisp_off\n");
  2545. goto end;
  2546. }
2547. /* hpd high: do the same as dp_display_usbpd_attention_cb() */
  2548. DP_INFO("connected:%d\n", dp_display_state_is(DP_STATE_CONNECTED));
  2549. if (!dp_display_state_is(DP_STATE_CONNECTED)) {
  2550. secdp_clear_link_status_cnt(dp->link);
  2551. dp_display_state_remove(DP_STATE_ABORTED);
  2552. queue_work(dp->wq, &dp->connect_work);
  2553. }
  2554. end:
  2555. return;
  2556. }
  2557. static void secdp_adapter_init(struct dp_display_private *dp)
  2558. {
  2559. struct secdp_adapter *adapter = &dp->sec.adapter;
  2560. memset(adapter, 0, sizeof(struct secdp_adapter));
  2561. }
  2562. static void secdp_adapter_check_legacy(struct dp_display_private *dp)
  2563. {
  2564. struct secdp_adapter *adapter = &dp->sec.adapter;
  2565. bool legacy = false;
  2566. if (adapter->ven_id != SAMSUNG_VENDOR_ID)
  2567. goto end;
  2568. switch (adapter->prod_id) {
  2569. case DEXPAD_PRODUCT_ID:
  2570. case DEXCABLE_PRODUCT_ID:
  2571. case MPA2_PRODUCT_ID:
  2572. case MPA3_PRODUCT_ID:
  2573. legacy = true;
  2574. break;
  2575. default:
  2576. break;
  2577. }
  2578. DP_INFO("ss_legacy:%d\n", legacy);
  2579. end:
  2580. adapter->ss_legacy = legacy;
  2581. }
  2582. /**
2583. * Check the connected dongle type using the given VID and PID. Based on
2584. * this info, we can decide the maximum DeX resolution for that cable/adapter.
  2585. */
  2586. static void secdp_adapter_check_dex(struct dp_display_private *dp)
  2587. {
  2588. struct secdp_adapter *adapter = &dp->sec.adapter;
  2589. enum dex_support_res_t dex_type = DEX_RES_DFT;
  2590. bool ss_fan = false;
  2591. #ifdef NOT_SUPPORT_DEX_RES_CHANGE
  2592. adapter->dex_type = DEX_RES_NOT_SUPPORT;
  2593. return;
  2594. #endif
  2595. if (dp->parser->dex_dft_res > DEX_RES_NOT_SUPPORT) {
  2596. dex_type = dp->parser->dex_dft_res;
  2597. goto end;
  2598. }
  2599. if (adapter->ven_id != SAMSUNG_VENDOR_ID)
  2600. goto end;
  2601. switch (adapter->prod_id) {
  2602. case DEXDOCK_PRODUCT_ID:
  2603. case DEXPAD_PRODUCT_ID:
  2604. dex_type = DEX_RES_MAX;
  2605. ss_fan = true;
  2606. break;
  2607. default:
  2608. break;
  2609. }
  2610. end:
  2611. DP_INFO("fan:%d %s\n", ss_fan, secdp_dex_res_to_string(dex_type));
  2612. adapter->dex_type = dex_type;
  2613. }
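/*
 * Cache the adapter VID/PID carried in the PDIC connect notification and
 * derive the Samsung-genuine, DeX-resolution and legacy-adapter flags from
 * them. On disconnect the adapter info is simply reset.
 */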
  2614. static void secdp_adapter_check(struct dp_display_private *dp,
  2615. PD_NOTI_TYPEDEF *noti, bool connect)
  2616. {
  2617. struct secdp_adapter *adapter = &dp->sec.adapter;
  2618. secdp_adapter_init(dp);
  2619. if (!connect)
  2620. goto end;
  2621. adapter->ven_id = (uint)(noti->sub2);
  2622. adapter->prod_id = (uint)(noti->sub3);
  2623. if (adapter->ven_id == SAMSUNG_VENDOR_ID)
  2624. adapter->ss_genuine = true;
  2625. DP_INFO("venId:0x%04x prodId:0x%04x genuine:%d\n", adapter->ven_id,
  2626. adapter->prod_id, adapter->ss_genuine);
  2627. secdp_adapter_check_dex(dp);
  2628. secdp_adapter_check_legacy(dp);
  2629. end:
  2630. return;
  2631. }
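/*
 * (Re)initialize per-connection state for both the connect and disconnect
 * paths: plug orientation, multi-function flag, DeX status, branch-device
 * info and link status counters, plus the bigdata logging hooks when
 * CONFIG_SECDP_BIGDATA is enabled.
 */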
  2632. static void secdp_pdic_connect_init(struct dp_display_private *dp,
  2633. PD_NOTI_TYPEDEF *noti, bool connect)
  2634. {
  2635. struct secdp_misc *sec = &dp->sec;
  2636. dp->hpd->orientation = connect ? secdp_get_plug_orientation(dp->power) : ORIENTATION_NONE;
  2637. dp->hpd->multi_func = false;
  2638. sec->pdic_noti.reset = false;
  2639. sec->cable_connected = dp->hpd->alt_mode_cfg_done = connect;
  2640. sec->link_conf = false;
  2641. sec->hdcp.retry = 0;
  2642. secdp_adapter_check(dp, noti, connect);
2643. /* set the flags here as soon as the cable is disconnected;
2644. * resource cleanup is done later in secdp_process_attention()
  2645. */
  2646. sec->dex.res = connect ?
  2647. sec->adapter.dex_type : DEX_RES_NOT_SUPPORT;
  2648. sec->dex.prev = sec->dex.curr = sec->dex.status = DEX_DISABLED;
  2649. sec->dex.reconnecting = false;
  2650. secdp_clear_branch_info(dp);
  2651. secdp_clear_link_status_cnt(dp->link);
  2652. #if defined(CONFIG_SECDP_BIGDATA)
  2653. if (connect) {
  2654. secdp_bigdata_connection();
  2655. secdp_bigdata_save_item(BD_ORIENTATION,
  2656. (dp->hpd->orientation == ORIENTATION_CC1) ? "CC1" : "CC2");
  2657. secdp_bigdata_save_item(BD_ADT_VID, noti->sub2);
  2658. secdp_bigdata_save_item(BD_ADT_PID, noti->sub3);
  2659. } else {
  2660. secdp_bigdata_disconnection();
  2661. }
  2662. #endif
  2663. }
  2664. static void secdp_pdic_handle_connect(struct dp_display_private *dp,
  2665. PD_NOTI_TYPEDEF *noti)
  2666. {
  2667. secdp_pdic_connect_init(dp, noti, true);
  2668. #ifndef SECDP_USB_CONCURRENCY
  2669. /* see dp_display_usbpd_configure_cb() */
  2670. dp_display_host_init(dp);
  2671. #endif
  2672. #ifdef SECDP_SELF_TEST
  2673. if (secdp_self_test_status(ST_CONNECTION_TEST) >= 0)
  2674. secdp_self_test_start_reconnect(dp->sec.sysfs, secdp_reconnect);
  2675. #endif
  2676. }
  2677. static void secdp_pdic_handle_disconnect(struct dp_display_private *dp,
  2678. PD_NOTI_TYPEDEF *noti)
  2679. {
  2680. struct secdp_misc *sec = &dp->sec;
  2681. sec->dp_disconnecting = true;
  2682. atomic_set(&sec->hpd.val, 0);
  2683. dp->hpd->hpd_high = false;
  2684. secdp_power_unset_gpio(dp->power);
  2685. secdp_redriver_onoff(dp->power, false, 0);
  2686. secdp_pdic_connect_init(dp, noti, false);
  2687. }
  2688. static void secdp_pdic_handle_linkconf(struct dp_display_private *dp,
  2689. PD_NOTI_TYPEDEF *noti)
  2690. {
  2691. struct secdp_misc *sec = &dp->sec;
  2692. sec->link_conf = true;
  2693. /* see dp_display_usbpd_configure_cb() */
  2694. dp_display_state_remove(DP_STATE_ABORTED);
  2695. dp_display_state_add(DP_STATE_CONFIGURED);
  2696. #ifdef SECDP_USB_CONCURRENCY
  2697. if (noti->sub1 == PDIC_NOTIFY_DP_PIN_B ||
  2698. noti->sub1 == PDIC_NOTIFY_DP_PIN_D ||
  2699. noti->sub1 == PDIC_NOTIFY_DP_PIN_F) {
  2700. dp->hpd->multi_func = true;
  2701. secdp_redriver_onoff(dp->power, true, 2);
  2702. } else {
  2703. dp->hpd->multi_func = false;
  2704. secdp_redriver_onoff(dp->power, true, 4);
  2705. }
  2706. DP_INFO("multi_func:%d\n", dp->hpd->multi_func);
  2707. #endif
  2708. #if defined(CONFIG_SECDP_BIGDATA)
  2709. secdp_bigdata_save_item(BD_LINK_CONFIGURE, noti->sub1 + 'A' - 1);
  2710. #endif
  2711. }
  2712. static void secdp_pdic_handle_hpd(struct dp_display_private *dp,
  2713. PD_NOTI_TYPEDEF *noti)
  2714. {
  2715. struct secdp_misc *sec = &dp->sec;
  2716. if (noti->sub1 == PDIC_NOTIFY_HIGH) {
  2717. bool flip = false;
  2718. atomic_set(&sec->hpd.val, 1);
  2719. dp->hpd->hpd_high = true;
  2720. secdp_send_hpd_event(&dp->sec, true);
  2721. if (dp->hpd->orientation == ORIENTATION_CC2)
  2722. flip = true;
  2723. secdp_power_set_gpio(dp->power, flip);
  2724. } else/* if (noti->sub1 == PDIC_NOTIFY_LOW)*/ {
  2725. atomic_set(&sec->hpd.val, 0);
  2726. dp->hpd->hpd_high = false;
  2727. secdp_power_unset_gpio(dp->power);
  2728. }
  2729. }
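/*
 * Main PDIC notifier callback. Events addressed to PDIC_NOTIFY_DEV_DP are
 * demultiplexed by noti.id (ATTACH, DP_CONNECT, DP_LINK_CONF, DP_HPD) and
 * then funneled into secdp_process_attention() under attention_lock. On a
 * disconnect event it additionally waits for dp_off_comp so that teardown
 * has finished before the USB side is switched out of DP mode.
 */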
  2730. static int secdp_pdic_noti_cb(struct notifier_block *nb, unsigned long action,
  2731. void *data)
  2732. {
  2733. struct dp_display_private *dp = container_of(g_dp_display,
  2734. struct dp_display_private, dp_display);
  2735. struct secdp_misc *sec = &dp->sec;
  2736. PD_NOTI_TYPEDEF noti = *(PD_NOTI_TYPEDEF *)data;
  2737. int rc = 0;
  2738. if (noti.dest != PDIC_NOTIFY_DEV_DP) {
  2739. /*DP_DEBUG("not DP, skip\n");*/
  2740. goto end;
  2741. }
  2742. switch (noti.id) {
  2743. case PDIC_NOTIFY_ID_ATTACH:
  2744. DP_INFO("PDIC_NOTIFY_ID_ATTACH\n");
  2745. break;
  2746. case PDIC_NOTIFY_ID_DP_CONNECT:
  2747. secdp_logger_set_max_count(300);
  2748. DP_INFO("PDIC_NOTIFY_ID_DP_CONNECT<%d>\n", noti.sub1);
  2749. if (noti.sub1 == PDIC_NOTIFY_ATTACH) {
  2750. secdp_pdic_handle_connect(dp, &noti);
  2751. } else if (noti.sub1 == PDIC_NOTIFY_DETACH) {
  2752. if (!secdp_get_cable_status()) {
  2753. DP_INFO("already disconnected\n");
  2754. goto end;
  2755. }
  2756. secdp_pdic_handle_disconnect(dp, &noti);
  2757. }
  2758. break;
  2759. case PDIC_NOTIFY_ID_DP_LINK_CONF:
  2760. DP_INFO("PDIC_NOTIFY_ID_DP_LINK_CONF<%c>\n",
  2761. noti.sub1 + 'A' - 1);
  2762. if (!secdp_get_cable_status()) {
  2763. DP_INFO("cable is out\n");
  2764. goto end;
  2765. }
  2766. secdp_pdic_handle_linkconf(dp, &noti);
  2767. break;
  2768. case PDIC_NOTIFY_ID_DP_HPD:
  2769. if (!secdp_is_hpd_irq(&noti))
  2770. secdp_logger_set_max_count(300);
  2771. DP_INFO("PDIC_NOTIFY_ID_DP_HPD<%s><%s>\n",
  2772. (noti.sub1 == PDIC_NOTIFY_HIGH) ? "high" :
  2773. ((noti.sub1 == PDIC_NOTIFY_LOW) ? "low" : "."),
  2774. (noti.sub2 == PDIC_NOTIFY_IRQ) ? "irq" : ".");
  2775. if (!secdp_get_cable_status()) {
  2776. DP_INFO("cable is out\n");
  2777. goto end;
  2778. }
  2779. secdp_pdic_handle_hpd(dp, &noti);
  2780. break;
  2781. default:
  2782. break;
  2783. }
  2784. if ((sec->link_conf && secdp_get_hpd_status()) ||/*hpd high or hpd_irq*/
  2785. secdp_is_hpd_low(&noti) ||
  2786. secdp_is_disconnect(&noti)) {
  2787. /* see "secdp_handle_attention()" */
  2788. mutex_lock(&sec->attention_lock);
  2789. secdp_process_attention(dp, &noti);
  2790. mutex_unlock(&sec->attention_lock);
  2791. if (secdp_is_disconnect(&noti)) {
  2792. cancel_work_sync(&dp->connect_work);
  2793. if (dp_display_state_is(DP_STATE_ENABLED) ||
  2794. atomic_read(&sec->noti_status)) {
  2795. u64 ret;
  2796. DP_DEBUG("wait for detach complete\n");
  2797. init_completion(&sec->dp_off_comp);
  2798. ret = wait_for_completion_timeout(&sec->dp_off_comp,
  2799. msecs_to_jiffies(13500));
  2800. if (!ret) {
  2801. DP_ERR("dp_off_comp timeout\n");
  2802. complete_all(&dp->notification_comp);
  2803. msleep(100);
  2804. } else {
  2805. DP_DEBUG("detach complete!\n");
  2806. }
  2807. atomic_set(&sec->noti_status, 0);
  2808. }
  2809. sec->dp_disconnecting = false;
  2810. DP_INFO("DP disconnection complete\n");
  2811. dwc3_msm_set_dp_mode_for_ss(false);
  2812. complete(&sec->dp_discon_comp);
  2813. }
  2814. }
  2815. end:
  2816. return rc;
  2817. }
  2818. #endif
  2819. /**
  2820. * returns 0 if DP disconnect is completed
2821. * returns -1 if DP disconnect has not completed before the timeout expires
  2822. */
  2823. int secdp_wait_for_disconnect_complete(void)
  2824. {
  2825. struct dp_display_private *dp = container_of(g_dp_display,
  2826. struct dp_display_private, dp_display);
  2827. struct secdp_misc *sec = NULL;
  2828. u64 rc;
  2829. int ret = 0;
  2830. if (!dp) {
2831. DP_INFO("dp driver is not initialized completely\n");
  2832. ret = -1;
  2833. goto end;
  2834. }
  2835. sec = &dp->sec;
  2836. if (!sec->dp_disconnecting) {
  2837. DP_INFO("DP is not disconnecting now\n");
  2838. goto end;
  2839. }
  2840. DP_INFO("wait start\n");
  2841. init_completion(&sec->dp_discon_comp);
  2842. rc = wait_for_completion_timeout(&sec->dp_discon_comp,
  2843. msecs_to_jiffies(17000));
  2844. if (!rc) {
  2845. DP_ERR("DP disconnect timeout\n");
  2846. dp->sec.pdic_noti.reset = true;
  2847. ret = -1;
  2848. goto end;
  2849. }
  2850. DP_INFO("DP disconnect complete!\n");
  2851. end:
  2852. return ret;
  2853. }
  2854. EXPORT_SYMBOL(secdp_wait_for_disconnect_complete);
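/*
 * Illustrative caller sketch (hypothetical; the real callers live in the
 * USB/PDIC drivers outside this file):
 *
 *	if (secdp_wait_for_disconnect_complete() < 0)
 *		pr_err("DP disconnect did not complete in time\n");
 */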
  2855. /**
2856. * If SSUSB gets reset, it needs to call this callback to let DP know, since
2857. * DP shares its PHY with SSUSB (combo PHY).
2858. * return 0 on success
2859. * return -1 otherwise
  2860. */
  2861. int secdp_pdic_reset_cb(bool reset)
  2862. {
  2863. struct dp_display_private *dp = container_of(g_dp_display,
  2864. struct dp_display_private, dp_display);
  2865. struct secdp_misc *sec;
  2866. int ret = -1;
  2867. if (!dp) {
  2868. DP_INFO("DP is not yet initialized\n");
  2869. goto end;
  2870. }
  2871. DP_INFO("[reset_cb] %d\n", reset);
  2872. sec = &dp->sec;
  2873. if (secdp_get_cable_status()) {
  2874. bool lnk_cnf = sec->link_conf;
  2875. int hpd_val = secdp_get_hpd_status();
  2876. if (lnk_cnf && hpd_val) {
  2877. DP_ERR("DP connected and [LNK_CNF:%d,HPD:%d], abnormal!\n",
  2878. lnk_cnf, hpd_val);
  2879. goto abnormal;
  2880. } else {
  2881. DP_DEBUG("DP connected but [LNK_CNF:%d,HPD:%d], normal\n",
  2882. lnk_cnf, hpd_val);
  2883. goto end;
  2884. }
  2885. } else {
  2886. if (sec->dp_disconnecting) {
  2887. DP_ERR("DP disconnection under processing!\n");
  2888. goto abnormal;
  2889. } else {
  2890. DP_DEBUG("DP is not connected or disconnection complete\n");
  2891. goto end;
  2892. }
  2893. }
  2894. abnormal:
  2895. sec->pdic_noti.reset = reset;
  2896. ret = 0;
  2897. end:
  2898. return ret;
  2899. }
  2900. EXPORT_SYMBOL(secdp_pdic_reset_cb);
  2901. /**
2902. * returns true if the PHY was reset unexpectedly after the DP connection was made
  2903. * returns false otherwise
  2904. */
  2905. bool secdp_phy_reset_check(void)
  2906. {
  2907. struct dp_display_private *dp = container_of(g_dp_display,
  2908. struct dp_display_private, dp_display);
  2909. bool ret = false;
  2910. if (!dp || !dp->power)
  2911. goto end;
  2912. /* check if core clk is off */
  2913. if (!dp->power->clk_status(dp->power, DP_CORE_PM)) {
  2914. ret = true;
  2915. goto end;
  2916. }
  2917. /* check if PDIC or SSUSB PHY went reset */
  2918. ret = dp->sec.pdic_noti.reset;
  2919. end:
  2920. return ret;
  2921. }
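/*
 * Register the PDIC notifier. If the manager device is not ready yet and
 * retry is requested, re-arm reg_work so that registration is attempted
 * again after PDIC_DP_NOTI_REG_DELAY ms.
 */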
  2922. int secdp_pdic_noti_register_ex(struct secdp_misc *sec, bool retry)
  2923. {
  2924. struct secdp_pdic_noti *pdic_noti = &sec->pdic_noti;
  2925. int rc = -1;
  2926. #if IS_ENABLED(CONFIG_USB_TYPEC_MANAGER_NOTIFIER)
  2927. rc = manager_notifier_register(&pdic_noti->nb,
  2928. secdp_pdic_noti_cb, MANAGER_NOTIFY_PDIC_DP);
  2929. if (!rc) {
  2930. pdic_noti->registered = true;
  2931. DP_INFO("noti register success\n");
  2932. goto exit;
  2933. }
  2934. #endif
  2935. DP_ERR("noti register fail, rc:%d\n", rc);
  2936. if (!retry)
  2937. goto exit;
  2938. DP_ERR("manager_dev is not ready, try again in %d[ms]\n",
  2939. PDIC_DP_NOTI_REG_DELAY);
  2940. schedule_delayed_work(&pdic_noti->reg_work,
  2941. msecs_to_jiffies(PDIC_DP_NOTI_REG_DELAY));
  2942. exit:
  2943. return rc;
  2944. }
  2945. static void secdp_pdic_noti_register(struct work_struct *work)
  2946. {
  2947. struct delayed_work *dw;
  2948. struct secdp_pdic_noti *pdic_noti;
  2949. struct secdp_misc *sec;
  2950. int rc;
  2951. dw = to_delayed_work(work);
  2952. pdic_noti = container_of(dw, struct secdp_pdic_noti, reg_work);
  2953. sec = container_of(pdic_noti, struct secdp_misc, pdic_noti);
  2954. mutex_lock(&sec->notifier_lock);
  2955. if (secdp_get_lpm_mode(sec)) {
  2956. DP_INFO("it's LPM mode. skip\n");
  2957. goto exit;
  2958. }
  2959. if (pdic_noti->registered) {
  2960. DP_INFO("already registered\n");
  2961. goto exit;
  2962. }
  2963. rc = secdp_pdic_noti_register_ex(sec, true);
  2964. if (rc) {
  2965. DP_ERR("fail, rc(%d)\n", rc);
  2966. goto exit;
  2967. }
  2968. pdic_noti->registered = true;
  2969. /* cancel immediately */
  2970. rc = cancel_delayed_work(&pdic_noti->reg_work);
  2971. DP_DEBUG("cancel_work,%d\n", rc);
  2972. destroy_delayed_work_on_stack(&pdic_noti->reg_work);
  2973. exit:
  2974. mutex_unlock(&sec->notifier_lock);
  2975. }
  2976. int secdp_send_deferred_hpd_noti(struct secdp_misc *sec)
  2977. {
  2978. struct dp_display_private *dp;
  2979. int rc = 0;
  2980. dp = container_of(sec, struct dp_display_private, sec);
  2981. DP_INFO("noti_deferred %d\n", sec->hpd.noti_deferred);
  2982. cancel_delayed_work_sync(&sec->hpd.noti_work);
  2983. if (sec->hpd.noti_deferred) {
  2984. rc = dp_display_send_hpd_notification(dp, false);
  2985. sec->hpd.noti_deferred = false;
  2986. }
  2987. return rc;
  2988. }
  2989. static void secdp_hpd_noti_work(struct work_struct *work)
  2990. {
  2991. struct delayed_work *dw;
  2992. struct secdp_hpd *hpd;
  2993. struct secdp_misc *sec;
  2994. struct dp_display_private *dp;
  2995. dw = to_delayed_work(work);
  2996. hpd = container_of(dw, struct secdp_hpd, noti_work);
  2997. sec = container_of(hpd, struct secdp_misc, hpd);
  2998. dp = container_of(sec, struct dp_display_private, sec);
  2999. DP_INFO("hpd_noti_work %d\n", hpd->noti_deferred);
  3000. dp_display_send_hpd_notification(dp, false);
  3001. hpd->noti_deferred = false;
  3002. }
  3003. static void secdp_hdcp_start_work(struct work_struct *work)
  3004. {
  3005. struct dp_display_private *dp = container_of(g_dp_display,
  3006. struct dp_display_private, dp_display);
  3007. DP_ENTER("\n");
  3008. if (secdp_get_cable_status() && dp_display_state_is(DP_STATE_ENABLED)) {
  3009. cancel_delayed_work_sync(&dp->hdcp_cb_work);
  3010. queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ / 4);
  3011. }
  3012. }
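/*
 * Forced teardown path for a poor connection: mark the link as poor, run
 * the synchronous disconnect, then deinit the host and drop
 * DP_STATE_CONFIGURED so a clean reconnect is possible afterwards.
 */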
  3013. static void secdp_poor_disconnect_work(struct work_struct *work)
  3014. {
  3015. struct delayed_work *dw;
  3016. struct secdp_misc *sec;
  3017. struct dp_display_private *dp;
  3018. dw = to_delayed_work(work);
  3019. sec = container_of(dw, struct secdp_misc, poor_discon_work);
  3020. dp = container_of(sec, struct dp_display_private, sec);
  3021. DP_INFO("poor %d\n", dp->link->poor_connection);
  3022. if (!dp->link->poor_connection)
  3023. dp->link->poor_connection = true;
  3024. dp_display_disconnect_sync(dp);
  3025. mutex_lock(&dp->session_lock);
  3026. dp_display_host_deinit(dp);
  3027. dp_display_state_remove(DP_STATE_CONFIGURED);
  3028. mutex_unlock(&dp->session_lock);
  3029. }
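/*
 * Link backoff: while the cable stays connected and DP is enabled,
 * decrement status_update_cnt once per LINK_BACKOFF_TIMER period so that
 * an old burst of HPD_IRQs gradually ages out instead of tripping the
 * poor-connection threshold long after the link has recovered.
 */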
3030. #define LINK_BACKOFF_TIMER 120000 /* 2 min */
  3031. void secdp_link_backoff_start(void)
  3032. {
  3033. struct dp_display_private *dp = container_of(g_dp_display,
  3034. struct dp_display_private, dp_display);
  3035. struct secdp_misc *sec = &dp->sec;
  3036. if (sec->backoff_start) {
  3037. //DP_DEBUG("[backoff] already queued\n");
  3038. return;
  3039. }
  3040. schedule_delayed_work(&sec->link_backoff_work,
  3041. msecs_to_jiffies(LINK_BACKOFF_TIMER));
  3042. sec->backoff_start = true;
  3043. DP_INFO("[backoff] started\n");
  3044. }
  3045. void secdp_link_backoff_stop(void)
  3046. {
  3047. struct dp_display_private *dp = container_of(g_dp_display,
  3048. struct dp_display_private, dp_display);
  3049. struct secdp_misc *sec = &dp->sec;
  3050. if (!sec->backoff_start) {
  3051. //DP_DEBUG("[backoff] already cancelled\n");
  3052. return;
  3053. }
  3054. cancel_delayed_work(&sec->link_backoff_work);
  3055. sec->backoff_start = false;
  3056. DP_INFO("[backoff] stopped\n");
  3057. }
  3058. static void secdp_link_backoff_work(struct work_struct *work)
  3059. {
  3060. struct dp_display_private *dp = container_of(g_dp_display,
  3061. struct dp_display_private, dp_display);
  3062. struct secdp_misc *sec = &dp->sec;
  3063. if (!secdp_get_cable_status() || !dp_display_state_is(DP_STATE_ENABLED))
  3064. return;
  3065. //DP_DEBUG("[backoff] status_update_cnt %d\n", dp->link->status_update_cnt);
  3066. if (dp->link->status_update_cnt > 0)
  3067. dp->link->status_update_cnt--;
  3068. if (!dp->link->status_update_cnt) {
  3069. sec->backoff_start = false;
  3070. DP_INFO("[backoff] finished\n");
  3071. return;
  3072. }
  3073. schedule_delayed_work(&sec->link_backoff_work,
  3074. msecs_to_jiffies(LINK_BACKOFF_TIMER));
  3075. DP_INFO("[backoff] re-started %d\n", dp->link->status_update_cnt);
  3076. }
  3077. /**
3078. * This logic checks for a poor DP connection. If link training fails or
3079. * HPD_IRQ arrives more than 4 times within 13 seconds, regard it as a poor
3080. * connection and disconnect DP.
  3081. */
  3082. static void secdp_link_status_work(struct work_struct *work)
  3083. {
  3084. struct delayed_work *dw;
  3085. struct secdp_misc *sec;
  3086. struct dp_display_private *dp;
  3087. int status_update_cnt;
  3088. dw = to_delayed_work(work);
  3089. sec = container_of(dw, struct secdp_misc, link_status_work);
  3090. dp = container_of(sec, struct dp_display_private, sec);
  3091. status_update_cnt = dp->link->status_update_cnt;
  3092. DP_INFO("[link_work] status_update_cnt %d\n", status_update_cnt);
  3093. if (dp->link->poor_connection) {
  3094. DP_INFO("[link_work] poor connection!\n");
  3095. goto poor_disconnect;
  3096. }
  3097. if (secdp_get_cable_status() && dp_display_state_is(DP_STATE_ENABLED) &&
  3098. sec->dex.curr != DEX_DISABLED) {
  3099. if (!secdp_get_link_train_status(dp->ctrl) ||
  3100. status_update_cnt > MAX_CNT_LINK_STATUS_UPDATE) {
  3101. DP_INFO("poor!\n");
  3102. goto poor_disconnect;
  3103. }
  3104. if (!secdp_check_link_stable(dp->link)) {
  3105. DP_INFO("Check poor connection, again\n");
  3106. schedule_delayed_work(&sec->link_status_work,
  3107. msecs_to_jiffies(3000));
  3108. }
  3109. }
  3110. DP_LEAVE("\n");
  3111. return;
  3112. poor_disconnect:
  3113. schedule_delayed_work(&sec->poor_discon_work, msecs_to_jiffies(10));
  3114. }
  3115. #ifdef MODEM_RF_INFO
3116. /* CP notify format (HEX raw format)
  3117. * 10 00 AA BB 27 01 03 XX YY YY YY YY ZZ ZZ ZZ ZZ
  3118. *
  3119. * 00 10 (0x0010) - len
  3120. * AA BB - not used
  3121. * 27 - MAIN CMD (SYSTEM CMD : 0x27)
  3122. * 01 - SUB CMD (CP Channel Info : 0x01)
  3123. * 03 - NOTI CMD (0x03)
  3124. * XX - RAT MODE
  3125. * YY YY YY YY - BAND MODE
  3126. * ZZ ZZ ZZ ZZ - FREQ INFO
  3127. */
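/*
 * Reading the frame above: XX ends up in sec->rf_info.rat, YY YY YY YY in
 * sec->rf_info.band and ZZ ZZ ZZ ZZ in sec->rf_info.arfcn -- assuming
 * struct rf_information lays out its fields in that order, since
 * secdp_modem_rfinfo_cb() below copies msg->data into sec->rf_info as-is.
 */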
  3128. int secdp_modem_rfinfo_cb(struct notifier_block *nb,
  3129. unsigned long size, void *data)
  3130. {
  3131. struct secdp_misc *sec = container_of(nb,
  3132. struct secdp_misc, modem_rfinfo_nb);
  3133. struct dev_ril_bridge_msg *msg = (struct dev_ril_bridge_msg *)data;
  3134. int ret = NOTIFY_DONE;
  3135. DP_INFO("RF_info: size %lu, dev_id %d, len %d\n", size, msg->dev_id, msg->data_len);
  3136. if (msg->dev_id == IPC_SYSTEM_CP_CHANNEL_INFO &&
  3137. msg->data_len == sizeof(struct rf_information)) {
  3138. memcpy(&sec->rf_info, msg->data, sizeof(struct rf_information));
  3139. DP_INFO("RAT %u, BAND %u, ARFCN %u\n", sec->rf_info.rat,
  3140. sec->rf_info.band, sec->rf_info.arfcn);
3141. // TODO:
  3142. }
  3143. return ret;
  3144. }
  3145. #endif
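/*
 * One-time SECDP setup: completions, delayed works (HPD notification, HDCP
 * start, link status, link backoff, poor-connection, PDIC notifier
 * registration), GPIO request, locks, wakelock, reboot/RIL notifiers and
 * the default HMD allow-list entry.
 */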
  3146. static int secdp_init(struct dp_display_private *dp)
  3147. {
  3148. struct secdp_misc *sec;
  3149. int rc = -1;
  3150. if (!dp) {
  3151. DP_ERR("error! no dp structure\n");
  3152. goto end;
  3153. }
  3154. secdp_logger_init();
  3155. sec = &dp->sec;
  3156. sec->lpm_booting = (secdp_param_lpcharge == 1) ? true : false;
  3157. init_completion(&sec->dp_off_comp);
  3158. init_completion(&sec->dp_discon_comp);
  3159. atomic_set(&sec->noti_status, 0);
  3160. INIT_DELAYED_WORK(&sec->hpd.noti_work, secdp_hpd_noti_work);
  3161. INIT_DELAYED_WORK(&sec->hdcp.start_work, secdp_hdcp_start_work);
  3162. INIT_DELAYED_WORK(&sec->link_status_work, secdp_link_status_work);
  3163. INIT_DELAYED_WORK(&sec->link_backoff_work, secdp_link_backoff_work);
  3164. INIT_DELAYED_WORK(&sec->poor_discon_work, secdp_poor_disconnect_work);
  3165. INIT_DELAYED_WORK(&sec->pdic_noti.reg_work, secdp_pdic_noti_register);
  3166. schedule_delayed_work(&sec->pdic_noti.reg_work,
  3167. msecs_to_jiffies(PDIC_DP_NOTI_REG_DELAY));
  3168. rc = secdp_power_request_gpios(dp->power);
  3169. if (rc)
  3170. DP_ERR("DRM DP gpio request failed:%d\n", rc);
  3171. mutex_init(&sec->notify_lock);
  3172. mutex_init(&sec->attention_lock);
  3173. mutex_init(&sec->notifier_lock);
  3174. mutex_init(&sec->hmd.lock);
  3175. secdp_init_wakelock(dp);
  3176. /* reboot notifier callback */
  3177. sec->reboot_nb.notifier_call = secdp_reboot_cb;
  3178. #ifndef CONFIG_UML
  3179. register_reboot_notifier(&sec->reboot_nb);
  3180. #endif
  3181. #if defined(CONFIG_SECDP_SWITCH)
  3182. rc = switch_dev_register(&switch_secdp_msg);
  3183. if (rc)
  3184. DP_INFO("Failed to register secdp_msg switch:%d\n", rc);
  3185. #endif
  3186. if (dp->parser->rf_tx_backoff) {
  3187. /* DEVPATH=/devices/virtual/sec/secdp */
  3188. sec->uevent_dev = sec_device_create(NULL, "secdp");
  3189. }
  3190. #ifdef MODEM_RF_INFO
  3191. //sec->modem_rfinfo_nb.priority = 0;
  3192. sec->modem_rfinfo_nb.notifier_call = secdp_modem_rfinfo_cb;
  3193. register_dev_ril_bridge_event_notifier(&sec->modem_rfinfo_nb);
  3194. #endif
  3195. /* add default AR/VR here */
  3196. strlcpy(sec->hmd.list[0].monitor_name, "PicoVR", MON_NAME_LEN);
  3197. sec->hmd.list[0].ven_id = 0x2d40;
  3198. sec->hmd.list[0].prod_id = 0x0000;
  3199. DP_INFO("secdp init done\n");
  3200. end:
  3201. return rc;
  3202. }
  3203. static void secdp_deinit(struct dp_display_private *dp)
  3204. {
  3205. struct secdp_misc *sec;
  3206. if (!dp) {
  3207. DP_ERR("error! no dp structure\n");
  3208. goto end;
  3209. }
  3210. sec = &dp->sec;
  3211. if (dp->parser->rf_tx_backoff)
  3212. sec_device_destroy(sec->uevent_dev->devt);
  3213. #ifdef MODEM_RF_INFO
  3214. unregister_dev_ril_bridge_event_notifier(&sec->modem_rfinfo_nb);
  3215. #endif
  3216. secdp_destroy_wakelock(dp);
  3217. secdp_logger_deinit();
  3218. mutex_destroy(&sec->notify_lock);
  3219. mutex_destroy(&sec->attention_lock);
  3220. mutex_destroy(&sec->notifier_lock);
  3221. mutex_destroy(&sec->hmd.lock);
  3222. sec->sysfs = NULL;
  3223. #if defined(CONFIG_SECDP_SWITCH)
  3224. switch_dev_unregister(&switch_secdp_msg);
  3225. #endif
  3226. end:
  3227. return;
  3228. }
  3229. #endif
  3230. static void dp_display_connect_work(struct work_struct *work)
  3231. {
  3232. int rc = 0;
  3233. struct dp_display_private *dp = container_of(work,
  3234. struct dp_display_private, connect_work);
  3235. if (dp_display_state_is(DP_STATE_TUI_ACTIVE)) {
  3236. dp_display_state_log("[TUI is active]");
  3237. return;
  3238. }
  3239. if (dp_display_state_is(DP_STATE_ABORTED)) {
  3240. DP_WARN("HPD off requested\n");
  3241. return;
  3242. }
  3243. if (!dp->hpd->hpd_high) {
  3244. DP_WARN("Sink disconnected\n");
  3245. return;
  3246. }
  3247. #if defined(CONFIG_SECDP)
  3248. DP_ENTER("\n");
  3249. dp_display_host_init(dp);
3250. /* Fix for PHY CTS v1.2 - 8.1 AUX Manchester - Channel EYE test failure.
3251. * Whenever HPD goes high, AUX init introduces an RC delay and the actual
3252. * AUX transfer can start before RC has stabilized. Put some delay here so
3253. * that the RC waveform becomes stable.
  3254. */
  3255. msleep(200);
  3256. #endif
  3257. rc = dp_display_process_hpd_high(dp);
  3258. if (!rc && dp->panel->video_test)
  3259. dp->link->send_test_response(dp->link);
  3260. }
  3261. static void dp_display_disconnect_work(struct work_struct *work)
  3262. {
  3263. struct dp_display_private *dp = container_of(work,
  3264. struct dp_display_private, disconnect_work);
  3265. dp_display_handle_disconnect(dp, false);
  3266. if (dp->debug->sim_mode && dp_display_state_is(DP_STATE_ABORTED))
  3267. dp_display_host_deinit(dp);
  3268. dp->debug->abort(dp->debug);
  3269. }
  3270. #if !defined(CONFIG_SECDP)
  3271. static int dp_display_usb_notifier(struct notifier_block *nb,
  3272. unsigned long action, void *data)
  3273. {
  3274. struct dp_display_private *dp = container_of(nb,
  3275. struct dp_display_private, usb_nb);
  3276. SDE_EVT32_EXTERNAL(dp->state, dp->debug->sim_mode, action);
  3277. if (!action && dp->debug->sim_mode) {
  3278. DP_WARN("usb disconnected during simulation\n");
  3279. dp_display_state_add(DP_STATE_ABORTED);
  3280. dp->ctrl->abort(dp->ctrl, true);
  3281. dp->aux->abort(dp->aux, true);
  3282. dp->power->park_clocks(dp->power);
  3283. queue_work(dp->wq, &dp->disconnect_work);
  3284. }
  3285. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, NOTIFY_DONE);
  3286. return NOTIFY_DONE;
  3287. }
  3288. static void dp_display_register_usb_notifier(struct dp_display_private *dp)
  3289. {
  3290. int rc = 0;
  3291. const char *phandle = "usb-phy";
  3292. struct usb_phy *usbphy;
  3293. usbphy = devm_usb_get_phy_by_phandle(&dp->pdev->dev, phandle, 0);
  3294. if (IS_ERR_OR_NULL(usbphy)) {
  3295. DP_DEBUG("unable to get usbphy\n");
  3296. return;
  3297. }
  3298. dp->usb_nb.notifier_call = dp_display_usb_notifier;
  3299. dp->usb_nb.priority = 2;
  3300. rc = usb_register_notifier(usbphy, &dp->usb_nb);
  3301. if (rc)
  3302. DP_DEBUG("failed to register for usb event: %d\n", rc);
  3303. }
  3304. #endif
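/*
 * MMRM (clock resource manager) callback: if the aggregated clock budget
 * changes while DP is enabled and not already aborted, disconnect the
 * current session to reduce the DP clock demand.
 */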
  3305. int dp_display_mmrm_callback(struct mmrm_client_notifier_data *notifier_data)
  3306. {
  3307. struct dss_clk_mmrm_cb *mmrm_cb_data = (struct dss_clk_mmrm_cb *)notifier_data->pvt_data;
  3308. struct dp_display *dp_display = (struct dp_display *)mmrm_cb_data->phandle;
  3309. struct dp_display_private *dp =
  3310. container_of(dp_display, struct dp_display_private, dp_display);
  3311. int ret = 0;
  3312. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state, notifier_data->cb_type);
  3313. if (notifier_data->cb_type == MMRM_CLIENT_RESOURCE_VALUE_CHANGE
  3314. && dp_display_state_is(DP_STATE_ENABLED)
  3315. && !dp_display_state_is(DP_STATE_ABORTED)) {
  3316. ret = dp_display_handle_disconnect(dp, false);
  3317. if (ret)
  3318. DP_ERR("mmrm callback error reducing clk, ret:%d\n", ret);
  3319. }
  3320. DP_DEBUG("mmrm callback handled, state: 0x%x rc:%d\n", dp->state, ret);
  3321. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, notifier_data->cb_type);
  3322. return ret;
  3323. }
  3324. static void dp_display_deinit_sub_modules(struct dp_display_private *dp)
  3325. {
  3326. #if defined(CONFIG_SECDP)
  3327. struct device *dev = &dp->pdev->dev;
  3328. DP_ENTER("\n");
  3329. secdp_deinit(dp);
  3330. secdp_sysfs_put(dev, dp->sec.sysfs);
  3331. #endif
  3332. dp_debug_put(dp->debug);
  3333. dp_hpd_put(dp->hpd);
  3334. if (dp->panel)
  3335. dp_audio_put(dp->panel->audio);
  3336. dp_ctrl_put(dp->ctrl);
  3337. dp_panel_put(dp->panel);
  3338. dp_link_put(dp->link);
  3339. dp_power_put(dp->power);
  3340. dp_pll_put(dp->pll);
  3341. dp_aux_put(dp->aux);
  3342. dp_catalog_put(dp->catalog);
  3343. dp_parser_put(dp->parser);
  3344. mutex_destroy(&dp->session_lock);
  3345. }
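/*
 * Bring-up order for the sub-modules: parser -> catalog -> aux (with the
 * aux switch type picked from the devicetree node name) -> pll -> power ->
 * link -> panel -> ctrl -> audio (-> secdp sysfs) -> hpd -> debug, each
 * with a matching error label so a failure unwinds everything created so
 * far. dp_display_deinit_sub_modules() above releases them in reverse.
 */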
  3346. static int dp_init_sub_modules(struct dp_display_private *dp)
  3347. {
  3348. int rc = 0;
  3349. u32 dp_core_revision = 0;
  3350. bool hdcp_disabled;
  3351. const char *phandle = "qcom,dp-aux-switch";
  3352. struct device *dev = &dp->pdev->dev;
  3353. struct dp_hpd_cb *cb = &dp->hpd_cb;
  3354. struct dp_ctrl_in ctrl_in = {
  3355. .dev = dev,
  3356. };
  3357. struct dp_panel_in panel_in = {
  3358. .dev = dev,
  3359. };
  3360. struct dp_debug_in debug_in = {
  3361. .dev = dev,
  3362. };
  3363. struct dp_pll_in pll_in = {
  3364. .pdev = dp->pdev,
  3365. };
  3366. #if defined(CONFIG_SECDP)
  3367. struct secdp_sysfs_in sysfs_in = {
  3368. .dev = dev,
  3369. };
  3370. #endif
  3371. mutex_init(&dp->session_lock);
  3372. mutex_init(&dp->accounting_lock);
  3373. dp->parser = dp_parser_get(dp->pdev);
  3374. if (IS_ERR(dp->parser)) {
  3375. rc = PTR_ERR(dp->parser);
  3376. DP_ERR("failed to initialize parser, rc = %d\n", rc);
  3377. dp->parser = NULL;
  3378. goto error;
  3379. }
  3380. rc = dp->parser->parse(dp->parser);
  3381. if (rc) {
  3382. DP_ERR("device tree parsing failed\n");
  3383. goto error_catalog;
  3384. }
  3385. g_dp_display->is_mst_supported = dp->parser->has_mst;
  3386. g_dp_display->dsc_cont_pps = dp->parser->dsc_continuous_pps;
  3387. dp->catalog = dp_catalog_get(dev, dp->parser);
  3388. if (IS_ERR(dp->catalog)) {
  3389. rc = PTR_ERR(dp->catalog);
  3390. DP_ERR("failed to initialize catalog, rc = %d\n", rc);
  3391. dp->catalog = NULL;
  3392. goto error_catalog;
  3393. }
  3394. dp_core_revision = dp_catalog_get_dp_core_version(dp->catalog);
  3395. dp->aux_switch_node = of_parse_phandle(dp->pdev->dev.of_node, phandle, 0);
  3396. if (!dp->aux_switch_node) {
  3397. dp->no_aux_switch = true;
  3398. DP_WARN("Aux switch node not found, assigning bypass mode as switch type\n");
  3399. dp->switch_type = DP_AUX_SWITCH_BYPASS;
  3400. goto skip_node_name;
  3401. }
  3402. if (!strcmp(dp->aux_switch_node->name, "fsa4480"))
  3403. dp->switch_type = DP_AUX_SWITCH_FSA4480;
  3404. else if (!strcmp(dp->aux_switch_node->name, "wcd939x_i2c"))
  3405. dp->switch_type = DP_AUX_SWITCH_WCD939x;
  3406. else
  3407. dp->switch_type = DP_AUX_SWITCH_BYPASS;
  3408. skip_node_name:
  3409. dp->aux = dp_aux_get(dev, &dp->catalog->aux, dp->parser,
  3410. #if !defined(CONFIG_SECDP)
  3411. dp->aux_switch_node, dp->aux_bridge, g_dp_display->dp_aux_ipc_log);
  3412. #else
  3413. dp->aux_switch_node, dp->aux_bridge, g_dp_display->dp_aux_ipc_log, (void*)&dp->sec);
  3414. #endif
  3415. if (IS_ERR(dp->aux)) {
  3416. rc = PTR_ERR(dp->aux);
  3417. DP_ERR("failed to initialize aux, rc = %d\n", rc);
  3418. dp->aux = NULL;
  3419. goto error_aux;
  3420. }
  3421. rc = dp->aux->drm_aux_register(dp->aux, dp->dp_display.drm_dev);
  3422. if (rc) {
  3423. DP_ERR("DRM DP AUX register failed\n");
  3424. goto error_pll;
  3425. }
  3426. pll_in.aux = dp->aux;
  3427. pll_in.parser = dp->parser;
  3428. pll_in.dp_core_revision = dp_core_revision;
  3429. dp->pll = dp_pll_get(&pll_in);
  3430. if (IS_ERR(dp->pll)) {
  3431. rc = PTR_ERR(dp->pll);
  3432. DP_ERR("failed to initialize pll, rc = %d\n", rc);
  3433. dp->pll = NULL;
  3434. goto error_pll;
  3435. }
  3436. dp->power = dp_power_get(dp->parser, dp->pll);
  3437. if (IS_ERR(dp->power)) {
  3438. rc = PTR_ERR(dp->power);
  3439. DP_ERR("failed to initialize power, rc = %d\n", rc);
  3440. dp->power = NULL;
  3441. goto error_power;
  3442. }
  3443. rc = dp->power->power_client_init(dp->power, &dp->priv->phandle,
  3444. dp->dp_display.drm_dev);
  3445. if (rc) {
  3446. DP_ERR("Power client create failed\n");
  3447. goto error_link;
  3448. }
  3449. rc = dp->power->power_mmrm_init(dp->power, &dp->priv->phandle,
  3450. (void *)&dp->dp_display, dp_display_mmrm_callback);
  3451. if (rc) {
  3452. DP_ERR("failed to initialize mmrm, rc = %d\n", rc);
  3453. goto error_link;
  3454. }
  3455. dp->link = dp_link_get(dev, dp->aux, dp_core_revision);
  3456. if (IS_ERR(dp->link)) {
  3457. rc = PTR_ERR(dp->link);
  3458. DP_ERR("failed to initialize link, rc = %d\n", rc);
  3459. dp->link = NULL;
  3460. goto error_link;
  3461. }
  3462. panel_in.aux = dp->aux;
  3463. panel_in.catalog = &dp->catalog->panel;
  3464. panel_in.link = dp->link;
  3465. panel_in.connector = dp->dp_display.base_connector;
  3466. panel_in.base_panel = NULL;
  3467. panel_in.parser = dp->parser;
  3468. #if defined(CONFIG_SECDP)
  3469. panel_in.sec = &dp->sec;
  3470. #endif
  3471. dp->panel = dp_panel_get(&panel_in);
  3472. if (IS_ERR(dp->panel)) {
  3473. rc = PTR_ERR(dp->panel);
  3474. DP_ERR("failed to initialize panel, rc = %d\n", rc);
  3475. dp->panel = NULL;
  3476. goto error_panel;
  3477. }
  3478. ctrl_in.link = dp->link;
  3479. ctrl_in.panel = dp->panel;
  3480. ctrl_in.aux = dp->aux;
  3481. ctrl_in.power = dp->power;
  3482. ctrl_in.catalog = &dp->catalog->ctrl;
  3483. ctrl_in.parser = dp->parser;
  3484. ctrl_in.pll = dp->pll;
  3485. #if defined(CONFIG_SECDP)
  3486. ctrl_in.sec = &dp->sec;
  3487. #endif
  3488. dp->ctrl = dp_ctrl_get(&ctrl_in);
  3489. if (IS_ERR(dp->ctrl)) {
  3490. rc = PTR_ERR(dp->ctrl);
  3491. DP_ERR("failed to initialize ctrl, rc = %d\n", rc);
  3492. dp->ctrl = NULL;
  3493. goto error_ctrl;
  3494. }
  3495. dp->panel->audio = dp_audio_get(dp->pdev, dp->panel,
  3496. &dp->catalog->audio);
  3497. if (IS_ERR(dp->panel->audio)) {
  3498. rc = PTR_ERR(dp->panel->audio);
  3499. DP_ERR("failed to initialize audio, rc = %d\n", rc);
  3500. dp->panel->audio = NULL;
  3501. goto error_audio;
  3502. }
  3503. #if defined(CONFIG_SECDP)
  3504. #if defined(CONFIG_SECDP_SWITCH)
  3505. dp->panel->audio->has_mst = dp->parser->has_mst;
  3506. secdp_audio_register_switch(dp->panel->audio);
  3507. #endif
  3508. sysfs_in.parser = dp->parser;
  3509. sysfs_in.panel = dp->panel;
  3510. sysfs_in.power = dp->power;
  3511. sysfs_in.link = dp->link;
  3512. sysfs_in.ctrl = dp->ctrl;
  3513. sysfs_in.catalog = dp->catalog;
  3514. sysfs_in.sec = &dp->sec;
  3515. dp->sec.sysfs = secdp_sysfs_get(&sysfs_in);
  3516. if (IS_ERR(dp->sec.sysfs)) {
  3517. rc = PTR_ERR(dp->sec.sysfs);
  3518. DP_ERR("failed to initialize sysfs, rc = %d\n", rc);
  3519. dp->sec.sysfs = NULL;
  3520. goto error_sysfs;
  3521. }
  3522. rc = secdp_init(dp);
  3523. if (rc)
  3524. DP_ERR("secdp_init failed\n");
  3525. #endif/*CONFIG_SECDP*/
  3526. memset(&dp->mst, 0, sizeof(dp->mst));
  3527. dp->active_stream_cnt = 0;
  3528. cb->configure = dp_display_usbpd_configure_cb;
  3529. cb->disconnect = dp_display_usbpd_disconnect_cb;
  3530. cb->attention = dp_display_usbpd_attention_cb;
  3531. dp->hpd = dp_hpd_get(dev, dp->parser, &dp->catalog->hpd,
  3532. dp->aux_bridge, cb);
  3533. if (IS_ERR(dp->hpd)) {
  3534. rc = PTR_ERR(dp->hpd);
  3535. DP_ERR("failed to initialize hpd, rc = %d\n", rc);
  3536. dp->hpd = NULL;
  3537. goto error_hpd;
  3538. }
  3539. hdcp_disabled = !!dp_display_initialize_hdcp(dp);
  3540. debug_in.panel = dp->panel;
  3541. debug_in.hpd = dp->hpd;
  3542. debug_in.link = dp->link;
  3543. debug_in.aux = dp->aux;
  3544. debug_in.connector = &dp->dp_display.base_connector;
  3545. debug_in.catalog = dp->catalog;
  3546. debug_in.parser = dp->parser;
  3547. debug_in.ctrl = dp->ctrl;
  3548. debug_in.pll = dp->pll;
  3549. debug_in.display = &dp->dp_display;
  3550. dp->debug = dp_debug_get(&debug_in);
  3551. if (IS_ERR(dp->debug)) {
  3552. rc = PTR_ERR(dp->debug);
  3553. DP_ERR("failed to initialize debug, rc = %d\n", rc);
  3554. dp->debug = NULL;
  3555. goto error_debug;
  3556. }
  3557. dp->cached_connector_status = connector_status_disconnected;
  3558. dp->tot_dsc_blks_in_use = 0;
  3559. dp->tot_lm_blks_in_use = 0;
  3560. dp->debug->hdcp_disabled = hdcp_disabled;
  3561. dp_display_update_hdcp_status(dp, true);
  3562. #if !defined(CONFIG_SECDP)
  3563. dp_display_register_usb_notifier(dp);
  3564. #endif
  3565. if (dp->hpd->register_hpd) {
  3566. rc = dp->hpd->register_hpd(dp->hpd);
  3567. if (rc) {
  3568. DP_ERR("failed register hpd\n");
  3569. goto error_hpd_reg;
  3570. }
  3571. }
  3572. return rc;
  3573. error_hpd_reg:
  3574. dp_debug_put(dp->debug);
  3575. error_debug:
  3576. dp_hpd_put(dp->hpd);
  3577. error_hpd:
  3578. dp_audio_put(dp->panel->audio);
  3579. error_audio:
  3580. dp_ctrl_put(dp->ctrl);
  3581. error_ctrl:
  3582. dp_panel_put(dp->panel);
  3583. error_panel:
  3584. dp_link_put(dp->link);
  3585. #if defined(CONFIG_SECDP)
  3586. error_sysfs:
  3587. secdp_sysfs_put(dev, dp->sec.sysfs);
  3588. #endif
  3589. error_link:
  3590. dp_power_put(dp->power);
  3591. error_power:
  3592. dp_pll_put(dp->pll);
  3593. error_pll:
  3594. dp_aux_put(dp->aux);
  3595. error_aux:
  3596. dp_catalog_put(dp->catalog);
  3597. error_catalog:
  3598. dp_parser_put(dp->parser);
  3599. error:
  3600. mutex_destroy(&dp->session_lock);
  3601. return rc;
  3602. }
  3603. static int dp_display_post_init(struct dp_display *dp_display)
  3604. {
  3605. int rc = 0;
  3606. struct dp_display_private *dp;
  3607. if (!dp_display) {
  3608. DP_ERR("invalid input\n");
  3609. rc = -EINVAL;
  3610. goto end;
  3611. }
  3612. dp = container_of(dp_display, struct dp_display_private, dp_display);
  3613. if (IS_ERR_OR_NULL(dp)) {
  3614. DP_ERR("invalid params\n");
  3615. rc = -EINVAL;
  3616. goto end;
  3617. }
  3618. rc = dp_init_sub_modules(dp);
  3619. if (rc)
  3620. goto end;
  3621. dp_display->post_init = NULL;
  3622. end:
  3623. DP_DEBUG("%s\n", rc ? "failed" : "success");
  3624. return rc;
  3625. }
  3626. static int dp_display_set_mode(struct dp_display *dp_display, void *panel,
  3627. struct dp_display_mode *mode)
  3628. {
  3629. const u32 num_components = 3, default_bpp = 24;
  3630. struct dp_display_private *dp;
  3631. struct dp_panel *dp_panel;
  3632. bool dsc_en = (mode->capabilities & DP_PANEL_CAPS_DSC) ? true : false;
  3633. if (!dp_display || !panel) {
  3634. DP_ERR("invalid input\n");
  3635. return -EINVAL;
  3636. }
  3637. dp_panel = panel;
  3638. if (!dp_panel->connector) {
  3639. DP_ERR("invalid connector input\n");
  3640. return -EINVAL;
  3641. }
  3642. dp = container_of(dp_display, struct dp_display_private, dp_display);
  3643. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state,
  3644. mode->timing.h_active, mode->timing.v_active,
  3645. mode->timing.refresh_rate);
  3646. mutex_lock(&dp->session_lock);
  3647. mode->timing.bpp =
  3648. dp_panel->connector->display_info.bpc * num_components;
  3649. if (!mode->timing.bpp)
  3650. mode->timing.bpp = default_bpp;
  3651. mode->timing.bpp = dp->panel->get_mode_bpp(dp->panel,
  3652. mode->timing.bpp, mode->timing.pixel_clk_khz, dsc_en);
  3653. if (dp->mst.mst_active)
  3654. dp->mst.cbs.set_mst_mode_params(&dp->dp_display, mode);
  3655. dp_panel->pinfo = mode->timing;
  3656. mutex_unlock(&dp->session_lock);
  3657. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  3658. return 0;
  3659. }
  3660. static int dp_display_prepare(struct dp_display *dp_display, void *panel)
  3661. {
  3662. struct dp_display_private *dp;
  3663. struct dp_panel *dp_panel;
  3664. int rc = 0;
  3665. if (!dp_display || !panel) {
  3666. DP_ERR("invalid input\n");
  3667. return -EINVAL;
  3668. }
  3669. dp_panel = panel;
  3670. if (!dp_panel->connector) {
  3671. DP_ERR("invalid connector input\n");
  3672. return -EINVAL;
  3673. }
  3674. dp = container_of(dp_display, struct dp_display_private, dp_display);
  3675. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  3676. mutex_lock(&dp->session_lock);
  3677. /*
  3678. * If DP video session is restored by the userspace after display
  3679. * disconnect notification from dongle i.e. typeC cable connected to
  3680. * source but disconnected at the display side, the DP controller is
  3681. * not restored to the desired configured state. So, ensure host_init
  3682. * is executed in such a scenario so that all the DP controller
  3683. * resources are enabled for the next connection event.
  3684. */
  3685. if (dp_display_state_is(DP_STATE_SRC_PWRDN) &&
  3686. dp_display_state_is(DP_STATE_CONFIGURED)) {
  3687. rc = dp_display_host_init(dp);
  3688. if (rc) {
  3689. /*
3690. * Skip all the events as in the abort case, except that the
3691. * stream clocks should remain enabled so that no commit failure
3692. * can be seen.
  3693. */
  3694. DP_ERR("Host init failed.\n");
  3695. goto end;
  3696. }
  3697. /*
  3698. * Remove DP_STATE_SRC_PWRDN flag on successful host_init to
  3699. * prevent cases such as below.
  3700. * 1. MST stream 1 failed to do host init then stream 2 can retry again.
  3701. * 2. Resume path fails, now sink sends hpd_high=0 and hpd_high=1.
  3702. */
  3703. dp_display_state_remove(DP_STATE_SRC_PWRDN);
  3704. }
  3705. /*
  3706. * If the physical connection to the sink is already lost by the time
  3707. * we try to set up the connection, we can just skip all the steps
  3708. * here safely.
  3709. */
  3710. if (dp_display_state_is(DP_STATE_ABORTED)) {
  3711. dp_display_state_log("[aborted]");
  3712. goto end;
  3713. }
  3714. /*
  3715. * If DP_STATE_ENABLED, there is nothing left to do.
  3716. * This would happen during MST flow. So, log this.
  3717. */
  3718. if (dp_display_state_is(DP_STATE_ENABLED)) {
  3719. dp_display_state_warn("[already enabled]");
  3720. goto end;
  3721. }
  3722. if (!dp_display_is_ready(dp)) {
  3723. dp_display_state_show("[not ready]");
  3724. goto end;
  3725. }
  3726. /* For supporting DP_PANEL_SRC_INITIATED_POWER_DOWN case */
  3727. rc = dp_display_host_ready(dp);
  3728. if (rc) {
  3729. dp_display_state_show("[ready failed]");
  3730. goto end;
  3731. }
  3732. if (dp->debug->psm_enabled) {
  3733. dp->link->psm_config(dp->link, &dp->panel->link_info, false);
  3734. dp->debug->psm_enabled = false;
  3735. }
  3736. /*
  3737. * Execute the dp controller power on in shallow mode here.
  3738. * In normal cases, controller should have been powered on
  3739. * by now. In some cases like suspend/resume or framework
  3740. * reboot, we end up here without a powered on controller.
  3741. * Cable may have been removed in suspended state. In that
  3742. * case, link training is bound to fail on system resume.
  3743. * So, we execute in shallow mode here to do only minimal
  3744. * and required things.
  3745. */
  3746. rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active, dp_panel->fec_en,
  3747. dp_panel->dsc_en, true);
  3748. if (rc)
  3749. goto end;
  3750. end:
  3751. mutex_unlock(&dp->session_lock);
  3752. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, rc);
  3753. return rc;
  3754. }
  3755. static int dp_display_set_stream_info(struct dp_display *dp_display,
  3756. void *panel, u32 strm_id, u32 start_slot,
  3757. u32 num_slots, u32 pbn, int vcpi)
  3758. {
  3759. int rc = 0;
  3760. struct dp_panel *dp_panel;
  3761. struct dp_display_private *dp;
  3762. const int max_slots = 64;
  3763. if (!dp_display) {
  3764. DP_ERR("invalid input\n");
  3765. return -EINVAL;
  3766. }
  3767. if (strm_id >= DP_STREAM_MAX) {
  3768. DP_ERR("invalid stream id:%d\n", strm_id);
  3769. return -EINVAL;
  3770. }
  3771. if (start_slot + num_slots > max_slots) {
  3772. DP_ERR("invalid channel info received. start:%d, slots:%d\n",
  3773. start_slot, num_slots);
  3774. return -EINVAL;
  3775. }
  3776. dp = container_of(dp_display, struct dp_display_private, dp_display);
  3777. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state, strm_id,
  3778. start_slot, num_slots);
  3779. mutex_lock(&dp->session_lock);
  3780. dp->ctrl->set_mst_channel_info(dp->ctrl, strm_id,
  3781. start_slot, num_slots);
  3782. if (panel) {
  3783. dp_panel = panel;
  3784. dp_panel->set_stream_info(dp_panel, strm_id, start_slot,
  3785. num_slots, pbn, vcpi);
  3786. }
  3787. mutex_unlock(&dp->session_lock);
  3788. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, rc);
  3789. return rc;
  3790. }
  3791. static int dp_display_enable(struct dp_display *dp_display, void *panel)
  3792. {
  3793. int rc = 0;
  3794. struct dp_display_private *dp;
  3795. if (!dp_display || !panel) {
  3796. DP_ERR("invalid input\n");
  3797. return -EINVAL;
  3798. }
  3799. dp = container_of(dp_display, struct dp_display_private, dp_display);
  3800. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  3801. mutex_lock(&dp->session_lock);
  3802. /*
  3803. * If DP_STATE_READY is not set, we should not do any HW
  3804. * programming.
  3805. */
  3806. if (!dp_display_state_is(DP_STATE_READY)) {
  3807. dp_display_state_show("[host not ready]");
  3808. goto end;
  3809. }
  3810. /*
3811. * It is possible that by the time we get the callback to establish
  3812. * the DP pipeline e2e, the physical DP connection to the sink is
  3813. * already lost. In such cases, the DP_STATE_ABORTED would be set.
  3814. * However, it is necessary to NOT abort the display setup here so as
  3815. * to ensure that the rest of the system is in a stable state prior to
  3816. * handling the disconnect notification.
  3817. */
  3818. if (dp_display_state_is(DP_STATE_ABORTED))
  3819. dp_display_state_log("[aborted, but continue on]");
  3820. rc = dp_display_stream_enable(dp, panel);
  3821. if (rc)
  3822. goto end;
  3823. dp_display_state_add(DP_STATE_ENABLED);
  3824. end:
  3825. mutex_unlock(&dp->session_lock);
  3826. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, rc);
  3827. return rc;
  3828. }
  3829. static void dp_display_stream_post_enable(struct dp_display_private *dp,
  3830. struct dp_panel *dp_panel)
  3831. {
  3832. dp_panel->spd_config(dp_panel);
  3833. dp_panel->setup_hdr(dp_panel, NULL, false, 0, true);
  3834. }
  3835. static int dp_display_post_enable(struct dp_display *dp_display, void *panel)
  3836. {
  3837. struct dp_display_private *dp;
  3838. struct dp_panel *dp_panel;
  3839. if (!dp_display || !panel) {
  3840. DP_ERR("invalid input\n");
  3841. return -EINVAL;
  3842. }
  3843. dp = container_of(dp_display, struct dp_display_private, dp_display);
  3844. dp_panel = panel;
  3845. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  3846. mutex_lock(&dp->session_lock);
  3847. /*
3848. * If DP_STATE_ENABLED is not set, we should not do any HW
  3849. * programming.
  3850. */
  3851. if (!dp_display_state_is(DP_STATE_ENABLED)) {
  3852. dp_display_state_show("[not enabled]");
  3853. goto end;
  3854. }
  3855. #if !defined(CONFIG_SECDP)
  3856. /*
  3857. * If the physical connection to the sink is already lost by the time
  3858. * we try to set up the connection, we can just skip all the steps
  3859. * here safely.
  3860. */
  3861. if (dp_display_state_is(DP_STATE_ABORTED)) {
  3862. dp_display_state_log("[aborted]");
  3863. goto end;
  3864. }
  3865. if (!dp_display_is_ready(dp) || !dp_display_state_is(DP_STATE_READY)) {
  3866. #else
  3867. if (!dp_display_state_is(DP_STATE_READY)) {
  3868. #endif
  3869. dp_display_state_show("[not ready]");
  3870. goto end;
  3871. }
  3872. dp_display_stream_post_enable(dp, dp_panel);
  3873. #if !defined(CONFIG_SECDP)
  3874. cancel_delayed_work_sync(&dp->hdcp_cb_work);
  3875. queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
  3876. #else
  3877. #ifdef SECDP_HDCP_DISABLE
  3878. DP_INFO("skip hdcp\n");
  3879. #else
  3880. schedule_delayed_work(&dp->sec.hdcp.start_work,
  3881. msecs_to_jiffies(3500));
  3882. #endif
  3883. #ifdef SECDP_SELF_TEST
  3884. if (secdp_self_test_status(ST_HDCP_TEST) >= 0) {
  3885. cancel_delayed_work_sync(&dp->sec.hdcp.start_work);
  3886. secdp_self_test_start_hdcp_test(dp->sec.sysfs,
  3887. secdp_self_test_hdcp_on, secdp_self_test_hdcp_off);
  3888. }
  3889. #endif
3890. /* check for a poor connection only if it's DeX mode */
  3891. if (secdp_check_dex_mode(dp))
  3892. schedule_delayed_work(&dp->sec.link_status_work,
  3893. msecs_to_jiffies(13000));
  3894. #endif
  3895. if (dp_panel->audio_supported) {
  3896. dp_panel->audio->bw_code = dp->link->link_params.bw_code;
  3897. dp_panel->audio->lane_count = dp->link->link_params.lane_count;
  3898. dp_panel->audio->on(dp_panel->audio);
  3899. }
  3900. dp->aux->state &= ~DP_STATE_CTRL_POWERED_OFF;
  3901. dp->aux->state |= DP_STATE_CTRL_POWERED_ON;
  3902. #if defined(CONFIG_SECDP)
  3903. atomic_set(&dp->sec.noti_status, 0);
  3904. #endif
  3905. complete_all(&dp->notification_comp);
  3906. DP_DEBUG("display post enable complete. state: 0x%x\n", dp->state);
  3907. end:
  3908. mutex_unlock(&dp->session_lock);
  3909. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  3910. return 0;
  3911. }
  3912. static void dp_display_clear_colorspaces(struct dp_display *dp_display)
  3913. {
  3914. struct drm_connector *connector;
  3915. struct sde_connector *sde_conn;
  3916. connector = dp_display->base_connector;
  3917. sde_conn = to_sde_connector(connector);
  3918. sde_conn->color_enc_fmt = 0;
  3919. }
  3920. static int dp_display_pre_disable(struct dp_display *dp_display, void *panel)
  3921. {
  3922. struct dp_display_private *dp;
  3923. struct dp_panel *dp_panel = panel;
  3924. struct dp_link_hdcp_status *status;
  3925. int rc = 0;
  3926. size_t i;
  3927. if (!dp_display || !panel) {
  3928. DP_ERR("invalid input\n");
  3929. return -EINVAL;
  3930. }
  3931. dp = container_of(dp_display, struct dp_display_private, dp_display);
  3932. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  3933. mutex_lock(&dp->session_lock);
  3934. status = &dp->link->hdcp_status;
  3935. if (!dp_display_state_is(DP_STATE_ENABLED)) {
  3936. dp_display_state_show("[not enabled]");
  3937. goto end;
  3938. }
  3939. #if defined(CONFIG_SECDP)
  3940. if (!dp_display_state_is(DP_STATE_READY)) {
  3941. dp_display_state_show("[not ready]");
  3942. goto end;
  3943. }
  3944. #endif
  3945. dp_display_state_add(DP_STATE_HDCP_ABORTED);
  3946. cancel_delayed_work_sync(&dp->hdcp_cb_work);
  3947. if (dp_display_is_hdcp_enabled(dp) &&
  3948. status->hdcp_state != HDCP_STATE_INACTIVE) {
  3949. bool off = true;
  3950. if (dp_display_state_is(DP_STATE_SUSPENDED)) {
  3951. DP_DEBUG("Can't perform HDCP cleanup while suspended. Defer\n");
  3952. dp->hdcp_delayed_off = true;
  3953. goto clean;
  3954. }
  3955. flush_delayed_work(&dp->hdcp_cb_work);
  3956. if (dp->mst.mst_active) {
  3957. dp_display_hdcp_deregister_stream(dp,
  3958. dp_panel->stream_id);
  3959. for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
  3960. if (i != dp_panel->stream_id &&
  3961. dp->active_panels[i]) {
  3962. DP_DEBUG("Streams are still active. Skip disabling HDCP\n");
  3963. off = false;
  3964. }
  3965. }
  3966. }
  3967. if (off) {
  3968. if (dp->hdcp.ops->off)
  3969. dp->hdcp.ops->off(dp->hdcp.data);
  3970. dp_display_update_hdcp_status(dp, true);
  3971. }
  3972. }
  3973. dp_display_clear_colorspaces(dp_display);
  3974. clean:
  3975. #if defined(CONFIG_SECDP)
  3976. cancel_delayed_work(&dp->sec.hpd.noti_work);
  3977. cancel_delayed_work_sync(&dp->sec.hdcp.start_work);
  3978. cancel_delayed_work(&dp->sec.link_status_work);
  3979. cancel_delayed_work(&dp->sec.poor_discon_work);
  3980. secdp_link_backoff_stop();
  3981. #endif
  3982. if (dp_panel->audio_supported)
  3983. dp_panel->audio->off(dp_panel->audio, false);
  3984. rc = dp_display_stream_pre_disable(dp, dp_panel);
  3985. end:
  3986. mutex_unlock(&dp->session_lock);
  3987. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  3988. return 0;
  3989. }
  3990. static int dp_display_disable(struct dp_display *dp_display, void *panel)
  3991. {
  3992. int i;
  3993. struct dp_display_private *dp = NULL;
  3994. struct dp_panel *dp_panel = NULL;
  3995. struct dp_link_hdcp_status *status;
  3996. if (!dp_display || !panel) {
  3997. DP_ERR("invalid input\n");
  3998. return -EINVAL;
  3999. }
  4000. dp = container_of(dp_display, struct dp_display_private, dp_display);
  4001. dp_panel = panel;
  4002. status = &dp->link->hdcp_status;
  4003. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  4004. mutex_lock(&dp->session_lock);
  4005. if (!dp_display_state_is(DP_STATE_ENABLED)) {
  4006. dp_display_state_show("[not enabled]");
  4007. goto end;
  4008. }
  4009. if (!dp_display_state_is(DP_STATE_READY)) {
  4010. dp_display_state_show("[not ready]");
  4011. goto end;
  4012. }
  4013. dp_display_stream_disable(dp, dp_panel);
  4014. dp_display_state_remove(DP_STATE_HDCP_ABORTED);
  4015. for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
  4016. if (dp->active_panels[i]) {
  4017. if (status->hdcp_state != HDCP_STATE_AUTHENTICATED)
  4018. queue_delayed_work(dp->wq, &dp->hdcp_cb_work,
  4019. HZ/4);
  4020. break;
  4021. }
  4022. }
  4023. end:
  4024. #if defined(CONFIG_SECDP)
  4025. atomic_set(&dp->sec.noti_status, 0);
  4026. #endif
  4027. mutex_unlock(&dp->session_lock);
  4028. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  4029. return 0;
  4030. }
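/*
 * Parse and request the DP controller interrupt. The IRQ is left disabled
 * here; it is presumably enabled later, once the host hardware has been
 * initialized.
 */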
  4031. static int dp_request_irq(struct dp_display *dp_display)
  4032. {
  4033. int rc = 0;
  4034. struct dp_display_private *dp;
  4035. if (!dp_display) {
  4036. DP_ERR("invalid input\n");
  4037. return -EINVAL;
  4038. }
  4039. dp = container_of(dp_display, struct dp_display_private, dp_display);
  4040. dp->irq = irq_of_parse_and_map(dp->pdev->dev.of_node, 0);
  4041. if (dp->irq < 0) {
  4042. rc = dp->irq;
  4043. DP_ERR("failed to get irq: %d\n", rc);
  4044. return rc;
  4045. }
  4046. rc = devm_request_irq(&dp->pdev->dev, dp->irq, dp_display_irq,
  4047. IRQF_TRIGGER_HIGH, "dp_display_isr", dp);
  4048. if (rc < 0) {
  4049. DP_ERR("failed to request IRQ%u: %d\n",
  4050. dp->irq, rc);
  4051. return rc;
  4052. }
  4053. disable_irq(dp->irq);
  4054. return 0;
  4055. }
  4056. static struct dp_debug *dp_get_debug(struct dp_display *dp_display)
  4057. {
  4058. struct dp_display_private *dp;
  4059. if (!dp_display) {
  4060. DP_ERR("invalid input\n");
  4061. return ERR_PTR(-EINVAL);
  4062. }
  4063. dp = container_of(dp_display, struct dp_display_private, dp_display);
  4064. return dp->debug;
  4065. }
  4066. static int dp_display_unprepare(struct dp_display *dp_display, void *panel)
  4067. {
  4068. struct dp_display_private *dp;
  4069. struct dp_panel *dp_panel = panel;
  4070. u32 flags = 0;
  4071. if (!dp_display || !panel) {
  4072. DP_ERR("invalid input\n");
  4073. return -EINVAL;
  4074. }
  4075. dp = container_of(dp_display, struct dp_display_private, dp_display);
  4076. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  4077. mutex_lock(&dp->session_lock);
4078. /*
4079. * Check if the power-off sequence was triggered
4080. * by a source-initiated action such as a framework
4081. * reboot or suspend-resume, rather than a normal
4082. * hot plug. If the connector is in MST mode, skip
4083. * powering down the host, since AUX must be kept
4084. * alive to handle hot-plug sideband messages.
4085. */
  4086. if (dp_display_is_ready(dp) &&
  4087. (dp_display_state_is(DP_STATE_SUSPENDED) ||
  4088. !dp->mst.mst_active))
  4089. flags |= DP_PANEL_SRC_INITIATED_POWER_DOWN;
  4090. if (dp->active_stream_cnt)
  4091. goto end;
  4092. if (flags & DP_PANEL_SRC_INITIATED_POWER_DOWN) {
  4093. dp->link->psm_config(dp->link, &dp->panel->link_info, true);
  4094. dp->debug->psm_enabled = true;
  4095. dp->ctrl->off(dp->ctrl);
  4096. dp_display_host_unready(dp);
  4097. dp_display_host_deinit(dp);
  4098. dp_display_state_add(DP_STATE_SRC_PWRDN);
  4099. }
  4100. dp_display_state_remove(DP_STATE_ENABLED);
  4101. dp->aux->state &= ~DP_STATE_CTRL_POWERED_ON;
  4102. dp->aux->state |= DP_STATE_CTRL_POWERED_OFF;
  4103. complete_all(&dp->notification_comp);
4104. /* log this as it results from the user action of cable disconnection */
  4105. DP_INFO("[OK]\n");
  4106. end:
  4107. mutex_lock(&dp->accounting_lock);
  4108. dp->tot_lm_blks_in_use -= dp_panel->max_lm;
  4109. dp_panel->max_lm = 0;
  4110. mutex_unlock(&dp->accounting_lock);
  4111. dp_panel->deinit(dp_panel, flags);
  4112. mutex_unlock(&dp->session_lock);
  4113. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  4114. return 0;
  4115. }
  4116. #if defined(CONFIG_SECDP)
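/*
 * Map well-known hdisplay x vdisplay pairs to an aspect-ratio enum.
 * Timings not covered by this lookup fall back to MON_RATIO_NA.
 */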
  4117. static enum mon_aspect_ratio_t secdp_get_aspect_ratio(struct drm_display_mode *mode)
  4118. {
  4119. enum mon_aspect_ratio_t aspect_ratio = MON_RATIO_NA;
  4120. int hdisplay = mode->hdisplay;
  4121. int vdisplay = mode->vdisplay;
  4122. if ((hdisplay == 4096 && vdisplay == 2160) ||
  4123. (hdisplay == 3840 && vdisplay == 2160) ||
  4124. (hdisplay == 2560 && vdisplay == 1440) ||
  4125. (hdisplay == 1920 && vdisplay == 1080) ||
  4126. (hdisplay == 1600 && vdisplay == 900) ||
  4127. (hdisplay == 1366 && vdisplay == 768) ||
  4128. (hdisplay == 1280 && vdisplay == 720))
  4129. aspect_ratio = MON_RATIO_16_9;
  4130. else if ((hdisplay == 2560 && vdisplay == 1600) ||
  4131. (hdisplay == 1920 && vdisplay == 1200) ||
  4132. (hdisplay == 1680 && vdisplay == 1050) ||
  4133. (hdisplay == 1440 && vdisplay == 900) ||
  4134. (hdisplay == 1280 && vdisplay == 800))
  4135. aspect_ratio = MON_RATIO_16_10;
  4136. else if ((hdisplay == 3840 && vdisplay == 1600) ||
  4137. (hdisplay == 3440 && vdisplay == 1440) ||
  4138. (hdisplay == 2560 && vdisplay == 1080))
  4139. aspect_ratio = MON_RATIO_21_9;
  4140. else if ((hdisplay == 1720 && vdisplay == 1440) ||
  4141. (hdisplay == 1280 && vdisplay == 1080))
  4142. aspect_ratio = MON_RATIO_10P5_9;
  4143. else if (hdisplay == 2520 && vdisplay == 1200)
  4144. aspect_ratio = MON_RATIO_21_10;
  4145. else if (hdisplay == 1320 && vdisplay == 1200)
  4146. aspect_ratio = MON_RATIO_11_10;
  4147. else if ((hdisplay == 5120 && vdisplay == 1440) ||
  4148. (hdisplay == 3840 && vdisplay == 1080))
  4149. aspect_ratio = MON_RATIO_32_9;
  4150. else if (hdisplay == 3840 && vdisplay == 1200)
  4151. aspect_ratio = MON_RATIO_32_10;
  4152. else if (hdisplay == 1440 && vdisplay == 900)
  4153. aspect_ratio = MON_RATIO_8_5;
  4154. else if ((hdisplay == 1280 && vdisplay == 1024) ||
  4155. (hdisplay == 720 && vdisplay == 576))
  4156. aspect_ratio = MON_RATIO_5_4;
  4157. else if (hdisplay == 1280 && vdisplay == 768)
  4158. aspect_ratio = MON_RATIO_5_3;
  4159. else if ((hdisplay == 2200 && vdisplay == 1650) ||
  4160. (hdisplay == 1152 && vdisplay == 864) ||
  4161. (hdisplay == 1024 && vdisplay == 768) ||
  4162. (hdisplay == 800 && vdisplay == 600) ||
  4163. (hdisplay == 640 && vdisplay == 480))
  4164. aspect_ratio = MON_RATIO_4_3;
  4165. else if ((hdisplay == 1920 && vdisplay == 1280) ||
  4166. (hdisplay == 720 && vdisplay == 480))
  4167. aspect_ratio = MON_RATIO_3_2;
  4168. return aspect_ratio;
  4169. }
4170. /*
4171. * @target [inout] timing to be updated (prefer/mirror/dex); reset when @mode is NULL
4172. * @mode [in] timing info to compare against @target
4173. */
  4174. static bool secdp_update_max_timing(struct secdp_display_timing *target,
  4175. struct drm_display_mode *mode)
  4176. {
  4177. u64 mode_total = 0;
  4178. int mode_refresh;
  4179. if (!mode) {
  4180. /* reset */
  4181. memset(target, 0, sizeof(struct secdp_display_timing));
  4182. target->mon_ratio = MON_RATIO_NA;
  4183. return true;
  4184. }
  4185. mode_refresh = drm_mode_vrefresh(mode);
  4186. mode_total = (u64)mode->hdisplay * (u64)mode->vdisplay;
  4187. if (mode_total < target->total)
  4188. return false;
  4189. if (mode_total > target->total)
  4190. goto update;
  4191. if (mode_refresh < target->refresh_rate)
  4192. return false;
  4193. if (mode_refresh > target->refresh_rate)
  4194. goto update;
  4195. return false;
  4196. update:
  4197. target->active_h = mode->hdisplay;
  4198. target->active_v = mode->vdisplay;
  4199. target->refresh_rate = mode_refresh;
  4200. target->clock = mode->clock;
  4201. target->mon_ratio = secdp_get_aspect_ratio(mode);
  4202. target->total = mode_total;
  4203. return true;
  4204. }
  4205. static void secdp_show_max_timing(struct dp_display_private *dp)
  4206. {
  4207. struct secdp_display_timing *prf_timing, *mrr_timing, *dex_timing;
  4208. prf_timing = &dp->sec.prf_timing;
  4209. mrr_timing = &dp->sec.mrr_timing;
  4210. dex_timing = &dp->sec.dex_timing;
  4211. DP_INFO("prf:%ux%u@%uhz mrr:%ux%u@%uhz dex:%ux%u@%uhz\n",
  4212. prf_timing->active_h, prf_timing->active_v, prf_timing->refresh_rate,
  4213. mrr_timing->active_h, mrr_timing->active_v, mrr_timing->refresh_rate,
  4214. dex_timing->active_h, dex_timing->active_v, dex_timing->refresh_rate);
  4215. }
  4216. void secdp_timing_init(struct secdp_misc *sec)
  4217. {
  4218. struct secdp_prefer *prefer = &sec->prefer;
  4219. struct secdp_dex *dex = &sec->dex;
  4220. secdp_update_max_timing(&sec->prf_timing, NULL);
  4221. secdp_update_max_timing(&sec->mrr_timing, NULL);
  4222. secdp_update_max_timing(&sec->dex_timing, NULL);
  4223. prefer->ratio = MON_RATIO_NA;
  4224. prefer->exist = false;
  4225. prefer->hdisp = 0;
  4226. prefer->vdisp = 0;
  4227. prefer->refresh = 0;
  4228. dex->ignore_prefer_ratio = false;
  4229. }
  4230. /**
4231. * check if reconnection is needed when the mode changes between mirror and dex
4232. * @return false if the dex and mirror resolutions are the same
  4233. * @return true otherwise
  4234. */
  4235. bool secdp_check_reconnect(struct secdp_misc *sec)
  4236. {
  4237. struct dp_display_private *dp;
  4238. struct secdp_display_timing *dex_timing, *compare;
  4239. bool ret = false;
  4240. if (sec->hmd.exist)
  4241. goto end;
  4242. dp = container_of(sec, struct dp_display_private, sec);
  4243. secdp_show_max_timing(dp);
  4244. dex_timing = &sec->dex_timing;
  4245. if (sec->prefer.exist)
  4246. compare = &sec->prf_timing;
  4247. else
  4248. compare = &sec->mrr_timing;
  4249. if (compare->active_h == dex_timing->active_h &&
  4250. compare->active_v == dex_timing->active_v &&
  4251. compare->refresh_rate == dex_timing->refresh_rate)
  4252. goto end;
  4253. ret = true;
  4254. end:
  4255. return ret;
  4256. }
  4257. /**
4258. * reconnect when the mode changes from dex to mirror or vice versa.
4259. * needed only when a resolution change is required between mirror and dex.
  4260. */
  4261. void secdp_reconnect(struct secdp_misc *sec)
  4262. {
  4263. struct dp_display_private *dp;
  4264. secdp_logger_set_max_count(300);
  4265. dp = container_of(sec, struct dp_display_private, sec);
  4266. if (dp->link->poor_connection) {
  4267. DP_INFO("poor! skip reconnect\n");
  4268. return;
  4269. }
  4270. if (dp->mst.mst_active) {
  4271. DP_INFO("MST! skip reconnect\n");
  4272. return;
  4273. }
  4274. mutex_lock(&dp->sec.attention_lock);
  4275. DP_INFO("dex_reconnect hpd low++\n");
  4276. dp->sec.dex.reconnecting = true;
  4277. dp->sec.dex.status = DEX_MODE_CHANGING;
  4278. if (dp->sec.dex.curr == DEX_ENABLED)
  4279. dp->sec.dex.curr = DEX_MODE_CHANGING;
  4280. dp->hpd->hpd_high = false;
  4281. dp_display_host_init(dp);
  4282. dp_display_process_hpd_low(dp, false);
  4283. DP_INFO("dex_reconnect hpd low--\n");
  4284. mutex_unlock(&dp->sec.attention_lock);
  4285. /* give some time for display hal to handle disconnect event */
  4286. msleep(400);
  4287. mutex_lock(&dp->sec.attention_lock);
  4288. if (!dp_display_state_is(DP_STATE_ENABLED) &&
  4289. dp->sec.dex.reconnecting &&
  4290. !dp_display_state_is(DP_STATE_CONNECTED)) {
  4291. DP_INFO("dex_reconnect hpd high++\n");
  4292. secdp_send_hpd_event(sec, true);
  4293. if (dp_display_state_is(DP_STATE_INITIALIZED)) {
4294. /* AUX timeouts can occur during the DeX reconnect
4295. * scenario, so reinitialize AUX here
4296. */
  4297. dp_display_host_unready(dp);
  4298. dp_display_host_deinit(dp);
  4299. usleep_range(5000, 6000);
  4300. }
  4301. dp->hpd->hpd_high = true;
  4302. dp_display_host_init(dp);
  4303. dp_display_process_hpd_high(dp);
  4304. DP_INFO("dex_reconnect hpd high--\n");
  4305. }
  4306. dp->sec.dex.reconnecting = false;
  4307. mutex_unlock(&dp->sec.attention_lock);
  4308. }
  4309. void secdp_extdisp_off(struct secdp_misc *sec)
  4310. {
  4311. struct dp_display_private *dp;
  4312. dp = container_of(sec, struct dp_display_private, sec);
  4313. if (!dp_display_state_is(DP_STATE_ENABLED)) {
  4314. DP_INFO("[extdisp_off] DP is not enabled\n");
  4315. return;
  4316. }
  4317. DP_INFO("[extdisp_off] ++\n");
  4318. dp->link->psm_config(dp->link, &dp->panel->link_info, true);
  4319. dp->debug->psm_enabled = true;
  4320. dp_display_state_add(DP_STATE_SRC_PWRDN);
  4321. dp->hpd->hpd_high = false;
  4322. /* see secdp_process_attention() */
  4323. secdp_clear_link_status_cnt(dp->link);
  4324. dp_display_disconnect_sync(dp);
  4325. dp_display_host_deinit(dp);
  4326. DP_INFO("[extdisp_off] --\n");
  4327. }
  4328. void secdp_extdisp_on(struct secdp_misc *sec)
  4329. {
  4330. struct dp_display_private *dp;
  4331. dp = container_of(sec, struct dp_display_private, sec);
  4332. if (!dp_display_state_is(DP_STATE_CONFIGURED) || !secdp_get_cable_status()) {
  4333. DP_INFO("[extdisp_on] cable is out\n");
  4334. return;
  4335. }
  4336. if (dp_display_state_is(DP_STATE_CONNECTED)) {
  4337. DP_INFO("[extdisp_on] already connected\n");
  4338. return;
  4339. }
  4340. DP_INFO("[extdisp_on] ++\n");
  4341. dp->hpd->hpd_high = true;
  4342. /* see secdp_process_attention() */
  4343. secdp_clear_link_status_cnt(dp->link);
  4344. dp_display_state_remove(DP_STATE_ABORTED);
  4345. queue_work(dp->wq, &dp->connect_work);
  4346. DP_INFO("[extdisp_on] --\n");
  4347. }
  4348. /**
4349. * check if the given mode (timing) is the 640x480 fail-safe mode
  4350. */
  4351. static bool secdp_check_fail_safe(struct drm_display_mode *mode)
  4352. {
  4353. bool ret = false;
  4354. if (mode->hdisplay == 640 && mode->vdisplay == 480)
  4355. ret = true;
  4356. return ret;
  4357. }
  4358. /**
4359. * check if the given ratio is one of the DeX ratios (16:9, 16:10, 21:9)
  4360. */
  4361. static bool secdp_check_dex_ratio(enum mon_aspect_ratio_t ratio)
  4362. {
  4363. bool ret = false;
  4364. switch (ratio) {
  4365. case MON_RATIO_16_9:
  4366. case MON_RATIO_16_10:
  4367. case MON_RATIO_21_9:
  4368. ret = true;
  4369. break;
  4370. default:
  4371. break;
  4372. }
  4373. return ret;
  4374. }
  4375. /**
4376. * check if the mode's active_h/active_v fit within the maximum DeX columns/rows
  4377. */
  4378. static bool secdp_check_dex_rowcol(struct dp_display_private *dp,
  4379. struct drm_display_mode *mode)
  4380. {
  4381. int max_cols = DEX_DFT_COL, max_rows = DEX_DFT_ROW;
  4382. bool ret = false;
  4383. if (secdp_get_dex_res(dp) == DEX_RES_MAX) {
  4384. max_cols = DEX_MAX_COL;
  4385. max_rows = DEX_MAX_ROW;
  4386. }
  4387. if ((mode->hdisplay <= max_cols) && (mode->vdisplay <= max_rows))
  4388. ret = true;
  4389. return ret;
  4390. }
  4391. /**
4392. * check if the current refresh rate is within the range supported in DeX mode
  4393. */
  4394. static bool secdp_check_dex_refresh(struct dp_display_private *dp,
  4395. struct drm_display_mode *mode)
  4396. {
  4397. int mode_refresh = drm_mode_vrefresh(mode);
  4398. bool ret = false;
  4399. if (mode_refresh < DEX_REFRESH_MIN)
  4400. goto end;
  4401. #if 0
  4402. if (dp->sec.dex.adapter_check_skip) {
  4403. ret = true;
  4404. goto end;
  4405. }
  4406. #endif
  4407. if (mode_refresh <= DEX_REFRESH_MAX) {
  4408. ret = true;
  4409. goto end;
  4410. }
  4411. end:
  4412. return ret;
  4413. }
  4414. static bool secdp_exceed_mst_max_pclk(struct dp_display_private *dp,
  4415. struct drm_display_mode *mode)
  4416. {
  4417. bool ret = false;
  4418. if (!dp->mst.mst_active) {
  4419. /* it's SST. No need to check pclk */
  4420. goto end;
  4421. }
  4422. if (mode->clock <= MST_MAX_PCLK) {
4423. /* it's MST, and the current pclk does not exceed MST's max pclk */
  4424. goto end;
  4425. }
4426. /* it's MST, and the current pclk exceeds MST's max pclk */
  4427. ret = true;
  4428. end:
  4429. return ret;
  4430. }
  4431. static bool secdp_check_prefer_resolution(struct dp_display_private *dp,
  4432. struct drm_display_mode *mode)
  4433. {
  4434. struct secdp_misc *sec;
  4435. bool ret = false;
  4436. if (!dp || !mode)
  4437. goto end;
  4438. sec = &dp->sec;
  4439. if (!sec || sec->debug.prefer_check_skip)
  4440. goto end;
  4441. if (mode->type & DRM_MODE_TYPE_PREFERRED)
  4442. ret = true;
  4443. end:
  4444. return ret;
  4445. }
  4446. static bool secdp_has_higher_refresh(struct dp_display_private *dp,
  4447. struct drm_display_mode *mode,
  4448. int mode_refresh)
  4449. {
  4450. struct secdp_prefer *prefer = &dp->sec.prefer;
  4451. bool ret = false;
  4452. if (dp->panel->tbox || secdp_check_prefer_resolution(dp, mode))
  4453. goto end;
  4454. if (mode->hdisplay == prefer->hdisp &&
  4455. mode->vdisplay == prefer->vdisp &&
  4456. mode_refresh > prefer->refresh)
  4457. ret = true;
  4458. end:
  4459. return ret;
  4460. }
  4461. /**
  4462. * check if current timing(mode) is valid compared to prefer timing
  4463. * return true if it's valid. false otherwise
  4464. */
  4465. static bool secdp_check_hdisp_vdisp(struct dp_display_private *dp,
  4466. struct drm_display_mode *mode)
  4467. {
  4468. struct secdp_prefer *prefer = &dp->sec.prefer;
  4469. bool ret = true;
  4470. if (dp->panel->tbox || secdp_check_prefer_resolution(dp, mode))
  4471. goto end;
  4472. if (prefer->hdisp > prefer->vdisp) {
  4473. if (mode->hdisplay < mode->vdisplay)
  4474. ret = false;
  4475. goto end;
  4476. }
  4477. if (prefer->hdisp < prefer->vdisp) {
  4478. if (mode->hdisplay > mode->vdisplay)
  4479. ret = false;
  4480. }
  4481. end:
  4482. if (!ret) {
  4483. DP_INFO("weird timing! %dx%d@%dhz\n",
  4484. mode->hdisplay, mode->vdisplay, drm_mode_vrefresh(mode));
  4485. }
  4486. return ret;
  4487. }
  4488. #ifndef SECDP_MAX_HBR2
  4489. /**
4490. * check if the current timing is 8K (7680x4320) or larger
  4491. */
  4492. static bool secdp_check_uhd2(struct dp_display_private *dp,
  4493. struct drm_display_mode *mode)
  4494. {
  4495. bool ret = false;
  4496. if (dp->panel->tbox || secdp_check_prefer_resolution(dp, mode))
  4497. goto end;
  4498. if (mode->hdisplay >= 7680 && mode->vdisplay >= 4320)
  4499. ret = true;
  4500. end:
  4501. return ret;
  4502. }
  4503. #endif
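/*
 * Table of resolutions eligible for DeX mode. An entry is accepted only if
 * its dex_res level does not exceed the adapter's maximum DeX resolution
 * (see secdp_check_dex_resolution()) and, unless the prefer ratio is being
 * ignored, its aspect ratio matches the preferred timing.
 */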
  4504. #define __NA (-1) /* not available */
  4505. static struct secdp_display_timing secdp_dex_resolution[] = {
  4506. {1600, 900, __NA, false, __NA, DEX_RES_1600X900, MON_RATIO_16_9},
  4507. {1920, 1080, __NA, false, __NA, DEX_RES_1920X1080, MON_RATIO_16_9},
  4508. {1920, 1200, __NA, false, __NA, DEX_RES_1920X1200, MON_RATIO_16_10},
  4509. {2560, 1080, __NA, false, __NA, DEX_RES_2560X1080, MON_RATIO_21_9},
  4510. {2560, 1440, __NA, false, __NA, DEX_RES_2560X1440, MON_RATIO_16_9},
  4511. {2560, 1600, __NA, false, __NA, DEX_RES_2560X1600, MON_RATIO_16_10},
  4512. {3440, 1440, __NA, false, __NA, DEX_RES_3440X1440, MON_RATIO_21_9},
  4513. };
  4514. #define DEX_FAIL_SAFE 2073600 /* 1920x1080 */
  4515. static bool secdp_dex_fail_safe(struct drm_display_mode *mode)
  4516. {
  4517. if ((mode->hdisplay * mode->vdisplay) < DEX_FAIL_SAFE)
  4518. return true;
  4519. return false;
  4520. }
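/*
 * Decide whether a mode can be used in DeX: fail-safe modes below 1920x1080
 * pass immediately when the prefer ratio is ignored, the refresh rate must
 * be within the DeX range, preferred modes pass if they fit the DeX
 * row/column and ratio limits, and everything else is matched against the
 * secdp_dex_resolution table above.
 */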
  4521. static bool secdp_check_dex_resolution(struct dp_display_private *dp,
  4522. struct drm_display_mode *mode, bool *fail_safe)
  4523. {
  4524. struct secdp_display_timing *dex_table = secdp_dex_resolution;
  4525. struct secdp_misc *sec = &dp->sec;
  4526. struct secdp_prefer *prefer = &sec->prefer;
  4527. struct secdp_dex *dex = &sec->dex;
  4528. enum mon_aspect_ratio_t mode_ratio = secdp_get_aspect_ratio(mode);
  4529. u64 i;
  4530. bool mode_interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
  4531. bool prefer_support = dp->parser->prefer_support;
  4532. bool prefer_mode = secdp_check_prefer_resolution(dp, mode);
  4533. bool ret = false;
  4534. if (dex->ignore_prefer_ratio && secdp_dex_fail_safe(mode)) {
  4535. *fail_safe = ret = true;
  4536. goto end;
  4537. }
  4538. if (!secdp_check_dex_refresh(dp, mode))
  4539. goto end;
  4540. if (prefer_support && prefer_mode &&
  4541. secdp_check_dex_rowcol(dp, mode) &&
  4542. secdp_check_dex_ratio(mode_ratio)) {
  4543. ret = true;
  4544. goto end;
  4545. }
  4546. for (i = 0; i < ARRAY_SIZE(secdp_dex_resolution); i++) {
  4547. if ((mode_interlaced != dex_table[i].interlaced) ||
  4548. (mode->hdisplay != dex_table[i].active_h) ||
  4549. (mode->vdisplay != dex_table[i].active_v))
  4550. continue;
  4551. if (!dex->ignore_prefer_ratio && dex_table[i].mon_ratio != prefer->ratio)
  4552. continue;
  4553. if (dex_table[i].dex_res <= secdp_get_dex_res(dp)) {
  4554. ret = true;
  4555. break;
  4556. }
  4557. }
  4558. end:
  4559. return ret;
  4560. }
  4561. #if defined(REMOVE_YUV420_AT_PREFER)
  4562. static bool secdp_prefer_remove_yuv420(struct dp_display_private *dp,
  4563. struct drm_display_mode *mode)
  4564. {
  4565. struct drm_connector *connector = dp->dp_display.base_connector;
  4566. u8 vic;
  4567. bool result = false;
  4568. if (!secdp_check_prefer_resolution(dp, mode))
  4569. goto exit;
  4570. if (!drm_mode_is_420_only(&connector->display_info, mode))
  4571. goto exit;
  4572. vic = drm_match_cea_mode(mode);
4573. /* HACK: prevent the preferred mode from becoming ycbcr420 */
  4574. bitmap_clear(connector->display_info.hdmi.y420_vdb_modes, vic, 1);
  4575. DP_INFO("unset ycbcr420 of vic %d\n", vic);
  4576. result = true;
  4577. exit:
  4578. return result;
  4579. }
  4580. #endif
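/*
 * Top-level mode filter for SECDP: records the preferred/mirror/DeX max
 * timings, validates the mode against the MST pixel-clock cap and the
 * minimum mirror refresh rate, and decides whether the mode should be
 * exposed, depending on whether DeX mode is currently active.
 */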
  4581. static bool secdp_check_resolution(struct dp_display_private *dp,
  4582. struct drm_display_mode *mode,
  4583. bool supported)
  4584. {
  4585. struct secdp_misc *sec = &dp->sec;
  4586. struct secdp_prefer *prefer = &sec->prefer;
  4587. struct secdp_dex *dex = &sec->dex;
  4588. struct secdp_display_timing *prf_timing, *mrr_timing, *dex_timing;
  4589. bool prefer_support = dp->parser->prefer_support;
  4590. bool prefer_mode, ret = false, dex_supported = false;
  4591. bool dex_fail_safe = false, ratio_check = false;
  4592. int mode_refresh = drm_mode_vrefresh(mode);
  4593. prf_timing = &sec->prf_timing;
  4594. mrr_timing = &sec->mrr_timing;
  4595. dex_timing = &sec->dex_timing;
  4596. prefer_mode = secdp_check_prefer_resolution(dp, mode);
  4597. if (prefer_mode) {
  4598. secdp_mode_count_dec(dp);
  4599. secdp_show_max_timing(dp);
  4600. if ((mrr_timing->clock || prf_timing->clock) && !dex_timing->clock) {
  4601. dex->ignore_prefer_ratio = true;
  4602. DP_INFO("[dex] ignore prefer ratio\n");
  4603. }
  4604. prefer->hdisp = mode->hdisplay;
  4605. prefer->vdisp = mode->vdisplay;
  4606. prefer->refresh = mode_refresh;
  4607. prefer->ratio = secdp_get_aspect_ratio(mode);
  4608. DP_INFO_M("prefer timing found! %dx%d@%dhz, %s\n",
  4609. prefer->hdisp, prefer->vdisp, prefer->refresh,
  4610. secdp_aspect_ratio_to_string(prefer->ratio));
  4611. #if defined(REMOVE_YUV420_AT_PREFER)
  4612. secdp_prefer_remove_yuv420(dp, mode);
  4613. #endif
  4614. if (!prefer_support) {
  4615. DP_INFO("remove prefer!\n");
  4616. mode->type &= (~DRM_MODE_TYPE_PREFERRED);
  4617. }
  4618. }
  4619. if (prefer->ratio == MON_RATIO_NA) {
  4620. dex->ignore_prefer_ratio = true;
  4621. DP_INFO("prefer timing is absent, ignore!\n");
  4622. }
  4623. if (!supported || secdp_exceed_mst_max_pclk(dp, mode)
  4624. || mode_refresh < MIRROR_REFRESH_MIN) {
  4625. ret = false;
  4626. goto end;
  4627. }
  4628. ratio_check = secdp_check_hdisp_vdisp(dp, mode);
  4629. if (prefer_mode) {
  4630. prefer->exist = true;
  4631. secdp_update_max_timing(prf_timing, mode);
  4632. } else if (prefer->refresh > 0 &&
  4633. secdp_has_higher_refresh(dp, mode, mode_refresh)) {
4634. /* found the same h/v resolution but a higher refresh
4635. * rate than the preferred timing
4636. */
  4637. secdp_update_max_timing(prf_timing, mode);
  4638. mode->type |= DRM_MODE_TYPE_PREFERRED;
  4639. #ifndef SECDP_MAX_HBR2
  4640. } else if (secdp_check_uhd2(dp, mode)) {
  4641. secdp_update_max_timing(prf_timing, mode);
  4642. mode->type |= DRM_MODE_TYPE_PREFERRED;
  4643. #endif
  4644. } else {
  4645. if (ratio_check)
  4646. secdp_update_max_timing(mrr_timing, mode);
  4647. }
  4648. if (sec->hmd.exist) {
  4649. /* skip dex resolution check as HMD doesn't have DeX */
  4650. ret = true;
  4651. goto end;
  4652. }
  4653. dex_supported = secdp_check_dex_resolution(dp, mode, &dex_fail_safe);
  4654. if (dex_supported && !dex_fail_safe)
  4655. secdp_update_max_timing(dex_timing, mode);
  4656. if (!secdp_check_dex_mode(dp))
  4657. ret = ratio_check ? supported : false;
  4658. else
  4659. ret = dex_supported;
  4660. if (!ret && secdp_check_fail_safe(mode))
  4661. ret = true;
  4662. end:
  4663. return ret;
  4664. }
4665. #endif /* CONFIG_SECDP */
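/*
 * Validate that the mode's bandwidth fits the trained link. With 8b/10b
 * channel coding each link symbol carries 8 data bits, so the usable
 * bandwidth is roughly lane_count * link_rate * 8 (in kbps when the link
 * rate is expressed in kHz), compared against pixel_clock * bpp. This
 * reading of the math is an interpretation of the code below, not taken
 * from driver documentation.
 */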
  4666. static int dp_display_validate_link_clock(struct dp_display_private *dp,
  4667. struct drm_display_mode *mode, struct dp_display_mode dp_mode)
  4668. {
  4669. u32 mode_rate_khz = 0, supported_rate_khz = 0, mode_bpp = 0;
  4670. u32 mode_bpc = 0, tmds_clock = 0;
  4671. bool dsc_en;
  4672. int rate;
  4673. struct msm_compression_info *c_info = &dp_mode.timing.comp_info;
  4674. dsc_en = c_info->enabled;
  4675. if (dsc_en) {
  4676. mode_bpp = DSC_BPP(c_info->dsc_info.config);
  4677. mode_bpc = c_info->dsc_info.config.bits_per_component;
  4678. } else {
  4679. mode_bpp = dp_mode.timing.bpp;
  4680. mode_bpc = mode_bpp / 3;
  4681. }
  4682. mode_rate_khz = mode->clock * mode_bpp;
  4683. rate = drm_dp_bw_code_to_link_rate(dp->link->link_params.bw_code);
  4684. tmds_clock = mode->clock * mode_bpc / 8;
4685. /*
4686. * For an HBR2 dongle, limit the TMDS clock to ensure a max resolution
4687. * of 4k@30fps for each MST port
4688. */
  4689. if (dp->mst.mst_active && rate <= 540000 && tmds_clock > MAX_TMDS_CLOCK_HDMI_1_4) {
  4690. DP_DEBUG("Limit mode clock: %d kHz\n", mode->clock);
  4691. return -EPERM;
  4692. }
  4693. supported_rate_khz = dp->link->link_params.lane_count * rate * 8;
  4694. if (mode_rate_khz > supported_rate_khz) {
  4695. DP_DEBUG("mode_rate: %d kHz, supported_rate: %d kHz\n",
  4696. mode_rate_khz, supported_rate_khz);
  4697. return -EPERM;
  4698. }
  4699. return 0;
  4700. }
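/*
 * With widebus enabled the controller fetches two pixels per clock, so the
 * effective pixel clock compared against max_pclk_khz is halved (assumption
 * based on the >> 1 below).
 */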
  4701. static int dp_display_validate_pixel_clock(struct dp_display_mode dp_mode,
  4702. u32 max_pclk_khz)
  4703. {
  4704. u32 pclk_khz = dp_mode.timing.widebus_en ?
  4705. (dp_mode.timing.pixel_clk_khz >> 1) :
  4706. dp_mode.timing.pixel_clk_khz;
  4707. if (pclk_khz > max_pclk_khz) {
  4708. DP_DEBUG("clk: %d kHz, max: %d kHz\n", pclk_khz, max_pclk_khz);
  4709. return -EPERM;
  4710. }
  4711. return 0;
  4712. }
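/*
 * Check whether enough layer mixers, DSC blocks and 3D mux resources are
 * available for this mode. Returns -EAGAIN when DSC resources are
 * insufficient so the caller can retry with the DSC capability cleared.
 */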
  4713. static int dp_display_validate_topology(struct dp_display_private *dp,
  4714. struct dp_panel *dp_panel, struct drm_display_mode *mode,
  4715. struct dp_display_mode *dp_mode,
  4716. const struct msm_resource_caps_info *avail_res)
  4717. {
  4718. int rc;
  4719. struct msm_drm_private *priv = dp->priv;
  4720. const u32 dual = 2, quad = 4;
  4721. u32 num_lm = 0, num_dsc = 0, num_3dmux = 0;
  4722. bool dsc_capable = dp_mode->capabilities & DP_PANEL_CAPS_DSC;
  4723. u32 fps = dp_mode->timing.refresh_rate;
  4724. int avail_lm = 0;
  4725. mutex_lock(&dp->accounting_lock);
  4726. rc = msm_get_mixer_count(priv, mode, avail_res, &num_lm);
  4727. if (rc) {
  4728. DP_ERR("error getting mixer count. rc:%d\n", rc);
  4729. goto end;
  4730. }
  4731. /* Merge using DSC, if enabled */
  4732. if (dp_panel->dsc_en && dsc_capable) {
  4733. rc = msm_get_dsc_count(priv, mode->hdisplay, &num_dsc);
  4734. if (rc) {
  4735. DP_ERR("error getting dsc count. rc:%d\n", rc);
  4736. goto end;
  4737. }
  4738. num_dsc = max(num_lm, num_dsc);
  4739. if ((num_dsc > avail_res->num_lm) || (num_dsc > avail_res->num_dsc)) {
  4740. DP_DEBUG("mode %sx%d: not enough resources for dsc %d dsc_a:%d lm_a:%d\n",
  4741. mode->name, fps, num_dsc, avail_res->num_dsc,
  4742. avail_res->num_lm);
  4743. /* Clear DSC caps and retry */
  4744. dp_mode->capabilities &= ~DP_PANEL_CAPS_DSC;
  4745. rc = -EAGAIN;
  4746. goto end;
  4747. } else {
  4748. /* Only DSCMERGE is supported on DP */
  4749. num_lm = num_dsc;
  4750. }
  4751. }
  4752. if (!num_dsc && (num_lm == 2) && avail_res->num_3dmux) {
  4753. num_3dmux = 1;
  4754. }
  4755. avail_lm = avail_res->num_lm + avail_res->num_lm_in_use - dp->tot_lm_blks_in_use
  4756. + dp_panel->max_lm;
  4757. if (num_lm > avail_lm) {
  4758. DP_DEBUG("mode %sx%d is invalid, not enough lm req:%d avail:%d\n",
  4759. mode->name, fps, num_lm, avail_lm);
  4760. rc = -EPERM;
  4761. goto end;
  4762. } else if (!num_dsc && (num_lm == dual && !num_3dmux)) {
  4763. DP_DEBUG("mode %sx%d is invalid, not enough 3dmux %d %d\n",
  4764. mode->name, fps, num_3dmux, avail_res->num_3dmux);
  4765. rc = -EPERM;
  4766. goto end;
  4767. } else if (num_lm == quad && num_dsc != quad) {
  4768. DP_DEBUG("mode %sx%d is invalid, unsupported DP topology lm:%d dsc:%d\n",
  4769. mode->name, fps, num_lm, num_dsc);
  4770. rc = -EPERM;
  4771. goto end;
  4772. }
  4773. #if !defined(CONFIG_SECDP)
  4774. DP_DEBUG_V("mode %sx%d is valid, supported DP topology lm:%d dsc:%d 3dmux:%d\n",
  4775. mode->name, fps, num_lm, num_dsc, num_3dmux);
  4776. #endif
  4777. dp_mode->lm_count = num_lm;
  4778. rc = 0;
  4779. end:
  4780. mutex_unlock(&dp->accounting_lock);
  4781. return rc;
  4782. }
  4783. static enum drm_mode_status dp_display_validate_mode(
  4784. struct dp_display *dp_display,
  4785. void *panel, struct drm_display_mode *mode,
  4786. const struct msm_resource_caps_info *avail_res)
  4787. {
  4788. struct dp_display_private *dp;
  4789. struct dp_panel *dp_panel;
  4790. struct dp_debug *debug;
  4791. enum drm_mode_status mode_status = MODE_BAD;
  4792. struct dp_display_mode dp_mode;
  4793. int rc = 0;
  4794. if (!dp_display || !mode || !panel ||
  4795. !avail_res || !avail_res->max_mixer_width) {
  4796. DP_ERR("invalid params\n");
  4797. return mode_status;
  4798. }
  4799. dp = container_of(dp_display, struct dp_display_private, dp_display);
  4800. mutex_lock(&dp->session_lock);
  4801. dp_panel = panel;
  4802. if (!dp_panel->connector) {
  4803. DP_ERR("invalid connector\n");
  4804. goto end;
  4805. }
  4806. debug = dp->debug;
  4807. if (!debug)
  4808. goto end;
  4809. dp_display->convert_to_dp_mode(dp_display, panel, mode, &dp_mode);
  4810. /* As per spec, 640x480 mode should always be present as fail-safe */
  4811. if ((dp_mode.timing.h_active == 640) && (dp_mode.timing.v_active == 480) &&
  4812. (dp_mode.timing.pixel_clk_khz == 25175)) {
  4813. goto skip_validation;
  4814. }
  4815. rc = dp_display_validate_topology(dp, dp_panel, mode, &dp_mode, avail_res);
  4816. if (rc == -EAGAIN) {
  4817. dp_panel->convert_to_dp_mode(dp_panel, mode, &dp_mode);
  4818. rc = dp_display_validate_topology(dp, dp_panel, mode, &dp_mode, avail_res);
  4819. }
  4820. if (rc)
  4821. goto end;
  4822. rc = dp_display_validate_link_clock(dp, mode, dp_mode);
  4823. if (rc)
  4824. goto end;
  4825. rc = dp_display_validate_pixel_clock(dp_mode, dp_display->max_pclk_khz);
  4826. if (rc)
  4827. goto end;
  4828. skip_validation:
  4829. mode_status = MODE_OK;
  4830. if (!avail_res->num_lm_in_use) {
  4831. mutex_lock(&dp->accounting_lock);
  4832. dp->tot_lm_blks_in_use -= dp_panel->max_lm;
  4833. dp_panel->max_lm = max(dp_panel->max_lm, dp_mode.lm_count);
  4834. dp->tot_lm_blks_in_use += dp_panel->max_lm;
  4835. mutex_unlock(&dp->accounting_lock);
  4836. }
  4837. end:
  4838. mutex_unlock(&dp->session_lock);
  4839. #if !defined(CONFIG_SECDP)
  4840. DP_DEBUG_V("[%s clk:%d] mode is %s\n", mode->name, mode->clock,
  4841. (mode_status == MODE_OK) ? "valid" : "invalid");
  4842. #else
  4843. {
  4844. u32 mode_bpp = 0;
  4845. bool dsc_en;
  4846. /* see "dp_display_validate_link_clock()" */
  4847. dsc_en = dp_mode.timing.comp_info.enabled;
  4848. mode_bpp = dsc_en ?
  4849. DSC_BPP(dp_mode.timing.comp_info.dsc_info.config)
  4850. : dp_mode.timing.bpp;
  4851. if (!secdp_check_resolution(dp, mode, mode_status == MODE_OK))
  4852. mode_status = MODE_BAD;
  4853. if (secdp_mode_count_check(dp)) {
  4854. DP_INFO_M("%9s@%dhz | %s | max:%7d cur:%7d | vt:%d bpp:%u\n", mode->name,
  4855. drm_mode_vrefresh(mode), mode_status == MODE_BAD ? "NG" : "OK",
  4856. dp_display->max_pclk_khz, mode->clock, dp_panel->video_test, mode_bpp);
  4857. }
  4858. }
  4859. #endif
  4860. return mode_status;
  4861. }
  4862. static int dp_display_get_available_dp_resources(struct dp_display *dp_display,
  4863. const struct msm_resource_caps_info *avail_res,
  4864. struct msm_resource_caps_info *max_dp_avail_res)
  4865. {
  4866. if (!dp_display || !avail_res || !max_dp_avail_res) {
  4867. DP_ERR("invalid arguments\n");
  4868. return -EINVAL;
  4869. }
  4870. memcpy(max_dp_avail_res, avail_res,
  4871. sizeof(struct msm_resource_caps_info));
  4872. max_dp_avail_res->num_lm = min(avail_res->num_lm,
  4873. dp_display->max_mixer_count);
  4874. max_dp_avail_res->num_dsc = min(avail_res->num_dsc,
  4875. dp_display->max_dsc_count);
  4876. #if !defined(CONFIG_SECDP)
  4877. DP_DEBUG_V("max_lm:%d, avail_lm:%d, dp_avail_lm:%d\n",
  4878. dp_display->max_mixer_count, avail_res->num_lm,
  4879. max_dp_avail_res->num_lm);
  4880. DP_DEBUG_V("max_dsc:%d, avail_dsc:%d, dp_avail_dsc:%d\n",
  4881. dp_display->max_dsc_count, avail_res->num_dsc,
  4882. max_dp_avail_res->num_dsc);
  4883. #endif
  4884. return 0;
  4885. }
  4886. static int dp_display_get_modes(struct dp_display *dp, void *panel,
  4887. struct dp_display_mode *dp_mode)
  4888. {
  4889. struct dp_display_private *dp_display;
  4890. struct dp_panel *dp_panel;
  4891. int ret = 0;
  4892. if (!dp || !panel) {
  4893. DP_ERR("invalid params\n");
  4894. return 0;
  4895. }
  4896. dp_panel = panel;
  4897. if (!dp_panel->connector) {
  4898. DP_ERR("invalid connector\n");
  4899. return 0;
  4900. }
  4901. dp_display = container_of(dp, struct dp_display_private, dp_display);
  4902. ret = dp_panel->get_modes(dp_panel, dp_panel->connector, dp_mode);
  4903. if (dp_mode->timing.pixel_clk_khz)
  4904. dp->max_pclk_khz = dp_mode->timing.pixel_clk_khz;
  4905. return ret;
  4906. }
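/*
 * Convert a drm_display_mode to a dp_display_mode and, when the panel
 * supports DSC, account for DSC blocks: the panel's previous reservation is
 * released and re-acquired only if enough free blocks remain for the new
 * mode.
 */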
  4907. static void dp_display_convert_to_dp_mode(struct dp_display *dp_display,
  4908. void *panel,
  4909. const struct drm_display_mode *drm_mode,
  4910. struct dp_display_mode *dp_mode)
  4911. {
  4912. int rc;
  4913. struct dp_display_private *dp;
  4914. struct dp_panel *dp_panel;
  4915. u32 free_dsc_blks = 0, required_dsc_blks = 0, curr_dsc = 0, new_dsc = 0;
  4916. if (!dp_display || !drm_mode || !dp_mode || !panel) {
  4917. DP_ERR("invalid input\n");
  4918. return;
  4919. }
  4920. dp = container_of(dp_display, struct dp_display_private, dp_display);
  4921. dp_panel = panel;
  4922. memset(dp_mode, 0, sizeof(*dp_mode));
  4923. if (dp_panel->dsc_en) {
  4924. free_dsc_blks = dp_display->max_dsc_count -
  4925. dp->tot_dsc_blks_in_use +
  4926. dp_panel->dsc_blks_in_use;
  4927. #if !defined(CONFIG_SECDP)
  4928. DP_DEBUG_V("Before: in_use:%d, max:%d, free:%d\n",
  4929. dp->tot_dsc_blks_in_use,
  4930. dp_display->max_dsc_count, free_dsc_blks);
  4931. #endif
  4932. rc = msm_get_dsc_count(dp->priv, drm_mode->hdisplay,
  4933. &required_dsc_blks);
  4934. if (rc) {
  4935. DP_ERR("error getting dsc count. rc:%d\n", rc);
  4936. return;
  4937. }
  4938. curr_dsc = dp_panel->dsc_blks_in_use;
  4939. dp->tot_dsc_blks_in_use -= dp_panel->dsc_blks_in_use;
  4940. dp_panel->dsc_blks_in_use = 0;
  4941. if (free_dsc_blks >= required_dsc_blks) {
  4942. dp_mode->capabilities |= DP_PANEL_CAPS_DSC;
  4943. new_dsc = max(curr_dsc, required_dsc_blks);
  4944. dp_panel->dsc_blks_in_use = new_dsc;
  4945. dp->tot_dsc_blks_in_use += new_dsc;
  4946. }
  4947. #if !defined(CONFIG_SECDP)
  4948. DP_DEBUG_V("After: in_use:%d, max:%d, free:%d, req:%d, caps:0x%x\n",
  4949. dp->tot_dsc_blks_in_use,
  4950. dp_display->max_dsc_count,
  4951. free_dsc_blks, required_dsc_blks,
  4952. dp_mode->capabilities);
  4953. #endif
  4954. }
  4955. dp_panel->convert_to_dp_mode(dp_panel, drm_mode, dp_mode);
  4956. }
  4957. static int dp_display_config_hdr(struct dp_display *dp_display, void *panel,
  4958. struct drm_msm_ext_hdr_metadata *hdr, bool dhdr_update)
  4959. {
  4960. struct dp_panel *dp_panel;
  4961. struct sde_connector *sde_conn;
  4962. struct dp_display_private *dp;
  4963. u64 core_clk_rate;
  4964. bool flush_hdr;
  4965. if (!dp_display || !panel) {
  4966. DP_ERR("invalid input\n");
  4967. return -EINVAL;
  4968. }
  4969. dp_panel = panel;
  4970. dp = container_of(dp_display, struct dp_display_private, dp_display);
  4971. sde_conn = to_sde_connector(dp_panel->connector);
  4972. core_clk_rate = dp->power->clk_get_rate(dp->power, "core_clk");
  4973. if (!core_clk_rate) {
  4974. DP_ERR("invalid rate for core_clk\n");
  4975. return -EINVAL;
  4976. }
  4977. if (!dp_display_state_is(DP_STATE_ENABLED)) {
  4978. dp_display_state_show("[not enabled]");
  4979. return 0;
  4980. }
4981. /*
4982. * In rare cases where the HDR metadata is updated independently,
4983. * flush the HDR metadata immediately instead of relying on
4984. * the colorspace update
4985. */
  4986. flush_hdr = !sde_conn->colorspace_updated;
  4987. if (flush_hdr)
  4988. DP_DEBUG("flushing the HDR metadata\n");
  4989. else
  4990. DP_DEBUG("piggy-backing with colorspace\n");
  4991. return dp_panel->setup_hdr(dp_panel, hdr, dhdr_update,
  4992. core_clk_rate, flush_hdr);
  4993. }
  4994. static int dp_display_setup_colospace(struct dp_display *dp_display,
  4995. void *panel,
  4996. u32 colorspace)
  4997. {
  4998. struct dp_panel *dp_panel;
  4999. struct dp_display_private *dp;
  5000. if (!dp_display || !panel) {
  5001. pr_err("invalid input\n");
  5002. return -EINVAL;
  5003. }
  5004. dp = container_of(dp_display, struct dp_display_private, dp_display);
  5005. if (!dp_display_state_is(DP_STATE_ENABLED)) {
  5006. dp_display_state_show("[not enabled]");
  5007. return 0;
  5008. }
  5009. dp_panel = panel;
  5010. return dp_panel->set_colorspace(dp_panel, colorspace);
  5011. }
  5012. static int dp_display_create_workqueue(struct dp_display_private *dp)
  5013. {
  5014. dp->wq = create_singlethread_workqueue("drm_dp");
  5015. if (IS_ERR_OR_NULL(dp->wq)) {
  5016. DP_ERR("Error creating wq\n");
  5017. return -EPERM;
  5018. }
  5019. INIT_DELAYED_WORK(&dp->hdcp_cb_work, dp_display_hdcp_cb_work);
  5020. INIT_WORK(&dp->connect_work, dp_display_connect_work);
  5021. INIT_WORK(&dp->attention_work, dp_display_attention_work);
  5022. INIT_WORK(&dp->disconnect_work, dp_display_disconnect_work);
  5023. return 0;
  5024. }
  5025. static int dp_display_bridge_internal_hpd(void *dev, bool hpd, bool hpd_irq)
  5026. {
  5027. struct dp_display_private *dp = dev;
  5028. struct drm_device *drm_dev = dp->dp_display.drm_dev;
  5029. if (!drm_dev || !drm_dev->mode_config.poll_enabled)
  5030. return -EBUSY;
  5031. if (hpd_irq)
  5032. dp_display_mst_attention(dp);
  5033. else
  5034. dp->hpd->simulate_connect(dp->hpd, hpd);
  5035. return 0;
  5036. }
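/*
 * Look up the optional qcom,dp-aux-bridge phandle. A missing phandle is not
 * an error; if the phandle exists but the bridge driver has not registered
 * yet, probe is deferred.
 */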
  5037. static int dp_display_init_aux_bridge(struct dp_display_private *dp)
  5038. {
  5039. int rc = 0;
  5040. const char *phandle = "qcom,dp-aux-bridge";
  5041. struct device_node *bridge_node;
  5042. if (!dp->pdev->dev.of_node) {
  5043. pr_err("cannot find dev.of_node\n");
  5044. rc = -ENODEV;
  5045. goto end;
  5046. }
  5047. bridge_node = of_parse_phandle(dp->pdev->dev.of_node,
  5048. phandle, 0);
  5049. if (!bridge_node)
  5050. goto end;
  5051. dp->aux_bridge = of_dp_aux_find_bridge(bridge_node);
  5052. if (!dp->aux_bridge) {
  5053. pr_err("failed to find dp aux bridge\n");
  5054. rc = -EPROBE_DEFER;
  5055. goto end;
  5056. }
  5057. if (dp->aux_bridge->register_hpd &&
  5058. !(dp->aux_bridge->flag & DP_AUX_BRIDGE_HPD))
  5059. dp->aux_bridge->register_hpd(dp->aux_bridge,
  5060. dp_display_bridge_internal_hpd, dp);
  5061. end:
  5062. return rc;
  5063. }
  5064. static int dp_display_mst_install(struct dp_display *dp_display,
  5065. struct dp_mst_drm_install_info *mst_install_info)
  5066. {
  5067. struct dp_display_private *dp;
  5068. if (!dp_display || !mst_install_info) {
  5069. DP_ERR("invalid input\n");
  5070. return -EINVAL;
  5071. }
  5072. dp = container_of(dp_display, struct dp_display_private, dp_display);
  5073. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  5074. if (!mst_install_info->cbs->hpd || !mst_install_info->cbs->hpd_irq) {
  5075. DP_ERR("invalid mst cbs\n");
  5076. return -EINVAL;
  5077. }
  5078. dp_display->dp_mst_prv_info = mst_install_info->dp_mst_prv_info;
  5079. if (!dp->parser->has_mst) {
  5080. DP_DEBUG("mst not enabled\n");
  5081. return -EPERM;
  5082. }
  5083. memcpy(&dp->mst.cbs, mst_install_info->cbs, sizeof(dp->mst.cbs));
  5084. dp->mst.drm_registered = true;
  5085. DP_MST_DEBUG("dp mst drm installed\n");
  5086. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  5087. return 0;
  5088. }
  5089. static int dp_display_mst_uninstall(struct dp_display *dp_display)
  5090. {
  5091. struct dp_display_private *dp;
  5092. if (!dp_display) {
  5093. DP_ERR("invalid input\n");
  5094. return -EINVAL;
  5095. }
  5096. dp = container_of(dp_display, struct dp_display_private, dp_display);
  5097. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  5098. if (!dp->mst.drm_registered) {
  5099. DP_DEBUG("drm mst not registered\n");
  5100. return -EPERM;
  5101. }
  5102. dp = container_of(dp_display, struct dp_display_private,
  5103. dp_display);
  5104. memset(&dp->mst.cbs, 0, sizeof(dp->mst.cbs));
  5105. dp->mst.drm_registered = false;
  5106. DP_MST_DEBUG("dp mst drm uninstalled\n");
  5107. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  5108. return 0;
  5109. }
  5110. static int dp_display_mst_connector_install(struct dp_display *dp_display,
  5111. struct drm_connector *connector)
  5112. {
  5113. int rc = 0;
  5114. struct dp_panel_in panel_in;
  5115. struct dp_panel *dp_panel;
  5116. struct dp_display_private *dp;
  5117. if (!dp_display || !connector) {
  5118. DP_ERR("invalid input\n");
  5119. return -EINVAL;
  5120. }
  5121. dp = container_of(dp_display, struct dp_display_private, dp_display);
  5122. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  5123. mutex_lock(&dp->session_lock);
  5124. if (!dp->mst.drm_registered) {
  5125. DP_DEBUG("drm mst not registered\n");
  5126. rc = -EPERM;
  5127. goto end;
  5128. }
  5129. panel_in.dev = &dp->pdev->dev;
  5130. panel_in.aux = dp->aux;
  5131. panel_in.catalog = &dp->catalog->panel;
  5132. panel_in.link = dp->link;
  5133. panel_in.connector = connector;
  5134. panel_in.base_panel = dp->panel;
  5135. panel_in.parser = dp->parser;
  5136. dp_panel = dp_panel_get(&panel_in);
  5137. if (IS_ERR(dp_panel)) {
  5138. rc = PTR_ERR(dp_panel);
  5139. DP_ERR("failed to initialize panel, rc = %d\n", rc);
  5140. goto end;
  5141. }
  5142. dp_panel->audio = dp_audio_get(dp->pdev, dp_panel, &dp->catalog->audio);
  5143. if (IS_ERR(dp_panel->audio)) {
  5144. rc = PTR_ERR(dp_panel->audio);
  5145. DP_ERR("[mst] failed to initialize audio, rc = %d\n", rc);
  5146. dp_panel->audio = NULL;
  5147. goto end;
  5148. }
  5149. #if defined(CONFIG_SECDP_SWITCH)
  5150. dp_panel->audio->has_mst = dp->parser->has_mst;
  5151. #endif
  5152. DP_MST_DEBUG("dp mst connector installed. conn:%d\n",
  5153. connector->base.id);
  5154. end:
  5155. mutex_unlock(&dp->session_lock);
  5156. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state, rc);
  5157. return rc;
  5158. }
  5159. static int dp_display_mst_connector_uninstall(struct dp_display *dp_display,
  5160. struct drm_connector *connector)
  5161. {
  5162. int rc = 0;
  5163. struct sde_connector *sde_conn;
  5164. struct dp_panel *dp_panel;
  5165. struct dp_display_private *dp;
  5166. struct dp_audio *audio = NULL;
  5167. if (!dp_display || !connector) {
  5168. DP_ERR("invalid input\n");
  5169. return -EINVAL;
  5170. }
  5171. dp = container_of(dp_display, struct dp_display_private, dp_display);
  5172. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY, dp->state);
  5173. mutex_lock(&dp->session_lock);
  5174. if (!dp->mst.drm_registered) {
  5175. DP_DEBUG("drm mst not registered\n");
  5176. mutex_unlock(&dp->session_lock);
  5177. return -EPERM;
  5178. }
  5179. sde_conn = to_sde_connector(connector);
  5180. if (!sde_conn->drv_panel) {
  5181. DP_ERR("invalid panel for connector:%d\n", connector->base.id);
  5182. mutex_unlock(&dp->session_lock);
  5183. return -EINVAL;
  5184. }
  5185. dp_panel = sde_conn->drv_panel;
5186. /* Save the audio pointer so dp_audio_put() can be called after the panel is released */
  5187. audio = dp_panel->audio;
  5188. dp_panel_put(dp_panel);
  5189. DP_MST_DEBUG("dp mst connector uninstalled. conn:%d\n",
  5190. connector->base.id);
  5191. mutex_unlock(&dp->session_lock);
  5192. dp_audio_put(audio);
  5193. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  5194. return rc;
  5195. }
  5196. static int dp_display_mst_connector_update_edid(struct dp_display *dp_display,
  5197. struct drm_connector *connector,
  5198. struct edid *edid)
  5199. {
  5200. int rc = 0;
  5201. struct sde_connector *sde_conn;
  5202. struct dp_panel *dp_panel;
  5203. struct dp_display_private *dp;
  5204. if (!dp_display || !connector || !edid) {
  5205. DP_ERR("invalid input\n");
  5206. return -EINVAL;
  5207. }
  5208. dp = container_of(dp_display, struct dp_display_private, dp_display);
  5209. if (!dp->mst.drm_registered) {
  5210. DP_DEBUG("drm mst not registered\n");
  5211. return -EPERM;
  5212. }
  5213. sde_conn = to_sde_connector(connector);
  5214. if (!sde_conn->drv_panel) {
  5215. DP_ERR("invalid panel for connector:%d\n", connector->base.id);
  5216. return -EINVAL;
  5217. }
  5218. dp_panel = sde_conn->drv_panel;
  5219. rc = dp_panel->update_edid(dp_panel, edid);
  5220. DP_MST_DEBUG("dp mst connector:%d edid updated. mode_cnt:%d\n",
  5221. connector->base.id, rc);
  5222. return rc;
  5223. }
  5224. static int dp_display_update_pps(struct dp_display *dp_display,
  5225. struct drm_connector *connector, char *pps_cmd)
  5226. {
  5227. struct sde_connector *sde_conn;
  5228. struct dp_panel *dp_panel;
  5229. struct dp_display_private *dp;
  5230. dp = container_of(dp_display, struct dp_display_private, dp_display);
  5231. sde_conn = to_sde_connector(connector);
  5232. if (!sde_conn->drv_panel) {
  5233. DP_ERR("invalid panel for connector:%d\n", connector->base.id);
  5234. return -EINVAL;
  5235. }
  5236. if (!dp_display_state_is(DP_STATE_ENABLED)) {
  5237. dp_display_state_show("[not enabled]");
  5238. return 0;
  5239. }
  5240. dp_panel = sde_conn->drv_panel;
  5241. dp_panel->update_pps(dp_panel, pps_cmd);
  5242. return 0;
  5243. }
  5244. static int dp_display_mst_connector_update_link_info(
  5245. struct dp_display *dp_display,
  5246. struct drm_connector *connector)
  5247. {
  5248. int rc = 0;
  5249. struct sde_connector *sde_conn;
  5250. struct dp_panel *dp_panel;
  5251. struct dp_display_private *dp;
  5252. if (!dp_display || !connector) {
  5253. DP_ERR("invalid input\n");
  5254. return -EINVAL;
  5255. }
  5256. dp = container_of(dp_display, struct dp_display_private, dp_display);
  5257. if (!dp->mst.drm_registered) {
  5258. DP_DEBUG("drm mst not registered\n");
  5259. return -EPERM;
  5260. }
  5261. sde_conn = to_sde_connector(connector);
  5262. if (!sde_conn->drv_panel) {
  5263. DP_ERR("invalid panel for connector:%d\n", connector->base.id);
  5264. return -EINVAL;
  5265. }
  5266. dp_panel = sde_conn->drv_panel;
  5267. memcpy(dp_panel->dpcd, dp->panel->dpcd,
  5268. DP_RECEIVER_CAP_SIZE + 1);
  5269. memcpy(dp_panel->dsc_dpcd, dp->panel->dsc_dpcd,
  5270. DP_RECEIVER_DSC_CAP_SIZE + 1);
  5271. memcpy(&dp_panel->link_info, &dp->panel->link_info,
  5272. sizeof(dp_panel->link_info));
  5273. DP_MST_DEBUG("dp mst connector:%d link info updated\n",
  5274. connector->base.id);
  5275. return rc;
  5276. }
  5277. static int dp_display_mst_get_fixed_topology_port(
  5278. struct dp_display *dp_display,
  5279. u32 strm_id, u32 *port_num)
  5280. {
  5281. struct dp_display_private *dp;
  5282. u32 port;
  5283. if (!dp_display) {
  5284. DP_ERR("invalid input\n");
  5285. return -EINVAL;
  5286. }
  5287. if (strm_id >= DP_STREAM_MAX) {
  5288. DP_ERR("invalid stream id:%d\n", strm_id);
  5289. return -EINVAL;
  5290. }
  5291. dp = container_of(dp_display, struct dp_display_private, dp_display);
  5292. port = dp->parser->mst_fixed_port[strm_id];
  5293. if (!port || port > 255)
  5294. return -ENOENT;
  5295. if (port_num)
  5296. *port_num = port;
  5297. return 0;
  5298. }
  5299. static int dp_display_get_mst_caps(struct dp_display *dp_display,
  5300. struct dp_mst_caps *mst_caps)
  5301. {
  5302. int rc = 0;
  5303. struct dp_display_private *dp;
  5304. if (!dp_display || !mst_caps) {
  5305. DP_ERR("invalid input\n");
  5306. return -EINVAL;
  5307. }
  5308. dp = container_of(dp_display, struct dp_display_private, dp_display);
  5309. mst_caps->has_mst = dp->parser->has_mst;
  5310. mst_caps->max_streams_supported = (mst_caps->has_mst) ? 2 : 0;
  5311. mst_caps->max_dpcd_transaction_bytes = (mst_caps->has_mst) ? 16 : 0;
  5312. mst_caps->drm_aux = dp->aux->drm_aux;
  5313. return rc;
  5314. }
  5315. static void dp_display_wakeup_phy_layer(struct dp_display *dp_display,
  5316. bool wakeup)
  5317. {
  5318. struct dp_display_private *dp;
  5319. struct dp_hpd *hpd;
  5320. if (!dp_display) {
  5321. DP_ERR("invalid input\n");
  5322. return;
  5323. }
  5324. dp = container_of(dp_display, struct dp_display_private, dp_display);
  5325. if (!dp->mst.drm_registered) {
  5326. DP_DEBUG("drm mst not registered\n");
  5327. return;
  5328. }
  5329. hpd = dp->hpd;
  5330. if (hpd && hpd->wakeup_phy)
  5331. hpd->wakeup_phy(hpd, wakeup);
  5332. }
  5333. static int dp_display_probe(struct platform_device *pdev)
  5334. {
  5335. int rc = 0;
  5336. struct dp_display_private *dp;
  5337. if (!pdev || !pdev->dev.of_node) {
  5338. DP_ERR("pdev not found\n");
  5339. rc = -ENODEV;
  5340. goto bail;
  5341. }
  5342. dp = devm_kzalloc(&pdev->dev, sizeof(*dp), GFP_KERNEL);
  5343. if (!dp) {
  5344. rc = -ENOMEM;
  5345. goto bail;
  5346. }
  5347. init_completion(&dp->notification_comp);
  5348. init_completion(&dp->attention_comp);
  5349. dp->pdev = pdev;
  5350. dp->name = "drm_dp";
  5351. memset(&dp->mst, 0, sizeof(dp->mst));
  5352. rc = dp_display_init_aux_bridge(dp);
  5353. if (rc)
  5354. goto error;
  5355. rc = dp_display_create_workqueue(dp);
  5356. if (rc) {
  5357. DP_ERR("Failed to create workqueue\n");
  5358. goto error;
  5359. }
  5360. platform_set_drvdata(pdev, dp);
  5361. g_dp_display = &dp->dp_display;
  5362. g_dp_display->dp_ipc_log = ipc_log_context_create(DRM_DP_IPC_NUM_PAGES, "drm_dp", 0);
  5363. if (!g_dp_display->dp_ipc_log)
  5364. DP_WARN("Error in creating ipc_log_context for drm_dp\n");
  5365. g_dp_display->dp_aux_ipc_log = ipc_log_context_create(DRM_DP_IPC_NUM_PAGES, "drm_dp_aux",
  5366. 0);
  5367. if (!g_dp_display->dp_aux_ipc_log)
  5368. DP_WARN("Error in creating ipc_log_context for drm_dp_aux\n");
  5369. g_dp_display->enable = dp_display_enable;
  5370. g_dp_display->post_enable = dp_display_post_enable;
  5371. g_dp_display->pre_disable = dp_display_pre_disable;
  5372. g_dp_display->disable = dp_display_disable;
  5373. g_dp_display->set_mode = dp_display_set_mode;
  5374. g_dp_display->validate_mode = dp_display_validate_mode;
  5375. g_dp_display->get_modes = dp_display_get_modes;
  5376. g_dp_display->prepare = dp_display_prepare;
  5377. g_dp_display->unprepare = dp_display_unprepare;
  5378. g_dp_display->request_irq = dp_request_irq;
  5379. g_dp_display->get_debug = dp_get_debug;
  5380. g_dp_display->post_open = NULL;
  5381. g_dp_display->post_init = dp_display_post_init;
  5382. g_dp_display->config_hdr = dp_display_config_hdr;
  5383. g_dp_display->mst_install = dp_display_mst_install;
  5384. g_dp_display->mst_uninstall = dp_display_mst_uninstall;
  5385. g_dp_display->mst_connector_install = dp_display_mst_connector_install;
  5386. g_dp_display->mst_connector_uninstall =
  5387. dp_display_mst_connector_uninstall;
  5388. g_dp_display->mst_connector_update_edid =
  5389. dp_display_mst_connector_update_edid;
  5390. g_dp_display->mst_connector_update_link_info =
  5391. dp_display_mst_connector_update_link_info;
  5392. g_dp_display->get_mst_caps = dp_display_get_mst_caps;
  5393. g_dp_display->set_stream_info = dp_display_set_stream_info;
  5394. g_dp_display->update_pps = dp_display_update_pps;
  5395. g_dp_display->convert_to_dp_mode = dp_display_convert_to_dp_mode;
  5396. g_dp_display->mst_get_fixed_topology_port =
  5397. dp_display_mst_get_fixed_topology_port;
  5398. g_dp_display->wakeup_phy_layer =
  5399. dp_display_wakeup_phy_layer;
  5400. g_dp_display->set_colorspace = dp_display_setup_colospace;
  5401. g_dp_display->get_available_dp_resources =
  5402. dp_display_get_available_dp_resources;
  5403. g_dp_display->clear_reservation = dp_display_clear_reservation;
  5404. g_dp_display->get_mst_pbn_div = dp_display_get_mst_pbn_div;
  5405. rc = component_add(&pdev->dev, &dp_display_comp_ops);
  5406. if (rc) {
  5407. DP_ERR("component add failed, rc=%d\n", rc);
  5408. goto error;
  5409. }
  5410. return 0;
  5411. error:
  5412. devm_kfree(&pdev->dev, dp);
  5413. bail:
  5414. return rc;
  5415. }
  5416. int dp_display_get_displays(void **displays, int count)
  5417. {
  5418. if (!displays) {
  5419. DP_ERR("invalid data\n");
  5420. return -EINVAL;
  5421. }
  5422. if (count != 1) {
  5423. DP_ERR("invalid number of displays\n");
  5424. return -EINVAL;
  5425. }
  5426. displays[0] = g_dp_display;
  5427. return count;
  5428. }
  5429. int dp_display_get_num_of_displays(void)
  5430. {
  5431. if (!g_dp_display)
  5432. return 0;
  5433. return 1;
  5434. }
  5435. int dp_display_get_num_of_streams(void)
  5436. {
  5437. return DP_STREAM_MAX;
  5438. }
  5439. static void dp_display_set_mst_state(void *dp_display,
  5440. enum dp_drv_state mst_state)
  5441. {
  5442. struct dp_display_private *dp;
  5443. if (!g_dp_display) {
  5444. DP_DEBUG("dp display not initialized\n");
  5445. return;
  5446. }
  5447. dp = container_of(g_dp_display, struct dp_display_private, dp_display);
  5448. SDE_EVT32_EXTERNAL(mst_state, dp->mst.mst_active);
  5449. if (dp->mst.mst_active && dp->mst.cbs.set_drv_state)
  5450. dp->mst.cbs.set_drv_state(g_dp_display, mst_state);
  5451. }
  5452. static int dp_display_remove(struct platform_device *pdev)
  5453. {
  5454. struct dp_display_private *dp;
  5455. if (!pdev)
  5456. return -EINVAL;
  5457. dp = platform_get_drvdata(pdev);
  5458. dp_display_deinit_sub_modules(dp);
  5459. if (dp->wq)
  5460. destroy_workqueue(dp->wq);
  5461. platform_set_drvdata(pdev, NULL);
  5462. devm_kfree(&pdev->dev, dp);
  5463. if (g_dp_display->dp_ipc_log) {
  5464. ipc_log_context_destroy(g_dp_display->dp_ipc_log);
  5465. g_dp_display->dp_ipc_log = NULL;
  5466. }
  5467. if (g_dp_display->dp_aux_ipc_log) {
  5468. ipc_log_context_destroy(g_dp_display->dp_aux_ipc_log);
  5469. g_dp_display->dp_aux_ipc_log = NULL;
  5470. }
  5471. return 0;
  5472. }
  5473. static int dp_pm_prepare(struct device *dev)
  5474. {
  5475. struct dp_display_private *dp = container_of(g_dp_display,
  5476. struct dp_display_private, dp_display);
  5477. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);
  5478. mutex_lock(&dp->session_lock);
  5479. dp_display_set_mst_state(g_dp_display, PM_SUSPEND);
5480. /*
5481. * There are a few instances where DP is hotplugged while the device
5482. * is in the PM suspend state. After hotplug, the device is observed to
5483. * enter and exit PM suspend multiple times while AUX transactions
5484. * are taking place, which may sometimes cause an unclocked register
5485. * access error. So, abort AUX transactions when such a situation
5486. * arises, i.e. when DP is connected but the display is not enabled yet.
5487. */
  5488. if (dp_display_state_is(DP_STATE_CONNECTED) &&
  5489. !dp_display_state_is(DP_STATE_ENABLED)) {
  5490. dp->aux->abort(dp->aux, true);
  5491. dp->ctrl->abort(dp->ctrl, true);
  5492. }
  5493. dp_display_state_add(DP_STATE_SUSPENDED);
  5494. mutex_unlock(&dp->session_lock);
  5495. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  5496. #if defined(CONFIG_SECDP)
  5497. secdp_show_clk_status(dp);
  5498. #endif
  5499. return 0;
  5500. }
  5501. static void dp_pm_complete(struct device *dev)
  5502. {
  5503. struct dp_display_private *dp = container_of(g_dp_display,
  5504. struct dp_display_private, dp_display);
  5505. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_ENTRY);
  5506. mutex_lock(&dp->session_lock);
  5507. dp_display_set_mst_state(g_dp_display, PM_DEFAULT);
5508. /*
5509. * Multiple PM suspend entries and exits are observed before
5510. * the connect uevent is issued to userspace. AUX transactions are
5511. * aborted during PM suspend entry in dp_pm_prepare to prevent unclocked
5512. * register access. On PM suspend exit, there is no host_init call
5513. * to reset the abort flags for ctrl and aux in case DP is connected
5514. * but the display is not enabled. So, reset the abort flags for aux and ctrl.
5515. */
  5516. if (dp_display_state_is(DP_STATE_CONNECTED) &&
  5517. !dp_display_state_is(DP_STATE_ENABLED)) {
  5518. dp->aux->abort(dp->aux, false);
  5519. dp->ctrl->abort(dp->ctrl, false);
  5520. }
  5521. dp_display_state_remove(DP_STATE_SUSPENDED);
  5522. mutex_unlock(&dp->session_lock);
  5523. SDE_EVT32_EXTERNAL(SDE_EVTLOG_FUNC_EXIT, dp->state);
  5524. }
  5525. void *get_ipc_log_context(void)
  5526. {
  5527. if (g_dp_display && g_dp_display->dp_ipc_log)
  5528. return g_dp_display->dp_ipc_log;
  5529. return NULL;
  5530. }
  5531. static const struct dev_pm_ops dp_pm_ops = {
  5532. .prepare = dp_pm_prepare,
  5533. .complete = dp_pm_complete,
  5534. };
  5535. static struct platform_driver dp_display_driver = {
  5536. .probe = dp_display_probe,
  5537. .remove = dp_display_remove,
  5538. .driver = {
  5539. .name = "msm-dp-display",
  5540. .of_match_table = dp_dt_match,
  5541. .suppress_bind_attrs = true,
  5542. .pm = &dp_pm_ops,
  5543. },
  5544. };
  5545. void __init dp_display_register(void)
  5546. {
  5547. platform_driver_register(&dp_display_driver);
  5548. }
  5549. void __exit dp_display_unregister(void)
  5550. {
  5551. platform_driver_unregister(&dp_display_driver);
  5552. }