sde_encoder.c 164 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561256225632564256525662567256825692570257125722573257425752576257725782579258025812582258325842585258625872588258925902591259225932594259525962597259825992600260126022603260426052606260726082609261026112612261326142615261626172618261926202621262226232624262526262627262826292630263126322633263426352636263726382639264026412642264326442645264626472648264926502651265226532654265526562657265826592660266126622663266426652666266726682669267026712672267326742675267626772678267926802681268226832684268526862687268826892690269126922693269426952696269726982699270027012702270327042705270627072708270927102711271227132714271527162717271827192720272127222723272427252726272727282729273027312732273327342735273627372738273927402741274227432744274527462747274827492750275127522753275427552756275727582759276027612762276327642765276627672768276927702771277227732774277527762777277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903290429052906290729082909291029112912291329142915291629172918291929202921292229232924292529262927292829292930293129322933293429352936293729382939294029412942294329442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053305430553056305730583059306030613062306330643065306630673068306930703071307230733074307530763077307830793080308130823083308430853086308730883089309030913092309330943095309630973098309931003101310231033104310531063107310831093110311131123113311431153116311731183119312031213122312331243125312631273128312931303131313231333134313531363137313831393140314131423143314431453146314731483149315031513152315331543155315631573158315931603161316231633164316531663167316831693170317131723173317431753176317731783179318031813182318331843185318631873188318931903191319231933194319531963197319831993200320132023203320432053206320732083209321032113212321332143215321632173218321932203221322232233224322532263227322832293230323132323233323432353236323732383239324032413242324332443245324632473248324932503251325232533254325532563257325832593260326132623263326432653266326732683269327032713272327332743275327632773278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377737783779378037813782378337843785378637873788378937903791379237933794379537963797379837993800380138023803380438053806380738083809381038113812381338143815381638173818381938203821382238233824382538263827382838293830383138323833383438353836383738383839384038413842384338443845384638473848384938503851385238533854385538563857385838593860386138623863386438653866386738683869387038713872387338743875387638773878387938803881388238833884388538863887388838893890389138923893389438953896389738983899390039013902390339043905390639073908390939103911391239133914391539163917391839193920392139223923392439253926392739283929393039313932393339343935393639373938393939403941394239433944394539463947394839493950395139523953395439553956395739583959396039613962396339643965396639673968396939703971397239733974397539763977397839793980398139823983398439853986398739883989399039913992399339943995399639973998399940004001400240034004400540064007400840094010401140124013401440154016401740184019402040214022402340244025402640274028402940304031403240334034403540364037403840394040404140424043404440454046404740484049405040514052405340544055405640574058405940604061406240634064406540664067406840694070407140724073407440754076407740784079408040814082408340844085408640874088408940904091409240934094409540964097409840994100410141024103410441054106410741084109411041114112411341144115411641174118411941204121412241234124412541264127412841294130413141324133413441354136413741384139414041414142414341444145414641474148414941504151415241534154415541564157415841594160416141624163416441654166416741684169417041714172417341744175417641774178417941804181418241834184418541864187418841894190419141924193419441954196419741984199420042014202420342044205420642074208420942104211421242134214421542164217421842194220422142224223422442254226422742284229423042314232423342344235423642374238423942404241424242434244424542464247424842494250425142524253425442554256425742584259426042614262426342644265426642674268426942704271427242734274427542764277427842794280428142824283428442854286428742884289429042914292429342944295429642974298429943004301430243034304430543064307430843094310431143124313431443154316431743184319432043214322432343244325432643274328432943304331433243334334433543364337433843394340434143424343434443454346434743484349435043514352435343544355435643574358435943604361436243634364436543664367436843694370437143724373437443754376437743784379438043814382438343844385438643874388438943904391439243934394439543964397439843994400440144024403440444054406440744084409441044114412441344144415441644174418441944204421442244234424442544264427442844294430443144324433443444354436443744384439444044414442444344444445444644474448444944504451445244534454445544564457445844594460446144624463446444654466446744684469447044714472447344744475447644774478447944804481448244834484448544864487448844894490449144924493449444954496449744984499450045014502450345044505450645074508450945104511451245134514451545164517451845194520452145224523452445254526452745284529453045314532453345344535453645374538453945404541454245434544454545464547454845494550455145524553455445554556455745584559456045614562456345644565456645674568456945704571457245734574457545764577457845794580458145824583458445854586458745884589459045914592459345944595459645974598459946004601460246034604460546064607460846094610461146124613461446154616461746184619462046214622462346244625462646274628462946304631463246334634463546364637463846394640464146424643464446454646464746484649465046514652465346544655465646574658465946604661466246634664466546664667466846694670467146724673467446754676467746784679468046814682468346844685468646874688468946904691469246934694469546964697469846994700470147024703470447054706470747084709471047114712471347144715471647174718471947204721472247234724472547264727472847294730473147324733473447354736473747384739474047414742474347444745474647474748474947504751475247534754475547564757475847594760476147624763476447654766476747684769477047714772477347744775477647774778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527752785279528052815282528352845285528652875288528952905291529252935294529552965297529852995300530153025303530453055306530753085309531053115312531353145315531653175318531953205321532253235324532553265327532853295330533153325333533453355336533753385339534053415342534353445345534653475348534953505351535253535354535553565357535853595360536153625363536453655366536753685369537053715372537353745375537653775378537953805381538253835384538553865387538853895390539153925393539453955396539753985399540054015402540354045405540654075408540954105411541254135414541554165417541854195420542154225423542454255426542754285429543054315432543354345435543654375438543954405441544254435444544554465447544854495450545154525453545454555456545754585459546054615462546354645465546654675468546954705471547254735474547554765477547854795480548154825483548454855486548754885489549054915492549354945495549654975498549955005501550255035504550555065507550855095510551155125513551455155516551755185519552055215522552355245525552655275528552955305531553255335534553555365537553855395540554155425543554455455546554755485549555055515552555355545555555655575558555955605561556255635564556555665567556855695570557155725573557455755576557755785579558055815582558355845585558655875588558955905591559255935594559555965597559855995600560156025603560456055606560756085609561056115612561356145615561656175618561956205621562256235624562556265627562856295630563156325633563456355636563756385639564056415642564356445645564656475648564956505651565256535654565556565657565856595660566156625663566456655666566756685669567056715672567356745675567656775678567956805681568256835684568556865687568856895690569156925693569456955696569756985699570057015702570357045705570657075708570957105711571257135714571557165717571857195720572157225723572457255726572757285729573057315732573357345735573657375738573957405741574257435744574557465747574857495750575157525753575457555756575757585759576057615762576357645765576657675768576957705771577257735774577557765777577857795780578157825783578457855786578757885789579057915792579357945795579657975798579958005801580258035804580558065807580858095810581158125813581458155816581758185819582058215822582358245825582658275828582958305831583258335834583558365837583858395840584158425843584458455846584758485849585058515852585358545855585658575858585958605861586258635864586558665867586858695870587158725873587458755876587758785879588058815882588358845885588658875888588958905891589258935894589558965897589858995900590159025903590459055906590759085909591059115912591359145915591659175918591959205921592259235924592559265927592859295930593159325933593459355936593759385939594059415942594359445945594659475948594959505951595259535954595559565957595859595960596159625963596459655966596759685969597059715972597359745975597659775978597959805981598259835984598559865987598859895990599159925993599459955996599759985999600060016002600360046005600660076008600960106011601260136014601560166017601860196020602160226023
  1. /*
  2. * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  3. * Copyright (C) 2013 Red Hat
  4. * Author: Rob Clark <[email protected]>
  5. *
  6. * This program is free software; you can redistribute it and/or modify it
  7. * under the terms of the GNU General Public License version 2 as published by
  8. * the Free Software Foundation.
  9. *
  10. * This program is distributed in the hope that it will be useful, but WITHOUT
  11. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  13. * more details.
  14. *
  15. * You should have received a copy of the GNU General Public License along with
  16. * this program. If not, see <http://www.gnu.org/licenses/>.
  17. */
  18. #define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
  19. #include <linux/kthread.h>
  20. #include <linux/debugfs.h>
  21. #include <linux/seq_file.h>
  22. #include <linux/sde_rsc.h>
  23. #include "msm_drv.h"
  24. #include "sde_kms.h"
  25. #include <drm/drm_crtc.h>
  26. #include <drm/drm_crtc_helper.h>
  27. #include "sde_hwio.h"
  28. #include "sde_hw_catalog.h"
  29. #include "sde_hw_intf.h"
  30. #include "sde_hw_ctl.h"
  31. #include "sde_formats.h"
  32. #include "sde_encoder_phys.h"
  33. #include "sde_power_handle.h"
  34. #include "sde_hw_dsc.h"
  35. #include "sde_crtc.h"
  36. #include "sde_trace.h"
  37. #include "sde_core_irq.h"
  38. #include "sde_hw_top.h"
  39. #include "sde_hw_qdss.h"
  40. #define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
  41. (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
  42. #define SDE_ERROR_ENC(e, fmt, ...) SDE_ERROR("enc%d " fmt,\
  43. (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
  44. #define SDE_DEBUG_PHYS(p, fmt, ...) SDE_DEBUG("enc%d intf%d pp%d " fmt,\
  45. (p) ? (p)->parent->base.id : -1, \
  46. (p) ? (p)->intf_idx - INTF_0 : -1, \
  47. (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
  48. ##__VA_ARGS__)
  49. #define SDE_ERROR_PHYS(p, fmt, ...) SDE_ERROR("enc%d intf%d pp%d " fmt,\
  50. (p) ? (p)->parent->base.id : -1, \
  51. (p) ? (p)->intf_idx - INTF_0 : -1, \
  52. (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
  53. ##__VA_ARGS__)
  54. /*
  55. * Two to anticipate panels that can do cmd/vid dynamic switching
  56. * plan is to create all possible physical encoder types, and switch between
  57. * them at runtime
  58. */
  59. #define NUM_PHYS_ENCODER_TYPES 2
  60. #define MAX_PHYS_ENCODERS_PER_VIRTUAL \
  61. (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
  62. #define MISR_BUFF_SIZE 256
  63. #define IDLE_SHORT_TIMEOUT 1
  64. #define EVT_TIME_OUT_SPLIT 2
  65. /* Maximum number of VSYNC wait attempts for RSC state transition */
  66. #define MAX_RSC_WAIT 5
  67. #define TOPOLOGY_DUALPIPE_MERGE_MODE(x) \
  68. (((x) == SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE) || \
  69. ((x) == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE) || \
  70. ((x) == SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC))
  71. /**
  72. * enum sde_enc_rc_events - events for resource control state machine
  73. * @SDE_ENC_RC_EVENT_KICKOFF:
  74. * This event happens at NORMAL priority.
  75. * Event that signals the start of the transfer. When this event is
  76. * received, enable MDP/DSI core clocks and request RSC with CMD state.
  77. * Regardless of the previous state, the resource should be in ON state
  78. * at the end of this event.
  79. * @SDE_ENC_RC_EVENT_FRAME_DONE:
  80. * This event happens at INTERRUPT level.
  81. * Event signals the end of the data transfer after the PP FRAME_DONE
  82. * event. At the end of this event, a delayed work is scheduled to go to
  83. * IDLE_PC state after IDLE_POWERCOLLAPSE_DURATION time.
  84. * @SDE_ENC_RC_EVENT_PRE_STOP:
  85. * This event happens at NORMAL priority.
  86. * This event, when received during the ON state, set RSC to IDLE, and
  87. * and leave the RC STATE in the PRE_OFF state.
  88. * It should be followed by the STOP event as part of encoder disable.
  89. * If received during IDLE or OFF states, it will do nothing.
  90. * @SDE_ENC_RC_EVENT_STOP:
  91. * This event happens at NORMAL priority.
  92. * When this event is received, disable all the MDP/DSI core clocks, and
  93. * disable IRQs. It should be called from the PRE_OFF or IDLE states.
  94. * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
  95. * PRE_OFF is expected when PRE_STOP was executed during the ON state.
  96. * Resource state should be in OFF at the end of the event.
  97. * @SDE_ENC_RC_EVENT_PRE_MODESET:
  98. * This event happens at NORMAL priority from a work item.
  99. * Event signals that there is a seamless mode switch is in prgoress. A
  100. * client needs to turn of only irq - leave clocks ON to reduce the mode
  101. * switch latency.
  102. * @SDE_ENC_RC_EVENT_POST_MODESET:
  103. * This event happens at NORMAL priority from a work item.
  104. * Event signals that seamless mode switch is complete and resources are
  105. * acquired. Clients wants to turn on the irq again and update the rsc
  106. * with new vtotal.
  107. * @SDE_ENC_RC_EVENT_ENTER_IDLE:
  108. * This event happens at NORMAL priority from a work item.
  109. * Event signals that there were no frame updates for
  110. * IDLE_POWERCOLLAPSE_DURATION time. This would disable MDP/DSI core clocks
  111. * and request RSC with IDLE state and change the resource state to IDLE.
  112. * @SDE_ENC_RC_EVENT_EARLY_WAKEUP:
  113. * This event is triggered from the input event thread when touch event is
  114. * received from the input device. On receiving this event,
  115. * - If the device is in SDE_ENC_RC_STATE_IDLE state, it turns ON the
  116. clocks and enable RSC.
  117. * - If the device is in SDE_ENC_RC_STATE_ON state, it resets the delayed
  118. * off work since a new commit is imminent.
  119. */
  120. enum sde_enc_rc_events {
  121. SDE_ENC_RC_EVENT_KICKOFF = 1,
  122. SDE_ENC_RC_EVENT_FRAME_DONE,
  123. SDE_ENC_RC_EVENT_PRE_STOP,
  124. SDE_ENC_RC_EVENT_STOP,
  125. SDE_ENC_RC_EVENT_PRE_MODESET,
  126. SDE_ENC_RC_EVENT_POST_MODESET,
  127. SDE_ENC_RC_EVENT_ENTER_IDLE,
  128. SDE_ENC_RC_EVENT_EARLY_WAKEUP,
  129. };
  130. /*
  131. * enum sde_enc_rc_states - states that the resource control maintains
  132. * @SDE_ENC_RC_STATE_OFF: Resource is in OFF state
  133. * @SDE_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
  134. * @SDE_ENC_RC_STATE_ON: Resource is in ON state
  135. * @SDE_ENC_RC_STATE_MODESET: Resource is in modeset state
  136. * @SDE_ENC_RC_STATE_IDLE: Resource is in IDLE state
  137. */
  138. enum sde_enc_rc_states {
  139. SDE_ENC_RC_STATE_OFF,
  140. SDE_ENC_RC_STATE_PRE_OFF,
  141. SDE_ENC_RC_STATE_ON,
  142. SDE_ENC_RC_STATE_MODESET,
  143. SDE_ENC_RC_STATE_IDLE
  144. };
  145. /**
  146. * struct sde_encoder_virt - virtual encoder. Container of one or more physical
  147. * encoders. Virtual encoder manages one "logical" display. Physical
  148. * encoders manage one intf block, tied to a specific panel/sub-panel.
  149. * Virtual encoder defers as much as possible to the physical encoders.
  150. * Virtual encoder registers itself with the DRM Framework as the encoder.
  151. * @base: drm_encoder base class for registration with DRM
  152. * @enc_spin_lock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
  153. * @bus_scaling_client: Client handle to the bus scaling interface
  154. * @te_source: vsync source pin information
  155. * @ops: Encoder ops from init function
  156. * @num_phys_encs: Actual number of physical encoders contained.
  157. * @phys_encs: Container of physical encoders managed.
  158. * @phys_vid_encs: Video physical encoders for panel mode switch.
  159. * @phys_cmd_encs: Command physical encoders for panel mode switch.
  160. * @cur_master: Pointer to the current master in this mode. Optimization
  161. * Only valid after enable. Cleared as disable.
  162. * @hw_pp Handle to the pingpong blocks used for the display. No.
  163. * pingpong blocks can be different than num_phys_encs.
  164. * @hw_dsc: Array of DSC block handles used for the display.
  165. * @dirty_dsc_ids: Cached dsc indexes for dirty DSC blocks needing flush
  166. * @intfs_swapped Whether or not the phys_enc interfaces have been swapped
  167. * for partial update right-only cases, such as pingpong
  168. * split where virtual pingpong does not generate IRQs
  169. @qdss_status: indicate if qdss is modified since last update
  170. * @crtc_vblank_cb: Callback into the upper layer / CRTC for
  171. * notification of the VBLANK
  172. * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
  173. * @crtc_kickoff_cb: Callback into CRTC that will flush & start
  174. * all CTL paths
  175. * @crtc_kickoff_cb_data: Opaque user data given to crtc_kickoff_cb
  176. * @debugfs_root: Debug file system root file node
  177. * @enc_lock: Lock around physical encoder create/destroy and
  178. access.
  179. * @frame_done_cnt: Atomic counter for tracking which phys_enc is
  180. * done with frame processing.
  181. * @crtc_frame_event_cb: callback handler for frame event
  182. * @crtc_frame_event_cb_data: callback handler private data
  183. * @vsync_event_timer: vsync timer
  184. * @rsc_client: rsc client pointer
  185. * @rsc_state_init: boolean to indicate rsc config init
  186. * @disp_info: local copy of msm_display_info struct
  187. * @misr_enable: misr enable/disable status
  188. * @misr_frame_count: misr frame count before start capturing the data
  189. * @idle_pc_enabled: indicate if idle power collapse is enabled
  190. * currently. This can be controlled by user-mode
  191. * @rc_lock: resource control mutex lock to protect
  192. * virt encoder over various state changes
  193. * @rc_state: resource controller state
  194. * @delayed_off_work: delayed worker to schedule disabling of
  195. * clks and resources after IDLE_TIMEOUT time.
  196. * @vsync_event_work: worker to handle vsync event for autorefresh
  197. * @input_event_work: worker to handle input device touch events
  198. * @esd_trigger_work: worker to handle esd trigger events
  199. * @input_handler: handler for input device events
  200. * @topology: topology of the display
  201. * @vblank_enabled: boolean to track userspace vblank vote
  202. * @idle_pc_restore: flag to indicate idle_pc_restore happened
  203. * @frame_trigger_mode: frame trigger mode indication for command
  204. * mode display
  205. * @dynamic_hdr_updated: flag to indicate if mempool was programmed
  206. * @rsc_config: rsc configuration for display vtotal, fps, etc.
  207. * @cur_conn_roi: current connector roi
  208. * @prv_conn_roi: previous connector roi to optimize if unchanged
  209. * @crtc pointer to drm_crtc
  210. * @recovery_events_enabled: status of hw recovery feature enable by client
  211. * @elevated_ahb_vote: increase AHB bus speed for the first frame
  212. * after power collapse
  213. * @pm_qos_cpu_req: pm_qos request for cpu frequency
  214. * @mode_info: stores the current mode and should be used
  215. * only in commit phase
  216. */
  217. struct sde_encoder_virt {
  218. struct drm_encoder base;
  219. spinlock_t enc_spinlock;
  220. struct mutex vblank_ctl_lock;
  221. uint32_t bus_scaling_client;
  222. uint32_t display_num_of_h_tiles;
  223. uint32_t te_source;
  224. struct sde_encoder_ops ops;
  225. unsigned int num_phys_encs;
  226. struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
  227. struct sde_encoder_phys *phys_vid_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
  228. struct sde_encoder_phys *phys_cmd_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
  229. struct sde_encoder_phys *cur_master;
  230. struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
  231. struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
  232. struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
  233. enum sde_dsc dirty_dsc_ids[MAX_CHANNELS_PER_ENC];
  234. bool intfs_swapped;
  235. bool qdss_status;
  236. void (*crtc_vblank_cb)(void *data);
  237. void *crtc_vblank_cb_data;
  238. struct dentry *debugfs_root;
  239. struct mutex enc_lock;
  240. atomic_t frame_done_cnt[MAX_PHYS_ENCODERS_PER_VIRTUAL];
  241. void (*crtc_frame_event_cb)(void *data, u32 event);
  242. struct sde_crtc_frame_event_cb_data crtc_frame_event_cb_data;
  243. struct timer_list vsync_event_timer;
  244. struct sde_rsc_client *rsc_client;
  245. bool rsc_state_init;
  246. struct msm_display_info disp_info;
  247. bool misr_enable;
  248. u32 misr_frame_count;
  249. bool idle_pc_enabled;
  250. struct mutex rc_lock;
  251. enum sde_enc_rc_states rc_state;
  252. struct kthread_delayed_work delayed_off_work;
  253. struct kthread_work vsync_event_work;
  254. struct kthread_work input_event_work;
  255. struct kthread_work esd_trigger_work;
  256. struct input_handler *input_handler;
  257. struct msm_display_topology topology;
  258. bool vblank_enabled;
  259. bool idle_pc_restore;
  260. enum frame_trigger_mode_type frame_trigger_mode;
  261. bool dynamic_hdr_updated;
  262. struct sde_rsc_cmd_config rsc_config;
  263. struct sde_rect cur_conn_roi;
  264. struct sde_rect prv_conn_roi;
  265. struct drm_crtc *crtc;
  266. bool recovery_events_enabled;
  267. bool elevated_ahb_vote;
  268. struct pm_qos_request pm_qos_cpu_req;
  269. struct msm_mode_info mode_info;
  270. };
  271. #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
  272. void sde_encoder_uidle_enable(struct drm_encoder *drm_enc, bool enable)
  273. {
  274. struct sde_encoder_virt *sde_enc;
  275. int i;
  276. sde_enc = to_sde_encoder_virt(drm_enc);
  277. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  278. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  279. if (phys && phys->hw_ctl && phys->hw_ctl->ops.uidle_enable) {
  280. SDE_EVT32(DRMID(drm_enc), enable);
  281. phys->hw_ctl->ops.uidle_enable(phys->hw_ctl, enable);
  282. }
  283. }
  284. }
  285. static void _sde_encoder_pm_qos_add_request(struct drm_encoder *drm_enc,
  286. struct sde_kms *sde_kms)
  287. {
  288. struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
  289. struct pm_qos_request *req;
  290. u32 cpu_mask;
  291. u32 cpu_dma_latency;
  292. int cpu;
  293. if (!sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
  294. return;
  295. cpu_mask = sde_kms->catalog->perf.cpu_mask;
  296. cpu_dma_latency = sde_kms->catalog->perf.cpu_dma_latency;
  297. req = &sde_enc->pm_qos_cpu_req;
  298. req->type = PM_QOS_REQ_AFFINE_CORES;
  299. cpumask_empty(&req->cpus_affine);
  300. for_each_possible_cpu(cpu) {
  301. if ((1 << cpu) & cpu_mask)
  302. cpumask_set_cpu(cpu, &req->cpus_affine);
  303. }
  304. pm_qos_add_request(req, PM_QOS_CPU_DMA_LATENCY, cpu_dma_latency);
  305. SDE_EVT32_VERBOSE(DRMID(drm_enc), cpu_mask, cpu_dma_latency);
  306. }
  307. static void _sde_encoder_pm_qos_remove_request(struct drm_encoder *drm_enc,
  308. struct sde_kms *sde_kms)
  309. {
  310. struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
  311. if (!sde_kms->catalog || !sde_kms->catalog->perf.cpu_mask)
  312. return;
  313. pm_qos_remove_request(&sde_enc->pm_qos_cpu_req);
  314. }
  315. static bool _sde_encoder_is_autorefresh_enabled(
  316. struct sde_encoder_virt *sde_enc)
  317. {
  318. struct drm_connector *drm_conn;
  319. if (!sde_enc->cur_master ||
  320. !(sde_enc->disp_info.capabilities & MSM_DISPLAY_CAP_CMD_MODE))
  321. return false;
  322. drm_conn = sde_enc->cur_master->connector;
  323. if (!drm_conn || !drm_conn->state)
  324. return false;
  325. return sde_connector_get_property(drm_conn->state,
  326. CONNECTOR_PROP_AUTOREFRESH) ? true : false;
  327. }
  328. static bool _sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
  329. {
  330. struct sde_encoder_virt *sde_enc;
  331. struct msm_compression_info *comp_info;
  332. if (!drm_enc)
  333. return false;
  334. sde_enc = to_sde_encoder_virt(drm_enc);
  335. comp_info = &sde_enc->mode_info.comp_info;
  336. return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
  337. }
  338. static void sde_configure_qdss(struct sde_encoder_virt *sde_enc,
  339. struct sde_hw_qdss *hw_qdss,
  340. struct sde_encoder_phys *phys, bool enable)
  341. {
  342. if (sde_enc->qdss_status == enable)
  343. return;
  344. sde_enc->qdss_status = enable;
  345. phys->hw_mdptop->ops.set_mdp_hw_events(phys->hw_mdptop,
  346. sde_enc->qdss_status);
  347. hw_qdss->ops.enable_qdss_events(hw_qdss, sde_enc->qdss_status);
  348. }
  349. static int _sde_encoder_wait_timeout(int32_t drm_id, int32_t hw_id,
  350. s64 timeout_ms, struct sde_encoder_wait_info *info)
  351. {
  352. int rc = 0;
  353. s64 wait_time_jiffies = msecs_to_jiffies(timeout_ms);
  354. ktime_t cur_ktime;
  355. ktime_t exp_ktime = ktime_add_ms(ktime_get(), timeout_ms);
  356. do {
  357. rc = wait_event_timeout(*(info->wq),
  358. atomic_read(info->atomic_cnt) == 0, wait_time_jiffies);
  359. cur_ktime = ktime_get();
  360. SDE_EVT32(drm_id, hw_id, rc, ktime_to_ms(cur_ktime),
  361. timeout_ms, atomic_read(info->atomic_cnt));
  362. /* If we timed out, counter is valid and time is less, wait again */
  363. } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
  364. (ktime_compare_safe(exp_ktime, cur_ktime) > 0));
  365. return rc;
  366. }
  367. bool sde_encoder_is_dsc_merge(struct drm_encoder *drm_enc)
  368. {
  369. enum sde_rm_topology_name topology;
  370. struct sde_encoder_virt *sde_enc;
  371. struct drm_connector *drm_conn;
  372. if (!drm_enc)
  373. return false;
  374. sde_enc = to_sde_encoder_virt(drm_enc);
  375. if (!sde_enc->cur_master)
  376. return false;
  377. drm_conn = sde_enc->cur_master->connector;
  378. if (!drm_conn)
  379. return false;
  380. topology = sde_connector_get_topology_name(drm_conn);
  381. if (topology == SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE)
  382. return true;
  383. return false;
  384. }
  385. bool sde_encoder_is_primary_display(struct drm_encoder *drm_enc)
  386. {
  387. struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
  388. return sde_enc &&
  389. (sde_enc->disp_info.display_type ==
  390. SDE_CONNECTOR_PRIMARY);
  391. }
  392. int sde_encoder_in_cont_splash(struct drm_encoder *drm_enc)
  393. {
  394. struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
  395. return sde_enc && sde_enc->cur_master &&
  396. sde_enc->cur_master->cont_splash_enabled;
  397. }
  398. void sde_encoder_helper_report_irq_timeout(struct sde_encoder_phys *phys_enc,
  399. enum sde_intr_idx intr_idx)
  400. {
  401. SDE_EVT32(DRMID(phys_enc->parent),
  402. phys_enc->intf_idx - INTF_0,
  403. phys_enc->hw_pp->idx - PINGPONG_0,
  404. intr_idx);
  405. SDE_ERROR_PHYS(phys_enc, "irq %d timeout\n", intr_idx);
  406. if (phys_enc->parent_ops.handle_frame_done)
  407. phys_enc->parent_ops.handle_frame_done(
  408. phys_enc->parent, phys_enc,
  409. SDE_ENCODER_FRAME_EVENT_ERROR);
  410. }
  411. int sde_encoder_helper_wait_for_irq(struct sde_encoder_phys *phys_enc,
  412. enum sde_intr_idx intr_idx,
  413. struct sde_encoder_wait_info *wait_info)
  414. {
  415. struct sde_encoder_irq *irq;
  416. u32 irq_status;
  417. int ret, i;
  418. if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
  419. SDE_ERROR("invalid params\n");
  420. return -EINVAL;
  421. }
  422. irq = &phys_enc->irq[intr_idx];
  423. /* note: do master / slave checking outside */
  424. /* return EWOULDBLOCK since we know the wait isn't necessary */
  425. if (phys_enc->enable_state == SDE_ENC_DISABLED) {
  426. SDE_ERROR_PHYS(phys_enc, "encoder is disabled\n");
  427. SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  428. irq->irq_idx, intr_idx, SDE_EVTLOG_ERROR);
  429. return -EWOULDBLOCK;
  430. }
  431. if (irq->irq_idx < 0) {
  432. SDE_DEBUG_PHYS(phys_enc, "irq %s hw %d disabled, skip wait\n",
  433. irq->name, irq->hw_idx);
  434. SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  435. irq->irq_idx);
  436. return 0;
  437. }
  438. SDE_DEBUG_PHYS(phys_enc, "pending_cnt %d\n",
  439. atomic_read(wait_info->atomic_cnt));
  440. SDE_EVT32_VERBOSE(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  441. irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
  442. atomic_read(wait_info->atomic_cnt), SDE_EVTLOG_FUNC_ENTRY);
  443. /*
  444. * Some module X may disable interrupt for longer duration
  445. * and it may trigger all interrupts including timer interrupt
  446. * when module X again enable the interrupt.
  447. * That may cause interrupt wait timeout API in this API.
  448. * It is handled by split the wait timer in two halves.
  449. */
  450. for (i = 0; i < EVT_TIME_OUT_SPLIT; i++) {
  451. ret = _sde_encoder_wait_timeout(DRMID(phys_enc->parent),
  452. irq->hw_idx,
  453. (wait_info->timeout_ms/EVT_TIME_OUT_SPLIT),
  454. wait_info);
  455. if (ret)
  456. break;
  457. }
  458. if (ret <= 0) {
  459. irq_status = sde_core_irq_read(phys_enc->sde_kms,
  460. irq->irq_idx, true);
  461. if (irq_status) {
  462. unsigned long flags;
  463. SDE_EVT32(DRMID(phys_enc->parent), intr_idx,
  464. irq->hw_idx, irq->irq_idx,
  465. phys_enc->hw_pp->idx - PINGPONG_0,
  466. atomic_read(wait_info->atomic_cnt));
  467. SDE_DEBUG_PHYS(phys_enc,
  468. "done but irq %d not triggered\n",
  469. irq->irq_idx);
  470. local_irq_save(flags);
  471. irq->cb.func(phys_enc, irq->irq_idx);
  472. local_irq_restore(flags);
  473. ret = 0;
  474. } else {
  475. ret = -ETIMEDOUT;
  476. SDE_EVT32(DRMID(phys_enc->parent), intr_idx,
  477. irq->hw_idx, irq->irq_idx,
  478. phys_enc->hw_pp->idx - PINGPONG_0,
  479. atomic_read(wait_info->atomic_cnt), irq_status,
  480. SDE_EVTLOG_ERROR);
  481. }
  482. } else {
  483. ret = 0;
  484. SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  485. irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
  486. atomic_read(wait_info->atomic_cnt));
  487. }
  488. SDE_EVT32_VERBOSE(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  489. irq->irq_idx, ret, phys_enc->hw_pp->idx - PINGPONG_0,
  490. atomic_read(wait_info->atomic_cnt), SDE_EVTLOG_FUNC_EXIT);
  491. return ret;
  492. }
  493. int sde_encoder_helper_register_irq(struct sde_encoder_phys *phys_enc,
  494. enum sde_intr_idx intr_idx)
  495. {
  496. struct sde_encoder_irq *irq;
  497. int ret = 0;
  498. if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
  499. SDE_ERROR("invalid params\n");
  500. return -EINVAL;
  501. }
  502. irq = &phys_enc->irq[intr_idx];
  503. if (irq->irq_idx >= 0) {
  504. SDE_DEBUG_PHYS(phys_enc,
  505. "skipping already registered irq %s type %d\n",
  506. irq->name, irq->intr_type);
  507. return 0;
  508. }
  509. irq->irq_idx = sde_core_irq_idx_lookup(phys_enc->sde_kms,
  510. irq->intr_type, irq->hw_idx);
  511. if (irq->irq_idx < 0) {
  512. SDE_ERROR_PHYS(phys_enc,
  513. "failed to lookup IRQ index for %s type:%d\n",
  514. irq->name, irq->intr_type);
  515. return -EINVAL;
  516. }
  517. ret = sde_core_irq_register_callback(phys_enc->sde_kms, irq->irq_idx,
  518. &irq->cb);
  519. if (ret) {
  520. SDE_ERROR_PHYS(phys_enc,
  521. "failed to register IRQ callback for %s\n",
  522. irq->name);
  523. irq->irq_idx = -EINVAL;
  524. return ret;
  525. }
  526. ret = sde_core_irq_enable(phys_enc->sde_kms, &irq->irq_idx, 1);
  527. if (ret) {
  528. SDE_ERROR_PHYS(phys_enc,
  529. "enable IRQ for intr:%s failed, irq_idx %d\n",
  530. irq->name, irq->irq_idx);
  531. sde_core_irq_unregister_callback(phys_enc->sde_kms,
  532. irq->irq_idx, &irq->cb);
  533. SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  534. irq->irq_idx, SDE_EVTLOG_ERROR);
  535. irq->irq_idx = -EINVAL;
  536. return ret;
  537. }
  538. SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx, irq->irq_idx);
  539. SDE_DEBUG_PHYS(phys_enc, "registered irq %s idx: %d\n",
  540. irq->name, irq->irq_idx);
  541. return ret;
  542. }
  543. int sde_encoder_helper_unregister_irq(struct sde_encoder_phys *phys_enc,
  544. enum sde_intr_idx intr_idx)
  545. {
  546. struct sde_encoder_irq *irq;
  547. int ret;
  548. if (!phys_enc) {
  549. SDE_ERROR("invalid encoder\n");
  550. return -EINVAL;
  551. }
  552. irq = &phys_enc->irq[intr_idx];
  553. /* silently skip irqs that weren't registered */
  554. if (irq->irq_idx < 0) {
  555. SDE_ERROR(
  556. "extra unregister irq, enc%d intr_idx:0x%x hw_idx:0x%x irq_idx:0x%x\n",
  557. DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  558. irq->irq_idx);
  559. SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  560. irq->irq_idx, SDE_EVTLOG_ERROR);
  561. return 0;
  562. }
  563. ret = sde_core_irq_disable(phys_enc->sde_kms, &irq->irq_idx, 1);
  564. if (ret)
  565. SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  566. irq->irq_idx, ret, SDE_EVTLOG_ERROR);
  567. ret = sde_core_irq_unregister_callback(phys_enc->sde_kms, irq->irq_idx,
  568. &irq->cb);
  569. if (ret)
  570. SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
  571. irq->irq_idx, ret, SDE_EVTLOG_ERROR);
  572. SDE_EVT32(DRMID(phys_enc->parent), intr_idx, irq->hw_idx, irq->irq_idx);
  573. SDE_DEBUG_PHYS(phys_enc, "unregistered %d\n", irq->irq_idx);
  574. irq->irq_idx = -EINVAL;
  575. return 0;
  576. }
  577. void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc,
  578. struct sde_encoder_hw_resources *hw_res,
  579. struct drm_connector_state *conn_state)
  580. {
  581. struct sde_encoder_virt *sde_enc = NULL;
  582. struct msm_mode_info mode_info;
  583. int i = 0;
  584. if (!hw_res || !drm_enc || !conn_state) {
  585. SDE_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
  586. !drm_enc, !hw_res, !conn_state);
  587. return;
  588. }
  589. sde_enc = to_sde_encoder_virt(drm_enc);
  590. SDE_DEBUG_ENC(sde_enc, "\n");
  591. /* Query resources used by phys encs, expected to be without overlap */
  592. memset(hw_res, 0, sizeof(*hw_res));
  593. hw_res->display_num_of_h_tiles = sde_enc->display_num_of_h_tiles;
  594. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  595. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  596. if (phys && phys->ops.get_hw_resources)
  597. phys->ops.get_hw_resources(phys, hw_res, conn_state);
  598. }
  599. /*
  600. * NOTE: Do not use sde_encoder_get_mode_info here as this function is
  601. * called from atomic_check phase. Use the below API to get mode
  602. * information of the temporary conn_state passed
  603. */
  604. sde_connector_state_get_mode_info(conn_state, &mode_info);
  605. hw_res->topology = mode_info.topology;
  606. hw_res->display_type = sde_enc->disp_info.display_type;
  607. }
  608. void sde_encoder_destroy(struct drm_encoder *drm_enc)
  609. {
  610. struct sde_encoder_virt *sde_enc = NULL;
  611. int i = 0;
  612. if (!drm_enc) {
  613. SDE_ERROR("invalid encoder\n");
  614. return;
  615. }
  616. sde_enc = to_sde_encoder_virt(drm_enc);
  617. SDE_DEBUG_ENC(sde_enc, "\n");
  618. mutex_lock(&sde_enc->enc_lock);
  619. sde_rsc_client_destroy(sde_enc->rsc_client);
  620. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  621. struct sde_encoder_phys *phys;
  622. phys = sde_enc->phys_vid_encs[i];
  623. if (phys && phys->ops.destroy) {
  624. phys->ops.destroy(phys);
  625. --sde_enc->num_phys_encs;
  626. sde_enc->phys_encs[i] = NULL;
  627. }
  628. phys = sde_enc->phys_cmd_encs[i];
  629. if (phys && phys->ops.destroy) {
  630. phys->ops.destroy(phys);
  631. --sde_enc->num_phys_encs;
  632. sde_enc->phys_encs[i] = NULL;
  633. }
  634. }
  635. if (sde_enc->num_phys_encs)
  636. SDE_ERROR_ENC(sde_enc, "expected 0 num_phys_encs not %d\n",
  637. sde_enc->num_phys_encs);
  638. sde_enc->num_phys_encs = 0;
  639. mutex_unlock(&sde_enc->enc_lock);
  640. drm_encoder_cleanup(drm_enc);
  641. mutex_destroy(&sde_enc->enc_lock);
  642. kfree(sde_enc->input_handler);
  643. sde_enc->input_handler = NULL;
  644. kfree(sde_enc);
  645. }
  646. void sde_encoder_helper_update_intf_cfg(
  647. struct sde_encoder_phys *phys_enc)
  648. {
  649. struct sde_encoder_virt *sde_enc;
  650. struct sde_hw_intf_cfg_v1 *intf_cfg;
  651. enum sde_3d_blend_mode mode_3d;
  652. if (!phys_enc) {
  653. SDE_ERROR("invalid arg, encoder %d\n", !phys_enc);
  654. return;
  655. }
  656. sde_enc = to_sde_encoder_virt(phys_enc->parent);
  657. intf_cfg = &sde_enc->cur_master->intf_cfg_v1;
  658. SDE_DEBUG_ENC(sde_enc,
  659. "intf_cfg updated for %d at idx %d\n",
  660. phys_enc->intf_idx,
  661. intf_cfg->intf_count);
  662. /* setup interface configuration */
  663. if (intf_cfg->intf_count >= MAX_INTF_PER_CTL_V1) {
  664. pr_err("invalid inf_count %d\n", intf_cfg->intf_count);
  665. return;
  666. }
  667. intf_cfg->intf[intf_cfg->intf_count++] = phys_enc->intf_idx;
  668. if (phys_enc == sde_enc->cur_master) {
  669. if (sde_enc->cur_master->intf_mode == INTF_MODE_CMD)
  670. intf_cfg->intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
  671. else
  672. intf_cfg->intf_mode_sel = SDE_CTL_MODE_SEL_VID;
  673. }
  674. /* configure this interface as master for split display */
  675. if (phys_enc->split_role == ENC_ROLE_MASTER)
  676. intf_cfg->intf_master = phys_enc->hw_intf->idx;
  677. /* setup which pp blk will connect to this intf */
  678. if (phys_enc->hw_intf->ops.bind_pingpong_blk)
  679. phys_enc->hw_intf->ops.bind_pingpong_blk(
  680. phys_enc->hw_intf,
  681. true,
  682. phys_enc->hw_pp->idx);
  683. /*setup merge_3d configuration */
  684. mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
  685. if (mode_3d && phys_enc->hw_pp->merge_3d &&
  686. intf_cfg->merge_3d_count < MAX_MERGE_3D_PER_CTL_V1)
  687. intf_cfg->merge_3d[intf_cfg->merge_3d_count++] =
  688. phys_enc->hw_pp->merge_3d->idx;
  689. if (phys_enc->hw_pp->ops.setup_3d_mode)
  690. phys_enc->hw_pp->ops.setup_3d_mode(phys_enc->hw_pp,
  691. mode_3d);
  692. }
  693. void sde_encoder_helper_split_config(
  694. struct sde_encoder_phys *phys_enc,
  695. enum sde_intf interface)
  696. {
  697. struct sde_encoder_virt *sde_enc;
  698. struct split_pipe_cfg *cfg;
  699. struct sde_hw_mdp *hw_mdptop;
  700. enum sde_rm_topology_name topology;
  701. struct msm_display_info *disp_info;
  702. if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
  703. SDE_ERROR("invalid arg(s), encoder %d\n", !phys_enc);
  704. return;
  705. }
  706. sde_enc = to_sde_encoder_virt(phys_enc->parent);
  707. hw_mdptop = phys_enc->hw_mdptop;
  708. disp_info = &sde_enc->disp_info;
  709. cfg = &phys_enc->hw_intf->cfg;
  710. memset(cfg, 0, sizeof(*cfg));
  711. if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI)
  712. return;
  713. if (disp_info->capabilities & MSM_DISPLAY_SPLIT_LINK)
  714. cfg->split_link_en = true;
  715. /**
  716. * disable split modes since encoder will be operating in as the only
  717. * encoder, either for the entire use case in the case of, for example,
  718. * single DSI, or for this frame in the case of left/right only partial
  719. * update.
  720. */
  721. if (phys_enc->split_role == ENC_ROLE_SOLO) {
  722. if (hw_mdptop->ops.setup_split_pipe)
  723. hw_mdptop->ops.setup_split_pipe(hw_mdptop, cfg);
  724. if (hw_mdptop->ops.setup_pp_split)
  725. hw_mdptop->ops.setup_pp_split(hw_mdptop, cfg);
  726. return;
  727. }
  728. cfg->en = true;
  729. cfg->mode = phys_enc->intf_mode;
  730. cfg->intf = interface;
  731. if (cfg->en && phys_enc->ops.needs_single_flush &&
  732. phys_enc->ops.needs_single_flush(phys_enc))
  733. cfg->split_flush_en = true;
  734. topology = sde_connector_get_topology_name(phys_enc->connector);
  735. if (topology == SDE_RM_TOPOLOGY_PPSPLIT)
  736. cfg->pp_split_slave = cfg->intf;
  737. else
  738. cfg->pp_split_slave = INTF_MAX;
  739. if (phys_enc->split_role == ENC_ROLE_MASTER) {
  740. SDE_DEBUG_ENC(sde_enc, "enable %d\n", cfg->en);
  741. if (hw_mdptop->ops.setup_split_pipe)
  742. hw_mdptop->ops.setup_split_pipe(hw_mdptop, cfg);
  743. } else if (sde_enc->hw_pp[0]) {
  744. /*
  745. * slave encoder
  746. * - determine split index from master index,
  747. * assume master is first pp
  748. */
  749. cfg->pp_split_index = sde_enc->hw_pp[0]->idx - PINGPONG_0;
  750. SDE_DEBUG_ENC(sde_enc, "master using pp%d\n",
  751. cfg->pp_split_index);
  752. if (hw_mdptop->ops.setup_pp_split)
  753. hw_mdptop->ops.setup_pp_split(hw_mdptop, cfg);
  754. }
  755. }
  756. bool sde_encoder_in_clone_mode(struct drm_encoder *drm_enc)
  757. {
  758. struct sde_encoder_virt *sde_enc;
  759. int i = 0;
  760. if (!drm_enc)
  761. return false;
  762. sde_enc = to_sde_encoder_virt(drm_enc);
  763. if (!sde_enc)
  764. return false;
  765. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  766. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  767. if (phys && phys->in_clone_mode)
  768. return true;
  769. }
  770. return false;
  771. }
  772. static int _sde_encoder_atomic_check_phys_enc(struct sde_encoder_virt *sde_enc,
  773. struct drm_crtc_state *crtc_state,
  774. struct drm_connector_state *conn_state)
  775. {
  776. const struct drm_display_mode *mode;
  777. struct drm_display_mode *adj_mode;
  778. int i = 0;
  779. int ret = 0;
  780. mode = &crtc_state->mode;
  781. adj_mode = &crtc_state->adjusted_mode;
  782. /* perform atomic check on the first physical encoder (master) */
  783. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  784. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  785. if (phys && phys->ops.atomic_check)
  786. ret = phys->ops.atomic_check(phys, crtc_state,
  787. conn_state);
  788. else if (phys && phys->ops.mode_fixup)
  789. if (!phys->ops.mode_fixup(phys, mode, adj_mode))
  790. ret = -EINVAL;
  791. if (ret) {
  792. SDE_ERROR_ENC(sde_enc,
  793. "mode unsupported, phys idx %d\n", i);
  794. break;
  795. }
  796. }
  797. return ret;
  798. }
  799. static int _sde_encoder_atomic_check_pu_roi(struct sde_encoder_virt *sde_enc,
  800. struct drm_crtc_state *crtc_state,
  801. struct drm_connector_state *conn_state,
  802. struct sde_connector_state *sde_conn_state,
  803. struct sde_crtc_state *sde_crtc_state)
  804. {
  805. int ret = 0;
  806. if (crtc_state->mode_changed || crtc_state->active_changed) {
  807. struct sde_rect mode_roi, roi;
  808. mode_roi.x = 0;
  809. mode_roi.y = 0;
  810. mode_roi.w = crtc_state->adjusted_mode.hdisplay;
  811. mode_roi.h = crtc_state->adjusted_mode.vdisplay;
  812. if (sde_conn_state->rois.num_rects) {
  813. sde_kms_rect_merge_rectangles(
  814. &sde_conn_state->rois, &roi);
  815. if (!sde_kms_rect_is_equal(&mode_roi, &roi)) {
  816. SDE_ERROR_ENC(sde_enc,
  817. "roi (%d,%d,%d,%d) on connector invalid during modeset\n",
  818. roi.x, roi.y, roi.w, roi.h);
  819. ret = -EINVAL;
  820. }
  821. }
  822. if (sde_crtc_state->user_roi_list.num_rects) {
  823. sde_kms_rect_merge_rectangles(
  824. &sde_crtc_state->user_roi_list, &roi);
  825. if (!sde_kms_rect_is_equal(&mode_roi, &roi)) {
  826. SDE_ERROR_ENC(sde_enc,
  827. "roi (%d,%d,%d,%d) on crtc invalid during modeset\n",
  828. roi.x, roi.y, roi.w, roi.h);
  829. ret = -EINVAL;
  830. }
  831. }
  832. }
  833. return ret;
  834. }
  835. static int _sde_encoder_atomic_check_reserve(struct drm_encoder *drm_enc,
  836. struct drm_crtc_state *crtc_state,
  837. struct drm_connector_state *conn_state,
  838. struct sde_encoder_virt *sde_enc, struct sde_kms *sde_kms,
  839. struct sde_connector *sde_conn,
  840. struct sde_connector_state *sde_conn_state)
  841. {
  842. int ret = 0;
  843. struct drm_display_mode *adj_mode = &crtc_state->adjusted_mode;
  844. if (sde_conn && drm_atomic_crtc_needs_modeset(crtc_state)) {
  845. struct msm_display_topology *topology = NULL;
  846. ret = sde_connector_get_mode_info(&sde_conn->base,
  847. adj_mode, &sde_conn_state->mode_info);
  848. if (ret) {
  849. SDE_ERROR_ENC(sde_enc,
  850. "failed to get mode info, rc = %d\n", ret);
  851. return ret;
  852. }
  853. if (sde_conn_state->mode_info.comp_info.comp_type &&
  854. sde_conn_state->mode_info.comp_info.comp_ratio >=
  855. MSM_DISPLAY_COMPRESSION_RATIO_MAX) {
  856. SDE_ERROR_ENC(sde_enc,
  857. "invalid compression ratio: %d\n",
  858. sde_conn_state->mode_info.comp_info.comp_ratio);
  859. ret = -EINVAL;
  860. return ret;
  861. }
  862. /* Reserve dynamic resources, indicating atomic_check phase */
  863. ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state,
  864. conn_state, true);
  865. if (ret) {
  866. SDE_ERROR_ENC(sde_enc,
  867. "RM failed to reserve resources, rc = %d\n",
  868. ret);
  869. return ret;
  870. }
  871. /**
  872. * Update connector state with the topology selected for the
  873. * resource set validated. Reset the topology if we are
  874. * de-activating crtc.
  875. */
  876. if (crtc_state->active)
  877. topology = &sde_conn_state->mode_info.topology;
  878. ret = sde_rm_update_topology(conn_state, topology);
  879. if (ret) {
  880. SDE_ERROR_ENC(sde_enc,
  881. "RM failed to update topology, rc: %d\n", ret);
  882. return ret;
  883. }
  884. ret = sde_connector_set_blob_data(conn_state->connector,
  885. conn_state,
  886. CONNECTOR_PROP_SDE_INFO);
  887. if (ret) {
  888. SDE_ERROR_ENC(sde_enc,
  889. "connector failed to update info, rc: %d\n",
  890. ret);
  891. return ret;
  892. }
  893. }
  894. return ret;
  895. }
  896. static int sde_encoder_virt_atomic_check(
  897. struct drm_encoder *drm_enc, struct drm_crtc_state *crtc_state,
  898. struct drm_connector_state *conn_state)
  899. {
  900. struct sde_encoder_virt *sde_enc;
  901. struct msm_drm_private *priv;
  902. struct sde_kms *sde_kms;
  903. const struct drm_display_mode *mode;
  904. struct drm_display_mode *adj_mode;
  905. struct sde_connector *sde_conn = NULL;
  906. struct sde_connector_state *sde_conn_state = NULL;
  907. struct sde_crtc_state *sde_crtc_state = NULL;
  908. enum sde_rm_topology_name old_top;
  909. int ret = 0;
  910. if (!drm_enc || !crtc_state || !conn_state) {
  911. SDE_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
  912. !drm_enc, !crtc_state, !conn_state);
  913. return -EINVAL;
  914. }
  915. sde_enc = to_sde_encoder_virt(drm_enc);
  916. SDE_DEBUG_ENC(sde_enc, "\n");
  917. priv = drm_enc->dev->dev_private;
  918. sde_kms = to_sde_kms(priv->kms);
  919. mode = &crtc_state->mode;
  920. adj_mode = &crtc_state->adjusted_mode;
  921. sde_conn = to_sde_connector(conn_state->connector);
  922. sde_conn_state = to_sde_connector_state(conn_state);
  923. sde_crtc_state = to_sde_crtc_state(crtc_state);
  924. SDE_EVT32(DRMID(drm_enc), drm_atomic_crtc_needs_modeset(crtc_state));
  925. ret = _sde_encoder_atomic_check_phys_enc(sde_enc, crtc_state,
  926. conn_state);
  927. if (ret)
  928. return ret;
  929. ret = _sde_encoder_atomic_check_pu_roi(sde_enc, crtc_state,
  930. conn_state, sde_conn_state, sde_crtc_state);
  931. if (ret)
  932. return ret;
  933. /**
  934. * record topology in previous atomic state to be able to handle
  935. * topology transitions correctly.
  936. */
  937. old_top = sde_connector_get_property(conn_state,
  938. CONNECTOR_PROP_TOPOLOGY_NAME);
  939. ret = sde_connector_set_old_topology_name(conn_state, old_top);
  940. if (ret)
  941. return ret;
  942. ret = _sde_encoder_atomic_check_reserve(drm_enc, crtc_state,
  943. conn_state, sde_enc, sde_kms, sde_conn, sde_conn_state);
  944. if (ret)
  945. return ret;
  946. ret = sde_connector_roi_v1_check_roi(conn_state);
  947. if (ret) {
  948. SDE_ERROR_ENC(sde_enc, "connector roi check failed, rc: %d",
  949. ret);
  950. return ret;
  951. }
  952. drm_mode_set_crtcinfo(adj_mode, 0);
  953. SDE_EVT32(DRMID(drm_enc), adj_mode->flags, adj_mode->private_flags);
  954. return ret;
  955. }
  956. static int _sde_encoder_dsc_update_pic_dim(struct msm_display_dsc_info *dsc,
  957. int pic_width, int pic_height)
  958. {
  959. if (!dsc || !pic_width || !pic_height) {
  960. SDE_ERROR("invalid input: pic_width=%d pic_height=%d\n",
  961. pic_width, pic_height);
  962. return -EINVAL;
  963. }
  964. if ((pic_width % dsc->slice_width) ||
  965. (pic_height % dsc->slice_height)) {
  966. SDE_ERROR("pic_dim=%dx%d has to be multiple of slice=%dx%d\n",
  967. pic_width, pic_height,
  968. dsc->slice_width, dsc->slice_height);
  969. return -EINVAL;
  970. }
  971. dsc->pic_width = pic_width;
  972. dsc->pic_height = pic_height;
  973. return 0;
  974. }
  975. static void _sde_encoder_dsc_pclk_param_calc(struct msm_display_dsc_info *dsc,
  976. int intf_width)
  977. {
  978. int slice_per_pkt, slice_per_intf;
  979. int bytes_in_slice, total_bytes_per_intf;
  980. if (!dsc || !dsc->slice_width || !dsc->slice_per_pkt ||
  981. (intf_width < dsc->slice_width)) {
  982. SDE_ERROR("invalid input: intf_width=%d slice_width=%d\n",
  983. intf_width, dsc ? dsc->slice_width : -1);
  984. return;
  985. }
  986. slice_per_pkt = dsc->slice_per_pkt;
  987. slice_per_intf = DIV_ROUND_UP(intf_width, dsc->slice_width);
  988. /*
  989. * If slice_per_pkt is greater than slice_per_intf then default to 1.
  990. * This can happen during partial update.
  991. */
  992. if (slice_per_pkt > slice_per_intf)
  993. slice_per_pkt = 1;
  994. bytes_in_slice = DIV_ROUND_UP(dsc->slice_width * dsc->bpp, 8);
  995. total_bytes_per_intf = bytes_in_slice * slice_per_intf;
  996. dsc->eol_byte_num = total_bytes_per_intf % 3;
  997. dsc->pclk_per_line = DIV_ROUND_UP(total_bytes_per_intf, 3);
  998. dsc->bytes_in_slice = bytes_in_slice;
  999. dsc->bytes_per_pkt = bytes_in_slice * slice_per_pkt;
  1000. dsc->pkt_per_line = slice_per_intf / slice_per_pkt;
  1001. }
  1002. static int _sde_encoder_dsc_initial_line_calc(struct msm_display_dsc_info *dsc,
  1003. int enc_ip_width)
  1004. {
  1005. int max_ssm_delay, max_se_size, obuf_latency;
  1006. int input_ssm_out_latency, base_hs_latency;
  1007. int multi_hs_extra_latency, mux_word_size;
  1008. /* Hardent core config */
  1009. int max_muxword_size = 48;
  1010. int output_rate = 64;
  1011. int rtl_max_bpc = 10;
  1012. int pipeline_latency = 28;
  1013. max_se_size = 4 * (rtl_max_bpc + 1);
  1014. max_ssm_delay = max_se_size + max_muxword_size - 1;
  1015. mux_word_size = (dsc->bpc >= 12 ? 64 : 48);
  1016. input_ssm_out_latency = pipeline_latency + (3 * (max_ssm_delay + 2));
  1017. obuf_latency = DIV_ROUND_UP((9 * output_rate +
  1018. mux_word_size), dsc->bpp) + 1;
  1019. base_hs_latency = dsc->initial_xmit_delay + input_ssm_out_latency
  1020. + obuf_latency;
  1021. multi_hs_extra_latency = DIV_ROUND_UP((8 * dsc->chunk_size), dsc->bpp);
  1022. dsc->initial_lines = DIV_ROUND_UP((base_hs_latency +
  1023. multi_hs_extra_latency), dsc->slice_width);
  1024. return 0;
  1025. }
  1026. static bool _sde_encoder_dsc_ich_reset_override_needed(bool pu_en,
  1027. struct msm_display_dsc_info *dsc)
  1028. {
  1029. /*
  1030. * As per the DSC spec, ICH_RESET can be either end of the slice line
  1031. * or at the end of the slice. HW internally generates ich_reset at
  1032. * end of the slice line if DSC_MERGE is used or encoder has two
  1033. * soft slices. However, if encoder has only 1 soft slice and DSC_MERGE
  1034. * is not used then it will generate ich_reset at the end of slice.
  1035. *
  1036. * Now as per the spec, during one PPS session, position where
  1037. * ich_reset is generated should not change. Now if full-screen frame
  1038. * has more than 1 soft slice then HW will automatically generate
  1039. * ich_reset at the end of slice_line. But for the same panel, if
  1040. * partial frame is enabled and only 1 encoder is used with 1 slice,
  1041. * then HW will generate ich_reset at end of the slice. This is a
  1042. * mismatch. Prevent this by overriding HW's decision.
  1043. */
  1044. return pu_en && dsc && (dsc->full_frame_slices > 1) &&
  1045. (dsc->slice_width == dsc->pic_width);
  1046. }
  1047. static void _sde_encoder_dsc_pipe_cfg(struct sde_hw_dsc *hw_dsc,
  1048. struct sde_hw_pingpong *hw_pp, struct msm_display_dsc_info *dsc,
  1049. u32 common_mode, bool ich_reset, bool enable,
  1050. struct sde_hw_pingpong *hw_dsc_pp)
  1051. {
  1052. if (!enable) {
  1053. if (hw_dsc_pp && hw_dsc_pp->ops.disable_dsc)
  1054. hw_dsc_pp->ops.disable_dsc(hw_dsc_pp);
  1055. if (hw_dsc && hw_dsc->ops.dsc_disable)
  1056. hw_dsc->ops.dsc_disable(hw_dsc);
  1057. if (hw_dsc && hw_dsc->ops.bind_pingpong_blk)
  1058. hw_dsc->ops.bind_pingpong_blk(hw_dsc, false,
  1059. PINGPONG_MAX);
  1060. return;
  1061. }
  1062. if (!dsc || !hw_dsc || !hw_pp || !hw_dsc_pp) {
  1063. SDE_ERROR("invalid params %d %d %d %d\n", !dsc, !hw_dsc,
  1064. !hw_pp, !hw_dsc_pp);
  1065. return;
  1066. }
  1067. if (hw_dsc->ops.dsc_config)
  1068. hw_dsc->ops.dsc_config(hw_dsc, dsc, common_mode, ich_reset);
  1069. if (hw_dsc->ops.dsc_config_thresh)
  1070. hw_dsc->ops.dsc_config_thresh(hw_dsc, dsc);
  1071. if (hw_dsc_pp->ops.setup_dsc)
  1072. hw_dsc_pp->ops.setup_dsc(hw_dsc_pp);
  1073. if (hw_dsc->ops.bind_pingpong_blk)
  1074. hw_dsc->ops.bind_pingpong_blk(hw_dsc, true, hw_pp->idx);
  1075. if (hw_dsc_pp->ops.enable_dsc)
  1076. hw_dsc_pp->ops.enable_dsc(hw_dsc_pp);
  1077. }
  1078. static void _sde_encoder_get_connector_roi(
  1079. struct sde_encoder_virt *sde_enc,
  1080. struct sde_rect *merged_conn_roi)
  1081. {
  1082. struct drm_connector *drm_conn;
  1083. struct sde_connector_state *c_state;
  1084. if (!sde_enc || !merged_conn_roi)
  1085. return;
  1086. drm_conn = sde_enc->phys_encs[0]->connector;
  1087. if (!drm_conn || !drm_conn->state)
  1088. return;
  1089. c_state = to_sde_connector_state(drm_conn->state);
  1090. sde_kms_rect_merge_rectangles(&c_state->rois, merged_conn_roi);
  1091. }
  1092. static int _sde_encoder_dsc_n_lm_1_enc_1_intf(struct sde_encoder_virt *sde_enc)
  1093. {
  1094. int this_frame_slices;
  1095. int intf_ip_w, enc_ip_w;
  1096. int ich_res, dsc_common_mode = 0;
  1097. struct sde_hw_pingpong *hw_pp = sde_enc->hw_pp[0];
  1098. struct sde_hw_pingpong *hw_dsc_pp = sde_enc->hw_dsc_pp[0];
  1099. struct sde_hw_dsc *hw_dsc = sde_enc->hw_dsc[0];
  1100. struct sde_encoder_phys *enc_master = sde_enc->cur_master;
  1101. const struct sde_rect *roi = &sde_enc->cur_conn_roi;
  1102. struct msm_display_dsc_info *dsc = NULL;
  1103. struct sde_hw_ctl *hw_ctl;
  1104. struct sde_ctl_dsc_cfg cfg;
  1105. if (hw_dsc == NULL || hw_pp == NULL || !enc_master) {
  1106. SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
  1107. return -EINVAL;
  1108. }
  1109. hw_ctl = enc_master->hw_ctl;
  1110. memset(&cfg, 0, sizeof(cfg));
  1111. dsc = &sde_enc->mode_info.comp_info.dsc_info;
  1112. _sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
  1113. this_frame_slices = roi->w / dsc->slice_width;
  1114. intf_ip_w = this_frame_slices * dsc->slice_width;
  1115. _sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
  1116. enc_ip_w = intf_ip_w;
  1117. _sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
  1118. ich_res = _sde_encoder_dsc_ich_reset_override_needed(false, dsc);
  1119. if (enc_master->intf_mode == INTF_MODE_VIDEO)
  1120. dsc_common_mode = DSC_MODE_VIDEO;
  1121. SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
  1122. roi->w, roi->h, dsc_common_mode);
  1123. SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h, dsc_common_mode);
  1124. _sde_encoder_dsc_pipe_cfg(hw_dsc, hw_pp, dsc, dsc_common_mode,
  1125. ich_res, true, hw_dsc_pp);
  1126. cfg.dsc[cfg.dsc_count++] = hw_dsc->idx;
  1127. /* setup dsc active configuration in the control path */
  1128. if (hw_ctl->ops.setup_dsc_cfg) {
  1129. hw_ctl->ops.setup_dsc_cfg(hw_ctl, &cfg);
  1130. SDE_DEBUG_ENC(sde_enc,
  1131. "setup dsc_cfg hw_ctl[%d], count:%d,dsc[0]:%d, dsc[1]:%d\n",
  1132. hw_ctl->idx,
  1133. cfg.dsc_count,
  1134. cfg.dsc[0],
  1135. cfg.dsc[1]);
  1136. }
  1137. if (hw_ctl->ops.update_bitmask_dsc)
  1138. hw_ctl->ops.update_bitmask_dsc(hw_ctl, hw_dsc->idx, 1);
  1139. return 0;
  1140. }
  1141. static int _sde_encoder_dsc_2_lm_2_enc_2_intf(struct sde_encoder_virt *sde_enc,
  1142. struct sde_encoder_kickoff_params *params)
  1143. {
  1144. int this_frame_slices;
  1145. int intf_ip_w, enc_ip_w;
  1146. int ich_res, dsc_common_mode;
  1147. struct sde_encoder_phys *enc_master = sde_enc->cur_master;
  1148. const struct sde_rect *roi = &sde_enc->cur_conn_roi;
  1149. struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
  1150. struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
  1151. struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
  1152. struct msm_display_dsc_info dsc[MAX_CHANNELS_PER_ENC];
  1153. bool half_panel_partial_update;
  1154. struct sde_hw_ctl *hw_ctl = NULL;
  1155. struct sde_ctl_dsc_cfg cfg;
  1156. int i;
  1157. if (!enc_master) {
  1158. SDE_ERROR_ENC(sde_enc, "invalid encoder master for DSC\n");
  1159. return -EINVAL;
  1160. }
  1161. memset(&cfg, 0, sizeof(cfg));
  1162. for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
  1163. hw_pp[i] = sde_enc->hw_pp[i];
  1164. hw_dsc[i] = sde_enc->hw_dsc[i];
  1165. hw_dsc_pp[i] = sde_enc->hw_dsc_pp[i];
  1166. if (!hw_pp[i] || !hw_dsc[i] || !hw_dsc_pp[i]) {
  1167. SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
  1168. return -EINVAL;
  1169. }
  1170. }
  1171. hw_ctl = enc_master->hw_ctl;
  1172. half_panel_partial_update =
  1173. hweight_long(params->affected_displays) == 1;
  1174. dsc_common_mode = 0;
  1175. if (!half_panel_partial_update)
  1176. dsc_common_mode |= DSC_MODE_SPLIT_PANEL;
  1177. if (enc_master->intf_mode == INTF_MODE_VIDEO)
  1178. dsc_common_mode |= DSC_MODE_VIDEO;
  1179. memcpy(&dsc[0], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[0]));
  1180. memcpy(&dsc[1], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[1]));
  1181. /*
  1182. * Since both DSC use same pic dimension, set same pic dimension
  1183. * to both DSC structures.
  1184. */
  1185. _sde_encoder_dsc_update_pic_dim(&dsc[0], roi->w, roi->h);
  1186. _sde_encoder_dsc_update_pic_dim(&dsc[1], roi->w, roi->h);
  1187. this_frame_slices = roi->w / dsc[0].slice_width;
  1188. intf_ip_w = this_frame_slices * dsc[0].slice_width;
  1189. if (!half_panel_partial_update)
  1190. intf_ip_w /= 2;
  1191. /*
  1192. * In this topology when both interfaces are active, they have same
  1193. * load so intf_ip_w will be same.
  1194. */
  1195. _sde_encoder_dsc_pclk_param_calc(&dsc[0], intf_ip_w);
  1196. _sde_encoder_dsc_pclk_param_calc(&dsc[1], intf_ip_w);
  1197. /*
  1198. * In this topology, since there is no dsc_merge, uncompressed input
  1199. * to encoder and interface is same.
  1200. */
  1201. enc_ip_w = intf_ip_w;
  1202. _sde_encoder_dsc_initial_line_calc(&dsc[0], enc_ip_w);
  1203. _sde_encoder_dsc_initial_line_calc(&dsc[1], enc_ip_w);
  1204. /*
  1205. * __is_ich_reset_override_needed should be called only after
  1206. * updating pic dimension, mdss_panel_dsc_update_pic_dim.
  1207. */
  1208. ich_res = _sde_encoder_dsc_ich_reset_override_needed(
  1209. half_panel_partial_update, &dsc[0]);
  1210. SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
  1211. roi->w, roi->h, dsc_common_mode);
  1212. for (i = 0; i < sde_enc->num_phys_encs &&
  1213. i < MAX_CHANNELS_PER_ENC; i++) {
  1214. bool active = !!((1 << i) & params->affected_displays);
  1215. SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h,
  1216. dsc_common_mode, i, active);
  1217. _sde_encoder_dsc_pipe_cfg(hw_dsc[i], hw_pp[i], &dsc[i],
  1218. dsc_common_mode, ich_res, active, hw_dsc_pp[i]);
  1219. if (active) {
  1220. if (cfg.dsc_count >= MAX_DSC_PER_CTL_V1) {
  1221. pr_err("Invalid dsc count:%d\n",
  1222. cfg.dsc_count);
  1223. return -EINVAL;
  1224. }
  1225. cfg.dsc[cfg.dsc_count++] = hw_dsc[i]->idx;
  1226. if (hw_ctl->ops.update_bitmask_dsc)
  1227. hw_ctl->ops.update_bitmask_dsc(hw_ctl,
  1228. hw_dsc[i]->idx, 1);
  1229. }
  1230. }
  1231. /* setup dsc active configuration in the control path */
  1232. if (hw_ctl->ops.setup_dsc_cfg) {
  1233. hw_ctl->ops.setup_dsc_cfg(hw_ctl, &cfg);
  1234. SDE_DEBUG_ENC(sde_enc,
  1235. "setup dsc_cfg hw_ctl[%d], count:%d,dsc[0]:%d, dsc[1]:%d\n",
  1236. hw_ctl->idx,
  1237. cfg.dsc_count,
  1238. cfg.dsc[0],
  1239. cfg.dsc[1]);
  1240. }
  1241. return 0;
  1242. }
  1243. static int _sde_encoder_dsc_2_lm_2_enc_1_intf(struct sde_encoder_virt *sde_enc,
  1244. struct sde_encoder_kickoff_params *params)
  1245. {
  1246. int this_frame_slices;
  1247. int intf_ip_w, enc_ip_w;
  1248. int ich_res, dsc_common_mode;
  1249. struct sde_encoder_phys *enc_master = sde_enc->cur_master;
  1250. const struct sde_rect *roi = &sde_enc->cur_conn_roi;
  1251. struct sde_hw_dsc *hw_dsc[MAX_CHANNELS_PER_ENC];
  1252. struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
  1253. struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
  1254. struct msm_display_dsc_info *dsc = NULL;
  1255. bool half_panel_partial_update;
  1256. struct sde_hw_ctl *hw_ctl = NULL;
  1257. struct sde_ctl_dsc_cfg cfg;
  1258. int i;
  1259. if (!enc_master) {
  1260. SDE_ERROR_ENC(sde_enc, "invalid encoder master for DSC\n");
  1261. return -EINVAL;
  1262. }
  1263. memset(&cfg, 0, sizeof(cfg));
  1264. for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
  1265. hw_pp[i] = sde_enc->hw_pp[i];
  1266. hw_dsc[i] = sde_enc->hw_dsc[i];
  1267. hw_dsc_pp[i] = sde_enc->hw_dsc_pp[i];
  1268. if (!hw_pp[i] || !hw_dsc[i] || !hw_dsc_pp[i]) {
  1269. SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
  1270. return -EINVAL;
  1271. }
  1272. }
  1273. hw_ctl = enc_master->hw_ctl;
  1274. dsc = &sde_enc->mode_info.comp_info.dsc_info;
  1275. half_panel_partial_update =
  1276. hweight_long(params->affected_displays) == 1;
  1277. dsc_common_mode = 0;
  1278. if (!half_panel_partial_update)
  1279. dsc_common_mode |= DSC_MODE_SPLIT_PANEL | DSC_MODE_MULTIPLEX;
  1280. if (enc_master->intf_mode == INTF_MODE_VIDEO)
  1281. dsc_common_mode |= DSC_MODE_VIDEO;
  1282. _sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
  1283. this_frame_slices = roi->w / dsc->slice_width;
  1284. intf_ip_w = this_frame_slices * dsc->slice_width;
  1285. _sde_encoder_dsc_pclk_param_calc(dsc, intf_ip_w);
  1286. /*
  1287. * dsc merge case: when using 2 encoders for the same stream,
  1288. * no. of slices need to be same on both the encoders.
  1289. */
  1290. enc_ip_w = intf_ip_w / 2;
  1291. _sde_encoder_dsc_initial_line_calc(dsc, enc_ip_w);
  1292. ich_res = _sde_encoder_dsc_ich_reset_override_needed(
  1293. half_panel_partial_update, dsc);
  1294. SDE_DEBUG_ENC(sde_enc, "pic_w: %d pic_h: %d mode:%d\n",
  1295. roi->w, roi->h, dsc_common_mode);
  1296. SDE_EVT32(DRMID(&sde_enc->base), roi->w, roi->h,
  1297. dsc_common_mode, i, params->affected_displays);
  1298. _sde_encoder_dsc_pipe_cfg(hw_dsc[0], hw_pp[0], dsc, dsc_common_mode,
  1299. ich_res, true, hw_dsc_pp[0]);
  1300. cfg.dsc[0] = hw_dsc[0]->idx;
  1301. cfg.dsc_count++;
  1302. if (hw_ctl->ops.update_bitmask_dsc)
  1303. hw_ctl->ops.update_bitmask_dsc(hw_ctl, hw_dsc[0]->idx, 1);
  1304. _sde_encoder_dsc_pipe_cfg(hw_dsc[1], hw_pp[1], dsc, dsc_common_mode,
  1305. ich_res, !half_panel_partial_update, hw_dsc_pp[1]);
  1306. if (!half_panel_partial_update) {
  1307. cfg.dsc[1] = hw_dsc[1]->idx;
  1308. cfg.dsc_count++;
  1309. if (hw_ctl->ops.update_bitmask_dsc)
  1310. hw_ctl->ops.update_bitmask_dsc(hw_ctl, hw_dsc[1]->idx,
  1311. 1);
  1312. }
  1313. /* setup dsc active configuration in the control path */
  1314. if (hw_ctl->ops.setup_dsc_cfg) {
  1315. hw_ctl->ops.setup_dsc_cfg(hw_ctl, &cfg);
  1316. SDE_DEBUG_ENC(sde_enc,
  1317. "setup_dsc_cfg hw_ctl[%d], count:%d,dsc[0]:%d, dsc[1]:%d\n",
  1318. hw_ctl->idx,
  1319. cfg.dsc_count,
  1320. cfg.dsc[0],
  1321. cfg.dsc[1]);
  1322. }
  1323. return 0;
  1324. }
  1325. static int _sde_encoder_update_roi(struct drm_encoder *drm_enc)
  1326. {
  1327. struct sde_encoder_virt *sde_enc;
  1328. struct drm_connector *drm_conn;
  1329. struct drm_display_mode *adj_mode;
  1330. struct sde_rect roi;
  1331. if (!drm_enc) {
  1332. SDE_ERROR("invalid encoder parameter\n");
  1333. return -EINVAL;
  1334. }
  1335. sde_enc = to_sde_encoder_virt(drm_enc);
  1336. if (!sde_enc->crtc || !sde_enc->crtc->state) {
  1337. SDE_ERROR("invalid crtc parameter\n");
  1338. return -EINVAL;
  1339. }
  1340. if (!sde_enc->cur_master) {
  1341. SDE_ERROR("invalid cur_master parameter\n");
  1342. return -EINVAL;
  1343. }
  1344. adj_mode = &sde_enc->cur_master->cached_mode;
  1345. drm_conn = sde_enc->cur_master->connector;
  1346. _sde_encoder_get_connector_roi(sde_enc, &roi);
  1347. if (sde_kms_rect_is_null(&roi)) {
  1348. roi.w = adj_mode->hdisplay;
  1349. roi.h = adj_mode->vdisplay;
  1350. }
  1351. memcpy(&sde_enc->prv_conn_roi, &sde_enc->cur_conn_roi,
  1352. sizeof(sde_enc->prv_conn_roi));
  1353. memcpy(&sde_enc->cur_conn_roi, &roi, sizeof(sde_enc->cur_conn_roi));
  1354. return 0;
  1355. }
  1356. static int _sde_encoder_dsc_setup(struct sde_encoder_virt *sde_enc,
  1357. struct sde_encoder_kickoff_params *params)
  1358. {
  1359. enum sde_rm_topology_name topology;
  1360. struct drm_connector *drm_conn;
  1361. int ret = 0;
  1362. if (!sde_enc || !params || !sde_enc->phys_encs[0] ||
  1363. !sde_enc->phys_encs[0]->connector)
  1364. return -EINVAL;
  1365. drm_conn = sde_enc->phys_encs[0]->connector;
  1366. topology = sde_connector_get_topology_name(drm_conn);
  1367. if (topology == SDE_RM_TOPOLOGY_NONE) {
  1368. SDE_ERROR_ENC(sde_enc, "topology not set yet\n");
  1369. return -EINVAL;
  1370. }
  1371. SDE_DEBUG_ENC(sde_enc, "topology:%d\n", topology);
  1372. SDE_EVT32(DRMID(&sde_enc->base), topology,
  1373. sde_enc->cur_conn_roi.x,
  1374. sde_enc->cur_conn_roi.y,
  1375. sde_enc->cur_conn_roi.w,
  1376. sde_enc->cur_conn_roi.h,
  1377. sde_enc->prv_conn_roi.x,
  1378. sde_enc->prv_conn_roi.y,
  1379. sde_enc->prv_conn_roi.w,
  1380. sde_enc->prv_conn_roi.h,
  1381. sde_enc->cur_master->cached_mode.hdisplay,
  1382. sde_enc->cur_master->cached_mode.vdisplay);
  1383. if (sde_kms_rect_is_equal(&sde_enc->cur_conn_roi,
  1384. &sde_enc->prv_conn_roi))
  1385. return ret;
  1386. switch (topology) {
  1387. case SDE_RM_TOPOLOGY_SINGLEPIPE_DSC:
  1388. case SDE_RM_TOPOLOGY_DUALPIPE_3DMERGE_DSC:
  1389. ret = _sde_encoder_dsc_n_lm_1_enc_1_intf(sde_enc);
  1390. break;
  1391. case SDE_RM_TOPOLOGY_DUALPIPE_DSCMERGE:
  1392. ret = _sde_encoder_dsc_2_lm_2_enc_1_intf(sde_enc, params);
  1393. break;
  1394. case SDE_RM_TOPOLOGY_DUALPIPE_DSC:
  1395. ret = _sde_encoder_dsc_2_lm_2_enc_2_intf(sde_enc, params);
  1396. break;
  1397. default:
  1398. SDE_ERROR_ENC(sde_enc, "No DSC support for topology %d",
  1399. topology);
  1400. return -EINVAL;
  1401. }
  1402. return ret;
  1403. }
  1404. void sde_encoder_helper_vsync_config(struct sde_encoder_phys *phys_enc,
  1405. u32 vsync_source, bool is_dummy)
  1406. {
  1407. struct sde_vsync_source_cfg vsync_cfg = { 0 };
  1408. struct msm_drm_private *priv;
  1409. struct sde_kms *sde_kms;
  1410. struct sde_hw_mdp *hw_mdptop;
  1411. struct drm_encoder *drm_enc;
  1412. struct sde_encoder_virt *sde_enc;
  1413. int i;
  1414. sde_enc = to_sde_encoder_virt(phys_enc->parent);
  1415. if (!sde_enc) {
  1416. SDE_ERROR("invalid param sde_enc:%d\n", sde_enc != NULL);
  1417. return;
  1418. } else if (sde_enc->num_phys_encs > ARRAY_SIZE(sde_enc->hw_pp)) {
  1419. SDE_ERROR("invalid num phys enc %d/%d\n",
  1420. sde_enc->num_phys_encs,
  1421. (int) ARRAY_SIZE(sde_enc->hw_pp));
  1422. return;
  1423. }
  1424. drm_enc = &sde_enc->base;
  1425. /* this pointers are checked in virt_enable_helper */
  1426. priv = drm_enc->dev->dev_private;
  1427. sde_kms = to_sde_kms(priv->kms);
  1428. if (!sde_kms) {
  1429. SDE_ERROR("invalid sde_kms\n");
  1430. return;
  1431. }
  1432. hw_mdptop = sde_kms->hw_mdp;
  1433. if (!hw_mdptop) {
  1434. SDE_ERROR("invalid mdptop\n");
  1435. return;
  1436. }
  1437. if (hw_mdptop->ops.setup_vsync_source) {
  1438. for (i = 0; i < sde_enc->num_phys_encs; i++)
  1439. vsync_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
  1440. vsync_cfg.pp_count = sde_enc->num_phys_encs;
  1441. vsync_cfg.frame_rate = sde_enc->mode_info.frame_rate;
  1442. vsync_cfg.vsync_source = vsync_source;
  1443. vsync_cfg.is_dummy = is_dummy;
  1444. hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
  1445. }
  1446. }
  1447. static void _sde_encoder_update_vsync_source(struct sde_encoder_virt *sde_enc,
  1448. struct msm_display_info *disp_info, bool is_dummy)
  1449. {
  1450. struct sde_encoder_phys *phys;
  1451. int i;
  1452. u32 vsync_source;
  1453. if (!sde_enc || !disp_info) {
  1454. SDE_ERROR("invalid param sde_enc:%d or disp_info:%d\n",
  1455. sde_enc != NULL, disp_info != NULL);
  1456. return;
  1457. } else if (sde_enc->num_phys_encs > ARRAY_SIZE(sde_enc->hw_pp)) {
  1458. SDE_ERROR("invalid num phys enc %d/%d\n",
  1459. sde_enc->num_phys_encs,
  1460. (int) ARRAY_SIZE(sde_enc->hw_pp));
  1461. return;
  1462. }
  1463. if (sde_encoder_check_curr_mode(&sde_enc->base, MSM_DISPLAY_CMD_MODE)) {
  1464. if (is_dummy)
  1465. vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_0 -
  1466. sde_enc->te_source;
  1467. else if (disp_info->is_te_using_watchdog_timer)
  1468. vsync_source = SDE_VSYNC_SOURCE_WD_TIMER_4;
  1469. else
  1470. vsync_source = sde_enc->te_source;
  1471. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  1472. phys = sde_enc->phys_encs[i];
  1473. if (phys && phys->ops.setup_vsync_source)
  1474. phys->ops.setup_vsync_source(phys,
  1475. vsync_source, is_dummy);
  1476. }
  1477. }
  1478. }
  1479. static void _sde_encoder_dsc_disable(struct sde_encoder_virt *sde_enc)
  1480. {
  1481. int i;
  1482. struct sde_hw_pingpong *hw_pp = NULL;
  1483. struct sde_hw_pingpong *hw_dsc_pp = NULL;
  1484. struct sde_hw_dsc *hw_dsc = NULL;
  1485. struct sde_hw_ctl *hw_ctl = NULL;
  1486. struct sde_ctl_dsc_cfg cfg;
  1487. if (!sde_enc || !sde_enc->phys_encs[0] ||
  1488. !sde_enc->phys_encs[0]->connector) {
  1489. SDE_ERROR("invalid params %d %d\n",
  1490. !sde_enc, sde_enc ? !sde_enc->phys_encs[0] : -1);
  1491. return;
  1492. }
  1493. if (sde_enc->cur_master)
  1494. hw_ctl = sde_enc->cur_master->hw_ctl;
  1495. /* Disable DSC for all the pp's present in this topology */
  1496. for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
  1497. hw_pp = sde_enc->hw_pp[i];
  1498. hw_dsc = sde_enc->hw_dsc[i];
  1499. hw_dsc_pp = sde_enc->hw_dsc_pp[i];
  1500. _sde_encoder_dsc_pipe_cfg(hw_dsc, hw_pp, NULL,
  1501. 0, 0, 0, hw_dsc_pp);
  1502. if (hw_dsc)
  1503. sde_enc->dirty_dsc_ids[i] = hw_dsc->idx;
  1504. }
  1505. /* Clear the DSC ACTIVE config for this CTL */
  1506. if (hw_ctl && hw_ctl->ops.setup_dsc_cfg) {
  1507. memset(&cfg, 0, sizeof(cfg));
  1508. hw_ctl->ops.setup_dsc_cfg(hw_ctl, &cfg);
  1509. }
  1510. /**
  1511. * Since pending flushes from previous commit get cleared
  1512. * sometime after this point, setting DSC flush bits now
  1513. * will have no effect. Therefore dirty_dsc_ids track which
  1514. * DSC blocks must be flushed for the next trigger.
  1515. */
  1516. }
  1517. static int _sde_encoder_switch_to_watchdog_vsync(struct drm_encoder *drm_enc)
  1518. {
  1519. struct sde_encoder_virt *sde_enc;
  1520. struct msm_display_info disp_info;
  1521. if (!drm_enc) {
  1522. pr_err("invalid drm encoder\n");
  1523. return -EINVAL;
  1524. }
  1525. sde_enc = to_sde_encoder_virt(drm_enc);
  1526. sde_encoder_control_te(drm_enc, false);
  1527. memcpy(&disp_info, &sde_enc->disp_info, sizeof(disp_info));
  1528. disp_info.is_te_using_watchdog_timer = true;
  1529. _sde_encoder_update_vsync_source(sde_enc, &disp_info, false);
  1530. sde_encoder_control_te(drm_enc, true);
  1531. return 0;
  1532. }
  1533. static int _sde_encoder_rsc_client_update_vsync_wait(
  1534. struct drm_encoder *drm_enc, struct sde_encoder_virt *sde_enc,
  1535. int wait_vblank_crtc_id)
  1536. {
  1537. int wait_refcount = 0, ret = 0;
  1538. int pipe = -1;
  1539. int wait_count = 0;
  1540. struct drm_crtc *primary_crtc;
  1541. struct drm_crtc *crtc;
  1542. crtc = sde_enc->crtc;
  1543. if (wait_vblank_crtc_id)
  1544. wait_refcount =
  1545. sde_rsc_client_get_vsync_refcount(sde_enc->rsc_client);
  1546. SDE_EVT32_VERBOSE(DRMID(drm_enc), wait_vblank_crtc_id, wait_refcount,
  1547. SDE_EVTLOG_FUNC_ENTRY);
  1548. if (crtc->base.id != wait_vblank_crtc_id) {
  1549. primary_crtc = drm_crtc_find(drm_enc->dev,
  1550. NULL, wait_vblank_crtc_id);
  1551. if (!primary_crtc) {
  1552. SDE_ERROR_ENC(sde_enc,
  1553. "failed to find primary crtc id %d\n",
  1554. wait_vblank_crtc_id);
  1555. return -EINVAL;
  1556. }
  1557. pipe = drm_crtc_index(primary_crtc);
  1558. }
  1559. /**
  1560. * note: VBLANK is expected to be enabled at this point in
  1561. * resource control state machine if on primary CRTC
  1562. */
  1563. for (wait_count = 0; wait_count < MAX_RSC_WAIT; wait_count++) {
  1564. if (sde_rsc_client_is_state_update_complete(
  1565. sde_enc->rsc_client))
  1566. break;
  1567. if (crtc->base.id == wait_vblank_crtc_id)
  1568. ret = sde_encoder_wait_for_event(drm_enc,
  1569. MSM_ENC_VBLANK);
  1570. else
  1571. drm_wait_one_vblank(drm_enc->dev, pipe);
  1572. if (ret) {
  1573. SDE_ERROR_ENC(sde_enc,
  1574. "wait for vblank failed ret:%d\n", ret);
  1575. /**
  1576. * rsc hardware may hang without vsync. avoid rsc hang
  1577. * by generating the vsync from watchdog timer.
  1578. */
  1579. if (crtc->base.id == wait_vblank_crtc_id)
  1580. _sde_encoder_switch_to_watchdog_vsync(drm_enc);
  1581. }
  1582. }
  1583. if (wait_count >= MAX_RSC_WAIT)
  1584. SDE_EVT32(DRMID(drm_enc), wait_vblank_crtc_id, wait_count,
  1585. SDE_EVTLOG_ERROR);
  1586. if (wait_refcount)
  1587. sde_rsc_client_reset_vsync_refcount(sde_enc->rsc_client);
  1588. SDE_EVT32_VERBOSE(DRMID(drm_enc), wait_vblank_crtc_id, wait_refcount,
  1589. SDE_EVTLOG_FUNC_EXIT);
  1590. return ret;
  1591. }
  1592. static int _sde_encoder_update_rsc_client(
  1593. struct drm_encoder *drm_enc, bool enable)
  1594. {
  1595. struct sde_encoder_virt *sde_enc;
  1596. struct drm_crtc *crtc;
  1597. enum sde_rsc_state rsc_state = SDE_RSC_IDLE_STATE;
  1598. struct sde_rsc_cmd_config *rsc_config;
  1599. int ret, prefill_lines;
  1600. struct msm_display_info *disp_info;
  1601. struct msm_mode_info *mode_info;
  1602. int wait_vblank_crtc_id = SDE_RSC_INVALID_CRTC_ID;
  1603. u32 qsync_mode = 0;
  1604. if (!drm_enc || !drm_enc->dev) {
  1605. SDE_ERROR("invalid encoder arguments\n");
  1606. return -EINVAL;
  1607. }
  1608. sde_enc = to_sde_encoder_virt(drm_enc);
  1609. mode_info = &sde_enc->mode_info;
  1610. crtc = sde_enc->crtc;
  1611. if (!sde_enc->crtc) {
  1612. SDE_ERROR("invalid crtc parameter\n");
  1613. return -EINVAL;
  1614. }
  1615. disp_info = &sde_enc->disp_info;
  1616. rsc_config = &sde_enc->rsc_config;
  1617. if (!sde_enc->rsc_client) {
  1618. SDE_DEBUG_ENC(sde_enc, "rsc client not created\n");
  1619. return 0;
  1620. }
  1621. /**
  1622. * only primary command mode panel without Qsync can request CMD state.
  1623. * all other panels/displays can request for VID state including
  1624. * secondary command mode panel.
  1625. * Clone mode encoder can request CLK STATE only.
  1626. */
  1627. if (sde_enc->cur_master)
  1628. qsync_mode = sde_connector_get_qsync_mode(
  1629. sde_enc->cur_master->connector);
  1630. if (sde_encoder_in_clone_mode(drm_enc) ||
  1631. (disp_info->display_type != SDE_CONNECTOR_PRIMARY) ||
  1632. (disp_info->display_type && qsync_mode))
  1633. rsc_state = enable ? SDE_RSC_CLK_STATE : SDE_RSC_IDLE_STATE;
  1634. else if (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE))
  1635. rsc_state = enable ? SDE_RSC_CMD_STATE : SDE_RSC_IDLE_STATE;
  1636. else if (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_VIDEO_MODE))
  1637. rsc_state = enable ? SDE_RSC_VID_STATE : SDE_RSC_IDLE_STATE;
  1638. SDE_EVT32(rsc_state, qsync_mode);
  1639. prefill_lines = mode_info->prefill_lines;
  1640. /* compare specific items and reconfigure the rsc */
  1641. if ((rsc_config->fps != mode_info->frame_rate) ||
  1642. (rsc_config->vtotal != mode_info->vtotal) ||
  1643. (rsc_config->prefill_lines != prefill_lines) ||
  1644. (rsc_config->jitter_numer != mode_info->jitter_numer) ||
  1645. (rsc_config->jitter_denom != mode_info->jitter_denom)) {
  1646. rsc_config->fps = mode_info->frame_rate;
  1647. rsc_config->vtotal = mode_info->vtotal;
  1648. rsc_config->prefill_lines = prefill_lines;
  1649. rsc_config->jitter_numer = mode_info->jitter_numer;
  1650. rsc_config->jitter_denom = mode_info->jitter_denom;
  1651. sde_enc->rsc_state_init = false;
  1652. }
  1653. if (rsc_state != SDE_RSC_IDLE_STATE && !sde_enc->rsc_state_init
  1654. && (disp_info->display_type == SDE_CONNECTOR_PRIMARY)) {
  1655. /* update it only once */
  1656. sde_enc->rsc_state_init = true;
  1657. ret = sde_rsc_client_state_update(sde_enc->rsc_client,
  1658. rsc_state, rsc_config, crtc->base.id,
  1659. &wait_vblank_crtc_id);
  1660. } else {
  1661. ret = sde_rsc_client_state_update(sde_enc->rsc_client,
  1662. rsc_state, NULL, crtc->base.id,
  1663. &wait_vblank_crtc_id);
  1664. }
  1665. /**
  1666. * if RSC performed a state change that requires a VBLANK wait, it will
  1667. * set wait_vblank_crtc_id to the CRTC whose VBLANK we must wait on.
  1668. *
  1669. * if we are the primary display, we will need to enable and wait
  1670. * locally since we hold the commit thread
  1671. *
  1672. * if we are an external display, we must send a signal to the primary
  1673. * to enable its VBLANK and wait one, since the RSC hardware is driven
  1674. * by the primary panel's VBLANK signals
  1675. */
  1676. SDE_EVT32_VERBOSE(DRMID(drm_enc), wait_vblank_crtc_id);
  1677. if (ret) {
  1678. SDE_ERROR_ENC(sde_enc,
  1679. "sde rsc client update failed ret:%d\n", ret);
  1680. return ret;
  1681. } else if (wait_vblank_crtc_id == SDE_RSC_INVALID_CRTC_ID) {
  1682. return ret;
  1683. }
  1684. ret = _sde_encoder_rsc_client_update_vsync_wait(drm_enc,
  1685. sde_enc, wait_vblank_crtc_id);
  1686. return ret;
  1687. }
  1688. static void _sde_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
  1689. {
  1690. struct sde_encoder_virt *sde_enc;
  1691. int i;
  1692. if (!drm_enc) {
  1693. SDE_ERROR("invalid encoder\n");
  1694. return;
  1695. }
  1696. sde_enc = to_sde_encoder_virt(drm_enc);
  1697. SDE_DEBUG_ENC(sde_enc, "enable:%d\n", enable);
  1698. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  1699. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  1700. if (phys && phys->ops.irq_control)
  1701. phys->ops.irq_control(phys, enable);
  1702. }
  1703. }
  1704. /* keep track of the userspace vblank during modeset */
  1705. static void _sde_encoder_modeset_helper_locked(struct drm_encoder *drm_enc,
  1706. u32 sw_event)
  1707. {
  1708. struct sde_encoder_virt *sde_enc;
  1709. bool enable;
  1710. int i;
  1711. if (!drm_enc) {
  1712. SDE_ERROR("invalid encoder\n");
  1713. return;
  1714. }
  1715. sde_enc = to_sde_encoder_virt(drm_enc);
  1716. SDE_DEBUG_ENC(sde_enc, "sw_event:%d, vblank_enabled:%d\n",
  1717. sw_event, sde_enc->vblank_enabled);
  1718. /* nothing to do if vblank not enabled by userspace */
  1719. if (!sde_enc->vblank_enabled)
  1720. return;
  1721. /* disable vblank on pre_modeset */
  1722. if (sw_event == SDE_ENC_RC_EVENT_PRE_MODESET)
  1723. enable = false;
  1724. /* enable vblank on post_modeset */
  1725. else if (sw_event == SDE_ENC_RC_EVENT_POST_MODESET)
  1726. enable = true;
  1727. else
  1728. return;
  1729. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  1730. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  1731. if (phys && phys->ops.control_vblank_irq)
  1732. phys->ops.control_vblank_irq(phys, enable);
  1733. }
  1734. }
  1735. struct sde_rsc_client *sde_encoder_get_rsc_client(struct drm_encoder *drm_enc)
  1736. {
  1737. struct sde_encoder_virt *sde_enc;
  1738. if (!drm_enc)
  1739. return NULL;
  1740. sde_enc = to_sde_encoder_virt(drm_enc);
  1741. return sde_enc->rsc_client;
  1742. }
  1743. static int _sde_encoder_resource_control_helper(struct drm_encoder *drm_enc,
  1744. bool enable)
  1745. {
  1746. struct msm_drm_private *priv;
  1747. struct sde_kms *sde_kms;
  1748. struct sde_encoder_virt *sde_enc;
  1749. int rc;
  1750. bool is_cmd_mode = false;
  1751. sde_enc = to_sde_encoder_virt(drm_enc);
  1752. priv = drm_enc->dev->dev_private;
  1753. sde_kms = to_sde_kms(priv->kms);
  1754. if (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE))
  1755. is_cmd_mode = true;
  1756. SDE_DEBUG_ENC(sde_enc, "enable:%d\n", enable);
  1757. SDE_EVT32(DRMID(drm_enc), enable);
  1758. if (!sde_enc->cur_master) {
  1759. SDE_ERROR("encoder master not set\n");
  1760. return -EINVAL;
  1761. }
  1762. if (enable) {
  1763. /* enable SDE core clks */
  1764. rc = pm_runtime_get_sync(drm_enc->dev->dev);
  1765. if (rc < 0) {
  1766. SDE_ERROR("failed to enable power resource %d\n", rc);
  1767. SDE_EVT32(rc, SDE_EVTLOG_ERROR);
  1768. return rc;
  1769. }
  1770. sde_enc->elevated_ahb_vote = true;
  1771. /* enable DSI clks */
  1772. rc = sde_connector_clk_ctrl(sde_enc->cur_master->connector,
  1773. true);
  1774. if (rc) {
  1775. SDE_ERROR("failed to enable clk control %d\n", rc);
  1776. pm_runtime_put_sync(drm_enc->dev->dev);
  1777. return rc;
  1778. }
  1779. /* enable all the irq */
  1780. _sde_encoder_irq_control(drm_enc, true);
  1781. if (is_cmd_mode)
  1782. _sde_encoder_pm_qos_add_request(drm_enc, sde_kms);
  1783. } else {
  1784. if (is_cmd_mode)
  1785. _sde_encoder_pm_qos_remove_request(drm_enc, sde_kms);
  1786. /* disable all the irq */
  1787. _sde_encoder_irq_control(drm_enc, false);
  1788. /* disable DSI clks */
  1789. sde_connector_clk_ctrl(sde_enc->cur_master->connector, false);
  1790. /* disable SDE core clks */
  1791. pm_runtime_put_sync(drm_enc->dev->dev);
  1792. }
  1793. return 0;
  1794. }
  1795. static void sde_encoder_misr_configure(struct drm_encoder *drm_enc,
  1796. bool enable, u32 frame_count)
  1797. {
  1798. struct sde_encoder_virt *sde_enc;
  1799. int i;
  1800. if (!drm_enc) {
  1801. SDE_ERROR("invalid encoder\n");
  1802. return;
  1803. }
  1804. sde_enc = to_sde_encoder_virt(drm_enc);
  1805. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  1806. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  1807. if (!phys || !phys->ops.setup_misr)
  1808. continue;
  1809. phys->ops.setup_misr(phys, enable, frame_count);
  1810. }
  1811. }
  1812. static void sde_encoder_input_event_handler(struct input_handle *handle,
  1813. unsigned int type, unsigned int code, int value)
  1814. {
  1815. struct drm_encoder *drm_enc = NULL;
  1816. struct sde_encoder_virt *sde_enc = NULL;
  1817. struct msm_drm_thread *disp_thread = NULL;
  1818. struct msm_drm_private *priv = NULL;
  1819. if (!handle || !handle->handler || !handle->handler->private) {
  1820. SDE_ERROR("invalid encoder for the input event\n");
  1821. return;
  1822. }
  1823. drm_enc = (struct drm_encoder *)handle->handler->private;
  1824. if (!drm_enc->dev || !drm_enc->dev->dev_private) {
  1825. SDE_ERROR("invalid parameters\n");
  1826. return;
  1827. }
  1828. priv = drm_enc->dev->dev_private;
  1829. sde_enc = to_sde_encoder_virt(drm_enc);
  1830. if (!sde_enc->crtc || (sde_enc->crtc->index
  1831. >= ARRAY_SIZE(priv->disp_thread))) {
  1832. SDE_DEBUG_ENC(sde_enc,
  1833. "invalid cached CRTC: %d or crtc index: %d\n",
  1834. sde_enc->crtc == NULL,
  1835. sde_enc->crtc ? sde_enc->crtc->index : -EINVAL);
  1836. return;
  1837. }
  1838. SDE_EVT32_VERBOSE(DRMID(drm_enc));
  1839. disp_thread = &priv->disp_thread[sde_enc->crtc->index];
  1840. kthread_queue_work(&disp_thread->worker,
  1841. &sde_enc->input_event_work);
  1842. }
  1843. void sde_encoder_control_idle_pc(struct drm_encoder *drm_enc, bool enable)
  1844. {
  1845. struct sde_encoder_virt *sde_enc;
  1846. if (!drm_enc) {
  1847. SDE_ERROR("invalid encoder\n");
  1848. return;
  1849. }
  1850. sde_enc = to_sde_encoder_virt(drm_enc);
  1851. /* return early if there is no state change */
  1852. if (sde_enc->idle_pc_enabled == enable)
  1853. return;
  1854. sde_enc->idle_pc_enabled = enable;
  1855. SDE_DEBUG("idle-pc state:%d\n", sde_enc->idle_pc_enabled);
  1856. SDE_EVT32(sde_enc->idle_pc_enabled);
  1857. }
  1858. static void _sde_encoder_rc_cancel_delayed(struct sde_encoder_virt *sde_enc,
  1859. u32 sw_event)
  1860. {
  1861. if (kthread_cancel_delayed_work_sync(
  1862. &sde_enc->delayed_off_work))
  1863. SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work cancelled\n",
  1864. sw_event);
  1865. }
  1866. static int _sde_encoder_rc_kickoff(struct drm_encoder *drm_enc,
  1867. u32 sw_event, struct sde_encoder_virt *sde_enc, bool is_vid_mode)
  1868. {
  1869. int ret = 0;
  1870. /* cancel delayed off work, if any */
  1871. _sde_encoder_rc_cancel_delayed(sde_enc, sw_event);
  1872. mutex_lock(&sde_enc->rc_lock);
  1873. /* return if the resource control is already in ON state */
  1874. if (sde_enc->rc_state == SDE_ENC_RC_STATE_ON) {
  1875. SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in ON state\n",
  1876. sw_event);
  1877. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  1878. SDE_EVTLOG_FUNC_CASE1);
  1879. goto end;
  1880. } else if (sde_enc->rc_state != SDE_ENC_RC_STATE_OFF &&
  1881. sde_enc->rc_state != SDE_ENC_RC_STATE_IDLE) {
  1882. SDE_ERROR_ENC(sde_enc, "sw_event:%d, rc in state %d\n",
  1883. sw_event, sde_enc->rc_state);
  1884. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  1885. SDE_EVTLOG_ERROR);
  1886. goto end;
  1887. }
  1888. if (is_vid_mode && sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) {
  1889. _sde_encoder_irq_control(drm_enc, true);
  1890. } else {
  1891. /* enable all the clks and resources */
  1892. ret = _sde_encoder_resource_control_helper(drm_enc,
  1893. true);
  1894. if (ret) {
  1895. SDE_ERROR_ENC(sde_enc,
  1896. "sw_event:%d, rc in state %d\n",
  1897. sw_event, sde_enc->rc_state);
  1898. SDE_EVT32(DRMID(drm_enc), sw_event,
  1899. sde_enc->rc_state,
  1900. SDE_EVTLOG_ERROR);
  1901. goto end;
  1902. }
  1903. _sde_encoder_update_rsc_client(drm_enc, true);
  1904. }
  1905. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  1906. SDE_ENC_RC_STATE_ON, SDE_EVTLOG_FUNC_CASE1);
  1907. sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
  1908. end:
  1909. mutex_unlock(&sde_enc->rc_lock);
  1910. return ret;
  1911. }
  1912. static int _sde_encoder_rc_frame_done(struct drm_encoder *drm_enc,
  1913. u32 sw_event, struct sde_encoder_virt *sde_enc,
  1914. struct msm_drm_private *priv)
  1915. {
  1916. unsigned int lp, idle_pc_duration;
  1917. struct msm_drm_thread *disp_thread;
  1918. bool autorefresh_enabled = false;
  1919. if (!sde_enc->crtc) {
  1920. SDE_ERROR("invalid crtc, sw_event:%u\n", sw_event);
  1921. return -EINVAL;
  1922. }
  1923. if (sde_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
  1924. SDE_ERROR("invalid crtc index :%u\n",
  1925. sde_enc->crtc->index);
  1926. return -EINVAL;
  1927. }
  1928. disp_thread = &priv->disp_thread[sde_enc->crtc->index];
  1929. /*
  1930. * mutex lock is not used as this event happens at interrupt
  1931. * context. And locking is not required as, the other events
  1932. * like KICKOFF and STOP does a wait-for-idle before executing
  1933. * the resource_control
  1934. */
  1935. if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
  1936. SDE_ERROR_ENC(sde_enc, "sw_event:%d,rc:%d-unexpected\n",
  1937. sw_event, sde_enc->rc_state);
  1938. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  1939. SDE_EVTLOG_ERROR);
  1940. return -EINVAL;
  1941. }
  1942. /*
  1943. * schedule off work item only when there are no
  1944. * frames pending
  1945. */
  1946. if (sde_crtc_frame_pending(sde_enc->crtc) > 1) {
  1947. SDE_DEBUG_ENC(sde_enc, "skip schedule work");
  1948. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  1949. SDE_EVTLOG_FUNC_CASE2);
  1950. return 0;
  1951. }
  1952. /* schedule delayed off work if autorefresh is disabled */
  1953. if (sde_enc->cur_master &&
  1954. sde_enc->cur_master->ops.is_autorefresh_enabled)
  1955. autorefresh_enabled =
  1956. sde_enc->cur_master->ops.is_autorefresh_enabled(
  1957. sde_enc->cur_master);
  1958. /* set idle timeout based on master connector's lp value */
  1959. if (sde_enc->cur_master)
  1960. lp = sde_connector_get_lp(
  1961. sde_enc->cur_master->connector);
  1962. else
  1963. lp = SDE_MODE_DPMS_ON;
  1964. if (lp == SDE_MODE_DPMS_LP2)
  1965. idle_pc_duration = IDLE_SHORT_TIMEOUT;
  1966. else
  1967. idle_pc_duration = IDLE_POWERCOLLAPSE_DURATION;
  1968. if (!autorefresh_enabled)
  1969. kthread_mod_delayed_work(
  1970. &disp_thread->worker,
  1971. &sde_enc->delayed_off_work,
  1972. msecs_to_jiffies(idle_pc_duration));
  1973. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  1974. autorefresh_enabled,
  1975. idle_pc_duration, SDE_EVTLOG_FUNC_CASE2);
  1976. SDE_DEBUG_ENC(sde_enc, "sw_event:%d, work scheduled\n",
  1977. sw_event);
  1978. return 0;
  1979. }
  1980. static int _sde_encoder_rc_pre_stop(struct drm_encoder *drm_enc,
  1981. u32 sw_event, struct sde_encoder_virt *sde_enc, bool is_vid_mode)
  1982. {
  1983. /* cancel delayed off work, if any */
  1984. _sde_encoder_rc_cancel_delayed(sde_enc, sw_event);
  1985. mutex_lock(&sde_enc->rc_lock);
  1986. if (is_vid_mode &&
  1987. sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) {
  1988. _sde_encoder_irq_control(drm_enc, true);
  1989. }
  1990. /* skip if is already OFF or IDLE, resources are off already */
  1991. else if (sde_enc->rc_state == SDE_ENC_RC_STATE_OFF ||
  1992. sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) {
  1993. SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in %d state\n",
  1994. sw_event, sde_enc->rc_state);
  1995. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  1996. SDE_EVTLOG_FUNC_CASE3);
  1997. goto end;
  1998. }
  1999. /**
  2000. * IRQs are still enabled currently, which allows wait for
  2001. * VBLANK which RSC may require to correctly transition to OFF
  2002. */
  2003. _sde_encoder_update_rsc_client(drm_enc, false);
  2004. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2005. SDE_ENC_RC_STATE_PRE_OFF,
  2006. SDE_EVTLOG_FUNC_CASE3);
  2007. sde_enc->rc_state = SDE_ENC_RC_STATE_PRE_OFF;
  2008. end:
  2009. mutex_unlock(&sde_enc->rc_lock);
  2010. return 0;
  2011. }
  2012. static int _sde_encoder_rc_stop(struct drm_encoder *drm_enc,
  2013. u32 sw_event, struct sde_encoder_virt *sde_enc)
  2014. {
  2015. int ret = 0;
  2016. /* cancel vsync event work and timer */
  2017. kthread_cancel_work_sync(&sde_enc->vsync_event_work);
  2018. if (sde_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI)
  2019. del_timer_sync(&sde_enc->vsync_event_timer);
  2020. mutex_lock(&sde_enc->rc_lock);
  2021. /* return if the resource control is already in OFF state */
  2022. if (sde_enc->rc_state == SDE_ENC_RC_STATE_OFF) {
  2023. SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in OFF state\n",
  2024. sw_event);
  2025. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2026. SDE_EVTLOG_FUNC_CASE4);
  2027. goto end;
  2028. } else if (sde_enc->rc_state == SDE_ENC_RC_STATE_ON ||
  2029. sde_enc->rc_state == SDE_ENC_RC_STATE_MODESET) {
  2030. SDE_ERROR_ENC(sde_enc, "sw_event:%d, rc in state %d\n",
  2031. sw_event, sde_enc->rc_state);
  2032. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2033. SDE_EVTLOG_ERROR);
  2034. ret = -EINVAL;
  2035. goto end;
  2036. }
  2037. /**
  2038. * expect to arrive here only if in either idle state or pre-off
  2039. * and in IDLE state the resources are already disabled
  2040. */
  2041. if (sde_enc->rc_state == SDE_ENC_RC_STATE_PRE_OFF)
  2042. _sde_encoder_resource_control_helper(drm_enc, false);
  2043. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2044. SDE_ENC_RC_STATE_OFF, SDE_EVTLOG_FUNC_CASE4);
  2045. sde_enc->rc_state = SDE_ENC_RC_STATE_OFF;
  2046. end:
  2047. mutex_unlock(&sde_enc->rc_lock);
  2048. return ret;
  2049. }
  2050. static int _sde_encoder_rc_pre_modeset(struct drm_encoder *drm_enc,
  2051. u32 sw_event, struct sde_encoder_virt *sde_enc)
  2052. {
  2053. int ret = 0;
  2054. /* cancel delayed off work, if any */
  2055. _sde_encoder_rc_cancel_delayed(sde_enc, sw_event);
  2056. mutex_lock(&sde_enc->rc_lock);
  2057. if (sde_enc->rc_state == SDE_ENC_RC_STATE_OFF) {
  2058. SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in OFF state\n",
  2059. sw_event);
  2060. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2061. SDE_EVTLOG_FUNC_CASE5);
  2062. goto end;
  2063. } else if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
  2064. /* enable all the clks and resources */
  2065. ret = _sde_encoder_resource_control_helper(drm_enc,
  2066. true);
  2067. if (ret) {
  2068. SDE_ERROR_ENC(sde_enc,
  2069. "sw_event:%d, rc in state %d\n",
  2070. sw_event, sde_enc->rc_state);
  2071. SDE_EVT32(DRMID(drm_enc), sw_event,
  2072. sde_enc->rc_state,
  2073. SDE_EVTLOG_ERROR);
  2074. goto end;
  2075. }
  2076. _sde_encoder_update_rsc_client(drm_enc, true);
  2077. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2078. SDE_ENC_RC_STATE_ON, SDE_EVTLOG_FUNC_CASE5);
  2079. sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
  2080. }
  2081. ret = sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
  2082. if (ret && ret != -EWOULDBLOCK) {
  2083. SDE_ERROR_ENC(sde_enc,
  2084. "wait for commit done returned %d\n",
  2085. ret);
  2086. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2087. ret, SDE_EVTLOG_ERROR);
  2088. ret = -EINVAL;
  2089. goto end;
  2090. }
  2091. _sde_encoder_irq_control(drm_enc, false);
  2092. _sde_encoder_modeset_helper_locked(drm_enc, sw_event);
  2093. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2094. SDE_ENC_RC_STATE_MODESET, SDE_EVTLOG_FUNC_CASE5);
  2095. sde_enc->rc_state = SDE_ENC_RC_STATE_MODESET;
  2096. end:
  2097. mutex_unlock(&sde_enc->rc_lock);
  2098. return ret;
  2099. }
  2100. static int _sde_encoder_rc_post_modeset(struct drm_encoder *drm_enc,
  2101. u32 sw_event, struct sde_encoder_virt *sde_enc)
  2102. {
  2103. int ret = 0;
  2104. mutex_lock(&sde_enc->rc_lock);
  2105. if (sde_enc->rc_state == SDE_ENC_RC_STATE_OFF) {
  2106. SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc in OFF state\n",
  2107. sw_event);
  2108. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2109. SDE_EVTLOG_FUNC_CASE5);
  2110. goto end;
  2111. } else if (sde_enc->rc_state != SDE_ENC_RC_STATE_MODESET) {
  2112. SDE_ERROR_ENC(sde_enc,
  2113. "sw_event:%d, rc:%d !MODESET state\n",
  2114. sw_event, sde_enc->rc_state);
  2115. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2116. SDE_EVTLOG_ERROR);
  2117. ret = -EINVAL;
  2118. goto end;
  2119. }
  2120. _sde_encoder_modeset_helper_locked(drm_enc, sw_event);
  2121. _sde_encoder_irq_control(drm_enc, true);
  2122. _sde_encoder_update_rsc_client(drm_enc, true);
  2123. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2124. SDE_ENC_RC_STATE_ON, SDE_EVTLOG_FUNC_CASE6);
  2125. sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
  2126. end:
  2127. mutex_unlock(&sde_enc->rc_lock);
  2128. return ret;
  2129. }
  2130. static int _sde_encoder_rc_idle(struct drm_encoder *drm_enc,
  2131. u32 sw_event, struct sde_encoder_virt *sde_enc, bool is_vid_mode)
  2132. {
  2133. mutex_lock(&sde_enc->rc_lock);
  2134. if (sde_enc->rc_state != SDE_ENC_RC_STATE_ON) {
  2135. SDE_DEBUG_ENC(sde_enc, "sw_event:%d, rc:%d !ON state\n",
  2136. sw_event, sde_enc->rc_state);
  2137. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2138. SDE_EVTLOG_ERROR);
  2139. goto end;
  2140. } else if (sde_crtc_frame_pending(sde_enc->crtc) > 1) {
  2141. SDE_ERROR_ENC(sde_enc, "skip idle entry");
  2142. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2143. sde_crtc_frame_pending(sde_enc->crtc),
  2144. SDE_EVTLOG_ERROR);
  2145. goto end;
  2146. }
  2147. if (is_vid_mode) {
  2148. _sde_encoder_irq_control(drm_enc, false);
  2149. } else {
  2150. /* disable all the clks and resources */
  2151. _sde_encoder_update_rsc_client(drm_enc, false);
  2152. _sde_encoder_resource_control_helper(drm_enc, false);
  2153. }
  2154. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2155. SDE_ENC_RC_STATE_IDLE, SDE_EVTLOG_FUNC_CASE7);
  2156. sde_enc->rc_state = SDE_ENC_RC_STATE_IDLE;
  2157. end:
  2158. mutex_unlock(&sde_enc->rc_lock);
  2159. return 0;
  2160. }
  2161. static int _sde_encoder_rc_early_wakeup(struct drm_encoder *drm_enc,
  2162. u32 sw_event, struct sde_encoder_virt *sde_enc,
  2163. struct msm_drm_private *priv, bool is_vid_mode)
  2164. {
  2165. bool autorefresh_enabled = false;
  2166. struct msm_drm_thread *disp_thread;
  2167. int ret = 0;
  2168. if (!sde_enc->crtc ||
  2169. sde_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
  2170. SDE_DEBUG_ENC(sde_enc,
  2171. "invalid crtc:%d or crtc index:%d , sw_event:%u\n",
  2172. sde_enc->crtc == NULL,
  2173. sde_enc->crtc ? sde_enc->crtc->index : -EINVAL,
  2174. sw_event);
  2175. return -EINVAL;
  2176. }
  2177. disp_thread = &priv->disp_thread[sde_enc->crtc->index];
  2178. mutex_lock(&sde_enc->rc_lock);
  2179. if (sde_enc->rc_state == SDE_ENC_RC_STATE_ON) {
  2180. if (sde_enc->cur_master &&
  2181. sde_enc->cur_master->ops.is_autorefresh_enabled)
  2182. autorefresh_enabled =
  2183. sde_enc->cur_master->ops.is_autorefresh_enabled(
  2184. sde_enc->cur_master);
  2185. if (autorefresh_enabled) {
  2186. SDE_DEBUG_ENC(sde_enc,
  2187. "not handling early wakeup since auto refresh is enabled\n");
  2188. goto end;
  2189. }
  2190. if (!sde_crtc_frame_pending(sde_enc->crtc))
  2191. kthread_mod_delayed_work(&disp_thread->worker,
  2192. &sde_enc->delayed_off_work,
  2193. msecs_to_jiffies(
  2194. IDLE_POWERCOLLAPSE_DURATION));
  2195. } else if (sde_enc->rc_state == SDE_ENC_RC_STATE_IDLE) {
  2196. /* enable all the clks and resources */
  2197. ret = _sde_encoder_resource_control_helper(drm_enc,
  2198. true);
  2199. if (ret) {
  2200. SDE_ERROR_ENC(sde_enc,
  2201. "sw_event:%d, rc in state %d\n",
  2202. sw_event, sde_enc->rc_state);
  2203. SDE_EVT32(DRMID(drm_enc), sw_event,
  2204. sde_enc->rc_state,
  2205. SDE_EVTLOG_ERROR);
  2206. goto end;
  2207. }
  2208. _sde_encoder_update_rsc_client(drm_enc, true);
  2209. /*
  2210. * In some cases, commit comes with slight delay
  2211. * (> 80 ms)after early wake up, prevent clock switch
  2212. * off to avoid jank in next update. So, increase the
  2213. * command mode idle timeout sufficiently to prevent
  2214. * such case.
  2215. */
  2216. kthread_mod_delayed_work(&disp_thread->worker,
  2217. &sde_enc->delayed_off_work,
  2218. msecs_to_jiffies(
  2219. IDLE_POWERCOLLAPSE_IN_EARLY_WAKEUP));
  2220. sde_enc->rc_state = SDE_ENC_RC_STATE_ON;
  2221. }
  2222. SDE_EVT32(DRMID(drm_enc), sw_event, sde_enc->rc_state,
  2223. SDE_ENC_RC_STATE_ON, SDE_EVTLOG_FUNC_CASE8);
  2224. end:
  2225. mutex_unlock(&sde_enc->rc_lock);
  2226. return ret;
  2227. }
  2228. static int sde_encoder_resource_control(struct drm_encoder *drm_enc,
  2229. u32 sw_event)
  2230. {
  2231. struct sde_encoder_virt *sde_enc;
  2232. struct msm_drm_private *priv;
  2233. int ret = 0;
  2234. bool is_vid_mode = false;
  2235. if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
  2236. SDE_ERROR("invalid encoder parameters, sw_event:%u\n",
  2237. sw_event);
  2238. return -EINVAL;
  2239. }
  2240. sde_enc = to_sde_encoder_virt(drm_enc);
  2241. priv = drm_enc->dev->dev_private;
  2242. if (sde_encoder_check_curr_mode(&sde_enc->base, MSM_DISPLAY_VIDEO_MODE))
  2243. is_vid_mode = true;
  2244. /*
  2245. * when idle_pc is not supported, process only KICKOFF, STOP and MODESET
  2246. * events and return early for other events (ie wb display).
  2247. */
  2248. if (!sde_enc->idle_pc_enabled &&
  2249. (sw_event != SDE_ENC_RC_EVENT_KICKOFF &&
  2250. sw_event != SDE_ENC_RC_EVENT_PRE_MODESET &&
  2251. sw_event != SDE_ENC_RC_EVENT_POST_MODESET &&
  2252. sw_event != SDE_ENC_RC_EVENT_STOP &&
  2253. sw_event != SDE_ENC_RC_EVENT_PRE_STOP))
  2254. return 0;
  2255. SDE_DEBUG_ENC(sde_enc, "sw_event:%d, idle_pc:%d\n",
  2256. sw_event, sde_enc->idle_pc_enabled);
  2257. SDE_EVT32_VERBOSE(DRMID(drm_enc), sw_event, sde_enc->idle_pc_enabled,
  2258. sde_enc->rc_state, SDE_EVTLOG_FUNC_ENTRY);
  2259. switch (sw_event) {
  2260. case SDE_ENC_RC_EVENT_KICKOFF:
  2261. ret = _sde_encoder_rc_kickoff(drm_enc, sw_event, sde_enc,
  2262. is_vid_mode);
  2263. break;
  2264. case SDE_ENC_RC_EVENT_FRAME_DONE:
  2265. ret = _sde_encoder_rc_frame_done(drm_enc, sw_event, sde_enc,
  2266. priv);
  2267. break;
  2268. case SDE_ENC_RC_EVENT_PRE_STOP:
  2269. ret = _sde_encoder_rc_pre_stop(drm_enc, sw_event, sde_enc,
  2270. is_vid_mode);
  2271. break;
  2272. case SDE_ENC_RC_EVENT_STOP:
  2273. ret = _sde_encoder_rc_stop(drm_enc, sw_event, sde_enc);
  2274. break;
  2275. case SDE_ENC_RC_EVENT_PRE_MODESET:
  2276. ret = _sde_encoder_rc_pre_modeset(drm_enc, sw_event, sde_enc);
  2277. break;
  2278. case SDE_ENC_RC_EVENT_POST_MODESET:
  2279. ret = _sde_encoder_rc_post_modeset(drm_enc, sw_event, sde_enc);
  2280. break;
  2281. case SDE_ENC_RC_EVENT_ENTER_IDLE:
  2282. ret = _sde_encoder_rc_idle(drm_enc, sw_event, sde_enc,
  2283. is_vid_mode);
  2284. break;
  2285. case SDE_ENC_RC_EVENT_EARLY_WAKEUP:
  2286. ret = _sde_encoder_rc_early_wakeup(drm_enc, sw_event, sde_enc,
  2287. priv, is_vid_mode);
  2288. break;
  2289. default:
  2290. SDE_EVT32(DRMID(drm_enc), sw_event, SDE_EVTLOG_ERROR);
  2291. SDE_ERROR("unexpected sw_event: %d\n", sw_event);
  2292. break;
  2293. }
  2294. SDE_EVT32_VERBOSE(DRMID(drm_enc), sw_event, sde_enc->idle_pc_enabled,
  2295. sde_enc->rc_state, SDE_EVTLOG_FUNC_EXIT);
  2296. return ret;
  2297. }
  2298. static void sde_encoder_virt_mode_switch(struct drm_encoder *drm_enc,
  2299. enum sde_intf_mode intf_mode, struct drm_display_mode *adj_mode)
  2300. {
  2301. int i = 0;
  2302. struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
  2303. if (intf_mode == INTF_MODE_CMD)
  2304. sde_enc->disp_info.curr_panel_mode = MSM_DISPLAY_VIDEO_MODE;
  2305. else if (intf_mode == INTF_MODE_VIDEO)
  2306. sde_enc->disp_info.curr_panel_mode = MSM_DISPLAY_CMD_MODE;
  2307. _sde_encoder_update_rsc_client(drm_enc, true);
  2308. if (intf_mode == INTF_MODE_CMD) {
  2309. for (i = 0; i < sde_enc->num_phys_encs; i++)
  2310. sde_enc->phys_encs[i] = sde_enc->phys_vid_encs[i];
  2311. SDE_DEBUG_ENC(sde_enc, "switch to video physical encoder\n");
  2312. SDE_EVT32(DRMID(&sde_enc->base), intf_mode,
  2313. msm_is_mode_seamless_poms(adj_mode),
  2314. SDE_EVTLOG_FUNC_CASE1);
  2315. } else if (intf_mode == INTF_MODE_VIDEO) {
  2316. for (i = 0; i < sde_enc->num_phys_encs; i++)
  2317. sde_enc->phys_encs[i] = sde_enc->phys_cmd_encs[i];
  2318. SDE_EVT32(DRMID(&sde_enc->base), intf_mode,
  2319. msm_is_mode_seamless_poms(adj_mode),
  2320. SDE_EVTLOG_FUNC_CASE2);
  2321. SDE_DEBUG_ENC(sde_enc, "switch to command physical encoder\n");
  2322. }
  2323. }
  2324. static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
  2325. struct drm_display_mode *mode,
  2326. struct drm_display_mode *adj_mode)
  2327. {
  2328. struct sde_encoder_virt *sde_enc;
  2329. struct msm_drm_private *priv;
  2330. struct sde_kms *sde_kms;
  2331. struct list_head *connector_list;
  2332. struct drm_connector *conn = NULL, *conn_iter;
  2333. struct sde_rm_hw_iter dsc_iter, pp_iter, qdss_iter;
  2334. struct sde_rm_hw_request request_hw;
  2335. enum sde_intf_mode intf_mode;
  2336. bool is_cmd_mode = false;
  2337. int i = 0, ret;
  2338. if (!drm_enc) {
  2339. SDE_ERROR("invalid encoder\n");
  2340. return;
  2341. }
  2342. if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
  2343. SDE_ERROR("power resource is not enabled\n");
  2344. return;
  2345. }
  2346. sde_enc = to_sde_encoder_virt(drm_enc);
  2347. SDE_DEBUG_ENC(sde_enc, "\n");
  2348. if (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE))
  2349. is_cmd_mode = true;
  2350. priv = drm_enc->dev->dev_private;
  2351. sde_kms = to_sde_kms(priv->kms);
  2352. connector_list = &sde_kms->dev->mode_config.connector_list;
  2353. SDE_EVT32(DRMID(drm_enc));
  2354. /*
  2355. * cache the crtc in sde_enc on enable for duration of use case
  2356. * for correctly servicing asynchronous irq events and timers
  2357. */
  2358. if (!drm_enc->crtc) {
  2359. SDE_ERROR("invalid crtc\n");
  2360. return;
  2361. }
  2362. sde_enc->crtc = drm_enc->crtc;
  2363. list_for_each_entry(conn_iter, connector_list, head)
  2364. if (conn_iter->encoder == drm_enc)
  2365. conn = conn_iter;
  2366. if (!conn) {
  2367. SDE_ERROR_ENC(sde_enc, "failed to find attached connector\n");
  2368. return;
  2369. } else if (!conn->state) {
  2370. SDE_ERROR_ENC(sde_enc, "invalid connector state\n");
  2371. return;
  2372. }
  2373. intf_mode = sde_encoder_get_intf_mode(drm_enc);
  2374. /* store the mode_info */
  2375. sde_connector_state_get_mode_info(conn->state, &sde_enc->mode_info);
  2376. /* release resources before seamless mode change */
  2377. if (msm_is_mode_seamless_dms(adj_mode) ||
  2378. (msm_is_mode_seamless_dyn_clk(adj_mode) &&
  2379. is_cmd_mode)) {
  2380. /* restore resource state before releasing them */
  2381. ret = sde_encoder_resource_control(drm_enc,
  2382. SDE_ENC_RC_EVENT_PRE_MODESET);
  2383. if (ret) {
  2384. SDE_ERROR_ENC(sde_enc,
  2385. "sde resource control failed: %d\n",
  2386. ret);
  2387. return;
  2388. }
  2389. /*
  2390. * Disable dsc before switch the mode and after pre_modeset,
  2391. * to guarantee that previous kickoff finished.
  2392. */
  2393. _sde_encoder_dsc_disable(sde_enc);
  2394. } else if (msm_is_mode_seamless_poms(adj_mode)) {
  2395. _sde_encoder_modeset_helper_locked(drm_enc,
  2396. SDE_ENC_RC_EVENT_PRE_MODESET);
  2397. sde_encoder_virt_mode_switch(drm_enc, intf_mode, adj_mode);
  2398. }
  2399. /* Reserve dynamic resources now. Indicating non-AtomicTest phase */
  2400. ret = sde_rm_reserve(&sde_kms->rm, drm_enc, drm_enc->crtc->state,
  2401. conn->state, false);
  2402. if (ret) {
  2403. SDE_ERROR_ENC(sde_enc,
  2404. "failed to reserve hw resources, %d\n", ret);
  2405. return;
  2406. }
  2407. sde_rm_init_hw_iter(&pp_iter, drm_enc->base.id, SDE_HW_BLK_PINGPONG);
  2408. for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
  2409. sde_enc->hw_pp[i] = NULL;
  2410. if (!sde_rm_get_hw(&sde_kms->rm, &pp_iter))
  2411. break;
  2412. sde_enc->hw_pp[i] = (struct sde_hw_pingpong *) pp_iter.hw;
  2413. }
  2414. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  2415. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  2416. if (phys) {
  2417. sde_rm_init_hw_iter(&qdss_iter, drm_enc->base.id,
  2418. SDE_HW_BLK_QDSS);
  2419. for (i = 0; i < QDSS_MAX; i++) {
  2420. if (sde_rm_get_hw(&sde_kms->rm, &qdss_iter)) {
  2421. phys->hw_qdss =
  2422. (struct sde_hw_qdss *)qdss_iter.hw;
  2423. break;
  2424. }
  2425. }
  2426. }
  2427. }
  2428. sde_rm_init_hw_iter(&dsc_iter, drm_enc->base.id, SDE_HW_BLK_DSC);
  2429. for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
  2430. sde_enc->hw_dsc[i] = NULL;
  2431. if (!sde_rm_get_hw(&sde_kms->rm, &dsc_iter))
  2432. break;
  2433. sde_enc->hw_dsc[i] = (struct sde_hw_dsc *) dsc_iter.hw;
  2434. }
  2435. /* Get PP for DSC configuration */
  2436. for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
  2437. sde_enc->hw_dsc_pp[i] = NULL;
  2438. if (!sde_enc->hw_dsc[i])
  2439. continue;
  2440. request_hw.id = sde_enc->hw_dsc[i]->base.id;
  2441. request_hw.type = SDE_HW_BLK_PINGPONG;
  2442. if (!sde_rm_request_hw_blk(&sde_kms->rm, &request_hw))
  2443. break;
  2444. sde_enc->hw_dsc_pp[i] =
  2445. (struct sde_hw_pingpong *) request_hw.hw;
  2446. }
  2447. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  2448. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  2449. if (phys) {
  2450. if (!sde_enc->hw_pp[i] && sde_enc->topology.num_intf) {
  2451. SDE_ERROR_ENC(sde_enc,
  2452. "invalid pingpong block for the encoder\n");
  2453. return;
  2454. }
  2455. phys->hw_pp = sde_enc->hw_pp[i];
  2456. phys->connector = conn->state->connector;
  2457. if (phys->ops.mode_set)
  2458. phys->ops.mode_set(phys, mode, adj_mode);
  2459. }
  2460. }
  2461. /* update resources after seamless mode change */
  2462. if (msm_is_mode_seamless_dms(adj_mode) ||
  2463. (msm_is_mode_seamless_dyn_clk(adj_mode) &&
  2464. is_cmd_mode))
  2465. sde_encoder_resource_control(&sde_enc->base,
  2466. SDE_ENC_RC_EVENT_POST_MODESET);
  2467. else if (msm_is_mode_seamless_poms(adj_mode))
  2468. _sde_encoder_modeset_helper_locked(drm_enc,
  2469. SDE_ENC_RC_EVENT_POST_MODESET);
  2470. }
  2471. void sde_encoder_control_te(struct drm_encoder *drm_enc, bool enable)
  2472. {
  2473. struct sde_encoder_virt *sde_enc;
  2474. struct sde_encoder_phys *phys;
  2475. int i;
  2476. if (!drm_enc) {
  2477. SDE_ERROR("invalid parameters\n");
  2478. return;
  2479. }
  2480. sde_enc = to_sde_encoder_virt(drm_enc);
  2481. if (!sde_enc) {
  2482. SDE_ERROR("invalid sde encoder\n");
  2483. return;
  2484. }
  2485. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  2486. phys = sde_enc->phys_encs[i];
  2487. if (phys && phys->ops.control_te)
  2488. phys->ops.control_te(phys, enable);
  2489. }
  2490. }
  2491. static int _sde_encoder_input_connect(struct input_handler *handler,
  2492. struct input_dev *dev, const struct input_device_id *id)
  2493. {
  2494. struct input_handle *handle;
  2495. int rc = 0;
  2496. handle = kzalloc(sizeof(*handle), GFP_KERNEL);
  2497. if (!handle)
  2498. return -ENOMEM;
  2499. handle->dev = dev;
  2500. handle->handler = handler;
  2501. handle->name = handler->name;
  2502. rc = input_register_handle(handle);
  2503. if (rc) {
  2504. pr_err("failed to register input handle\n");
  2505. goto error;
  2506. }
  2507. rc = input_open_device(handle);
  2508. if (rc) {
  2509. pr_err("failed to open input device\n");
  2510. goto error_unregister;
  2511. }
  2512. return 0;
  2513. error_unregister:
  2514. input_unregister_handle(handle);
  2515. error:
  2516. kfree(handle);
  2517. return rc;
  2518. }
  2519. static void _sde_encoder_input_disconnect(struct input_handle *handle)
  2520. {
  2521. input_close_device(handle);
  2522. input_unregister_handle(handle);
  2523. kfree(handle);
  2524. }
  2525. /**
  2526. * Structure for specifying event parameters on which to receive callbacks.
  2527. * This structure will trigger a callback in case of a touch event (specified by
  2528. * EV_ABS) where there is a change in X and Y coordinates,
  2529. */
  2530. static const struct input_device_id sde_input_ids[] = {
  2531. {
  2532. .flags = INPUT_DEVICE_ID_MATCH_EVBIT,
  2533. .evbit = { BIT_MASK(EV_ABS) },
  2534. .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
  2535. BIT_MASK(ABS_MT_POSITION_X) |
  2536. BIT_MASK(ABS_MT_POSITION_Y) },
  2537. },
  2538. { },
  2539. };
  2540. static int _sde_encoder_input_handler_register(
  2541. struct input_handler *input_handler)
  2542. {
  2543. int rc = 0;
  2544. rc = input_register_handler(input_handler);
  2545. if (rc) {
  2546. pr_err("input_register_handler failed, rc= %d\n", rc);
  2547. kfree(input_handler);
  2548. return rc;
  2549. }
  2550. return rc;
  2551. }
  2552. static int _sde_encoder_input_handler(
  2553. struct sde_encoder_virt *sde_enc)
  2554. {
  2555. struct input_handler *input_handler = NULL;
  2556. int rc = 0;
  2557. if (sde_enc->input_handler) {
  2558. SDE_ERROR_ENC(sde_enc,
  2559. "input_handle is active. unexpected\n");
  2560. return -EINVAL;
  2561. }
  2562. input_handler = kzalloc(sizeof(*sde_enc->input_handler), GFP_KERNEL);
  2563. if (!input_handler)
  2564. return -ENOMEM;
  2565. input_handler->event = sde_encoder_input_event_handler;
  2566. input_handler->connect = _sde_encoder_input_connect;
  2567. input_handler->disconnect = _sde_encoder_input_disconnect;
  2568. input_handler->name = "sde";
  2569. input_handler->id_table = sde_input_ids;
  2570. input_handler->private = sde_enc;
  2571. sde_enc->input_handler = input_handler;
  2572. return rc;
  2573. }
  2574. static void _sde_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
  2575. {
  2576. struct sde_encoder_virt *sde_enc = NULL;
  2577. struct msm_drm_private *priv;
  2578. struct sde_kms *sde_kms;
  2579. if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
  2580. SDE_ERROR("invalid parameters\n");
  2581. return;
  2582. }
  2583. priv = drm_enc->dev->dev_private;
  2584. sde_kms = to_sde_kms(priv->kms);
  2585. if (!sde_kms) {
  2586. SDE_ERROR("invalid sde_kms\n");
  2587. return;
  2588. }
  2589. sde_enc = to_sde_encoder_virt(drm_enc);
  2590. if (!sde_enc || !sde_enc->cur_master) {
  2591. SDE_DEBUG("invalid sde encoder/master\n");
  2592. return;
  2593. }
  2594. if (sde_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
  2595. sde_enc->cur_master->hw_mdptop &&
  2596. sde_enc->cur_master->hw_mdptop->ops.intf_audio_select)
  2597. sde_enc->cur_master->hw_mdptop->ops.intf_audio_select(
  2598. sde_enc->cur_master->hw_mdptop);
  2599. if (sde_enc->cur_master->hw_mdptop &&
  2600. sde_enc->cur_master->hw_mdptop->ops.reset_ubwc)
  2601. sde_enc->cur_master->hw_mdptop->ops.reset_ubwc(
  2602. sde_enc->cur_master->hw_mdptop,
  2603. sde_kms->catalog);
  2604. if (sde_enc->cur_master->hw_ctl &&
  2605. sde_enc->cur_master->hw_ctl->ops.setup_intf_cfg_v1 &&
  2606. !sde_enc->cur_master->cont_splash_enabled)
  2607. sde_enc->cur_master->hw_ctl->ops.setup_intf_cfg_v1(
  2608. sde_enc->cur_master->hw_ctl,
  2609. &sde_enc->cur_master->intf_cfg_v1);
  2610. _sde_encoder_update_vsync_source(sde_enc, &sde_enc->disp_info, false);
  2611. sde_encoder_control_te(drm_enc, true);
  2612. memset(&sde_enc->prv_conn_roi, 0, sizeof(sde_enc->prv_conn_roi));
  2613. memset(&sde_enc->cur_conn_roi, 0, sizeof(sde_enc->cur_conn_roi));
  2614. }
  2615. void sde_encoder_virt_restore(struct drm_encoder *drm_enc)
  2616. {
  2617. struct sde_encoder_virt *sde_enc = NULL;
  2618. int i;
  2619. if (!drm_enc) {
  2620. SDE_ERROR("invalid encoder\n");
  2621. return;
  2622. }
  2623. sde_enc = to_sde_encoder_virt(drm_enc);
  2624. if (!sde_enc->cur_master) {
  2625. SDE_DEBUG("virt encoder has no master\n");
  2626. return;
  2627. }
  2628. memset(&sde_enc->cur_master->intf_cfg_v1, 0,
  2629. sizeof(sde_enc->cur_master->intf_cfg_v1));
  2630. sde_enc->idle_pc_restore = true;
  2631. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  2632. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  2633. if (!phys)
  2634. continue;
  2635. if (phys->hw_ctl && phys->hw_ctl->ops.clear_pending_flush)
  2636. phys->hw_ctl->ops.clear_pending_flush(phys->hw_ctl);
  2637. if ((phys != sde_enc->cur_master) && phys->ops.restore)
  2638. phys->ops.restore(phys);
  2639. }
  2640. if (sde_enc->cur_master->ops.restore)
  2641. sde_enc->cur_master->ops.restore(sde_enc->cur_master);
  2642. _sde_encoder_virt_enable_helper(drm_enc);
  2643. }
  2644. static void sde_encoder_off_work(struct kthread_work *work)
  2645. {
  2646. struct sde_encoder_virt *sde_enc = container_of(work,
  2647. struct sde_encoder_virt, delayed_off_work.work);
  2648. struct drm_encoder *drm_enc;
  2649. if (!sde_enc) {
  2650. SDE_ERROR("invalid sde encoder\n");
  2651. return;
  2652. }
  2653. drm_enc = &sde_enc->base;
  2654. SDE_ATRACE_BEGIN("sde_encoder_off_work");
  2655. sde_encoder_idle_request(drm_enc);
  2656. SDE_ATRACE_END("sde_encoder_off_work");
  2657. }
  2658. static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
  2659. {
  2660. struct sde_encoder_virt *sde_enc = NULL;
  2661. int i, ret = 0;
  2662. struct msm_compression_info *comp_info = NULL;
  2663. struct drm_display_mode *cur_mode = NULL;
  2664. struct msm_display_info *disp_info;
  2665. if (!drm_enc) {
  2666. SDE_ERROR("invalid encoder\n");
  2667. return;
  2668. }
  2669. sde_enc = to_sde_encoder_virt(drm_enc);
  2670. disp_info = &sde_enc->disp_info;
  2671. if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
  2672. SDE_ERROR("power resource is not enabled\n");
  2673. return;
  2674. }
  2675. if (drm_enc->crtc && !sde_enc->crtc)
  2676. sde_enc->crtc = drm_enc->crtc;
  2677. comp_info = &sde_enc->mode_info.comp_info;
  2678. cur_mode = &sde_enc->base.crtc->state->adjusted_mode;
  2679. SDE_DEBUG_ENC(sde_enc, "\n");
  2680. SDE_EVT32(DRMID(drm_enc), cur_mode->hdisplay, cur_mode->vdisplay);
  2681. sde_enc->cur_master = NULL;
  2682. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  2683. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  2684. if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
  2685. SDE_DEBUG_ENC(sde_enc, "master is now idx %d\n", i);
  2686. sde_enc->cur_master = phys;
  2687. break;
  2688. }
  2689. }
  2690. if (!sde_enc->cur_master) {
  2691. SDE_ERROR("virt encoder has no master! num_phys %d\n", i);
  2692. return;
  2693. }
  2694. /* register input handler if not already registered */
  2695. if (sde_enc->input_handler && !msm_is_mode_seamless_dms(cur_mode) &&
  2696. sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE) &&
  2697. !msm_is_mode_seamless_dyn_clk(cur_mode)) {
  2698. ret = _sde_encoder_input_handler_register(
  2699. sde_enc->input_handler);
  2700. if (ret)
  2701. SDE_ERROR(
  2702. "input handler registration failed, rc = %d\n", ret);
  2703. }
  2704. if (!(msm_is_mode_seamless_vrr(cur_mode)
  2705. || msm_is_mode_seamless_dms(cur_mode)
  2706. || msm_is_mode_seamless_dyn_clk(cur_mode)))
  2707. kthread_init_delayed_work(&sde_enc->delayed_off_work,
  2708. sde_encoder_off_work);
  2709. ret = sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
  2710. if (ret) {
  2711. SDE_ERROR_ENC(sde_enc, "sde resource control failed: %d\n",
  2712. ret);
  2713. return;
  2714. }
  2715. memset(&sde_enc->cur_master->intf_cfg_v1, 0,
  2716. sizeof(sde_enc->cur_master->intf_cfg_v1));
  2717. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  2718. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  2719. if (!phys)
  2720. continue;
  2721. phys->comp_type = comp_info->comp_type;
  2722. phys->comp_ratio = comp_info->comp_ratio;
  2723. phys->wide_bus_en = sde_enc->mode_info.wide_bus_en;
  2724. phys->frame_trigger_mode = sde_enc->frame_trigger_mode;
  2725. if (phys->comp_type == MSM_DISPLAY_COMPRESSION_DSC) {
  2726. phys->dsc_extra_pclk_cycle_cnt =
  2727. comp_info->dsc_info.pclk_per_line;
  2728. phys->dsc_extra_disp_width =
  2729. comp_info->dsc_info.extra_width;
  2730. }
  2731. if (phys != sde_enc->cur_master) {
  2732. /**
  2733. * on DMS request, the encoder will be enabled
  2734. * already. Invoke restore to reconfigure the
  2735. * new mode.
  2736. */
  2737. if ((msm_is_mode_seamless_dms(cur_mode) ||
  2738. msm_is_mode_seamless_dyn_clk(cur_mode)) &&
  2739. phys->ops.restore)
  2740. phys->ops.restore(phys);
  2741. else if (phys->ops.enable)
  2742. phys->ops.enable(phys);
  2743. }
  2744. if (sde_enc->misr_enable && phys->ops.setup_misr &&
  2745. (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_VIDEO_MODE)))
  2746. phys->ops.setup_misr(phys, true,
  2747. sde_enc->misr_frame_count);
  2748. }
  2749. if ((msm_is_mode_seamless_dms(cur_mode) ||
  2750. msm_is_mode_seamless_dyn_clk(cur_mode)) &&
  2751. sde_enc->cur_master->ops.restore)
  2752. sde_enc->cur_master->ops.restore(sde_enc->cur_master);
  2753. else if (sde_enc->cur_master->ops.enable)
  2754. sde_enc->cur_master->ops.enable(sde_enc->cur_master);
  2755. _sde_encoder_virt_enable_helper(drm_enc);
  2756. }
  2757. static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
  2758. {
  2759. struct sde_encoder_virt *sde_enc = NULL;
  2760. struct msm_drm_private *priv;
  2761. struct sde_kms *sde_kms;
  2762. enum sde_intf_mode intf_mode;
  2763. int i = 0;
  2764. if (!drm_enc) {
  2765. SDE_ERROR("invalid encoder\n");
  2766. return;
  2767. } else if (!drm_enc->dev) {
  2768. SDE_ERROR("invalid dev\n");
  2769. return;
  2770. } else if (!drm_enc->dev->dev_private) {
  2771. SDE_ERROR("invalid dev_private\n");
  2772. return;
  2773. }
  2774. if (!sde_kms_power_resource_is_enabled(drm_enc->dev)) {
  2775. SDE_ERROR("power resource is not enabled\n");
  2776. return;
  2777. }
  2778. sde_enc = to_sde_encoder_virt(drm_enc);
  2779. SDE_DEBUG_ENC(sde_enc, "\n");
  2780. priv = drm_enc->dev->dev_private;
  2781. sde_kms = to_sde_kms(priv->kms);
  2782. intf_mode = sde_encoder_get_intf_mode(drm_enc);
  2783. SDE_EVT32(DRMID(drm_enc));
  2784. /* wait for idle */
  2785. sde_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
  2786. if (sde_enc->input_handler)
  2787. input_unregister_handler(sde_enc->input_handler);
  2788. /*
  2789. * For primary command mode and video mode encoders, execute the
  2790. * resource control pre-stop operations before the physical encoders
  2791. * are disabled, to allow the rsc to transition its states properly.
  2792. *
  2793. * For other encoder types, rsc should not be enabled until after
  2794. * they have been fully disabled, so delay the pre-stop operations
  2795. * until after the physical disable calls have returned.
  2796. */
  2797. if (sde_enc->disp_info.display_type == SDE_CONNECTOR_PRIMARY &&
  2798. (intf_mode == INTF_MODE_CMD || intf_mode == INTF_MODE_VIDEO)) {
  2799. sde_encoder_resource_control(drm_enc,
  2800. SDE_ENC_RC_EVENT_PRE_STOP);
  2801. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  2802. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  2803. if (phys && phys->ops.disable)
  2804. phys->ops.disable(phys);
  2805. }
  2806. } else {
  2807. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  2808. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  2809. if (phys && phys->ops.disable)
  2810. phys->ops.disable(phys);
  2811. }
  2812. sde_encoder_resource_control(drm_enc,
  2813. SDE_ENC_RC_EVENT_PRE_STOP);
  2814. }
  2815. /*
  2816. * disable dsc after the transfer is complete (for command mode)
  2817. * and after physical encoder is disabled, to make sure timing
  2818. * engine is already disabled (for video mode).
  2819. */
  2820. _sde_encoder_dsc_disable(sde_enc);
  2821. sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_STOP);
  2822. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  2823. if (sde_enc->phys_encs[i]) {
  2824. sde_enc->phys_encs[i]->cont_splash_enabled = false;
  2825. sde_enc->phys_encs[i]->connector = NULL;
  2826. }
  2827. atomic_set(&sde_enc->frame_done_cnt[i], 0);
  2828. }
  2829. sde_enc->cur_master = NULL;
  2830. /*
  2831. * clear the cached crtc in sde_enc on use case finish, after all the
  2832. * outstanding events and timers have been completed
  2833. */
  2834. sde_enc->crtc = NULL;
  2835. memset(&sde_enc->mode_info, 0, sizeof(sde_enc->mode_info));
  2836. SDE_DEBUG_ENC(sde_enc, "encoder disabled\n");
  2837. sde_rm_release(&sde_kms->rm, drm_enc, false);
  2838. }
  2839. void sde_encoder_helper_phys_disable(struct sde_encoder_phys *phys_enc,
  2840. struct sde_encoder_phys_wb *wb_enc)
  2841. {
  2842. struct sde_encoder_virt *sde_enc;
  2843. phys_enc->hw_ctl->ops.reset(phys_enc->hw_ctl);
  2844. sde_encoder_helper_reset_mixers(phys_enc, NULL);
  2845. if (wb_enc) {
  2846. if (wb_enc->hw_wb->ops.bind_pingpong_blk) {
  2847. wb_enc->hw_wb->ops.bind_pingpong_blk(wb_enc->hw_wb,
  2848. false, phys_enc->hw_pp->idx);
  2849. if (phys_enc->hw_ctl->ops.update_bitmask_wb)
  2850. phys_enc->hw_ctl->ops.update_bitmask_wb(
  2851. phys_enc->hw_ctl,
  2852. wb_enc->hw_wb->idx, true);
  2853. }
  2854. } else {
  2855. if (phys_enc->hw_intf->ops.bind_pingpong_blk) {
  2856. phys_enc->hw_intf->ops.bind_pingpong_blk(
  2857. phys_enc->hw_intf, false,
  2858. phys_enc->hw_pp->idx);
  2859. if (phys_enc->hw_ctl->ops.update_bitmask_intf)
  2860. phys_enc->hw_ctl->ops.update_bitmask_intf(
  2861. phys_enc->hw_ctl,
  2862. phys_enc->hw_intf->idx, true);
  2863. }
  2864. }
  2865. if (phys_enc->hw_pp && phys_enc->hw_pp->ops.reset_3d_mode) {
  2866. phys_enc->hw_pp->ops.reset_3d_mode(phys_enc->hw_pp);
  2867. if (phys_enc->hw_ctl->ops.update_bitmask_merge3d &&
  2868. phys_enc->hw_pp->merge_3d)
  2869. phys_enc->hw_ctl->ops.update_bitmask_merge3d(
  2870. phys_enc->hw_ctl,
  2871. phys_enc->hw_pp->merge_3d->idx, true);
  2872. }
  2873. if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.bind_pingpong_blk &&
  2874. phys_enc->hw_pp) {
  2875. phys_enc->hw_cdm->ops.bind_pingpong_blk(phys_enc->hw_cdm,
  2876. false, phys_enc->hw_pp->idx);
  2877. if (phys_enc->hw_ctl->ops.update_bitmask_cdm)
  2878. phys_enc->hw_ctl->ops.update_bitmask_cdm(
  2879. phys_enc->hw_ctl,
  2880. phys_enc->hw_cdm->idx, true);
  2881. }
  2882. sde_enc = to_sde_encoder_virt(phys_enc->parent);
  2883. if (phys_enc == sde_enc->cur_master && phys_enc->hw_pp &&
  2884. phys_enc->hw_ctl->ops.reset_post_disable)
  2885. phys_enc->hw_ctl->ops.reset_post_disable(
  2886. phys_enc->hw_ctl, &phys_enc->intf_cfg_v1,
  2887. phys_enc->hw_pp->merge_3d ?
  2888. phys_enc->hw_pp->merge_3d->idx : 0);
  2889. phys_enc->hw_ctl->ops.trigger_flush(phys_enc->hw_ctl);
  2890. phys_enc->hw_ctl->ops.trigger_start(phys_enc->hw_ctl);
  2891. }
  2892. static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog,
  2893. enum sde_intf_type type, u32 controller_id)
  2894. {
  2895. int i = 0;
  2896. for (i = 0; i < catalog->intf_count; i++) {
  2897. if (catalog->intf[i].type == type
  2898. && catalog->intf[i].controller_id == controller_id) {
  2899. return catalog->intf[i].id;
  2900. }
  2901. }
  2902. return INTF_MAX;
  2903. }
  2904. static enum sde_wb sde_encoder_get_wb(struct sde_mdss_cfg *catalog,
  2905. enum sde_intf_type type, u32 controller_id)
  2906. {
  2907. if (controller_id < catalog->wb_count)
  2908. return catalog->wb[controller_id].id;
  2909. return WB_MAX;
  2910. }
  2911. void sde_encoder_perf_uidle_status(struct sde_kms *sde_kms,
  2912. struct drm_crtc *crtc)
  2913. {
  2914. struct sde_hw_uidle *uidle;
  2915. struct sde_uidle_cntr cntr;
  2916. struct sde_uidle_status status;
  2917. if (!sde_kms || !crtc || !sde_kms->hw_uidle) {
  2918. pr_err("invalid params %d %d\n",
  2919. !sde_kms, !crtc);
  2920. return;
  2921. }
  2922. /* check if perf counters are enabled and setup */
  2923. if (!sde_kms->catalog->uidle_cfg.perf_cntr_en)
  2924. return;
  2925. uidle = sde_kms->hw_uidle;
  2926. if ((sde_kms->catalog->uidle_cfg.debugfs_perf & SDE_PERF_UIDLE_STATUS)
  2927. && uidle->ops.uidle_get_status) {
  2928. uidle->ops.uidle_get_status(uidle, &status);
  2929. trace_sde_perf_uidle_status(
  2930. crtc->base.id,
  2931. status.uidle_danger_status_0,
  2932. status.uidle_danger_status_1,
  2933. status.uidle_safe_status_0,
  2934. status.uidle_safe_status_1,
  2935. status.uidle_idle_status_0,
  2936. status.uidle_idle_status_1,
  2937. status.uidle_fal_status_0,
  2938. status.uidle_fal_status_1,
  2939. status.uidle_status,
  2940. status.uidle_en_fal10);
  2941. }
  2942. if ((sde_kms->catalog->uidle_cfg.debugfs_perf & SDE_PERF_UIDLE_CNT)
  2943. && uidle->ops.uidle_get_cntr) {
  2944. uidle->ops.uidle_get_cntr(uidle, &cntr);
  2945. trace_sde_perf_uidle_cntr(
  2946. crtc->base.id,
  2947. cntr.fal1_gate_cntr,
  2948. cntr.fal10_gate_cntr,
  2949. cntr.fal_wait_gate_cntr,
  2950. cntr.fal1_num_transitions_cntr,
  2951. cntr.fal10_num_transitions_cntr,
  2952. cntr.min_gate_cntr,
  2953. cntr.max_gate_cntr);
  2954. }
  2955. }
  2956. static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc,
  2957. struct sde_encoder_phys *phy_enc)
  2958. {
  2959. struct sde_encoder_virt *sde_enc = NULL;
  2960. unsigned long lock_flags;
  2961. if (!drm_enc || !phy_enc)
  2962. return;
  2963. SDE_ATRACE_BEGIN("encoder_vblank_callback");
  2964. sde_enc = to_sde_encoder_virt(drm_enc);
  2965. spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
  2966. if (sde_enc->crtc_vblank_cb)
  2967. sde_enc->crtc_vblank_cb(sde_enc->crtc_vblank_cb_data);
  2968. spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
  2969. if (phy_enc->sde_kms &&
  2970. phy_enc->sde_kms->catalog->uidle_cfg.debugfs_perf)
  2971. sde_encoder_perf_uidle_status(phy_enc->sde_kms, sde_enc->crtc);
  2972. atomic_inc(&phy_enc->vsync_cnt);
  2973. SDE_ATRACE_END("encoder_vblank_callback");
  2974. }
  2975. static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc,
  2976. struct sde_encoder_phys *phy_enc)
  2977. {
  2978. if (!phy_enc)
  2979. return;
  2980. SDE_ATRACE_BEGIN("encoder_underrun_callback");
  2981. atomic_inc(&phy_enc->underrun_cnt);
  2982. SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt));
  2983. trace_sde_encoder_underrun(DRMID(drm_enc),
  2984. atomic_read(&phy_enc->underrun_cnt));
  2985. SDE_DBG_CTRL("stop_ftrace");
  2986. SDE_DBG_CTRL("panic_underrun");
  2987. SDE_ATRACE_END("encoder_underrun_callback");
  2988. }
  2989. void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
  2990. void (*vbl_cb)(void *), void *vbl_data)
  2991. {
  2992. struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
  2993. unsigned long lock_flags;
  2994. bool enable;
  2995. int i;
  2996. enable = vbl_cb ? true : false;
  2997. if (!drm_enc) {
  2998. SDE_ERROR("invalid encoder\n");
  2999. return;
  3000. }
  3001. SDE_DEBUG_ENC(sde_enc, "\n");
  3002. SDE_EVT32(DRMID(drm_enc), enable);
  3003. spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
  3004. sde_enc->crtc_vblank_cb = vbl_cb;
  3005. sde_enc->crtc_vblank_cb_data = vbl_data;
  3006. spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
  3007. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  3008. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  3009. if (phys && phys->ops.control_vblank_irq)
  3010. phys->ops.control_vblank_irq(phys, enable);
  3011. }
  3012. sde_enc->vblank_enabled = enable;
  3013. }
  3014. void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
  3015. void (*frame_event_cb)(void *, u32 event),
  3016. struct drm_crtc *crtc)
  3017. {
  3018. struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
  3019. unsigned long lock_flags;
  3020. bool enable;
  3021. enable = frame_event_cb ? true : false;
  3022. if (!drm_enc) {
  3023. SDE_ERROR("invalid encoder\n");
  3024. return;
  3025. }
  3026. SDE_DEBUG_ENC(sde_enc, "\n");
  3027. SDE_EVT32(DRMID(drm_enc), enable, 0);
  3028. spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
  3029. sde_enc->crtc_frame_event_cb = frame_event_cb;
  3030. sde_enc->crtc_frame_event_cb_data.crtc = crtc;
  3031. spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
  3032. }
  3033. static void sde_encoder_frame_done_callback(
  3034. struct drm_encoder *drm_enc,
  3035. struct sde_encoder_phys *ready_phys, u32 event)
  3036. {
  3037. struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
  3038. unsigned int i;
  3039. bool trigger = true;
  3040. bool is_cmd_mode = false;
  3041. enum sde_rm_topology_name topology = SDE_RM_TOPOLOGY_NONE;
  3042. if (!drm_enc || !sde_enc->cur_master) {
  3043. SDE_ERROR("invalid param: drm_enc %pK, cur_master %pK\n",
  3044. drm_enc, drm_enc ? sde_enc->cur_master : 0);
  3045. return;
  3046. }
  3047. sde_enc->crtc_frame_event_cb_data.connector =
  3048. sde_enc->cur_master->connector;
  3049. if (sde_encoder_check_curr_mode(drm_enc, MSM_DISPLAY_CMD_MODE))
  3050. is_cmd_mode = true;
  3051. if (event & (SDE_ENCODER_FRAME_EVENT_DONE
  3052. | SDE_ENCODER_FRAME_EVENT_ERROR
  3053. | SDE_ENCODER_FRAME_EVENT_PANEL_DEAD) && is_cmd_mode) {
  3054. if (ready_phys->connector)
  3055. topology = sde_connector_get_topology_name(
  3056. ready_phys->connector);
  3057. /* One of the physical encoders has become idle */
  3058. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  3059. if (sde_enc->phys_encs[i] == ready_phys) {
  3060. SDE_EVT32_VERBOSE(DRMID(drm_enc), i,
  3061. atomic_read(&sde_enc->frame_done_cnt[i]));
  3062. if (!atomic_add_unless(
  3063. &sde_enc->frame_done_cnt[i], 1, 1)) {
  3064. SDE_EVT32(DRMID(drm_enc), event,
  3065. ready_phys->intf_idx,
  3066. SDE_EVTLOG_ERROR);
  3067. SDE_ERROR_ENC(sde_enc,
  3068. "intf idx:%d, event:%d\n",
  3069. ready_phys->intf_idx, event);
  3070. return;
  3071. }
  3072. }
  3073. if (topology != SDE_RM_TOPOLOGY_PPSPLIT &&
  3074. atomic_read(&sde_enc->frame_done_cnt[i]) != 1)
  3075. trigger = false;
  3076. }
  3077. if (trigger) {
  3078. sde_encoder_resource_control(drm_enc,
  3079. SDE_ENC_RC_EVENT_FRAME_DONE);
  3080. if (sde_enc->crtc_frame_event_cb)
  3081. sde_enc->crtc_frame_event_cb(
  3082. &sde_enc->crtc_frame_event_cb_data,
  3083. event);
  3084. for (i = 0; i < sde_enc->num_phys_encs; i++)
  3085. atomic_set(&sde_enc->frame_done_cnt[i], 0);
  3086. }
  3087. } else if (sde_enc->crtc_frame_event_cb) {
  3088. if (!is_cmd_mode)
  3089. sde_encoder_resource_control(drm_enc,
  3090. SDE_ENC_RC_EVENT_FRAME_DONE);
  3091. sde_enc->crtc_frame_event_cb(
  3092. &sde_enc->crtc_frame_event_cb_data, event);
  3093. }
  3094. }
  3095. static void sde_encoder_get_qsync_fps_callback(
  3096. struct drm_encoder *drm_enc,
  3097. u32 *qsync_fps)
  3098. {
  3099. struct msm_display_info *disp_info;
  3100. struct sde_encoder_virt *sde_enc;
  3101. if (!qsync_fps)
  3102. return;
  3103. *qsync_fps = 0;
  3104. if (!drm_enc) {
  3105. SDE_ERROR("invalid drm encoder\n");
  3106. return;
  3107. }
  3108. sde_enc = to_sde_encoder_virt(drm_enc);
  3109. disp_info = &sde_enc->disp_info;
  3110. *qsync_fps = disp_info->qsync_min_fps;
  3111. }
  3112. int sde_encoder_idle_request(struct drm_encoder *drm_enc)
  3113. {
  3114. struct sde_encoder_virt *sde_enc;
  3115. if (!drm_enc) {
  3116. SDE_ERROR("invalid drm encoder\n");
  3117. return -EINVAL;
  3118. }
  3119. sde_enc = to_sde_encoder_virt(drm_enc);
  3120. sde_encoder_resource_control(&sde_enc->base,
  3121. SDE_ENC_RC_EVENT_ENTER_IDLE);
  3122. return 0;
  3123. }
  3124. /**
  3125. * _sde_encoder_trigger_flush - trigger flush for a physical encoder
  3126. * drm_enc: Pointer to drm encoder structure
  3127. * phys: Pointer to physical encoder structure
  3128. * extra_flush: Additional bit mask to include in flush trigger
  3129. */
  3130. static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
  3131. struct sde_encoder_phys *phys,
  3132. struct sde_ctl_flush_cfg *extra_flush)
  3133. {
  3134. struct sde_hw_ctl *ctl;
  3135. unsigned long lock_flags;
  3136. struct sde_encoder_virt *sde_enc;
  3137. int pend_ret_fence_cnt;
  3138. struct sde_connector *c_conn;
  3139. if (!drm_enc || !phys) {
  3140. SDE_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
  3141. !drm_enc, !phys);
  3142. return;
  3143. }
  3144. sde_enc = to_sde_encoder_virt(drm_enc);
  3145. c_conn = to_sde_connector(phys->connector);
  3146. if (!phys->hw_pp) {
  3147. SDE_ERROR("invalid pingpong hw\n");
  3148. return;
  3149. }
  3150. ctl = phys->hw_ctl;
  3151. if (!ctl || !phys->ops.trigger_flush) {
  3152. SDE_ERROR("missing ctl/trigger cb\n");
  3153. return;
  3154. }
  3155. if (phys->split_role == ENC_ROLE_SKIP) {
  3156. SDE_DEBUG_ENC(to_sde_encoder_virt(phys->parent),
  3157. "skip flush pp%d ctl%d\n",
  3158. phys->hw_pp->idx - PINGPONG_0,
  3159. ctl->idx - CTL_0);
  3160. return;
  3161. }
  3162. /* update pending counts and trigger kickoff ctl flush atomically */
  3163. spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
  3164. if (phys->ops.is_master && phys->ops.is_master(phys))
  3165. atomic_inc(&phys->pending_retire_fence_cnt);
  3166. pend_ret_fence_cnt = atomic_read(&phys->pending_retire_fence_cnt);
  3167. if (phys->hw_intf && phys->hw_intf->cap->type == INTF_DP &&
  3168. ctl->ops.update_bitmask_periph) {
  3169. /* perform peripheral flush on every frame update for dp dsc */
  3170. if (phys->comp_type == MSM_DISPLAY_COMPRESSION_DSC &&
  3171. phys->comp_ratio && c_conn->ops.update_pps) {
  3172. c_conn->ops.update_pps(phys->connector, NULL,
  3173. c_conn->display);
  3174. ctl->ops.update_bitmask_periph(ctl,
  3175. phys->hw_intf->idx, 1);
  3176. }
  3177. if (sde_enc->dynamic_hdr_updated)
  3178. ctl->ops.update_bitmask_periph(ctl,
  3179. phys->hw_intf->idx, 1);
  3180. }
  3181. if ((extra_flush && extra_flush->pending_flush_mask)
  3182. && ctl->ops.update_pending_flush)
  3183. ctl->ops.update_pending_flush(ctl, extra_flush);
  3184. phys->ops.trigger_flush(phys);
  3185. spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
  3186. if (ctl->ops.get_pending_flush) {
  3187. struct sde_ctl_flush_cfg pending_flush = {0,};
  3188. ctl->ops.get_pending_flush(ctl, &pending_flush);
  3189. SDE_EVT32(DRMID(drm_enc), phys->intf_idx - INTF_0,
  3190. ctl->idx - CTL_0,
  3191. pending_flush.pending_flush_mask,
  3192. pend_ret_fence_cnt);
  3193. } else {
  3194. SDE_EVT32(DRMID(drm_enc), phys->intf_idx - INTF_0,
  3195. ctl->idx - CTL_0,
  3196. pend_ret_fence_cnt);
  3197. }
  3198. }
  3199. /**
  3200. * _sde_encoder_trigger_start - trigger start for a physical encoder
  3201. * phys: Pointer to physical encoder structure
  3202. */
  3203. static inline void _sde_encoder_trigger_start(struct sde_encoder_phys *phys)
  3204. {
  3205. struct sde_hw_ctl *ctl;
  3206. struct sde_encoder_virt *sde_enc;
  3207. if (!phys) {
  3208. SDE_ERROR("invalid argument(s)\n");
  3209. return;
  3210. }
  3211. if (!phys->hw_pp) {
  3212. SDE_ERROR("invalid pingpong hw\n");
  3213. return;
  3214. }
  3215. if (!phys->parent) {
  3216. SDE_ERROR("invalid parent\n");
  3217. return;
  3218. }
  3219. /* avoid ctrl start for encoder in clone mode */
  3220. if (phys->in_clone_mode)
  3221. return;
  3222. ctl = phys->hw_ctl;
  3223. sde_enc = to_sde_encoder_virt(phys->parent);
  3224. if (phys->split_role == ENC_ROLE_SKIP) {
  3225. SDE_DEBUG_ENC(sde_enc,
  3226. "skip start pp%d ctl%d\n",
  3227. phys->hw_pp->idx - PINGPONG_0,
  3228. ctl->idx - CTL_0);
  3229. return;
  3230. }
  3231. if (phys->ops.trigger_start && phys->enable_state != SDE_ENC_DISABLED)
  3232. phys->ops.trigger_start(phys);
  3233. }
  3234. void sde_encoder_helper_trigger_flush(struct sde_encoder_phys *phys_enc)
  3235. {
  3236. struct sde_hw_ctl *ctl;
  3237. if (!phys_enc) {
  3238. SDE_ERROR("invalid encoder\n");
  3239. return;
  3240. }
  3241. ctl = phys_enc->hw_ctl;
  3242. if (ctl && ctl->ops.trigger_flush)
  3243. ctl->ops.trigger_flush(ctl);
  3244. }
  3245. void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc)
  3246. {
  3247. struct sde_hw_ctl *ctl;
  3248. if (!phys_enc) {
  3249. SDE_ERROR("invalid encoder\n");
  3250. return;
  3251. }
  3252. ctl = phys_enc->hw_ctl;
  3253. if (ctl && ctl->ops.trigger_start) {
  3254. ctl->ops.trigger_start(ctl);
  3255. SDE_EVT32(DRMID(phys_enc->parent), ctl->idx - CTL_0);
  3256. }
  3257. }
  3258. void sde_encoder_helper_hw_reset(struct sde_encoder_phys *phys_enc)
  3259. {
  3260. struct sde_encoder_virt *sde_enc;
  3261. struct sde_connector *sde_con;
  3262. void *sde_con_disp;
  3263. struct sde_hw_ctl *ctl;
  3264. int rc;
  3265. if (!phys_enc) {
  3266. SDE_ERROR("invalid encoder\n");
  3267. return;
  3268. }
  3269. sde_enc = to_sde_encoder_virt(phys_enc->parent);
  3270. ctl = phys_enc->hw_ctl;
  3271. if (!ctl || !ctl->ops.reset)
  3272. return;
  3273. SDE_DEBUG_ENC(sde_enc, "ctl %d reset\n", ctl->idx);
  3274. SDE_EVT32(DRMID(phys_enc->parent), ctl->idx);
  3275. if (phys_enc->ops.is_master && phys_enc->ops.is_master(phys_enc) &&
  3276. phys_enc->connector) {
  3277. sde_con = to_sde_connector(phys_enc->connector);
  3278. sde_con_disp = sde_connector_get_display(phys_enc->connector);
  3279. if (sde_con->ops.soft_reset) {
  3280. rc = sde_con->ops.soft_reset(sde_con_disp);
  3281. if (rc) {
  3282. SDE_ERROR_ENC(sde_enc,
  3283. "connector soft reset failure\n");
  3284. SDE_DBG_DUMP("all", "dbg_bus", "vbif_dbg_bus",
  3285. "panic");
  3286. }
  3287. }
  3288. }
  3289. phys_enc->enable_state = SDE_ENC_ENABLED;
  3290. }
  3291. /**
  3292. * _sde_encoder_kickoff_phys - handle physical encoder kickoff
  3293. * Iterate through the physical encoders and perform consolidated flush
  3294. * and/or control start triggering as needed. This is done in the virtual
  3295. * encoder rather than the individual physical ones in order to handle
  3296. * use cases that require visibility into multiple physical encoders at
  3297. * a time.
  3298. * sde_enc: Pointer to virtual encoder structure
  3299. */
  3300. static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
  3301. {
  3302. struct sde_hw_ctl *ctl;
  3303. uint32_t i;
  3304. struct sde_ctl_flush_cfg pending_flush = {0,};
  3305. u32 pending_kickoff_cnt;
  3306. struct msm_drm_private *priv = NULL;
  3307. struct sde_kms *sde_kms = NULL;
  3308. struct sde_crtc_misr_info crtc_misr_info = {false, 0};
  3309. bool is_regdma_blocking = false, is_vid_mode = false;
  3310. if (!sde_enc) {
  3311. SDE_ERROR("invalid encoder\n");
  3312. return;
  3313. }
  3314. if (sde_encoder_check_curr_mode(&sde_enc->base, MSM_DISPLAY_VIDEO_MODE))
  3315. is_vid_mode = true;
  3316. is_regdma_blocking = (is_vid_mode ||
  3317. _sde_encoder_is_autorefresh_enabled(sde_enc));
  3318. /* don't perform flush/start operations for slave encoders */
  3319. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  3320. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  3321. enum sde_rm_topology_name topology = SDE_RM_TOPOLOGY_NONE;
  3322. if (!phys || phys->enable_state == SDE_ENC_DISABLED)
  3323. continue;
  3324. ctl = phys->hw_ctl;
  3325. if (!ctl)
  3326. continue;
  3327. if (phys->connector)
  3328. topology = sde_connector_get_topology_name(
  3329. phys->connector);
  3330. if (!phys->ops.needs_single_flush ||
  3331. !phys->ops.needs_single_flush(phys)) {
  3332. if (ctl->ops.reg_dma_flush)
  3333. ctl->ops.reg_dma_flush(ctl, is_regdma_blocking);
  3334. _sde_encoder_trigger_flush(&sde_enc->base, phys, 0x0);
  3335. } else if (ctl->ops.get_pending_flush) {
  3336. ctl->ops.get_pending_flush(ctl, &pending_flush);
  3337. }
  3338. }
  3339. /* for split flush, combine pending flush masks and send to master */
  3340. if (pending_flush.pending_flush_mask && sde_enc->cur_master) {
  3341. ctl = sde_enc->cur_master->hw_ctl;
  3342. if (ctl->ops.reg_dma_flush)
  3343. ctl->ops.reg_dma_flush(ctl, is_regdma_blocking);
  3344. _sde_encoder_trigger_flush(&sde_enc->base, sde_enc->cur_master,
  3345. &pending_flush);
  3346. }
  3347. /* update pending_kickoff_cnt AFTER flush but before trigger start */
  3348. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  3349. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  3350. if (!phys || phys->enable_state == SDE_ENC_DISABLED)
  3351. continue;
  3352. if (!phys->ops.needs_single_flush ||
  3353. !phys->ops.needs_single_flush(phys)) {
  3354. pending_kickoff_cnt =
  3355. sde_encoder_phys_inc_pending(phys);
  3356. SDE_EVT32(pending_kickoff_cnt, SDE_EVTLOG_FUNC_CASE1);
  3357. } else {
  3358. pending_kickoff_cnt =
  3359. sde_encoder_phys_inc_pending(phys);
  3360. SDE_EVT32(pending_kickoff_cnt,
  3361. pending_flush.pending_flush_mask,
  3362. SDE_EVTLOG_FUNC_CASE2);
  3363. }
  3364. }
  3365. if (sde_enc->misr_enable)
  3366. sde_encoder_misr_configure(&sde_enc->base, true,
  3367. sde_enc->misr_frame_count);
  3368. sde_crtc_get_misr_info(sde_enc->crtc, &crtc_misr_info);
  3369. if (crtc_misr_info.misr_enable)
  3370. sde_crtc_misr_setup(sde_enc->crtc, true,
  3371. crtc_misr_info.misr_frame_count);
  3372. _sde_encoder_trigger_start(sde_enc->cur_master);
  3373. if (sde_enc->elevated_ahb_vote) {
  3374. priv = sde_enc->base.dev->dev_private;
  3375. if (priv != NULL) {
  3376. sde_kms = to_sde_kms(priv->kms);
  3377. if (sde_kms != NULL) {
  3378. sde_power_scale_reg_bus(&priv->phandle,
  3379. VOTE_INDEX_LOW,
  3380. false);
  3381. }
  3382. }
  3383. sde_enc->elevated_ahb_vote = false;
  3384. }
  3385. }
  3386. static void _sde_encoder_ppsplit_swap_intf_for_right_only_update(
  3387. struct drm_encoder *drm_enc,
  3388. unsigned long *affected_displays,
  3389. int num_active_phys)
  3390. {
  3391. struct sde_encoder_virt *sde_enc;
  3392. struct sde_encoder_phys *master;
  3393. enum sde_rm_topology_name topology;
  3394. bool is_right_only;
  3395. if (!drm_enc || !affected_displays)
  3396. return;
  3397. sde_enc = to_sde_encoder_virt(drm_enc);
  3398. master = sde_enc->cur_master;
  3399. if (!master || !master->connector)
  3400. return;
  3401. topology = sde_connector_get_topology_name(master->connector);
  3402. if (topology != SDE_RM_TOPOLOGY_PPSPLIT)
  3403. return;
  3404. /*
  3405. * For pingpong split, the slave pingpong won't generate IRQs. For
  3406. * right-only updates, we can't swap pingpongs, or simply swap the
  3407. * master/slave assignment, we actually have to swap the interfaces
  3408. * so that the master physical encoder will use a pingpong/interface
  3409. * that generates irqs on which to wait.
  3410. */
  3411. is_right_only = !test_bit(0, affected_displays) &&
  3412. test_bit(1, affected_displays);
  3413. if (is_right_only && !sde_enc->intfs_swapped) {
  3414. /* right-only update swap interfaces */
  3415. swap(sde_enc->phys_encs[0]->intf_idx,
  3416. sde_enc->phys_encs[1]->intf_idx);
  3417. sde_enc->intfs_swapped = true;
  3418. } else if (!is_right_only && sde_enc->intfs_swapped) {
  3419. /* left-only or full update, swap back */
  3420. swap(sde_enc->phys_encs[0]->intf_idx,
  3421. sde_enc->phys_encs[1]->intf_idx);
  3422. sde_enc->intfs_swapped = false;
  3423. }
  3424. SDE_DEBUG_ENC(sde_enc,
  3425. "right_only %d swapped %d phys0->intf%d, phys1->intf%d\n",
  3426. is_right_only, sde_enc->intfs_swapped,
  3427. sde_enc->phys_encs[0]->intf_idx - INTF_0,
  3428. sde_enc->phys_encs[1]->intf_idx - INTF_0);
  3429. SDE_EVT32(DRMID(drm_enc), is_right_only, sde_enc->intfs_swapped,
  3430. sde_enc->phys_encs[0]->intf_idx - INTF_0,
  3431. sde_enc->phys_encs[1]->intf_idx - INTF_0,
  3432. *affected_displays);
  3433. /* ppsplit always uses master since ppslave invalid for irqs*/
  3434. if (num_active_phys == 1)
  3435. *affected_displays = BIT(0);
  3436. }
  3437. static void _sde_encoder_update_master(struct drm_encoder *drm_enc,
  3438. struct sde_encoder_kickoff_params *params)
  3439. {
  3440. struct sde_encoder_virt *sde_enc;
  3441. struct sde_encoder_phys *phys;
  3442. int i, num_active_phys;
  3443. bool master_assigned = false;
  3444. if (!drm_enc || !params)
  3445. return;
  3446. sde_enc = to_sde_encoder_virt(drm_enc);
  3447. if (sde_enc->num_phys_encs <= 1)
  3448. return;
  3449. /* count bits set */
  3450. num_active_phys = hweight_long(params->affected_displays);
  3451. SDE_DEBUG_ENC(sde_enc, "affected_displays 0x%lx num_active_phys %d\n",
  3452. params->affected_displays, num_active_phys);
  3453. SDE_EVT32_VERBOSE(DRMID(drm_enc), params->affected_displays,
  3454. num_active_phys);
  3455. /* for left/right only update, ppsplit master switches interface */
  3456. _sde_encoder_ppsplit_swap_intf_for_right_only_update(drm_enc,
  3457. &params->affected_displays, num_active_phys);
  3458. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  3459. enum sde_enc_split_role prv_role, new_role;
  3460. bool active = false;
  3461. phys = sde_enc->phys_encs[i];
  3462. if (!phys || !phys->ops.update_split_role || !phys->hw_pp)
  3463. continue;
  3464. active = test_bit(i, &params->affected_displays);
  3465. prv_role = phys->split_role;
  3466. if (active && num_active_phys == 1)
  3467. new_role = ENC_ROLE_SOLO;
  3468. else if (active && !master_assigned)
  3469. new_role = ENC_ROLE_MASTER;
  3470. else if (active)
  3471. new_role = ENC_ROLE_SLAVE;
  3472. else
  3473. new_role = ENC_ROLE_SKIP;
  3474. phys->ops.update_split_role(phys, new_role);
  3475. if (new_role == ENC_ROLE_SOLO || new_role == ENC_ROLE_MASTER) {
  3476. sde_enc->cur_master = phys;
  3477. master_assigned = true;
  3478. }
  3479. SDE_DEBUG_ENC(sde_enc, "pp %d role prv %d new %d active %d\n",
  3480. phys->hw_pp->idx - PINGPONG_0, prv_role,
  3481. phys->split_role, active);
  3482. SDE_EVT32(DRMID(drm_enc), params->affected_displays,
  3483. phys->hw_pp->idx - PINGPONG_0, prv_role,
  3484. phys->split_role, active, num_active_phys);
  3485. }
  3486. }
  3487. bool sde_encoder_check_curr_mode(struct drm_encoder *drm_enc, u32 mode)
  3488. {
  3489. struct sde_encoder_virt *sde_enc;
  3490. struct msm_display_info *disp_info;
  3491. if (!drm_enc) {
  3492. SDE_ERROR("invalid encoder\n");
  3493. return false;
  3494. }
  3495. sde_enc = to_sde_encoder_virt(drm_enc);
  3496. disp_info = &sde_enc->disp_info;
  3497. return (disp_info->curr_panel_mode == mode);
  3498. }
  3499. void sde_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
  3500. {
  3501. struct sde_encoder_virt *sde_enc;
  3502. struct sde_encoder_phys *phys;
  3503. unsigned int i;
  3504. struct sde_hw_ctl *ctl;
  3505. if (!drm_enc) {
  3506. SDE_ERROR("invalid encoder\n");
  3507. return;
  3508. }
  3509. sde_enc = to_sde_encoder_virt(drm_enc);
  3510. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  3511. phys = sde_enc->phys_encs[i];
  3512. if (phys && phys->hw_ctl && (phys == sde_enc->cur_master) &&
  3513. sde_encoder_check_curr_mode(drm_enc,
  3514. MSM_DISPLAY_CMD_MODE)) {
  3515. ctl = phys->hw_ctl;
  3516. if (ctl->ops.trigger_pending)
  3517. /* update only for command mode primary ctl */
  3518. ctl->ops.trigger_pending(ctl);
  3519. }
  3520. }
  3521. sde_enc->idle_pc_restore = false;
  3522. }
  3523. static void _sde_encoder_setup_dither(struct sde_encoder_phys *phys)
  3524. {
  3525. void *dither_cfg;
  3526. int ret = 0, i = 0;
  3527. size_t len = 0;
  3528. enum sde_rm_topology_name topology;
  3529. struct drm_encoder *drm_enc;
  3530. struct msm_display_dsc_info *dsc = NULL;
  3531. struct sde_encoder_virt *sde_enc;
  3532. struct sde_hw_pingpong *hw_pp;
  3533. if (!phys || !phys->connector || !phys->hw_pp ||
  3534. !phys->hw_pp->ops.setup_dither || !phys->parent)
  3535. return;
  3536. topology = sde_connector_get_topology_name(phys->connector);
  3537. if ((topology == SDE_RM_TOPOLOGY_PPSPLIT) &&
  3538. (phys->split_role == ENC_ROLE_SLAVE))
  3539. return;
  3540. drm_enc = phys->parent;
  3541. sde_enc = to_sde_encoder_virt(drm_enc);
  3542. dsc = &sde_enc->mode_info.comp_info.dsc_info;
  3543. /* disable dither for 10 bpp or 10bpc dsc config */
  3544. if (dsc->bpp == 10 || dsc->bpc == 10) {
  3545. phys->hw_pp->ops.setup_dither(phys->hw_pp, NULL, 0);
  3546. return;
  3547. }
  3548. ret = sde_connector_get_dither_cfg(phys->connector,
  3549. phys->connector->state, &dither_cfg, &len);
  3550. if (ret)
  3551. return;
  3552. if (TOPOLOGY_DUALPIPE_MERGE_MODE(topology)) {
  3553. for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
  3554. hw_pp = sde_enc->hw_pp[i];
  3555. if (hw_pp) {
  3556. phys->hw_pp->ops.setup_dither(hw_pp, dither_cfg,
  3557. len);
  3558. }
  3559. }
  3560. } else {
  3561. phys->hw_pp->ops.setup_dither(phys->hw_pp, dither_cfg, len);
  3562. }
  3563. }
  3564. static u32 _sde_encoder_calculate_linetime(struct sde_encoder_virt *sde_enc,
  3565. struct drm_display_mode *mode)
  3566. {
  3567. u64 pclk_rate;
  3568. u32 pclk_period;
  3569. u32 line_time;
  3570. /*
  3571. * For linetime calculation, only operate on master encoder.
  3572. */
  3573. if (!sde_enc->cur_master)
  3574. return 0;
  3575. if (!sde_enc->cur_master->ops.get_line_count) {
  3576. SDE_ERROR("get_line_count function not defined\n");
  3577. return 0;
  3578. }
  3579. pclk_rate = mode->clock; /* pixel clock in kHz */
  3580. if (pclk_rate == 0) {
  3581. SDE_ERROR("pclk is 0, cannot calculate line time\n");
  3582. return 0;
  3583. }
  3584. pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
  3585. if (pclk_period == 0) {
  3586. SDE_ERROR("pclk period is 0\n");
  3587. return 0;
  3588. }
  3589. /*
  3590. * Line time calculation based on Pixel clock and HTOTAL.
  3591. * Final unit is in ns.
  3592. */
  3593. line_time = (pclk_period * mode->htotal) / 1000;
  3594. if (line_time == 0) {
  3595. SDE_ERROR("line time calculation is 0\n");
  3596. return 0;
  3597. }
  3598. SDE_DEBUG_ENC(sde_enc,
  3599. "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
  3600. pclk_rate, pclk_period, line_time);
  3601. return line_time;
  3602. }
  3603. static int _sde_encoder_wakeup_time(struct drm_encoder *drm_enc,
  3604. ktime_t *wakeup_time)
  3605. {
  3606. struct drm_display_mode *mode;
  3607. struct sde_encoder_virt *sde_enc;
  3608. u32 cur_line;
  3609. u32 line_time;
  3610. u32 vtotal, time_to_vsync;
  3611. ktime_t cur_time;
  3612. sde_enc = to_sde_encoder_virt(drm_enc);
  3613. if (!sde_enc || !sde_enc->cur_master) {
  3614. SDE_ERROR("invalid sde encoder/master\n");
  3615. return -EINVAL;
  3616. }
  3617. mode = &sde_enc->cur_master->cached_mode;
  3618. line_time = _sde_encoder_calculate_linetime(sde_enc, mode);
  3619. if (!line_time)
  3620. return -EINVAL;
  3621. cur_line = sde_enc->cur_master->ops.get_line_count(sde_enc->cur_master);
  3622. vtotal = mode->vtotal;
  3623. if (cur_line >= vtotal)
  3624. time_to_vsync = line_time * vtotal;
  3625. else
  3626. time_to_vsync = line_time * (vtotal - cur_line);
  3627. if (time_to_vsync == 0) {
  3628. SDE_ERROR("time to vsync should not be zero, vtotal=%d\n",
  3629. vtotal);
  3630. return -EINVAL;
  3631. }
  3632. cur_time = ktime_get();
  3633. *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
  3634. SDE_DEBUG_ENC(sde_enc,
  3635. "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
  3636. cur_line, vtotal, time_to_vsync,
  3637. ktime_to_ms(cur_time),
  3638. ktime_to_ms(*wakeup_time));
  3639. return 0;
  3640. }
  3641. static void sde_encoder_vsync_event_handler(struct timer_list *t)
  3642. {
  3643. struct drm_encoder *drm_enc;
  3644. struct sde_encoder_virt *sde_enc =
  3645. from_timer(sde_enc, t, vsync_event_timer);
  3646. struct msm_drm_private *priv;
  3647. struct msm_drm_thread *event_thread;
  3648. if (!sde_enc || !sde_enc->crtc) {
  3649. SDE_ERROR("invalid encoder parameters %d\n", !sde_enc);
  3650. return;
  3651. }
  3652. drm_enc = &sde_enc->base;
  3653. if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
  3654. SDE_ERROR("invalid encoder parameters\n");
  3655. return;
  3656. }
  3657. priv = drm_enc->dev->dev_private;
  3658. if (sde_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
  3659. SDE_ERROR("invalid crtc index:%u\n",
  3660. sde_enc->crtc->index);
  3661. return;
  3662. }
  3663. event_thread = &priv->event_thread[sde_enc->crtc->index];
  3664. if (!event_thread) {
  3665. SDE_ERROR("event_thread not found for crtc:%d\n",
  3666. sde_enc->crtc->index);
  3667. return;
  3668. }
  3669. kthread_queue_work(&event_thread->worker,
  3670. &sde_enc->vsync_event_work);
  3671. }
  3672. static void sde_encoder_esd_trigger_work_handler(struct kthread_work *work)
  3673. {
  3674. struct sde_encoder_virt *sde_enc = container_of(work,
  3675. struct sde_encoder_virt, esd_trigger_work);
  3676. if (!sde_enc) {
  3677. SDE_ERROR("invalid sde encoder\n");
  3678. return;
  3679. }
  3680. sde_encoder_resource_control(&sde_enc->base,
  3681. SDE_ENC_RC_EVENT_KICKOFF);
  3682. }
  3683. static void sde_encoder_input_event_work_handler(struct kthread_work *work)
  3684. {
  3685. struct sde_encoder_virt *sde_enc = container_of(work,
  3686. struct sde_encoder_virt, input_event_work);
  3687. if (!sde_enc) {
  3688. SDE_ERROR("invalid sde encoder\n");
  3689. return;
  3690. }
  3691. sde_encoder_resource_control(&sde_enc->base,
  3692. SDE_ENC_RC_EVENT_EARLY_WAKEUP);
  3693. }
  3694. static void sde_encoder_vsync_event_work_handler(struct kthread_work *work)
  3695. {
  3696. struct sde_encoder_virt *sde_enc = container_of(work,
  3697. struct sde_encoder_virt, vsync_event_work);
  3698. bool autorefresh_enabled = false;
  3699. int rc = 0;
  3700. ktime_t wakeup_time;
  3701. struct drm_encoder *drm_enc;
  3702. if (!sde_enc) {
  3703. SDE_ERROR("invalid sde encoder\n");
  3704. return;
  3705. }
  3706. drm_enc = &sde_enc->base;
  3707. rc = pm_runtime_get_sync(drm_enc->dev->dev);
  3708. if (rc < 0) {
  3709. SDE_ERROR_ENC(sde_enc, "sde enc power enabled failed:%d\n", rc);
  3710. return;
  3711. }
  3712. if (sde_enc->cur_master &&
  3713. sde_enc->cur_master->ops.is_autorefresh_enabled)
  3714. autorefresh_enabled =
  3715. sde_enc->cur_master->ops.is_autorefresh_enabled(
  3716. sde_enc->cur_master);
  3717. /* Update timer if autorefresh is enabled else return */
  3718. if (!autorefresh_enabled)
  3719. goto exit;
  3720. rc = _sde_encoder_wakeup_time(&sde_enc->base, &wakeup_time);
  3721. if (rc)
  3722. goto exit;
  3723. SDE_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
  3724. mod_timer(&sde_enc->vsync_event_timer,
  3725. nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
  3726. exit:
  3727. pm_runtime_put_sync(drm_enc->dev->dev);
  3728. }
  3729. int sde_encoder_poll_line_counts(struct drm_encoder *drm_enc)
  3730. {
  3731. static const uint64_t timeout_us = 50000;
  3732. static const uint64_t sleep_us = 20;
  3733. struct sde_encoder_virt *sde_enc;
  3734. ktime_t cur_ktime, exp_ktime;
  3735. uint32_t line_count, tmp, i;
  3736. if (!drm_enc) {
  3737. SDE_ERROR("invalid encoder\n");
  3738. return -EINVAL;
  3739. }
  3740. sde_enc = to_sde_encoder_virt(drm_enc);
  3741. if (!sde_enc->cur_master ||
  3742. !sde_enc->cur_master->ops.get_line_count) {
  3743. SDE_DEBUG_ENC(sde_enc, "can't get master line count\n");
  3744. SDE_EVT32(DRMID(drm_enc), SDE_EVTLOG_ERROR);
  3745. return -EINVAL;
  3746. }
  3747. exp_ktime = ktime_add_ms(ktime_get(), timeout_us / 1000);
  3748. line_count = sde_enc->cur_master->ops.get_line_count(
  3749. sde_enc->cur_master);
  3750. for (i = 0; i < (timeout_us * 2 / sleep_us); ++i) {
  3751. tmp = line_count;
  3752. line_count = sde_enc->cur_master->ops.get_line_count(
  3753. sde_enc->cur_master);
  3754. if (line_count < tmp) {
  3755. SDE_EVT32(DRMID(drm_enc), line_count);
  3756. return 0;
  3757. }
  3758. cur_ktime = ktime_get();
  3759. if (ktime_compare_safe(exp_ktime, cur_ktime) <= 0)
  3760. break;
  3761. usleep_range(sleep_us / 2, sleep_us);
  3762. }
  3763. SDE_EVT32(DRMID(drm_enc), line_count, SDE_EVTLOG_ERROR);
  3764. return -ETIMEDOUT;
  3765. }
  3766. static int _helper_flush_qsync(struct sde_encoder_phys *phys_enc)
  3767. {
  3768. struct drm_encoder *drm_enc;
  3769. struct sde_rm_hw_iter rm_iter;
  3770. bool lm_valid = false;
  3771. bool intf_valid = false;
  3772. if (!phys_enc || !phys_enc->parent) {
  3773. SDE_ERROR("invalid encoder\n");
  3774. return -EINVAL;
  3775. }
  3776. drm_enc = phys_enc->parent;
  3777. /* Flush the interfaces for AVR update or Qsync with INTF TE */
  3778. if (phys_enc->intf_mode == INTF_MODE_VIDEO ||
  3779. (phys_enc->intf_mode == INTF_MODE_CMD &&
  3780. phys_enc->has_intf_te)) {
  3781. sde_rm_init_hw_iter(&rm_iter, drm_enc->base.id,
  3782. SDE_HW_BLK_INTF);
  3783. while (sde_rm_get_hw(&phys_enc->sde_kms->rm, &rm_iter)) {
  3784. struct sde_hw_intf *hw_intf =
  3785. (struct sde_hw_intf *)rm_iter.hw;
  3786. if (!hw_intf)
  3787. continue;
  3788. if (phys_enc->hw_ctl->ops.update_bitmask_intf)
  3789. phys_enc->hw_ctl->ops.update_bitmask_intf(
  3790. phys_enc->hw_ctl,
  3791. hw_intf->idx, 1);
  3792. intf_valid = true;
  3793. }
  3794. if (!intf_valid) {
  3795. SDE_ERROR_ENC(to_sde_encoder_virt(drm_enc),
  3796. "intf not found to flush\n");
  3797. return -EFAULT;
  3798. }
  3799. } else {
  3800. sde_rm_init_hw_iter(&rm_iter, drm_enc->base.id, SDE_HW_BLK_LM);
  3801. while (sde_rm_get_hw(&phys_enc->sde_kms->rm, &rm_iter)) {
  3802. struct sde_hw_mixer *hw_lm =
  3803. (struct sde_hw_mixer *)rm_iter.hw;
  3804. if (!hw_lm)
  3805. continue;
  3806. /* update LM flush for HW without INTF TE */
  3807. if (phys_enc->hw_ctl->ops.update_bitmask_mixer)
  3808. phys_enc->hw_ctl->ops.update_bitmask_mixer(
  3809. phys_enc->hw_ctl,
  3810. hw_lm->idx, 1);
  3811. lm_valid = true;
  3812. }
  3813. if (!lm_valid) {
  3814. SDE_ERROR_ENC(to_sde_encoder_virt(drm_enc),
  3815. "lm not found to flush\n");
  3816. return -EFAULT;
  3817. }
  3818. }
  3819. return 0;
  3820. }
  3821. static bool _sde_encoder_dsc_is_dirty(struct sde_encoder_virt *sde_enc)
  3822. {
  3823. int i;
  3824. for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
  3825. /**
  3826. * This dirty_dsc_hw field is set during DSC disable to
  3827. * indicate which DSC blocks need to be flushed
  3828. */
  3829. if (sde_enc->dirty_dsc_ids[i])
  3830. return true;
  3831. }
  3832. return false;
  3833. }
  3834. static void _helper_flush_dsc(struct sde_encoder_virt *sde_enc)
  3835. {
  3836. int i;
  3837. struct sde_hw_ctl *hw_ctl = NULL;
  3838. enum sde_dsc dsc_idx;
  3839. if (sde_enc->cur_master)
  3840. hw_ctl = sde_enc->cur_master->hw_ctl;
  3841. for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
  3842. dsc_idx = sde_enc->dirty_dsc_ids[i];
  3843. if (dsc_idx && hw_ctl && hw_ctl->ops.update_bitmask_dsc)
  3844. hw_ctl->ops.update_bitmask_dsc(hw_ctl, dsc_idx, 1);
  3845. sde_enc->dirty_dsc_ids[i] = DSC_NONE;
  3846. }
  3847. }
  3848. static void _sde_encoder_helper_hdr_plus_mempool_update(
  3849. struct sde_encoder_virt *sde_enc)
  3850. {
  3851. struct sde_connector_dyn_hdr_metadata *dhdr_meta = NULL;
  3852. struct sde_hw_mdp *mdptop = NULL;
  3853. sde_enc->dynamic_hdr_updated = false;
  3854. if (sde_enc->cur_master) {
  3855. mdptop = sde_enc->cur_master->hw_mdptop;
  3856. dhdr_meta = sde_connector_get_dyn_hdr_meta(
  3857. sde_enc->cur_master->connector);
  3858. }
  3859. if (!mdptop || !dhdr_meta || !dhdr_meta->dynamic_hdr_update)
  3860. return;
  3861. if (mdptop->ops.set_hdr_plus_metadata) {
  3862. sde_enc->dynamic_hdr_updated = true;
  3863. mdptop->ops.set_hdr_plus_metadata(
  3864. mdptop, dhdr_meta->dynamic_hdr_payload,
  3865. dhdr_meta->dynamic_hdr_payload_size,
  3866. sde_enc->cur_master->intf_idx == INTF_0 ?
  3867. 0 : 1);
  3868. }
  3869. }
  3870. void sde_encoder_helper_needs_hw_reset(struct drm_encoder *drm_enc)
  3871. {
  3872. struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
  3873. struct sde_encoder_phys *phys;
  3874. int i;
  3875. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  3876. phys = sde_enc->phys_encs[i];
  3877. if (phys && phys->ops.hw_reset)
  3878. phys->ops.hw_reset(phys);
  3879. }
  3880. }
  3881. int sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
  3882. struct sde_encoder_kickoff_params *params)
  3883. {
  3884. struct sde_encoder_virt *sde_enc;
  3885. struct sde_encoder_phys *phys;
  3886. struct sde_kms *sde_kms = NULL;
  3887. struct sde_crtc *sde_crtc;
  3888. struct msm_drm_private *priv = NULL;
  3889. bool needs_hw_reset = false, is_cmd_mode;
  3890. int i, rc, ret = 0;
  3891. struct msm_display_info *disp_info;
  3892. if (!drm_enc || !params || !drm_enc->dev ||
  3893. !drm_enc->dev->dev_private) {
  3894. SDE_ERROR("invalid args\n");
  3895. return -EINVAL;
  3896. }
  3897. sde_enc = to_sde_encoder_virt(drm_enc);
  3898. priv = drm_enc->dev->dev_private;
  3899. sde_kms = to_sde_kms(priv->kms);
  3900. disp_info = &sde_enc->disp_info;
  3901. sde_crtc = to_sde_crtc(sde_enc->crtc);
  3902. SDE_DEBUG_ENC(sde_enc, "\n");
  3903. SDE_EVT32(DRMID(drm_enc));
  3904. /* update the qsync parameters for the current frame */
  3905. if (sde_enc->cur_master)
  3906. sde_connector_set_qsync_params(
  3907. sde_enc->cur_master->connector);
  3908. is_cmd_mode = sde_encoder_check_curr_mode(drm_enc,
  3909. MSM_DISPLAY_CMD_MODE);
  3910. if (sde_enc->cur_master && sde_enc->cur_master->connector
  3911. && is_cmd_mode)
  3912. sde_enc->frame_trigger_mode = sde_connector_get_property(
  3913. sde_enc->cur_master->connector->state,
  3914. CONNECTOR_PROP_CMD_FRAME_TRIGGER_MODE);
  3915. _sde_encoder_helper_hdr_plus_mempool_update(sde_enc);
  3916. /* prepare for next kickoff, may include waiting on previous kickoff */
  3917. SDE_ATRACE_BEGIN("sde_encoder_prepare_for_kickoff");
  3918. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  3919. phys = sde_enc->phys_encs[i];
  3920. params->frame_trigger_mode = sde_enc->frame_trigger_mode;
  3921. params->recovery_events_enabled =
  3922. sde_enc->recovery_events_enabled;
  3923. if (phys) {
  3924. if (phys->ops.prepare_for_kickoff) {
  3925. rc = phys->ops.prepare_for_kickoff(
  3926. phys, params);
  3927. if (rc)
  3928. ret = rc;
  3929. }
  3930. if (phys->enable_state == SDE_ENC_ERR_NEEDS_HW_RESET)
  3931. needs_hw_reset = true;
  3932. _sde_encoder_setup_dither(phys);
  3933. if (sde_enc->cur_master &&
  3934. sde_connector_is_qsync_updated(
  3935. sde_enc->cur_master->connector)) {
  3936. _helper_flush_qsync(phys);
  3937. }
  3938. }
  3939. }
  3940. rc = sde_encoder_resource_control(drm_enc, SDE_ENC_RC_EVENT_KICKOFF);
  3941. if (rc) {
  3942. SDE_ERROR_ENC(sde_enc, "resource kickoff failed rc %d\n", rc);
  3943. ret = rc;
  3944. goto end;
  3945. }
  3946. /* if any phys needs reset, reset all phys, in-order */
  3947. if (needs_hw_reset)
  3948. sde_encoder_helper_needs_hw_reset(drm_enc);
  3949. _sde_encoder_update_master(drm_enc, params);
  3950. _sde_encoder_update_roi(drm_enc);
  3951. if (sde_enc->cur_master && sde_enc->cur_master->connector) {
  3952. rc = sde_connector_pre_kickoff(sde_enc->cur_master->connector);
  3953. if (rc) {
  3954. SDE_ERROR_ENC(sde_enc, "kickoff conn%d failed rc %d\n",
  3955. sde_enc->cur_master->connector->base.id,
  3956. rc);
  3957. ret = rc;
  3958. }
  3959. }
  3960. if (_sde_encoder_is_dsc_enabled(drm_enc) && sde_enc->cur_master &&
  3961. ((is_cmd_mode && sde_enc->cur_master->cont_splash_enabled) ||
  3962. !sde_enc->cur_master->cont_splash_enabled)) {
  3963. rc = _sde_encoder_dsc_setup(sde_enc, params);
  3964. if (rc) {
  3965. SDE_ERROR_ENC(sde_enc, "failed to setup DSC: %d\n", rc);
  3966. ret = rc;
  3967. }
  3968. }
  3969. if (_sde_encoder_dsc_is_dirty(sde_enc))
  3970. _helper_flush_dsc(sde_enc);
  3971. if (sde_enc->cur_master && !sde_enc->cur_master->cont_splash_enabled)
  3972. sde_configure_qdss(sde_enc, sde_enc->cur_master->hw_qdss,
  3973. sde_enc->cur_master, sde_kms->qdss_enabled);
  3974. end:
  3975. SDE_ATRACE_END("sde_encoder_prepare_for_kickoff");
  3976. return ret;
  3977. }
  3978. /**
  3979. * _sde_encoder_reset_ctl_hw - reset h/w configuration for all ctl's associated
  3980. * with the specified encoder, and unstage all pipes from it
  3981. * @encoder: encoder pointer
  3982. * Returns: 0 on success
  3983. */
  3984. static int _sde_encoder_reset_ctl_hw(struct drm_encoder *drm_enc)
  3985. {
  3986. struct sde_encoder_virt *sde_enc;
  3987. struct sde_encoder_phys *phys;
  3988. unsigned int i;
  3989. int rc = 0;
  3990. if (!drm_enc) {
  3991. SDE_ERROR("invalid encoder\n");
  3992. return -EINVAL;
  3993. }
  3994. sde_enc = to_sde_encoder_virt(drm_enc);
  3995. SDE_ATRACE_BEGIN("encoder_release_lm");
  3996. SDE_DEBUG_ENC(sde_enc, "\n");
  3997. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  3998. phys = sde_enc->phys_encs[i];
  3999. if (!phys)
  4000. continue;
  4001. SDE_EVT32(DRMID(drm_enc), phys->intf_idx - INTF_0);
  4002. rc = sde_encoder_helper_reset_mixers(phys, NULL);
  4003. if (rc)
  4004. SDE_EVT32(DRMID(drm_enc), rc, SDE_EVTLOG_ERROR);
  4005. }
  4006. SDE_ATRACE_END("encoder_release_lm");
  4007. return rc;
  4008. }
  4009. void sde_encoder_kickoff(struct drm_encoder *drm_enc, bool is_error)
  4010. {
  4011. struct sde_encoder_virt *sde_enc;
  4012. struct sde_encoder_phys *phys;
  4013. ktime_t wakeup_time;
  4014. unsigned int i;
  4015. if (!drm_enc) {
  4016. SDE_ERROR("invalid encoder\n");
  4017. return;
  4018. }
  4019. SDE_ATRACE_BEGIN("encoder_kickoff");
  4020. sde_enc = to_sde_encoder_virt(drm_enc);
  4021. SDE_DEBUG_ENC(sde_enc, "\n");
  4022. /* create a 'no pipes' commit to release buffers on errors */
  4023. if (is_error)
  4024. _sde_encoder_reset_ctl_hw(drm_enc);
  4025. /* All phys encs are ready to go, trigger the kickoff */
  4026. _sde_encoder_kickoff_phys(sde_enc);
  4027. /* allow phys encs to handle any post-kickoff business */
  4028. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  4029. phys = sde_enc->phys_encs[i];
  4030. if (phys && phys->ops.handle_post_kickoff)
  4031. phys->ops.handle_post_kickoff(phys);
  4032. }
  4033. if (sde_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
  4034. !_sde_encoder_wakeup_time(drm_enc, &wakeup_time)) {
  4035. SDE_EVT32_VERBOSE(ktime_to_ms(wakeup_time));
  4036. mod_timer(&sde_enc->vsync_event_timer,
  4037. nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
  4038. }
  4039. SDE_ATRACE_END("encoder_kickoff");
  4040. }
  4041. void sde_encoder_helper_get_pp_line_count(struct drm_encoder *drm_enc,
  4042. struct sde_hw_pp_vsync_info *info)
  4043. {
  4044. struct sde_encoder_virt *sde_enc;
  4045. struct sde_encoder_phys *phys;
  4046. int i, ret;
  4047. if (!drm_enc || !info)
  4048. return;
  4049. sde_enc = to_sde_encoder_virt(drm_enc);
  4050. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  4051. phys = sde_enc->phys_encs[i];
  4052. if (phys && phys->hw_intf && phys->hw_pp
  4053. && phys->hw_intf->ops.get_vsync_info) {
  4054. ret = phys->hw_intf->ops.get_vsync_info(
  4055. phys->hw_intf, &info[i]);
  4056. if (!ret) {
  4057. info[i].pp_idx = phys->hw_pp->idx - PINGPONG_0;
  4058. info[i].intf_idx = phys->hw_intf->idx - INTF_0;
  4059. }
  4060. }
  4061. }
  4062. }
  4063. int sde_encoder_helper_reset_mixers(struct sde_encoder_phys *phys_enc,
  4064. struct drm_framebuffer *fb)
  4065. {
  4066. struct drm_encoder *drm_enc;
  4067. struct sde_hw_mixer_cfg mixer;
  4068. struct sde_rm_hw_iter lm_iter;
  4069. bool lm_valid = false;
  4070. if (!phys_enc || !phys_enc->parent) {
  4071. SDE_ERROR("invalid encoder\n");
  4072. return -EINVAL;
  4073. }
  4074. drm_enc = phys_enc->parent;
  4075. memset(&mixer, 0, sizeof(mixer));
  4076. /* reset associated CTL/LMs */
  4077. if (phys_enc->hw_ctl->ops.clear_all_blendstages)
  4078. phys_enc->hw_ctl->ops.clear_all_blendstages(phys_enc->hw_ctl);
  4079. sde_rm_init_hw_iter(&lm_iter, drm_enc->base.id, SDE_HW_BLK_LM);
  4080. while (sde_rm_get_hw(&phys_enc->sde_kms->rm, &lm_iter)) {
  4081. struct sde_hw_mixer *hw_lm = (struct sde_hw_mixer *)lm_iter.hw;
  4082. if (!hw_lm)
  4083. continue;
  4084. /* need to flush LM to remove it */
  4085. if (phys_enc->hw_ctl->ops.update_bitmask_mixer)
  4086. phys_enc->hw_ctl->ops.update_bitmask_mixer(
  4087. phys_enc->hw_ctl,
  4088. hw_lm->idx, 1);
  4089. if (fb) {
  4090. /* assume a single LM if targeting a frame buffer */
  4091. if (lm_valid)
  4092. continue;
  4093. mixer.out_height = fb->height;
  4094. mixer.out_width = fb->width;
  4095. if (hw_lm->ops.setup_mixer_out)
  4096. hw_lm->ops.setup_mixer_out(hw_lm, &mixer);
  4097. }
  4098. lm_valid = true;
  4099. /* only enable border color on LM */
  4100. if (phys_enc->hw_ctl->ops.setup_blendstage)
  4101. phys_enc->hw_ctl->ops.setup_blendstage(
  4102. phys_enc->hw_ctl, hw_lm->idx, NULL);
  4103. }
  4104. if (!lm_valid) {
  4105. SDE_ERROR_ENC(to_sde_encoder_virt(drm_enc), "lm not found\n");
  4106. return -EFAULT;
  4107. }
  4108. return 0;
  4109. }
  4110. void sde_encoder_prepare_commit(struct drm_encoder *drm_enc)
  4111. {
  4112. struct sde_encoder_virt *sde_enc;
  4113. struct sde_encoder_phys *phys;
  4114. int i;
  4115. struct sde_hw_ctl *ctl;
  4116. if (!drm_enc) {
  4117. SDE_ERROR("invalid encoder\n");
  4118. return;
  4119. }
  4120. sde_enc = to_sde_encoder_virt(drm_enc);
  4121. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  4122. phys = sde_enc->phys_encs[i];
  4123. if (phys && phys->ops.prepare_commit)
  4124. phys->ops.prepare_commit(phys);
  4125. if (phys && phys->hw_ctl) {
  4126. ctl = phys->hw_ctl;
  4127. /*
  4128. * avoid clearing the pending flush during the first
  4129. * frame update after idle power collpase as the
  4130. * restore path would have updated the pending flush
  4131. */
  4132. if (!sde_enc->idle_pc_restore &&
  4133. ctl->ops.clear_pending_flush)
  4134. ctl->ops.clear_pending_flush(ctl);
  4135. }
  4136. }
  4137. }
  4138. void sde_encoder_helper_setup_misr(struct sde_encoder_phys *phys_enc,
  4139. bool enable, u32 frame_count)
  4140. {
  4141. if (!phys_enc)
  4142. return;
  4143. if (phys_enc->hw_intf && phys_enc->hw_intf->ops.setup_misr)
  4144. phys_enc->hw_intf->ops.setup_misr(phys_enc->hw_intf,
  4145. enable, frame_count);
  4146. }
  4147. int sde_encoder_helper_collect_misr(struct sde_encoder_phys *phys_enc,
  4148. bool nonblock, u32 *misr_value)
  4149. {
  4150. if (!phys_enc)
  4151. return -EINVAL;
  4152. return phys_enc->hw_intf && phys_enc->hw_intf->ops.collect_misr ?
  4153. phys_enc->hw_intf->ops.collect_misr(phys_enc->hw_intf,
  4154. nonblock, misr_value) : -ENOTSUPP;
  4155. }
  4156. #ifdef CONFIG_DEBUG_FS
  4157. static int _sde_encoder_status_show(struct seq_file *s, void *data)
  4158. {
  4159. struct sde_encoder_virt *sde_enc;
  4160. int i;
  4161. if (!s || !s->private)
  4162. return -EINVAL;
  4163. sde_enc = s->private;
  4164. mutex_lock(&sde_enc->enc_lock);
  4165. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  4166. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  4167. if (!phys)
  4168. continue;
  4169. seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
  4170. phys->intf_idx - INTF_0,
  4171. atomic_read(&phys->vsync_cnt),
  4172. atomic_read(&phys->underrun_cnt));
  4173. switch (phys->intf_mode) {
  4174. case INTF_MODE_VIDEO:
  4175. seq_puts(s, "mode: video\n");
  4176. break;
  4177. case INTF_MODE_CMD:
  4178. seq_puts(s, "mode: command\n");
  4179. break;
  4180. case INTF_MODE_WB_BLOCK:
  4181. seq_puts(s, "mode: wb block\n");
  4182. break;
  4183. case INTF_MODE_WB_LINE:
  4184. seq_puts(s, "mode: wb line\n");
  4185. break;
  4186. default:
  4187. seq_puts(s, "mode: ???\n");
  4188. break;
  4189. }
  4190. }
  4191. mutex_unlock(&sde_enc->enc_lock);
  4192. return 0;
  4193. }
  4194. static int _sde_encoder_debugfs_status_open(struct inode *inode,
  4195. struct file *file)
  4196. {
  4197. return single_open(file, _sde_encoder_status_show, inode->i_private);
  4198. }
  4199. static ssize_t _sde_encoder_misr_setup(struct file *file,
  4200. const char __user *user_buf, size_t count, loff_t *ppos)
  4201. {
  4202. struct sde_encoder_virt *sde_enc;
  4203. int rc;
  4204. char buf[MISR_BUFF_SIZE + 1];
  4205. size_t buff_copy;
  4206. u32 frame_count, enable;
  4207. struct msm_drm_private *priv = NULL;
  4208. struct sde_kms *sde_kms = NULL;
  4209. struct drm_encoder *drm_enc;
  4210. if (!file || !file->private_data)
  4211. return -EINVAL;
  4212. sde_enc = file->private_data;
  4213. priv = sde_enc->base.dev->dev_private;
  4214. if (!sde_enc || !priv || !priv->kms)
  4215. return -EINVAL;
  4216. sde_kms = to_sde_kms(priv->kms);
  4217. drm_enc = &sde_enc->base;
  4218. if (sde_kms_is_secure_session_inprogress(sde_kms)) {
  4219. SDE_DEBUG_ENC(sde_enc, "misr enable/disable not allowed\n");
  4220. return -ENOTSUPP;
  4221. }
  4222. buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
  4223. if (copy_from_user(buf, user_buf, buff_copy))
  4224. return -EINVAL;
  4225. buf[buff_copy] = 0; /* end of string */
  4226. if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
  4227. return -EINVAL;
  4228. rc = pm_runtime_get_sync(drm_enc->dev->dev);
  4229. if (rc < 0)
  4230. return rc;
  4231. sde_enc->misr_enable = enable;
  4232. sde_enc->misr_frame_count = frame_count;
  4233. sde_encoder_misr_configure(&sde_enc->base, enable, frame_count);
  4234. pm_runtime_put_sync(drm_enc->dev->dev);
  4235. return count;
  4236. }
  4237. static ssize_t _sde_encoder_misr_read(struct file *file,
  4238. char __user *user_buff, size_t count, loff_t *ppos)
  4239. {
  4240. struct sde_encoder_virt *sde_enc;
  4241. struct msm_drm_private *priv = NULL;
  4242. struct sde_kms *sde_kms = NULL;
  4243. struct drm_encoder *drm_enc;
  4244. int i = 0, len = 0;
  4245. char buf[MISR_BUFF_SIZE + 1] = {'\0'};
  4246. int rc;
  4247. if (*ppos)
  4248. return 0;
  4249. if (!file || !file->private_data)
  4250. return -EINVAL;
  4251. sde_enc = file->private_data;
  4252. priv = sde_enc->base.dev->dev_private;
  4253. if (priv != NULL)
  4254. sde_kms = to_sde_kms(priv->kms);
  4255. if (sde_kms_is_secure_session_inprogress(sde_kms)) {
  4256. SDE_DEBUG_ENC(sde_enc, "misr read not allowed\n");
  4257. return -ENOTSUPP;
  4258. }
  4259. drm_enc = &sde_enc->base;
  4260. rc = pm_runtime_get_sync(drm_enc->dev->dev);
  4261. if (rc < 0)
  4262. return rc;
  4263. if (!sde_enc->misr_enable) {
  4264. len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
  4265. "disabled\n");
  4266. goto buff_check;
  4267. }
  4268. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  4269. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  4270. u32 misr_value = 0;
  4271. if (!phys || !phys->ops.collect_misr) {
  4272. len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
  4273. "invalid\n");
  4274. SDE_ERROR_ENC(sde_enc, "invalid misr ops\n");
  4275. continue;
  4276. }
  4277. rc = phys->ops.collect_misr(phys, false, &misr_value);
  4278. if (rc) {
  4279. len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
  4280. "invalid\n");
  4281. SDE_ERROR_ENC(sde_enc, "failed to collect misr %d\n",
  4282. rc);
  4283. continue;
  4284. } else {
  4285. len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
  4286. "Intf idx:%d\n",
  4287. phys->intf_idx - INTF_0);
  4288. len += scnprintf(buf + len, MISR_BUFF_SIZE - len,
  4289. "0x%x\n", misr_value);
  4290. }
  4291. }
  4292. buff_check:
  4293. if (count <= len) {
  4294. len = 0;
  4295. goto end;
  4296. }
  4297. if (copy_to_user(user_buff, buf, len)) {
  4298. len = -EFAULT;
  4299. goto end;
  4300. }
  4301. *ppos += len; /* increase offset */
  4302. end:
  4303. pm_runtime_put_sync(drm_enc->dev->dev);
  4304. return len;
  4305. }
  4306. static int _sde_encoder_init_debugfs(struct drm_encoder *drm_enc)
  4307. {
  4308. struct sde_encoder_virt *sde_enc;
  4309. struct msm_drm_private *priv;
  4310. struct sde_kms *sde_kms;
  4311. int i;
  4312. static const struct file_operations debugfs_status_fops = {
  4313. .open = _sde_encoder_debugfs_status_open,
  4314. .read = seq_read,
  4315. .llseek = seq_lseek,
  4316. .release = single_release,
  4317. };
  4318. static const struct file_operations debugfs_misr_fops = {
  4319. .open = simple_open,
  4320. .read = _sde_encoder_misr_read,
  4321. .write = _sde_encoder_misr_setup,
  4322. };
  4323. char name[SDE_NAME_SIZE];
  4324. if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
  4325. SDE_ERROR("invalid encoder or kms\n");
  4326. return -EINVAL;
  4327. }
  4328. sde_enc = to_sde_encoder_virt(drm_enc);
  4329. priv = drm_enc->dev->dev_private;
  4330. sde_kms = to_sde_kms(priv->kms);
  4331. snprintf(name, SDE_NAME_SIZE, "encoder%u", drm_enc->base.id);
  4332. /* create overall sub-directory for the encoder */
  4333. sde_enc->debugfs_root = debugfs_create_dir(name,
  4334. drm_enc->dev->primary->debugfs_root);
  4335. if (!sde_enc->debugfs_root)
  4336. return -ENOMEM;
  4337. /* don't error check these */
  4338. debugfs_create_file("status", 0400,
  4339. sde_enc->debugfs_root, sde_enc, &debugfs_status_fops);
  4340. debugfs_create_file("misr_data", 0600,
  4341. sde_enc->debugfs_root, sde_enc, &debugfs_misr_fops);
  4342. debugfs_create_bool("idle_power_collapse", 0600, sde_enc->debugfs_root,
  4343. &sde_enc->idle_pc_enabled);
  4344. debugfs_create_u32("frame_trigger_mode", 0400, sde_enc->debugfs_root,
  4345. &sde_enc->frame_trigger_mode);
  4346. for (i = 0; i < sde_enc->num_phys_encs; i++)
  4347. if (sde_enc->phys_encs[i] &&
  4348. sde_enc->phys_encs[i]->ops.late_register)
  4349. sde_enc->phys_encs[i]->ops.late_register(
  4350. sde_enc->phys_encs[i],
  4351. sde_enc->debugfs_root);
  4352. return 0;
  4353. }
  4354. static void _sde_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
  4355. {
  4356. struct sde_encoder_virt *sde_enc;
  4357. if (!drm_enc)
  4358. return;
  4359. sde_enc = to_sde_encoder_virt(drm_enc);
  4360. debugfs_remove_recursive(sde_enc->debugfs_root);
  4361. }
  4362. #else
  4363. static int _sde_encoder_init_debugfs(struct drm_encoder *drm_enc)
  4364. {
  4365. return 0;
  4366. }
  4367. static void _sde_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
  4368. {
  4369. }
  4370. #endif
  4371. static int sde_encoder_late_register(struct drm_encoder *encoder)
  4372. {
  4373. return _sde_encoder_init_debugfs(encoder);
  4374. }
  4375. static void sde_encoder_early_unregister(struct drm_encoder *encoder)
  4376. {
  4377. _sde_encoder_destroy_debugfs(encoder);
  4378. }
  4379. static int sde_encoder_virt_add_phys_encs(
  4380. struct msm_display_info *disp_info,
  4381. struct sde_encoder_virt *sde_enc,
  4382. struct sde_enc_phys_init_params *params)
  4383. {
  4384. struct sde_encoder_phys *enc = NULL;
  4385. u32 display_caps = disp_info->capabilities;
  4386. SDE_DEBUG_ENC(sde_enc, "\n");
  4387. /*
  4388. * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
  4389. * in this function, check up-front.
  4390. */
  4391. if (sde_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
  4392. ARRAY_SIZE(sde_enc->phys_encs)) {
  4393. SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
  4394. sde_enc->num_phys_encs);
  4395. return -EINVAL;
  4396. }
  4397. if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
  4398. enc = sde_encoder_phys_vid_init(params);
  4399. if (IS_ERR_OR_NULL(enc)) {
  4400. SDE_ERROR_ENC(sde_enc, "failed to init vid enc: %ld\n",
  4401. PTR_ERR(enc));
  4402. return !enc ? -EINVAL : PTR_ERR(enc);
  4403. }
  4404. sde_enc->phys_vid_encs[sde_enc->num_phys_encs] = enc;
  4405. }
  4406. if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
  4407. enc = sde_encoder_phys_cmd_init(params);
  4408. if (IS_ERR_OR_NULL(enc)) {
  4409. SDE_ERROR_ENC(sde_enc, "failed to init cmd enc: %ld\n",
  4410. PTR_ERR(enc));
  4411. return !enc ? -EINVAL : PTR_ERR(enc);
  4412. }
  4413. sde_enc->phys_cmd_encs[sde_enc->num_phys_encs] = enc;
  4414. }
  4415. if (disp_info->curr_panel_mode == MSM_DISPLAY_VIDEO_MODE)
  4416. sde_enc->phys_encs[sde_enc->num_phys_encs] =
  4417. sde_enc->phys_vid_encs[sde_enc->num_phys_encs];
  4418. else
  4419. sde_enc->phys_encs[sde_enc->num_phys_encs] =
  4420. sde_enc->phys_cmd_encs[sde_enc->num_phys_encs];
  4421. ++sde_enc->num_phys_encs;
  4422. return 0;
  4423. }
  4424. static int sde_encoder_virt_add_phys_enc_wb(struct sde_encoder_virt *sde_enc,
  4425. struct sde_enc_phys_init_params *params)
  4426. {
  4427. struct sde_encoder_phys *enc = NULL;
  4428. if (!sde_enc) {
  4429. SDE_ERROR("invalid encoder\n");
  4430. return -EINVAL;
  4431. }
  4432. SDE_DEBUG_ENC(sde_enc, "\n");
  4433. if (sde_enc->num_phys_encs + 1 >= ARRAY_SIZE(sde_enc->phys_encs)) {
  4434. SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
  4435. sde_enc->num_phys_encs);
  4436. return -EINVAL;
  4437. }
  4438. enc = sde_encoder_phys_wb_init(params);
  4439. if (IS_ERR_OR_NULL(enc)) {
  4440. SDE_ERROR_ENC(sde_enc, "failed to init wb enc: %ld\n",
  4441. PTR_ERR(enc));
  4442. return !enc ? -EINVAL : PTR_ERR(enc);
  4443. }
  4444. sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
  4445. ++sde_enc->num_phys_encs;
  4446. return 0;
  4447. }
  4448. static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
  4449. struct sde_kms *sde_kms,
  4450. struct msm_display_info *disp_info,
  4451. int *drm_enc_mode)
  4452. {
  4453. int ret = 0;
  4454. int i = 0;
  4455. enum sde_intf_type intf_type;
  4456. struct sde_encoder_virt_ops parent_ops = {
  4457. sde_encoder_vblank_callback,
  4458. sde_encoder_underrun_callback,
  4459. sde_encoder_frame_done_callback,
  4460. sde_encoder_get_qsync_fps_callback,
  4461. };
  4462. struct sde_enc_phys_init_params phys_params;
  4463. if (!sde_enc || !sde_kms) {
  4464. SDE_ERROR("invalid arg(s), enc %d kms %d\n",
  4465. !sde_enc, !sde_kms);
  4466. return -EINVAL;
  4467. }
  4468. memset(&phys_params, 0, sizeof(phys_params));
  4469. phys_params.sde_kms = sde_kms;
  4470. phys_params.parent = &sde_enc->base;
  4471. phys_params.parent_ops = parent_ops;
  4472. phys_params.enc_spinlock = &sde_enc->enc_spinlock;
  4473. phys_params.vblank_ctl_lock = &sde_enc->vblank_ctl_lock;
  4474. SDE_DEBUG("\n");
  4475. if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
  4476. *drm_enc_mode = DRM_MODE_ENCODER_DSI;
  4477. intf_type = INTF_DSI;
  4478. } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
  4479. *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
  4480. intf_type = INTF_HDMI;
  4481. } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
  4482. if (disp_info->capabilities & MSM_DISPLAY_CAP_MST_MODE)
  4483. *drm_enc_mode = DRM_MODE_ENCODER_DPMST;
  4484. else
  4485. *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
  4486. intf_type = INTF_DP;
  4487. } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_VIRTUAL) {
  4488. *drm_enc_mode = DRM_MODE_ENCODER_VIRTUAL;
  4489. intf_type = INTF_WB;
  4490. } else {
  4491. SDE_ERROR_ENC(sde_enc, "unsupported display interface type\n");
  4492. return -EINVAL;
  4493. }
  4494. WARN_ON(disp_info->num_of_h_tiles < 1);
  4495. sde_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
  4496. sde_enc->te_source = disp_info->te_source;
  4497. SDE_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
  4498. if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
  4499. (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
  4500. sde_enc->idle_pc_enabled = sde_kms->catalog->has_idle_pc;
  4501. mutex_lock(&sde_enc->enc_lock);
  4502. for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
  4503. /*
  4504. * Left-most tile is at index 0, content is controller id
  4505. * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
  4506. * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
  4507. */
  4508. u32 controller_id = disp_info->h_tile_instance[i];
  4509. if (disp_info->num_of_h_tiles > 1) {
  4510. if (i == 0)
  4511. phys_params.split_role = ENC_ROLE_MASTER;
  4512. else
  4513. phys_params.split_role = ENC_ROLE_SLAVE;
  4514. } else {
  4515. phys_params.split_role = ENC_ROLE_SOLO;
  4516. }
  4517. SDE_DEBUG("h_tile_instance %d = %d, split_role %d\n",
  4518. i, controller_id, phys_params.split_role);
  4519. if (sde_enc->ops.phys_init) {
  4520. struct sde_encoder_phys *enc;
  4521. enc = sde_enc->ops.phys_init(intf_type,
  4522. controller_id,
  4523. &phys_params);
  4524. if (enc) {
  4525. sde_enc->phys_encs[sde_enc->num_phys_encs] =
  4526. enc;
  4527. ++sde_enc->num_phys_encs;
  4528. } else
  4529. SDE_ERROR_ENC(sde_enc,
  4530. "failed to add phys encs\n");
  4531. continue;
  4532. }
  4533. if (intf_type == INTF_WB) {
  4534. phys_params.intf_idx = INTF_MAX;
  4535. phys_params.wb_idx = sde_encoder_get_wb(
  4536. sde_kms->catalog,
  4537. intf_type, controller_id);
  4538. if (phys_params.wb_idx == WB_MAX) {
  4539. SDE_ERROR_ENC(sde_enc,
  4540. "could not get wb: type %d, id %d\n",
  4541. intf_type, controller_id);
  4542. ret = -EINVAL;
  4543. }
  4544. } else {
  4545. phys_params.wb_idx = WB_MAX;
  4546. phys_params.intf_idx = sde_encoder_get_intf(
  4547. sde_kms->catalog, intf_type,
  4548. controller_id);
  4549. if (phys_params.intf_idx == INTF_MAX) {
  4550. SDE_ERROR_ENC(sde_enc,
  4551. "could not get wb: type %d, id %d\n",
  4552. intf_type, controller_id);
  4553. ret = -EINVAL;
  4554. }
  4555. }
  4556. if (!ret) {
  4557. if (intf_type == INTF_WB)
  4558. ret = sde_encoder_virt_add_phys_enc_wb(sde_enc,
  4559. &phys_params);
  4560. else
  4561. ret = sde_encoder_virt_add_phys_encs(
  4562. disp_info,
  4563. sde_enc,
  4564. &phys_params);
  4565. if (ret)
  4566. SDE_ERROR_ENC(sde_enc,
  4567. "failed to add phys encs\n");
  4568. }
  4569. }
  4570. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  4571. struct sde_encoder_phys *vid_phys = sde_enc->phys_vid_encs[i];
  4572. struct sde_encoder_phys *cmd_phys = sde_enc->phys_cmd_encs[i];
  4573. if (vid_phys) {
  4574. atomic_set(&vid_phys->vsync_cnt, 0);
  4575. atomic_set(&vid_phys->underrun_cnt, 0);
  4576. }
  4577. if (cmd_phys) {
  4578. atomic_set(&cmd_phys->vsync_cnt, 0);
  4579. atomic_set(&cmd_phys->underrun_cnt, 0);
  4580. }
  4581. }
  4582. mutex_unlock(&sde_enc->enc_lock);
  4583. return ret;
  4584. }
  4585. static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = {
  4586. .mode_set = sde_encoder_virt_mode_set,
  4587. .disable = sde_encoder_virt_disable,
  4588. .enable = sde_encoder_virt_enable,
  4589. .atomic_check = sde_encoder_virt_atomic_check,
  4590. };
  4591. static const struct drm_encoder_funcs sde_encoder_funcs = {
  4592. .destroy = sde_encoder_destroy,
  4593. .late_register = sde_encoder_late_register,
  4594. .early_unregister = sde_encoder_early_unregister,
  4595. };
  4596. struct drm_encoder *sde_encoder_init_with_ops(
  4597. struct drm_device *dev,
  4598. struct msm_display_info *disp_info,
  4599. const struct sde_encoder_ops *ops)
  4600. {
  4601. struct msm_drm_private *priv = dev->dev_private;
  4602. struct sde_kms *sde_kms = to_sde_kms(priv->kms);
  4603. struct drm_encoder *drm_enc = NULL;
  4604. struct sde_encoder_virt *sde_enc = NULL;
  4605. int drm_enc_mode = DRM_MODE_ENCODER_NONE;
  4606. char name[SDE_NAME_SIZE];
  4607. int ret = 0, i, intf_index = INTF_MAX;
  4608. struct sde_encoder_phys *phys = NULL;
  4609. sde_enc = kzalloc(sizeof(*sde_enc), GFP_KERNEL);
  4610. if (!sde_enc) {
  4611. ret = -ENOMEM;
  4612. goto fail;
  4613. }
  4614. if (ops)
  4615. sde_enc->ops = *ops;
  4616. mutex_init(&sde_enc->enc_lock);
  4617. ret = sde_encoder_setup_display(sde_enc, sde_kms, disp_info,
  4618. &drm_enc_mode);
  4619. if (ret)
  4620. goto fail;
  4621. sde_enc->cur_master = NULL;
  4622. spin_lock_init(&sde_enc->enc_spinlock);
  4623. mutex_init(&sde_enc->vblank_ctl_lock);
  4624. for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
  4625. atomic_set(&sde_enc->frame_done_cnt[i], 0);
  4626. drm_enc = &sde_enc->base;
  4627. drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode, NULL);
  4628. drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs);
  4629. if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
  4630. timer_setup(&sde_enc->vsync_event_timer,
  4631. sde_encoder_vsync_event_handler, 0);
  4632. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  4633. phys = sde_enc->phys_encs[i];
  4634. if (!phys)
  4635. continue;
  4636. if (phys->ops.is_master && phys->ops.is_master(phys))
  4637. intf_index = phys->intf_idx - INTF_0;
  4638. }
  4639. snprintf(name, SDE_NAME_SIZE, "rsc_enc%u", drm_enc->base.id);
  4640. sde_enc->rsc_client = sde_rsc_client_create(SDE_RSC_INDEX, name,
  4641. (disp_info->display_type == SDE_CONNECTOR_PRIMARY) ?
  4642. SDE_RSC_PRIMARY_DISP_CLIENT :
  4643. SDE_RSC_EXTERNAL_DISP_CLIENT, intf_index + 1);
  4644. if (IS_ERR_OR_NULL(sde_enc->rsc_client)) {
  4645. SDE_DEBUG("sde rsc client create failed :%ld\n",
  4646. PTR_ERR(sde_enc->rsc_client));
  4647. sde_enc->rsc_client = NULL;
  4648. }
  4649. if (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
  4650. ret = _sde_encoder_input_handler(sde_enc);
  4651. if (ret)
  4652. SDE_ERROR(
  4653. "input handler registration failed, rc = %d\n", ret);
  4654. }
  4655. mutex_init(&sde_enc->rc_lock);
  4656. kthread_init_delayed_work(&sde_enc->delayed_off_work,
  4657. sde_encoder_off_work);
  4658. sde_enc->vblank_enabled = false;
  4659. sde_enc->qdss_status = false;
  4660. kthread_init_work(&sde_enc->vsync_event_work,
  4661. sde_encoder_vsync_event_work_handler);
  4662. kthread_init_work(&sde_enc->input_event_work,
  4663. sde_encoder_input_event_work_handler);
  4664. kthread_init_work(&sde_enc->esd_trigger_work,
  4665. sde_encoder_esd_trigger_work_handler);
  4666. memcpy(&sde_enc->disp_info, disp_info, sizeof(*disp_info));
  4667. SDE_DEBUG_ENC(sde_enc, "created\n");
  4668. return drm_enc;
  4669. fail:
  4670. SDE_ERROR("failed to create encoder\n");
  4671. if (drm_enc)
  4672. sde_encoder_destroy(drm_enc);
  4673. return ERR_PTR(ret);
  4674. }
  4675. struct drm_encoder *sde_encoder_init(
  4676. struct drm_device *dev,
  4677. struct msm_display_info *disp_info)
  4678. {
  4679. return sde_encoder_init_with_ops(dev, disp_info, NULL);
  4680. }
  4681. int sde_encoder_wait_for_event(struct drm_encoder *drm_enc,
  4682. enum msm_event_wait event)
  4683. {
  4684. int (*fn_wait)(struct sde_encoder_phys *phys_enc) = NULL;
  4685. struct sde_encoder_virt *sde_enc = NULL;
  4686. int i, ret = 0;
  4687. char atrace_buf[32];
  4688. if (!drm_enc) {
  4689. SDE_ERROR("invalid encoder\n");
  4690. return -EINVAL;
  4691. }
  4692. sde_enc = to_sde_encoder_virt(drm_enc);
  4693. SDE_DEBUG_ENC(sde_enc, "\n");
  4694. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  4695. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  4696. switch (event) {
  4697. case MSM_ENC_COMMIT_DONE:
  4698. fn_wait = phys->ops.wait_for_commit_done;
  4699. break;
  4700. case MSM_ENC_TX_COMPLETE:
  4701. fn_wait = phys->ops.wait_for_tx_complete;
  4702. break;
  4703. case MSM_ENC_VBLANK:
  4704. fn_wait = phys->ops.wait_for_vblank;
  4705. break;
  4706. case MSM_ENC_ACTIVE_REGION:
  4707. fn_wait = phys->ops.wait_for_active;
  4708. break;
  4709. default:
  4710. SDE_ERROR_ENC(sde_enc, "unknown wait event %d\n",
  4711. event);
  4712. return -EINVAL;
  4713. }
  4714. if (phys && fn_wait) {
  4715. snprintf(atrace_buf, sizeof(atrace_buf),
  4716. "wait_completion_event_%d", event);
  4717. SDE_ATRACE_BEGIN(atrace_buf);
  4718. ret = fn_wait(phys);
  4719. SDE_ATRACE_END(atrace_buf);
  4720. if (ret)
  4721. return ret;
  4722. }
  4723. }
  4724. return ret;
  4725. }
  4726. u32 sde_encoder_get_fps(struct drm_encoder *drm_enc)
  4727. {
  4728. struct sde_encoder_virt *sde_enc;
  4729. if (!drm_enc) {
  4730. SDE_ERROR("invalid encoder\n");
  4731. return 0;
  4732. }
  4733. sde_enc = to_sde_encoder_virt(drm_enc);
  4734. return sde_enc->mode_info.frame_rate;
  4735. }
  4736. enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder)
  4737. {
  4738. struct sde_encoder_virt *sde_enc = NULL;
  4739. int i;
  4740. if (!encoder) {
  4741. SDE_ERROR("invalid encoder\n");
  4742. return INTF_MODE_NONE;
  4743. }
  4744. sde_enc = to_sde_encoder_virt(encoder);
  4745. if (sde_enc->cur_master)
  4746. return sde_enc->cur_master->intf_mode;
  4747. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  4748. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  4749. if (phys)
  4750. return phys->intf_mode;
  4751. }
  4752. return INTF_MODE_NONE;
  4753. }
  4754. static void _sde_encoder_cache_hw_res_cont_splash(
  4755. struct drm_encoder *encoder,
  4756. struct sde_kms *sde_kms)
  4757. {
  4758. int i, idx;
  4759. struct sde_encoder_virt *sde_enc;
  4760. struct sde_encoder_phys *phys_enc;
  4761. struct sde_rm_hw_iter dsc_iter, pp_iter, ctl_iter, intf_iter;
  4762. sde_enc = to_sde_encoder_virt(encoder);
  4763. sde_rm_init_hw_iter(&pp_iter, encoder->base.id, SDE_HW_BLK_PINGPONG);
  4764. for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
  4765. sde_enc->hw_pp[i] = NULL;
  4766. if (!sde_rm_get_hw(&sde_kms->rm, &pp_iter))
  4767. break;
  4768. sde_enc->hw_pp[i] = (struct sde_hw_pingpong *) pp_iter.hw;
  4769. }
  4770. sde_rm_init_hw_iter(&dsc_iter, encoder->base.id, SDE_HW_BLK_DSC);
  4771. for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
  4772. sde_enc->hw_dsc[i] = NULL;
  4773. if (!sde_rm_get_hw(&sde_kms->rm, &dsc_iter))
  4774. break;
  4775. sde_enc->hw_dsc[i] = (struct sde_hw_dsc *) dsc_iter.hw;
  4776. }
  4777. /*
  4778. * If we have multiple phys encoders with one controller, make
  4779. * sure to populate the controller pointer in both phys encoders.
  4780. */
  4781. for (idx = 0; idx < sde_enc->num_phys_encs; idx++) {
  4782. phys_enc = sde_enc->phys_encs[idx];
  4783. phys_enc->hw_ctl = NULL;
  4784. sde_rm_init_hw_iter(&ctl_iter, encoder->base.id,
  4785. SDE_HW_BLK_CTL);
  4786. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  4787. if (sde_rm_get_hw(&sde_kms->rm, &ctl_iter)) {
  4788. phys_enc->hw_ctl =
  4789. (struct sde_hw_ctl *) ctl_iter.hw;
  4790. pr_debug("HW CTL intf_idx:%d hw_ctl:[0x%pK]\n",
  4791. phys_enc->intf_idx, phys_enc->hw_ctl);
  4792. }
  4793. }
  4794. }
  4795. sde_rm_init_hw_iter(&intf_iter, encoder->base.id, SDE_HW_BLK_INTF);
  4796. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  4797. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  4798. phys->hw_intf = NULL;
  4799. if (!sde_rm_get_hw(&sde_kms->rm, &intf_iter))
  4800. break;
  4801. phys->hw_intf = (struct sde_hw_intf *) intf_iter.hw;
  4802. }
  4803. }
  4804. /**
  4805. * sde_encoder_update_caps_for_cont_splash - update encoder settings during
  4806. * device bootup when cont_splash is enabled
  4807. * @drm_enc: Pointer to drm encoder structure
  4808. * @splash_display: Pointer to sde_splash_display corresponding to this encoder
  4809. * @enable: boolean indicates enable or displae state of splash
  4810. * @Return: true if successful in updating the encoder structure
  4811. */
  4812. int sde_encoder_update_caps_for_cont_splash(struct drm_encoder *encoder,
  4813. struct sde_splash_display *splash_display, bool enable)
  4814. {
  4815. struct sde_encoder_virt *sde_enc;
  4816. struct msm_drm_private *priv;
  4817. struct sde_kms *sde_kms;
  4818. struct drm_connector *conn = NULL;
  4819. struct sde_connector *sde_conn = NULL;
  4820. struct sde_connector_state *sde_conn_state = NULL;
  4821. struct drm_display_mode *drm_mode = NULL;
  4822. struct sde_encoder_phys *phys_enc;
  4823. int ret = 0, i;
  4824. if (!encoder) {
  4825. SDE_ERROR("invalid drm enc\n");
  4826. return -EINVAL;
  4827. }
  4828. if (!encoder->dev || !encoder->dev->dev_private) {
  4829. SDE_ERROR("drm device invalid\n");
  4830. return -EINVAL;
  4831. }
  4832. priv = encoder->dev->dev_private;
  4833. if (!priv->kms) {
  4834. SDE_ERROR("invalid kms\n");
  4835. return -EINVAL;
  4836. }
  4837. sde_kms = to_sde_kms(priv->kms);
  4838. sde_enc = to_sde_encoder_virt(encoder);
  4839. if (!priv->num_connectors) {
  4840. SDE_ERROR_ENC(sde_enc, "No connectors registered\n");
  4841. return -EINVAL;
  4842. }
  4843. SDE_DEBUG_ENC(sde_enc,
  4844. "num of connectors: %d\n", priv->num_connectors);
  4845. SDE_DEBUG_ENC(sde_enc, "enable: %d\n", enable);
  4846. if (!enable) {
  4847. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  4848. phys_enc = sde_enc->phys_encs[i];
  4849. if (phys_enc)
  4850. phys_enc->cont_splash_enabled = false;
  4851. }
  4852. return ret;
  4853. }
  4854. if (!splash_display) {
  4855. SDE_ERROR_ENC(sde_enc, "invalid splash data\n");
  4856. return -EINVAL;
  4857. }
  4858. for (i = 0; i < priv->num_connectors; i++) {
  4859. SDE_DEBUG_ENC(sde_enc, "connector id: %d\n",
  4860. priv->connectors[i]->base.id);
  4861. sde_conn = to_sde_connector(priv->connectors[i]);
  4862. if (!sde_conn->encoder) {
  4863. SDE_DEBUG_ENC(sde_enc,
  4864. "encoder not attached to connector\n");
  4865. continue;
  4866. }
  4867. if (sde_conn->encoder->base.id
  4868. == encoder->base.id) {
  4869. conn = (priv->connectors[i]);
  4870. break;
  4871. }
  4872. }
  4873. if (!conn || !conn->state) {
  4874. SDE_ERROR_ENC(sde_enc, "connector not found\n");
  4875. return -EINVAL;
  4876. }
  4877. sde_conn_state = to_sde_connector_state(conn->state);
  4878. if (!sde_conn->ops.get_mode_info) {
  4879. SDE_ERROR_ENC(sde_enc, "conn: get_mode_info ops not found\n");
  4880. return -EINVAL;
  4881. }
  4882. ret = sde_connector_get_mode_info(&sde_conn->base,
  4883. &encoder->crtc->state->adjusted_mode,
  4884. &sde_conn_state->mode_info);
  4885. if (ret) {
  4886. SDE_ERROR_ENC(sde_enc,
  4887. "conn: ->get_mode_info failed. ret=%d\n", ret);
  4888. return ret;
  4889. }
  4890. ret = sde_rm_reserve(&sde_kms->rm, encoder, encoder->crtc->state,
  4891. conn->state, false);
  4892. if (ret) {
  4893. SDE_ERROR_ENC(sde_enc,
  4894. "failed to reserve hw resources, %d\n", ret);
  4895. return ret;
  4896. }
  4897. if (sde_conn->encoder) {
  4898. conn->state->best_encoder = sde_conn->encoder;
  4899. SDE_DEBUG_ENC(sde_enc,
  4900. "configured cstate->best_encoder to ID = %d\n",
  4901. conn->state->best_encoder->base.id);
  4902. } else {
  4903. SDE_ERROR_ENC(sde_enc, "No encoder mapped to connector=%d\n",
  4904. conn->base.id);
  4905. }
  4906. SDE_DEBUG_ENC(sde_enc, "connector topology = %llu\n",
  4907. sde_connector_get_topology_name(conn));
  4908. drm_mode = &encoder->crtc->state->adjusted_mode;
  4909. SDE_DEBUG_ENC(sde_enc, "hdisplay = %d, vdisplay = %d\n",
  4910. drm_mode->hdisplay, drm_mode->vdisplay);
  4911. drm_set_preferred_mode(conn, drm_mode->hdisplay, drm_mode->vdisplay);
  4912. if (encoder->bridge) {
  4913. SDE_DEBUG_ENC(sde_enc, "Bridge mapped to encoder\n");
  4914. /*
  4915. * For cont-splash use case, we update the mode
  4916. * configurations manually. This will skip the
  4917. * usually mode set call when actual frame is
  4918. * pushed from framework. The bridge needs to
  4919. * be updated with the current drm mode by
  4920. * calling the bridge mode set ops.
  4921. */
  4922. if (encoder->bridge->funcs) {
  4923. SDE_DEBUG_ENC(sde_enc, "calling mode_set\n");
  4924. encoder->bridge->funcs->mode_set(encoder->bridge,
  4925. drm_mode, drm_mode);
  4926. }
  4927. } else {
  4928. SDE_ERROR_ENC(sde_enc, "No bridge attached to encoder\n");
  4929. }
  4930. _sde_encoder_cache_hw_res_cont_splash(encoder, sde_kms);
  4931. for (i = 0; i < sde_enc->num_phys_encs; i++) {
  4932. struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
  4933. if (!phys) {
  4934. SDE_ERROR_ENC(sde_enc,
  4935. "phys encoders not initialized\n");
  4936. return -EINVAL;
  4937. }
  4938. /* update connector for master and slave phys encoders */
  4939. phys->connector = conn;
  4940. phys->cont_splash_enabled = true;
  4941. phys->hw_pp = sde_enc->hw_pp[i];
  4942. if (phys->ops.cont_splash_mode_set)
  4943. phys->ops.cont_splash_mode_set(phys, drm_mode);
  4944. if (phys->ops.is_master && phys->ops.is_master(phys))
  4945. sde_enc->cur_master = phys;
  4946. }
  4947. return ret;
  4948. }
  4949. int sde_encoder_display_failure_notification(struct drm_encoder *enc,
  4950. bool skip_pre_kickoff)
  4951. {
  4952. struct msm_drm_thread *event_thread = NULL;
  4953. struct msm_drm_private *priv = NULL;
  4954. struct sde_encoder_virt *sde_enc = NULL;
  4955. if (!enc || !enc->dev || !enc->dev->dev_private) {
  4956. SDE_ERROR("invalid parameters\n");
  4957. return -EINVAL;
  4958. }
  4959. priv = enc->dev->dev_private;
  4960. sde_enc = to_sde_encoder_virt(enc);
  4961. if (!sde_enc->crtc || (sde_enc->crtc->index
  4962. >= ARRAY_SIZE(priv->event_thread))) {
  4963. SDE_DEBUG_ENC(sde_enc,
  4964. "invalid cached CRTC: %d or crtc index: %d\n",
  4965. sde_enc->crtc == NULL,
  4966. sde_enc->crtc ? sde_enc->crtc->index : -EINVAL);
  4967. return -EINVAL;
  4968. }
  4969. SDE_EVT32_VERBOSE(DRMID(enc));
  4970. event_thread = &priv->event_thread[sde_enc->crtc->index];
  4971. if (!skip_pre_kickoff) {
  4972. kthread_queue_work(&event_thread->worker,
  4973. &sde_enc->esd_trigger_work);
  4974. kthread_flush_work(&sde_enc->esd_trigger_work);
  4975. }
  4976. /**
  4977. * panel may stop generating te signal (vsync) during esd failure. rsc
  4978. * hardware may hang without vsync. Avoid rsc hang by generating the
  4979. * vsync from watchdog timer instead of panel.
  4980. */
  4981. _sde_encoder_switch_to_watchdog_vsync(enc);
  4982. if (!skip_pre_kickoff)
  4983. sde_encoder_wait_for_event(enc, MSM_ENC_TX_COMPLETE);
  4984. return 0;
  4985. }
  4986. bool sde_encoder_recovery_events_enabled(struct drm_encoder *encoder)
  4987. {
  4988. struct sde_encoder_virt *sde_enc;
  4989. if (!encoder) {
  4990. SDE_ERROR("invalid drm enc\n");
  4991. return false;
  4992. }
  4993. sde_enc = to_sde_encoder_virt(encoder);
  4994. return sde_enc->recovery_events_enabled;
  4995. }
  4996. void sde_encoder_recovery_events_handler(struct drm_encoder *encoder,
  4997. bool enabled)
  4998. {
  4999. struct sde_encoder_virt *sde_enc;
  5000. if (!encoder) {
  5001. SDE_ERROR("invalid drm enc\n");
  5002. return;
  5003. }
  5004. sde_enc = to_sde_encoder_virt(encoder);
  5005. sde_enc->recovery_events_enabled = enabled;
  5006. }