sde_kms.c

/*
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <drm/drm_crtc.h>
#include <drm/drm_fixed.h>
#include <drm/drm_panel.h>
#include <linux/debugfs.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/dma-buf.h>
#include <linux/memblock.h>
#include <linux/soc/qcom/panel_event_notifier.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_probe_helper.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"

#include "dsi_display.h"
#include "dsi_drm.h"
#include "sde_wb.h"
#include "dp_display.h"
#include "dp_drm.h"
#include "dp_mst_drm.h"

#include "sde_kms.h"
#include "sde_core_irq.h"
#include "sde_formats.h"
#include "sde_hw_vbif.h"
#include "sde_vbif.h"
#include "sde_encoder.h"
#include "sde_plane.h"
#include "sde_crtc.h"
#include "sde_color_processing.h"
#include "sde_reg_dma.h"
#include "sde_connector.h"
#include "sde_vm.h"
#include "sde_fence.h"

#include <linux/qcom_scm.h>
#include <linux/qcom-iommu-util.h>
#include "soc/qcom/secure_buffer.h"
#include <linux/qtee_shmbridge.h>

#ifdef CONFIG_DRM_SDE_VM
#include <linux/gunyah/gh_irq_lend.h>
#endif

#define CREATE_TRACE_POINTS
#include "sde_trace.h"
/* defines for secure channel call */
#define MEM_PROTECT_SD_CTRL_SWITCH 0x18
#define MDP_DEVICE_ID              0x1A

#define DEMURA_REGION_NAME_MAX     32

EXPORT_TRACEPOINT_SYMBOL(tracing_mark_write);

static const char * const iommu_ports[] = {
	"mdp_0",
};

/**
 * Controls size of event log buffer. Specified as a power of 2.
 */
#define SDE_EVTLOG_SIZE	1024

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
 */
#define SDE_DEBUGFS_DIR "msm_sde"
#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"

#define SDE_KMS_MODESET_LOCK_TIMEOUT_US 500
#define SDE_KMS_MODESET_LOCK_MAX_TRIALS 20
/**
 * sdecustom - enable certain driver customizations for sde clients
 *	Enabling this modifies the standard DRM behavior slightly and assumes
 *	that the clients have specific knowledge about the modifications that
 *	are involved, so don't enable this unless you know what you're doing.
 *
 *	Parts of the driver that are affected by this setting may be located by
 *	searching for invocations of the 'sde_is_custom_client()' function.
 *
 *	This is enabled by default.
 */
static bool sdecustom = true;
module_param(sdecustom, bool, 0400);
MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
static int sde_kms_hw_init(struct msm_kms *kms);
static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
static int _sde_kms_mmu_init(struct sde_kms *sde_kms);
static int _sde_kms_register_events(struct msm_kms *kms,
		struct drm_mode_object *obj, u32 event, bool en);
static void sde_kms_handle_power_event(u32 event_type, void *usr);

bool sde_is_custom_client(void)
{
	return sdecustom;
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
void *sde_debugfs_get_root(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
		return NULL;

	priv = sde_kms->dev->dev_private;
	return priv->debug_root;
}

static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	void *p;
	int rc;
	void *debugfs_root;

	p = sde_hw_util_get_log_mask_ptr();
	if (!sde_kms || !p)
		return -EINVAL;

	debugfs_root = sde_debugfs_get_root(sde_kms);
	if (!debugfs_root)
		return -EINVAL;

	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0600, debugfs_root, p);

	(void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
	(void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);

	rc = sde_core_perf_debugfs_init(&sde_kms->perf, debugfs_root);
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		return rc;
	}

	sde_rm_debugfs_init(&sde_kms->rm, debugfs_root);

	if (sde_kms->catalog->qdss_count)
		debugfs_create_u32("qdss", 0600, debugfs_root,
				(u32 *)&sde_kms->qdss_enabled);

	debugfs_create_u32("pm_suspend_clk_dump", 0600, debugfs_root,
			(u32 *)&sde_kms->pm_suspend_clk_dump);
	debugfs_create_u32("hw_fence_status", 0600, debugfs_root,
			(u32 *)&sde_kms->debugfs_hw_fence);

	return 0;
}
static void sde_kms_debugfs_destroy(struct msm_kms *kms)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);

	/* don't need to NULL check debugfs_root */
	if (sde_kms) {
		sde_debugfs_vbif_destroy(sde_kms);
		sde_debugfs_core_irq_destroy(sde_kms);
	}
}

static int _sde_kms_dump_clks_state(struct sde_kms *sde_kms)
{
	int i;
	struct device *dev = sde_kms->dev->dev;

	SDE_INFO("runtime PM suspended:%d", pm_runtime_suspended(dev));

	for (i = 0; i < sde_kms->dsi_display_count; i++)
		dsi_display_dump_clks_state(sde_kms->dsi_displays[i]);

	return 0;
}
#else
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	return 0;
}

static void sde_kms_debugfs_destroy(struct msm_kms *kms)
{
}

static int _sde_kms_dump_clks_state(struct sde_kms *sde_kms)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */
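
/**
 * sde_kms_wait_for_frame_transfer_complete - wait for the current frame
 *	transfer to finish on every encoder attached to the given crtc.
 *	Video mode waits for VSYNC; command mode waits for PP_DONE, which
 *	is a no-op if the transfer is already complete.
 * @kms: pointer to the kms object
 * @crtc: crtc whose encoders should be waited on
 */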
static void sde_kms_wait_for_frame_transfer_complete(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state || !crtc->dev) {
		SDE_ERROR("invalid params\n");
		return;
	}

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	dev = crtc->dev;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Video Mode - Wait for VSYNC
		 * Cmd Mode - Wait for PP_DONE. Will be no-op if transfer is
		 * complete
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR(
				"[crtc: %d][enc: %d] wait for commit done returned %d\n",
				crtc->base.id, encoder->base.id, ret);
			break;
		}
	}
}
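
/**
 * _sde_kms_secure_ctrl_xin_clients - halt or release the VBIF xin clients
 *	listed in the catalog's sui_block_xin_mask and, when enabling, flag
 *	each plane's xin client for secure-UI control.
 * @sde_kms: pointer to sde_kms struct
 * @crtc: crtc on which the secure-UI transition is happening
 * @enable: true to halt the xin clients, false to release them
 */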
static int _sde_kms_secure_ctrl_xin_clients(struct sde_kms *sde_kms,
		struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_mdss_cfg *sde_cfg;
	struct drm_plane *plane;
	int i, ret;

	dev = sde_kms->dev;
	priv = dev->dev_private;
	sde_cfg = sde_kms->catalog;

	ret = sde_vbif_halt_xin_mask(sde_kms,
			sde_cfg->sui_block_xin_mask, enable);
	if (ret) {
		SDE_ERROR("failed to halt some xin-clients, ret:%d\n", ret);
		return ret;
	}

	if (enable) {
		for (i = 0; i < priv->num_planes; i++) {
			plane = priv->planes[i];
			sde_plane_secure_ctrl_xin_client(plane, crtc);
		}
	}

	return 0;
}
/**
 * _sde_kms_scm_call - makes secure channel call to switch the VMIDs
 * @sde_kms: Pointer to sde_kms struct
 * @vmid: switch the stage 2 translation to this VMID
 */
static int _sde_kms_scm_call(struct sde_kms *sde_kms, int vmid)
{
	struct device dummy = {};
	dma_addr_t dma_handle;
	uint32_t num_sids;
	uint32_t *sec_sid;
	struct sde_mdss_cfg *sde_cfg = sde_kms->catalog;
	int ret = 0, i;
	struct qtee_shm shm;
	bool qtee_en = qtee_shmbridge_is_enabled();
	phys_addr_t mem_addr;
	u64 mem_size;

	num_sids = sde_cfg->sec_sid_mask_count;
	if (!num_sids) {
		SDE_ERROR("secure SID masks not configured, vmid 0x%x\n", vmid);
		return -EINVAL;
	}

	if (qtee_en) {
		ret = qtee_shmbridge_allocate_shm(num_sids * sizeof(uint32_t),
				&shm);
		if (ret)
			return -ENOMEM;

		sec_sid = (uint32_t *) shm.vaddr;
		mem_addr = shm.paddr;
		/*
		 * SMMUSecureModeSwitch requires the size to be the number of
		 * SIDs, but shm allocates size in pages. Modify the args as
		 * per client requirement.
		 */
		mem_size = sizeof(uint32_t) * num_sids;
	} else {
		sec_sid = kcalloc(num_sids, sizeof(uint32_t), GFP_KERNEL);
		if (!sec_sid)
			return -ENOMEM;

		mem_addr = virt_to_phys(sec_sid);
		mem_size = sizeof(uint32_t) * num_sids;
	}

	for (i = 0; i < num_sids; i++) {
		sec_sid[i] = sde_cfg->sec_sid_mask[i];
		SDE_DEBUG("sid_mask[%d]: %d\n", i, sec_sid[i]);
	}

	ret = dma_coerce_mask_and_coherent(&dummy, DMA_BIT_MASK(64));
	if (ret) {
		SDE_ERROR("Failed to set dma mask for dummy dev %d\n", ret);
		goto map_error;
	}

	set_dma_ops(&dummy, NULL);

	dma_handle = dma_map_single(&dummy, sec_sid,
			num_sids * sizeof(uint32_t), DMA_TO_DEVICE);
	if (dma_mapping_error(&dummy, dma_handle)) {
		SDE_ERROR("dma_map_single for dummy dev failed vmid 0x%x\n",
				vmid);
		goto map_error;
	}

	SDE_DEBUG("calling scm_call for vmid 0x%x, num_sids %d, qtee_en %d",
			vmid, num_sids, qtee_en);

	ret = qcom_scm_mem_protect_sd_ctrl(MDP_DEVICE_ID, mem_addr,
			mem_size, vmid);
	if (ret)
		SDE_ERROR("Error: scm_call2, vmid %d, ret %d\n",
				vmid, ret);

	SDE_EVT32(MEM_PROTECT_SD_CTRL_SWITCH, MDP_DEVICE_ID, mem_size,
			vmid, qtee_en, num_sids, ret);

	dma_unmap_single(&dummy, dma_handle,
			num_sids * sizeof(uint32_t), DMA_TO_DEVICE);

map_error:
	if (qtee_en)
		qtee_shmbridge_free_shm(&shm);
	else
		kfree(sec_sid);

	return ret;
}
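
/**
 * _sde_kms_detach_all_cb - detach all SMMU context banks and switch the
 *	stage 2 translation to the given VMID. Reference counted so only the
 *	first caller performs the transition; rolled back on failure.
 * @sde_kms: pointer to sde_kms struct
 * @vmid: VMID to switch to after detaching
 *
 * Called from _sde_kms_secure_ctrl() below, e.g.:
 *
 *	ret = _sde_kms_detach_all_cb(sde_kms, VMID_CP_SEC_DISPLAY);
 *	if (!ret)
 *		smmu_state->state = DETACHED;
 */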
static int _sde_kms_detach_all_cb(struct sde_kms *sde_kms, u32 vmid)
{
	u32 ret;

	if (atomic_inc_return(&sde_kms->detach_all_cb) > 1)
		return 0;

	/* detach_all_contexts */
	ret = sde_kms_mmu_detach(sde_kms, false);
	if (ret) {
		SDE_ERROR("failed to detach all cb ret:%d\n", ret);
		goto mmu_error;
	}

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	return 0;

scm_error:
	sde_kms_mmu_attach(sde_kms, false);
mmu_error:
	atomic_dec(&sde_kms->detach_all_cb);
	return ret;
}
static int _sde_kms_attach_all_cb(struct sde_kms *sde_kms, u32 vmid,
		u32 old_vmid)
{
	u32 ret;

	if (atomic_dec_return(&sde_kms->detach_all_cb) != 0)
		return 0;

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	/* attach_all_contexts */
	ret = sde_kms_mmu_attach(sde_kms, false);
	if (ret) {
		SDE_ERROR("failed to attach all cb ret:%d\n", ret);
		goto mmu_error;
	}

	return 0;

mmu_error:
	_sde_kms_scm_call(sde_kms, old_vmid);
scm_error:
	atomic_inc(&sde_kms->detach_all_cb);
	return ret;
}
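
/**
 * _sde_kms_detach_sec_cb - detach only the secure SMMU context bank and
 *	switch the stage 2 translation to the given VMID. Reference counted
 *	so only the first caller performs the transition; rolled back on
 *	failure.
 * @sde_kms: pointer to sde_kms struct
 * @vmid: VMID to switch to after detaching
 */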
static int _sde_kms_detach_sec_cb(struct sde_kms *sde_kms, int vmid)
{
	u32 ret;

	if (atomic_inc_return(&sde_kms->detach_sec_cb) > 1)
		return 0;

	/* detach secure_context */
	ret = sde_kms_mmu_detach(sde_kms, true);
	if (ret) {
		SDE_ERROR("failed to detach sec cb ret:%d\n", ret);
		goto mmu_error;
	}

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	return 0;

scm_error:
	sde_kms_mmu_attach(sde_kms, true);
mmu_error:
	atomic_dec(&sde_kms->detach_sec_cb);
	return ret;
}
static int _sde_kms_attach_sec_cb(struct sde_kms *sde_kms, u32 vmid,
		u32 old_vmid)
{
	u32 ret;

	if (atomic_dec_return(&sde_kms->detach_sec_cb) != 0)
		return 0;

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	ret = sde_kms_mmu_attach(sde_kms, true);
	if (ret) {
		SDE_ERROR("failed to attach sec cb ret:%d\n", ret);
		goto mmu_error;
	}

	return 0;

mmu_error:
	_sde_kms_scm_call(sde_kms, old_vmid);
scm_error:
	atomic_inc(&sde_kms->detach_sec_cb);
	return ret;
}
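
/**
 * _sde_kms_sui_misr_ctrl - enable or disable secure-UI MISR checking: on
 *	enable, vote for power, set up the crtc MISR, and halt the blocked
 *	xin clients; on disable, undo the same in reverse order.
 * @sde_kms: pointer to sde_kms struct
 * @crtc: crtc on which MISR is set up
 * @enable: true to enable, false to disable
 */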
static int _sde_kms_sui_misr_ctrl(struct sde_kms *sde_kms,
		struct drm_crtc *crtc, bool enable)
{
	int ret;

	if (enable) {
		ret = pm_runtime_resume_and_get(sde_kms->dev->dev);
		if (ret < 0) {
			SDE_ERROR("failed to enable power resource %d\n", ret);
			SDE_EVT32(ret, SDE_EVTLOG_ERROR);
			return ret;
		}

		sde_crtc_misr_setup(crtc, true, 1);

		ret = _sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, true);
		if (ret) {
			sde_crtc_misr_setup(crtc, false, 0);
			pm_runtime_put_sync(sde_kms->dev->dev);
			return ret;
		}
	} else {
		_sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, false);
		sde_crtc_misr_setup(crtc, false, 0);
		pm_runtime_put_sync(sde_kms->dev->dev);
	}

	return 0;
}
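
/**
 * _sde_kms_secure_ctrl - drive the secure SMMU transition state machine:
 *	perform the attach/detach and VMID switch requested in smmu_state,
 *	wrapping it with secure-UI MISR enable/disable when requested, and
 *	restore the previous state on failure.
 * @sde_kms: pointer to sde_kms struct
 * @crtc: crtc on which the transition was requested
 * @post_commit: true if called after the atomic commit, false if before
 */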
static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
		bool post_commit)
{
	struct sde_kms_smmu_state_data *smmu_state;
	int old_smmu_state;
	int ret = 0;
	u32 vmid;

	if (!sde_kms || !crtc) {
		SDE_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	smmu_state = &sde_kms->smmu_state;
	old_smmu_state = smmu_state->state;

	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->transition_type,
			post_commit, smmu_state->sui_misr_state,
			smmu_state->secure_level, SDE_EVTLOG_FUNC_ENTRY);

	if ((!smmu_state->transition_type) ||
	    ((smmu_state->transition_type == POST_COMMIT) && !post_commit))
		/* Bail out */
		return 0;

	/* enable sui misr if requested, before the transition */
	if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ) {
		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, true);
		if (ret) {
			smmu_state->sui_misr_state = NONE;
			goto end;
		}
	}

	mutex_lock(&sde_kms->secure_transition_lock);
	switch (smmu_state->state) {
	case DETACH_ALL_REQ:
		ret = _sde_kms_detach_all_cb(sde_kms, VMID_CP_SEC_DISPLAY);
		if (!ret)
			smmu_state->state = DETACHED;
		break;

	case ATTACH_ALL_REQ:
		ret = _sde_kms_attach_all_cb(sde_kms, VMID_CP_PIXEL,
				VMID_CP_SEC_DISPLAY);
		if (!ret) {
			smmu_state->state = ATTACHED;
			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
		}
		break;

	case DETACH_SEC_REQ:
		vmid = (smmu_state->secure_level == SDE_DRM_SEC_ONLY) ?
				VMID_CP_SEC_DISPLAY : VMID_CP_CAMERA_PREVIEW;
		ret = _sde_kms_detach_sec_cb(sde_kms, vmid);
		if (!ret)
			smmu_state->state = DETACHED_SEC;
		break;

	case ATTACH_SEC_REQ:
		vmid = (smmu_state->secure_level == SDE_DRM_SEC_ONLY) ?
				VMID_CP_SEC_DISPLAY : VMID_CP_CAMERA_PREVIEW;
		ret = _sde_kms_attach_sec_cb(sde_kms, VMID_CP_PIXEL, vmid);
		if (!ret) {
			smmu_state->state = ATTACHED;
			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
		}
		break;

	default:
		SDE_ERROR("crtc%d: invalid smmu state %d transition type %d\n",
				DRMID(crtc), smmu_state->state,
				smmu_state->transition_type);
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&sde_kms->secure_transition_lock);

	/* disable sui misr if requested, after the transition */
	if (!ret && (smmu_state->sui_misr_state == SUI_MISR_DISABLE_REQ)) {
		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
		if (ret)
			goto end;
	}

end:
	smmu_state->transition_error = false;
	if (ret) {
		smmu_state->transition_error = true;
		SDE_ERROR(
			"crtc%d: req_state %d, new_state %d, sec_lvl %d, ret %d\n",
			DRMID(crtc), old_smmu_state, smmu_state->state,
			smmu_state->secure_level, ret);
		smmu_state->state = smmu_state->prev_state;
		smmu_state->secure_level = smmu_state->prev_secure_level;
		if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ)
			_sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
	}

	SDE_DEBUG("crtc %d: req_state %d, new_state %d, sec_lvl %d, ret %d\n",
			DRMID(crtc), old_smmu_state, smmu_state->state,
			smmu_state->secure_level, ret);
	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->prev_state,
			smmu_state->transition_type,
			smmu_state->transition_error,
			smmu_state->secure_level, smmu_state->prev_secure_level,
			smmu_state->sui_misr_state, ret, SDE_EVTLOG_FUNC_EXIT);

	smmu_state->sui_misr_state = NONE;
	smmu_state->transition_type = NONE;

	return ret;
}
static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	int i, ops = 0, ret = 0;
	bool old_valid_fb = false;
	struct sde_kms_smmu_state_data *smmu_state = &sde_kms->smmu_state;

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		if (!crtc->state || !crtc->state->active)
			continue;
		/*
		 * It is safe to assume only one active crtc and compatible
		 * translation modes on the planes staged on this crtc;
		 * otherwise validation would have failed. For this crtc:
		 */

		/*
		 * 1. Check if old state on the CRTC has planes
		 * staged with valid fbs
		 */
		for_each_old_plane_in_state(state, plane, plane_state, i) {
			if (!plane_state->crtc)
				continue;
			if (plane_state->fb) {
				old_valid_fb = true;
				break;
			}
		}

		/*
		 * 2. Get the operations needed to be performed before
		 * secure transition can be initiated.
		 */
		ops = sde_crtc_get_secure_transition_ops(crtc,
				old_crtc_state, old_valid_fb);
		if (ops < 0) {
			SDE_ERROR("invalid secure operations %x\n", ops);
			return ops;
		}

		if (!ops) {
			smmu_state->transition_error = false;
			goto no_ops;
		}

		SDE_DEBUG("%d:secure operations(%x) started on state:%pK\n",
				crtc->base.id, ops, crtc->state);
		SDE_EVT32(DRMID(crtc), ops, crtc->state, old_valid_fb);

		/* 3. Perform operations needed for secure transition */
		if (ops & SDE_KMS_OPS_WAIT_FOR_TX_DONE) {
			SDE_DEBUG("wait_for_transfer_done\n");
			sde_kms_wait_for_frame_transfer_complete(kms, crtc);
		}
		if (ops & SDE_KMS_OPS_CLEANUP_PLANE_FB) {
			SDE_DEBUG("cleanup planes\n");
			drm_atomic_helper_cleanup_planes(dev, state);
			for_each_oldnew_plane_in_state(state, plane,
					old_plane_state, new_plane_state, i)
				sde_plane_destroy_fb(old_plane_state);
		}
		if (ops & SDE_KMS_OPS_SECURE_STATE_CHANGE) {
			SDE_DEBUG("secure ctrl\n");
			_sde_kms_secure_ctrl(sde_kms, crtc, false);
		}
		if (ops & SDE_KMS_OPS_PREPARE_PLANE_FB) {
			SDE_DEBUG("prepare planes %d",
					crtc->state->plane_mask);
			drm_atomic_crtc_for_each_plane(plane, crtc) {
				const struct drm_plane_helper_funcs *funcs;

				plane_state = plane->state;
				funcs = plane->helper_private;

				SDE_DEBUG("psde:%d FB[%u]\n",
						plane->base.id,
						plane->fb->base.id);
				if (!funcs)
					continue;

				if (funcs->prepare_fb) {
					ret = funcs->prepare_fb(plane,
							plane_state);
					if (ret)
						return ret;
				}
			}
		}
		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
		SDE_DEBUG("secure operations completed\n");
	}

no_ops:
	return 0;
}
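
/**
 * _sde_kms_release_shared_buffer - return the continuous splash memory
 *	reserved by the bootloader to the kernel page allocator, skipping
 *	the portion set aside for ramdump when its base address matches.
 * @mem_addr: physical base address of the splash buffer
 * @splash_buffer_size: size of the splash buffer
 * @ramdump_base: physical base address of the ramdump region
 * @ramdump_buffer_size: size of the ramdump region
 */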
static int _sde_kms_release_shared_buffer(unsigned int mem_addr,
		unsigned int splash_buffer_size,
		unsigned int ramdump_base,
		unsigned int ramdump_buffer_size)
{
	unsigned long pfn_start, pfn_end, pfn_idx;
	int ret = 0;

	if (!mem_addr || !splash_buffer_size) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	/* leave ramdump memory only if base address matches */
	if (ramdump_base == mem_addr &&
			ramdump_buffer_size <= splash_buffer_size) {
		mem_addr += ramdump_buffer_size;
		splash_buffer_size -= ramdump_buffer_size;
	}

	pfn_start = mem_addr >> PAGE_SHIFT;
	pfn_end = (mem_addr + splash_buffer_size) >> PAGE_SHIFT;

	ret = memblock_free(mem_addr, splash_buffer_size);
	if (ret) {
		SDE_ERROR("continuous splash memory free failed:%d\n", ret);
		return ret;
	}

	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
		free_reserved_page(pfn_to_page(pfn_idx));

	return ret;
}
static int _sde_kms_one2one_mem_map_ipcc_reg(struct sde_kms *sde_kms,
		u32 buf_size, unsigned long buf_base)
{
	struct msm_mmu *mmu = NULL;
	int ret = 0;

	if (!sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]
			|| !sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]->mmu) {
		SDE_ERROR("aspace not found for sde kms node\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]->mmu;
	if (!mmu) {
		SDE_ERROR("mmu not found for aspace\n");
		return -EINVAL;
	}

	if (!mmu->funcs || !mmu->funcs->one_to_one_map) {
		SDE_ERROR("invalid input params for map\n");
		return -EINVAL;
	}

	ret = mmu->funcs->one_to_one_map(mmu, buf_base, buf_base, buf_size,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		SDE_ERROR("one2one memory smmu map failed:%d\n", ret);

	return ret;
}
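
/**
 * _sde_kms_splash_mem_get - identity-map (one-to-one) the given
 *	continuous-splash region so that the scanout set up by the
 *	bootloader stays valid across the handoff. Reference counted so the
 *	region is mapped only once.
 * @sde_kms: pointer to sde_kms struct
 * @splash: splash memory region to map
 */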
static int _sde_kms_splash_mem_get(struct sde_kms *sde_kms,
		struct sde_splash_mem *splash)
{
	struct msm_mmu *mmu = NULL;
	int ret = 0;

	if (!sde_kms->aspace[0]) {
		SDE_ERROR("aspace not found for sde kms node\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[0]->mmu;
	if (!mmu) {
		SDE_ERROR("mmu not found for aspace\n");
		return -EINVAL;
	}

	if (!splash || !mmu->funcs || !mmu->funcs->one_to_one_map) {
		SDE_ERROR("invalid input params for map\n");
		return -EINVAL;
	}

	if (!splash->ref_cnt) {
		ret = mmu->funcs->one_to_one_map(mmu, splash->splash_buf_base,
				splash->splash_buf_base,
				splash->splash_buf_size,
				IOMMU_READ | IOMMU_NOEXEC);
		if (ret)
			SDE_ERROR("splash memory smmu map failed:%d\n", ret);
	}

	splash->ref_cnt++;
	SDE_DEBUG("one2one mapping done for base:%lx size:%x ref_cnt:%d\n",
			splash->splash_buf_base,
			splash->splash_buf_size,
			splash->ref_cnt);

	return ret;
}
static int _sde_kms_map_all_splash_regions(struct sde_kms *sde_kms)
{
	int i = 0;
	int ret = 0;
	struct sde_splash_mem *region;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) {
		region = sde_kms->splash_data.splash_display[i].splash;
		ret = _sde_kms_splash_mem_get(sde_kms, region);
		if (ret)
			return ret;

		/* Demura is optional and need not exist */
		region = sde_kms->splash_data.splash_display[i].demura;
		if (region) {
			ret = _sde_kms_splash_mem_get(sde_kms, region);
			if (ret)
				return ret;
		}
	}

	return ret;
}
static int _sde_kms_splash_mem_put(struct sde_kms *sde_kms,
		struct sde_splash_mem *splash)
{
	struct msm_mmu *mmu = NULL;
	int rc = 0;

	if (!sde_kms || !sde_kms->aspace[0] || !sde_kms->aspace[0]->mmu) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[0]->mmu;

	if (!splash || !splash->ref_cnt ||
			!mmu || !mmu->funcs || !mmu->funcs->one_to_one_unmap)
		return -EINVAL;

	splash->ref_cnt--;

	SDE_DEBUG("splash base:%lx refcnt:%d\n",
			splash->splash_buf_base, splash->ref_cnt);

	if (!splash->ref_cnt) {
		mmu->funcs->one_to_one_unmap(mmu, splash->splash_buf_base,
				splash->splash_buf_size);
		rc = _sde_kms_release_shared_buffer(splash->splash_buf_base,
				splash->splash_buf_size, splash->ramdump_base,
				splash->ramdump_size);
		splash->splash_buf_base = 0;
		splash->splash_buf_size = 0;
	}

	return rc;
}
static int _sde_kms_unmap_all_splash_regions(struct sde_kms *sde_kms)
{
	int i = 0;
	int ret = 0, failure = 0;
	struct sde_splash_mem *region;

	if (!sde_kms || !sde_kms->splash_data.num_splash_regions)
		return -EINVAL;

	for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) {
		region = sde_kms->splash_data.splash_display[i].splash;
		ret = _sde_kms_splash_mem_put(sde_kms, region);
		if (ret) {
			failure = 1;
			pr_err("Error unmapping splash mem for display %d\n",
					i);
		}

		/* Demura is optional and need not exist */
		region = sde_kms->splash_data.splash_display[i].demura;
		if (region) {
			ret = _sde_kms_splash_mem_put(sde_kms, region);
			if (ret) {
				failure = 1;
				pr_err("Error unmapping demura mem for display %d\n",
						i);
			}
		}
	}

	if (failure)
		ret = -EINVAL;

	return ret;
}
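
/**
 * _sde_kms_get_blank - map the connector's low-power (LP) property and the
 *	crtc active state to the corresponding DRM panel blank event.
 * @crtc_state: crtc state to check for activity
 * @conn_state: connector state carrying the LP property
 */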
static int _sde_kms_get_blank(struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	int lp_mode, blank;

	if (crtc_state->active)
		lp_mode = sde_connector_get_property(conn_state,
				CONNECTOR_PROP_LP);
	else
		lp_mode = SDE_MODE_DPMS_OFF;

	switch (lp_mode) {
	case SDE_MODE_DPMS_ON:
		blank = DRM_PANEL_EVENT_UNBLANK;
		break;
	case SDE_MODE_DPMS_LP1:
	case SDE_MODE_DPMS_LP2:
		blank = DRM_PANEL_EVENT_BLANK_LP;
		break;
	case SDE_MODE_DPMS_OFF:
	default:
		blank = DRM_PANEL_EVENT_BLANK;
		break;
	}

	return blank;
}
static void _sde_kms_drm_check_dpms(struct drm_atomic_state *old_state,
		bool is_pre_commit)
{
	struct panel_event_notification notification;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	struct sde_connector *c_conn;
	int i, old_mode, new_mode, old_fps, new_fps;
	enum panel_event_notifier_tag panel_type;

	for_each_old_connector_in_state(old_state, connector,
			old_conn_state, i) {
		crtc = connector->state->crtc ? connector->state->crtc :
				old_conn_state->crtc;
		if (!crtc)
			continue;

		new_fps = drm_mode_vrefresh(&crtc->state->mode);
		new_mode = _sde_kms_get_blank(crtc->state, connector->state);
		if (old_conn_state->crtc) {
			old_crtc_state = drm_atomic_get_existing_crtc_state(
					old_state, old_conn_state->crtc);

			old_fps = drm_mode_vrefresh(&old_crtc_state->mode);
			old_mode = _sde_kms_get_blank(old_crtc_state,
					old_conn_state);
		} else {
			old_fps = 0;
			old_mode = DRM_PANEL_EVENT_BLANK;
		}

		if ((old_mode != new_mode) || (old_fps != new_fps)) {
			c_conn = to_sde_connector(connector);
			SDE_EVT32(old_mode, new_mode, old_fps, new_fps,
					c_conn->panel, crtc->state->active,
					old_conn_state->crtc);
			pr_debug("change detected for connector:%s (power mode %d->%d, fps %d->%d)\n",
					c_conn->name, old_mode, new_mode,
					old_fps, new_fps);

			/*
			 * If suspend resume and fps change are happening
			 * at the same time, give preference to power mode
			 * changes rather than fps change.
			 */
			if ((old_mode == new_mode) && (old_fps != new_fps))
				new_mode = DRM_PANEL_EVENT_FPS_CHANGE;

			if (!c_conn->panel)
				continue;

			panel_type = sde_encoder_is_primary_display(
					connector->encoder) ?
					PANEL_EVENT_NOTIFICATION_PRIMARY :
					PANEL_EVENT_NOTIFICATION_SECONDARY;

			notification.notif_type = new_mode;
			notification.panel = c_conn->panel;
			notification.notif_data.old_fps = old_fps;
			notification.notif_data.new_fps = new_fps;
			notification.notif_data.early_trigger = is_pre_commit;
			panel_event_notification_trigger(panel_type,
					&notification);
		}
	}
}
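
/**
 * sde_kms_vm_get_vm_crtc - find the crtc in the atomic state whose
 *	CRTC_PROP_VM_REQ_STATE property carries a VM request other than
 *	VM_REQ_NONE; returns NULL when no such crtc exists.
 * @state: atomic state to scan
 */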
static struct drm_crtc *sde_kms_vm_get_vm_crtc(
		struct drm_atomic_state *state)
{
	int i;
	enum sde_crtc_vm_req vm_req = VM_REQ_NONE;
	struct drm_crtc *crtc, *vm_crtc = NULL;
	struct drm_crtc_state *new_cstate, *old_cstate;
	struct sde_crtc_state *vm_cstate;

	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		if (!new_cstate->active && !old_cstate->active)
			continue;

		vm_cstate = to_sde_crtc_state(new_cstate);
		vm_req = sde_crtc_get_property(vm_cstate,
				CRTC_PROP_VM_REQ_STATE);
		if (vm_req != VM_REQ_NONE) {
			SDE_DEBUG("valid vm request:%d found on crtc-%d\n",
					vm_req, crtc->base.id);
			vm_crtc = crtc;
			break;
		}
	}

	return vm_crtc;
}
int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *ddev;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct sde_vm_ops *vm_ops;
	struct sde_crtc_state *cstate;
	struct drm_connector_list_iter iter;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	ddev = sde_kms->dev;

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return -EINVAL;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return 0;

	/* enable MDSS irq line */
	sde_irq_update(&sde_kms->base, true);

	/* clear the stale IRQ status bits */
	if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);

	/* enable the display path IRQ's */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_irq_control(encoder, true);
	}

	/* Schedule ESD work */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		if (drm_connector_mask(connector) & crtc->state->connector_mask)
			sde_connector_schedule_status_work(connector, true);
	drm_connector_list_iter_end(&iter);

	/* enable vblank events */
	drm_crtc_vblank_on(crtc);

	sde_dbg_set_hw_ownership_status(true);

	/* handle non-SDE pre_acquire */
	if (vm_ops->vm_client_post_acquire)
		rc = vm_ops->vm_client_post_acquire(sde_kms);

	return rc;
}
void sde_kms_vm_set_sid(struct sde_kms *sde_kms, u32 vm)
{
	struct drm_plane *plane;
	struct drm_device *ddev;
	struct sde_mdss_cfg *sde_cfg;

	ddev = sde_kms->dev;
	sde_cfg = sde_kms->catalog;

	list_for_each_entry(plane, &ddev->mode_config.plane_list, head)
		sde_plane_set_sid(plane, vm);

	if (sde_kms->hw_sid && sde_kms->hw_sid->ops.set_vm_sid)
		sde_kms->hw_sid->ops.set_vm_sid(sde_kms->hw_sid, vm,
				sde_kms->catalog);
}
int sde_kms_vm_trusted_prepare_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct sde_crtc_state *cstate;
	enum sde_crtc_vm_req vm_req;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return 0;

	/* Clear the stale IRQ status bits */
	if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);

	/* Program the SID's for the trusted VM */
	sde_kms_vm_set_sid(sde_kms, 1);

	sde_dbg_set_hw_ownership_status(true);

	return 0;
}
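
/**
 * sde_kms_prepare_commit - kms hook called before an atomic commit is
 *	applied to hardware: votes for power, checks encoders for a pending
 *	hw reset, prepares any secure transition, runs the VM prepare hook,
 *	and sends early panel power-mode/fps notifications.
 * @kms: pointer to the kms object
 * @state: atomic state being committed
 */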
static void sde_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_device *dev;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct sde_vm_ops *vm_ops;
	int i, rc;

	if (!kms)
		return;
	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	if (!dev || !dev->dev_private)
		return;
	priv = dev->dev_private;

	SDE_ATRACE_BEGIN("prepare_commit");
	rc = pm_runtime_resume_and_get(sde_kms->dev->dev);
	if (rc < 0) {
		SDE_ERROR("failed to enable power resources %d\n", rc);
		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
		goto end;
	}

	if (sde_kms->first_kickoff) {
		sde_power_scale_reg_bus(&priv->phandle, VOTE_INDEX_HIGH, false);
		sde_kms->first_kickoff = false;
	}

	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		drm_for_each_encoder_mask(encoder, dev, cstate->encoder_mask) {
			if (sde_encoder_prepare_commit(encoder) == -ETIMEDOUT) {
				SDE_ERROR("crtc:%d, initiating hw reset\n",
						DRMID(crtc));
				sde_encoder_needs_hw_reset(encoder);
				sde_crtc_set_needs_hw_reset(crtc);
			}
		}
	}

	/*
	 * NOTE: for secure use cases we want to apply the new HW
	 * configuration only after completing the preparation for the
	 * secure transition below, if any transition is required.
	 */
	sde_kms_prepare_secure_transition(kms, state);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		goto end_vm;

	if (vm_ops->vm_prepare_commit)
		vm_ops->vm_prepare_commit(sde_kms, state);

end_vm:
	_sde_kms_drm_check_dpms(state, true);
end:
	SDE_ATRACE_END("prepare_commit");
}
static void sde_kms_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state)
		return;
	sde_kms = to_sde_kms(kms);

	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_commit");
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active) {
			SDE_EVT32(DRMID(crtc), old_state);
			sde_crtc_commit_kickoff(crtc, old_crtc_state);
		}
	}
	SDE_ATRACE_END("sde_kms_commit");
}
static void _sde_kms_free_splash_display_data(struct sde_kms *sde_kms,
		struct sde_splash_display *splash_display)
{
	if (!sde_kms || !splash_display ||
			!sde_kms->splash_data.num_splash_displays)
		return;

	if (sde_kms->splash_data.num_splash_regions) {
		_sde_kms_splash_mem_put(sde_kms, splash_display->splash);
		if (splash_display->demura)
			_sde_kms_splash_mem_put(sde_kms,
					splash_display->demura);
	}

	sde_kms->splash_data.num_splash_displays--;
	SDE_DEBUG("cont_splash handoff done, remaining:%d\n",
			sde_kms->splash_data.num_splash_displays);
	memset(splash_display, 0x0, sizeof(struct sde_splash_display));
}
static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms,
		struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;
	struct sde_splash_display *splash_display;
	int i;

	if (!sde_kms || !crtc)
		return;
	priv = sde_kms->dev->dev_private;

	if (!crtc->state->active || !sde_kms->splash_data.num_splash_displays)
		return;

	SDE_EVT32(DRMID(crtc), crtc->state->active,
			sde_kms->splash_data.num_splash_displays);

	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
		splash_display = &sde_kms->splash_data.splash_display[i];
		if (splash_display->encoder &&
				crtc == splash_display->encoder->crtc)
			break;
	}

	if (i >= MAX_DSI_DISPLAYS)
		return;

	if (splash_display->cont_splash_enabled) {
		sde_encoder_update_caps_for_cont_splash(splash_display->encoder,
				splash_display, false);
		_sde_kms_free_splash_display_data(sde_kms, splash_display);
	}

	/* remove the votes if all displays are done with splash */
	if (!sde_kms->splash_data.num_splash_displays) {
		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
			sde_power_data_bus_set_quota(&priv->phandle, i,
					SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
					priv->phandle.ib_quota[i] ? priv->phandle.ib_quota[i] :
					SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);

		pm_runtime_put_sync(sde_kms->dev->dev);
	}
}
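/**
 * sde_kms_cancel_delayed_work - cancel pending delayed work on a display path
 * @crtc: Pointer to drm crtc structure
 *
 * Cancels the CRTC-level delayed work, the ESD status work of every connector
 * attached to this crtc, and the idle power-collapse work of every attached
 * encoder that is not in clone mode.
 */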
static void sde_kms_cancel_delayed_work(struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_encoder *encoder;

	/* Cancel CRTC work */
	sde_crtc_cancel_delayed_work(crtc);

	/* Cancel ESD work */
	drm_connector_list_iter_begin(crtc->dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		if (drm_connector_mask(connector) & crtc->state->connector_mask)
			sde_connector_schedule_status_work(connector, false);
	drm_connector_list_iter_end(&iter);

	/* Cancel Idle-PC work */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_cancel_delayed_work(encoder);
	}
}
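/**
 * sde_kms_vm_pre_release - quiesce the display path before a VM release
 * @sde_kms: Pointer to sde kms structure
 * @state: Pointer to the atomic state being committed
 * @is_primary: true when invoked from the primary VM release path
 *
 * Waits for the in-flight frame transfer, drops debug HW ownership, cancels
 * delayed work and disables the encoder IRQs. On the primary VM it also
 * disables vblank events and resets the CRTC software state.
 * Returns: Zero on success
 */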
int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
		struct drm_atomic_state *state, bool is_primary)
{
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	int rc = 0;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	/* if a vm_req is set, an enabled CRTC in this commit is guaranteed */
	sde_kms_wait_for_frame_transfer_complete(&sde_kms->base, crtc);

	sde_dbg_set_hw_ownership_status(false);

	sde_kms_cancel_delayed_work(crtc);

	/* disable SDE encoder IRQs */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_irq_control(encoder, false);
	}

	if (is_primary) {
		/* disable vblank events */
		drm_crtc_vblank_off(crtc);

		/* reset sw state */
		sde_crtc_reset_sw_state(crtc);
	}

	return rc;
}
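/**
 * sde_kms_vm_trusted_post_commit - hand the HW back after a trusted-VM release
 * @sde_kms: Pointer to sde kms structure
 * @state: Pointer to the committed atomic state
 *
 * On a VM_REQ_RELEASE commit, runs the pre-release sequence, restores the
 * default SIDs and releases the HW through the vm_release op under the VM
 * lock.
 * Returns: Zero on success, error code otherwise
 */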
int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct sde_vm_ops *vm_ops;
	struct drm_crtc *crtc;
	struct sde_crtc_state *cstate;
	struct drm_crtc_state *new_cstate;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	if (!sde_kms || !sde_vm_is_enabled(sde_kms))
		return -EINVAL;

	vm_ops = sde_vm_get_ops(sde_kms);

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_RELEASE)
		return 0;

	sde_kms_vm_pre_release(sde_kms, state, false);
	sde_kms_vm_set_sid(sde_kms, 0);

	sde_vm_lock(sde_kms);
	if (vm_ops->vm_release)
		rc = vm_ops->vm_release(sde_kms);
	sde_vm_unlock(sde_kms);

	return rc;
}
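/**
 * sde_kms_vm_primary_post_commit - release the HW from the primary VM
 * @sde_kms: Pointer to sde kms structure
 * @state: Pointer to the committed atomic state
 *
 * On a VM_REQ_RELEASE commit, performs the SDE pre-release, hands off the
 * color processing state, lets non-SDE clients release their resources,
 * disables the IRQ line and finally releases the HW through vm_release.
 * Returns: Zero on success, error code otherwise
 */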
int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct sde_vm_ops *vm_ops;
	struct sde_crtc_state *cstate;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	if (!sde_kms || !sde_vm_is_enabled(sde_kms))
		return -EINVAL;

	vm_ops = sde_vm_get_ops(sde_kms);

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_RELEASE)
		return 0;

	/* handle SDE pre-release */
	rc = sde_kms_vm_pre_release(sde_kms, state, true);
	if (rc) {
		SDE_ERROR("sde vm pre_release failed, rc=%d\n", rc);
		goto exit;
	}

	/* properly handoff color processing features */
	sde_cp_crtc_vm_primary_handoff(crtc);

	sde_vm_lock(sde_kms);

	/* handle non-SDE clients pre-release */
	if (vm_ops->vm_client_pre_release) {
		rc = vm_ops->vm_client_pre_release(sde_kms);
		if (rc) {
			SDE_ERROR("sde vm client pre_release failed, rc=%d\n",
					rc);
			sde_vm_unlock(sde_kms);
			goto exit;
		}
	}

	/* disable IRQ line */
	sde_irq_update(&sde_kms->base, false);

	/* release HW */
	if (vm_ops->vm_release) {
		rc = vm_ops->vm_release(sde_kms);
		if (rc)
			SDE_ERROR("sde vm release failed, rc=%d\n", rc);
	}
	sde_vm_unlock(sde_kms);

	_sde_crtc_vm_release_notify(crtc);

exit:
	return rc;
}
static void sde_kms_complete_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state;
	struct msm_display_conn_params params;
	struct sde_vm_ops *vm_ops;
	int i, rc = 0;

	if (!kms || !old_state)
		return;
	sde_kms = to_sde_kms(kms);

	if (!sde_kms->dev || !sde_kms->dev->dev_private)
		return;
	priv = sde_kms->dev->dev_private;

	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_complete_commit");
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		sde_crtc_complete_commit(crtc, old_crtc_state);

		/* complete secure transitions if any */
		if (sde_kms->smmu_state.transition_type == POST_COMMIT)
			_sde_kms_secure_ctrl(sde_kms, crtc, true);
	}

	for_each_old_connector_in_state(old_state, connector,
			old_conn_state, i) {
		struct sde_connector *c_conn;

		c_conn = to_sde_connector(connector);
		if (!c_conn->ops.post_kickoff)
			continue;

		memset(&params, 0, sizeof(params));

		sde_connector_complete_qsync_commit(connector, &params);

		rc = c_conn->ops.post_kickoff(connector, &params);
		if (rc) {
			pr_err("Connector Post kickoff failed rc=%d\n",
					rc);
		}
	}

	vm_ops = sde_vm_get_ops(sde_kms);
	if (vm_ops && vm_ops->vm_post_commit) {
		rc = vm_ops->vm_post_commit(sde_kms, old_state);
		if (rc)
			SDE_ERROR("vm post commit failed, rc = %d\n",
					rc);
	}

	_sde_kms_drm_check_dpms(old_state, false);

	pm_runtime_put_sync(sde_kms->dev->dev);

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
		_sde_kms_release_splash_resource(sde_kms, crtc);

	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
	SDE_ATRACE_END("sde_kms_complete_commit");
}
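/**
 * sde_kms_wait_for_commit_done - wait for the frame push on a crtc to finish
 * @kms: Pointer to the kms structure
 * @crtc: Pointer to the drm crtc whose commit is being waited on
 *
 * Waits on every encoder driving this crtc, including a CWB encoder that is
 * being disabled, for either TX_COMPLETE or COMMIT_DONE, and requests a
 * frame reset on timeout.
 */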
static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct sde_kms *sde_kms;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;
	bool cwb_disabling;

	if (!kms || !crtc || !crtc->state) {
		SDE_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;
	sde_kms = to_sde_kms(kms);

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_wait_for_commit_done");
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		cwb_disabling = false;
		if (encoder->crtc != crtc) {
			cwb_disabling = sde_encoder_is_cwb_disabling(encoder,
					crtc);
			if (!cwb_disabling)
				continue;
		}

		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, cwb_disabling ?
				MSM_ENC_TX_COMPLETE : MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR("crtc:%d, enc:%d, cwb_d:%d, wait for commit done failed ret:%d\n",
					DRMID(crtc), DRMID(encoder), cwb_disabling, ret);
			SDE_EVT32(DRMID(crtc), DRMID(encoder), cwb_disabling,
					ret, SDE_EVTLOG_ERROR);
			sde_crtc_request_frame_reset(crtc, encoder);
			break;
		}

		sde_crtc_complete_flip(crtc, NULL);

		if (cwb_disabling)
			sde_encoder_virt_reset(encoder);
	}

	/* avoid system cache update to set rd-noalloc bit when NSE feature is enabled */
	if (!test_bit(SDE_FEATURE_SYS_CACHE_NSE, sde_kms->catalog->features))
		sde_crtc_static_cache_read_kickoff(crtc);

	SDE_ATRACE_END("sde_kms_wait_for_commit_done");
}
static void sde_kms_prepare_fence(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
		SDE_ERROR("invalid argument(s)\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_prepare_fence");

	/* old_state actually contains updated crtc pointers */
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active || crtc->state->active_changed)
			sde_crtc_prepare_commit(crtc, old_crtc_state);
	}

	SDE_ATRACE_END("sde_kms_prepare_fence");
}
/**
 * _sde_kms_get_displays - query for underlying display handles and cache them
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 */
static int _sde_kms_get_displays(struct sde_kms *sde_kms)
{
	int rc = -ENOMEM;

	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return -EINVAL;
	}

	/* dsi */
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
	if (sde_kms->dsi_display_count) {
		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->dsi_displays) {
			SDE_ERROR("failed to allocate dsi displays\n");
			goto exit_deinit_dsi;
		}
		sde_kms->dsi_display_count =
			dsi_display_get_active_displays(sde_kms->dsi_displays,
					sde_kms->dsi_display_count);
	}

	/* wb */
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
	if (sde_kms->wb_display_count) {
		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->wb_displays) {
			SDE_ERROR("failed to allocate wb displays\n");
			goto exit_deinit_wb;
		}
		sde_kms->wb_display_count =
			wb_display_get_displays(sde_kms->wb_displays,
					sde_kms->wb_display_count);
	}

	/* dp */
	sde_kms->dp_displays = NULL;
	sde_kms->dp_display_count = dp_display_get_num_of_displays();
	if (sde_kms->dp_display_count) {
		sde_kms->dp_displays = kcalloc(sde_kms->dp_display_count,
				sizeof(void *), GFP_KERNEL);
		if (!sde_kms->dp_displays) {
			SDE_ERROR("failed to allocate dp displays\n");
			goto exit_deinit_dp;
		}
		sde_kms->dp_display_count =
			dp_display_get_displays(sde_kms->dp_displays,
					sde_kms->dp_display_count);

		sde_kms->dp_stream_count = dp_display_get_num_of_streams();
	}
	return 0;

exit_deinit_dp:
	kfree(sde_kms->dp_displays);
	sde_kms->dp_stream_count = 0;
	sde_kms->dp_display_count = 0;
	sde_kms->dp_displays = NULL;

exit_deinit_wb:
	kfree(sde_kms->wb_displays);
	sde_kms->wb_display_count = 0;
	sde_kms->wb_displays = NULL;

exit_deinit_dsi:
	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_display_count = 0;
	sde_kms->dsi_displays = NULL;
	return rc;
}
/**
 * _sde_kms_release_displays - release cache of underlying display handles
 * @sde_kms: Pointer to sde kms structure
 */
static void _sde_kms_release_displays(struct sde_kms *sde_kms)
{
	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return;
	}

	kfree(sde_kms->wb_displays);
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = 0;

	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = 0;

	/* dp handles are cached by _sde_kms_get_displays() as well */
	kfree(sde_kms->dp_displays);
	sde_kms->dp_displays = NULL;
	sde_kms->dp_display_count = 0;
	sde_kms->dp_stream_count = 0;
}
/**
 * _sde_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 */
static int _sde_kms_setup_displays(struct drm_device *dev,
		struct msm_drm_private *priv,
		struct sde_kms *sde_kms)
{
	static const struct sde_connector_ops dsi_ops = {
		.set_info_blob = dsi_conn_set_info_blob,
		.detect = dsi_conn_detect,
		.get_modes = dsi_connector_get_modes,
		.pre_destroy = dsi_connector_put_modes,
		.mode_valid = dsi_conn_mode_valid,
		.get_info = dsi_display_get_info,
		.set_backlight = dsi_display_set_backlight,
		.soft_reset = dsi_display_soft_reset,
		.pre_kickoff = dsi_conn_pre_kickoff,
		.clk_ctrl = dsi_display_clk_ctrl,
		.set_power = dsi_display_set_power,
		.get_mode_info = dsi_conn_get_mode_info,
		.get_dst_format = dsi_display_get_dst_format,
		.post_kickoff = dsi_conn_post_kickoff,
		.check_status = dsi_display_check_status,
		.enable_event = dsi_conn_enable_event,
		.cmd_transfer = dsi_display_cmd_transfer,
		.cont_splash_config = dsi_display_cont_splash_config,
		.cont_splash_res_disable = dsi_display_cont_splash_res_disable,
		.get_panel_vfp = dsi_display_get_panel_vfp,
		.get_default_lms = dsi_display_get_default_lms,
		.cmd_receive = dsi_display_cmd_receive,
		.install_properties = NULL,
		.set_allowed_mode_switch = dsi_conn_set_allowed_mode_switch,
		.set_dyn_bit_clk = dsi_conn_set_dyn_bit_clk,
		.get_qsync_min_fps = dsi_conn_get_qsync_min_fps,
		.get_avr_step_req = dsi_display_get_avr_step_req_fps,
		.prepare_commit = dsi_conn_prepare_commit,
		.set_submode_info = dsi_conn_set_submode_blob_info,
		.get_num_lm_from_mode = dsi_conn_get_lm_from_mode,
		.update_transfer_time = dsi_display_update_transfer_time,
	};
	static const struct sde_connector_ops wb_ops = {
		.post_init = sde_wb_connector_post_init,
		.set_info_blob = sde_wb_connector_set_info_blob,
		.detect = sde_wb_connector_detect,
		.get_modes = sde_wb_connector_get_modes,
		.set_property = sde_wb_connector_set_property,
		.get_info = sde_wb_get_info,
		.soft_reset = NULL,
		.get_mode_info = sde_wb_get_mode_info,
		.get_dst_format = NULL,
		.check_status = NULL,
		.cmd_transfer = NULL,
		.cont_splash_config = NULL,
		.cont_splash_res_disable = NULL,
		.get_panel_vfp = NULL,
		.cmd_receive = NULL,
		.install_properties = NULL,
		.set_dyn_bit_clk = NULL,
		.set_allowed_mode_switch = NULL,
		.update_transfer_time = NULL,
	};
	static const struct sde_connector_ops dp_ops = {
		.post_init = dp_connector_post_init,
		.detect = dp_connector_detect,
		.get_modes = dp_connector_get_modes,
		.atomic_check = dp_connector_atomic_check,
		.mode_valid = dp_connector_mode_valid,
		.get_info = dp_connector_get_info,
		.get_mode_info = dp_connector_get_mode_info,
		.post_open = dp_connector_post_open,
		.check_status = NULL,
		.set_colorspace = dp_connector_set_colorspace,
		.config_hdr = dp_connector_config_hdr,
		.cmd_transfer = NULL,
		.cont_splash_config = NULL,
		.cont_splash_res_disable = NULL,
		.get_panel_vfp = NULL,
		.update_pps = dp_connector_update_pps,
		.cmd_receive = NULL,
		.install_properties = dp_connector_install_properties,
		.set_allowed_mode_switch = NULL,
		.set_dyn_bit_clk = NULL,
		.update_transfer_time = NULL,
	};
	struct msm_display_info info;
	struct drm_encoder *encoder;
	void *display, *connector;
	int i, max_encoders;
	int rc = 0;
	u32 dsc_count = 0, mixer_count = 0;
	u32 max_dp_dsc_count, max_dp_mixer_count;

	if (!dev || !priv || !sde_kms) {
		SDE_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count +
			sde_kms->dp_display_count +
			sde_kms->dp_stream_count;
	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
		max_encoders = ARRAY_SIZE(priv->encoders);
		SDE_ERROR("capping number of displays to %d\n", max_encoders);
	}

	/* wb */
	for (i = 0; i < sde_kms->wb_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->wb_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = sde_wb_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("wb get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for wb %d\n", i);
			continue;
		}

		rc = sde_wb_drm_init(display, encoder);
		if (rc) {
			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				0,
				display,
				&wb_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_VIRTUAL);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("wb %d connector init failed\n", i);
			sde_wb_drm_deinit(display);
			sde_encoder_destroy(encoder);
		}
	}

	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->dsi_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for dsi %d\n", i);
			continue;
		}

		rc = dsi_display_drm_bridge_init(display, encoder);
		if (rc) {
			SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				dsi_display_get_drm_panel(display),
				display,
				&dsi_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_DSI);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("dsi %d connector init failed\n", i);
			dsi_display_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
			continue;
		}

		rc = dsi_display_drm_ext_bridge_init(display,
				encoder, connector);
		if (rc) {
			SDE_ERROR("dsi %d ext bridge init failed, %d\n", i, rc);
			dsi_display_drm_bridge_deinit(display);
			sde_connector_destroy(connector);
			sde_encoder_destroy(encoder);
		}

		dsc_count += info.dsc_count;
		mixer_count += info.lm_count;

		if (dsi_display_has_dsc_switch_support(display))
			sde_kms->dsc_switch_support = true;
	}

	if (sde_kms->catalog->allowed_dsc_reservation_switch &&
			!sde_kms->dsc_switch_support) {
		SDE_DEBUG("dsc switch not supported\n");
		sde_kms->catalog->allowed_dsc_reservation_switch = 0;
	}

	max_dp_mixer_count = sde_kms->catalog->mixer_count > mixer_count ?
			sde_kms->catalog->mixer_count - mixer_count : 0;
	max_dp_dsc_count = sde_kms->catalog->dsc_count > dsc_count ?
			sde_kms->catalog->dsc_count - dsc_count : 0;

	if (sde_kms->catalog->allowed_dsc_reservation_switch &
			SDE_DP_DSC_RESERVATION_SWITCH)
		max_dp_dsc_count = sde_kms->catalog->dsc_count;

	/* dp */
	for (i = 0; i < sde_kms->dp_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		int idx;

		display = sde_kms->dp_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dp_connector_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dp get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("dp encoder init failed %d\n", i);
			continue;
		}

		rc = dp_drm_bridge_init(display, encoder,
				max_dp_mixer_count, max_dp_dsc_count);
		if (rc) {
			SDE_ERROR("dp bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				NULL,
				display,
				&dp_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_DisplayPort);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("dp %d connector init failed\n", i);
			dp_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
		}

		/* update display cap to MST_MODE for DP MST encoders */
		info.capabilities |= MSM_DISPLAY_CAP_MST_MODE;

		for (idx = 0; idx < sde_kms->dp_stream_count &&
				priv->num_encoders < max_encoders; idx++) {
			info.h_tile_instance[0] = idx;
			encoder = sde_encoder_init(dev, &info);
			if (IS_ERR_OR_NULL(encoder)) {
				SDE_ERROR("dp mst encoder init failed %d\n", i);
				continue;
			}

			rc = dp_mst_drm_bridge_init(display, encoder);
			if (rc) {
				SDE_ERROR("dp mst bridge %d init failed, %d\n",
						i, rc);
				sde_encoder_destroy(encoder);
				continue;
			}
			priv->encoders[priv->num_encoders++] = encoder;
		}
	}

	return 0;
}
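/**
 * _sde_kms_drm_obj_destroy - destroy all the drm objects created by the kms
 * @sde_kms: Pointer to sde kms structure
 *
 * Tears down crtcs, planes, connectors and encoders in that order, then
 * releases the cached display handles.
 */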
static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;
	int i;

	if (!sde_kms) {
		SDE_ERROR("invalid sde_kms\n");
		return;
	} else if (!sde_kms->dev) {
		SDE_ERROR("invalid dev\n");
		return;
	} else if (!sde_kms->dev->dev_private) {
		SDE_ERROR("invalid dev_private\n");
		return;
	}

	priv = sde_kms->dev->dev_private;

	for (i = 0; i < priv->num_crtcs; i++)
		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
	priv->num_crtcs = 0;

	for (i = 0; i < priv->num_planes; i++)
		priv->planes[i]->funcs->destroy(priv->planes[i]);
	priv->num_planes = 0;

	for (i = 0; i < priv->num_connectors; i++)
		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
	priv->num_connectors = 0;

	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
	priv->num_encoders = 0;

	_sde_kms_release_displays(sde_kms);
}
static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_crtc *crtc;
	struct msm_drm_private *priv;
	struct sde_mdss_cfg *catalog;
	int primary_planes_idx = 0, i, ret;
	int max_crtc_count;
	u32 sspp_id[MAX_PLANES];
	u32 master_plane_id[MAX_PLANES];
	u32 num_virt_planes = 0, dummy_mixer_count = 0;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	catalog = sde_kms->catalog;

	ret = sde_core_irq_domain_add(sde_kms);
	if (ret)
		goto fail_irq;
	/*
	 * Query for underlying display drivers, and create connectors,
	 * bridges and encoders for them.
	 */
	if (!_sde_kms_get_displays(sde_kms))
		(void)_sde_kms_setup_displays(dev, priv, sde_kms);

	for (i = 0; i < catalog->mixer_count; i++)
		if (catalog->mixer[i].dummy_mixer)
			dummy_mixer_count++;

	max_crtc_count = catalog->mixer_count - dummy_mixer_count;

	/* Create the planes */
	for (i = 0; i < catalog->sspp_count; i++) {
		bool primary = true;

		if (primary_planes_idx >= max_crtc_count)
			primary = false;

		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
				(1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (primary)
			primary_planes[primary_planes_idx++] = plane;

		if (sde_hw_sspp_multirect_enabled(&catalog->sspp[i]) &&
				sde_is_custom_client()) {
			int priority =
				catalog->sspp[i].sblk->smart_dma_priority;
			sspp_id[priority - 1] = catalog->sspp[i].id;
			master_plane_id[priority - 1] = plane->base.id;
			num_virt_planes++;
		}
	}

	/* Initialize smart DMA virtual planes */
	for (i = 0; i < num_virt_planes; i++) {
		plane = sde_plane_init(dev, sspp_id[i], false,
				(1UL << max_crtc_count) - 1, master_plane_id[i]);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane for virtual SSPP init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per primary plane, up to the usable mixer count */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = sde_crtc_init(dev, primary_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	if (sde_is_custom_client()) {
		/* All CRTCs are compatible with all planes */
		for (i = 0; i < priv->num_planes; i++)
			priv->planes[i]->possible_crtcs =
				(1 << priv->num_crtcs) - 1;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
fail:
	_sde_kms_drm_obj_destroy(sde_kms);
fail_irq:
	sde_core_irq_domain_fini(sde_kms);
	return ret;
}
/**
 * sde_kms_timeline_status - provides current timeline status
 * This API should be called without mode config lock.
 * @dev: Pointer to drm device
 */
void sde_kms_timeline_status(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;

	if (!dev) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}

	drm_for_each_crtc(crtc, dev)
		sde_crtc_timeline_status(crtc);

	if (mutex_is_locked(&dev->mode_config.mutex)) {
		/*
		 * probably locked from the last close; dump the connector
		 * status anyway
		 */
		SDE_ERROR("dumping conn_timeline without mode_config lock\n");
		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(conn, &conn_iter)
			sde_conn_timeline_status(conn);
		drm_connector_list_iter_end(&conn_iter);

		return;
	}

	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter)
		sde_conn_timeline_status(conn);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);
}
static int sde_kms_postinit(struct msm_kms *kms)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev;
	struct drm_crtc *crtc;
	struct msm_drm_private *priv;
	int i, rc;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev ||
			!sde_kms->dev->dev_private) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = sde_kms->dev->dev_private;

	/*
	 * Handle (re)initializations during power enable; the sde power
	 * event has to be registered after drm_irq_install to handle the
	 * irq update.
	 */
	sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
	sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
			SDE_POWER_EVENT_POST_ENABLE |
			SDE_POWER_EVENT_PRE_DISABLE,
			sde_kms_handle_power_event, sde_kms, "kms");

	if (sde_kms->splash_data.num_splash_displays) {
		SDE_DEBUG("Skipping MDP Resources disable\n");
	} else {
		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
			sde_power_data_bus_set_quota(&priv->phandle, i,
				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);

		pm_runtime_put_sync(sde_kms->dev->dev);
	}

	rc = _sde_debugfs_init(sde_kms);
	if (rc)
		SDE_ERROR("sde_debugfs init failed: %d\n", rc);

	drm_for_each_crtc(crtc, dev)
		sde_crtc_post_init(dev, crtc);

	return rc;
}
static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
		struct platform_device *pdev)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_vm_ops *vm_ops;
	int i;

	if (!sde_kms || !pdev)
		return;

	dev = sde_kms->dev;
	if (!dev)
		return;

	priv = dev->dev_private;
	if (!priv)
		return;

	if (sde_kms->genpd_init) {
		sde_kms->genpd_init = false;
		pm_genpd_remove(&sde_kms->genpd);
		of_genpd_del_provider(pdev->dev.of_node);
	}

	vm_ops = sde_vm_get_ops(sde_kms);
	if (vm_ops && vm_ops->vm_deinit)
		vm_ops->vm_deinit(sde_kms, vm_ops);

	if (sde_kms->hw_intr)
		sde_hw_intr_destroy(sde_kms->hw_intr);
	sde_kms->hw_intr = NULL;

	if (sde_kms->power_event)
		sde_power_handle_unregister_event(
				&priv->phandle, sde_kms->power_event);

	_sde_kms_release_displays(sde_kms);

	_sde_kms_unmap_all_splash_regions(sde_kms);

	if (sde_kms->catalog) {
		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = sde_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
		}
	}

	if (sde_kms->rm_init)
		sde_rm_destroy(&sde_kms->rm);
	sde_kms->rm_init = false;

	if (sde_kms->catalog)
		sde_hw_catalog_deinit(sde_kms->catalog);
	sde_kms->catalog = NULL;

	if (sde_kms->sid)
		msm_iounmap(pdev, sde_kms->sid);
	sde_kms->sid = NULL;

	if (sde_kms->reg_dma)
		msm_iounmap(pdev, sde_kms->reg_dma);
	sde_kms->reg_dma = NULL;

	if (sde_kms->vbif[VBIF_NRT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
	sde_kms->vbif[VBIF_NRT] = NULL;

	if (sde_kms->vbif[VBIF_RT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
	sde_kms->vbif[VBIF_RT] = NULL;

	if (sde_kms->mmio)
		msm_iounmap(pdev, sde_kms->mmio);
	sde_kms->mmio = NULL;

	sde_reg_dma_deinit();
	_sde_kms_mmu_destroy(sde_kms);
}
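/**
 * sde_kms_mmu_detach - detach the display SMMU domains
 * @sde_kms: Pointer to sde kms structure
 * @secure_only: when true, only secure domains are detached
 *
 * Flushes the address-space state before detaching each domain. Presumably
 * paired with sde_kms_mmu_attach() around secure session transitions, e.g.:
 *
 *	sde_kms_mmu_detach(sde_kms, true);	// entering a secure session
 *	...
 *	sde_kms_mmu_attach(sde_kms, true);	// leaving the secure session
 *
 * Returns: Zero on success
 */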
int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only)
{
	int i;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_mmu *mmu;
		struct msm_gem_address_space *aspace = sde_kms->aspace[i];

		if (!aspace)
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		if (secure_only &&
			!aspace->mmu->funcs->is_domain_secure(mmu))
			continue;

		/* cleanup aspace before detaching */
		msm_gem_aspace_domain_attach_detach_update(aspace, true);

		SDE_DEBUG("Detaching domain:%d\n", i);
		aspace->mmu->funcs->detach(mmu, (const char **)iommu_ports,
			ARRAY_SIZE(iommu_ports));

		aspace->domain_attached = false;
	}

	return 0;
}
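/**
 * sde_kms_mmu_attach - attach the display SMMU domains
 * @sde_kms: Pointer to sde kms structure
 * @secure_only: when true, only secure domains are attached
 *
 * Attaches each domain and then replays the address-space state; the inverse
 * of sde_kms_mmu_detach().
 * Returns: Zero on success
 */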
int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only)
{
	int i;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_mmu *mmu;
		struct msm_gem_address_space *aspace = sde_kms->aspace[i];

		if (!aspace)
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		if (secure_only &&
			!aspace->mmu->funcs->is_domain_secure(mmu))
			continue;

		SDE_DEBUG("Attaching domain:%d\n", i);
		aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
			ARRAY_SIZE(iommu_ports));

		aspace->domain_attached = true;
		msm_gem_aspace_domain_attach_detach_update(aspace, false);
	}

	return 0;
}
static void sde_kms_destroy(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->dev) {
		SDE_ERROR("invalid device\n");
		return;
	}

	_sde_kms_hw_destroy(sde_kms, to_platform_device(dev->dev));
	kfree(sde_kms);
}
static void sde_kms_helper_clear_dim_layers(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state = NULL;
	struct sde_crtc_state *c_state;

	if (!state || !crtc) {
		SDE_ERROR("invalid params\n");
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	c_state = to_sde_crtc_state(crtc_state);

	_sde_crtc_clear_dim_layers_v1(crtc_state);
	set_bit(SDE_CRTC_DIRTY_DIM_LAYERS, c_state->dirty);
}
static int sde_kms_set_crtc_for_conn(struct drm_device *dev,
		struct drm_encoder *enc, struct drm_atomic_state *state)
{
	struct drm_connector *conn = NULL;
	struct drm_connector *tmp_conn = NULL;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_connector_state *conn_state = NULL;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(tmp_conn, &conn_iter) {
		if (enc == tmp_conn->state->best_encoder) {
			conn = tmp_conn;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!conn || !enc->crtc) {
		SDE_ERROR("invalid params for enc:%d\n", DRMID(enc));
		return -EINVAL;
	}

	crtc_state = drm_atomic_get_crtc_state(state, enc->crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		SDE_ERROR("error %d getting crtc %d state\n",
				ret, DRMID(enc->crtc));
		return ret;
	}

	conn_state = drm_atomic_get_connector_state(state, conn);
	if (IS_ERR(conn_state)) {
		ret = PTR_ERR(conn_state);
		SDE_ERROR("error %d getting connector %d state\n",
				ret, DRMID(conn));
		return ret;
	}

	crtc_state->active = true;
	crtc_state->enable = true;
	ret = drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
	if (ret)
		SDE_ERROR("error %d setting the crtc\n", ret);

	return ret;
}
static void _sde_kms_plane_force_remove(struct drm_plane *plane,
		struct drm_atomic_state *state)
{
	struct drm_plane_state *plane_state;
	int ret = 0;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		SDE_ERROR("error %d getting plane %d state\n",
				ret, plane->base.id);
		return;
	}

	plane->old_fb = plane->fb;

	SDE_DEBUG("disabling plane %d\n", plane->base.id);

	ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
	if (ret != 0)
		SDE_ERROR("error %d disabling plane %d\n", ret,
				plane->base.id);

	drm_atomic_set_fb_for_plane(plane_state, NULL);
}
static int _sde_kms_connector_add_refcount(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct sde_connector_state *c_state;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		/*
		 * Acquire a connector reference to avoid removing the
		 * connector in drm_release for splash and recovery cases.
		 */
		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			SDE_ERROR("error %d getting connector %d state\n",
					ret, DRMID(conn));
			break;
		}

		c_state = to_sde_connector_state(conn_state);
		if (c_state->out_fb)
			drm_framebuffer_put(c_state->out_fb);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
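/*
 * Force-disable any plane still scanning out a framebuffer owned by the
 * closing file and push a "null" commit so the framebuffers can be freed;
 * used from the preclose path below.
 */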
static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_framebuffer *fb, *tfb;
	struct list_head fbs;
	struct drm_plane *plane;
	struct drm_crtc *crtc = NULL;
	unsigned int crtc_mask = 0;
	int ret = 0;

	INIT_LIST_HEAD(&fbs);

	list_for_each_entry_safe(fb, tfb, &file->fbs, filp_head) {
		if (drm_framebuffer_read_refcount(fb) > 1) {
			list_move_tail(&fb->filp_head, &fbs);

			drm_for_each_plane(plane, dev) {
				if (plane->state && plane->state->fb == fb) {
					if (plane->state->crtc)
						crtc_mask |= drm_crtc_mask(plane->state->crtc);
					_sde_kms_plane_force_remove(plane, state);
				}
			}
		} else {
			list_del_init(&fb->filp_head);
			drm_framebuffer_put(fb);
		}
	}

	if (list_empty(&fbs)) {
		SDE_DEBUG("skip commit as no fb(s)\n");
		if (sde_kms->dsi_display_count == sde_kms->splash_data.num_splash_displays)
			_sde_kms_connector_add_refcount(sde_kms, state);
		return 0;
	}

	drm_for_each_crtc(crtc, dev) {
		if ((crtc_mask & drm_crtc_mask(crtc)) && crtc->state->active) {
			struct drm_encoder *drm_enc;

			drm_for_each_encoder_mask(drm_enc, crtc->dev,
					crtc->state->encoder_mask) {
				ret = sde_kms_set_crtc_for_conn(dev, drm_enc, state);
				if (ret)
					goto error;
			}

			sde_kms_helper_clear_dim_layers(state, crtc);
		}
	}

	SDE_EVT32(state, crtc_mask);
	SDE_DEBUG("null commit after removing all the pipes\n");
	ret = drm_atomic_commit(state);

error:
	if (ret) {
		/*
		 * move the fbs back to the original list, so they are
		 * handled during drm_release
		 */
		list_for_each_entry_safe(fb, tfb, &fbs, filp_head)
			list_move_tail(&fb->filp_head, &file->fbs);

		if (ret == -EDEADLK || ret == -ERESTARTSYS)
			SDE_DEBUG("atomic commit failed in preclose, ret:%d\n", ret);
		else
			SDE_ERROR("atomic commit failed in preclose, ret:%d\n", ret);
		goto end;
	}

	while (!list_empty(&fbs)) {
		fb = list_first_entry(&fbs, typeof(*fb), filp_head);
		list_del_init(&fb->filp_head);
		drm_framebuffer_put(fb);
	}

	drm_for_each_crtc(crtc, dev) {
		if (!ret && crtc_mask & drm_crtc_mask(crtc))
			sde_kms_cancel_delayed_work(crtc);
	}

end:
	return ret;
}
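/*
 * preclose runs when a drm file is being closed: pending page-flip events are
 * completed first, then the file's framebuffers are removed under a full
 * modeset lock, retrying up to TEARDOWN_DEADLOCK_RETRY_MAX times on deadlock.
 */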
static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int i;
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	int ret = 0;

	/* cancel pending flip event */
	for (i = 0; i < priv->num_crtcs; i++)
		sde_crtc_complete_flip(priv->crtcs[i], file);

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto end;
	}

	state->acquire_ctx = &ctx;

	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
		ret = _sde_kms_remove_fbs(sde_kms, file, state);
		if (ret != -EDEADLK && ret != -ERESTARTSYS)
			break;
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
	}

end:
	if (state)
		drm_atomic_state_put(state);

	SDE_DEBUG("sde preclose done, ret:%d\n", ret);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
static int _sde_kms_helper_reset_custom_properties(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	int ret = 0;

	drm_for_each_plane(plane, dev) {
		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			SDE_ERROR("error %d getting plane %d state\n",
					ret, DRMID(plane));
			return ret;
		}

		ret = sde_plane_helper_reset_custom_properties(plane,
				plane_state);
		if (ret) {
			SDE_ERROR("error %d resetting plane props %d\n",
					ret, DRMID(plane));
			return ret;
		}
	}

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			SDE_ERROR("error %d getting crtc %d state\n",
					ret, DRMID(crtc));
			return ret;
		}

		ret = sde_crtc_helper_reset_custom_properties(crtc, crtc_state);
		if (ret) {
			SDE_ERROR("error %d resetting crtc props %d\n",
					ret, DRMID(crtc));
			return ret;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			SDE_ERROR("error %d getting connector %d state\n",
					ret, DRMID(conn));
			break;
		}

		ret = sde_connector_helper_reset_custom_properties(conn,
				conn_state);
		if (ret) {
			SDE_ERROR("error %d resetting connector props %d\n",
					ret, DRMID(conn));
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
static void sde_kms_lastclose(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!kms) {
		SDE_ERROR("invalid argument\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_ctx;
	}

	state->acquire_ctx = &ctx;
	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret)
		goto out_state;

	ret = _sde_kms_helper_reset_custom_properties(sde_kms, state);
	if (ret)
		goto out_state;

	ret = drm_atomic_commit(state);
out_state:
	if (ret == -EDEADLK)
		goto backoff;

	drm_atomic_state_put(state);
out_ctx:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		SDE_ERROR("kms lastclose failed: %d\n", ret);

	SDE_EVT32(ret, SDE_EVTLOG_FUNC_EXIT);
	return;

backoff:
	drm_atomic_state_clear(state);
	drm_modeset_backoff(&ctx);
	SDE_EVT32(ret, SDE_EVTLOG_FUNC_CASE1);
	goto retry;
}
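/*
 * Validates that a VM switch commit touches at most the allowed number of
 * CRTCs, that idle power-collapse is disabled before a release, and performs
 * the actual vm_acquire (plus optional resource init) on an acquire request.
 */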
static int _sde_kms_validate_vm_request(struct drm_atomic_state *state, struct sde_kms *sde_kms,
		enum sde_crtc_vm_req vm_req, bool vm_owns_hw)
{
	struct drm_crtc *crtc, *active_crtc = NULL, *global_active_crtc = NULL;
	struct drm_crtc_state *new_cstate, *old_cstate, *active_cstate;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *new_connstate;
	struct sde_vm_ops *vm_ops = sde_vm_get_ops(sde_kms);
	struct sde_mdss_cfg *catalog = sde_kms->catalog;
	struct sde_connector *sde_conn;
	struct dsi_display *dsi_display;
	uint32_t i, commit_crtc_cnt = 0, global_crtc_cnt = 0;
	uint32_t crtc_encoder_cnt = 0;
	enum sde_crtc_idle_pc_state idle_pc_state;
	int rc = 0;

	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		struct sde_crtc_state *new_state = NULL;

		if (!new_cstate->active && !old_cstate->active)
			continue;

		new_state = to_sde_crtc_state(new_cstate);
		idle_pc_state = sde_crtc_get_property(new_state, CRTC_PROP_IDLE_PC_STATE);

		active_crtc = crtc;
		active_cstate = new_cstate;
		commit_crtc_cnt++;
	}

	list_for_each_entry(crtc, &sde_kms->dev->mode_config.crtc_list, head) {
		if (!crtc->state->active)
			continue;

		global_crtc_cnt++;
		global_active_crtc = crtc;
	}

	if (active_crtc) {
		drm_for_each_encoder_mask(encoder, active_crtc->dev, active_cstate->encoder_mask)
			crtc_encoder_cnt++;
	}

	for_each_new_connector_in_state(state, connector, new_connstate, i) {
		int conn_mask = active_cstate->connector_mask;

		if (drm_connector_mask(connector) & conn_mask) {
			sde_conn = to_sde_connector(connector);
			dsi_display = (struct dsi_display *) sde_conn->display;

			SDE_EVT32(DRMID(connector), DRMID(active_crtc), i, dsi_display->type,
					dsi_display->trusted_vm_env);
			SDE_DEBUG("VM display:%s, conn:%d, crtc:%d, type:%d, tvm:%d\n",
					dsi_display->name, DRMID(connector), DRMID(active_crtc),
					dsi_display->type, dsi_display->trusted_vm_env);
			break;
		}
	}

	/* Check for single crtc commits only on valid VM requests */
	if (active_crtc && global_active_crtc &&
			(commit_crtc_cnt > catalog->max_trusted_vm_displays ||
			global_crtc_cnt > catalog->max_trusted_vm_displays ||
			active_crtc != global_active_crtc)) {
		SDE_ERROR("VM switch failed; MAX:%d a_cnt:%d g_cnt:%d a_crtc:%d g_crtc:%d\n",
				catalog->max_trusted_vm_displays, commit_crtc_cnt, global_crtc_cnt,
				DRMID(active_crtc), DRMID(global_active_crtc));
		return -E2BIG;
	} else if ((vm_req == VM_REQ_RELEASE) &&
			((idle_pc_state == IDLE_PC_ENABLE) ||
			(crtc_encoder_cnt > TRUSTED_VM_MAX_ENCODER_PER_CRTC))) {
		/*
		 * disable idle-pc before releasing the HW
		 * allow only specified number of encoders on a given crtc
		 */
		SDE_ERROR("VM switch failed; idle-pc:%d max:%d encoder_cnt:%d\n",
				idle_pc_state, TRUSTED_VM_MAX_ENCODER_PER_CRTC, crtc_encoder_cnt);
		return -EINVAL;
	}

	if ((vm_req == VM_REQ_ACQUIRE) && !vm_owns_hw) {
		rc = vm_ops->vm_acquire(sde_kms);
		if (rc) {
			SDE_ERROR("VM acquire failed; hw_owner:%d, rc:%d\n", vm_owns_hw, rc);
			return rc;
		}

		if (vm_ops->vm_resource_init)
			rc = vm_ops->vm_resource_init(sde_kms, state);
	}

	return rc;
}
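/*
 * Atomic-check hook for VM transitions: scans the commit for VM_REQ_ACQUIRE /
 * VM_REQ_RELEASE property changes, validates the requested transition against
 * the current HW owner under the VM lock, and acquires the HW when needed.
 */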
static int sde_kms_check_vm_request(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate, *old_cstate;
	struct sde_vm_ops *vm_ops;
	enum sde_crtc_vm_req old_vm_req = VM_REQ_NONE, new_vm_req = VM_REQ_NONE;
	int i, rc = 0;
	bool vm_req_active = false, prev_vm_req = false;
	bool vm_owns_hw;

	if (!kms || !state)
		return -EINVAL;

	sde_kms = to_sde_kms(kms);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return 0;

	if (!vm_ops->vm_request_valid || !vm_ops->vm_owns_hw || !vm_ops->vm_acquire)
		return -EINVAL;

	drm_for_each_crtc(crtc, state->dev) {
		if (crtc->state && (sde_crtc_get_property(to_sde_crtc_state(crtc->state),
				CRTC_PROP_VM_REQ_STATE) == VM_REQ_RELEASE)) {
			prev_vm_req = true;
			break;
		}
	}

	/* check for an active vm request */
	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		struct sde_crtc_state *old_state = NULL, *new_state = NULL;

		if (!new_cstate->active && !old_cstate->active)
			continue;

		new_state = to_sde_crtc_state(new_cstate);
		new_vm_req = sde_crtc_get_property(new_state, CRTC_PROP_VM_REQ_STATE);

		old_state = to_sde_crtc_state(old_cstate);
		old_vm_req = sde_crtc_get_property(old_state, CRTC_PROP_VM_REQ_STATE);

		/*
		 * VM request should be validated in the following usecases
		 * - There is a vm request (other than VM_REQ_NONE) on the
		 *   current/previous crtc state.
		 * - Previously, a vm transition has taken place on one of
		 *   the crtcs.
		 */
		if (old_vm_req || new_vm_req || prev_vm_req) {
			if (!vm_req_active) {
				sde_vm_lock(sde_kms);
				vm_owns_hw = sde_vm_owns_hw(sde_kms);
			}

			rc = vm_ops->vm_request_valid(sde_kms, old_vm_req, new_vm_req);
			if (rc) {
				SDE_ERROR(
					"VM transition check failed; o_state:%d, n_state:%d, hw_owner:%d, rc:%d\n",
					old_vm_req, new_vm_req, vm_owns_hw, rc);
				sde_vm_unlock(sde_kms);
				vm_req_active = false;
				break;
			} else if (old_vm_req == VM_REQ_ACQUIRE && new_vm_req == VM_REQ_NONE) {
				SDE_DEBUG("VM transition valid; ignore further checks\n");
				if (!vm_req_active)
					sde_vm_unlock(sde_kms);
			} else {
				vm_req_active = true;
			}
		}
	}

	/* validate active requests and perform acquire if necessary */
	if (vm_req_active) {
		rc = _sde_kms_validate_vm_request(state, sde_kms, new_vm_req, vm_owns_hw);

		sde_vm_unlock(sde_kms);

		SDE_EVT32(old_vm_req, new_vm_req, vm_req_active, vm_owns_hw, rc);
		SDE_DEBUG("VM o_state:%d, n_state:%d, hw_owner:%d, rc:%d\n", old_vm_req, new_vm_req,
				vm_req_active ? vm_owns_hw : -1, rc);
	}

	return rc;
}

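/*
 * sde_kms_check_secure_transition - reject commits that would mix a
 * secure-camera/secure-ui session with any other active display. Counts
 * active CRTCs both in the incoming atomic state and in the global mode
 * config list, and only allows a single CRTC while a secure-direct
 * framebuffer is staged anywhere.
 */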
static int sde_kms_check_secure_transition(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct drm_crtc *crtc;
	struct drm_crtc *cur_crtc = NULL, *global_crtc = NULL;
	struct drm_crtc_state *crtc_state;
	int active_crtc_cnt = 0, global_active_crtc_cnt = 0;
	bool sec_session = false, global_sec_session = false;
	uint32_t fb_ns = 0, fb_sec = 0, fb_sec_dir = 0;
	int i;

	if (!kms || !state) {
		SDE_ERROR("invalid arguments\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	/* iterate state object for active secure/non-secure crtc */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->active)
			continue;

		active_crtc_cnt++;
		sde_crtc_state_find_plane_fb_modes(crtc_state, &fb_ns,
				&fb_sec, &fb_sec_dir);
		if (fb_sec_dir)
			sec_session = true;

		cur_crtc = crtc;
	}

	/* iterate global list for active and secure/non-secure crtc */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->state->active)
			continue;

		global_active_crtc_cnt++;
		/* update only when crtc is not the same as current crtc */
		if (crtc != cur_crtc) {
			fb_ns = fb_sec = fb_sec_dir = 0;
			sde_crtc_find_plane_fb_modes(crtc, &fb_ns,
					&fb_sec, &fb_sec_dir);
			if (fb_sec_dir)
				global_sec_session = true;

			global_crtc = crtc;
		}
	}

	if (!global_sec_session && !sec_session)
		return 0;

	/*
	 * - fail crtc commit, if a secure-camera/secure-ui session is
	 *   in progress on any other display
	 * - fail a secure-camera/secure-ui crtc commit, if any other
	 *   display session is in progress
	 */
	if ((global_active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE) ||
	    (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE)) {
		SDE_ERROR(
			"crtc%d secure check failed global_active:%d active:%d\n",
				cur_crtc ? cur_crtc->base.id : -1,
				global_active_crtc_cnt, active_crtc_cnt);
		return -EPERM;

	/*
	 * As only one crtc is allowed during a secure session, the crtc
	 * in this commit should match the global crtc
	 */
	} else if (global_crtc && cur_crtc && (global_crtc != cur_crtc)) {
		SDE_ERROR("crtc%d-sec%d not allowed during crtc%d-sec%d\n",
				cur_crtc->base.id, sec_session,
				global_crtc->base.id, global_sec_session);
		return -EPERM;
	}

	return 0;
}

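/*
 * sde_kms_vm_res_release - undo a VM acquire when atomic_check fails after
 * the acquire has already happened. Looks up the CRTC carrying the VM
 * request in the failed state and invokes the acquire-failure handler under
 * the VM lock.
 */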
static void sde_kms_vm_res_release(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct sde_crtc_state *cstate;
	struct sde_vm_ops *vm_ops;
	enum sde_crtc_vm_req vm_req;
	struct sde_kms *sde_kms = to_sde_kms(kms);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return;

	sde_vm_lock(sde_kms);

	if (vm_ops->vm_acquire_fail_handler)
		vm_ops->vm_acquire_fail_handler(sde_kms);

	sde_vm_unlock(sde_kms);
}

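/*
 * sde_kms_check_cwb_concurrency - count the concurrent-writeback encoders
 * attached across all CRTCs in the atomic state and reject the commit when
 * the total exceeds the max_cwb limit reported by the catalog.
 */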
static int sde_kms_check_cwb_concurrency(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_encoder *encoder;
	struct sde_crtc_state *cstate;
	int i = 0, cnt = 0, max_cwb = 0;

	if (!kms || !state) {
		SDE_ERROR("invalid arguments\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	max_cwb = sde_kms->catalog->max_cwb;
	if (!max_cwb)
		return 0;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		cstate = to_sde_crtc_state(new_crtc_state);
		drm_for_each_encoder_mask(encoder, crtc->dev, cstate->cwb_enc_mask) {
			cnt++;
			SDE_DEBUG("crtc%d has cwb%d attached to it\n", crtc->base.id,
					encoder->base.id);
		}

		if (cnt > max_cwb) {
			SDE_ERROR("found %d cwb in the atomic state, max supported %d\n",
					cnt, max_cwb);
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

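/*
 * sde_kms_atomic_check - SDE hook for the DRM atomic_check phase. Runs the
 * VM transition checks first, then the core DRM helpers, then the secure
 * transition and CWB concurrency checks; any failure after a successful VM
 * acquire is unwound through sde_kms_vm_res_release().
 */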
static int sde_kms_atomic_check(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	int ret;

	if (!kms || !state)
		return -EINVAL;

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	SDE_ATRACE_BEGIN("atomic_check");
	if (sde_kms_is_suspend_blocked(dev)) {
		SDE_DEBUG("suspended, skip atomic_check\n");
		ret = -EBUSY;
		goto end;
	}

	ret = sde_kms_check_vm_request(kms, state);
	if (ret) {
		SDE_ERROR("vm switch request checks failed\n");
		goto end;
	}

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		goto vm_clean_up;

	/*
	 * Check whether any secure transition (moving a CRTC between secure
	 * and non-secure state and vice-versa) is allowed. When moving to
	 * secure state, only planes with fb_mode set to dir_translated can
	 * be staged on the CRTC, and only one CRTC can be active during
	 * secure state.
	 */
	ret = sde_kms_check_secure_transition(kms, state);
	if (ret)
		goto vm_clean_up;

	ret = sde_kms_check_cwb_concurrency(kms, state);
	if (ret)
		goto vm_clean_up;

	goto end;

vm_clean_up:
	sde_kms_vm_res_release(kms, state);
end:
	SDE_ATRACE_END("atomic_check");
	return ret;
}

static struct msm_gem_address_space*
_sde_kms_get_address_space(struct msm_kms *kms,
		unsigned int domain)
{
	struct sde_kms *sde_kms;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return NULL;
	}

	sde_kms = to_sde_kms(kms);
	if (!sde_kms) {
		SDE_ERROR("invalid sde_kms\n");
		return NULL;
	}

	if (domain >= MSM_SMMU_DOMAIN_MAX)
		return NULL;

	return (sde_kms->aspace[domain] &&
			sde_kms->aspace[domain]->domain_attached) ?
		sde_kms->aspace[domain] : NULL;
}

static struct device *_sde_kms_get_address_space_device(struct msm_kms *kms,
		unsigned int domain)
{
	struct sde_kms *sde_kms;
	struct msm_gem_address_space *aspace;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return NULL;
	}

	sde_kms = to_sde_kms(kms);
	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid params\n");
		return NULL;
	}

	aspace = _sde_kms_get_address_space(kms, domain);
	return (aspace && aspace->domain_attached) ?
			msm_gem_get_aspace_device(aspace) : NULL;
}

static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
{
	struct drm_device *dev = NULL;
	struct sde_kms *sde_kms = NULL;
	struct drm_connector *connector = NULL;
	struct drm_connector_list_iter conn_iter;
	struct sde_connector *sde_conn = NULL;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	if (!dev) {
		SDE_ERROR("invalid device\n");
		return;
	}

	if (!dev->mode_config.poll_enabled)
		return;

	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* Only handle HPD capable connectors. */
		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
			continue;

		sde_conn = to_sde_connector(connector);

		if (sde_conn->ops.post_open)
			sde_conn->ops.post_open(&sde_conn->base,
					sde_conn->display);
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);
}

static int _sde_kms_update_planes_for_cont_splash(struct sde_kms *sde_kms,
		struct sde_splash_display *splash_display,
		struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;
	struct drm_plane *plane;
	struct sde_splash_mem *splash;
	struct sde_splash_mem *demura;
	struct sde_plane_state *pstate;
	struct sde_sspp_index_info *pipe_info;
	enum sde_sspp pipe_id;
	bool is_virtual;
	int i;

	if (!sde_kms || !splash_display || !crtc) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	priv = sde_kms->dev->dev_private;
	pipe_info = &splash_display->pipe_info;
	splash = splash_display->splash;
	demura = splash_display->demura;

	for (i = 0; i < priv->num_planes; i++) {
		plane = priv->planes[i];
		pipe_id = sde_plane_pipe(plane);
		is_virtual = is_sde_plane_virtual(plane);

		if ((is_virtual && test_bit(pipe_id, pipe_info->virt_pipes)) ||
				(!is_virtual && test_bit(pipe_id, pipe_info->pipes))) {
			if (splash && sde_plane_validate_src_addr(plane,
					splash->splash_buf_base,
					splash->splash_buf_size)) {
				if (!demura || sde_plane_validate_src_addr(
						plane, demura->splash_buf_base,
						demura->splash_buf_size)) {
					SDE_ERROR("invalid addr on pipe:%d crtc:%d\n",
							pipe_id, DRMID(crtc));
					continue;
				}
			}

			plane->state->crtc = crtc;
			crtc->state->plane_mask |= drm_plane_mask(plane);
			pstate = to_sde_plane_state(plane->state);
			pstate->cont_splash_populated = true;

			SDE_DEBUG("set crtc:%d for plane:%d rect:%d\n",
					DRMID(crtc), DRMID(plane), is_virtual);
		}
	}

	return 0;
}

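/*
 * sde_kms_inform_cont_splash_res_disable - notify connectors that continuous
 * splash resources are not in use. With a specific dsi_display, only the
 * connector tied to that display's encoder is notified (covering the
 * dual-DSI case where one interface boots without splash); with a NULL
 * display, every connector that implements the hook is notified.
 */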
static int sde_kms_inform_cont_splash_res_disable(struct msm_kms *kms,
		struct dsi_display *dsi_display)
{
	void *display;
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	struct drm_device *dev;
	struct sde_kms *sde_kms;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector = NULL;
	struct sde_connector *sde_conn = NULL;
	int rc = 0;

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	display = dsi_display;

	if (dsi_display) {
		if (dsi_display->bridge->base.encoder) {
			encoder = dsi_display->bridge->base.encoder;
			SDE_DEBUG("encoder name = %s\n", encoder->name);
		}

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("%s: dsi get_info failed: %d\n",
					__func__, rc);
			encoder = NULL;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_encoder *c_encoder = NULL;

		drm_connector_for_each_possible_encoder(connector,
				c_encoder)
			break;

		if (!c_encoder) {
			SDE_ERROR("c_encoder not found\n");
			drm_connector_list_iter_end(&conn_iter);
			return -EINVAL;
		}

		/*
		 * Inform each interface/connector that cont_splash is
		 * disabled. This is currently supported for the DSI
		 * interface.
		 */
		sde_conn = to_sde_connector(connector);
		if (sde_conn && sde_conn->ops.cont_splash_res_disable) {
			if (!dsi_display || !encoder) {
				sde_conn->ops.cont_splash_res_disable
						(sde_conn->display);
			} else if (c_encoder->base.id == encoder->base.id) {
				/*
				 * This handles the dual DSI configuration
				 * where one DSI interface has cont_splash
				 * enabled and the other doesn't.
				 */
				sde_conn->ops.cont_splash_res_disable
						(sde_conn->display);
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int sde_kms_vm_trusted_cont_splash_res_init(struct sde_kms *sde_kms)
{
	int i;
	void *display;
	struct dsi_display *dsi_display;
	struct drm_encoder *encoder;

	if (!sde_kms)
		return -EINVAL;

	if (!sde_in_trusted_vm(sde_kms))
		return 0;

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		display = sde_kms->dsi_displays[i];
		dsi_display = (struct dsi_display *)display;
		if (!dsi_display->bridge->base.encoder) {
			SDE_ERROR("no encoder on dsi display:%d\n", i);
			return -EINVAL;
		}

		encoder = dsi_display->bridge->base.encoder;
		encoder->possible_crtcs = 1 << i;

		SDE_DEBUG(
			"dsi-display:%d encoder id[%d]=%d name=%s crtcs=%x\n", i,
			encoder->index, encoder->base.id,
			encoder->name, encoder->possible_crtcs);
	}

	return 0;
}

static struct drm_display_mode *_sde_kms_get_splash_mode(
		struct sde_kms *sde_kms, struct drm_connector *connector,
		struct drm_atomic_state *state)
{
	struct drm_display_mode *mode, *cur_mode = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate, *old_cstate;
	u32 i = 0;

	if (sde_kms->splash_data.type == SDE_SPLASH_HANDOFF) {
		list_for_each_entry(mode, &connector->modes, head) {
			if (mode->type & DRM_MODE_TYPE_PREFERRED) {
				cur_mode = mode;
				break;
			}
		}
	} else if (state) {
		/* get the mode from first atomic_check phase for trusted_vm */
		for_each_oldnew_crtc_in_state(state, crtc, old_cstate,
				new_cstate, i) {
			if (!new_cstate->active && !old_cstate->active)
				continue;

			list_for_each_entry(mode, &connector->modes, head) {
				if (drm_mode_equal(&new_cstate->mode, mode)) {
					cur_mode = mode;
					break;
				}
			}
		}
	}

	return cur_mode;
}

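/*
 * sde_kms_cont_splash_config - populate DRM state for displays handed off
 * with continuous splash enabled. For each splash-enabled DSI display this
 * binds the encoder to a CRTC, picks the splash mode, marks the CRTC,
 * connector and plane states as cont_splash_populated, and applies the
 * encoder/CRTC splash settings so the first user commit can take over the
 * bootloader configuration.
 */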
static int sde_kms_cont_splash_config(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	void *display;
	struct dsi_display *dsi_display;
	struct msm_display_info info;
	struct drm_encoder *encoder = NULL;
	struct drm_crtc *crtc = NULL;
	int i, rc = 0;
	struct drm_display_mode *drm_mode = NULL;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector = NULL;
	struct sde_connector *sde_conn = NULL;
	struct sde_splash_display *splash_display;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev) {
		SDE_ERROR("invalid device\n");
		return -EINVAL;
	}

	rc = sde_kms_vm_trusted_cont_splash_res_init(sde_kms);
	if (rc) {
		SDE_ERROR("failed vm cont splash resource init, rc=%d\n", rc);
		return -EINVAL;
	}

	if (((sde_kms->splash_data.type == SDE_SPLASH_HANDOFF)
			&& (!sde_kms->splash_data.num_splash_regions)) ||
			!sde_kms->splash_data.num_splash_displays) {
		DRM_INFO("cont_splash feature not enabled\n");
		sde_kms_inform_cont_splash_res_disable(kms, NULL);
		return rc;
	}

	DRM_INFO("cont_splash enabled in %d of %d display(s)\n",
			sde_kms->splash_data.num_splash_displays,
			sde_kms->dsi_display_count);

	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count; ++i) {
		struct sde_crtc_state *cstate;
		struct sde_connector_state *conn_state;

		display = sde_kms->dsi_displays[i];
		dsi_display = (struct dsi_display *)display;
		splash_display = &sde_kms->splash_data.splash_display[i];

		if (!splash_display->cont_splash_enabled) {
			SDE_DEBUG("display->name = %s splash not enabled\n",
					dsi_display->name);
			sde_kms_inform_cont_splash_res_disable(kms,
					dsi_display);
			continue;
		}

		SDE_DEBUG("display->name = %s\n", dsi_display->name);

		if (dsi_display->bridge->base.encoder) {
			encoder = dsi_display->bridge->base.encoder;
			SDE_DEBUG("encoder name = %s\n", encoder->name);
		}

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			encoder = NULL;
			continue;
		}

		SDE_DEBUG("info.is_connected = %s, info.display_type = %d\n",
			((info.is_connected) ? "true" : "false"),
			info.display_type);

		if (!encoder) {
			SDE_ERROR("encoder not initialized\n");
			return -EINVAL;
		}

		priv = sde_kms->dev->dev_private;
		encoder->crtc = priv->crtcs[i];
		crtc = encoder->crtc;
		splash_display->encoder = encoder;

		SDE_DEBUG("for dsi-display:%d crtc id[%d]:%d enc id[%d]:%d\n",
				i, crtc->index, crtc->base.id, encoder->index,
				encoder->base.id);

		mutex_lock(&dev->mode_config.mutex);
		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct drm_encoder *c_encoder = NULL;

			drm_connector_for_each_possible_encoder(connector,
					c_encoder)
				break;

			if (!c_encoder) {
				SDE_ERROR("c_encoder not found\n");
				mutex_unlock(&dev->mode_config.mutex);
				return -EINVAL;
			}

			/*
			 * SDE_KMS doesn't attach more than one encoder to
			 * a DSI connector. So it is safe to check only with
			 * the first encoder entry. Revisit this logic if we
			 * ever have to support continuous splash for
			 * external displays in MST configuration.
			 */
			if (c_encoder->base.id == encoder->base.id)
				break;
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!connector) {
			SDE_ERROR("connector not initialized\n");
			mutex_unlock(&dev->mode_config.mutex);
			return -EINVAL;
		}
		mutex_unlock(&dev->mode_config.mutex);

		crtc->state->encoder_mask = drm_encoder_mask(encoder);
		crtc->state->connector_mask = drm_connector_mask(connector);
		connector->state->crtc = crtc;

		drm_mode = _sde_kms_get_splash_mode(sde_kms, connector, state);
		if (!drm_mode) {
			SDE_ERROR("drm_mode not found; handoff_type:%d\n",
					sde_kms->splash_data.type);
			return -EINVAL;
		}
		SDE_DEBUG(
			"drm_mode->name:%s, type:0x%x, flags:0x%x, handoff_type:%d\n",
			drm_mode->name, drm_mode->type,
			drm_mode->flags, sde_kms->splash_data.type);

		/* Update CRTC drm structure */
		crtc->state->active = true;
		rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode);
		if (rc) {
			SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc);
			return rc;
		}
		drm_mode_copy(&crtc->state->adjusted_mode, drm_mode);
		drm_mode_copy(&crtc->mode, drm_mode);
		cstate = to_sde_crtc_state(crtc->state);
		cstate->cont_splash_populated = true;

		/* Update encoder structure */
		sde_encoder_update_caps_for_cont_splash(encoder,
				splash_display, true);

		sde_crtc_update_cont_splash_settings(crtc);

		sde_conn = to_sde_connector(connector);
		if (sde_conn && sde_conn->ops.cont_splash_config)
			sde_conn->ops.cont_splash_config(sde_conn->display);

		conn_state = to_sde_connector_state(connector->state);
		conn_state->cont_splash_populated = true;

		rc = _sde_kms_update_planes_for_cont_splash(sde_kms,
				splash_display, crtc);
		if (rc) {
			SDE_ERROR("Failed: updating plane status rc=%d\n", rc);
			return rc;
		}
	}

	return rc;
}

static bool sde_kms_check_for_splash(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return false;
	}

	sde_kms = to_sde_kms(kms);
	return sde_kms->splash_data.num_splash_displays;
}

static int sde_kms_get_mixer_count(const struct msm_kms *kms,
		const struct drm_display_mode *mode,
		const struct msm_resource_caps_info *res, u32 *num_lm)
{
	struct sde_kms *sde_kms;
	s64 mode_clock_hz = 0;
	s64 max_mdp_clock_hz = 0;
	s64 max_lm_width = 0;
	s64 hdisplay_fp = 0;
	s64 htotal_fp = 0;
	s64 vtotal_fp = 0;
	s64 vrefresh_fp = 0;
	s64 mdp_fudge_factor = 0;
	s64 num_lm_fp = 0;
	s64 lm_clk_fp = 0;
	s64 lm_width_fp = 0;
	int rc = 0;

	if (!num_lm) {
		SDE_ERROR("invalid num_lm pointer\n");
		return -EINVAL;
	}

	/* default to 1 layer mixer */
	*num_lm = 1;

	if (!kms || !mode || !res) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);

	max_mdp_clock_hz = drm_int2fixp(sde_kms->perf.max_core_clk_rate);
	max_lm_width = drm_int2fixp(res->max_mixer_width);
	hdisplay_fp = drm_int2fixp(mode->hdisplay);
	htotal_fp = drm_int2fixp(mode->htotal);
	vtotal_fp = drm_int2fixp(mode->vtotal);
	vrefresh_fp = drm_int2fixp(drm_mode_vrefresh(mode));
	mdp_fudge_factor = drm_fixp_from_fraction(105, 100);

	/* mode clock = [(h * v * fps * 1.05) / (num_lm)] */
	mode_clock_hz = drm_fixp_mul(htotal_fp, vtotal_fp);
	mode_clock_hz = drm_fixp_mul(mode_clock_hz, vrefresh_fp);
	mode_clock_hz = drm_fixp_mul(mode_clock_hz, mdp_fudge_factor);
	if (mode_clock_hz > max_mdp_clock_hz ||
			hdisplay_fp > max_lm_width) {
		*num_lm = 0;
		do {
			*num_lm += 2;
			num_lm_fp = drm_int2fixp(*num_lm);
			lm_clk_fp = drm_fixp_div(mode_clock_hz, num_lm_fp);
			lm_width_fp = drm_fixp_div(hdisplay_fp, num_lm_fp);

			if (*num_lm > 4) {
				rc = -EINVAL;
				goto error;
			}

		} while (lm_clk_fp > max_mdp_clock_hz ||
				lm_width_fp > max_lm_width);

		mode_clock_hz = lm_clk_fp;
	}

	SDE_DEBUG("[%s] h=%d v=%d fps=%d lm=%d mode_clk=%lld max_clk=%llu\n",
			mode->name, mode->htotal, mode->vtotal, drm_mode_vrefresh(mode),
			*num_lm, drm_fixp2int(mode_clock_hz),
			sde_kms->perf.max_core_clk_rate);
	return 0;

error:
	SDE_ERROR("required mode clk exceeds max mdp clk\n");
	SDE_ERROR("[%s] h=%d v=%d fps=%d lm=%d mode_clk=%lld max_clk=%llu\n",
			mode->name, mode->htotal, mode->vtotal, drm_mode_vrefresh(mode),
			*num_lm, drm_fixp2int(mode_clock_hz),
			sde_kms->perf.max_core_clk_rate);
	return rc;
}

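/*
 * sde_kms_get_dsc_count - number of DSC encoders needed for a given width,
 * computed as hdisplay divided by the catalog's max_dsc_width, rounded up.
 */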
static int sde_kms_get_dsc_count(const struct msm_kms *kms,
		u32 hdisplay, u32 *num_dsc)
{
	struct sde_kms *sde_kms;
	uint32_t max_dsc_width;

	if (!num_dsc) {
		SDE_ERROR("invalid num_dsc pointer\n");
		return -EINVAL;
	}

	*num_dsc = 0;

	if (!kms || !hdisplay) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	max_dsc_width = sde_kms->catalog->max_dsc_width;
	*num_dsc = DIV_ROUND_UP(hdisplay, max_dsc_width);

	SDE_DEBUG("h=%d, max_dsc_width=%d, num_dsc=%d\n",
			hdisplay, max_dsc_width, *num_dsc);

	return 0;
}

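/*
 * _sde_kms_null_commit - build and commit an atomic state that re-applies
 * the connector/CRTC binding for the given encoder without changing the
 * visible configuration. Used to flush out a display left in continuous
 * splash before lastclose.
 */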
static int _sde_kms_null_commit(struct drm_device *dev,
		struct drm_encoder *enc)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state = NULL;
	int retry_cnt = 0;
	int ret = 0;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK && retry_cnt < SDE_KMS_MODESET_LOCK_MAX_TRIALS) {
		drm_modeset_backoff(&ctx);
		retry_cnt++;
		udelay(SDE_KMS_MODESET_LOCK_TIMEOUT_US);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		DRM_ERROR("failed to allocate atomic state, %d\n", ret);
		goto end;
	}

	state->acquire_ctx = &ctx;

	ret = sde_kms_set_crtc_for_conn(dev, enc, state);
	if (ret)
		goto end;

	ret = drm_atomic_commit(state);
	if (ret)
		SDE_ERROR("Error %d doing the atomic commit\n", ret);

end:
	if (state)
		drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

void sde_kms_display_early_wakeup(struct drm_device *dev,
		const int32_t connector_id)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *conn;
	struct drm_encoder *drm_enc;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		if (connector_id != DRM_MSM_WAKE_UP_ALL_DISPLAYS &&
				connector_id != conn->base.id)
			continue;

		if (conn->state && conn->state->best_encoder)
			drm_enc = conn->state->best_encoder;
		else
			drm_enc = conn->encoder;

		if (drm_enc)
			sde_encoder_early_wakeup(drm_enc);
	}
	drm_connector_list_iter_end(&conn_iter);
}

static int sde_kms_trigger_null_flush(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct sde_splash_display *splash_display;
	struct drm_crtc *crtc;
	int i, rc = 0;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);

	/* If splash handoff is done, return early */
	if (!sde_kms->splash_data.num_splash_displays)
		return 0;

	/* If all built-in displays have cont splash enabled, ignore lastclose */
	if (sde_kms->dsi_display_count == sde_kms->splash_data.num_splash_displays)
		return -EINVAL;

	/*
	 * Trigger a NULL flush if a built-in secondary/primary is stuck in
	 * splash while the primary/secondary is running, respectively,
	 * before lastclose.
	 */
	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
		splash_display = &sde_kms->splash_data.splash_display[i];

		if (splash_display->cont_splash_enabled && splash_display->encoder) {
			crtc = splash_display->encoder->crtc;
			SDE_DEBUG("triggering null commit on enc:%d\n",
					DRMID(splash_display->encoder));
			SDE_EVT32(DRMID(splash_display->encoder), SDE_EVTLOG_FUNC_ENTRY);
			rc = _sde_kms_null_commit(sde_kms->dev, splash_display->encoder);

			if (!rc && crtc)
				sde_kms_cancel_delayed_work(crtc);
			if (rc)
				DRM_ERROR("null flush commit failure during lastclose\n");
		}
	}

	return 0;
}

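/*
 * _sde_kms_pm_suspend_idle_helper - for every connector left in LP2, flush
 * the display thread, wait for the pending frame transfer to complete and
 * request idle power collapse on its encoder, then flush all display
 * threads.
 */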
static void _sde_kms_pm_suspend_idle_helper(struct sde_kms *sde_kms,
		struct device *dev)
{
	int ret, crtc_id = 0;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct msm_drm_private *priv = sde_kms->dev->dev_private;

	drm_connector_list_iter_begin(ddev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		uint64_t lp;

		lp = sde_connector_get_lp(conn);
		if (lp != SDE_MODE_DPMS_LP2)
			continue;

		if (sde_encoder_in_clone_mode(conn->encoder))
			continue;

		crtc_id = drm_crtc_index(conn->state->crtc);
		if (priv->disp_thread[crtc_id].thread)
			kthread_flush_worker(
					&priv->disp_thread[crtc_id].worker);

		ret = sde_encoder_wait_for_event(conn->encoder,
				MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR(
				"[conn: %d] wait for commit done returned %d\n",
				conn->base.id, ret);
		} else if (!ret) {
			if (priv->event_thread[crtc_id].thread)
				kthread_flush_worker(
						&priv->event_thread[crtc_id].worker);
			sde_encoder_idle_request(conn->encoder);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	msm_atomic_flush_display_threads(priv);
}

struct msm_display_mode *sde_kms_get_msm_mode(struct drm_connector_state *conn_state)
{
	struct sde_connector_state *sde_conn_state;

	if (!conn_state)
		return NULL;

	sde_conn_state = to_sde_connector_state(conn_state);
	return &sde_conn_state->msm_mode;
}

static int sde_kms_pm_suspend(struct device *dev)
{
	struct drm_device *ddev;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_connector *conn;
	struct drm_encoder *enc;
	struct drm_connector_list_iter conn_iter;
	struct drm_atomic_state *state = NULL;
	struct sde_kms *sde_kms;
	int ret = 0, num_crtcs = 0;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
	SDE_EVT32(0);

	/* disable hot-plug polling */
	drm_kms_helper_poll_disable(ddev);

	/*
	 * If any built-in display is stuck in CS, skip PM suspend entry to
	 * avoid driver SW state changes. With speculative fence enabled, HAL
	 * depends on the power_on notification for the first commit to exit
	 * the wait completion instead of the retire fence signal.
	 */
	drm_for_each_encoder(enc, ddev) {
		if (sde_encoder_in_cont_splash(enc) && enc->crtc) {
			SDE_DEBUG("skip PM suspend, splash is enabled on enc:%d\n", DRMID(enc));
			SDE_EVT32(DRMID(enc), SDE_EVTLOG_FUNC_EXIT);
			return -EINVAL;
		}
	}

	/* acquire modeset lock(s) */
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret)
		goto unlock;

	/* save current state for resume */
	if (sde_kms->suspend_state)
		drm_atomic_state_put(sde_kms->suspend_state);
	sde_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
	if (IS_ERR_OR_NULL(sde_kms->suspend_state)) {
		ret = PTR_ERR(sde_kms->suspend_state);
		DRM_ERROR("failed to back up suspend state, %d\n", ret);
		sde_kms->suspend_state = NULL;
		goto unlock;
	}

	/* create atomic state to disable all CRTCs */
	state = drm_atomic_state_alloc(ddev);
	if (!state) {
		ret = -ENOMEM;
		DRM_ERROR("failed to allocate crtc disable state, %d\n", ret);
		goto unlock;
	}

	state->acquire_ctx = &ctx;
	drm_connector_list_iter_begin(ddev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_crtc_state *crtc_state;
		uint64_t lp;

		if (!conn->state || !conn->state->crtc ||
				conn->dpms != DRM_MODE_DPMS_ON ||
				sde_encoder_in_clone_mode(conn->encoder))
			continue;

		lp = sde_connector_get_lp(conn);
		if (lp == SDE_MODE_DPMS_LP1) {
			/* transition LP1->LP2 on pm suspend */
			ret = sde_connector_set_property_for_commit(conn, state,
					CONNECTOR_PROP_LP, SDE_MODE_DPMS_LP2);
			if (ret) {
				DRM_ERROR("failed to set lp2 for conn %d\n",
						conn->base.id);
				drm_connector_list_iter_end(&conn_iter);
				goto unlock;
			}
		}

		if (lp != SDE_MODE_DPMS_LP2) {
			/* force CRTC to be inactive */
			crtc_state = drm_atomic_get_crtc_state(state,
					conn->state->crtc);
			if (IS_ERR_OR_NULL(crtc_state)) {
				DRM_ERROR("failed to get crtc %d state\n",
						conn->state->crtc->base.id);
				drm_connector_list_iter_end(&conn_iter);
				ret = -EINVAL;
				goto unlock;
			}

			if (lp != SDE_MODE_DPMS_LP1)
				crtc_state->active = false;
			++num_crtcs;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* check for nothing to do */
	if (num_crtcs == 0) {
		DRM_DEBUG("all crtcs are already in the off state\n");
		sde_kms->suspend_block = true;
		_sde_kms_pm_suspend_idle_helper(sde_kms, dev);
		goto unlock;
	}

	/* commit the "disable all" state */
	ret = drm_atomic_commit(state);
	if (ret < 0) {
		DRM_ERROR("failed to disable crtcs, %d\n", ret);
		goto unlock;
	}

	sde_kms->suspend_block = true;
	_sde_kms_pm_suspend_idle_helper(sde_kms, dev);

unlock:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	if ((ret || !num_crtcs) && sde_kms->suspend_state) {
		drm_atomic_state_put(sde_kms->suspend_state);
		sde_kms->suspend_state = NULL;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	/*
	 * The pm runtime driver avoids multiple runtime_suspend API calls by
	 * checking runtime_status. However, this call helps when there is a
	 * race condition between the pm_suspend call and a doze_suspend/
	 * power_off commit. It removes the extra vote from suspend and adds
	 * it back later to allow power collapse during the pm_suspend call.
	 */
	pm_runtime_put_sync(dev);
	pm_runtime_get_noresume(dev);

	/* dump clock state before entering suspend */
	if (sde_kms->pm_suspend_clk_dump)
		_sde_kms_dump_clks_state(sde_kms);

	return ret;
}

static int sde_kms_pm_resume(struct device *dev)
{
	struct drm_device *ddev;
	struct sde_kms *sde_kms;
	struct drm_encoder *enc;
	struct drm_modeset_acquire_ctx ctx;
	int ret, i;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));

	SDE_EVT32(sde_kms->suspend_state != NULL);

	/* if a display is still in continuous splash, exit early */
	drm_for_each_encoder(enc, ddev) {
		if (sde_encoder_in_cont_splash(enc) && enc->crtc) {
			SDE_DEBUG("skip PM resume, splash is enabled on enc:%d\n", DRMID(enc));
			SDE_EVT32(DRMID(enc), SDE_EVTLOG_FUNC_EXIT);
			return -EINVAL;
		}
	}

	if (sde_kms->suspend_state)
		drm_mode_config_reset(ddev);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	sde_kms->suspend_block = false;

	if (sde_kms->suspend_state) {
		sde_kms->suspend_state->acquire_ctx = &ctx;
		for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
			ret = drm_atomic_helper_commit_duplicated_state(
					sde_kms->suspend_state, &ctx);
			if (ret != -EDEADLK)
				break;

			drm_modeset_backoff(&ctx);
		}

		if (ret < 0)
			DRM_ERROR("failed to restore state, %d\n", ret);

		drm_atomic_state_put(sde_kms->suspend_state);
		sde_kms->suspend_state = NULL;
	}

end:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	/* enable hot-plug polling */
	drm_kms_helper_poll_enable(ddev);

	return 0;
}

static const struct msm_kms_funcs kms_funcs = {
	.hw_init         = sde_kms_hw_init,
	.postinit        = sde_kms_postinit,
	.irq_preinstall  = sde_irq_preinstall,
	.irq_postinstall = sde_irq_postinstall,
	.irq_uninstall   = sde_irq_uninstall,
	.irq             = sde_irq,
	.preclose        = sde_kms_preclose,
	.lastclose       = sde_kms_lastclose,
	.prepare_fence   = sde_kms_prepare_fence,
	.prepare_commit  = sde_kms_prepare_commit,
	.commit          = sde_kms_commit,
	.complete_commit = sde_kms_complete_commit,
	.get_msm_mode    = sde_kms_get_msm_mode,
	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
	.wait_for_tx_complete = sde_kms_wait_for_frame_transfer_complete,
	.check_modified_format = sde_format_check_modified_format,
	.atomic_check    = sde_kms_atomic_check,
	.get_format      = sde_get_msm_format,
	.round_pixclk    = sde_kms_round_pixclk,
	.display_early_wakeup = sde_kms_display_early_wakeup,
	.pm_suspend      = sde_kms_pm_suspend,
	.pm_resume       = sde_kms_pm_resume,
	.destroy         = sde_kms_destroy,
	.debugfs_destroy = sde_kms_debugfs_destroy,
	.cont_splash_config = sde_kms_cont_splash_config,
	.register_events = _sde_kms_register_events,
	.get_address_space = _sde_kms_get_address_space,
	.get_address_space_device = _sde_kms_get_address_space_device,
	.postopen        = _sde_kms_post_open,
	.check_for_splash = sde_kms_check_for_splash,
	.trigger_null_flush = sde_kms_trigger_null_flush,
	.get_mixer_count = sde_kms_get_mixer_count,
	.get_dsc_count   = sde_kms_get_dsc_count,
};

static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
{
	int i;

	for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
		if (!sde_kms->aspace[i])
			continue;

		msm_gem_address_space_put(sde_kms->aspace[i]);
		sde_kms->aspace[i] = NULL;
	}

	return 0;
}

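/*
 * _sde_kms_mmu_init - create and attach an address space for each SMMU
 * domain. On the unsecure domain this also maps any reserved splash regions
 * and, when HW fences are supported, the IPCC register range. Early-map,
 * enabled via device-tree for continuous splash, is turned off (or S1
 * translations enabled on newer kernels) once mapping is done.
 */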
static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
{
	struct msm_mmu *mmu;
	struct resource *res;
	struct platform_device *pdev;
	int i, ret;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
	int early_map = 0;
#endif

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_gem_address_space *aspace;

		mmu = msm_smmu_new(sde_kms->dev->dev, i);
		if (IS_ERR(mmu)) {
			ret = PTR_ERR(mmu);
			SDE_DEBUG("failed to init iommu id %d: rc:%d\n",
					i, ret);
			continue;
		}

		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
				mmu, "sde");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			mmu->funcs->destroy(mmu);
			goto fail;
		}

		sde_kms->aspace[i] = aspace;
		aspace->domain_attached = true;

		/* Mapping splash memory block */
		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
				sde_kms->splash_data.num_splash_regions) {
			ret = _sde_kms_map_all_splash_regions(sde_kms);
			if (ret) {
				SDE_ERROR("failed to map ret:%d\n", ret);
				goto enable_trans_fail;
			}
		}

		if (i == MSM_SMMU_DOMAIN_UNSECURE && sde_kms->catalog->hw_fence_rev) {
			pdev = to_platform_device(sde_kms->dev->dev);
			res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipcc_reg");
			if (!res) {
				SDE_DEBUG("failed to get resource ipcc_reg, cannot map ipcc\n");
				sde_kms->catalog->hw_fence_rev = 0;
			} else {
				sde_kms->ipcc_base_addr = res->start;
				ret = _sde_kms_one2one_mem_map_ipcc_reg(sde_kms, resource_size(res),
						HW_FENCE_IPCC_PROTOCOLp_CLIENTc(res->start,
						sde_kms->catalog->ipcc_protocol_id,
						HW_FENCE_IPCC_CLIENT_DPU));
				/* if mapping fails disable hw-fences */
				if (ret)
					sde_kms->catalog->hw_fence_rev = 0;
			}
		}

		/*
		 * disable early-map which would have been enabled during
		 * bootup by smmu through the device-tree hint for cont-splash
		 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
		ret = mmu->funcs->enable_smmu_translations(mmu);
		if (ret) {
			SDE_ERROR("failed to enable_s1_translations ret:%d\n", ret);
			goto enable_trans_fail;
		}
#else
		ret = mmu->funcs->set_attribute(mmu, DOMAIN_ATTR_EARLY_MAP,
				&early_map);
		if (ret) {
			SDE_ERROR("failed to set_att ret:%d, early_map:%d\n",
					ret, early_map);
			goto enable_trans_fail;
		}
#endif
	}

	sde_kms->base.aspace = sde_kms->aspace[0];

	return 0;

enable_trans_fail:
	_sde_kms_unmap_all_splash_regions(sde_kms);
fail:
	_sde_kms_mmu_destroy(sde_kms);

	return ret;
}

static void sde_kms_init_rot_sid_hw(struct sde_kms *sde_kms)
{
	if (!sde_kms || !sde_kms->hw_sid || sde_in_trusted_vm(sde_kms))
		return;

	sde_hw_set_rotator_sid(sde_kms->hw_sid);
}

static void sde_kms_init_hw_fences(struct sde_kms *sde_kms)
{
	if (!sde_kms || !sde_kms->hw_mdp)
		return;

	if (sde_kms->hw_mdp->ops.setup_hw_fences)
		sde_kms->hw_mdp->ops.setup_hw_fences(sde_kms->hw_mdp,
				sde_kms->catalog->ipcc_protocol_id, sde_kms->ipcc_base_addr);
}

static void sde_kms_init_shared_hw(struct sde_kms *sde_kms)
{
	if (!sde_kms || !sde_kms->hw_mdp || !sde_kms->catalog)
		return;

	if (sde_kms->hw_mdp->ops.reset_ubwc)
		sde_kms->hw_mdp->ops.reset_ubwc(sde_kms->hw_mdp,
				sde_kms->catalog);
}

static void _sde_kms_set_lutdma_vbif_remap(struct sde_kms *sde_kms)
{
	struct sde_vbif_set_qos_params qos_params;
	struct sde_mdss_cfg *catalog;

	if (!sde_kms->catalog)
		return;

	catalog = sde_kms->catalog;

	memset(&qos_params, 0, sizeof(qos_params));
	qos_params.vbif_idx = catalog->dma_cfg.vbif_idx;
	qos_params.xin_id = catalog->dma_cfg.xin_id;
	qos_params.clk_ctrl = catalog->dma_cfg.clk_ctrl;
	qos_params.client_type = VBIF_LUTDMA_CLIENT;

	sde_vbif_set_qos_remap(sde_kms, &qos_params);
}

static int _sde_kms_active_override(struct sde_kms *sde_kms, bool enable)
{
	struct sde_hw_uidle *uidle;

	if (!sde_kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	uidle = sde_kms->hw_uidle;

	if (uidle && uidle->ops.active_override_enable)
		uidle->ops.active_override_enable(uidle, enable);

	return 0;
}

static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms)
{
	struct device *cpu_dev;
	int cpu = 0;
	u32 cpu_irq_latency = sde_kms->catalog->perf.cpu_irq_latency;

	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
		return;
	}

	for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
					cpu);
			continue;
		}

		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
			dev_pm_qos_update_request(&sde_kms->pm_qos_irq_req[cpu],
					cpu_irq_latency);
		else
			dev_pm_qos_add_request(cpu_dev,
					&sde_kms->pm_qos_irq_req[cpu],
					DEV_PM_QOS_RESUME_LATENCY,
					cpu_irq_latency);
	}
}

static void _sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms)
{
	struct device *cpu_dev;
	int cpu = 0;

	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
		return;
	}

	for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
					cpu);
			continue;
		}

		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
			dev_pm_qos_remove_request(
					&sde_kms->pm_qos_irq_req[cpu]);
	}
}

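/*
 * sde_kms_cpu_vote_for_irq - reference-counted PM QoS vote that caps CPU
 * resume latency on the CPUs currently servicing the SDE interrupt. The
 * first enable adds the request and the last disable removes it; the vote
 * is re-applied from the IRQ affinity notifier when the mask changes.
 */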
void sde_kms_cpu_vote_for_irq(struct sde_kms *sde_kms, bool enable)
{
	struct msm_drm_private *priv = sde_kms->dev->dev_private;

	mutex_lock(&priv->phandle.phandle_lock);
	if (enable && atomic_inc_return(&sde_kms->irq_vote_count) == 1)
		_sde_kms_update_pm_qos_irq_request(sde_kms);
	else if (!enable && atomic_dec_return(&sde_kms->irq_vote_count) == 0)
		_sde_kms_remove_pm_qos_irq_request(sde_kms);
	mutex_unlock(&priv->phandle.phandle_lock);
}

static void sde_kms_irq_affinity_notify(
		struct irq_affinity_notify *affinity_notify,
		const cpumask_t *mask)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms = container_of(affinity_notify,
			struct sde_kms, affinity_notify);

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
		return;

	priv = sde_kms->dev->dev_private;

	mutex_lock(&priv->phandle.phandle_lock);
	_sde_kms_remove_pm_qos_irq_request(sde_kms);

	/* save irq cpu mask */
	sde_kms->irq_cpu_mask = *mask;

	/* request vote with updated irq cpu mask */
	if (atomic_read(&sde_kms->irq_vote_count))
		_sde_kms_update_pm_qos_irq_request(sde_kms);

	mutex_unlock(&priv->phandle.phandle_lock);
}

static void sde_kms_irq_affinity_release(struct kref *ref) {}

static void sde_kms_handle_power_event(u32 event_type, void *usr)
{
	struct sde_kms *sde_kms = usr;
	struct msm_kms *msm_kms;

	if (!sde_kms)
		return;

	msm_kms = &sde_kms->base;

	SDE_DEBUG("event_type:%d\n", event_type);
	SDE_EVT32_VERBOSE(event_type);

	if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
		sde_irq_update(msm_kms, true);
		sde_kms->first_kickoff = true;

		/*
		 * Rotator sid and hw fences need to be programmed since uefi
		 * doesn't configure them during continuous splash
		 */
		sde_kms_init_rot_sid_hw(sde_kms);
		sde_kms_init_hw_fences(sde_kms);

		if (sde_kms->splash_data.num_splash_displays ||
				sde_in_trusted_vm(sde_kms))
			return;

		sde_vbif_init_memtypes(sde_kms);
		sde_kms_init_shared_hw(sde_kms);
		_sde_kms_set_lutdma_vbif_remap(sde_kms);
	} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
		sde_irq_update(msm_kms, false);
		sde_kms->first_kickoff = false;

		if (sde_in_trusted_vm(sde_kms))
			return;

		_sde_kms_active_override(sde_kms, true);
		if (!is_sde_rsc_available(SDE_RSC_INDEX))
			sde_vbif_axi_halt_request(sde_kms);
	}
}

#define genpd_to_sde_kms(domain) container_of(domain, struct sde_kms, genpd)

static int sde_kms_pd_enable(struct generic_pm_domain *genpd)
{
	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
	int rc = -EINVAL;

	SDE_DEBUG("\n");

	rc = pm_runtime_resume_and_get(sde_kms->dev->dev);
	rc = (rc > 0) ? 0 : rc;

	SDE_EVT32(rc, genpd->device_count);

	return rc;
}

static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
{
	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);

	SDE_DEBUG("\n");

	pm_runtime_put_sync(sde_kms->dev->dev);

	SDE_EVT32(genpd->device_count);

	return 0;
}

static int _sde_kms_get_demura_plane_data(struct sde_splash_data *data)
{
	int i = 0;
	int ret = 0;
	int count = 0;
	struct device_node *parent, *node;
	struct resource r;
	char node_name[DEMURA_REGION_NAME_MAX];
	struct sde_splash_mem *mem;
	struct sde_splash_display *splash_display;

	if (!data->num_splash_displays) {
		SDE_DEBUG("no splash displays. skipping\n");
		return 0;
	}

	/*
	 * It is expected that each active demura block will have
	 * its own memory region defined.
	 */
	parent = of_find_node_by_path("/reserved-memory");
	for (i = 0; i < data->num_splash_displays; i++) {
		splash_display = &data->splash_display[i];
		snprintf(&node_name[0], DEMURA_REGION_NAME_MAX,
				"demura_region_%d", i);

		splash_display->demura = NULL;
		node = of_find_node_by_name(parent, node_name);
		if (!node) {
			SDE_DEBUG("no Demura node %s! disp count: %d\n",
					node_name, data->num_splash_displays);
			continue;
		} else if (of_address_to_resource(node, 0, &r)) {
			SDE_ERROR("invalid data for:%s\n", node_name);
			ret = -EINVAL;
			break;
		}

		mem = &data->demura_mem[i];
		mem->splash_buf_base = (unsigned long)r.start;
		mem->splash_buf_size = (r.end - r.start) + 1;

		if (!mem->splash_buf_base && !mem->splash_buf_size) {
			SDE_DEBUG("dummy splash mem for disp %d. Skipping\n",
					(i + 1));
			continue;
		} else if (!mem->splash_buf_base || !mem->splash_buf_size) {
			SDE_ERROR("mem for disp %d invalid: add:%lx size:%lx\n",
					(i + 1), mem->splash_buf_base,
					mem->splash_buf_size);
			continue;
		}

		mem->ref_cnt = 0;
		splash_display->demura = mem;
		count++;
		SDE_DEBUG("demura mem for disp:%d add:%lx size:%x\n", (i + 1),
				mem->splash_buf_base,
				mem->splash_buf_size);
	}

	if (!ret && !count)
		SDE_DEBUG("no demura regions for cont. splash found!\n");

	return ret;
}

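/*
 * _sde_kms_get_splash_data - parse the reserved-memory nodes describing the
 * bootloader splash (and optional ramdump/demura) regions. A single region
 * may be shared by all built-in displays when fewer regions than displays
 * are declared.
 */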
static int _sde_kms_get_splash_data(struct sde_splash_data *data)
{
	int i = 0;
	int ret = 0;
	struct device_node *parent, *node, *node1;
	struct resource r, r1;
	const char *node_name = "splash_region";
	struct sde_splash_mem *mem;
	bool share_splash_mem = false;
	int num_displays, num_regions;
	struct sde_splash_display *splash_display;

	if (!data)
		return -EINVAL;

	memset(data, 0, sizeof(*data));

	parent = of_find_node_by_path("/reserved-memory");
	if (!parent) {
		SDE_ERROR("failed to find reserved-memory node\n");
		return -EINVAL;
	}

	node = of_find_node_by_name(parent, node_name);
	if (!node) {
		SDE_DEBUG("failed to find node %s\n", node_name);
		return -EINVAL;
	}

	node1 = of_find_node_by_name(NULL, "disp_rdump_region");
	if (!node1)
		SDE_DEBUG("failed to find disp ramdump memory reservation\n");

	/*
	 * Support sharing a single splash memory for all the built-in
	 * displays as well as an independent splash region per display.
	 * In case of an independent splash region for each connected
	 * display, the dtsi node of cont_splash_region should be a
	 * collection of all memory regions:
	 * Ex: <r1.start r1.end r2.start r2.end ... rn.start rn.end>
	 */
	num_displays = dsi_display_get_num_of_displays();
	num_regions = of_property_count_u64_elems(node, "reg") / 2;

	data->num_splash_displays = num_displays;

	SDE_DEBUG("splash mem num_regions:%d\n", num_regions);
	if (num_displays > num_regions) {
		share_splash_mem = true;
		pr_info(":%d displays share same splash buf\n", num_displays);
	}

	for (i = 0; i < num_displays; i++) {
		splash_display = &data->splash_display[i];
		if (!i || !share_splash_mem) {
			if (of_address_to_resource(node, i, &r)) {
				SDE_ERROR("invalid data for:%s\n", node_name);
				return -EINVAL;
			}

			mem = &data->splash_mem[i];
			if (!node1 || of_address_to_resource(node1, i, &r1)) {
				SDE_DEBUG("failed to find ramdump memory\n");
				mem->ramdump_base = 0;
				mem->ramdump_size = 0;
			} else {
				mem->ramdump_base = (unsigned long)r1.start;
				mem->ramdump_size = (r1.end - r1.start) + 1;
			}

			mem->splash_buf_base = (unsigned long)r.start;
			mem->splash_buf_size = (r.end - r.start) + 1;
			mem->ref_cnt = 0;
			splash_display->splash = mem;
			data->num_splash_regions++;
		} else {
			data->splash_display[i].splash = &data->splash_mem[0];
		}

		SDE_DEBUG("splash mem for disp:%d add:%lx size:%x\n", (i + 1),
				splash_display->splash->splash_buf_base,
				splash_display->splash->splash_buf_size);
	}

	data->type = SDE_SPLASH_HANDOFF;
	ret = _sde_kms_get_demura_plane_data(data);

	return ret;
}

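/*
 * _sde_kms_hw_init_ioremap - map the MDP, VBIF, REGDMA and SID register
 * regions and register each mapped base with the sde_dbg framework. VBIF
 * NRT, REGDMA and SID are optional; their absence is only logged.
 */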
static int _sde_kms_hw_init_ioremap(struct sde_kms *sde_kms,
		struct platform_device *platformdev)
{
	int rc = -EINVAL;

	sde_kms->mmio = msm_ioremap(platformdev, "mdp_phys", "mdp_phys");
	if (IS_ERR(sde_kms->mmio)) {
		rc = PTR_ERR(sde_kms->mmio);
		SDE_ERROR("mdp register memory map failed: %d\n", rc);
		sde_kms->mmio = NULL;
		goto error;
	}

	DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio);
	sde_kms->mmio_len = msm_iomap_size(platformdev, "mdp_phys");

	rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
			sde_kms->mmio_len,
			msm_get_phys_addr(platformdev, "mdp_phys"),
			SDE_DBG_SDE);
	if (rc)
		SDE_ERROR("dbg base register kms failed: %d\n", rc);

	sde_kms->vbif[VBIF_RT] = msm_ioremap(platformdev, "vbif_phys", "vbif_phys");
	if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
		SDE_ERROR("vbif register memory map failed: %d\n", rc);
		sde_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}

	sde_kms->vbif_len[VBIF_RT] = msm_iomap_size(platformdev, "vbif_phys");
	rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
			sde_kms->vbif_len[VBIF_RT],
			msm_get_phys_addr(platformdev, "vbif_phys"),
			SDE_DBG_VBIF_RT);
	if (rc)
		SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);

	sde_kms->vbif[VBIF_NRT] = msm_ioremap(platformdev, "vbif_nrt_phys", "vbif_nrt_phys");
	if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
		sde_kms->vbif[VBIF_NRT] = NULL;
		SDE_DEBUG("VBIF NRT is not defined");
	} else {
		sde_kms->vbif_len[VBIF_NRT] = msm_iomap_size(platformdev, "vbif_nrt_phys");
	}

	sde_kms->reg_dma = msm_ioremap(platformdev, "regdma_phys", "regdma_phys");
	if (IS_ERR(sde_kms->reg_dma)) {
		sde_kms->reg_dma = NULL;
		SDE_DEBUG("REG_DMA is not defined");
	} else {
		unsigned long mdp_addr = msm_get_phys_addr(platformdev, "mdp_phys");

		sde_kms->reg_dma_len = msm_iomap_size(platformdev, "regdma_phys");
		sde_kms->reg_dma_off = msm_get_phys_addr(platformdev, "regdma_phys") - mdp_addr;
		rc = sde_dbg_reg_register_base("reg_dma", sde_kms->reg_dma,
				sde_kms->reg_dma_len,
				msm_get_phys_addr(platformdev, "regdma_phys"),
				SDE_DBG_LUTDMA);
		if (rc)
			SDE_ERROR("dbg base register reg_dma failed: %d\n", rc);
	}

	sde_kms->sid = msm_ioremap(platformdev, "sid_phys", "sid_phys");
	if (IS_ERR(sde_kms->sid)) {
		SDE_DEBUG("sid register is not defined: %ld\n",
				PTR_ERR(sde_kms->sid));
		sde_kms->sid = NULL;
	} else {
		sde_kms->sid_len = msm_iomap_size(platformdev, "sid_phys");
		rc = sde_dbg_reg_register_base("sid", sde_kms->sid,
				sde_kms->sid_len,
				msm_get_phys_addr(platformdev, "sid_phys"),
				SDE_DBG_SID);
		if (rc)
			SDE_ERROR("dbg base register sid failed: %d\n", rc);
	}

error:
	return rc;
}

static int _sde_kms_hw_init_power_helper(struct drm_device *dev,
	struct sde_kms *sde_kms)
{
	int rc = 0;

	if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) {
		sde_kms->genpd.name = dev->unique;
		sde_kms->genpd.power_off = sde_kms_pd_disable;
		sde_kms->genpd.power_on = sde_kms_pd_enable;

		rc = pm_genpd_init(&sde_kms->genpd, NULL, true);
		if (rc < 0) {
			SDE_ERROR("failed to init genpd provider %s: %d\n",
					sde_kms->genpd.name, rc);
			return rc;
		}

		rc = of_genpd_add_provider_simple(dev->dev->of_node,
				&sde_kms->genpd);
		if (rc < 0) {
			SDE_ERROR("failed to add genpd provider %s: %d\n",
					sde_kms->genpd.name, rc);
			pm_genpd_remove(&sde_kms->genpd);
			return rc;
		}

		sde_kms->genpd_init = true;
		SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
	}

	return rc;
}
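
/*
 * _sde_kms_hw_init_blocks - bring up the SDE hardware sub-blocks in
 * dependency order: the hardware catalog first (everything else sizes
 * itself from it), then the SMMU and the reg-dma singleton, the resource
 * manager, the interrupt block, vbif/uidle/sid, core perf, and finally
 * the DRM mode objects on top of the initialized hardware. The splash
 * handling in the middle reclaims any continuous-splash resources that
 * cannot be handed off.
 */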
static int _sde_kms_hw_init_blocks(struct sde_kms *sde_kms,
	struct drm_device *dev,
	struct msm_drm_private *priv)
{
	struct sde_rm *rm = NULL;
	int i, rc = -EINVAL;

	sde_kms->catalog = sde_hw_catalog_init(dev);
	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
		rc = PTR_ERR(sde_kms->catalog);
		if (!sde_kms->catalog)
			rc = -EINVAL;
		SDE_ERROR("catalog init failed: %d\n", rc);
		sde_kms->catalog = NULL;
		goto power_error;
	}
	sde_kms->core_rev = sde_kms->catalog->hw_rev;
	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);

	/* initialize power domain if defined */
	rc = _sde_kms_hw_init_power_helper(dev, sde_kms);
	if (rc) {
		SDE_ERROR("_sde_kms_hw_init_power_helper failed: %d\n", rc);
		goto genpd_err;
	}

	rc = _sde_kms_mmu_init(sde_kms);
	if (rc) {
		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	/* initialize the reg dma block, which is a singleton */
	sde_kms->catalog->dma_cfg.base_off = sde_kms->reg_dma_off;
	rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
			sde_kms->dev);
	if (rc) {
		SDE_ERROR("reg dma init failed: %d\n", rc);
		goto power_error;
	}

	sde_dbg_init_dbg_buses(sde_kms->core_rev);

	rm = &sde_kms->rm;
	rc = sde_rm_init(rm, sde_kms->catalog, sde_kms->mmio,
			sde_kms->dev);
	if (rc) {
		SDE_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}
	sde_kms->rm_init = true;

	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
		rc = PTR_ERR(sde_kms->hw_intr);
		SDE_ERROR("hw_intr init failed: %d\n", rc);
		sde_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	/*
	 * Attempt continuous splash handoff only if reserved splash memory
	 * is found, and release the resources on any error while locating
	 * the display hw config in splash.
	 */
	if (sde_kms->splash_data.num_splash_regions) {
		struct sde_splash_display *display;
		int ret, display_count =
			sde_kms->splash_data.num_splash_displays;

		ret = sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
				&sde_kms->splash_data, sde_kms->catalog);

		for (i = 0; i < display_count; i++) {
			display = &sde_kms->splash_data.splash_display[i];
			/*
			 * free the splash region on resource init failure
			 * or when cont-splash is disabled for the display
			 */
			if (!display->cont_splash_enabled || ret)
				_sde_kms_free_splash_display_data(
						sde_kms, display);
		}
	}

	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
		rc = PTR_ERR(sde_kms->hw_mdp);
		if (!sde_kms->hw_mdp)
			rc = -EINVAL;
		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
		sde_kms->hw_mdp = NULL;
		goto power_error;
	}

	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = sde_kms->catalog->vbif[i].id;

		sde_kms->hw_vbif[vbif_idx] = sde_hw_vbif_init(vbif_idx,
				sde_kms->vbif[vbif_idx], sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
			if (!sde_kms->hw_vbif[vbif_idx])
				rc = -EINVAL;
			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			sde_kms->hw_vbif[vbif_idx] = NULL;
			goto power_error;
		}
	}

	if (sde_kms->catalog->uidle_cfg.uidle_rev) {
		sde_kms->hw_uidle = sde_hw_uidle_init(UIDLE, sde_kms->mmio,
				sde_kms->mmio_len, sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_uidle)) {
			rc = PTR_ERR(sde_kms->hw_uidle);
			if (!sde_kms->hw_uidle)
				rc = -EINVAL;
			/* uidle is optional, so do not make it a fatal error */
			SDE_ERROR("failed to init uidle rc:%d\n", rc);
			sde_kms->hw_uidle = NULL;
			rc = 0;
		}
	} else {
		sde_kms->hw_uidle = NULL;
	}

	if (sde_kms->sid) {
		sde_kms->hw_sid = sde_hw_sid_init(sde_kms->sid,
				sde_kms->sid_len, sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_sid)) {
			rc = PTR_ERR(sde_kms->hw_sid);
			SDE_ERROR("failed to init sid %d\n", rc);
			sde_kms->hw_sid = NULL;
			goto power_error;
		}
	}

	rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
			&priv->phandle, "core_clk");
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	/*
	 * Set the disable_immediate flag when the driver supports precise
	 * vsync timestamps, as the DRM hooks for vblank timestamps/counters
	 * are chosen based on this feature.
	 */
	if (test_bit(SDE_FEATURE_HW_VSYNC_TS, sde_kms->catalog->features))
		dev->vblank_disable_immediate = true;

	/*
	 * _sde_kms_drm_obj_init should create the DRM related objects,
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _sde_kms_drm_obj_init(sde_kms);
	if (rc) {
		SDE_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	return 0;

genpd_err:
drm_obj_init_err:
	sde_core_perf_destroy(&sde_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
	return rc;
}
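
/*
 * _sde_kms_get_tvm_inclusion_mem - build msm_io_mem_entry nodes for each
 * tvm_reg resource in the catalog. Entries are collected on a temporary
 * list and spliced onto @mem_list only once every allocation has
 * succeeded, so a mid-loop -ENOMEM leaves @mem_list untouched.
 */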
int _sde_kms_get_tvm_inclusion_mem(struct sde_mdss_cfg *catalog, struct list_head *mem_list)
{
	struct list_head temp_head;
	struct msm_io_mem_entry *io_mem;
	int rc, i = 0;

	INIT_LIST_HEAD(&temp_head);

	for (i = 0; i < catalog->tvm_reg_count; i++) {
		struct resource *res = &catalog->tvm_reg[i];

		io_mem = kzalloc(sizeof(struct msm_io_mem_entry), GFP_KERNEL);
		if (!io_mem) {
			rc = -ENOMEM;
			goto parse_fail;
		}

		io_mem->base = res->start;
		io_mem->size = resource_size(res);

		list_add(&io_mem->list, &temp_head);
	}

	list_splice(&temp_head, mem_list);

	return 0;

parse_fail:
	msm_dss_clean_io_mem(&temp_head);

	return rc;
}
#ifdef CONFIG_DRM_SDE_VM
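
/*
 * sde_kms_get_io_resources - gather the io memory and irq ranges relevant
 * to a trusted-VM transition: the KMS and pmic register spaces, the SDE
 * irq, and any additional tvm inclusion ranges listed in the catalog.
 */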
int sde_kms_get_io_resources(struct sde_kms *sde_kms, struct msm_io_res *io_res)
{
	struct platform_device *pdev = to_platform_device(sde_kms->dev->dev);
	int rc = 0;

	rc = msm_dss_get_io_mem(pdev, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get io mem for KMS, rc = %d\n", rc);
		return rc;
	}

	rc = msm_dss_get_pmic_io_mem(pdev, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get io mem for pmic, rc:%d\n", rc);
		return rc;
	}

	rc = msm_dss_get_io_irq(pdev, &io_res->irq, GH_IRQ_LABEL_SDE);
	if (rc) {
		SDE_ERROR("failed to get io irq for KMS, rc:%d\n", rc);
		return rc;
	}

	rc = _sde_kms_get_tvm_inclusion_mem(sde_kms->catalog, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get tvm inclusion mem ranges, rc:%d\n", rc);
		return rc;
	}

	return rc;
}
#endif
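
/*
 * sde_kms_hw_init - top-level hardware init, reached through the kms_funcs
 * table: map the register spaces, pick up any continuous-splash data,
 * initialize the hardware blocks and DRM mode objects, then register the
 * irq affinity notifier and the VM ops for the primary or trusted VM.
 * Any failure after the ioremap tears the partial state down through
 * _sde_kms_hw_destroy().
 */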
static int sde_kms_hw_init(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct platform_device *platformdev;
	int irq_num, rc = -EINVAL;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		goto end;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->dev) {
		SDE_ERROR("invalid device\n");
		goto end;
	}
	platformdev = to_platform_device(dev->dev);

	priv = dev->dev_private;
	if (!priv) {
		SDE_ERROR("invalid private data\n");
		goto end;
	}

	rc = _sde_kms_hw_init_ioremap(sde_kms, platformdev);
	if (rc)
		goto error;

	rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
	if (rc)
		SDE_DEBUG("sde splash data fetch failed: %d\n", rc);

	rc = _sde_kms_hw_init_blocks(sde_kms, dev, priv);
	if (rc)
		goto error;

	dev->mode_config.min_width = sde_kms->catalog->min_display_width;
	dev->mode_config.min_height = sde_kms->catalog->min_display_height;
	dev->mode_config.max_width = sde_kms->catalog->max_display_width;
	dev->mode_config.max_height = sde_kms->catalog->max_display_height;

	mutex_init(&sde_kms->secure_transition_lock);

	atomic_set(&sde_kms->detach_sec_cb, 0);
	atomic_set(&sde_kms->detach_all_cb, 0);
	atomic_set(&sde_kms->irq_vote_count, 0);

	/* support format modifiers for compression etc. */
	dev->mode_config.allow_fb_modifiers = true;

	sde_kms->affinity_notify.notify = sde_kms_irq_affinity_notify;
	sde_kms->affinity_notify.release = sde_kms_irq_affinity_release;

	irq_num = platform_get_irq(platformdev, 0);
	SDE_DEBUG("Registering for notification of irq_num: %d\n", irq_num);
	irq_set_affinity_notifier(irq_num, &sde_kms->affinity_notify);

	if (sde_in_trusted_vm(sde_kms)) {
		rc = sde_vm_trusted_init(sde_kms);
		sde_dbg_set_hw_ownership_status(false);
	} else {
		rc = sde_vm_primary_init(sde_kms);
		sde_dbg_set_hw_ownership_status(true);
	}
	if (rc) {
		SDE_ERROR("failed to initialize VM ops, rc: %d\n", rc);
		goto error;
	}

	return 0;

error:
	_sde_kms_hw_destroy(sde_kms, platformdev);
end:
	return rc;
}
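
/*
 * sde_kms_init - allocate a struct sde_kms for @dev and hook up the
 * msm_kms base with the kms_funcs table; the actual hardware bring-up is
 * deferred to sde_kms_hw_init().
 */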
struct msm_kms *sde_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;

	if (!dev || !dev->dev_private) {
		SDE_ERROR("drm device node invalid\n");
		return ERR_PTR(-EINVAL);
	}

	priv = dev->dev_private;

	sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
	if (!sde_kms) {
		SDE_ERROR("failed to allocate sde kms\n");
		return ERR_PTR(-ENOMEM);
	}

	msm_kms_init(&sde_kms->base, &kms_funcs);
	sde_kms->dev = dev;

	return &sde_kms->base;
}
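
/*
 * sde_kms_vm_trusted_resource_deinit - undo a trusted-VM handoff: release
 * the splash data still held for each dsi display, mark every display
 * inactive and clear the splash bookkeeping.
 */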
void sde_kms_vm_trusted_resource_deinit(struct sde_kms *sde_kms)
{
	struct dsi_display *display;
	struct sde_splash_display *handoff_display;
	int i;

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		handoff_display = &sde_kms->splash_data.splash_display[i];
		display = (struct dsi_display *)sde_kms->dsi_displays[i];

		if (handoff_display->cont_splash_enabled)
			_sde_kms_free_splash_display_data(sde_kms,
					handoff_display);
		dsi_display_set_active_state(display, false);
	}

	memset(&sde_kms->splash_data, 0, sizeof(struct sde_splash_data));
}
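
/*
 * sde_kms_vm_trusted_resource_init - claim the resources of the display
 * handed over to the trusted VM and replay its splash configuration into
 * @state. Exactly one splash display is supported on this path, and the
 * runtime PM vote taken here is dropped on the first successful commit.
 */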
int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms,
	struct drm_atomic_state *state)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_splash_display *handoff_display;
	struct dsi_display *display;
	int ret, i;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;

	sde_kms->splash_data.type = SDE_VM_HANDOFF;
	sde_kms->splash_data.num_splash_displays = sde_kms->dsi_display_count;

	ret = sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
			&sde_kms->splash_data, sde_kms->catalog);
	if (ret) {
		SDE_ERROR("invalid cont splash init, ret:%d\n", ret);
		return -EINVAL;
	}

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		handoff_display = &sde_kms->splash_data.splash_display[i];
		display = (struct dsi_display *)sde_kms->dsi_displays[i];

		if (!handoff_display->cont_splash_enabled || ret)
			_sde_kms_free_splash_display_data(sde_kms,
					handoff_display);
		else
			dsi_display_set_active_state(display, true);
	}

	if (sde_kms->splash_data.num_splash_displays != 1) {
		SDE_ERROR("no. of displays not supported:%d\n",
				sde_kms->splash_data.num_splash_displays);
		ret = -EINVAL;
		goto error;
	}

	ret = sde_kms_cont_splash_config(&sde_kms->base, state);
	if (ret) {
		SDE_ERROR("error in setting handoff configs\n");
		goto error;
	}

	/*
	 * fill in a vote for the continuous splash handoff path, which is
	 * removed on the first successful commit.
	 */
	ret = pm_runtime_resume_and_get(sde_kms->dev->dev);
	if (ret < 0) {
		SDE_ERROR("failed to enable power resource %d\n", ret);
		SDE_EVT32(ret, SDE_EVTLOG_ERROR);
		goto error;
	}

	return 0;

error:
	return ret;
}
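
/*
 * _sde_kms_register_events - register or unregister a custom event on a
 * CRTC or connector, failing with -EACCES while the hardware is owned by
 * another VM. Objects of any other type are silently ignored (ret stays 0).
 */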
static int _sde_kms_register_events(struct msm_kms *kms,
	struct drm_mode_object *obj, u32 event, bool en)
{
	int ret = 0;
	struct drm_crtc *crtc;
	struct drm_connector *conn;
	struct sde_kms *sde_kms;

	if (!kms || !obj) {
		SDE_ERROR("invalid argument kms %pK obj %pK\n", kms, obj);
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);

	/* check vm ownership, as event registration may require HW access */
	sde_vm_lock(sde_kms);
	if (!sde_vm_owns_hw(sde_kms)) {
		sde_vm_unlock(sde_kms);
		SDE_DEBUG("HW is owned by other VM\n");
		return -EACCES;
	}

	switch (obj->type) {
	case DRM_MODE_OBJECT_CRTC:
		crtc = obj_to_crtc(obj);
		ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
		break;
	case DRM_MODE_OBJECT_CONNECTOR:
		conn = obj_to_connector(obj);
		ret = sde_connector_register_custom_event(sde_kms, conn, event,
				en);
		break;
	}

	sde_vm_unlock(sde_kms);

	return ret;
}
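
/*
 * sde_kms_handle_recovery - log a recovery request for @encoder and wait
 * for it to reach the MSM_ENC_ACTIVE_REGION state.
 */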
int sde_kms_handle_recovery(struct drm_encoder *encoder)
{
	SDE_EVT32(DRMID(encoder), MSM_ENC_ACTIVE_REGION);

	return sde_encoder_wait_for_event(encoder, MSM_ENC_ACTIVE_REGION);
}
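
/*
 * sde_kms_add_data_to_minidump_va - register the kms object plus every
 * crtc, plane, encoder and connector (and their current states) with the
 * minidump facility so they are available for post-mortem analysis.
 */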
void sde_kms_add_data_to_minidump_va(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;
	struct sde_crtc *sde_crtc;
	struct sde_crtc_state *cstate;
	struct sde_connector *sde_conn;
	struct sde_connector_state *conn_state;
	u32 i;

	priv = sde_kms->dev->dev_private;

	sde_mini_dump_add_va_region("sde_kms", sizeof(*sde_kms), sde_kms);

	for (i = 0; i < priv->num_crtcs; i++) {
		sde_crtc = to_sde_crtc(priv->crtcs[i]);
		cstate = to_sde_crtc_state(priv->crtcs[i]->state);
		sde_mini_dump_add_va_region("sde_crtc", sizeof(*sde_crtc), sde_crtc);
		sde_mini_dump_add_va_region("crtc_state", sizeof(*cstate), cstate);
	}

	for (i = 0; i < priv->num_planes; i++)
		sde_plane_add_data_to_minidump_va(priv->planes[i]);

	for (i = 0; i < priv->num_encoders; i++)
		sde_encoder_add_data_to_minidump_va(priv->encoders[i]);

	for (i = 0; i < priv->num_connectors; i++) {
		sde_conn = to_sde_connector(priv->connectors[i]);
		conn_state = to_sde_connector_state(priv->connectors[i]->state);
		sde_mini_dump_add_va_region("sde_conn", sizeof(*sde_conn), sde_conn);
		sde_mini_dump_add_va_region("conn_state", sizeof(*conn_state), conn_state);
	}
}