/*
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <drm/drm_crtc.h>
#include <drm/drm_fixed.h>
#include <drm/drm_panel.h>
#include <linux/debugfs.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/dma-buf.h>
#include <linux/memblock.h>
#include <linux/soc/qcom/panel_event_notifier.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_probe_helper.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"

#include "dsi_display.h"
#include "dsi_drm.h"
#include "sde_wb.h"
#include "dp_display.h"
#include "dp_drm.h"
#include "dp_mst_drm.h"

#include "sde_kms.h"
#include "sde_core_irq.h"
#include "sde_formats.h"
#include "sde_hw_vbif.h"
#include "sde_vbif.h"
#include "sde_encoder.h"
#include "sde_plane.h"
#include "sde_crtc.h"
#include "sde_color_processing.h"
#include "sde_reg_dma.h"
#include "sde_connector.h"
#include "sde_vm.h"
#include "sde_fence.h"

#include <linux/qcom_scm.h>
#include <linux/qcom-iommu-util.h>
#include "soc/qcom/secure_buffer.h"
#include <linux/qtee_shmbridge.h>

#ifdef CONFIG_DRM_SDE_VM
#include <linux/gunyah/gh_irq_lend.h>
#endif

#define CREATE_TRACE_POINTS
#include "sde_trace.h"

/* defines for secure channel call */
#define MEM_PROTECT_SD_CTRL_SWITCH 0x18
#define MDP_DEVICE_ID              0x1A

#define DEMURA_REGION_NAME_MAX     32

EXPORT_TRACEPOINT_SYMBOL(tracing_mark_write);

static const char * const iommu_ports[] = {
	"mdp_0",
};

/*
 * Controls size of event log buffer. Specified as a power of 2.
 */
#define SDE_EVTLOG_SIZE	1024

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
 */
#define SDE_DEBUGFS_DIR "msm_sde"
#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"

#define SDE_KMS_MODESET_LOCK_TIMEOUT_US 500
#define SDE_KMS_MODESET_LOCK_MAX_TRIALS 20

/**
 * sdecustom - enable certain driver customizations for sde clients
 *	Enabling this modifies the standard DRM behavior slightly and assumes
 *	that the clients have specific knowledge about the modifications that
 *	are involved, so don't enable this unless you know what you're doing.
 *
 *	Parts of the driver that are affected by this setting may be located by
 *	searching for invocations of the 'sde_is_custom_client()' function.
 *
 *	This is enabled by default.
 */
static bool sdecustom = true;
module_param(sdecustom, bool, 0400);
MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");

static int sde_kms_hw_init(struct msm_kms *kms);
static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
static int _sde_kms_mmu_init(struct sde_kms *sde_kms);
static int _sde_kms_register_events(struct msm_kms *kms,
		struct drm_mode_object *obj, u32 event, bool en);
static void sde_kms_handle_power_event(u32 event_type, void *usr);

bool sde_is_custom_client(void)
{
	return sdecustom;
}

#if IS_ENABLED(CONFIG_DEBUG_FS)
void *sde_debugfs_get_root(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
		return NULL;

	priv = sde_kms->dev->dev_private;
	return priv->debug_root;
}

static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	void *p;
	int rc;
	void *debugfs_root;

	p = sde_hw_util_get_log_mask_ptr();

	if (!sde_kms || !p)
		return -EINVAL;

	debugfs_root = sde_debugfs_get_root(sde_kms);
	if (!debugfs_root)
		return -EINVAL;

	/* debugfs_create_*() tolerates a NULL parent, but root was validated above */
	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0600, debugfs_root, p);

	(void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
	(void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);

	rc = sde_core_perf_debugfs_init(&sde_kms->perf, debugfs_root);
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		return rc;
	}

	sde_rm_debugfs_init(&sde_kms->rm, debugfs_root);

	if (sde_kms->catalog->qdss_count)
		debugfs_create_u32("qdss", 0600, debugfs_root,
				(u32 *)&sde_kms->qdss_enabled);

	debugfs_create_u32("pm_suspend_clk_dump", 0600, debugfs_root,
			(u32 *)&sde_kms->pm_suspend_clk_dump);
	debugfs_create_u32("hw_fence_status", 0600, debugfs_root,
			(u32 *)&sde_kms->debugfs_hw_fence);

	return 0;
}

static void sde_kms_debugfs_destroy(struct msm_kms *kms)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);

	/* don't need to NULL check debugfs_root */
	if (sde_kms) {
		sde_debugfs_vbif_destroy(sde_kms);
		sde_debugfs_core_irq_destroy(sde_kms);
	}
}

static int _sde_kms_dump_clks_state(struct sde_kms *sde_kms)
{
	int i;
	struct device *dev = sde_kms->dev->dev;

	SDE_INFO("runtime PM suspended:%d", pm_runtime_suspended(dev));

	for (i = 0; i < sde_kms->dsi_display_count; i++)
		dsi_display_dump_clks_state(sde_kms->dsi_displays[i]);

	return 0;
}
#else
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	return 0;
}

static void sde_kms_debugfs_destroy(struct msm_kms *kms)
{
}

static int _sde_kms_dump_clks_state(struct sde_kms *sde_kms)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */

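/*
 * sde_kms_wait_for_frame_transfer_complete - block until the frame currently
 * being sent to the panel has fully left the hardware pipeline. For
 * video-mode panels this amounts to waiting for VSYNC; for command-mode
 * panels it waits for PP_DONE (a no-op if the transfer already completed).
 */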
static void sde_kms_wait_for_frame_transfer_complete(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state || !crtc->dev) {
		SDE_ERROR("invalid params\n");
		return;
	}

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	dev = crtc->dev;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Video Mode - Wait for VSYNC
		 * Cmd Mode - Wait for PP_DONE. Will be no-op if transfer is
		 * complete
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR(
			"[crtc: %d][enc: %d] wait for commit done returned %d\n",
					crtc->base.id, encoder->base.id, ret);
			break;
		}
	}
}

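/*
 * _sde_kms_secure_ctrl_xin_clients - halt (or resume) the VBIF xin clients
 * that must be blocked during a secure UI session and, when enabling,
 * program each plane's xin client for secure control.
 */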
static int _sde_kms_secure_ctrl_xin_clients(struct sde_kms *sde_kms,
		struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_mdss_cfg *sde_cfg;
	struct drm_plane *plane;
	int i, ret;

	dev = sde_kms->dev;
	priv = dev->dev_private;
	sde_cfg = sde_kms->catalog;

	ret = sde_vbif_halt_xin_mask(sde_kms,
			sde_cfg->sui_block_xin_mask, enable);
	if (ret) {
		SDE_ERROR("failed to halt some xin-clients, ret:%d\n", ret);
		return ret;
	}

	if (enable) {
		for (i = 0; i < priv->num_planes; i++) {
			plane = priv->planes[i];
			sde_plane_secure_ctrl_xin_client(plane, crtc);
		}
	}

	return 0;
}

/**
 * _sde_kms_scm_call - makes secure channel call to switch the VMIDs
 * @sde_kms: Pointer to sde_kms struct
 * @vmid: switch the stage 2 translation to this VMID
 */
static int _sde_kms_scm_call(struct sde_kms *sde_kms, int vmid)
{
	struct device dummy = {};
	dma_addr_t dma_handle;
	uint32_t num_sids;
	uint32_t *sec_sid;
	struct sde_mdss_cfg *sde_cfg = sde_kms->catalog;
	int ret = 0, i;
	struct qtee_shm shm;
	bool qtee_en = qtee_shmbridge_is_enabled();
	phys_addr_t mem_addr;
	u64 mem_size;

	num_sids = sde_cfg->sec_sid_mask_count;
	if (!num_sids) {
		SDE_ERROR("secure SID masks not configured, vmid 0x%x\n", vmid);
		return -EINVAL;
	}

	if (qtee_en) {
		ret = qtee_shmbridge_allocate_shm(num_sids * sizeof(uint32_t),
				&shm);
		if (ret)
			return -ENOMEM;

		sec_sid = (uint32_t *) shm.vaddr;
		mem_addr = shm.paddr;
		/*
		 * SMMUSecureModeSwitch requires the size to be the number of
		 * SIDs, but shm allocates size in pages. Modify the args as
		 * per client requirement.
		 */
		mem_size = sizeof(uint32_t) * num_sids;
	} else {
		sec_sid = kcalloc(num_sids, sizeof(uint32_t), GFP_KERNEL);
		if (!sec_sid)
			return -ENOMEM;

		mem_addr = virt_to_phys(sec_sid);
		mem_size = sizeof(uint32_t) * num_sids;
	}

	for (i = 0; i < num_sids; i++) {
		sec_sid[i] = sde_cfg->sec_sid_mask[i];
		SDE_DEBUG("sid_mask[%d]: %d\n", i, sec_sid[i]);
	}

	ret = dma_coerce_mask_and_coherent(&dummy, DMA_BIT_MASK(64));
	if (ret) {
		SDE_ERROR("Failed to set dma mask for dummy dev %d\n", ret);
		goto map_error;
	}
	set_dma_ops(&dummy, NULL);

	dma_handle = dma_map_single(&dummy, sec_sid,
			num_sids * sizeof(uint32_t), DMA_TO_DEVICE);
	if (dma_mapping_error(&dummy, dma_handle)) {
		SDE_ERROR("dma_map_single for dummy dev failed vmid 0x%x\n",
				vmid);
		goto map_error;
	}

	SDE_DEBUG("calling scm_call for vmid 0x%x, num_sids %d, qtee_en %d",
			vmid, num_sids, qtee_en);

	ret = qcom_scm_mem_protect_sd_ctrl(MDP_DEVICE_ID, mem_addr,
			mem_size, vmid);
	if (ret)
		SDE_ERROR("Error: scm_call2, vmid %d, ret %d\n",
				vmid, ret);

	SDE_EVT32(MEM_PROTECT_SD_CTRL_SWITCH, MDP_DEVICE_ID, mem_size,
			vmid, qtee_en, num_sids, ret);

	dma_unmap_single(&dummy, dma_handle,
			num_sids * sizeof(uint32_t), DMA_TO_DEVICE);

map_error:
	if (qtee_en)
		qtee_shmbridge_free_shm(&shm);
	else
		kfree(sec_sid);

	return ret;
}

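/*
 * The four helpers below flip the SMMU context banks and the stage-2 VMID
 * together. A refcount (detach_all_cb/detach_sec_cb) makes the operations
 * idempotent across nested requests, and each error path unwinds whichever
 * half already succeeded: detach-then-scm on the way into secure mode,
 * scm-then-attach on the way out.
 */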
static int _sde_kms_detach_all_cb(struct sde_kms *sde_kms, u32 vmid)
{
	u32 ret;

	if (atomic_inc_return(&sde_kms->detach_all_cb) > 1)
		return 0;

	/* detach_all_contexts */
	ret = sde_kms_mmu_detach(sde_kms, false);
	if (ret) {
		SDE_ERROR("failed to detach all cb ret:%d\n", ret);
		goto mmu_error;
	}

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	return 0;

scm_error:
	sde_kms_mmu_attach(sde_kms, false);
mmu_error:
	atomic_dec(&sde_kms->detach_all_cb);
	return ret;
}

static int _sde_kms_attach_all_cb(struct sde_kms *sde_kms, u32 vmid,
		u32 old_vmid)
{
	u32 ret;

	if (atomic_dec_return(&sde_kms->detach_all_cb) != 0)
		return 0;

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	/* attach_all_contexts */
	ret = sde_kms_mmu_attach(sde_kms, false);
	if (ret) {
		SDE_ERROR("failed to attach all cb ret:%d\n", ret);
		goto mmu_error;
	}

	return 0;

mmu_error:
	_sde_kms_scm_call(sde_kms, old_vmid);
scm_error:
	atomic_inc(&sde_kms->detach_all_cb);
	return ret;
}

static int _sde_kms_detach_sec_cb(struct sde_kms *sde_kms, int vmid)
{
	u32 ret;

	if (atomic_inc_return(&sde_kms->detach_sec_cb) > 1)
		return 0;

	/* detach secure_context */
	ret = sde_kms_mmu_detach(sde_kms, true);
	if (ret) {
		SDE_ERROR("failed to detach sec cb ret:%d\n", ret);
		goto mmu_error;
	}

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	return 0;

scm_error:
	sde_kms_mmu_attach(sde_kms, true);
mmu_error:
	atomic_dec(&sde_kms->detach_sec_cb);
	return ret;
}

static int _sde_kms_attach_sec_cb(struct sde_kms *sde_kms, u32 vmid,
		u32 old_vmid)
{
	u32 ret;

	if (atomic_dec_return(&sde_kms->detach_sec_cb) != 0)
		return 0;

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	ret = sde_kms_mmu_attach(sde_kms, true);
	if (ret) {
		SDE_ERROR("failed to attach sec cb ret:%d\n", ret);
		goto mmu_error;
	}

	return 0;

mmu_error:
	_sde_kms_scm_call(sde_kms, old_vmid);
scm_error:
	atomic_inc(&sde_kms->detach_sec_cb);
	return ret;
}

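/*
 * _sde_kms_sui_misr_ctrl - enable/disable MISR signature collection for the
 * secure UI session. Enabling takes a runtime PM reference, arms the CRTC
 * MISR and halts the blocked xin clients; disabling unwinds in reverse order.
 */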
static int _sde_kms_sui_misr_ctrl(struct sde_kms *sde_kms,
		struct drm_crtc *crtc, bool enable)
{
	int ret;

	if (enable) {
		ret = pm_runtime_resume_and_get(sde_kms->dev->dev);
		if (ret < 0) {
			SDE_ERROR("failed to enable power resource %d\n", ret);
			SDE_EVT32(ret, SDE_EVTLOG_ERROR);
			return ret;
		}

		sde_crtc_misr_setup(crtc, true, 1);

		ret = _sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, true);
		if (ret) {
			sde_crtc_misr_setup(crtc, false, 0);
			pm_runtime_put_sync(sde_kms->dev->dev);
			return ret;
		}
	} else {
		_sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, false);
		sde_crtc_misr_setup(crtc, false, 0);
		pm_runtime_put_sync(sde_kms->dev->dev);
	}

	return 0;
}

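/*
 * _sde_kms_secure_ctrl - drive the SMMU secure-state machine for a CRTC.
 * Depending on the requested transition (DETACH_ALL/ATTACH_ALL for secure
 * display, DETACH_SEC/ATTACH_SEC for secure camera or secure-only modes),
 * it switches the VMID and context banks, toggles SUI MISR around the
 * transition, and rolls the state back if any step fails.
 */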
static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
		bool post_commit)
{
	struct sde_kms_smmu_state_data *smmu_state = &sde_kms->smmu_state;
	int old_smmu_state = smmu_state->state;
	int ret = 0;
	u32 vmid;

	if (!sde_kms || !crtc) {
		SDE_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->transition_type,
			post_commit, smmu_state->sui_misr_state,
			smmu_state->secure_level, SDE_EVTLOG_FUNC_ENTRY);

	if ((!smmu_state->transition_type) ||
	    ((smmu_state->transition_type == POST_COMMIT) && !post_commit))
		/* Bail out */
		return 0;

	/* enable sui misr if requested, before the transition */
	if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ) {
		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, true);
		if (ret) {
			smmu_state->sui_misr_state = NONE;
			goto end;
		}
	}

	mutex_lock(&sde_kms->secure_transition_lock);
	switch (smmu_state->state) {
	case DETACH_ALL_REQ:
		ret = _sde_kms_detach_all_cb(sde_kms, VMID_CP_SEC_DISPLAY);
		if (!ret)
			smmu_state->state = DETACHED;
		break;

	case ATTACH_ALL_REQ:
		ret = _sde_kms_attach_all_cb(sde_kms, VMID_CP_PIXEL,
				VMID_CP_SEC_DISPLAY);
		if (!ret) {
			smmu_state->state = ATTACHED;
			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
		}
		break;

	case DETACH_SEC_REQ:
		vmid = (smmu_state->secure_level == SDE_DRM_SEC_ONLY) ?
				VMID_CP_SEC_DISPLAY : VMID_CP_CAMERA_PREVIEW;

		ret = _sde_kms_detach_sec_cb(sde_kms, vmid);
		if (!ret)
			smmu_state->state = DETACHED_SEC;
		break;

	case ATTACH_SEC_REQ:
		vmid = (smmu_state->secure_level == SDE_DRM_SEC_ONLY) ?
				VMID_CP_SEC_DISPLAY : VMID_CP_CAMERA_PREVIEW;

		ret = _sde_kms_attach_sec_cb(sde_kms, VMID_CP_PIXEL, vmid);
		if (!ret) {
			smmu_state->state = ATTACHED;
			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
		}
		break;

	default:
		SDE_ERROR("crtc%d: invalid smmu state %d transition type %d\n",
				DRMID(crtc), smmu_state->state,
				smmu_state->transition_type);
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&sde_kms->secure_transition_lock);

	/* disable sui misr if requested, after the transition */
	if (!ret && (smmu_state->sui_misr_state == SUI_MISR_DISABLE_REQ)) {
		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
		if (ret)
			goto end;
	}

end:
	smmu_state->transition_error = false;
	if (ret) {
		smmu_state->transition_error = true;
		SDE_ERROR(
			"crtc%d: req_state %d, new_state %d, sec_lvl %d, ret %d\n",
			DRMID(crtc), old_smmu_state, smmu_state->state,
			smmu_state->secure_level, ret);

		smmu_state->state = smmu_state->prev_state;
		smmu_state->secure_level = smmu_state->prev_secure_level;

		if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ)
			_sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
	}

	SDE_DEBUG("crtc %d: req_state %d, new_state %d, sec_lvl %d, ret %d\n",
			DRMID(crtc), old_smmu_state, smmu_state->state,
			smmu_state->secure_level, ret);

	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->prev_state,
			smmu_state->transition_type,
			smmu_state->transition_error,
			smmu_state->secure_level, smmu_state->prev_secure_level,
			smmu_state->sui_misr_state, ret, SDE_EVTLOG_FUNC_EXIT);

	smmu_state->sui_misr_state = NONE;
	smmu_state->transition_type = NONE;

	return ret;
}

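/*
 * sde_kms_prepare_secure_transition - perform the operations required before
 * a secure<->non-secure transition can start on the active CRTC: wait for
 * the current frame transfer to finish, clean up or re-prepare plane
 * framebuffers, and trigger the SMMU state change derived from the old CRTC
 * state.
 */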
static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	int i, ops = 0, ret = 0;
	bool old_valid_fb = false;
	struct sde_kms_smmu_state_data *smmu_state = &sde_kms->smmu_state;

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		if (!crtc->state || !crtc->state->active)
			continue;
		/*
		 * It is safe to assume only one active crtc,
		 * and compatible translation modes on the
		 * planes staged on this crtc.
		 * otherwise validation would have failed.
		 * For this CRTC,
		 */

		/*
		 * 1. Check if old state on the CRTC has planes
		 * staged with valid fbs
		 */
		for_each_old_plane_in_state(state, plane, plane_state, i) {
			if (!plane_state->crtc)
				continue;
			if (plane_state->fb) {
				old_valid_fb = true;
				break;
			}
		}

		/*
		 * 2. Get the operations needed to be performed before
		 * secure transition can be initiated.
		 */
		ops = sde_crtc_get_secure_transition_ops(crtc,
				old_crtc_state, old_valid_fb);
		if (ops < 0) {
			SDE_ERROR("invalid secure operations %x\n", ops);
			return ops;
		}

		if (!ops) {
			smmu_state->transition_error = false;
			goto no_ops;
		}

		SDE_DEBUG("%d:secure operations(%x) started on state:%pK\n",
				crtc->base.id, ops, crtc->state);
		SDE_EVT32(DRMID(crtc), ops, crtc->state, old_valid_fb);

		/* 3. Perform operations needed for secure transition */
		if (ops & SDE_KMS_OPS_WAIT_FOR_TX_DONE) {
			SDE_DEBUG("wait_for_transfer_done\n");
			sde_kms_wait_for_frame_transfer_complete(kms, crtc);
		}
		if (ops & SDE_KMS_OPS_CLEANUP_PLANE_FB) {
			SDE_DEBUG("cleanup planes\n");
			drm_atomic_helper_cleanup_planes(dev, state);
			for_each_oldnew_plane_in_state(state, plane,
					old_plane_state, new_plane_state, i)
				sde_plane_destroy_fb(old_plane_state);
		}
		if (ops & SDE_KMS_OPS_SECURE_STATE_CHANGE) {
			SDE_DEBUG("secure ctrl\n");
			_sde_kms_secure_ctrl(sde_kms, crtc, false);
		}
		if (ops & SDE_KMS_OPS_PREPARE_PLANE_FB) {
			SDE_DEBUG("prepare planes %d",
					crtc->state->plane_mask);
			drm_atomic_crtc_for_each_plane(plane,
					crtc) {
				const struct drm_plane_helper_funcs *funcs;

				plane_state = plane->state;
				funcs = plane->helper_private;

				SDE_DEBUG("psde:%d FB[%u]\n",
						plane->base.id,
						plane->fb->base.id);
				if (!funcs)
					continue;

				if (funcs->prepare_fb) {
					ret = funcs->prepare_fb(plane,
							plane_state);
					if (ret)
						return ret;
				}
			}
		}
		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
		SDE_DEBUG("secure operations completed\n");
	}

no_ops:
	return 0;
}

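/*
 * _sde_kms_release_shared_buffer - return the continuous-splash carveout to
 * the page allocator once the splash handoff is done, optionally leaving the
 * ramdump region reserved when it shares the splash base address.
 */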
static int _sde_kms_release_shared_buffer(unsigned long mem_addr,
		unsigned int splash_buffer_size,
		unsigned int ramdump_base,
		unsigned int ramdump_buffer_size)
{
	unsigned long pfn_start, pfn_end, pfn_idx;
	int ret = 0;

	if (!mem_addr || !splash_buffer_size) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	/* leave ramdump memory only if base address matches */
	if (ramdump_base == mem_addr &&
			ramdump_buffer_size <= splash_buffer_size) {
		mem_addr += ramdump_buffer_size;
		splash_buffer_size -= ramdump_buffer_size;
	}

	pfn_start = mem_addr >> PAGE_SHIFT;
	pfn_end = (mem_addr + splash_buffer_size) >> PAGE_SHIFT;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
	memblock_free((unsigned int *)mem_addr, splash_buffer_size);
#else
	ret = memblock_free(mem_addr, splash_buffer_size);
	if (ret) {
		SDE_ERROR("continuous splash memory free failed:%d\n", ret);
		return ret;
	}
#endif

	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
		free_reserved_page(pfn_to_page(pfn_idx));

	return ret;
}

static int _sde_kms_one2one_mem_map_ipcc_reg(struct sde_kms *sde_kms, u32 buf_size,
		unsigned long buf_base)
{
	struct msm_mmu *mmu = NULL;
	int ret = 0;

	if (!sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]
			|| !sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]->mmu) {
		SDE_ERROR("aspace not found for sde kms node\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]->mmu;
	if (!mmu) {
		SDE_ERROR("mmu not found for aspace\n");
		return -EINVAL;
	}

	if (!mmu->funcs || !mmu->funcs->one_to_one_map) {
		SDE_ERROR("invalid input params for map\n");
		return -EINVAL;
	}

	ret = mmu->funcs->one_to_one_map(mmu, buf_base, buf_base, buf_size,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		SDE_ERROR("one2one memory smmu map failed:%d\n", ret);

	return ret;
}

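/*
 * _sde_kms_splash_mem_get - identity-map a splash (or demura) region into the
 * SMMU so the hardware can keep scanning it out across the bootloader-to-
 * kernel handoff. A per-region refcount avoids double mapping when several
 * displays share the same buffer.
 */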
static int _sde_kms_splash_mem_get(struct sde_kms *sde_kms,
		struct sde_splash_mem *splash)
{
	struct msm_mmu *mmu = NULL;
	int ret = 0;

	if (!sde_kms->aspace[0]) {
		SDE_ERROR("aspace not found for sde kms node\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[0]->mmu;
	if (!mmu) {
		SDE_ERROR("mmu not found for aspace\n");
		return -EINVAL;
	}

	if (!splash || !mmu->funcs || !mmu->funcs->one_to_one_map) {
		SDE_ERROR("invalid input params for map\n");
		return -EINVAL;
	}

	if (!splash->ref_cnt) {
		ret = mmu->funcs->one_to_one_map(mmu, splash->splash_buf_base,
				splash->splash_buf_base,
				splash->splash_buf_size,
				IOMMU_READ | IOMMU_NOEXEC);
		if (ret)
			SDE_ERROR("splash memory smmu map failed:%d\n", ret);
	}

	splash->ref_cnt++;
	SDE_DEBUG("one2one mapping done for base:%lx size:%x ref_cnt:%d\n",
			splash->splash_buf_base,
			splash->splash_buf_size,
			splash->ref_cnt);

	return ret;
}

static int _sde_kms_map_all_splash_regions(struct sde_kms *sde_kms)
{
	int i = 0;
	int ret = 0;
	struct sde_splash_mem *region;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) {
		region = sde_kms->splash_data.splash_display[i].splash;
		ret = _sde_kms_splash_mem_get(sde_kms, region);
		if (ret)
			return ret;

		/* Demura is optional and need not exist */
		region = sde_kms->splash_data.splash_display[i].demura;
		if (region) {
			ret = _sde_kms_splash_mem_get(sde_kms, region);
			if (ret)
				return ret;
		}
	}

	return ret;
}

static int _sde_kms_splash_mem_put(struct sde_kms *sde_kms,
		struct sde_splash_mem *splash)
{
	struct msm_mmu *mmu = NULL;
	int rc = 0;

	if (!sde_kms || !sde_kms->aspace[0] || !sde_kms->aspace[0]->mmu) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[0]->mmu;

	if (!splash || !splash->ref_cnt ||
			!mmu || !mmu->funcs || !mmu->funcs->one_to_one_unmap)
		return -EINVAL;

	splash->ref_cnt--;

	SDE_DEBUG("splash base:%lx refcnt:%d\n",
			splash->splash_buf_base, splash->ref_cnt);

	if (!splash->ref_cnt) {
		mmu->funcs->one_to_one_unmap(mmu, splash->splash_buf_base,
				splash->splash_buf_size);
		rc = _sde_kms_release_shared_buffer(splash->splash_buf_base,
				splash->splash_buf_size, splash->ramdump_base,
				splash->ramdump_size);
		splash->splash_buf_base = 0;
		splash->splash_buf_size = 0;
	}

	return rc;
}

static int _sde_kms_unmap_all_splash_regions(struct sde_kms *sde_kms)
{
	int i = 0;
	int ret = 0, failure = 0;
	struct sde_splash_mem *region;

	if (!sde_kms || !sde_kms->splash_data.num_splash_regions)
		return -EINVAL;

	for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) {
		region = sde_kms->splash_data.splash_display[i].splash;
		ret = _sde_kms_splash_mem_put(sde_kms, region);
		if (ret) {
			failure = 1;
			pr_err("Error unmapping splash mem for display %d\n",
					i);
		}

		/* Demura is optional and need not exist */
		region = sde_kms->splash_data.splash_display[i].demura;
		if (region) {
			ret = _sde_kms_splash_mem_put(sde_kms, region);
			if (ret) {
				failure = 1;
				pr_err("Error unmapping demura mem for display %d\n",
						i);
			}
		}
	}

	if (failure)
		ret = -EINVAL;

	return ret;
}

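/*
 * _sde_kms_get_blank - translate the connector's low-power property (and the
 * CRTC active flag) into the matching DRM panel-event blank code: UNBLANK
 * for DPMS_ON, BLANK_LP for LP1/LP2 and BLANK for everything else.
 */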
static int _sde_kms_get_blank(struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	int lp_mode, blank;

	if (crtc_state->active)
		lp_mode = sde_connector_get_property(conn_state,
				CONNECTOR_PROP_LP);
	else
		lp_mode = SDE_MODE_DPMS_OFF;

	switch (lp_mode) {
	case SDE_MODE_DPMS_ON:
		blank = DRM_PANEL_EVENT_UNBLANK;
		break;
	case SDE_MODE_DPMS_LP1:
	case SDE_MODE_DPMS_LP2:
		blank = DRM_PANEL_EVENT_BLANK_LP;
		break;
	case SDE_MODE_DPMS_OFF:
	default:
		blank = DRM_PANEL_EVENT_BLANK;
		break;
	}

	return blank;
}

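/*
 * _sde_kms_drm_check_dpms - compare old and new power mode / refresh rate
 * for every connector in the atomic state and, when either changed, trigger
 * a panel event notification so interested clients can react. Called both
 * before and after the commit, distinguished by @is_pre_commit.
 */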
static void _sde_kms_drm_check_dpms(struct drm_atomic_state *old_state,
		bool is_pre_commit)
{
	struct panel_event_notification notification;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	struct sde_connector *c_conn;
	int i, old_mode, new_mode, old_fps, new_fps;
	enum panel_event_notifier_tag panel_type;

	for_each_old_connector_in_state(old_state, connector,
			old_conn_state, i) {
		crtc = connector->state->crtc ? connector->state->crtc :
				old_conn_state->crtc;
		if (!crtc)
			continue;

		new_fps = drm_mode_vrefresh(&crtc->state->mode);
		new_mode = _sde_kms_get_blank(crtc->state, connector->state);
		if (old_conn_state->crtc) {
			old_crtc_state = drm_atomic_get_existing_crtc_state(
					old_state, old_conn_state->crtc);

			old_fps = drm_mode_vrefresh(&old_crtc_state->mode);
			old_mode = _sde_kms_get_blank(old_crtc_state,
							old_conn_state);
		} else {
			old_fps = 0;
			old_mode = DRM_PANEL_EVENT_BLANK;
		}

		if ((old_mode != new_mode) || (old_fps != new_fps)) {
			c_conn = to_sde_connector(connector);
			SDE_EVT32(old_mode, new_mode, old_fps, new_fps,
				c_conn->panel, crtc->state->active,
				old_conn_state->crtc);
			pr_debug("change detected for connector:%s (power mode %d->%d, fps %d->%d)\n",
				c_conn->name, old_mode, new_mode, old_fps, new_fps);

			/* If suspend resume and fps change are happening
			 * at the same time, give preference to power mode
			 * changes rather than fps change.
			 */
			if ((old_mode == new_mode) && (old_fps != new_fps))
				new_mode = DRM_PANEL_EVENT_FPS_CHANGE;

			if (!c_conn->panel)
				continue;

			panel_type = sde_encoder_is_primary_display(
					connector->encoder) ?
					PANEL_EVENT_NOTIFICATION_PRIMARY :
					PANEL_EVENT_NOTIFICATION_SECONDARY;

			notification.notif_type = new_mode;
			notification.panel = c_conn->panel;
			notification.notif_data.old_fps = old_fps;
			notification.notif_data.new_fps = new_fps;
			notification.notif_data.early_trigger = is_pre_commit;
			panel_event_notification_trigger(panel_type,
					&notification);
		}
	}
}

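/*
 * sde_kms_vm_get_vm_crtc - scan the atomic state for the CRTC carrying a
 * pending VM request (acquire/release) and return it, or NULL when no VM
 * transition is part of this commit.
 */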
static struct drm_crtc *sde_kms_vm_get_vm_crtc(
		struct drm_atomic_state *state)
{
	int i;
	enum sde_crtc_vm_req vm_req = VM_REQ_NONE;
	struct drm_crtc *crtc, *vm_crtc = NULL;
	struct drm_crtc_state *new_cstate, *old_cstate;
	struct sde_crtc_state *vm_cstate;

	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		if (!new_cstate->active && !old_cstate->active)
			continue;

		vm_cstate = to_sde_crtc_state(new_cstate);
		vm_req = sde_crtc_get_property(vm_cstate,
				CRTC_PROP_VM_REQ_STATE);
		if (vm_req != VM_REQ_NONE) {
			SDE_DEBUG("valid vm request:%d found on crtc-%d\n",
					vm_req, crtc->base.id);
			vm_crtc = crtc;
			break;
		}
	}

	return vm_crtc;
}

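/*
 * The two helpers below add (or remove) a DEV_PM_QOS_RESUME_LATENCY request
 * on every CPU in the display IRQ affinity mask, so the cores servicing the
 * MDSS interrupt stay within the catalog's cpu_irq_latency bound instead of
 * entering deep idle states while the display is active.
 */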
static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms, const cpumask_t *mask)
{
	struct device *cpu_dev;
	int cpu = 0;
	u32 cpu_irq_latency = sde_kms->catalog->perf.cpu_irq_latency;

	/* save irq cpu mask */
	sde_kms->irq_cpu_mask = *mask;

	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
		return;
	}

	for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
					cpu);
			continue;
		}

		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
			dev_pm_qos_update_request(&sde_kms->pm_qos_irq_req[cpu],
					cpu_irq_latency);
		else
			dev_pm_qos_add_request(cpu_dev,
					&sde_kms->pm_qos_irq_req[cpu],
					DEV_PM_QOS_RESUME_LATENCY,
					cpu_irq_latency);
	}
}

static void _sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms, const cpumask_t *mask)
{
	struct device *cpu_dev;
	int cpu = 0;

	if (cpumask_empty(mask)) {
		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
		return;
	}

	for_each_cpu(cpu, mask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
					cpu);
			continue;
		}

		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
			dev_pm_qos_remove_request(
					&sde_kms->pm_qos_irq_req[cpu]);
	}
}

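/*
 * sde_kms_vm_primary_prepare_commit - re-take hardware ownership on the
 * primary VM after a VM_REQ_ACQUIRE: re-enable the MDSS and display-path
 * IRQs, clear stale interrupt status, restart ESD monitoring and vblank
 * events, then run the VM client's post-acquire hook.
 */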
int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *ddev;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct sde_vm_ops *vm_ops;
	struct sde_crtc_state *cstate;
	struct drm_connector_list_iter iter;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	ddev = sde_kms->dev;

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return -EINVAL;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return 0;

	/* enable MDSS irq line */
	sde_irq_update(&sde_kms->base, true);

	/* clear the stale IRQ status bits */
	if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);

	_sde_kms_remove_pm_qos_irq_request(sde_kms, &CPU_MASK_ALL);

	/* enable the display path IRQs */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_irq_control(encoder, true);
	}

	/* Schedule ESD work */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		if (drm_connector_mask(connector) & crtc->state->connector_mask)
			sde_connector_schedule_status_work(connector, true);
	drm_connector_list_iter_end(&iter);

	/* enable vblank events */
	drm_crtc_vblank_on(crtc);

	sde_dbg_set_hw_ownership_status(true);

	/* handle non-SDE pre_acquire */
	if (vm_ops->vm_client_post_acquire)
		rc = vm_ops->vm_client_post_acquire(sde_kms);

	return rc;
}

void sde_kms_vm_set_sid(struct sde_kms *sde_kms, u32 vm)
{
	struct drm_plane *plane;
	struct drm_device *ddev;
	struct sde_mdss_cfg *sde_cfg;

	ddev = sde_kms->dev;
	sde_cfg = sde_kms->catalog;

	list_for_each_entry(plane, &ddev->mode_config.plane_list, head)
		sde_plane_set_sid(plane, vm);

	if (sde_kms->hw_sid && sde_kms->hw_sid->ops.set_vm_sid)
		sde_kms->hw_sid->ops.set_vm_sid(sde_kms->hw_sid, vm, sde_kms->catalog);
}

int sde_kms_vm_trusted_prepare_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct sde_crtc_state *cstate;
	enum sde_crtc_vm_req vm_req;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return 0;

	/* Clear the stale IRQ status bits */
	if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);

	/* Program the SIDs for the trusted VM */
	sde_kms_vm_set_sid(sde_kms, 1);

	sde_dbg_set_hw_ownership_status(true);

	return 0;
}

static void sde_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_device *dev;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct sde_vm_ops *vm_ops;
	int i, rc;

	if (!kms)
		return;

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	if (!dev || !dev->dev_private)
		return;

	priv = dev->dev_private;

	SDE_ATRACE_BEGIN("prepare_commit");
	rc = pm_runtime_resume_and_get(sde_kms->dev->dev);
	if (rc < 0) {
		SDE_ERROR("failed to enable power resources %d\n", rc);
		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
		goto end;
	}

	if (sde_kms->first_kickoff) {
		sde_power_scale_reg_bus(&priv->phandle, VOTE_INDEX_HIGH, false);
		sde_kms->first_kickoff = false;
	}

	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		drm_for_each_encoder_mask(encoder, dev, cstate->encoder_mask) {
			if (sde_encoder_prepare_commit(encoder) == -ETIMEDOUT) {
				SDE_ERROR("crtc:%d, initiating hw reset\n",
						DRMID(crtc));
				sde_encoder_needs_hw_reset(encoder);
				sde_crtc_set_needs_hw_reset(crtc);
			}
		}
	}

	/*
	 * NOTE: for secure use cases we want to apply the new HW
	 * configuration only after completing the preparation for secure
	 * transitions below, if any such transition is required.
	 */
	sde_kms_prepare_secure_transition(kms, state);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		goto end_vm;

	if (vm_ops->vm_prepare_commit)
		vm_ops->vm_prepare_commit(sde_kms, state);

end_vm:
	_sde_kms_drm_check_dpms(state, true);

end:
	SDE_ATRACE_END("prepare_commit");
}

static void sde_kms_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state)
		return;

	sde_kms = to_sde_kms(kms);

	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_commit");
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active) {
			SDE_EVT32(DRMID(crtc), old_state);
			sde_crtc_commit_kickoff(crtc, old_crtc_state);
		}
	}
	SDE_ATRACE_END("sde_kms_commit");
}

static void _sde_kms_free_splash_display_data(struct sde_kms *sde_kms,
		struct sde_splash_display *splash_display)
{
	if (!sde_kms || !splash_display ||
			!sde_kms->splash_data.num_splash_displays)
		return;

	if (sde_kms->splash_data.num_splash_regions) {
		_sde_kms_splash_mem_put(sde_kms, splash_display->splash);
		if (splash_display->demura)
			_sde_kms_splash_mem_put(sde_kms,
					splash_display->demura);
	}

	sde_kms->splash_data.num_splash_displays--;

	SDE_DEBUG("cont_splash handoff done, remaining:%d\n",
			sde_kms->splash_data.num_splash_displays);

	memset(splash_display, 0x0, sizeof(struct sde_splash_display));
}

static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms,
		struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;
	struct sde_splash_display *splash_display;
	int i;

	if (!sde_kms || !crtc)
		return;

	priv = sde_kms->dev->dev_private;

	if (!crtc->state->active || !sde_kms->splash_data.num_splash_displays)
		return;

	SDE_EVT32(DRMID(crtc), crtc->state->active,
			sde_kms->splash_data.num_splash_displays);

	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
		splash_display = &sde_kms->splash_data.splash_display[i];
		if (splash_display->encoder &&
				crtc == splash_display->encoder->crtc)
			break;
	}

	if (i >= MAX_DSI_DISPLAYS)
		return;

	if (splash_display->cont_splash_enabled) {
		sde_encoder_update_caps_for_cont_splash(splash_display->encoder,
				splash_display, false);
		_sde_kms_free_splash_display_data(sde_kms, splash_display);
	}

	/* remove the votes if all displays are done with splash */
	if (!sde_kms->splash_data.num_splash_displays) {
		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
			sde_power_data_bus_set_quota(&priv->phandle, i,
					SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
					priv->phandle.ib_quota[i] ?
					priv->phandle.ib_quota[i] :
					SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);

		pm_runtime_put_sync(sde_kms->dev->dev);
	}
}

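/**
 * sde_kms_cancel_delayed_work - cancel pending CRTC, ESD status and idle-PC
 *                               delayed work items for the given crtc
 * @crtc: Pointer to drm crtc structure
 */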
static void sde_kms_cancel_delayed_work(struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_encoder *encoder;

	/* Cancel CRTC work */
	sde_crtc_cancel_delayed_work(crtc);

	/* Cancel ESD work */
	drm_connector_list_iter_begin(crtc->dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		if (drm_connector_mask(connector) & crtc->state->connector_mask)
			sde_connector_schedule_status_work(connector, false);
	drm_connector_list_iter_end(&iter);

	/* Cancel Idle-PC work */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_cancel_delayed_work(encoder);
	}
}

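/**
 * sde_kms_vm_pre_release - prepare the display pipeline for releasing HW
 * ownership to another VM: wait for the pending frame transfer, cancel
 * delayed work, flush event threads and disable encoder IRQs
 * @sde_kms: Pointer to sde kms structure
 * @state: Pointer to drm atomic state
 * @is_primary: True when called on the primary VM release path
 * Returns: Zero on success
 */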
int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
		struct drm_atomic_state *state, bool is_primary)
{
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	int rc = 0;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	priv = sde_kms->dev->dev_private;

	/* if vm_req is enabled, one CRTC on the commit is guaranteed */
	sde_kms_wait_for_frame_transfer_complete(&sde_kms->base, crtc);

	sde_dbg_set_hw_ownership_status(false);

	sde_kms_cancel_delayed_work(crtc);

	kthread_flush_worker(&priv->event_thread[crtc->index].worker);

	/* Flush pp_event thread queue for any pending events */
	kthread_flush_worker(&priv->pp_event_worker);

	/* disable SDE encoder irqs */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_irq_control(encoder, false);
	}

	if (is_primary) {
		_sde_kms_update_pm_qos_irq_request(sde_kms, &CPU_MASK_ALL);

		/* disable vblank events */
		drm_crtc_vblank_off(crtc);

		/* reset sw state */
		sde_crtc_reset_sw_state(crtc);
	}

	return rc;
}

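/**
 * sde_kms_vm_trusted_post_commit - release HW ownership from the trusted VM
 * after a commit carrying a VM_REQ_RELEASE request
 * @sde_kms: Pointer to sde kms structure
 * @state: Pointer to drm atomic state
 * Returns: Zero on success
 */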
int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct sde_vm_ops *vm_ops;
	struct drm_crtc *crtc;
	struct sde_crtc_state *cstate;
	struct drm_crtc_state *new_cstate;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	if (!sde_kms || !sde_vm_is_enabled(sde_kms))
		return -EINVAL;

	vm_ops = sde_vm_get_ops(sde_kms);

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_RELEASE)
		return 0;

	sde_kms_vm_pre_release(sde_kms, state, false);
	sde_kms_vm_set_sid(sde_kms, 0);

	sde_vm_lock(sde_kms);

	if (vm_ops->vm_release)
		rc = vm_ops->vm_release(sde_kms);

	sde_vm_unlock(sde_kms);

	return rc;
}

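/**
 * sde_kms_vm_primary_post_commit - release HW ownership from the primary VM
 * after a commit carrying a VM_REQ_RELEASE request
 * @sde_kms: Pointer to sde kms structure
 * @state: Pointer to drm atomic state
 * Returns: Zero on success
 */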
int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct sde_vm_ops *vm_ops;
	struct sde_crtc_state *cstate;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	if (!sde_kms || !sde_vm_is_enabled(sde_kms))
		return -EINVAL;

	vm_ops = sde_vm_get_ops(sde_kms);

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_RELEASE)
		return 0;

	/* handle SDE pre-release */
	rc = sde_kms_vm_pre_release(sde_kms, state, true);
	if (rc) {
		SDE_ERROR("sde vm pre_release failed, rc=%d\n", rc);
		goto exit;
	}

	/* properly handoff color processing features */
	sde_cp_crtc_vm_primary_handoff(crtc);

	sde_vm_lock(sde_kms);

	/* handle non-SDE clients pre-release */
	if (vm_ops->vm_client_pre_release) {
		rc = vm_ops->vm_client_pre_release(sde_kms);
		if (rc) {
			SDE_ERROR("sde vm client pre_release failed, rc=%d\n",
					rc);
			sde_vm_unlock(sde_kms);
			goto exit;
		}
	}

	/* disable IRQ line */
	sde_irq_update(&sde_kms->base, false);

	/* release HW */
	if (vm_ops->vm_release) {
		rc = vm_ops->vm_release(sde_kms);
		if (rc)
			SDE_ERROR("sde vm release failed, rc=%d\n", rc);
	}

	sde_vm_unlock(sde_kms);

	_sde_crtc_vm_release_notify(crtc);

exit:
	return rc;
}

static void sde_kms_complete_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state;
	struct msm_display_conn_params params;
	struct sde_vm_ops *vm_ops;
	int i, rc = 0;

	if (!kms || !old_state)
		return;

	sde_kms = to_sde_kms(kms);

	if (!sde_kms->dev || !sde_kms->dev->dev_private)
		return;

	priv = sde_kms->dev->dev_private;

	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_complete_commit");

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		sde_crtc_complete_commit(crtc, old_crtc_state);

		/* complete secure transitions if any */
		if (sde_kms->smmu_state.transition_type == POST_COMMIT)
			_sde_kms_secure_ctrl(sde_kms, crtc, true);
	}

	for_each_old_connector_in_state(old_state, connector,
			old_conn_state, i) {
		struct sde_connector *c_conn;

		c_conn = to_sde_connector(connector);
		if (!c_conn->ops.post_kickoff)
			continue;

		memset(&params, 0, sizeof(params));

		sde_connector_complete_qsync_commit(connector, &params);

		rc = c_conn->ops.post_kickoff(connector, &params);
		if (rc)
			pr_err("Connector Post kickoff failed rc=%d\n", rc);
	}

	vm_ops = sde_vm_get_ops(sde_kms);
	if (vm_ops && vm_ops->vm_post_commit) {
		rc = vm_ops->vm_post_commit(sde_kms, old_state);
		if (rc)
			SDE_ERROR("vm post commit failed, rc = %d\n", rc);
	}

	_sde_kms_drm_check_dpms(old_state, false);

	pm_runtime_put_sync(sde_kms->dev->dev);

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
		_sde_kms_release_splash_resource(sde_kms, crtc);

	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
	SDE_ATRACE_END("sde_kms_complete_commit");
}

static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct sde_kms *sde_kms;
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;
	bool cwb_disabling;

	if (!kms || !crtc || !crtc->state) {
		SDE_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;
	sde_kms = to_sde_kms(kms);

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_wait_for_commit_done");
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		cwb_disabling = false;
		if (encoder->crtc != crtc) {
			cwb_disabling = sde_encoder_is_cwb_disabling(encoder,
					crtc);
			if (!cwb_disabling)
				continue;
		}

		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, cwb_disabling ?
				MSM_ENC_TX_COMPLETE : MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR("crtc:%d, enc:%d, cwb_d:%d, wait for commit done failed ret:%d\n",
					DRMID(crtc), DRMID(encoder),
					cwb_disabling, ret);
			SDE_EVT32(DRMID(crtc), DRMID(encoder), cwb_disabling,
					ret, SDE_EVTLOG_ERROR);
			sde_crtc_request_frame_reset(crtc, encoder);

			/* call virt_reset for the cwb encoder before exiting the loop */
			if (cwb_disabling)
				sde_encoder_virt_reset(encoder);
			break;
		}

		sde_encoder_hw_fence_error_handle(encoder);

		sde_crtc_complete_flip(crtc, NULL);

		if (cwb_disabling)
			sde_encoder_virt_reset(encoder);
	}

	/* avoid system cache update to set rd-noalloc bit when NSE feature is enabled */
	if (!test_bit(SDE_FEATURE_SYS_CACHE_NSE, sde_kms->catalog->features))
		sde_crtc_static_cache_read_kickoff(crtc);

	SDE_ATRACE_END("sde_kms_wait_for_commit_done");
}

static void sde_kms_prepare_fence(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
		SDE_ERROR("invalid argument(s)\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_prepare_fence");

	/* old_state actually contains updated crtc pointers */
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active || crtc->state->active_changed)
			sde_crtc_prepare_commit(crtc, old_crtc_state);
	}

	SDE_ATRACE_END("sde_kms_prepare_fence");
}

/**
 * _sde_kms_get_displays - query for underlying display handles and cache them
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 */
static int _sde_kms_get_displays(struct sde_kms *sde_kms)
{
	int rc = -ENOMEM;

	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return -EINVAL;
	}

	/* dsi */
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
	if (sde_kms->dsi_display_count) {
		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
				sizeof(void *), GFP_KERNEL);
		if (!sde_kms->dsi_displays) {
			SDE_ERROR("failed to allocate dsi displays\n");
			goto exit_deinit_dsi;
		}
		sde_kms->dsi_display_count =
			dsi_display_get_active_displays(sde_kms->dsi_displays,
					sde_kms->dsi_display_count);
	}

	/* wb */
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
	if (sde_kms->wb_display_count) {
		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
				sizeof(void *), GFP_KERNEL);
		if (!sde_kms->wb_displays) {
			SDE_ERROR("failed to allocate wb displays\n");
			goto exit_deinit_wb;
		}
		sde_kms->wb_display_count =
			wb_display_get_displays(sde_kms->wb_displays,
					sde_kms->wb_display_count);
	}

	/* dp */
	sde_kms->dp_displays = NULL;
	sde_kms->dp_display_count = dp_display_get_num_of_displays();
	if (sde_kms->dp_display_count) {
		sde_kms->dp_displays = kcalloc(sde_kms->dp_display_count,
				sizeof(void *), GFP_KERNEL);
		if (!sde_kms->dp_displays) {
			SDE_ERROR("failed to allocate dp displays\n");
			goto exit_deinit_dp;
		}
		sde_kms->dp_display_count =
			dp_display_get_displays(sde_kms->dp_displays,
					sde_kms->dp_display_count);
		sde_kms->dp_stream_count = dp_display_get_num_of_streams();
	}

	return 0;

exit_deinit_dp:
	kfree(sde_kms->dp_displays);
	sde_kms->dp_stream_count = 0;
	sde_kms->dp_display_count = 0;
	sde_kms->dp_displays = NULL;

exit_deinit_wb:
	kfree(sde_kms->wb_displays);
	sde_kms->wb_display_count = 0;
	sde_kms->wb_displays = NULL;

exit_deinit_dsi:
	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_display_count = 0;
	sde_kms->dsi_displays = NULL;

	return rc;
}

/**
 * _sde_kms_release_displays - release cache of underlying display handles
 * @sde_kms: Pointer to sde kms structure
 */
static void _sde_kms_release_displays(struct sde_kms *sde_kms)
{
	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return;
	}

	kfree(sde_kms->wb_displays);
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = 0;

	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = 0;
}

/**
 * _sde_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 */
static int _sde_kms_setup_displays(struct drm_device *dev,
		struct msm_drm_private *priv,
		struct sde_kms *sde_kms)
{
	static const struct sde_connector_ops dsi_ops = {
		.set_info_blob = dsi_conn_set_info_blob,
		.detect = dsi_conn_detect,
		.get_modes = dsi_connector_get_modes,
		.pre_destroy = dsi_connector_put_modes,
		.mode_valid = dsi_conn_mode_valid,
		.get_info = dsi_display_get_info,
		.set_backlight = dsi_display_set_backlight,
		.soft_reset = dsi_display_soft_reset,
		.pre_kickoff = dsi_conn_pre_kickoff,
		.clk_ctrl = dsi_display_clk_ctrl,
		.set_power = dsi_display_set_power,
		.get_mode_info = dsi_conn_get_mode_info,
		.get_dst_format = dsi_display_get_dst_format,
		.post_kickoff = dsi_conn_post_kickoff,
		.check_status = dsi_display_check_status,
		.enable_event = dsi_conn_enable_event,
		.cmd_transfer = dsi_display_cmd_transfer,
		.cont_splash_config = dsi_display_cont_splash_config,
		.cont_splash_res_disable = dsi_display_cont_splash_res_disable,
		.get_panel_vfp = dsi_display_get_panel_vfp,
		.get_default_lms = dsi_display_get_default_lms,
		.cmd_receive = dsi_display_cmd_receive,
		.install_properties = NULL,
		.set_allowed_mode_switch = dsi_conn_set_allowed_mode_switch,
		.set_dyn_bit_clk = dsi_conn_set_dyn_bit_clk,
		.get_qsync_min_fps = dsi_conn_get_qsync_min_fps,
		.get_avr_step_fps = dsi_conn_get_avr_step_fps,
		.prepare_commit = dsi_conn_prepare_commit,
		.set_submode_info = dsi_conn_set_submode_blob_info,
		.get_num_lm_from_mode = dsi_conn_get_lm_from_mode,
		.update_transfer_time = dsi_display_update_transfer_time,
		.get_panel_scan_line = dsi_display_get_panel_scan_line,
	};
	static const struct sde_connector_ops wb_ops = {
		.post_init = sde_wb_connector_post_init,
		.set_info_blob = sde_wb_connector_set_info_blob,
		.detect = sde_wb_connector_detect,
		.get_modes = sde_wb_connector_get_modes,
		.set_property = sde_wb_connector_set_property,
		.get_info = sde_wb_get_info,
		.soft_reset = NULL,
		.get_mode_info = sde_wb_get_mode_info,
		.get_dst_format = NULL,
		.check_status = NULL,
		.cmd_transfer = NULL,
		.cont_splash_config = NULL,
		.cont_splash_res_disable = NULL,
		.get_panel_vfp = NULL,
		.cmd_receive = NULL,
		.install_properties = NULL,
		.set_dyn_bit_clk = NULL,
		.set_allowed_mode_switch = NULL,
		.update_transfer_time = NULL,
	};
	static const struct sde_connector_ops dp_ops = {
		.post_init = dp_connector_post_init,
		.detect = dp_connector_detect,
		.get_modes = dp_connector_get_modes,
		.atomic_check = dp_connector_atomic_check,
		.mode_valid = dp_connector_mode_valid,
		.get_info = dp_connector_get_info,
		.get_mode_info = dp_connector_get_mode_info,
		.post_open = dp_connector_post_open,
		.check_status = NULL,
		.set_colorspace = dp_connector_set_colorspace,
		.config_hdr = dp_connector_config_hdr,
		.cmd_transfer = NULL,
		.cont_splash_config = NULL,
		.cont_splash_res_disable = NULL,
		.get_panel_vfp = NULL,
		.update_pps = dp_connector_update_pps,
		.cmd_receive = NULL,
		.install_properties = dp_connector_install_properties,
		.set_allowed_mode_switch = NULL,
		.set_dyn_bit_clk = NULL,
		.update_transfer_time = NULL,
	};
	struct msm_display_info info;
	struct drm_encoder *encoder;
	void *display, *connector;
	int i, max_encoders;
	int rc = 0;
	u32 dsc_count = 0, mixer_count = 0;
	u32 max_dp_dsc_count, max_dp_mixer_count;

	if (!dev || !priv || !sde_kms) {
		SDE_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count +
			sde_kms->dp_display_count + sde_kms->dp_stream_count;
	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
		max_encoders = ARRAY_SIZE(priv->encoders);
		SDE_ERROR("capping number of displays to %d", max_encoders);
	}

	/* wb */
	for (i = 0; i < sde_kms->wb_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->wb_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = sde_wb_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("wb get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for wb %d\n", i);
			continue;
		}

		rc = sde_wb_drm_init(display, encoder);
		if (rc) {
			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				0,
				display,
				&wb_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_VIRTUAL);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("wb %d connector init failed\n", i);
			sde_wb_drm_deinit(display);
			sde_encoder_destroy(encoder);
		}
	}

	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->dsi_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for dsi %d\n", i);
			continue;
		}

		rc = dsi_display_drm_bridge_init(display, encoder);
		if (rc) {
			SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				dsi_display_get_drm_panel(display),
				display,
				&dsi_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_DSI);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("dsi %d connector init failed\n", i);
			dsi_display_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
			continue;
		}

		rc = dsi_display_drm_ext_bridge_init(display,
				encoder, connector);
		if (rc) {
			SDE_ERROR("dsi %d ext bridge init failed, %d\n", i, rc);
			dsi_display_drm_bridge_deinit(display);
			sde_connector_destroy(connector);
			sde_encoder_destroy(encoder);
		}

		dsc_count += info.dsc_count;
		mixer_count += info.lm_count;

		if (dsi_display_has_dsc_switch_support(display))
			sde_kms->dsc_switch_support = true;
	}

	if (sde_kms->catalog->allowed_dsc_reservation_switch &&
			!sde_kms->dsc_switch_support) {
		SDE_DEBUG("dsc switch not supported\n");
		sde_kms->catalog->allowed_dsc_reservation_switch = 0;
	}

	max_dp_mixer_count = sde_kms->catalog->mixer_count > mixer_count ?
			sde_kms->catalog->mixer_count - mixer_count : 0;
	max_dp_dsc_count = sde_kms->catalog->dsc_count > dsc_count ?
			sde_kms->catalog->dsc_count - dsc_count : 0;

	if (sde_kms->catalog->allowed_dsc_reservation_switch &
			SDE_DP_DSC_RESERVATION_SWITCH)
		max_dp_dsc_count = sde_kms->catalog->dsc_count;

	/* dp */
	for (i = 0; i < sde_kms->dp_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		int idx;

		display = sde_kms->dp_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dp_connector_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dp get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("dp encoder init failed %d\n", i);
			continue;
		}

		rc = dp_drm_bridge_init(display, encoder,
				max_dp_mixer_count, max_dp_dsc_count);
		if (rc) {
			SDE_ERROR("dp bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				NULL,
				display,
				&dp_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_DisplayPort);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("dp %d connector init failed\n", i);
			dp_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
		}

		/* update display cap to MST_MODE for DP MST encoders */
		info.capabilities |= MSM_DISPLAY_CAP_MST_MODE;

		for (idx = 0; idx < sde_kms->dp_stream_count &&
				priv->num_encoders < max_encoders; idx++) {
			info.h_tile_instance[0] = idx;
			encoder = sde_encoder_init(dev, &info);
			if (IS_ERR_OR_NULL(encoder)) {
				SDE_ERROR("dp mst encoder init failed %d\n", i);
				continue;
			}

			rc = dp_mst_drm_bridge_init(display, encoder);
			if (rc) {
				SDE_ERROR("dp mst bridge %d init failed, %d\n",
						i, rc);
				sde_encoder_destroy(encoder);
				continue;
			}
			priv->encoders[priv->num_encoders++] = encoder;
		}
	}

	return 0;
}

static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;
	int i;

	if (!sde_kms) {
		SDE_ERROR("invalid sde_kms\n");
		return;
	} else if (!sde_kms->dev) {
		SDE_ERROR("invalid dev\n");
		return;
	} else if (!sde_kms->dev->dev_private) {
		SDE_ERROR("invalid dev_private\n");
		return;
	}

	priv = sde_kms->dev->dev_private;

	for (i = 0; i < priv->num_crtcs; i++)
		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
	priv->num_crtcs = 0;

	for (i = 0; i < priv->num_planes; i++)
		priv->planes[i]->funcs->destroy(priv->planes[i]);
	priv->num_planes = 0;

	for (i = 0; i < priv->num_connectors; i++)
		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
	priv->num_connectors = 0;

	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
	priv->num_encoders = 0;

	_sde_kms_release_displays(sde_kms);
}

static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_crtc *crtc;
	struct msm_drm_private *priv;
	struct sde_mdss_cfg *catalog;
	int primary_planes_idx = 0, i, ret;
	int max_crtc_count;
	u32 sspp_id[MAX_PLANES];
	u32 master_plane_id[MAX_PLANES];
	u32 num_virt_planes = 0, dummy_mixer_count = 0;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	catalog = sde_kms->catalog;

	ret = sde_core_irq_domain_add(sde_kms);
	if (ret)
		goto fail_irq;

	/*
	 * Query for underlying display drivers, and create connectors,
	 * bridges and encoders for them.
	 */
	if (!_sde_kms_get_displays(sde_kms))
		(void)_sde_kms_setup_displays(dev, priv, sde_kms);

	for (i = 0; i < catalog->mixer_count; i++)
		if (catalog->mixer[i].dummy_mixer)
			dummy_mixer_count++;

	max_crtc_count = catalog->mixer_count - dummy_mixer_count;

	/* Create the planes */
	for (i = 0; i < catalog->sspp_count; i++) {
		bool primary = true;

		if (primary_planes_idx >= max_crtc_count)
			primary = false;

		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
				(1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (primary)
			primary_planes[primary_planes_idx++] = plane;

		if (sde_hw_sspp_multirect_enabled(&catalog->sspp[i]) &&
				sde_is_custom_client()) {
			int priority =
				catalog->sspp[i].sblk->smart_dma_priority;
			sspp_id[priority - 1] = catalog->sspp[i].id;
			master_plane_id[priority - 1] = plane->base.id;
			num_virt_planes++;
		}
	}

	/* Initialize smart DMA virtual planes */
	for (i = 0; i < num_virt_planes; i++) {
		plane = sde_plane_init(dev, sspp_id[i], false,
				(1UL << max_crtc_count) - 1,
				master_plane_id[i]);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane for virtual SSPP init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = sde_crtc_init(dev, primary_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	if (sde_is_custom_client()) {
		/* All CRTCs are compatible with all planes */
		for (i = 0; i < priv->num_planes; i++)
			priv->planes[i]->possible_crtcs =
				(1 << priv->num_crtcs) - 1;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs =
			(1 << priv->num_crtcs) - 1;

	return 0;

fail:
	_sde_kms_drm_obj_destroy(sde_kms);
fail_irq:
	sde_core_irq_domain_fini(sde_kms);
	return ret;
}

/**
 * sde_kms_timeline_status - provides current timeline status
 * This API should be called without mode config lock.
 * @dev: Pointer to drm device
 */
void sde_kms_timeline_status(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;

	if (!dev) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}

	drm_for_each_crtc(crtc, dev)
		sde_crtc_timeline_status(crtc);

	if (mutex_is_locked(&dev->mode_config.mutex)) {
		/*
		 * Probably locked from last close; dump the status anyway
		 */
		SDE_ERROR("dumping conn_timeline without mode_config lock\n");
		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(conn, &conn_iter)
			sde_conn_timeline_status(conn);
		drm_connector_list_iter_end(&conn_iter);
		return;
	}

	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter)
		sde_conn_timeline_status(conn);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);
}

static int sde_kms_postinit(struct msm_kms *kms)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev;
	struct drm_crtc *crtc;
	struct msm_drm_private *priv;
	int i, rc;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev ||
			!sde_kms->dev->dev_private) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = sde_kms->dev->dev_private;

	/*
	 * Handle (re)initializations during power enable; the sde power
	 * event call has to come after drm_irq_install to handle irq updates.
	 */
	sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
	sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
			SDE_POWER_EVENT_POST_ENABLE |
			SDE_POWER_EVENT_PRE_DISABLE,
			sde_kms_handle_power_event, sde_kms, "kms");

	if (sde_kms->splash_data.num_splash_displays) {
		SDE_DEBUG("Skipping MDP Resources disable\n");
	} else {
		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
			sde_power_data_bus_set_quota(&priv->phandle, i,
				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);

		pm_runtime_put_sync(sde_kms->dev->dev);
	}

	rc = _sde_debugfs_init(sde_kms);
	if (rc)
		SDE_ERROR("sde_debugfs init failed: %d\n", rc);

	drm_for_each_crtc(crtc, dev)
		sde_crtc_post_init(dev, crtc);

	return rc;
}

static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}

static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
		struct platform_device *pdev)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_vm_ops *vm_ops;
	int i;

	if (!sde_kms || !pdev)
		return;

	dev = sde_kms->dev;
	if (!dev)
		return;

	priv = dev->dev_private;
	if (!priv)
		return;

	if (sde_kms->genpd_init) {
		sde_kms->genpd_init = false;
		pm_genpd_remove(&sde_kms->genpd);
		of_genpd_del_provider(pdev->dev.of_node);
	}

	vm_ops = sde_vm_get_ops(sde_kms);
	if (vm_ops && vm_ops->vm_deinit)
		vm_ops->vm_deinit(sde_kms, vm_ops);

	if (sde_kms->hw_intr)
		sde_hw_intr_destroy(sde_kms->hw_intr);
	sde_kms->hw_intr = NULL;

	if (sde_kms->power_event)
		sde_power_handle_unregister_event(
				&priv->phandle, sde_kms->power_event);

	_sde_kms_release_displays(sde_kms);

	_sde_kms_unmap_all_splash_regions(sde_kms);

	if (sde_kms->catalog) {
		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = sde_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
		}
	}

	if (sde_kms->rm_init)
		sde_rm_destroy(&sde_kms->rm);
	sde_kms->rm_init = false;

	if (sde_kms->catalog)
		sde_hw_catalog_deinit(sde_kms->catalog);
	sde_kms->catalog = NULL;

	if (sde_kms->sid)
		msm_iounmap(pdev, sde_kms->sid);
	sde_kms->sid = NULL;

	if (sde_kms->reg_dma)
		msm_iounmap(pdev, sde_kms->reg_dma);
	sde_kms->reg_dma = NULL;

	if (sde_kms->vbif[VBIF_NRT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
	sde_kms->vbif[VBIF_NRT] = NULL;

	if (sde_kms->vbif[VBIF_RT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
	sde_kms->vbif[VBIF_RT] = NULL;

	if (sde_kms->mmio)
		msm_iounmap(pdev, sde_kms->mmio);
	sde_kms->mmio = NULL;

	sde_reg_dma_deinit();
	_sde_kms_mmu_destroy(sde_kms);
}

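/**
 * sde_kms_mmu_detach - detach the SMMU domains from their address spaces
 * @sde_kms: Pointer to sde kms structure
 * @secure_only: True to detach only the secure domains
 * Returns: Zero on success
 */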
int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only)
{
	int i;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_mmu *mmu;
		struct msm_gem_address_space *aspace = sde_kms->aspace[i];

		if (!aspace)
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		if (secure_only &&
				!aspace->mmu->funcs->is_domain_secure(mmu))
			continue;

		/* cleanup aspace before detaching */
		msm_gem_aspace_domain_attach_detach_update(aspace, true);

		SDE_DEBUG("Detaching domain:%d\n", i);
		aspace->mmu->funcs->detach(mmu, (const char **)iommu_ports,
				ARRAY_SIZE(iommu_ports));

		aspace->domain_attached = false;
	}

	return 0;
}

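/**
 * sde_kms_mmu_attach - attach the SMMU domains to their address spaces
 * @sde_kms: Pointer to sde kms structure
 * @secure_only: True to attach only the secure domains
 * Returns: Zero on success
 */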
int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only)
{
	int i;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_mmu *mmu;
		struct msm_gem_address_space *aspace = sde_kms->aspace[i];

		if (!aspace)
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		if (secure_only &&
				!aspace->mmu->funcs->is_domain_secure(mmu))
			continue;

		SDE_DEBUG("Attaching domain:%d\n", i);
		aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
				ARRAY_SIZE(iommu_ports));

		aspace->domain_attached = true;
		msm_gem_aspace_domain_attach_detach_update(aspace, false);
	}

	return 0;
}

static void sde_kms_destroy(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->dev) {
		SDE_ERROR("invalid device\n");
		return;
	}

	_sde_kms_hw_destroy(sde_kms, to_platform_device(dev->dev));
	kfree(sde_kms);
}

static void sde_kms_helper_clear_dim_layers(struct drm_atomic_state *state,
		struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state = NULL;
	struct sde_crtc_state *c_state;

	if (!state || !crtc) {
		SDE_ERROR("invalid params\n");
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	c_state = to_sde_crtc_state(crtc_state);

	_sde_crtc_clear_dim_layers_v1(crtc_state);
	set_bit(SDE_CRTC_DIRTY_DIM_LAYERS, c_state->dirty);
}

static int sde_kms_set_crtc_for_conn(struct drm_device *dev,
		struct drm_encoder *enc, struct drm_atomic_state *state)
{
	struct drm_connector *conn = NULL;
	struct drm_connector *tmp_conn = NULL;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_connector_state *conn_state = NULL;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(tmp_conn, &conn_iter) {
		if (enc == tmp_conn->state->best_encoder) {
			conn = tmp_conn;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!conn || !enc->crtc) {
		SDE_ERROR("invalid params for enc:%d\n", DRMID(enc));
		return -EINVAL;
	}

	crtc_state = drm_atomic_get_crtc_state(state, enc->crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		SDE_ERROR("error %d getting crtc %d state\n",
				ret, DRMID(enc->crtc));
		return ret;
	}

	conn_state = drm_atomic_get_connector_state(state, conn);
	if (IS_ERR(conn_state)) {
		ret = PTR_ERR(conn_state);
		SDE_ERROR("error %d getting connector %d state\n",
				ret, DRMID(conn));
		return ret;
	}

	crtc_state->active = true;
	crtc_state->enable = true;
	ret = drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
	if (ret)
		SDE_ERROR("error %d setting the crtc\n", ret);

	return ret;
}

static void _sde_kms_plane_force_remove(struct drm_plane *plane,
		struct drm_atomic_state *state)
{
	struct drm_plane_state *plane_state;
	int ret = 0;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		SDE_ERROR("error %d getting plane %d state\n",
				ret, plane->base.id);
		return;
	}

	plane->old_fb = plane->fb;

	SDE_DEBUG("disabling plane %d\n", plane->base.id);

	ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
	if (ret != 0)
		SDE_ERROR("error %d disabling plane %d\n", ret,
				plane->base.id);

	drm_atomic_set_fb_for_plane(plane_state, NULL);
}

static int _sde_kms_connector_add_refcount(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct sde_connector_state *c_state;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		/*
		 * Acquire a connector reference to avoid removing
		 * connector in drm_release for splash and recovery cases.
		 */
		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			SDE_ERROR("error %d getting connector %d state\n",
					ret, DRMID(conn));
			break;
		}

		c_state = to_sde_connector_state(conn_state);
		if (c_state->out_fb)
			drm_framebuffer_put(c_state->out_fb);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_framebuffer *fb, *tfb;
	struct list_head fbs;
	struct drm_plane *plane;
	struct drm_crtc *crtc = NULL;
	unsigned int crtc_mask = 0;
	int ret = 0;

	INIT_LIST_HEAD(&fbs);

	list_for_each_entry_safe(fb, tfb, &file->fbs, filp_head) {
		if (drm_framebuffer_read_refcount(fb) > 1) {
			list_move_tail(&fb->filp_head, &fbs);

			drm_for_each_plane(plane, dev) {
				if (plane->state && plane->state->fb == fb) {
					if (plane->state->crtc)
						crtc_mask |= drm_crtc_mask(
							plane->state->crtc);
					_sde_kms_plane_force_remove(plane,
							state);
				}
			}
		} else {
			list_del_init(&fb->filp_head);
			drm_framebuffer_put(fb);
		}
	}

	if (list_empty(&fbs)) {
		SDE_DEBUG("skip commit as no fb(s)\n");
		if (sde_kms->dsi_display_count ==
				sde_kms->splash_data.num_splash_displays)
			_sde_kms_connector_add_refcount(sde_kms, state);
		return 0;
	}

	drm_for_each_crtc(crtc, dev) {
		if ((crtc_mask & drm_crtc_mask(crtc)) && crtc->state->active) {
			struct drm_encoder *drm_enc;

			drm_for_each_encoder_mask(drm_enc, crtc->dev,
					crtc->state->encoder_mask) {
				ret = sde_kms_set_crtc_for_conn(dev, drm_enc,
						state);
				if (ret)
					goto error;
			}

			sde_kms_helper_clear_dim_layers(state, crtc);
		}
	}

	SDE_EVT32(state, crtc_mask);
	SDE_DEBUG("null commit after removing all the pipes\n");
	ret = drm_atomic_commit(state);

error:
	if (ret) {
		/*
		 * move the fbs back to the original list, so they will be
		 * handled during drm_release
		 */
		list_for_each_entry_safe(fb, tfb, &fbs, filp_head)
			list_move_tail(&fb->filp_head, &file->fbs);

		if (ret == -EDEADLK || ret == -ERESTARTSYS)
			SDE_DEBUG("atomic commit failed in preclose, ret:%d\n",
					ret);
		else
			SDE_ERROR("atomic commit failed in preclose, ret:%d\n",
					ret);
		goto end;
	}

	while (!list_empty(&fbs)) {
		fb = list_first_entry(&fbs, typeof(*fb), filp_head);

		list_del_init(&fb->filp_head);
		drm_framebuffer_put(fb);
	}

	drm_for_each_crtc(crtc, dev) {
		if (!ret && crtc_mask & drm_crtc_mask(crtc))
			sde_kms_cancel_delayed_work(crtc);
	}

end:
	return ret;
}

static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int i;
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	int ret = 0;

	/* cancel pending flip event */
	for (i = 0; i < priv->num_crtcs; i++)
		sde_crtc_complete_flip(priv->crtcs[i], file);

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto end;
	}

	state->acquire_ctx = &ctx;

	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
		ret = _sde_kms_remove_fbs(sde_kms, file, state);
		if (ret != -EDEADLK && ret != -ERESTARTSYS)
			break;
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
	}

end:
	if (state)
		drm_atomic_state_put(state);

	SDE_DEBUG("sde preclose done, ret:%d\n", ret);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

static int _sde_kms_helper_reset_custom_properties(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	int ret = 0;

	drm_for_each_plane(plane, dev) {
		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			SDE_ERROR("error %d getting plane %d state\n",
					ret, DRMID(plane));
			return ret;
		}

		ret = sde_plane_helper_reset_custom_properties(plane,
				plane_state);
		if (ret) {
			SDE_ERROR("error %d resetting plane props %d\n",
					ret, DRMID(plane));
			return ret;
		}
	}

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			SDE_ERROR("error %d getting crtc %d state\n",
					ret, DRMID(crtc));
			return ret;
		}

		ret = sde_crtc_helper_reset_custom_properties(crtc,
				crtc_state);
		if (ret) {
			SDE_ERROR("error %d resetting crtc props %d\n",
					ret, DRMID(crtc));
			return ret;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			SDE_ERROR("error %d getting connector %d state\n",
					ret, DRMID(conn));
			break;
		}

		ret = sde_connector_helper_reset_custom_properties(conn,
				conn_state);
		if (ret) {
			SDE_ERROR("error %d resetting connector props %d\n",
					ret, DRMID(conn));
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

static void sde_kms_lastclose(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!kms) {
		SDE_ERROR("invalid argument\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_ctx;
	}

	state->acquire_ctx = &ctx;
	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret)
		goto out_state;

	ret = _sde_kms_helper_reset_custom_properties(sde_kms, state);
	if (ret)
		goto out_state;

	ret = drm_atomic_commit(state);

out_state:
	if (ret == -EDEADLK)
		goto backoff;

	drm_atomic_state_put(state);

out_ctx:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		SDE_ERROR("kms lastclose failed: %d\n", ret);

	SDE_EVT32(ret, SDE_EVTLOG_FUNC_EXIT);
	return;

backoff:
	drm_atomic_state_clear(state);
	drm_modeset_backoff(&ctx);
	SDE_EVT32(ret, SDE_EVTLOG_FUNC_CASE1);
	goto retry;
}

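/**
 * _sde_kms_validate_vm_request - validate that an active VM request touches
 * only the allowed number of crtcs/encoders and perform the HW acquire when
 * required
 * @state: Pointer to drm atomic state
 * @sde_kms: Pointer to sde kms structure
 * @vm_req: VM request type extracted from the crtc state
 * @vm_owns_hw: True when this VM currently owns the display HW
 * Returns: Zero on success
 */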
static int _sde_kms_validate_vm_request(struct drm_atomic_state *state,
		struct sde_kms *sde_kms, enum sde_crtc_vm_req vm_req,
		bool vm_owns_hw)
{
	struct drm_crtc *crtc, *active_crtc = NULL, *global_active_crtc = NULL;
	struct drm_crtc_state *new_cstate, *old_cstate, *active_cstate;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *new_connstate;
	struct sde_vm_ops *vm_ops = sde_vm_get_ops(sde_kms);
	struct sde_mdss_cfg *catalog = sde_kms->catalog;
	struct sde_connector *sde_conn;
	struct dsi_display *dsi_display;
	uint32_t i, commit_crtc_cnt = 0, global_crtc_cnt = 0;
	uint32_t crtc_encoder_cnt = 0;
	enum sde_crtc_idle_pc_state idle_pc_state;
	int rc = 0;

	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		struct sde_crtc_state *new_state = NULL;

		if (!new_cstate->active && !old_cstate->active)
			continue;

		new_state = to_sde_crtc_state(new_cstate);
		idle_pc_state = sde_crtc_get_property(new_state,
				CRTC_PROP_IDLE_PC_STATE);

		active_crtc = crtc;
		active_cstate = new_cstate;
		commit_crtc_cnt++;
	}

	list_for_each_entry(crtc, &sde_kms->dev->mode_config.crtc_list, head) {
		if (!crtc->state->active)
			continue;

		global_crtc_cnt++;
		global_active_crtc = crtc;
	}

	if (active_crtc) {
		drm_for_each_encoder_mask(encoder, active_crtc->dev,
				active_cstate->encoder_mask)
			crtc_encoder_cnt++;
	}

	for_each_new_connector_in_state(state, connector, new_connstate, i) {
		int conn_mask = active_cstate->connector_mask;

		if (drm_connector_mask(connector) & conn_mask) {
			sde_conn = to_sde_connector(connector);
			dsi_display = (struct dsi_display *)sde_conn->display;

			SDE_EVT32(DRMID(connector), DRMID(active_crtc), i,
					dsi_display->type,
					dsi_display->trusted_vm_env);
			SDE_DEBUG("VM display:%s, conn:%d, crtc:%d, type:%d, tvm:%d\n",
					dsi_display->name, DRMID(connector),
					DRMID(active_crtc), dsi_display->type,
					dsi_display->trusted_vm_env);
			break;
		}
	}

	/* Check for single crtc commits only on valid VM requests */
	if (active_crtc && global_active_crtc &&
			(commit_crtc_cnt > catalog->max_trusted_vm_displays ||
			global_crtc_cnt > catalog->max_trusted_vm_displays ||
			active_crtc != global_active_crtc)) {
		SDE_ERROR("VM switch failed; MAX:%d a_cnt:%d g_cnt:%d a_crtc:%d g_crtc:%d\n",
				catalog->max_trusted_vm_displays,
				commit_crtc_cnt, global_crtc_cnt,
				DRMID(active_crtc), DRMID(global_active_crtc));
		return -E2BIG;
	} else if ((vm_req == VM_REQ_RELEASE) &&
			((idle_pc_state == IDLE_PC_ENABLE) ||
			(crtc_encoder_cnt > TRUSTED_VM_MAX_ENCODER_PER_CRTC))) {
		/*
		 * disable idle-pc before releasing the HW;
		 * allow only the specified number of encoders on a given crtc
		 */
		SDE_ERROR("VM switch failed; idle-pc:%d max:%d encoder_cnt:%d\n",
				idle_pc_state, TRUSTED_VM_MAX_ENCODER_PER_CRTC,
				crtc_encoder_cnt);
		return -EINVAL;
	}

	if ((vm_req == VM_REQ_ACQUIRE) && !vm_owns_hw) {
		rc = vm_ops->vm_acquire(sde_kms);
		if (rc) {
			SDE_ERROR("VM acquire failed; hw_owner:%d, rc:%d\n",
					vm_owns_hw, rc);
			return rc;
		}

		if (vm_ops->vm_resource_init)
			rc = vm_ops->vm_resource_init(sde_kms, state);
	}

	return rc;
}

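/**
 * sde_kms_check_vm_request - check the atomic state for a VM ownership
 * transition request and validate it before the commit proceeds
 * @kms: Pointer to msm kms structure
 * @state: Pointer to drm atomic state
 * Returns: Zero on success
 */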
static int sde_kms_check_vm_request(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate, *old_cstate;
	struct sde_vm_ops *vm_ops;
	enum sde_crtc_vm_req old_vm_req = VM_REQ_NONE, new_vm_req = VM_REQ_NONE;
	int i, rc = 0;
	bool vm_req_active = false, prev_vm_req = false;
	bool vm_owns_hw = false;

	if (!kms || !state)
		return -EINVAL;

	sde_kms = to_sde_kms(kms);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return 0;

	if (!vm_ops->vm_request_valid || !vm_ops->vm_owns_hw || !vm_ops->vm_acquire)
		return -EINVAL;

	drm_for_each_crtc(crtc, state->dev) {
		if (crtc->state && (sde_crtc_get_property(to_sde_crtc_state(crtc->state),
				CRTC_PROP_VM_REQ_STATE) == VM_REQ_RELEASE)) {
			prev_vm_req = true;
			break;
		}
	}

	/* check for an active VM request */
	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		struct sde_crtc_state *old_state = NULL, *new_state = NULL;

		if (!new_cstate->active && !old_cstate->active)
			continue;

		new_state = to_sde_crtc_state(new_cstate);
		new_vm_req = sde_crtc_get_property(new_state, CRTC_PROP_VM_REQ_STATE);

		old_state = to_sde_crtc_state(old_cstate);
		old_vm_req = sde_crtc_get_property(old_state, CRTC_PROP_VM_REQ_STATE);

		/*
		 * A VM request should be validated in the following cases:
		 * - There is a VM request (other than VM_REQ_NONE) on the
		 *   current or previous CRTC state.
		 * - A VM transition has previously taken place on one of
		 *   the CRTCs.
		 */
		if (old_vm_req || new_vm_req || prev_vm_req) {
			if (!vm_req_active) {
				sde_vm_lock(sde_kms);
				vm_owns_hw = sde_vm_owns_hw(sde_kms);
			}

			rc = vm_ops->vm_request_valid(sde_kms, old_vm_req, new_vm_req);
			if (rc) {
				SDE_ERROR(
					"VM transition check failed; o_state:%d, n_state:%d, hw_owner:%d, rc:%d\n",
					old_vm_req, new_vm_req, vm_owns_hw, rc);
				sde_vm_unlock(sde_kms);
				vm_req_active = false;
				break;
			} else if (old_vm_req == VM_REQ_ACQUIRE && new_vm_req == VM_REQ_NONE) {
				SDE_DEBUG("VM transition valid; ignore further checks\n");
				if (!vm_req_active)
					sde_vm_unlock(sde_kms);
			} else {
				vm_req_active = true;
			}
		}
	}

	/* validate active requests and perform the acquire if necessary */
	if (vm_req_active) {
		rc = _sde_kms_validate_vm_request(state, sde_kms, new_vm_req, vm_owns_hw);
		sde_vm_unlock(sde_kms);

		SDE_EVT32(old_vm_req, new_vm_req, vm_req_active, vm_owns_hw, rc);
		SDE_DEBUG("VM o_state:%d, n_state:%d, hw_owner:%d, rc:%d\n", old_vm_req, new_vm_req,
				vm_req_active ? vm_owns_hw : -1, rc);
	}

	return rc;
}
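
/*
 * sde_kms_check_secure_transition - reject commits that would let a
 * secure-camera/secure-ui (fb_sec_dir) session run concurrently with any
 * other active display. Both the CRTCs in this atomic state and the global
 * CRTC list are scanned, so a commit on one display cannot race a secure
 * session that is already running on another.
 */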
static int sde_kms_check_secure_transition(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct drm_crtc *crtc;
	struct drm_crtc *cur_crtc = NULL, *global_crtc = NULL;
	struct drm_crtc_state *crtc_state;
	int active_crtc_cnt = 0, global_active_crtc_cnt = 0;
	bool sec_session = false, global_sec_session = false;
	uint32_t fb_ns = 0, fb_sec = 0, fb_sec_dir = 0;
	int i;

	if (!kms || !state) {
		SDE_ERROR("invalid arguments\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	/* iterate the state object for active secure/non-secure CRTCs */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->active)
			continue;

		active_crtc_cnt++;
		sde_crtc_state_find_plane_fb_modes(crtc_state, &fb_ns,
				&fb_sec, &fb_sec_dir);
		if (fb_sec_dir)
			sec_session = true;

		cur_crtc = crtc;
	}

	/* iterate the global list for active secure/non-secure CRTCs */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->state->active)
			continue;

		global_active_crtc_cnt++;
		/* update only when the crtc is not the same as the current crtc */
		if (crtc != cur_crtc) {
			fb_ns = fb_sec = fb_sec_dir = 0;
			sde_crtc_find_plane_fb_modes(crtc, &fb_ns,
					&fb_sec, &fb_sec_dir);
			if (fb_sec_dir)
				global_sec_session = true;

			global_crtc = crtc;
		}
	}

	if (!global_sec_session && !sec_session)
		return 0;

	/*
	 * - fail the crtc commit if a secure-camera/secure-ui session is
	 *   in progress on any other display
	 * - fail a secure-camera/secure-ui crtc commit if any other display
	 *   session is in progress
	 */
	if ((global_active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE) ||
	    (active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE)) {
		SDE_ERROR(
			"crtc%d secure check failed global_active:%d active:%d\n",
			cur_crtc ? cur_crtc->base.id : -1,
			global_active_crtc_cnt, active_crtc_cnt);
		return -EPERM;

	/*
	 * As only one crtc is allowed during a secure session, the crtc
	 * in this commit should match the global crtc.
	 */
	} else if (global_crtc && cur_crtc && (global_crtc != cur_crtc)) {
		SDE_ERROR("crtc%d-sec%d not allowed during crtc%d-sec%d\n",
				cur_crtc->base.id, sec_session,
				global_crtc->base.id, global_sec_session);
		return -EPERM;
	}

	return 0;
}
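
/*
 * sde_kms_vm_res_release - undo a VM_REQ_ACQUIRE when a later atomic_check
 * stage fails, by invoking the VM's acquire-fail handler under the VM lock.
 */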
static void sde_kms_vm_res_release(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct sde_crtc_state *cstate;
	struct sde_vm_ops *vm_ops;
	enum sde_crtc_vm_req vm_req;
	struct sde_kms *sde_kms = to_sde_kms(kms);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return;

	sde_vm_lock(sde_kms);

	if (vm_ops->vm_acquire_fail_handler)
		vm_ops->vm_acquire_fail_handler(sde_kms);

	sde_vm_unlock(sde_kms);
}
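
/*
 * sde_kms_check_cwb_concurrency - cap the number of concurrent writeback
 * (CWB) encoders in one atomic state to the catalog's max_cwb limit.
 */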
static int sde_kms_check_cwb_concurrency(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_encoder *encoder;
	struct sde_crtc_state *cstate;
	int i = 0, cnt = 0, max_cwb = 0;

	if (!kms || !state) {
		SDE_ERROR("invalid arguments\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	max_cwb = sde_kms->catalog->max_cwb;
	if (!max_cwb)
		return 0;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		cstate = to_sde_crtc_state(new_crtc_state);
		drm_for_each_encoder_mask(encoder, crtc->dev, cstate->cwb_enc_mask) {
			cnt++;
			SDE_DEBUG("crtc%d has cwb%d attached to it\n", crtc->base.id,
					encoder->base.id);
		}

		if (cnt > max_cwb) {
			SDE_ERROR("found %d cwb in the atomic state, max supported %d\n",
					cnt, max_cwb);
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
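
/*
 * sde_kms_atomic_check - SDE wrapper around drm_atomic_helper_check(). The
 * checks run in a fixed order: VM transition validation first (it may
 * acquire hardware), then the DRM core helper, then the secure-transition
 * and CWB-concurrency checks. Any failure after the VM check releases the
 * acquired VM resources through sde_kms_vm_res_release().
 */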
static int sde_kms_atomic_check(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	int ret;

	if (!kms || !state)
		return -EINVAL;

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	SDE_ATRACE_BEGIN("atomic_check");
	if (sde_kms_is_suspend_blocked(dev)) {
		SDE_DEBUG("suspended, skip atomic_check\n");
		ret = -EBUSY;
		goto end;
	}

	ret = sde_kms_check_vm_request(kms, state);
	if (ret) {
		SDE_ERROR("vm switch request checks failed\n");
		goto end;
	}

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		goto vm_clean_up;

	/*
	 * Check whether a secure transition (moving a CRTC between secure
	 * and non-secure state, or vice versa) is allowed. When moving to
	 * the secure state, only planes with fb_mode set to dir_translated
	 * can be staged on the CRTC, and only one CRTC can be active during
	 * the secure state.
	 */
	ret = sde_kms_check_secure_transition(kms, state);
	if (ret)
		goto vm_clean_up;

	ret = sde_kms_check_cwb_concurrency(kms, state);
	if (ret)
		goto vm_clean_up;

	goto end;

vm_clean_up:
	sde_kms_vm_res_release(kms, state);

end:
	SDE_ATRACE_END("atomic_check");
	return ret;
}
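
/*
 * _sde_kms_get_address_space - return the GEM address space for an SMMU
 * domain, or NULL when the domain index is out of range or the domain is
 * currently detached.
 */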
static struct msm_gem_address_space *
_sde_kms_get_address_space(struct msm_kms *kms,
		unsigned int domain)
{
	struct sde_kms *sde_kms;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return NULL;
	}

	sde_kms = to_sde_kms(kms);
	if (!sde_kms) {
		SDE_ERROR("invalid sde_kms\n");
		return NULL;
	}

	if (domain >= MSM_SMMU_DOMAIN_MAX)
		return NULL;

	return (sde_kms->aspace[domain] &&
			sde_kms->aspace[domain]->domain_attached) ?
		sde_kms->aspace[domain] : NULL;
}

static struct device *_sde_kms_get_address_space_device(struct msm_kms *kms,
		unsigned int domain)
{
	struct sde_kms *sde_kms;
	struct msm_gem_address_space *aspace;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return NULL;
	}

	sde_kms = to_sde_kms(kms);
	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid params\n");
		return NULL;
	}

	aspace = _sde_kms_get_address_space(kms, domain);
	return (aspace && aspace->domain_attached) ?
		msm_gem_get_aspace_device(aspace) : NULL;
}
static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
{
	struct drm_device *dev = NULL;
	struct sde_kms *sde_kms = NULL;
	struct drm_connector *connector = NULL;
	struct drm_connector_list_iter conn_iter;
	struct sde_connector *sde_conn = NULL;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return;
	}

	sde_kms = to_sde_kms(kms);

	dev = sde_kms->dev;
	if (!dev) {
		SDE_ERROR("invalid device\n");
		return;
	}

	if (!dev->mode_config.poll_enabled)
		return;

	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* Only handle HPD capable connectors. */
		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
			continue;

		sde_conn = to_sde_connector(connector);
		if (sde_conn->ops.post_open)
			sde_conn->ops.post_open(&sde_conn->base,
					sde_conn->display);
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);
}
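
/*
 * _sde_kms_update_planes_for_cont_splash - attach every plane that the
 * bootloader left staged (per the parsed pipe_info bitmaps) to the given
 * CRTC, after validating that the plane's source address falls within the
 * splash or demura reserved-memory regions.
 */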
static int _sde_kms_update_planes_for_cont_splash(struct sde_kms *sde_kms,
		struct sde_splash_display *splash_display,
		struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;
	struct drm_plane *plane;
	struct sde_splash_mem *splash;
	struct sde_splash_mem *demura;
	struct sde_plane_state *pstate;
	struct sde_sspp_index_info *pipe_info;
	enum sde_sspp pipe_id;
	bool is_virtual;
	int i;

	if (!sde_kms || !splash_display || !crtc) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	priv = sde_kms->dev->dev_private;
	pipe_info = &splash_display->pipe_info;
	splash = splash_display->splash;
	demura = splash_display->demura;

	for (i = 0; i < priv->num_planes; i++) {
		plane = priv->planes[i];
		pipe_id = sde_plane_pipe(plane);
		is_virtual = is_sde_plane_virtual(plane);

		if ((is_virtual && test_bit(pipe_id, pipe_info->virt_pipes)) ||
				(!is_virtual && test_bit(pipe_id, pipe_info->pipes))) {
			if (splash && sde_plane_validate_src_addr(plane,
					splash->splash_buf_base,
					splash->splash_buf_size)) {
				if (!demura || sde_plane_validate_src_addr(
						plane, demura->splash_buf_base,
						demura->splash_buf_size)) {
					SDE_ERROR("invalid addr on pipe:%d crtc:%d\n",
							pipe_id, DRMID(crtc));
					continue;
				}
			}

			plane->state->crtc = crtc;
			crtc->state->plane_mask |= drm_plane_mask(plane);
			pstate = to_sde_plane_state(plane->state);
			pstate->cont_splash_populated = true;

			SDE_DEBUG("set crtc:%d for plane:%d rect:%d\n",
					DRMID(crtc), DRMID(plane), is_virtual);
		}
	}

	return 0;
}
static int sde_kms_inform_cont_splash_res_disable(struct msm_kms *kms,
		struct dsi_display *dsi_display)
{
	void *display;
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	struct drm_device *dev;
	struct sde_kms *sde_kms;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector = NULL;
	struct sde_connector *sde_conn = NULL;
	int rc = 0;

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	display = dsi_display;

	if (dsi_display) {
		if (dsi_display->bridge->base.encoder) {
			encoder = dsi_display->bridge->base.encoder;
			SDE_DEBUG("encoder name = %s\n", encoder->name);
		}

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("%s: dsi get_info failed: %d\n",
					__func__, rc);
			encoder = NULL;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_encoder *c_encoder = NULL;

		drm_connector_for_each_possible_encoder(connector,
				c_encoder)
			break;

		if (!c_encoder) {
			SDE_ERROR("c_encoder not found\n");
			drm_connector_list_iter_end(&conn_iter);
			return -EINVAL;
		}

		/*
		 * Inform each interface/connector that cont_splash is
		 * disabled. This is currently supported for the DSI
		 * interface.
		 */
		sde_conn = to_sde_connector(connector);
		if (sde_conn && sde_conn->ops.cont_splash_res_disable) {
			if (!dsi_display || !encoder) {
				sde_conn->ops.cont_splash_res_disable
						(sde_conn->display);
			} else if (c_encoder->base.id == encoder->base.id) {
				/*
				 * This handles the dual-DSI configuration
				 * where one DSI interface has cont_splash
				 * enabled and the other does not.
				 */
				sde_conn->ops.cont_splash_res_disable
						(sde_conn->display);
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
static int sde_kms_vm_trusted_cont_splash_res_init(struct sde_kms *sde_kms)
{
	int i;
	void *display;
	struct dsi_display *dsi_display;
	struct drm_encoder *encoder;

	if (!sde_kms)
		return -EINVAL;

	if (!sde_in_trusted_vm(sde_kms))
		return 0;

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		display = sde_kms->dsi_displays[i];
		dsi_display = (struct dsi_display *)display;

		if (!dsi_display->bridge->base.encoder) {
			SDE_ERROR("no encoder on dsi display:%d\n", i);
			return -EINVAL;
		}

		encoder = dsi_display->bridge->base.encoder;
		encoder->possible_crtcs = 1 << i;

		SDE_DEBUG(
			"dsi-display:%d encoder id[%d]=%d name=%s crtcs=%x\n", i,
			encoder->index, encoder->base.id,
			encoder->name, encoder->possible_crtcs);
	}

	return 0;
}
static struct drm_display_mode *_sde_kms_get_splash_mode(
		struct sde_kms *sde_kms, struct drm_connector *connector,
		struct drm_atomic_state *state)
{
	struct drm_display_mode *mode, *cur_mode = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate, *old_cstate;
	u32 i = 0;

	if (sde_kms->splash_data.type == SDE_SPLASH_HANDOFF) {
		list_for_each_entry(mode, &connector->modes, head) {
			if (mode->type & DRM_MODE_TYPE_PREFERRED) {
				cur_mode = mode;
				break;
			}
		}
	} else if (state) {
		/* get the mode from the first atomic_check phase for trusted_vm */
		for_each_oldnew_crtc_in_state(state, crtc, old_cstate,
				new_cstate, i) {
			if (!new_cstate->active && !old_cstate->active)
				continue;

			list_for_each_entry(mode, &connector->modes, head) {
				if (drm_mode_equal(&new_cstate->mode, mode)) {
					cur_mode = mode;
					break;
				}
			}
		}
	}

	return cur_mode;
}
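
/*
 * sde_kms_cont_splash_config - seed the DRM software state to match the
 * scanout the bootloader left running: for each DSI display with
 * cont_splash enabled, bind the encoder, connector and CRTC, select the
 * handoff mode via _sde_kms_get_splash_mode(), and mark the CRTC,
 * connector and plane states as cont_splash_populated so the first user
 * commit can transition seamlessly.
 */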
static int sde_kms_cont_splash_config(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	void *display;
	struct dsi_display *dsi_display;
	struct msm_display_info info;
	struct drm_encoder *encoder = NULL;
	struct drm_crtc *crtc = NULL;
	int i, rc = 0;
	struct drm_display_mode *drm_mode = NULL;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector = NULL;
	struct sde_connector *sde_conn = NULL;
	struct sde_splash_display *splash_display;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev) {
		SDE_ERROR("invalid device\n");
		return -EINVAL;
	}

	rc = sde_kms_vm_trusted_cont_splash_res_init(sde_kms);
	if (rc) {
		SDE_ERROR("failed vm cont splash resource init, rc=%d\n", rc);
		return -EINVAL;
	}

	if (((sde_kms->splash_data.type == SDE_SPLASH_HANDOFF)
			&& (!sde_kms->splash_data.num_splash_regions)) ||
			!sde_kms->splash_data.num_splash_displays) {
		DRM_INFO("cont_splash feature not enabled\n");
		sde_kms_inform_cont_splash_res_disable(kms, NULL);
		return rc;
	}

	DRM_INFO("cont_splash enabled in %d of %d display(s)\n",
			sde_kms->splash_data.num_splash_displays,
			sde_kms->dsi_display_count);

	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count; ++i) {
		struct sde_crtc_state *cstate;
		struct sde_connector_state *conn_state;

		display = sde_kms->dsi_displays[i];
		dsi_display = (struct dsi_display *)display;
		splash_display = &sde_kms->splash_data.splash_display[i];

		if (!splash_display->cont_splash_enabled) {
			SDE_DEBUG("display->name = %s splash not enabled\n",
					dsi_display->name);
			sde_kms_inform_cont_splash_res_disable(kms,
					dsi_display);
			continue;
		}

		SDE_DEBUG("display->name = %s\n", dsi_display->name);

		if (dsi_display->bridge->base.encoder) {
			encoder = dsi_display->bridge->base.encoder;
			SDE_DEBUG("encoder name = %s\n", encoder->name);
		}

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			encoder = NULL;
			continue;
		}

		SDE_DEBUG("info.is_connected = %s, info.display_type = %d\n",
				((info.is_connected) ? "true" : "false"),
				info.display_type);

		if (!encoder) {
			SDE_ERROR("encoder not initialized\n");
			return -EINVAL;
		}

		priv = sde_kms->dev->dev_private;
		encoder->crtc = priv->crtcs[i];
		crtc = encoder->crtc;
		splash_display->encoder = encoder;

		SDE_DEBUG("for dsi-display:%d crtc id[%d]:%d enc id[%d]:%d\n",
				i, crtc->index, crtc->base.id, encoder->index,
				encoder->base.id);

		mutex_lock(&dev->mode_config.mutex);
		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct drm_encoder *c_encoder = NULL;

			drm_connector_for_each_possible_encoder(connector,
					c_encoder)
				break;

			if (!c_encoder) {
				SDE_ERROR("c_encoder not found\n");
				drm_connector_list_iter_end(&conn_iter);
				mutex_unlock(&dev->mode_config.mutex);
				return -EINVAL;
			}

			/*
			 * SDE_KMS doesn't attach more than one encoder to
			 * a DSI connector. So it is safe to check only with
			 * the first encoder entry. Revisit this logic if we
			 * ever have to support continuous splash for
			 * external displays in MST configuration.
			 */
			if (c_encoder->base.id == encoder->base.id)
				break;
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!connector) {
			SDE_ERROR("connector not initialized\n");
			mutex_unlock(&dev->mode_config.mutex);
			return -EINVAL;
		}
		mutex_unlock(&dev->mode_config.mutex);

		crtc->state->encoder_mask = drm_encoder_mask(encoder);
		crtc->state->connector_mask = drm_connector_mask(connector);
		connector->state->crtc = crtc;

		drm_mode = _sde_kms_get_splash_mode(sde_kms, connector, state);
		if (!drm_mode) {
			SDE_ERROR("drm_mode not found; handoff_type:%d\n",
					sde_kms->splash_data.type);
			return -EINVAL;
		}

		SDE_DEBUG(
			"drm_mode->name:%s, type:0x%x, flags:0x%x, handoff_type:%d\n",
			drm_mode->name, drm_mode->type,
			drm_mode->flags, sde_kms->splash_data.type);

		/* Update CRTC drm structure */
		crtc->state->active = true;
		rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode);
		if (rc) {
			SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc);
			return rc;
		}
		drm_mode_copy(&crtc->state->adjusted_mode, drm_mode);
		drm_mode_copy(&crtc->mode, drm_mode);
		cstate = to_sde_crtc_state(crtc->state);
		cstate->cont_splash_populated = true;

		/* Update encoder structure */
		sde_encoder_update_caps_for_cont_splash(encoder,
				splash_display, true);

		sde_crtc_update_cont_splash_settings(crtc);

		sde_conn = to_sde_connector(connector);
		if (sde_conn && sde_conn->ops.cont_splash_config)
			sde_conn->ops.cont_splash_config(sde_conn->display);

		conn_state = to_sde_connector_state(connector->state);
		conn_state->cont_splash_populated = true;

		rc = _sde_kms_update_planes_for_cont_splash(sde_kms,
				splash_display, crtc);
		if (rc) {
			SDE_ERROR("Failed: updating plane status rc=%d\n", rc);
			return rc;
		}
	}

	return rc;
}
static bool sde_kms_check_for_splash(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return false;
	}

	sde_kms = to_sde_kms(kms);

	return sde_kms->splash_data.num_splash_displays;
}
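
/*
 * sde_kms_get_mixer_count - compute how many layer mixers a mode needs.
 * The required MDP clock is htotal * vtotal * fps * 1.05; if it exceeds
 * the max core clock, or hdisplay exceeds the max mixer width, the load
 * is split across 2 and then 4 mixers. Illustrative numbers (not from
 * any catalog): htotal=4000, vtotal=2200, fps=60 gives ~554 MHz; with a
 * 460 MHz core clock cap that fails at num_lm=1 but passes at num_lm=2
 * (~277 MHz per mixer). More than 4 mixers is rejected.
 */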
static int sde_kms_get_mixer_count(const struct msm_kms *kms,
		const struct drm_display_mode *mode,
		const struct msm_resource_caps_info *res, u32 *num_lm)
{
	struct sde_kms *sde_kms;
	s64 mode_clock_hz = 0;
	s64 max_mdp_clock_hz = 0;
	s64 max_lm_width = 0;
	s64 hdisplay_fp = 0;
	s64 htotal_fp = 0;
	s64 vtotal_fp = 0;
	s64 vrefresh_fp = 0;
	s64 mdp_fudge_factor = 0;
	s64 num_lm_fp = 0;
	s64 lm_clk_fp = 0;
	s64 lm_width_fp = 0;
	int rc = 0;

	if (!num_lm) {
		SDE_ERROR("invalid num_lm pointer\n");
		return -EINVAL;
	}

	/* default to 1 layer mixer */
	*num_lm = 1;

	if (!kms || !mode || !res) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);

	max_mdp_clock_hz = drm_int2fixp(sde_kms->perf.max_core_clk_rate);
	max_lm_width = drm_int2fixp(res->max_mixer_width);
	hdisplay_fp = drm_int2fixp(mode->hdisplay);
	htotal_fp = drm_int2fixp(mode->htotal);
	vtotal_fp = drm_int2fixp(mode->vtotal);
	vrefresh_fp = drm_int2fixp(drm_mode_vrefresh(mode));
	mdp_fudge_factor = drm_fixp_from_fraction(105, 100);

	/* mode clock = [(h * v * fps * 1.05) / (num_lm)] */
	mode_clock_hz = drm_fixp_mul(htotal_fp, vtotal_fp);
	mode_clock_hz = drm_fixp_mul(mode_clock_hz, vrefresh_fp);
	mode_clock_hz = drm_fixp_mul(mode_clock_hz, mdp_fudge_factor);

	if (mode_clock_hz > max_mdp_clock_hz ||
			hdisplay_fp > max_lm_width) {
		*num_lm = 0;
		do {
			*num_lm += 2;
			num_lm_fp = drm_int2fixp(*num_lm);
			lm_clk_fp = drm_fixp_div(mode_clock_hz, num_lm_fp);
			lm_width_fp = drm_fixp_div(hdisplay_fp, num_lm_fp);

			if (*num_lm > 4) {
				rc = -EINVAL;
				goto error;
			}
		} while (lm_clk_fp > max_mdp_clock_hz ||
				lm_width_fp > max_lm_width);

		mode_clock_hz = lm_clk_fp;
	}

	SDE_DEBUG("[%s] h=%d v=%d fps=%d lm=%d mode_clk=%u max_clk=%llu\n",
			mode->name, mode->htotal, mode->vtotal, drm_mode_vrefresh(mode),
			*num_lm, drm_fixp2int(mode_clock_hz),
			sde_kms->perf.max_core_clk_rate);
	return 0;

error:
	SDE_ERROR("required mode clk exceeds max mdp clk\n");
	SDE_ERROR("[%s] h=%d v=%d fps=%d lm=%d mode_clk=%u max_clk=%llu\n",
			mode->name, mode->htotal, mode->vtotal, drm_mode_vrefresh(mode),
			*num_lm, drm_fixp2int(mode_clock_hz),
			sde_kms->perf.max_core_clk_rate);
	return rc;
}
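
/*
 * sde_kms_get_dsc_count - number of DSC encoders needed to cover hdisplay,
 * i.e. DIV_ROUND_UP(hdisplay, max_dsc_width). For example, assuming a
 * hypothetical max_dsc_width of 2560, a 5760-pixel-wide mode needs 3 DSC
 * blocks.
 */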
static int sde_kms_get_dsc_count(const struct msm_kms *kms,
		u32 hdisplay, u32 *num_dsc)
{
	struct sde_kms *sde_kms;
	uint32_t max_dsc_width;

	if (!num_dsc) {
		SDE_ERROR("invalid num_dsc pointer\n");
		return -EINVAL;
	}

	*num_dsc = 0;

	if (!kms || !hdisplay) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	max_dsc_width = sde_kms->catalog->max_dsc_width;
	*num_dsc = DIV_ROUND_UP(hdisplay, max_dsc_width);

	SDE_DEBUG("h=%d, max_dsc_width=%d, num_dsc=%d\n",
			hdisplay, max_dsc_width, *num_dsc);

	return 0;
}

static bool sde_kms_in_trusted_vm(const struct msm_kms *kms)
{
	struct sde_kms *sde_kms;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return false;
	}

	sde_kms = to_sde_kms(kms);

	return sde_in_trusted_vm(sde_kms);
}
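
/*
 * _sde_kms_null_commit - build and commit an atomic state that simply
 * re-binds the connector to its CRTC, flushing the pipeline without any
 * property changes. Used to kick displays out of continuous splash.
 */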
static int _sde_kms_null_commit(struct drm_device *dev,
		struct drm_encoder *enc)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state = NULL;
	int retry_cnt = 0;
	int ret = 0;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK && retry_cnt < SDE_KMS_MODESET_LOCK_MAX_TRIALS) {
		drm_modeset_backoff(&ctx);
		retry_cnt++;
		udelay(SDE_KMS_MODESET_LOCK_TIMEOUT_US);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		DRM_ERROR("failed to allocate atomic state, %d\n", ret);
		goto end;
	}

	state->acquire_ctx = &ctx;

	ret = sde_kms_set_crtc_for_conn(dev, enc, state);
	if (ret)
		goto end;

	ret = drm_atomic_commit(state);
	if (ret)
		SDE_ERROR("Error %d doing the atomic commit\n", ret);

end:
	if (state)
		drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
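
/*
 * sde_kms_display_early_wakeup - ask the encoder(s) behind the given
 * connector (or all connectors, with DRM_MSM_WAKE_UP_ALL_DISPLAYS) to
 * wake up early, ahead of an anticipated commit. A hypothetical caller
 * reacting to an input event might do:
 *
 *	sde_kms_display_early_wakeup(ddev, DRM_MSM_WAKE_UP_ALL_DISPLAYS);
 */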
void sde_kms_display_early_wakeup(struct drm_device *dev,
		const int32_t connector_id)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *conn;
	struct drm_encoder *drm_enc;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		if (connector_id != DRM_MSM_WAKE_UP_ALL_DISPLAYS &&
				connector_id != conn->base.id)
			continue;

		if (conn->state && conn->state->best_encoder)
			drm_enc = conn->state->best_encoder;
		else
			drm_enc = conn->encoder;

		if (drm_enc)
			sde_encoder_early_wakeup(drm_enc);
	}
	drm_connector_list_iter_end(&conn_iter);
}
static int sde_kms_trigger_null_flush(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct sde_splash_display *splash_display;
	struct drm_crtc *crtc;
	int i, rc = 0;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);

	/* If splash handoff is done, return early */
	if (!sde_kms->splash_data.num_splash_displays)
		return 0;

	/* If all built-in displays have cont splash enabled, ignore lastclose */
	if (sde_kms->dsi_display_count == sde_kms->splash_data.num_splash_displays)
		return -EINVAL;

	/*
	 * Trigger a NULL flush if a built-in secondary/primary display is
	 * stuck in splash while the primary/secondary display, respectively,
	 * is running before lastclose.
	 */
	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
		splash_display = &sde_kms->splash_data.splash_display[i];

		if (splash_display->cont_splash_enabled && splash_display->encoder) {
			crtc = splash_display->encoder->crtc;
			SDE_DEBUG("triggering null commit on enc:%d\n",
					DRMID(splash_display->encoder));
			SDE_EVT32(DRMID(splash_display->encoder), SDE_EVTLOG_FUNC_ENTRY);
			rc = _sde_kms_null_commit(sde_kms->dev, splash_display->encoder);

			if (!rc && crtc)
				sde_kms_cancel_delayed_work(crtc);
			if (rc)
				DRM_ERROR("null flush commit failure during lastclose\n");
		}
	}

	return 0;
}
static void _sde_kms_pm_suspend_idle_helper(struct sde_kms *sde_kms,
		struct device *dev)
{
	int ret, crtc_id = 0;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct msm_drm_private *priv = sde_kms->dev->dev_private;

	drm_connector_list_iter_begin(ddev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		uint64_t lp;

		lp = sde_connector_get_lp(conn);
		if (lp != SDE_MODE_DPMS_LP2)
			continue;

		if (sde_encoder_in_clone_mode(conn->encoder))
			continue;

		crtc_id = drm_crtc_index(conn->state->crtc);
		if (priv->disp_thread[crtc_id].thread)
			kthread_flush_worker(
					&priv->disp_thread[crtc_id].worker);

		ret = sde_encoder_wait_for_event(conn->encoder,
				MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR(
				"[conn: %d] wait for commit done returned %d\n",
				conn->base.id, ret);
		} else if (!ret) {
			if (priv->event_thread[crtc_id].thread)
				kthread_flush_worker(
						&priv->event_thread[crtc_id].worker);
			sde_encoder_idle_request(conn->encoder);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	msm_atomic_flush_display_threads(priv);
}

struct msm_display_mode *sde_kms_get_msm_mode(struct drm_connector_state *conn_state)
{
	struct sde_connector_state *sde_conn_state;

	if (!conn_state)
		return NULL;

	sde_conn_state = to_sde_connector_state(conn_state);
	return &sde_conn_state->msm_mode;
}
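
/*
 * sde_kms_pm_suspend - PM suspend entry point. Duplicates the current
 * atomic state into sde_kms->suspend_state for restore on resume, then
 * commits a "disable all" state: LP1 connectors are moved to LP2 and all
 * other active CRTCs are forced inactive. Suspend is skipped while any
 * built-in display is still in continuous splash.
 */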
static int sde_kms_pm_suspend(struct device *dev)
{
	struct drm_device *ddev;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_connector *conn;
	struct drm_encoder *enc;
	struct drm_connector_list_iter conn_iter;
	struct drm_atomic_state *state = NULL;
	struct sde_kms *sde_kms;
	int ret = 0, num_crtcs = 0;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
	SDE_EVT32(0);

	/* disable hot-plug polling */
	drm_kms_helper_poll_disable(ddev);

	/*
	 * If any built-in display is stuck in continuous splash, skip PM
	 * suspend entry to avoid driver SW state changes. With speculative
	 * fence enabled, HAL depends on the power_on notification for the
	 * first commit to exit the wait completion instead of the retire
	 * fence signal.
	 */
	drm_for_each_encoder(enc, ddev) {
		if (sde_encoder_in_cont_splash(enc) && enc->crtc) {
			SDE_DEBUG("skip PM suspend, splash is enabled on enc:%d\n", DRMID(enc));
			SDE_EVT32(DRMID(enc), SDE_EVTLOG_FUNC_EXIT);
			return -EINVAL;
		}
	}

	/* acquire modeset lock(s) */
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret)
		goto unlock;

	/* save current state for resume */
	if (sde_kms->suspend_state)
		drm_atomic_state_put(sde_kms->suspend_state);
	sde_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
	if (IS_ERR_OR_NULL(sde_kms->suspend_state)) {
		ret = PTR_ERR(sde_kms->suspend_state);
		if (!ret)
			ret = -EINVAL;
		DRM_ERROR("failed to back up suspend state, %d\n", ret);
		sde_kms->suspend_state = NULL;
		goto unlock;
	}

	/* create atomic state to disable all CRTCs */
	state = drm_atomic_state_alloc(ddev);
	if (!state) {
		ret = -ENOMEM;
		DRM_ERROR("failed to allocate crtc disable state, %d\n", ret);
		goto unlock;
	}

	state->acquire_ctx = &ctx;
	drm_connector_list_iter_begin(ddev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_crtc_state *crtc_state;
		uint64_t lp;

		if (!conn->state || !conn->state->crtc ||
				conn->dpms != DRM_MODE_DPMS_ON ||
				sde_encoder_in_clone_mode(conn->encoder))
			continue;

		lp = sde_connector_get_lp(conn);
		if (lp == SDE_MODE_DPMS_LP1) {
			/* transition LP1->LP2 on pm suspend */
			ret = sde_connector_set_property_for_commit(conn, state,
					CONNECTOR_PROP_LP, SDE_MODE_DPMS_LP2);
			if (ret) {
				DRM_ERROR("failed to set lp2 for conn %d\n",
						conn->base.id);
				drm_connector_list_iter_end(&conn_iter);
				goto unlock;
			}
		}

		if (lp != SDE_MODE_DPMS_LP2) {
			/* force CRTC to be inactive */
			crtc_state = drm_atomic_get_crtc_state(state,
					conn->state->crtc);
			if (IS_ERR_OR_NULL(crtc_state)) {
				DRM_ERROR("failed to get crtc %d state\n",
						conn->state->crtc->base.id);
				drm_connector_list_iter_end(&conn_iter);
				ret = -EINVAL;
				goto unlock;
			}

			if (lp != SDE_MODE_DPMS_LP1)
				crtc_state->active = false;
			++num_crtcs;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* check for nothing to do */
	if (num_crtcs == 0) {
		DRM_DEBUG("all crtcs are already in the off state\n");
		sde_kms->suspend_block = true;
		_sde_kms_pm_suspend_idle_helper(sde_kms, dev);
		goto unlock;
	}

	/* commit the "disable all" state */
	ret = drm_atomic_commit(state);
	if (ret < 0) {
		DRM_ERROR("failed to disable crtcs, %d\n", ret);
		goto unlock;
	}

	sde_kms->suspend_block = true;
	_sde_kms_pm_suspend_idle_helper(sde_kms, dev);

unlock:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	if ((ret || !num_crtcs) && sde_kms->suspend_state) {
		drm_atomic_state_put(sde_kms->suspend_state);
		sde_kms->suspend_state = NULL;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	/*
	 * The pm runtime framework avoids multiple runtime_suspend API
	 * calls by checking runtime_status. However, this call helps when
	 * there is a race condition between the pm_suspend call and a
	 * doze_suspend/power_off commit. It removes the extra vote from
	 * suspend and adds it back later to allow power collapse during
	 * the pm_suspend call.
	 */
	pm_runtime_put_sync(dev);
	pm_runtime_get_noresume(dev);

	/* dump clock state before entering suspend */
	if (sde_kms->pm_suspend_clk_dump)
		_sde_kms_dump_clks_state(sde_kms);

	return ret;
}
static int sde_kms_pm_resume(struct device *dev)
{
	struct drm_device *ddev;
	struct sde_kms *sde_kms;
	struct drm_encoder *enc;
	struct drm_modeset_acquire_ctx ctx;
	int ret, i;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));

	SDE_EVT32(sde_kms->suspend_state != NULL);

	/* if a display is in continuous splash, exit early */
	drm_for_each_encoder(enc, ddev) {
		if (sde_encoder_in_cont_splash(enc) && enc->crtc) {
			SDE_DEBUG("skip PM resume entry, splash is enabled on enc:%d\n", DRMID(enc));
			SDE_EVT32(DRMID(enc), SDE_EVTLOG_FUNC_EXIT);
			return -EINVAL;
		}
	}

	if (sde_kms->suspend_state)
		drm_mode_config_reset(ddev);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	sde_kms->suspend_block = false;

	if (sde_kms->suspend_state) {
		sde_kms->suspend_state->acquire_ctx = &ctx;
		for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
			ret = drm_atomic_helper_commit_duplicated_state(
					sde_kms->suspend_state, &ctx);
			if (ret != -EDEADLK)
				break;

			drm_modeset_backoff(&ctx);
		}

		if (ret < 0)
			DRM_ERROR("failed to restore state, %d\n", ret);

		drm_atomic_state_put(sde_kms->suspend_state);
		sde_kms->suspend_state = NULL;
	}

end:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	/* enable hot-plug polling */
	drm_kms_helper_poll_enable(ddev);

	return 0;
}
static const struct msm_kms_funcs kms_funcs = {
	.hw_init = sde_kms_hw_init,
	.postinit = sde_kms_postinit,
	.irq_preinstall = sde_irq_preinstall,
	.irq_postinstall = sde_irq_postinstall,
	.irq_uninstall = sde_irq_uninstall,
	.irq = sde_irq,
	.preclose = sde_kms_preclose,
	.lastclose = sde_kms_lastclose,
	.prepare_fence = sde_kms_prepare_fence,
	.prepare_commit = sde_kms_prepare_commit,
	.commit = sde_kms_commit,
	.complete_commit = sde_kms_complete_commit,
	.get_msm_mode = sde_kms_get_msm_mode,
	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
	.wait_for_tx_complete = sde_kms_wait_for_frame_transfer_complete,
	.check_modified_format = sde_format_check_modified_format,
	.atomic_check = sde_kms_atomic_check,
	.get_format = sde_get_msm_format,
	.round_pixclk = sde_kms_round_pixclk,
	.display_early_wakeup = sde_kms_display_early_wakeup,
	.pm_suspend = sde_kms_pm_suspend,
	.pm_resume = sde_kms_pm_resume,
	.destroy = sde_kms_destroy,
	.debugfs_destroy = sde_kms_debugfs_destroy,
	.cont_splash_config = sde_kms_cont_splash_config,
	.register_events = _sde_kms_register_events,
	.get_address_space = _sde_kms_get_address_space,
	.get_address_space_device = _sde_kms_get_address_space_device,
	.postopen = _sde_kms_post_open,
	.check_for_splash = sde_kms_check_for_splash,
	.trigger_null_flush = sde_kms_trigger_null_flush,
	.get_mixer_count = sde_kms_get_mixer_count,
	.get_dsc_count = sde_kms_get_dsc_count,
	.in_trusted_vm = sde_kms_in_trusted_vm,
};
static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
{
	int i;

	for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
		if (!sde_kms->aspace[i])
			continue;

		msm_gem_address_space_put(sde_kms->aspace[i]);
		sde_kms->aspace[i] = NULL;
	}

	return 0;
}
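
/*
 * _sde_kms_mmu_init - create a GEM address space for every SMMU domain.
 * On the unsecure domain this also maps the splash reserved-memory
 * regions and, when hw-fences are supported, the IPCC register space;
 * finally the early-map attribute set up by the bootloader for
 * cont-splash is turned off so normal translations take effect.
 */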
static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
{
	struct msm_mmu *mmu;
	struct resource *res;
	struct platform_device *pdev;
	int i, ret;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
	int early_map = 0;
#endif

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_gem_address_space *aspace;

		mmu = msm_smmu_new(sde_kms->dev->dev, i);
		if (IS_ERR(mmu)) {
			ret = PTR_ERR(mmu);
			SDE_DEBUG("failed to init iommu id %d: rc:%d\n",
					i, ret);
			continue;
		}

		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
				mmu, "sde");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			mmu->funcs->destroy(mmu);
			goto fail;
		}

		sde_kms->aspace[i] = aspace;
		aspace->domain_attached = true;

		/* Mapping splash memory block */
		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
				sde_kms->splash_data.num_splash_regions) {
			ret = _sde_kms_map_all_splash_regions(sde_kms);
			if (ret) {
				SDE_ERROR("failed to map ret:%d\n", ret);
				goto enable_trans_fail;
			}
		}

		if (i == MSM_SMMU_DOMAIN_UNSECURE && sde_kms->catalog->hw_fence_rev) {
			pdev = to_platform_device(sde_kms->dev->dev);
			res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipcc_reg");
			if (!res) {
				SDE_DEBUG("failed to get resource ipcc_reg, cannot map ipcc\n");
				sde_kms->catalog->hw_fence_rev = 0;
			} else {
				sde_kms->ipcc_base_addr = res->start;
				ret = _sde_kms_one2one_mem_map_ipcc_reg(sde_kms, resource_size(res),
						HW_FENCE_IPCC_PROTOCOLp_CLIENTc(res->start,
						sde_kms->catalog->ipcc_protocol_id,
						sde_kms->catalog->ipcc_client_phys_id));
				/* if the mapping fails, disable hw-fences */
				if (ret)
					sde_kms->catalog->hw_fence_rev = 0;
			}
		}

		/*
		 * Disable early-map, which would have been enabled during
		 * bootup by smmu through the device-tree hint for cont-splash.
		 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
		ret = mmu->funcs->enable_smmu_translations(mmu);
		if (ret) {
			SDE_ERROR("failed to enable smmu translations ret:%d\n", ret);
			goto enable_trans_fail;
		}
#else
		ret = mmu->funcs->set_attribute(mmu, DOMAIN_ATTR_EARLY_MAP,
				&early_map);
		if (ret) {
			SDE_ERROR("failed to set_att ret:%d, early_map:%d\n",
					ret, early_map);
			goto enable_trans_fail;
		}
#endif
	}

	sde_kms->base.aspace = sde_kms->aspace[0];

	return 0;

enable_trans_fail:
	_sde_kms_unmap_all_splash_regions(sde_kms);
fail:
	_sde_kms_mmu_destroy(sde_kms);

	return ret;
}
static void sde_kms_init_rot_sid_hw(struct sde_kms *sde_kms)
{
	if (!sde_kms || !sde_kms->hw_sid || sde_in_trusted_vm(sde_kms))
		return;

	sde_hw_set_rotator_sid(sde_kms->hw_sid);
}

static void sde_kms_init_hw_fences(struct sde_kms *sde_kms)
{
	if (!sde_kms || !sde_kms->hw_mdp)
		return;

	if (sde_kms->hw_mdp->ops.setup_hw_fences)
		sde_kms->hw_mdp->ops.setup_hw_fences(sde_kms->hw_mdp,
				sde_kms->catalog->ipcc_protocol_id,
				sde_kms->catalog->ipcc_client_phys_id,
				sde_kms->ipcc_base_addr);
}

static void sde_kms_init_shared_hw(struct sde_kms *sde_kms)
{
	if (!sde_kms || !sde_kms->hw_mdp || !sde_kms->catalog)
		return;

	if (sde_kms->hw_mdp->ops.reset_ubwc)
		sde_kms->hw_mdp->ops.reset_ubwc(sde_kms->hw_mdp,
				sde_kms->catalog);
}

static void _sde_kms_set_lutdma_vbif_remap(struct sde_kms *sde_kms)
{
	struct sde_vbif_set_qos_params qos_params;
	struct sde_mdss_cfg *catalog;

	if (!sde_kms->catalog)
		return;

	catalog = sde_kms->catalog;

	memset(&qos_params, 0, sizeof(qos_params));
	qos_params.vbif_idx = catalog->dma_cfg.vbif_idx;
	qos_params.xin_id = catalog->dma_cfg.xin_id;
	qos_params.clk_ctrl = catalog->dma_cfg.clk_ctrl;
	qos_params.client_type = VBIF_LUTDMA_CLIENT;

	sde_vbif_set_qos_remap(sde_kms, &qos_params);
}
static int _sde_kms_active_override(struct sde_kms *sde_kms, bool enable)
{
	struct sde_hw_uidle *uidle;

	if (!sde_kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	uidle = sde_kms->hw_uidle;

	if (uidle && uidle->ops.active_override_enable)
		uidle->ops.active_override_enable(uidle, enable);

	return 0;
}

void sde_kms_cpu_vote_for_irq(struct sde_kms *sde_kms, bool enable)
{
	struct msm_drm_private *priv = sde_kms->dev->dev_private;

	mutex_lock(&priv->phandle.phandle_lock);
	if (enable && atomic_inc_return(&sde_kms->irq_vote_count) == 1)
		_sde_kms_update_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask);
	else if (!enable && atomic_dec_return(&sde_kms->irq_vote_count) == 0)
		_sde_kms_remove_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask);
	mutex_unlock(&priv->phandle.phandle_lock);
}
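
/*
 * sde_kms_irq_affinity_notify - re-apply the PM QoS IRQ vote when the
 * display IRQ migrates to a different CPU mask, so the latency request
 * follows the CPUs that actually service the interrupt.
 */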
static void sde_kms_irq_affinity_notify(
		struct irq_affinity_notify *affinity_notify,
		const cpumask_t *mask)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms = container_of(affinity_notify,
			struct sde_kms, affinity_notify);

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
		return;

	priv = sde_kms->dev->dev_private;

	mutex_lock(&priv->phandle.phandle_lock);
	_sde_kms_remove_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask);

	/* request the vote with the updated irq cpu mask */
	if (atomic_read(&sde_kms->irq_vote_count))
		_sde_kms_update_pm_qos_irq_request(sde_kms, mask);

	mutex_unlock(&priv->phandle.phandle_lock);
}

static void sde_kms_irq_affinity_release(struct kref *ref) {}
static void sde_kms_handle_power_event(u32 event_type, void *usr)
{
	struct sde_kms *sde_kms = usr;
	struct msm_kms *msm_kms;

	if (!sde_kms)
		return;

	msm_kms = &sde_kms->base;

	SDE_DEBUG("event_type:%d\n", event_type);
	SDE_EVT32_VERBOSE(event_type);

	if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
		sde_irq_update(msm_kms, true);
		sde_kms->first_kickoff = true;

		/*
		 * The rotator SID and hw-fences need to be programmed here
		 * since UEFI does not configure them during continuous
		 * splash.
		 */
		sde_kms_init_rot_sid_hw(sde_kms);
		sde_kms_init_hw_fences(sde_kms);

		if (sde_kms->splash_data.num_splash_displays ||
				sde_in_trusted_vm(sde_kms))
			return;

		sde_vbif_init_memtypes(sde_kms);
		sde_kms_init_shared_hw(sde_kms);
		_sde_kms_set_lutdma_vbif_remap(sde_kms);
	} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
		sde_irq_update(msm_kms, false);
		sde_kms->first_kickoff = false;

		if (sde_in_trusted_vm(sde_kms))
			return;

		_sde_kms_active_override(sde_kms, true);
		sde_vbif_axi_halt_request(sde_kms);
	}
}
#define genpd_to_sde_kms(domain) container_of(domain, struct sde_kms, genpd)

static int sde_kms_pd_enable(struct generic_pm_domain *genpd)
{
	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
	int rc = -EINVAL;

	SDE_DEBUG("\n");

	rc = pm_runtime_resume_and_get(sde_kms->dev->dev);
	rc = (rc > 0) ? 0 : rc;

	SDE_EVT32(rc, genpd->device_count);

	return rc;
}

static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
{
	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);

	SDE_DEBUG("\n");

	pm_runtime_put_sync(sde_kms->dev->dev);

	SDE_EVT32(genpd->device_count);

	return 0;
}
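
/*
 * _sde_kms_get_demura_plane_data - look up the per-display demura
 * reserved-memory regions (demura_region_0, demura_region_1, ...) under
 * /reserved-memory. A sketch of a matching dtsi node (illustrative name
 * and addresses, not taken from this file):
 *
 *	demura_region_0@e0000000 {
 *		reg = <0x0 0xe0000000 0x0 0x100000>;
 *	};
 */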
static int _sde_kms_get_demura_plane_data(struct sde_splash_data *data)
{
	int i = 0;
	int ret = 0;
	int count = 0;
	struct device_node *parent, *node;
	struct resource r;
	char node_name[DEMURA_REGION_NAME_MAX];
	struct sde_splash_mem *mem;
	struct sde_splash_display *splash_display;

	if (!data->num_splash_displays) {
		SDE_DEBUG("no splash displays. skipping\n");
		return 0;
	}

	/*
	 * It is expected that each active demura block will have
	 * its own memory region defined.
	 */
	parent = of_find_node_by_path("/reserved-memory");

	for (i = 0; i < data->num_splash_displays; i++) {
		splash_display = &data->splash_display[i];
		snprintf(&node_name[0], DEMURA_REGION_NAME_MAX,
				"demura_region_%d", i);

		splash_display->demura = NULL;
		node = of_find_node_by_name(parent, node_name);
		if (!node) {
			SDE_DEBUG("no Demura node %s! disp count: %d\n",
					node_name, data->num_splash_displays);
			continue;
		} else if (of_address_to_resource(node, 0, &r)) {
			SDE_ERROR("invalid data for:%s\n", node_name);
			ret = -EINVAL;
			break;
		}

		mem = &data->demura_mem[i];
		mem->splash_buf_base = (unsigned long)r.start;
		mem->splash_buf_size = (r.end - r.start) + 1;

		if (!mem->splash_buf_base && !mem->splash_buf_size) {
			SDE_DEBUG("dummy splash mem for disp %d. Skipping\n",
					(i + 1));
			continue;
		} else if (!mem->splash_buf_base || !mem->splash_buf_size) {
			SDE_ERROR("mem for disp %d invalid: add:%lx size:%lx\n",
					(i + 1), mem->splash_buf_base,
					mem->splash_buf_size);
			continue;
		}

		mem->ref_cnt = 0;
		splash_display->demura = mem;
		count++;

		SDE_DEBUG("demura mem for disp:%d add:%lx size:%x\n", (i + 1),
				mem->splash_buf_base,
				mem->splash_buf_size);
	}

	if (!ret && !count)
		SDE_DEBUG("no demura regions for cont. splash found!\n");

	return ret;
}
static int _sde_kms_get_splash_data(struct sde_splash_data *data)
{
	int i = 0;
	int ret = 0;
	struct device_node *parent, *node, *node1;
	struct resource r, r1;
	const char *node_name = "splash_region";
	struct sde_splash_mem *mem;
	bool share_splash_mem = false;
	int num_displays, num_regions;
	struct sde_splash_display *splash_display;

	if (of_find_node_with_property(NULL, "qcom,sde-emulated-env"))
		return 0;

	if (!data)
		return -EINVAL;

	memset(data, 0, sizeof(*data));

	parent = of_find_node_by_path("/reserved-memory");
	if (!parent) {
		SDE_ERROR("failed to find reserved-memory node\n");
		return -EINVAL;
	}

	node = of_find_node_by_name(parent, node_name);
	if (!node) {
		SDE_DEBUG("failed to find node %s\n", node_name);
		return -EINVAL;
	}

	node1 = of_find_node_by_name(NULL, "disp_rdump_region");
	if (!node1)
		SDE_DEBUG("failed to find disp ramdump memory reservation\n");

	/*
	 * Support both sharing a single splash memory region across all the
	 * built-in displays and an independent splash region per display.
	 * In case of an independent splash region for each connected
	 * display, the dtsi node of cont_splash_region should be a
	 * collection of all the memory regions.
	 * Ex: <r1.start r1.end r2.start r2.end ... rn.start, rn.end>
	 */
	num_displays = dsi_display_get_num_of_displays();
	num_regions = of_property_count_u64_elems(node, "reg") / 2;

	data->num_splash_displays = num_displays;

	SDE_DEBUG("splash mem num_regions:%d\n", num_regions);
	if (num_displays > num_regions) {
		share_splash_mem = true;
		pr_info(":%d displays share same splash buf\n", num_displays);
	}

	for (i = 0; i < num_displays; i++) {
		splash_display = &data->splash_display[i];

		if (!i || !share_splash_mem) {
			if (of_address_to_resource(node, i, &r)) {
				SDE_ERROR("invalid data for:%s\n", node_name);
				return -EINVAL;
			}

			mem = &data->splash_mem[i];
			if (!node1 || of_address_to_resource(node1, i, &r1)) {
				SDE_DEBUG("failed to find ramdump memory\n");
				mem->ramdump_base = 0;
				mem->ramdump_size = 0;
			} else {
				mem->ramdump_base = (unsigned long)r1.start;
				mem->ramdump_size = (r1.end - r1.start) + 1;
			}

			mem->splash_buf_base = (unsigned long)r.start;
			mem->splash_buf_size = (r.end - r.start) + 1;
			mem->ref_cnt = 0;
			splash_display->splash = mem;
			data->num_splash_regions++;
		} else {
			data->splash_display[i].splash = &data->splash_mem[0];
		}

		SDE_DEBUG("splash mem for disp:%d add:%lx size:%x\n", (i + 1),
				splash_display->splash->splash_buf_base,
				splash_display->splash->splash_buf_size);
	}

	data->type = SDE_SPLASH_HANDOFF;

	ret = _sde_kms_get_demura_plane_data(data);

	return ret;
}
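
/*
 * _sde_kms_hw_init_ioremap - map the MDP, VBIF (RT and optional NRT),
 * optional REG-DMA and optional SID register blocks, registering each
 * mapped range with the sde_dbg framework. Only mdp_phys and vbif_phys
 * are mandatory; the others are treated as absent when not defined.
 */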
static int _sde_kms_hw_init_ioremap(struct sde_kms *sde_kms,
		struct platform_device *platformdev)
{
	int rc = -EINVAL;

	sde_kms->mmio = msm_ioremap(platformdev, "mdp_phys", "mdp_phys");
	if (IS_ERR(sde_kms->mmio)) {
		rc = PTR_ERR(sde_kms->mmio);
		SDE_ERROR("mdp register memory map failed: %d\n", rc);
		sde_kms->mmio = NULL;
		goto error;
	}
	DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio);
	sde_kms->mmio_len = msm_iomap_size(platformdev, "mdp_phys");

	rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
			sde_kms->mmio_len,
			msm_get_phys_addr(platformdev, "mdp_phys"),
			SDE_DBG_SDE);
	if (rc)
		SDE_ERROR("dbg base register kms failed: %d\n", rc);

	sde_kms->vbif[VBIF_RT] = msm_ioremap(platformdev, "vbif_phys", "vbif_phys");
	if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
		SDE_ERROR("vbif register memory map failed: %d\n", rc);
		sde_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}
	sde_kms->vbif_len[VBIF_RT] = msm_iomap_size(platformdev, "vbif_phys");

	rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
			sde_kms->vbif_len[VBIF_RT],
			msm_get_phys_addr(platformdev, "vbif_phys"),
			SDE_DBG_VBIF_RT);
	if (rc)
		SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);

	sde_kms->vbif[VBIF_NRT] = msm_ioremap(platformdev, "vbif_nrt_phys", "vbif_nrt_phys");
	if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
		sde_kms->vbif[VBIF_NRT] = NULL;
		SDE_DEBUG("VBIF NRT is not defined\n");
	} else {
		sde_kms->vbif_len[VBIF_NRT] = msm_iomap_size(platformdev, "vbif_nrt_phys");
	}

	sde_kms->reg_dma = msm_ioremap(platformdev, "regdma_phys", "regdma_phys");
	if (IS_ERR(sde_kms->reg_dma)) {
		sde_kms->reg_dma = NULL;
		SDE_DEBUG("REG_DMA is not defined\n");
	} else {
		unsigned long mdp_addr = msm_get_phys_addr(platformdev, "mdp_phys");

		sde_kms->reg_dma_len = msm_iomap_size(platformdev, "regdma_phys");
		sde_kms->reg_dma_off = msm_get_phys_addr(platformdev, "regdma_phys") - mdp_addr;

		rc = sde_dbg_reg_register_base(LUTDMA_DBG_NAME, sde_kms->reg_dma,
				sde_kms->reg_dma_len,
				msm_get_phys_addr(platformdev, "regdma_phys"),
				SDE_DBG_LUTDMA);
		if (rc)
			SDE_ERROR("dbg base register reg_dma failed: %d\n", rc);
	}

	sde_kms->sid = msm_ioremap(platformdev, "sid_phys", "sid_phys");
	if (IS_ERR(sde_kms->sid)) {
		/* print the actual ioremap error, not a stale rc */
		SDE_DEBUG("sid register is not defined: %ld\n",
				PTR_ERR(sde_kms->sid));
		sde_kms->sid = NULL;
	} else {
		sde_kms->sid_len = msm_iomap_size(platformdev, "sid_phys");
		rc = sde_dbg_reg_register_base("sid", sde_kms->sid,
				sde_kms->sid_len,
				msm_get_phys_addr(platformdev, "sid_phys"),
				SDE_DBG_SID);
		if (rc)
			SDE_ERROR("dbg base register sid failed: %d\n", rc);
	}

error:
	return rc;
}
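
/*
 * Register a generic power-domain provider when the device tree node
 * carries "#power-domain-cells"; otherwise this helper is a no-op.
 */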
static int _sde_kms_hw_init_power_helper(struct drm_device *dev,
		struct sde_kms *sde_kms)
{
	int rc = 0;

	if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) {
		sde_kms->genpd.name = dev->unique;
		sde_kms->genpd.power_off = sde_kms_pd_disable;
		sde_kms->genpd.power_on = sde_kms_pd_enable;

		rc = pm_genpd_init(&sde_kms->genpd, NULL, true);
		if (rc < 0) {
			SDE_ERROR("failed to init genpd provider %s: %d\n",
					sde_kms->genpd.name, rc);
			return rc;
		}

		rc = of_genpd_add_provider_simple(dev->dev->of_node,
				&sde_kms->genpd);
		if (rc < 0) {
			SDE_ERROR("failed to add genpd provider %s: %d\n",
					sde_kms->genpd.name, rc);
			pm_genpd_remove(&sde_kms->genpd);
			return rc;
		}

		sde_kms->genpd_init = true;
		SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
	}

	return rc;
}
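
/*
 * Bring up the SDE hw blocks in dependency order: catalog, power domain,
 * MMU, reg-dma, resource manager, hw interrupts, continuous splash
 * handoff, MDP top, VBIFs, the optional uidle and sid blocks, core perf
 * and finally the DRM objects. The error labels below share one unwind
 * path that only destroys perf; the remaining teardown is presumably
 * left to _sde_kms_hw_destroy() in the caller (see sde_kms_hw_init()).
 */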
static int _sde_kms_hw_init_blocks(struct sde_kms *sde_kms,
		struct drm_device *dev,
		struct msm_drm_private *priv)
{
	int i, rc = -EINVAL;

	sde_kms->catalog = sde_hw_catalog_init(dev);
	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
		rc = PTR_ERR(sde_kms->catalog);
		if (!sde_kms->catalog)
			rc = -EINVAL;
		SDE_ERROR("catalog init failed: %d\n", rc);
		sde_kms->catalog = NULL;
		goto power_error;
	}

	sde_kms->core_rev = sde_kms->catalog->hw_rev;
	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);

	/* initialize power domain if defined */
	rc = _sde_kms_hw_init_power_helper(dev, sde_kms);
	if (rc) {
		SDE_ERROR("_sde_kms_hw_init_power_helper failed: %d\n", rc);
		goto genpd_err;
	}

	rc = _sde_kms_mmu_init(sde_kms);
	if (rc) {
		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	/* initialize the reg dma block, which is a singleton */
	sde_kms->catalog->dma_cfg.base_off = sde_kms->reg_dma_off;
	rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
			sde_kms->dev);
	if (rc) {
		SDE_ERROR("reg dma init failed: %d\n", rc);
		goto power_error;
	}

	sde_dbg_init_dbg_buses(sde_kms->core_rev);

	rc = sde_rm_init(&sde_kms->rm);
	if (rc) {
		SDE_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}
	sde_kms->rm_init = true;

	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
		rc = PTR_ERR(sde_kms->hw_intr);
		SDE_ERROR("hw_intr init failed: %d\n", rc);
		sde_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	/*
	 * Attempt continuous splash handoff only if reserved splash memory
	 * was found; release the splash resources on any error while reading
	 * the display hw configuration from the splash data.
	 */
	if (sde_kms->splash_data.num_splash_regions) {
		struct sde_splash_display *display;
		int ret, display_count =
			sde_kms->splash_data.num_splash_displays;

		ret = sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
				&sde_kms->splash_data, sde_kms->catalog);

		for (i = 0; i < display_count; i++) {
			display = &sde_kms->splash_data.splash_display[i];
			/*
			 * free splash region on resource init failure and
			 * cont-splash disabled case
			 */
			if (!display->cont_splash_enabled || ret)
				_sde_kms_free_splash_display_data(
						sde_kms, display);
		}
	}

	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
		rc = PTR_ERR(sde_kms->hw_mdp);
		if (!sde_kms->hw_mdp)
			rc = -EINVAL;
		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
		sde_kms->hw_mdp = NULL;
		goto power_error;
	}

	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = sde_kms->catalog->vbif[i].id;

		/* index by vbif_idx so lookup and error handling stay consistent */
		sde_kms->hw_vbif[vbif_idx] = sde_hw_vbif_init(vbif_idx,
				sde_kms->vbif[vbif_idx], sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
			if (!sde_kms->hw_vbif[vbif_idx])
				rc = -EINVAL;
			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			sde_kms->hw_vbif[vbif_idx] = NULL;
			goto power_error;
		}
	}

	if (sde_kms->catalog->uidle_cfg.uidle_rev) {
		sde_kms->hw_uidle = sde_hw_uidle_init(UIDLE, sde_kms->mmio,
				sde_kms->mmio_len, sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_uidle)) {
			rc = PTR_ERR(sde_kms->hw_uidle);
			if (!sde_kms->hw_uidle)
				rc = -EINVAL;
			/* uidle is optional, so do not make it a fatal error */
			SDE_ERROR("failed to init uidle rc:%d\n", rc);
			sde_kms->hw_uidle = NULL;
			rc = 0;
		}
	} else {
		sde_kms->hw_uidle = NULL;
	}

	if (sde_kms->sid) {
		sde_kms->hw_sid = sde_hw_sid_init(sde_kms->sid,
				sde_kms->sid_len, sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_sid)) {
			rc = PTR_ERR(sde_kms->hw_sid);
			SDE_ERROR("failed to init sid: %d\n", rc);
			sde_kms->hw_sid = NULL;
			goto power_error;
		}
	}

	rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
			&priv->phandle, "core_clk");
	if (rc) {
		SDE_ERROR("failed to init perf: %d\n", rc);
		goto perf_err;
	}

	/*
	 * Set the disable_immediate flag when the driver supports precise
	 * vsync timestamps, as the DRM hooks for vblank timestamps/counters
	 * are chosen based on this feature.
	 */
	if (test_bit(SDE_FEATURE_HW_VSYNC_TS, sde_kms->catalog->features))
		dev->vblank_disable_immediate = true;

	/*
	 * _sde_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _sde_kms_drm_obj_init(sde_kms);
	if (rc) {
		SDE_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	return 0;

genpd_err:
drm_obj_init_err:
	sde_core_perf_destroy(&sde_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
	return rc;
}
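
/*
 * Collect the trusted-VM inclusion register ranges from the catalog into
 * mem_list. Entries are staged on a local list first so that an
 * allocation failure mid-loop frees only what was allocated here.
 */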
int _sde_kms_get_tvm_inclusion_mem(struct sde_mdss_cfg *catalog, struct list_head *mem_list)
{
	struct list_head temp_head;
	struct msm_io_mem_entry *io_mem;
	int rc, i = 0;

	INIT_LIST_HEAD(&temp_head);

	for (i = 0; i < catalog->tvm_reg_count; i++) {
		struct resource *res = &catalog->tvm_reg[i];

		io_mem = kzalloc(sizeof(struct msm_io_mem_entry), GFP_KERNEL);
		if (!io_mem) {
			rc = -ENOMEM;
			goto parse_fail;
		}

		io_mem->base = res->start;
		io_mem->size = resource_size(res);

		list_add(&io_mem->list, &temp_head);
	}

	list_splice(&temp_head, mem_list);

	return 0;

parse_fail:
	msm_dss_clean_io_mem(&temp_head);

	return rc;
}

#ifdef CONFIG_DRM_SDE_VM
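/*
 * Gather the io memory ranges and the SDE irq that participate in the
 * trusted-VM handoff: the KMS register spaces, pmic io memory and the
 * catalog's tvm inclusion ranges.
 */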
int sde_kms_get_io_resources(struct sde_kms *sde_kms, struct msm_io_res *io_res)
{
	struct platform_device *pdev = to_platform_device(sde_kms->dev->dev);
	int rc = 0;

	rc = msm_dss_get_io_mem(pdev, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get io mem for KMS, rc:%d\n", rc);
		return rc;
	}

	rc = msm_dss_get_pmic_io_mem(pdev, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get io mem for pmic, rc:%d\n", rc);
		return rc;
	}

	rc = msm_dss_get_io_irq(pdev, &io_res->irq, GH_IRQ_LABEL_SDE);
	if (rc) {
		SDE_ERROR("failed to get io irq for KMS, rc:%d\n", rc);
		return rc;
	}

	rc = _sde_kms_get_tvm_inclusion_mem(sde_kms->catalog, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get tvm inclusion mem ranges, rc:%d\n", rc);
		return rc;
	}

	return rc;
}
#endif
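
/*
 * Top-level kms hardware init: map the register spaces, read the
 * continuous splash data from DT (failure there is non-fatal and only
 * logged), initialize the hw blocks, seed the drm mode_config limits
 * from the catalog and register the irq affinity notifier and VM ops.
 * Any error unwinds through _sde_kms_hw_destroy().
 */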
static int sde_kms_hw_init(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct platform_device *platformdev;
	int irq_num, rc = -EINVAL;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		goto end;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->dev) {
		SDE_ERROR("invalid device\n");
		goto end;
	}
	platformdev = to_platform_device(dev->dev);

	priv = dev->dev_private;
	if (!priv) {
		SDE_ERROR("invalid private data\n");
		goto end;
	}

	rc = _sde_kms_hw_init_ioremap(sde_kms, platformdev);
	if (rc)
		goto error;

	rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
	if (rc)
		SDE_DEBUG("sde splash data fetch failed: %d\n", rc);

	rc = _sde_kms_hw_init_blocks(sde_kms, dev, priv);
	if (rc)
		goto error;

	dev->mode_config.min_width = sde_kms->catalog->min_display_width;
	dev->mode_config.min_height = sde_kms->catalog->min_display_height;
	dev->mode_config.max_width = sde_kms->catalog->max_display_width;
	dev->mode_config.max_height = sde_kms->catalog->max_display_height;

	mutex_init(&sde_kms->secure_transition_lock);
	atomic_set(&sde_kms->detach_sec_cb, 0);
	atomic_set(&sde_kms->detach_all_cb, 0);
	atomic_set(&sde_kms->irq_vote_count, 0);

	/*
	 * Support format modifiers for compression etc.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0))
	dev->mode_config.allow_fb_modifiers = true;
#endif

	sde_kms->affinity_notify.notify = sde_kms_irq_affinity_notify;
	sde_kms->affinity_notify.release = sde_kms_irq_affinity_release;

	irq_num = platform_get_irq(to_platform_device(sde_kms->dev->dev), 0);
	SDE_DEBUG("Registering for notification of irq_num: %d\n", irq_num);
	irq_set_affinity_notifier(irq_num, &sde_kms->affinity_notify);

	if (sde_in_trusted_vm(sde_kms)) {
		rc = sde_vm_trusted_init(sde_kms);
		sde_dbg_set_hw_ownership_status(false);
	} else {
		rc = sde_vm_primary_init(sde_kms);
		sde_dbg_set_hw_ownership_status(true);
	}
	if (rc) {
		SDE_ERROR("failed to initialize VM ops, rc: %d\n", rc);
		goto error;
	}

	return 0;

error:
	_sde_kms_hw_destroy(sde_kms, platformdev);
end:
	return rc;
}
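
/*
 * Allocate the sde_kms wrapper and bind it to the drm device; the actual
 * hardware bring-up is presumably driven later through the kms_funcs
 * table (see sde_kms_hw_init() above).
 */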
struct msm_kms *sde_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;

	if (!dev || !dev->dev_private) {
		SDE_ERROR("drm device node invalid\n");
		return ERR_PTR(-EINVAL);
	}

	priv = dev->dev_private;

	sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
	if (!sde_kms) {
		SDE_ERROR("failed to allocate sde kms\n");
		return ERR_PTR(-ENOMEM);
	}

	msm_kms_init(&sde_kms->base, &kms_funcs);
	sde_kms->dev = dev;

	return &sde_kms->base;
}
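
/*
 * Undo the trusted-VM resource handoff: free any splash data still held
 * for each dsi display, mark the displays inactive and clear splash_data.
 */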
void sde_kms_vm_trusted_resource_deinit(struct sde_kms *sde_kms)
{
	struct dsi_display *display;
	struct sde_splash_display *handoff_display;
	int i;

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		handoff_display = &sde_kms->splash_data.splash_display[i];
		display = (struct dsi_display *)sde_kms->dsi_displays[i];

		if (handoff_display->cont_splash_enabled)
			_sde_kms_free_splash_display_data(sde_kms,
					handoff_display);
		dsi_display_set_active_state(display, false);
	}

	memset(&sde_kms->splash_data, 0, sizeof(struct sde_splash_data));
}
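
/*
 * Claim the display resources handed over to this (trusted) VM. Exactly
 * one continuous-splash display is supported here; the power vote taken
 * at the end pairs with its removal on the first successful commit.
 */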
int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_splash_display *handoff_display;
	struct dsi_display *display;
	int ret, i;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	sde_kms->splash_data.type = SDE_VM_HANDOFF;
	sde_kms->splash_data.num_splash_displays = sde_kms->dsi_display_count;

	ret = sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
			&sde_kms->splash_data, sde_kms->catalog);
	if (ret) {
		SDE_ERROR("invalid cont splash init, ret:%d\n", ret);
		return -EINVAL;
	}

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		handoff_display = &sde_kms->splash_data.splash_display[i];
		display = (struct dsi_display *)sde_kms->dsi_displays[i];

		if (!handoff_display->cont_splash_enabled || ret)
			_sde_kms_free_splash_display_data(sde_kms,
					handoff_display);
		else
			dsi_display_set_active_state(display, true);
	}

	if (sde_kms->splash_data.num_splash_displays != 1) {
		SDE_ERROR("unsupported number of splash displays: %d\n",
				sde_kms->splash_data.num_splash_displays);
		ret = -EINVAL;
		goto error;
	}

	ret = sde_kms_cont_splash_config(&sde_kms->base, state);
	if (ret) {
		SDE_ERROR("error in setting handoff configs\n");
		goto error;
	}

	/*
	 * Fill in the power vote for the continuous splash handoff path;
	 * it is removed on the first successful commit.
	 */
	ret = pm_runtime_resume_and_get(sde_kms->dev->dev);
	if (ret < 0) {
		SDE_ERROR("failed to enable power resource %d\n", ret);
		SDE_EVT32(ret, SDE_EVTLOG_ERROR);
		goto error;
	}

	return 0;

error:
	return ret;
}
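
/*
 * Dispatch custom event (un)registration to the crtc or connector
 * helpers; refused with -EACCES while another VM owns the hardware.
 */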
static int _sde_kms_register_events(struct msm_kms *kms,
		struct drm_mode_object *obj, u32 event, bool en)
{
	int ret = 0;
	struct drm_crtc *crtc;
	struct drm_connector *conn;
	struct sde_kms *sde_kms;

	if (!kms || !obj) {
		SDE_ERROR("invalid argument kms %pK obj %pK\n", kms, obj);
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);

	/* event registration requires hw access, so check vm ownership first */
	sde_vm_lock(sde_kms);
	if (!sde_vm_owns_hw(sde_kms)) {
		sde_vm_unlock(sde_kms);
		SDE_DEBUG("HW is owned by other VM\n");
		return -EACCES;
	}

	switch (obj->type) {
	case DRM_MODE_OBJECT_CRTC:
		crtc = obj_to_crtc(obj);
		ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
		break;
	case DRM_MODE_OBJECT_CONNECTOR:
		conn = obj_to_connector(obj);
		ret = sde_connector_register_custom_event(sde_kms, conn,
				event, en);
		break;
	}

	sde_vm_unlock(sde_kms);

	return ret;
}
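
/*
 * Recovery helper: log the request and wait for the encoder to reach
 * MSM_ENC_ACTIVE_REGION.
 */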
int sde_kms_handle_recovery(struct drm_encoder *encoder)
{
	SDE_EVT32(DRMID(encoder), MSM_ENC_ACTIVE_REGION);

	return sde_encoder_wait_for_event(encoder, MSM_ENC_ACTIVE_REGION);
}
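
/*
 * Register the kms object plus all crtc/plane/encoder/connector objects
 * and their current states with the minidump machinery.
 */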
void sde_kms_add_data_to_minidump_va(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;
	struct sde_crtc *sde_crtc;
	struct sde_crtc_state *cstate;
	struct sde_connector *sde_conn;
	struct sde_connector_state *conn_state;
	u32 i;

	priv = sde_kms->dev->dev_private;

	sde_mini_dump_add_va_region("sde_kms", sizeof(*sde_kms), sde_kms);

	for (i = 0; i < priv->num_crtcs; i++) {
		sde_crtc = to_sde_crtc(priv->crtcs[i]);
		cstate = to_sde_crtc_state(priv->crtcs[i]->state);
		sde_mini_dump_add_va_region("sde_crtc", sizeof(*sde_crtc), sde_crtc);
		sde_mini_dump_add_va_region("crtc_state", sizeof(*cstate), cstate);
	}

	for (i = 0; i < priv->num_planes; i++)
		sde_plane_add_data_to_minidump_va(priv->planes[i]);

	for (i = 0; i < priv->num_encoders; i++)
		sde_encoder_add_data_to_minidump_va(priv->encoders[i]);

	for (i = 0; i < priv->num_connectors; i++) {
		sde_conn = to_sde_connector(priv->connectors[i]);
		conn_state = to_sde_connector_state(priv->connectors[i]->state);
		sde_mini_dump_add_va_region("sde_conn", sizeof(*sde_conn), sde_conn);
		sde_mini_dump_add_va_region("conn_state", sizeof(*conn_state), conn_state);
	}
}