sde_kms.c

/*
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <drm/drm_crtc.h>
#include <drm/drm_fixed.h>
#include <drm/drm_panel.h>
#include <linux/debugfs.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/dma-buf.h>
#include <linux/memblock.h>
#include <linux/soc/qcom/panel_event_notifier.h>

#include <drm/drm_atomic_uapi.h>
#include <drm/drm_probe_helper.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"

#include "dsi_display.h"
#include "dsi_drm.h"
#include "sde_wb.h"
#include "dp_display.h"
#include "dp_drm.h"
#include "dp_mst_drm.h"

#include "sde_kms.h"
#include "sde_core_irq.h"
#include "sde_formats.h"
#include "sde_hw_vbif.h"
#include "sde_vbif.h"
#include "sde_encoder.h"
#include "sde_plane.h"
#include "sde_crtc.h"
#include "sde_color_processing.h"
#include "sde_reg_dma.h"
#include "sde_connector.h"
#include "sde_vm.h"
#include "sde_fence.h"

#include <linux/qcom_scm.h>
#include <linux/qcom-iommu-util.h>
#include "soc/qcom/secure_buffer.h"
#include <linux/qtee_shmbridge.h>

#ifdef CONFIG_DRM_SDE_VM
#include <linux/gunyah/gh_irq_lend.h>
#endif

#define CREATE_TRACE_POINTS
#include "sde_trace.h"

/* defines for secure channel call */
#define MEM_PROTECT_SD_CTRL_SWITCH 0x18
#define MDP_DEVICE_ID 0x1A

#define DEMURA_REGION_NAME_MAX 32

EXPORT_TRACEPOINT_SYMBOL(tracing_mark_write);

static const char * const iommu_ports[] = {
	"mdp_0",
};

/**
 * Controls size of event log buffer. Specified as a power of 2.
 */
#define SDE_EVTLOG_SIZE	1024

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
 */
#define SDE_DEBUGFS_DIR "msm_sde"
#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"

#define SDE_KMS_MODESET_LOCK_TIMEOUT_US 500
#define SDE_KMS_MODESET_LOCK_MAX_TRIALS 20

/**
 * sdecustom - enable certain driver customizations for sde clients
 *	Enabling this modifies the standard DRM behavior slightly and assumes
 *	that the clients have specific knowledge about the modifications that
 *	are involved, so don't enable this unless you know what you're doing.
 *
 *	Parts of the driver that are affected by this setting may be located by
 *	searching for invocations of the 'sde_is_custom_client()' function.
 *
 *	This is enabled by default; pass sdecustom=0 as a module parameter to
 *	restore the standard DRM behavior.
 */
static bool sdecustom = true;
module_param(sdecustom, bool, 0400);
MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");

static int sde_kms_hw_init(struct msm_kms *kms);
static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
static int _sde_kms_mmu_init(struct sde_kms *sde_kms);
static int _sde_kms_register_events(struct msm_kms *kms,
		struct drm_mode_object *obj, u32 event, bool en);
static void sde_kms_handle_power_event(u32 event_type, void *usr);

bool sde_is_custom_client(void)
{
	return sdecustom;
}

#if IS_ENABLED(CONFIG_DEBUG_FS)
void *sde_debugfs_get_root(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
		return NULL;

	priv = sde_kms->dev->dev_private;
	return priv->debug_root;
}

static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	void *p;
	int rc;
	void *debugfs_root;

	p = sde_hw_util_get_log_mask_ptr();

	if (!sde_kms || !p)
		return -EINVAL;

	debugfs_root = sde_debugfs_get_root(sde_kms);
	if (!debugfs_root)
		return -EINVAL;

	/* debugfs_create_x32() tolerates a NULL parent, checked above anyway */
	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0600, debugfs_root, p);

	(void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
	(void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);

	rc = sde_core_perf_debugfs_init(&sde_kms->perf, debugfs_root);
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		return rc;
	}

	sde_rm_debugfs_init(&sde_kms->rm, debugfs_root);

	if (sde_kms->catalog->qdss_count)
		debugfs_create_u32("qdss", 0600, debugfs_root,
				(u32 *)&sde_kms->qdss_enabled);

	debugfs_create_u32("pm_suspend_clk_dump", 0600, debugfs_root,
			(u32 *)&sde_kms->pm_suspend_clk_dump);
	debugfs_create_u32("hw_fence_status", 0600, debugfs_root,
			(u32 *)&sde_kms->debugfs_hw_fence);

	return 0;
}

static void sde_kms_debugfs_destroy(struct msm_kms *kms)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);

	/* don't need to NULL check debugfs_root */
	if (sde_kms) {
		sde_debugfs_vbif_destroy(sde_kms);
		sde_debugfs_core_irq_destroy(sde_kms);
	}
}

static int _sde_kms_dump_clks_state(struct sde_kms *sde_kms)
{
	int i;
	struct device *dev = sde_kms->dev->dev;

	SDE_INFO("runtime PM suspended:%d", pm_runtime_suspended(dev));

	for (i = 0; i < sde_kms->dsi_display_count; i++)
		dsi_display_dump_clks_state(sde_kms->dsi_displays[i]);

	return 0;
}
#else
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	return 0;
}

static void sde_kms_debugfs_destroy(struct msm_kms *kms)
{
}

static int _sde_kms_dump_clks_state(struct sde_kms *sde_kms)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */
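
/**
 * sde_kms_wait_for_frame_transfer_complete - wait until frame transfer is done
 * @kms: Pointer to msm_kms struct
 * @crtc: Pointer to the crtc whose attached encoders should be waited on
 *
 * Waits on every encoder attached to an enabled, active crtc for the current
 * frame to leave the hardware: vsync for video-mode panels, pp-done for
 * command-mode panels.
 */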
static void sde_kms_wait_for_frame_transfer_complete(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state || !crtc->dev) {
		SDE_ERROR("invalid params\n");
		return;
	}

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	dev = crtc->dev;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Video Mode - Wait for VSYNC
		 * Cmd Mode - Wait for PP_DONE. Will be no-op if transfer is
		 * complete
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR(
			"[crtc: %d][enc: %d] wait for commit done returned %d\n",
					crtc->base.id, encoder->base.id, ret);
			break;
		}
	}
}
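
/**
 * _sde_kms_secure_ctrl_xin_clients - halt/unhalt secure-UI VBIF xin clients
 * @sde_kms: Pointer to sde_kms struct
 * @crtc: Pointer to the crtc driving the secure-UI session
 * @enable: true to halt the xin clients in sui_block_xin_mask, false to
 *	    release them
 *
 * On enable, also notifies every plane so it can track the secure-UI xin
 * client state for the crtc.
 */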
static int _sde_kms_secure_ctrl_xin_clients(struct sde_kms *sde_kms,
		struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_mdss_cfg *sde_cfg;
	struct drm_plane *plane;
	int i, ret;

	dev = sde_kms->dev;
	priv = dev->dev_private;
	sde_cfg = sde_kms->catalog;

	ret = sde_vbif_halt_xin_mask(sde_kms,
			sde_cfg->sui_block_xin_mask, enable);
	if (ret) {
		SDE_ERROR("failed to halt some xin-clients, ret:%d\n", ret);
		return ret;
	}

	if (enable) {
		for (i = 0; i < priv->num_planes; i++) {
			plane = priv->planes[i];
			sde_plane_secure_ctrl_xin_client(plane, crtc);
		}
	}

	return 0;
}

/**
 * _sde_kms_scm_call - makes secure channel call to switch the VMIDs
 * @sde_kms: Pointer to sde_kms struct
 * @vmid: switch the stage 2 translation to this VMID
 */
static int _sde_kms_scm_call(struct sde_kms *sde_kms, int vmid)
{
	struct device dummy = {};
	dma_addr_t dma_handle;
	uint32_t num_sids;
	uint32_t *sec_sid;
	struct sde_mdss_cfg *sde_cfg = sde_kms->catalog;
	int ret = 0, i;
	struct qtee_shm shm;
	bool qtee_en = qtee_shmbridge_is_enabled();
	phys_addr_t mem_addr;
	u64 mem_size;

	num_sids = sde_cfg->sec_sid_mask_count;
	if (!num_sids) {
		SDE_ERROR("secure SID masks not configured, vmid 0x%x\n", vmid);
		return -EINVAL;
	}

	if (qtee_en) {
		ret = qtee_shmbridge_allocate_shm(num_sids * sizeof(uint32_t),
				&shm);
		if (ret)
			return -ENOMEM;

		sec_sid = (uint32_t *) shm.vaddr;
		mem_addr = shm.paddr;
		/*
		 * SMMUSecureModeSwitch requires the size to be the number of
		 * SIDs, but shm allocates size in pages. Modify the args as
		 * per client requirement.
		 */
		mem_size = sizeof(uint32_t) * num_sids;
	} else {
		sec_sid = kcalloc(num_sids, sizeof(uint32_t), GFP_KERNEL);
		if (!sec_sid)
			return -ENOMEM;

		mem_addr = virt_to_phys(sec_sid);
		mem_size = sizeof(uint32_t) * num_sids;
	}

	for (i = 0; i < num_sids; i++) {
		sec_sid[i] = sde_cfg->sec_sid_mask[i];
		SDE_DEBUG("sid_mask[%d]: %d\n", i, sec_sid[i]);
	}

	ret = dma_coerce_mask_and_coherent(&dummy, DMA_BIT_MASK(64));
	if (ret) {
		SDE_ERROR("Failed to set dma mask for dummy dev %d\n", ret);
		goto map_error;
	}
	set_dma_ops(&dummy, NULL);

	dma_handle = dma_map_single(&dummy, sec_sid,
			num_sids * sizeof(uint32_t), DMA_TO_DEVICE);
	if (dma_mapping_error(&dummy, dma_handle)) {
		SDE_ERROR("dma_map_single for dummy dev failed vmid 0x%x\n",
				vmid);
		goto map_error;
	}

	SDE_DEBUG("calling scm_call for vmid 0x%x, num_sids %d, qtee_en %d\n",
			vmid, num_sids, qtee_en);

	ret = qcom_scm_mem_protect_sd_ctrl(MDP_DEVICE_ID, mem_addr,
			mem_size, vmid);
	if (ret)
		SDE_ERROR("Error: scm_call2, vmid %d, ret %d\n",
				vmid, ret);

	SDE_EVT32(MEM_PROTECT_SD_CTRL_SWITCH, MDP_DEVICE_ID, mem_size,
			vmid, qtee_en, num_sids, ret);

	dma_unmap_single(&dummy, dma_handle,
			num_sids * sizeof(uint32_t), DMA_TO_DEVICE);

map_error:
	if (qtee_en)
		qtee_shmbridge_free_shm(&shm);
	else
		kfree(sec_sid);

	return ret;
}
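
/**
 * _sde_kms_detach_all_cb - detach all iommu context banks and switch VMID
 * @sde_kms: Pointer to sde_kms struct
 * @vmid: VMID to hand the stage 2 translation over to
 *
 * Reference counted via detach_all_cb; only the first caller performs the
 * detach and the secure channel call.
 */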
static int _sde_kms_detach_all_cb(struct sde_kms *sde_kms, u32 vmid)
{
	u32 ret;

	if (atomic_inc_return(&sde_kms->detach_all_cb) > 1)
		return 0;

	/* detach_all_contexts */
	ret = sde_kms_mmu_detach(sde_kms, false);
	if (ret) {
		SDE_ERROR("failed to detach all cb ret:%d\n", ret);
		goto mmu_error;
	}

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	return 0;

scm_error:
	sde_kms_mmu_attach(sde_kms, false);
mmu_error:
	atomic_dec(&sde_kms->detach_all_cb);
	return ret;
}
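
/**
 * _sde_kms_attach_all_cb - switch VMID back and re-attach all context banks
 * @sde_kms: Pointer to sde_kms struct
 * @vmid: VMID to switch the stage 2 translation to
 * @old_vmid: VMID to fall back to if the mmu attach fails
 *
 * Counterpart of _sde_kms_detach_all_cb(); only the last caller performs the
 * secure channel call and the attach.
 */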
static int _sde_kms_attach_all_cb(struct sde_kms *sde_kms, u32 vmid,
		u32 old_vmid)
{
	u32 ret;

	if (atomic_dec_return(&sde_kms->detach_all_cb) != 0)
		return 0;

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	/* attach_all_contexts */
	ret = sde_kms_mmu_attach(sde_kms, false);
	if (ret) {
		SDE_ERROR("failed to attach all cb ret:%d\n", ret);
		goto mmu_error;
	}

	return 0;

mmu_error:
	_sde_kms_scm_call(sde_kms, old_vmid);
scm_error:
	atomic_inc(&sde_kms->detach_all_cb);
	return ret;
}
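
/**
 * _sde_kms_detach_sec_cb - detach the secure context bank and switch VMID
 * @sde_kms: Pointer to sde_kms struct
 * @vmid: VMID to hand the stage 2 translation over to
 *
 * Reference counted via detach_sec_cb; only the first caller performs the
 * detach and the secure channel call.
 */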
static int _sde_kms_detach_sec_cb(struct sde_kms *sde_kms, int vmid)
{
	u32 ret;

	if (atomic_inc_return(&sde_kms->detach_sec_cb) > 1)
		return 0;

	/* detach secure_context */
	ret = sde_kms_mmu_detach(sde_kms, true);
	if (ret) {
		SDE_ERROR("failed to detach sec cb ret:%d\n", ret);
		goto mmu_error;
	}

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	return 0;

scm_error:
	sde_kms_mmu_attach(sde_kms, true);
mmu_error:
	atomic_dec(&sde_kms->detach_sec_cb);
	return ret;
}
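
/**
 * _sde_kms_attach_sec_cb - switch VMID back and re-attach the secure bank
 * @sde_kms: Pointer to sde_kms struct
 * @vmid: VMID to switch the stage 2 translation to
 * @old_vmid: VMID to fall back to if the mmu attach fails
 *
 * Counterpart of _sde_kms_detach_sec_cb(); only the last caller performs the
 * secure channel call and the attach.
 */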
static int _sde_kms_attach_sec_cb(struct sde_kms *sde_kms, u32 vmid,
		u32 old_vmid)
{
	u32 ret;

	if (atomic_dec_return(&sde_kms->detach_sec_cb) != 0)
		return 0;

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	ret = sde_kms_mmu_attach(sde_kms, true);
	if (ret) {
		SDE_ERROR("failed to attach sec cb ret:%d\n", ret);
		goto mmu_error;
	}

	return 0;

mmu_error:
	_sde_kms_scm_call(sde_kms, old_vmid);
scm_error:
	atomic_inc(&sde_kms->detach_sec_cb);
	return ret;
}
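
/**
 * _sde_kms_sui_misr_ctrl - enable/disable secure-UI MISR collection
 * @sde_kms: Pointer to sde_kms struct
 * @crtc: Pointer to the crtc under secure-UI
 * @enable: true to set up MISR and halt the xin clients, false to undo both
 *
 * Holds a runtime PM vote while MISR collection is active.
 */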
static int _sde_kms_sui_misr_ctrl(struct sde_kms *sde_kms,
		struct drm_crtc *crtc, bool enable)
{
	int ret;

	if (enable) {
		ret = pm_runtime_resume_and_get(sde_kms->dev->dev);
		if (ret < 0) {
			SDE_ERROR("failed to enable power resource %d\n", ret);
			SDE_EVT32(ret, SDE_EVTLOG_ERROR);
			return ret;
		}

		sde_crtc_misr_setup(crtc, true, 1);

		ret = _sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, true);
		if (ret) {
			sde_crtc_misr_setup(crtc, false, 0);
			pm_runtime_put_sync(sde_kms->dev->dev);
			return ret;
		}
	} else {
		_sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, false);
		sde_crtc_misr_setup(crtc, false, 0);
		pm_runtime_put_sync(sde_kms->dev->dev);
	}

	return 0;
}
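
/**
 * _sde_kms_secure_ctrl - drive the smmu state machine for a secure transition
 * @sde_kms: Pointer to sde_kms struct
 * @crtc: Pointer to the crtc requesting the transition
 * @post_commit: true when called after the frame kickoff, false before
 *
 * Performs the attach/detach and VMID switch requested in smmu_state, plus
 * any secure-UI MISR setup/teardown around it. On failure the previous smmu
 * state and secure level are restored.
 */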
static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
		bool post_commit)
{
	struct sde_kms_smmu_state_data *smmu_state = &sde_kms->smmu_state;
	int old_smmu_state = smmu_state->state;
	int ret = 0;
	u32 vmid;

	if (!sde_kms || !crtc) {
		SDE_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->transition_type,
			post_commit, smmu_state->sui_misr_state,
			smmu_state->secure_level, SDE_EVTLOG_FUNC_ENTRY);

	if ((!smmu_state->transition_type) ||
	    ((smmu_state->transition_type == POST_COMMIT) && !post_commit))
		/* Bail out */
		return 0;

	/* enable sui misr if requested, before the transition */
	if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ) {
		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, true);
		if (ret) {
			smmu_state->sui_misr_state = NONE;
			goto end;
		}
	}

	mutex_lock(&sde_kms->secure_transition_lock);
	switch (smmu_state->state) {
	case DETACH_ALL_REQ:
		ret = _sde_kms_detach_all_cb(sde_kms, VMID_CP_SEC_DISPLAY);
		if (!ret)
			smmu_state->state = DETACHED;
		break;

	case ATTACH_ALL_REQ:
		ret = _sde_kms_attach_all_cb(sde_kms, VMID_CP_PIXEL,
				VMID_CP_SEC_DISPLAY);
		if (!ret) {
			smmu_state->state = ATTACHED;
			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
		}
		break;

	case DETACH_SEC_REQ:
		vmid = (smmu_state->secure_level == SDE_DRM_SEC_ONLY) ?
				VMID_CP_SEC_DISPLAY : VMID_CP_CAMERA_PREVIEW;
		ret = _sde_kms_detach_sec_cb(sde_kms, vmid);
		if (!ret)
			smmu_state->state = DETACHED_SEC;
		break;

	case ATTACH_SEC_REQ:
		vmid = (smmu_state->secure_level == SDE_DRM_SEC_ONLY) ?
				VMID_CP_SEC_DISPLAY : VMID_CP_CAMERA_PREVIEW;
		ret = _sde_kms_attach_sec_cb(sde_kms, VMID_CP_PIXEL, vmid);
		if (!ret) {
			smmu_state->state = ATTACHED;
			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
		}
		break;

	default:
		SDE_ERROR("crtc%d: invalid smmu state %d transition type %d\n",
				DRMID(crtc), smmu_state->state,
				smmu_state->transition_type);
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&sde_kms->secure_transition_lock);

	/* disable sui misr if requested, after the transition */
	if (!ret && (smmu_state->sui_misr_state == SUI_MISR_DISABLE_REQ)) {
		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
		if (ret)
			goto end;
	}

end:
	smmu_state->transition_error = false;
	if (ret) {
		smmu_state->transition_error = true;
		SDE_ERROR(
		"crtc%d: req_state %d, new_state %d, sec_lvl %d, ret %d\n",
				DRMID(crtc), old_smmu_state, smmu_state->state,
				smmu_state->secure_level, ret);

		smmu_state->state = smmu_state->prev_state;
		smmu_state->secure_level = smmu_state->prev_secure_level;
		if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ)
			_sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
	}

	SDE_DEBUG("crtc %d: req_state %d, new_state %d, sec_lvl %d, ret %d\n",
			DRMID(crtc), old_smmu_state, smmu_state->state,
			smmu_state->secure_level, ret);

	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->prev_state,
			smmu_state->transition_type,
			smmu_state->transition_error,
			smmu_state->secure_level, smmu_state->prev_secure_level,
			smmu_state->sui_misr_state, ret, SDE_EVTLOG_FUNC_EXIT);

	smmu_state->sui_misr_state = NONE;
	smmu_state->transition_type = NONE;

	return ret;
}
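
/**
 * sde_kms_prepare_secure_transition - complete secure-transition prerequisites
 * @kms: Pointer to msm_kms struct
 * @state: Pointer to the atomic state being committed
 *
 * For the active crtc, queries the operations required before a secure
 * transition (wait for frame transfer, clean up or re-prepare plane fbs,
 * smmu attach/detach) and performs them in order.
 */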
static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	int i, ops = 0, ret = 0;
	bool old_valid_fb = false;
	struct sde_kms_smmu_state_data *smmu_state = &sde_kms->smmu_state;

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		if (!crtc->state || !crtc->state->active)
			continue;

		/*
		 * It is safe to assume only one active crtc, and compatible
		 * translation modes on the planes staged on this crtc;
		 * otherwise validation would have failed.
		 * For this crtc:
		 */

		/*
		 * 1. Check if old state on the crtc has planes staged with
		 * valid fbs
		 */
		for_each_old_plane_in_state(state, plane, plane_state, i) {
			if (!plane_state->crtc)
				continue;
			if (plane_state->fb) {
				old_valid_fb = true;
				break;
			}
		}

		/*
		 * 2. Get the operations needed to be performed before the
		 * secure transition can be initiated.
		 */
		ops = sde_crtc_get_secure_transition_ops(crtc,
				old_crtc_state, old_valid_fb);
		if (ops < 0) {
			SDE_ERROR("invalid secure operations %x\n", ops);
			return ops;
		}

		if (!ops) {
			smmu_state->transition_error = false;
			goto no_ops;
		}

		SDE_DEBUG("%d:secure operations(%x) started on state:%pK\n",
				crtc->base.id, ops, crtc->state);
		SDE_EVT32(DRMID(crtc), ops, crtc->state, old_valid_fb);

		/* 3. Perform operations needed for secure transition */
		if (ops & SDE_KMS_OPS_WAIT_FOR_TX_DONE) {
			SDE_DEBUG("wait_for_transfer_done\n");
			sde_kms_wait_for_frame_transfer_complete(kms, crtc);
		}
		if (ops & SDE_KMS_OPS_CLEANUP_PLANE_FB) {
			SDE_DEBUG("cleanup planes\n");
			drm_atomic_helper_cleanup_planes(dev, state);
			for_each_oldnew_plane_in_state(state, plane,
					old_plane_state, new_plane_state, i)
				sde_plane_destroy_fb(old_plane_state);
		}
		if (ops & SDE_KMS_OPS_SECURE_STATE_CHANGE) {
			SDE_DEBUG("secure ctrl\n");
			_sde_kms_secure_ctrl(sde_kms, crtc, false);
		}
		if (ops & SDE_KMS_OPS_PREPARE_PLANE_FB) {
			SDE_DEBUG("prepare planes %d\n",
					crtc->state->plane_mask);
			drm_atomic_crtc_for_each_plane(plane, crtc) {
				const struct drm_plane_helper_funcs *funcs;

				plane_state = plane->state;
				funcs = plane->helper_private;

				SDE_DEBUG("psde:%d FB[%u]\n",
						plane->base.id,
						plane->fb->base.id);
				if (!funcs)
					continue;

				if (funcs->prepare_fb) {
					ret = funcs->prepare_fb(plane,
							plane_state);
					if (ret)
						return ret;
				}
			}
		}
		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
		SDE_DEBUG("secure operations completed\n");
	}

no_ops:
	return 0;
}
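
/**
 * _sde_kms_release_shared_buffer - return the splash carveout to the kernel
 * @mem_addr: physical base address of the splash buffer
 * @splash_buffer_size: size of the splash buffer
 * @ramdump_base: physical base of the ramdump region, if it overlaps
 * @ramdump_buffer_size: size of the ramdump region to leave reserved
 *
 * Frees the continuous-splash memblock reservation and releases its pages
 * back to the page allocator, skipping any leading ramdump region.
 */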
static int _sde_kms_release_shared_buffer(unsigned long mem_addr,
		unsigned int splash_buffer_size,
		unsigned int ramdump_base,
		unsigned int ramdump_buffer_size)
{
	unsigned long pfn_start, pfn_end, pfn_idx;
	int ret = 0;

	if (!mem_addr || !splash_buffer_size) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	/* leave ramdump memory only if base address matches */
	if (ramdump_base == mem_addr &&
			ramdump_buffer_size <= splash_buffer_size) {
		mem_addr += ramdump_buffer_size;
		splash_buffer_size -= ramdump_buffer_size;
	}

	pfn_start = mem_addr >> PAGE_SHIFT;
	pfn_end = (mem_addr + splash_buffer_size) >> PAGE_SHIFT;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 19, 0))
	/* memblock_free() takes a pointer and returns void on these kernels */
	memblock_free((void *)mem_addr, splash_buffer_size);
#else
	ret = memblock_free(mem_addr, splash_buffer_size);
	if (ret) {
		SDE_ERROR("continuous splash memory free failed:%d\n", ret);
		return ret;
	}
#endif

	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
		free_reserved_page(pfn_to_page(pfn_idx));

	return ret;
}

static int _sde_kms_one2one_mem_map_ipcc_reg(struct sde_kms *sde_kms, u32 buf_size,
		unsigned long buf_base)
{
	struct msm_mmu *mmu = NULL;
	int ret = 0;

	if (!sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]
			|| !sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]->mmu) {
		SDE_ERROR("aspace not found for sde kms node\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE]->mmu;
	if (!mmu) {
		SDE_ERROR("mmu not found for aspace\n");
		return -EINVAL;
	}

	if (!mmu->funcs || !mmu->funcs->one_to_one_map) {
		SDE_ERROR("invalid input params for map\n");
		return -EINVAL;
	}

	ret = mmu->funcs->one_to_one_map(mmu, buf_base, buf_base, buf_size,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		SDE_ERROR("one2one memory smmu map failed:%d\n", ret);

	return ret;
}
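
/**
 * _sde_kms_splash_mem_get - identity-map a splash region, reference counted
 * @sde_kms: Pointer to sde_kms struct
 * @splash: Pointer to the splash memory region to map
 *
 * Creates a one-to-one smmu mapping for the splash buffer on first use and
 * bumps its reference count on every call.
 */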
static int _sde_kms_splash_mem_get(struct sde_kms *sde_kms,
		struct sde_splash_mem *splash)
{
	struct msm_mmu *mmu = NULL;
	int ret = 0;

	if (!sde_kms->aspace[0]) {
		SDE_ERROR("aspace not found for sde kms node\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[0]->mmu;
	if (!mmu) {
		SDE_ERROR("mmu not found for aspace\n");
		return -EINVAL;
	}

	if (!splash || !mmu->funcs || !mmu->funcs->one_to_one_map) {
		SDE_ERROR("invalid input params for map\n");
		return -EINVAL;
	}

	if (!splash->ref_cnt) {
		ret = mmu->funcs->one_to_one_map(mmu, splash->splash_buf_base,
				splash->splash_buf_base,
				splash->splash_buf_size,
				IOMMU_READ | IOMMU_NOEXEC);
		if (ret)
			SDE_ERROR("splash memory smmu map failed:%d\n", ret);
	}

	splash->ref_cnt++;
	SDE_DEBUG("one2one mapping done for base:%lx size:%x ref_cnt:%d\n",
			splash->splash_buf_base,
			splash->splash_buf_size,
			splash->ref_cnt);

	return ret;
}

static int _sde_kms_map_all_splash_regions(struct sde_kms *sde_kms)
{
	int i = 0;
	int ret = 0;
	struct sde_splash_mem *region;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) {
		region = sde_kms->splash_data.splash_display[i].splash;
		ret = _sde_kms_splash_mem_get(sde_kms, region);
		if (ret)
			return ret;

		/* Demura is optional and need not exist */
		region = sde_kms->splash_data.splash_display[i].demura;
		if (region) {
			ret = _sde_kms_splash_mem_get(sde_kms, region);
			if (ret)
				return ret;
		}
	}

	return ret;
}

static int _sde_kms_splash_mem_put(struct sde_kms *sde_kms,
		struct sde_splash_mem *splash)
{
	struct msm_mmu *mmu = NULL;
	int rc = 0;

	if (!sde_kms || !sde_kms->aspace[0] || !sde_kms->aspace[0]->mmu) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[0]->mmu;

	if (!splash || !splash->ref_cnt ||
			!mmu || !mmu->funcs || !mmu->funcs->one_to_one_unmap)
		return -EINVAL;

	splash->ref_cnt--;
	SDE_DEBUG("splash base:%lx refcnt:%d\n",
			splash->splash_buf_base, splash->ref_cnt);

	if (!splash->ref_cnt) {
		mmu->funcs->one_to_one_unmap(mmu, splash->splash_buf_base,
				splash->splash_buf_size);
		rc = _sde_kms_release_shared_buffer(splash->splash_buf_base,
				splash->splash_buf_size, splash->ramdump_base,
				splash->ramdump_size);
		splash->splash_buf_base = 0;
		splash->splash_buf_size = 0;
	}

	return rc;
}

static int _sde_kms_unmap_all_splash_regions(struct sde_kms *sde_kms)
{
	int i = 0;
	int ret = 0, failure = 0;
	struct sde_splash_mem *region;

	if (!sde_kms || !sde_kms->splash_data.num_splash_regions)
		return -EINVAL;

	for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) {
		region = sde_kms->splash_data.splash_display[i].splash;
		ret = _sde_kms_splash_mem_put(sde_kms, region);
		if (ret) {
			failure = 1;
			pr_err("Error unmapping splash mem for display %d\n",
					i);
		}

		/* Demura is optional and need not exist */
		region = sde_kms->splash_data.splash_display[i].demura;
		if (region) {
			ret = _sde_kms_splash_mem_put(sde_kms, region);
			if (ret) {
				failure = 1;
				pr_err("Error unmapping demura mem for display %d\n",
						i);
			}
		}
	}

	if (failure)
		ret = -EINVAL;

	return ret;
}
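
/**
 * _sde_kms_get_blank - map a connector's low-power property to a panel event
 * @crtc_state: Pointer to the crtc state driving the connector
 * @conn_state: Pointer to the connector state carrying the LP property
 *
 * Return: DRM_PANEL_EVENT_UNBLANK, DRM_PANEL_EVENT_BLANK_LP or
 * DRM_PANEL_EVENT_BLANK depending on the effective dpms/low-power mode.
 */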
static int _sde_kms_get_blank(struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	int lp_mode, blank;

	if (crtc_state->active)
		lp_mode = sde_connector_get_property(conn_state,
				CONNECTOR_PROP_LP);
	else
		lp_mode = SDE_MODE_DPMS_OFF;

	switch (lp_mode) {
	case SDE_MODE_DPMS_ON:
		blank = DRM_PANEL_EVENT_UNBLANK;
		break;
	case SDE_MODE_DPMS_LP1:
	case SDE_MODE_DPMS_LP2:
		blank = DRM_PANEL_EVENT_BLANK_LP;
		break;
	case SDE_MODE_DPMS_OFF:
	default:
		blank = DRM_PANEL_EVENT_BLANK;
		break;
	}

	return blank;
}
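
/**
 * _sde_kms_drm_check_dpms - notify panel listeners of power mode/fps changes
 * @old_state: Pointer to the atomic state being committed
 * @is_pre_commit: true when invoked before the frame kickoff
 *
 * Compares the old and new power mode and refresh rate for every connector
 * in the commit and triggers a panel event notification on any change.
 */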
static void _sde_kms_drm_check_dpms(struct drm_atomic_state *old_state,
		bool is_pre_commit)
{
	struct panel_event_notification notification;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	struct sde_connector *c_conn;
	int i, old_mode, new_mode, old_fps, new_fps;
	enum panel_event_notifier_tag panel_type;

	for_each_old_connector_in_state(old_state, connector,
			old_conn_state, i) {
		crtc = connector->state->crtc ? connector->state->crtc :
				old_conn_state->crtc;
		if (!crtc)
			continue;

		new_fps = drm_mode_vrefresh(&crtc->state->mode);
		new_mode = _sde_kms_get_blank(crtc->state, connector->state);
		if (old_conn_state->crtc) {
			old_crtc_state = drm_atomic_get_existing_crtc_state(
					old_state, old_conn_state->crtc);

			old_fps = drm_mode_vrefresh(&old_crtc_state->mode);
			old_mode = _sde_kms_get_blank(old_crtc_state,
					old_conn_state);
		} else {
			old_fps = 0;
			old_mode = DRM_PANEL_EVENT_BLANK;
		}

		if ((old_mode != new_mode) || (old_fps != new_fps)) {
			c_conn = to_sde_connector(connector);
			SDE_EVT32(old_mode, new_mode, old_fps, new_fps,
					c_conn->panel, crtc->state->active,
					old_conn_state->crtc);
			pr_debug("change detected for connector:%s (power mode %d->%d, fps %d->%d)\n",
					c_conn->name, old_mode, new_mode, old_fps, new_fps);

			/*
			 * If a suspend/resume and an fps change happen at the
			 * same time, give preference to the power mode change
			 * rather than the fps change.
			 */
			if ((old_mode == new_mode) && (old_fps != new_fps))
				new_mode = DRM_PANEL_EVENT_FPS_CHANGE;

			if (!c_conn->panel)
				continue;

			panel_type = sde_encoder_is_primary_display(
					connector->encoder) ?
					PANEL_EVENT_NOTIFICATION_PRIMARY :
					PANEL_EVENT_NOTIFICATION_SECONDARY;

			notification.notif_type = new_mode;
			notification.panel = c_conn->panel;
			notification.notif_data.old_fps = old_fps;
			notification.notif_data.new_fps = new_fps;
			notification.notif_data.early_trigger = is_pre_commit;
			panel_event_notification_trigger(panel_type,
					&notification);
		}
	}
}
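
/**
 * sde_kms_vm_get_vm_crtc - find the crtc carrying a VM transition request
 * @state: Pointer to the atomic state being committed
 *
 * Return: the first active crtc whose CRTC_PROP_VM_REQ_STATE property is not
 * VM_REQ_NONE, or NULL when no VM transition is requested in this commit.
 */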
static struct drm_crtc *sde_kms_vm_get_vm_crtc(
		struct drm_atomic_state *state)
{
	int i;
	enum sde_crtc_vm_req vm_req = VM_REQ_NONE;
	struct drm_crtc *crtc, *vm_crtc = NULL;
	struct drm_crtc_state *new_cstate, *old_cstate;
	struct sde_crtc_state *vm_cstate;

	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		if (!new_cstate->active && !old_cstate->active)
			continue;

		vm_cstate = to_sde_crtc_state(new_cstate);
		vm_req = sde_crtc_get_property(vm_cstate,
				CRTC_PROP_VM_REQ_STATE);
		if (vm_req != VM_REQ_NONE) {
			SDE_DEBUG("valid vm request:%d found on crtc-%d\n",
					vm_req, crtc->base.id);
			vm_crtc = crtc;
			break;
		}
	}

	return vm_crtc;
}
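
/**
 * _sde_kms_update_pm_qos_irq_request - vote resume latency on the irq cpus
 * @sde_kms: Pointer to sde_kms struct
 * @mask: cpumask of the cpus servicing the display irq
 *
 * Saves @mask and adds or updates a DEV_PM_QOS_RESUME_LATENCY request on
 * each cpu in it, using the catalog's cpu_irq_latency value.
 */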
static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms, const cpumask_t *mask)
{
	struct device *cpu_dev;
	int cpu = 0;
	u32 cpu_irq_latency = sde_kms->catalog->perf.cpu_irq_latency;

	/* save irq cpu mask */
	sde_kms->irq_cpu_mask = *mask;

	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
		return;
	}

	for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
					cpu);
			continue;
		}

		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
			dev_pm_qos_update_request(&sde_kms->pm_qos_irq_req[cpu],
					cpu_irq_latency);
		else
			dev_pm_qos_add_request(cpu_dev,
					&sde_kms->pm_qos_irq_req[cpu],
					DEV_PM_QOS_RESUME_LATENCY,
					cpu_irq_latency);
	}
}

static void _sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms, const cpumask_t *mask)
{
	struct device *cpu_dev;
	int cpu = 0;

	if (cpumask_empty(mask)) {
		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
		return;
	}

	for_each_cpu(cpu, mask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
					cpu);
			continue;
		}

		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
			dev_pm_qos_remove_request(
					&sde_kms->pm_qos_irq_req[cpu]);
	}
}
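
/**
 * sde_kms_vm_primary_prepare_commit - primary VM setup after display acquire
 * @sde_kms: Pointer to sde_kms struct
 * @state: Pointer to the atomic state being committed
 *
 * When the commit carries a VM_REQ_ACQUIRE request, re-enables the MDSS and
 * display-path irqs, reschedules ESD status work, turns vblank events back
 * on and invokes the VM client's post-acquire hook.
 */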
int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *ddev;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct sde_vm_ops *vm_ops;
	struct sde_crtc_state *cstate;
	struct drm_connector_list_iter iter;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	ddev = sde_kms->dev;

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return -EINVAL;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return 0;

	/* enable MDSS irq line */
	sde_irq_update(&sde_kms->base, true);

	/* clear the stale IRQ status bits */
	if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);

	_sde_kms_remove_pm_qos_irq_request(sde_kms, &CPU_MASK_ALL);

	/* enable the display path IRQs */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_irq_control(encoder, true);
	}

	/* Schedule ESD work */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		if (drm_connector_mask(connector) & crtc->state->connector_mask)
			sde_connector_schedule_status_work(connector, true);
	drm_connector_list_iter_end(&iter);

	/* enable vblank events */
	drm_crtc_vblank_on(crtc);

	sde_dbg_set_hw_ownership_status(true);

	/* handle non-SDE pre_acquire */
	if (vm_ops->vm_client_post_acquire)
		rc = vm_ops->vm_client_post_acquire(sde_kms);

	return rc;
}
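
/**
 * sde_kms_vm_set_sid - program the stream IDs for the given VM
 * @sde_kms: Pointer to sde_kms struct
 * @vm: VM identifier to program into the plane and hw SID blocks
 */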
void sde_kms_vm_set_sid(struct sde_kms *sde_kms, u32 vm)
{
	struct drm_plane *plane;
	struct drm_device *ddev;
	struct sde_mdss_cfg *sde_cfg;

	ddev = sde_kms->dev;
	sde_cfg = sde_kms->catalog;

	list_for_each_entry(plane, &ddev->mode_config.plane_list, head)
		sde_plane_set_sid(plane, vm);

	if (sde_kms->hw_sid && sde_kms->hw_sid->ops.set_vm_sid)
		sde_kms->hw_sid->ops.set_vm_sid(sde_kms->hw_sid, vm, sde_kms->catalog);
}
int sde_kms_vm_trusted_prepare_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct sde_crtc_state *cstate;
	enum sde_crtc_vm_req vm_req;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return 0;

	/* Clear the stale IRQ status bits */
	if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);

	/* Program the SIDs for the trusted VM */
	sde_kms_vm_set_sid(sde_kms, 1);

	sde_dbg_set_hw_ownership_status(true);

	return 0;
}
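
/**
 * sde_kms_prepare_commit - msm_kms hook called before kickoff of an atomic
 *	commit: enables power resources, flags a HW reset on encoders whose
 *	prepare step timed out, and runs the secure-transition and VM
 *	preparation sequences
 * @kms: Pointer to the msm kms structure
 * @state: Pointer to drm atomic state
 */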
static void sde_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_device *dev;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct sde_vm_ops *vm_ops;
	int i, rc;

	if (!kms)
		return;
	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	if (!dev || !dev->dev_private)
		return;
	priv = dev->dev_private;

	SDE_ATRACE_BEGIN("prepare_commit");
	rc = pm_runtime_resume_and_get(sde_kms->dev->dev);
	if (rc < 0) {
		SDE_ERROR("failed to enable power resources %d\n", rc);
		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
		goto end;
	}

	if (sde_kms->first_kickoff) {
		sde_power_scale_reg_bus(&priv->phandle, VOTE_INDEX_HIGH, false);
		sde_kms->first_kickoff = false;
	}

	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		drm_for_each_encoder_mask(encoder, dev, cstate->encoder_mask) {
			if (sde_encoder_prepare_commit(encoder) == -ETIMEDOUT) {
				SDE_ERROR("crtc:%d, initiating hw reset\n",
						DRMID(crtc));
				sde_encoder_needs_hw_reset(encoder);
				sde_crtc_set_needs_hw_reset(crtc);
			}
		}
	}

	/*
	 * NOTE: for secure use cases we want to apply the new HW
	 * configuration only after completing the preparation for any
	 * required secure transitions below.
	 */
	sde_kms_prepare_secure_transition(kms, state);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		goto end_vm;

	if (vm_ops->vm_prepare_commit)
		vm_ops->vm_prepare_commit(sde_kms, state);

end_vm:
	_sde_kms_drm_check_dpms(state, true);
end:
	SDE_ATRACE_END("prepare_commit");
}

static void sde_kms_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state)
		return;
	sde_kms = to_sde_kms(kms);

	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_commit");
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active) {
			SDE_EVT32(DRMID(crtc), old_state);
			sde_crtc_commit_kickoff(crtc, old_crtc_state);
		}
	}

	SDE_ATRACE_END("sde_kms_commit");
}
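
/**
 * _sde_kms_free_splash_display_data - release the splash (and demura) memory
 *	regions for one display and decrement the pending splash-display count
 * @sde_kms: Pointer to sde kms structure
 * @splash_display: Pointer to the splash display entry being handed off
 */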
static void _sde_kms_free_splash_display_data(struct sde_kms *sde_kms,
		struct sde_splash_display *splash_display)
{
	if (!sde_kms || !splash_display ||
			!sde_kms->splash_data.num_splash_displays)
		return;

	if (sde_kms->splash_data.num_splash_regions) {
		_sde_kms_splash_mem_put(sde_kms, splash_display->splash);
		if (splash_display->demura)
			_sde_kms_splash_mem_put(sde_kms,
					splash_display->demura);
	}

	sde_kms->splash_data.num_splash_displays--;

	SDE_DEBUG("cont_splash handoff done, remaining:%d\n",
			sde_kms->splash_data.num_splash_displays);

	memset(splash_display, 0x0, sizeof(struct sde_splash_display));
}

static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms,
		struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;
	struct sde_splash_display *splash_display;
	int i;

	if (!sde_kms || !crtc)
		return;
	priv = sde_kms->dev->dev_private;

	if (!crtc->state->active || !sde_kms->splash_data.num_splash_displays)
		return;

	SDE_EVT32(DRMID(crtc), crtc->state->active,
			sde_kms->splash_data.num_splash_displays);

	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
		splash_display = &sde_kms->splash_data.splash_display[i];
		if (splash_display->encoder &&
				crtc == splash_display->encoder->crtc)
			break;
	}

	if (i >= MAX_DSI_DISPLAYS)
		return;

	if (splash_display->cont_splash_enabled) {
		sde_encoder_update_caps_for_cont_splash(splash_display->encoder,
				splash_display, false);
		_sde_kms_free_splash_display_data(sde_kms, splash_display);
	}

	/* remove the votes if all displays are done with splash */
	if (!sde_kms->splash_data.num_splash_displays) {
		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
			sde_power_data_bus_set_quota(&priv->phandle, i,
				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
				priv->phandle.ib_quota[i] ? priv->phandle.ib_quota[i] :
				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);

		pm_runtime_put_sync(sde_kms->dev->dev);
	}
}
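
/**
 * sde_kms_cancel_delayed_work - cancel all delayed work attached to a CRTC:
 *	CRTC-level work, ESD status work on its connectors, and idle-PC work
 *	on its non-clone-mode encoders
 * @crtc: Pointer to drm crtc being quiesced
 */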
static void sde_kms_cancel_delayed_work(struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_encoder *encoder;

	/* Cancel CRTC work */
	sde_crtc_cancel_delayed_work(crtc);

	/* Cancel ESD work */
	drm_connector_list_iter_begin(crtc->dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		if (drm_connector_mask(connector) & crtc->state->connector_mask)
			sde_connector_schedule_status_work(connector, false);
	drm_connector_list_iter_end(&iter);

	/* Cancel Idle-PC work */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_cancel_delayed_work(encoder);
	}
}
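
/**
 * sde_kms_vm_pre_release - quiesce the display pipeline before handing the
 *	HW to another VM: wait for the in-flight frame transfer, cancel
 *	delayed work, flush the event threads and disable encoder IRQs; for
 *	the primary VM also restore the PM QoS votes, disable vblank events
 *	and reset the CRTC SW state
 * @sde_kms: Pointer to sde kms structure
 * @state: Pointer to drm atomic state
 * @is_primary: True when releasing from the primary VM
 * Returns: Zero on success
 */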
int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
		struct drm_atomic_state *state, bool is_primary)
{
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	struct msm_drm_private *priv;
	int rc = 0;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	priv = sde_kms->dev->dev_private;

	/* if a vm_req is set, at least one CRTC in this commit is guaranteed */
	sde_kms_wait_for_frame_transfer_complete(&sde_kms->base, crtc);

	sde_dbg_set_hw_ownership_status(false);

	sde_kms_cancel_delayed_work(crtc);

	kthread_flush_worker(&priv->event_thread[crtc->index].worker);

	/* Flush pp_event thread queue for any pending events */
	kthread_flush_worker(&priv->pp_event_worker);

	/* disable SDE encoder irqs */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_irq_control(encoder, false);
	}

	if (is_primary) {
		_sde_kms_update_pm_qos_irq_request(sde_kms, &CPU_MASK_ALL);

		/* disable vblank events */
		drm_crtc_vblank_off(crtc);

		/* reset sw state */
		sde_crtc_reset_sw_state(crtc);
	}

	return rc;
}

int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct sde_vm_ops *vm_ops;
	struct drm_crtc *crtc;
	struct sde_crtc_state *cstate;
	struct drm_crtc_state *new_cstate;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	if (!sde_kms || !sde_vm_is_enabled(sde_kms))
		return -EINVAL;

	vm_ops = sde_vm_get_ops(sde_kms);

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (sde_kms->vm->lastclose_in_progress && !crtc) {
		sde_dbg_set_hw_ownership_status(false);
		goto release_vm;
	}

	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);
	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_RELEASE)
		return 0;

release_vm:
	sde_kms_vm_pre_release(sde_kms, state, false);

	sde_kms_vm_set_sid(sde_kms, 0);

	sde_vm_lock(sde_kms);

	if (vm_ops->vm_release)
		rc = vm_ops->vm_release(sde_kms);

	sde_vm_unlock(sde_kms);

	return rc;
}

int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct sde_vm_ops *vm_ops;
	struct sde_crtc_state *cstate;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	if (!sde_kms || !sde_vm_is_enabled(sde_kms))
		return -EINVAL;

	vm_ops = sde_vm_get_ops(sde_kms);

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);
	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_RELEASE)
		return 0;

	/* handle SDE pre-release */
	rc = sde_kms_vm_pre_release(sde_kms, state, true);
	if (rc) {
		SDE_ERROR("sde vm pre_release failed, rc=%d\n", rc);
		goto exit;
	}

	/* properly handoff color processing features */
	sde_cp_crtc_vm_primary_handoff(crtc);

	sde_vm_lock(sde_kms);

	/* handle non-SDE clients pre-release */
	if (vm_ops->vm_client_pre_release) {
		rc = vm_ops->vm_client_pre_release(sde_kms);
		if (rc) {
			SDE_ERROR("sde vm client pre_release failed, rc=%d\n",
					rc);
			sde_vm_unlock(sde_kms);
			goto exit;
		}
	}

	/* disable IRQ line */
	sde_irq_update(&sde_kms->base, false);

	/* release HW */
	if (vm_ops->vm_release) {
		rc = vm_ops->vm_release(sde_kms);
		if (rc)
			SDE_ERROR("sde vm release failed, rc=%d\n", rc);
	}

	sde_vm_unlock(sde_kms);

	_sde_crtc_vm_release_notify(crtc);

exit:
	return rc;
}
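
/**
 * sde_kms_complete_commit - msm_kms hook called after a commit has been
 *	flushed to HW: completes CRTC commits and any pending secure
 *	transitions, runs connector post-kickoff and VM post-commit handlers,
 *	then drops the power vote taken in prepare_commit and releases splash
 *	resources
 * @kms: Pointer to the msm kms structure
 * @old_state: Pointer to the swapped-out drm atomic state
 */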
static void sde_kms_complete_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state;
	struct msm_display_conn_params params;
	struct sde_vm_ops *vm_ops;
	int i, rc = 0;

	if (!kms || !old_state)
		return;
	sde_kms = to_sde_kms(kms);

	if (!sde_kms->dev || !sde_kms->dev->dev_private)
		return;
	priv = sde_kms->dev->dev_private;

	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_complete_commit");

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		sde_crtc_complete_commit(crtc, old_crtc_state);

		/* complete secure transitions if any */
		if (sde_kms->smmu_state.transition_type == POST_COMMIT)
			_sde_kms_secure_ctrl(sde_kms, crtc, true);
	}

	for_each_old_connector_in_state(old_state, connector,
			old_conn_state, i) {
		struct sde_connector *c_conn;

		c_conn = to_sde_connector(connector);
		if (!c_conn->ops.post_kickoff)
			continue;

		memset(&params, 0, sizeof(params));

		sde_connector_complete_qsync_commit(connector, &params);

		rc = c_conn->ops.post_kickoff(connector, &params);
		if (rc) {
			pr_err("Connector Post kickoff failed rc=%d\n",
					rc);
		}
	}

	vm_ops = sde_vm_get_ops(sde_kms);
	if (vm_ops && vm_ops->vm_post_commit) {
		rc = vm_ops->vm_post_commit(sde_kms, old_state);
		if (rc)
			SDE_ERROR("vm post commit failed, rc = %d\n",
					rc);
	}

	_sde_kms_drm_check_dpms(old_state, false);

	pm_runtime_put_sync(sde_kms->dev->dev);

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
		_sde_kms_release_splash_resource(sde_kms, crtc);

	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
	SDE_ATRACE_END("sde_kms_complete_commit");
}
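
/**
 * sde_kms_wait_for_commit_done - block until the commit has been flushed to
 *	the panel: waits per encoder for TX-complete (when CWB is disabling)
 *	or commit-done, and requests a frame reset on failure
 * @kms: Pointer to the msm kms structure
 * @crtc: Pointer to the drm crtc to wait on
 */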
static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct sde_kms *sde_kms;
	struct drm_encoder *encoder, *cwb_enc = NULL;
	struct drm_device *dev;
	int ret;
	bool cwb_disabling;

	if (!kms || !crtc || !crtc->state) {
		SDE_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;
	sde_kms = to_sde_kms(kms);

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_wait_for_commit_done");
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		cwb_disabling = false;
		if (encoder->crtc != crtc) {
			cwb_disabling = sde_encoder_is_cwb_disabling(encoder,
					crtc);
			if (cwb_disabling)
				cwb_enc = encoder;
			else
				continue;
		}

		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, cwb_disabling ?
				MSM_ENC_TX_COMPLETE : MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR("crtc:%d, enc:%d, cwb_d:%d, wait for commit done failed ret:%d\n",
					DRMID(crtc), DRMID(encoder), cwb_disabling, ret);
			SDE_EVT32(DRMID(crtc), DRMID(encoder), cwb_disabling,
					ret, SDE_EVTLOG_ERROR);
			sde_crtc_request_frame_reset(crtc, encoder);
			break;
		}

		sde_encoder_hw_fence_error_handle(encoder);

		sde_crtc_complete_flip(crtc, NULL);
	}

	if (cwb_enc)
		sde_encoder_virt_reset(cwb_enc);

	if (drm_atomic_crtc_needs_modeset(crtc->state)) {
		drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
			sde_encoder_reset_kickoff_timeout_ms(encoder);
	}

	/* avoid system cache update to set rd-noalloc bit when NSE feature is enabled */
	if (!test_bit(SDE_FEATURE_SYS_CACHE_NSE, sde_kms->catalog->features))
		sde_crtc_static_cache_read_kickoff(crtc);

	SDE_ATRACE_END("sde_kms_wait_for_commit_done");
}

static void sde_kms_prepare_fence(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
		SDE_ERROR("invalid argument(s)\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_prepare_fence");

	/* old_state actually contains updated crtc pointers */
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active || crtc->state->active_changed)
			sde_crtc_prepare_commit(crtc, old_crtc_state);
	}

	SDE_ATRACE_END("sde_kms_prepare_fence");
}

/**
 * _sde_kms_get_displays - query for underlying display handles and cache them
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 */
static int _sde_kms_get_displays(struct sde_kms *sde_kms)
{
	int rc = -ENOMEM;

	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return -EINVAL;
	}

	/* dsi */
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
	if (sde_kms->dsi_display_count) {
		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->dsi_displays) {
			SDE_ERROR("failed to allocate dsi displays\n");
			goto exit_deinit_dsi;
		}
		sde_kms->dsi_display_count =
			dsi_display_get_active_displays(sde_kms->dsi_displays,
					sde_kms->dsi_display_count);
	}

	/* wb */
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
	if (sde_kms->wb_display_count) {
		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->wb_displays) {
			SDE_ERROR("failed to allocate wb displays\n");
			goto exit_deinit_wb;
		}
		sde_kms->wb_display_count =
			wb_display_get_displays(sde_kms->wb_displays,
					sde_kms->wb_display_count);
	}

	/* dp */
	sde_kms->dp_displays = NULL;
	sde_kms->dp_display_count = dp_display_get_num_of_displays();
	if (sde_kms->dp_display_count) {
		sde_kms->dp_displays = kcalloc(sde_kms->dp_display_count,
				sizeof(void *), GFP_KERNEL);
		if (!sde_kms->dp_displays) {
			SDE_ERROR("failed to allocate dp displays\n");
			goto exit_deinit_dp;
		}
		sde_kms->dp_display_count =
			dp_display_get_displays(sde_kms->dp_displays,
					sde_kms->dp_display_count);
		sde_kms->dp_stream_count = dp_display_get_num_of_streams();
	}

	return 0;

exit_deinit_dp:
	kfree(sde_kms->dp_displays);
	sde_kms->dp_stream_count = 0;
	sde_kms->dp_display_count = 0;
	sde_kms->dp_displays = NULL;

exit_deinit_wb:
	kfree(sde_kms->wb_displays);
	sde_kms->wb_display_count = 0;
	sde_kms->wb_displays = NULL;

exit_deinit_dsi:
	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_display_count = 0;
	sde_kms->dsi_displays = NULL;

	return rc;
}

/**
 * _sde_kms_release_displays - release cache of underlying display handles
 * @sde_kms: Pointer to sde kms structure
 */
static void _sde_kms_release_displays(struct sde_kms *sde_kms)
{
	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return;
	}

	kfree(sde_kms->wb_displays);
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = 0;

	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = 0;
}

/**
 * _sde_kms_setup_displays - create encoders, bridges and connectors
 *	for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 */
static int _sde_kms_setup_displays(struct drm_device *dev,
		struct msm_drm_private *priv,
		struct sde_kms *sde_kms)
{
	static const struct sde_connector_ops dsi_ops = {
		.set_info_blob = dsi_conn_set_info_blob,
		.detect = dsi_conn_detect,
		.get_modes = dsi_connector_get_modes,
		.pre_destroy = dsi_connector_put_modes,
		.mode_valid = dsi_conn_mode_valid,
		.get_info = dsi_display_get_info,
		.set_backlight = dsi_display_set_backlight,
		.soft_reset = dsi_display_soft_reset,
		.pre_kickoff = dsi_conn_pre_kickoff,
		.clk_ctrl = dsi_display_clk_ctrl,
		.set_power = dsi_display_set_power,
		.get_mode_info = dsi_conn_get_mode_info,
		.get_dst_format = dsi_display_get_dst_format,
		.post_kickoff = dsi_conn_post_kickoff,
		.check_status = dsi_display_check_status,
		.enable_event = dsi_conn_enable_event,
		.cmd_transfer = dsi_display_cmd_transfer,
		.cont_splash_config = dsi_display_cont_splash_config,
		.cont_splash_res_disable = dsi_display_cont_splash_res_disable,
		.get_panel_vfp = dsi_display_get_panel_vfp,
		.get_default_lms = dsi_display_get_default_lms,
		.cmd_receive = dsi_display_cmd_receive,
		.install_properties = NULL,
		.set_allowed_mode_switch = dsi_conn_set_allowed_mode_switch,
		.set_dyn_bit_clk = dsi_conn_set_dyn_bit_clk,
		.get_qsync_min_fps = dsi_conn_get_qsync_min_fps,
		.get_avr_step_fps = dsi_conn_get_avr_step_fps,
		.prepare_commit = dsi_conn_prepare_commit,
		.set_submode_info = dsi_conn_set_submode_blob_info,
		.get_num_lm_from_mode = dsi_conn_get_lm_from_mode,
		.update_transfer_time = dsi_display_update_transfer_time,
		.get_panel_scan_line = dsi_display_get_panel_scan_line,
	};
	static const struct sde_connector_ops wb_ops = {
		.post_init = sde_wb_connector_post_init,
		.set_info_blob = sde_wb_connector_set_info_blob,
		.detect = sde_wb_connector_detect,
		.get_modes = sde_wb_connector_get_modes,
		.set_property = sde_wb_connector_set_property,
		.get_info = sde_wb_get_info,
		.soft_reset = NULL,
		.get_mode_info = sde_wb_get_mode_info,
		.get_dst_format = NULL,
		.check_status = NULL,
		.cmd_transfer = NULL,
		.cont_splash_config = NULL,
		.cont_splash_res_disable = NULL,
		.get_panel_vfp = NULL,
		.cmd_receive = NULL,
		.install_properties = NULL,
		.set_dyn_bit_clk = NULL,
		.set_allowed_mode_switch = NULL,
		.update_transfer_time = NULL,
	};
	static const struct sde_connector_ops dp_ops = {
		.post_init = dp_connector_post_init,
		.detect = dp_connector_detect,
		.get_modes = dp_connector_get_modes,
		.atomic_check = dp_connector_atomic_check,
		.mode_valid = dp_connector_mode_valid,
		.get_info = dp_connector_get_info,
		.get_mode_info = dp_connector_get_mode_info,
		.post_open = dp_connector_post_open,
		.check_status = NULL,
		.set_colorspace = dp_connector_set_colorspace,
		.config_hdr = dp_connector_config_hdr,
		.cmd_transfer = NULL,
		.cont_splash_config = NULL,
		.cont_splash_res_disable = NULL,
		.get_panel_vfp = NULL,
		.update_pps = dp_connector_update_pps,
		.cmd_receive = NULL,
		.install_properties = dp_connector_install_properties,
		.set_allowed_mode_switch = NULL,
		.set_dyn_bit_clk = NULL,
		.update_transfer_time = NULL,
	};
	struct msm_display_info info;
	struct drm_encoder *encoder;
	void *display, *connector;
	int i, max_encoders;
	int rc = 0;
	u32 dsc_count = 0, mixer_count = 0;
	u32 max_dp_dsc_count, max_dp_mixer_count;

	if (!dev || !priv || !sde_kms) {
		SDE_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count +
			sde_kms->dp_display_count +
			sde_kms->dp_stream_count;
	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
		max_encoders = ARRAY_SIZE(priv->encoders);
		SDE_ERROR("capping number of displays to %d\n", max_encoders);
	}

	/* wb */
	for (i = 0; i < sde_kms->wb_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->wb_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = sde_wb_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("wb get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for wb %d\n", i);
			continue;
		}

		rc = sde_wb_drm_init(display, encoder);
		if (rc) {
			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				0,
				display,
				&wb_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_VIRTUAL);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("wb %d connector init failed\n", i);
			sde_wb_drm_deinit(display);
			sde_encoder_destroy(encoder);
		}
	}

	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->dsi_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for dsi %d\n", i);
			continue;
		}

		rc = dsi_display_drm_bridge_init(display, encoder);
		if (rc) {
			SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				dsi_display_get_drm_panel(display),
				display,
				&dsi_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_DSI);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("dsi %d connector init failed\n", i);
			dsi_display_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
			continue;
		}

		rc = dsi_display_drm_ext_bridge_init(display,
				encoder, connector);
		if (rc) {
			SDE_ERROR("dsi %d ext bridge init failed, %d\n", i, rc);
			dsi_display_drm_bridge_deinit(display);
			sde_connector_destroy(connector);
			sde_encoder_destroy(encoder);
		}

		dsc_count += info.dsc_count;
		mixer_count += info.lm_count;

		if (dsi_display_has_dsc_switch_support(display))
			sde_kms->dsc_switch_support = true;
	}

	if (sde_kms->catalog->allowed_dsc_reservation_switch &&
			!sde_kms->dsc_switch_support) {
		SDE_DEBUG("dsc switch not supported\n");
		sde_kms->catalog->allowed_dsc_reservation_switch = 0;
	}

	max_dp_mixer_count = sde_kms->catalog->mixer_count > mixer_count ?
			sde_kms->catalog->mixer_count - mixer_count : 0;
	max_dp_dsc_count = sde_kms->catalog->dsc_count > dsc_count ?
			sde_kms->catalog->dsc_count - dsc_count : 0;

	if (sde_kms->catalog->allowed_dsc_reservation_switch &
			SDE_DP_DSC_RESERVATION_SWITCH)
		max_dp_dsc_count = sde_kms->catalog->dsc_count;

	/* dp */
	for (i = 0; i < sde_kms->dp_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		int idx;

		display = sde_kms->dp_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dp_connector_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dp get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("dp encoder init failed %d\n", i);
			continue;
		}

		rc = dp_drm_bridge_init(display, encoder,
				max_dp_mixer_count, max_dp_dsc_count);
		if (rc) {
			SDE_ERROR("dp bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				NULL,
				display,
				&dp_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_DisplayPort);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("dp %d connector init failed\n", i);
			dp_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
		}

		/* update display cap to MST_MODE for DP MST encoders */
		info.capabilities |= MSM_DISPLAY_CAP_MST_MODE;

		for (idx = 0; idx < sde_kms->dp_stream_count &&
				priv->num_encoders < max_encoders; idx++) {
			info.h_tile_instance[0] = idx;
			encoder = sde_encoder_init(dev, &info);
			if (IS_ERR_OR_NULL(encoder)) {
				SDE_ERROR("dp mst encoder init failed %d\n", i);
				continue;
			}

			rc = dp_mst_drm_bridge_init(display, encoder);
			if (rc) {
				SDE_ERROR("dp mst bridge %d init failed, %d\n",
						i, rc);
				sde_encoder_destroy(encoder);
				continue;
			}
			priv->encoders[priv->num_encoders++] = encoder;
		}
	}

	return 0;
}

static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;
	int i;

	if (!sde_kms) {
		SDE_ERROR("invalid sde_kms\n");
		return;
	} else if (!sde_kms->dev) {
		SDE_ERROR("invalid dev\n");
		return;
	} else if (!sde_kms->dev->dev_private) {
		SDE_ERROR("invalid dev_private\n");
		return;
	}
	priv = sde_kms->dev->dev_private;

	for (i = 0; i < priv->num_crtcs; i++)
		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
	priv->num_crtcs = 0;

	for (i = 0; i < priv->num_planes; i++)
		priv->planes[i]->funcs->destroy(priv->planes[i]);
	priv->num_planes = 0;

	for (i = 0; i < priv->num_connectors; i++)
		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
	priv->num_connectors = 0;

	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
	priv->num_encoders = 0;

	_sde_kms_release_displays(sde_kms);
}

static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_crtc *crtc;
	struct msm_drm_private *priv;
	struct sde_mdss_cfg *catalog;
	int primary_planes_idx = 0, i, ret;
	int max_crtc_count;
	u32 sspp_id[MAX_PLANES];
	u32 master_plane_id[MAX_PLANES];
	u32 num_virt_planes = 0, dummy_mixer_count = 0;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	catalog = sde_kms->catalog;

	ret = sde_core_irq_domain_add(sde_kms);
	if (ret)
		goto fail_irq;
	/*
	 * Query for underlying display drivers, and create connectors,
	 * bridges and encoders for them.
	 */
	if (!_sde_kms_get_displays(sde_kms))
		(void)_sde_kms_setup_displays(dev, priv, sde_kms);

	for (i = 0; i < catalog->mixer_count; i++)
		if (catalog->mixer[i].dummy_mixer)
			dummy_mixer_count++;

	max_crtc_count = catalog->mixer_count - dummy_mixer_count;

	/* Create the planes */
	for (i = 0; i < catalog->sspp_count; i++) {
		bool primary = true;

		if (primary_planes_idx >= max_crtc_count)
			primary = false;

		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
				(1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (primary)
			primary_planes[primary_planes_idx++] = plane;

		if (sde_hw_sspp_multirect_enabled(&catalog->sspp[i]) &&
				sde_is_custom_client()) {
			int priority =
				catalog->sspp[i].sblk->smart_dma_priority;
			sspp_id[priority - 1] = catalog->sspp[i].id;
			master_plane_id[priority - 1] = plane->base.id;
			num_virt_planes++;
		}
	}

	/* Initialize smart DMA virtual planes */
	for (i = 0; i < num_virt_planes; i++) {
		plane = sde_plane_init(dev, sspp_id[i], false,
				(1UL << max_crtc_count) - 1, master_plane_id[i]);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane for virtual SSPP init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = sde_crtc_init(dev, primary_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	if (sde_is_custom_client()) {
		/* All CRTCs are compatible with all planes */
		for (i = 0; i < priv->num_planes; i++)
			priv->planes[i]->possible_crtcs =
				(1 << priv->num_crtcs) - 1;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
fail:
	_sde_kms_drm_obj_destroy(sde_kms);
fail_irq:
	sde_core_irq_domain_fini(sde_kms);
	return ret;
}

/**
 * sde_kms_timeline_status - provides current timeline status
 * This API should be called without mode config lock.
 * @dev: Pointer to drm device
 */
void sde_kms_timeline_status(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;

	if (!dev) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}

	drm_for_each_crtc(crtc, dev)
		sde_crtc_timeline_status(crtc);

	if (mutex_is_locked(&dev->mode_config.mutex)) {
		/*
		 * Probably locked from last close; dump the connector
		 * timeline status anyway without taking the lock.
		 */
		SDE_ERROR("dumping conn_timeline without mode_config lock\n");
		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(conn, &conn_iter)
			sde_conn_timeline_status(conn);
		drm_connector_list_iter_end(&conn_iter);
		return;
	}

	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter)
		sde_conn_timeline_status(conn);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);
}

static int sde_kms_postinit(struct msm_kms *kms)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev;
	struct drm_crtc *crtc;
	struct msm_drm_private *priv;
	int i, rc;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev ||
			!sde_kms->dev->dev_private) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = sde_kms->dev->dev_private;

	/*
	 * Handle (re)initializations during power enable; the sde power
	 * event call has to be after drm_irq_install to handle irq update.
	 */
	sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
	sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
			SDE_POWER_EVENT_POST_ENABLE |
			SDE_POWER_EVENT_PRE_DISABLE,
			sde_kms_handle_power_event, sde_kms, "kms");

	if (sde_kms->splash_data.num_splash_displays) {
		SDE_DEBUG("Skipping MDP Resources disable\n");
	} else {
		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
			sde_power_data_bus_set_quota(&priv->phandle, i,
				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);

		pm_runtime_put_sync(sde_kms->dev->dev);
	}

	rc = _sde_debugfs_init(sde_kms);
	if (rc)
		SDE_ERROR("sde_debugfs init failed: %d\n", rc);

	drm_for_each_crtc(crtc, dev)
		sde_crtc_post_init(dev, crtc);

	return rc;
}

static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}

static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
		struct platform_device *pdev)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_vm_ops *vm_ops;
	int i;

	if (!sde_kms || !pdev)
		return;

	dev = sde_kms->dev;
	if (!dev)
		return;

	priv = dev->dev_private;
	if (!priv)
		return;

	if (sde_kms->genpd_init) {
		sde_kms->genpd_init = false;
		pm_genpd_remove(&sde_kms->genpd);
		of_genpd_del_provider(pdev->dev.of_node);
	}

	vm_ops = sde_vm_get_ops(sde_kms);
	if (vm_ops && vm_ops->vm_deinit)
		vm_ops->vm_deinit(sde_kms, vm_ops);

	if (sde_kms->hw_intr)
		sde_hw_intr_destroy(sde_kms->hw_intr);
	sde_kms->hw_intr = NULL;

	if (sde_kms->power_event)
		sde_power_handle_unregister_event(
				&priv->phandle, sde_kms->power_event);

	_sde_kms_release_displays(sde_kms);

	_sde_kms_unmap_all_splash_regions(sde_kms);

	if (sde_kms->catalog) {
		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = sde_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
		}
	}

	if (sde_kms->rm_init)
		sde_rm_destroy(&sde_kms->rm);
	sde_kms->rm_init = false;

	if (sde_kms->catalog)
		sde_hw_catalog_deinit(sde_kms->catalog);
	sde_kms->catalog = NULL;

	if (sde_kms->sid)
		msm_iounmap(pdev, sde_kms->sid);
	sde_kms->sid = NULL;

	if (sde_kms->reg_dma)
		msm_iounmap(pdev, sde_kms->reg_dma);
	sde_kms->reg_dma = NULL;

	if (sde_kms->vbif[VBIF_NRT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
	sde_kms->vbif[VBIF_NRT] = NULL;

	if (sde_kms->vbif[VBIF_RT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
	sde_kms->vbif[VBIF_RT] = NULL;

	if (sde_kms->mmio)
		msm_iounmap(pdev, sde_kms->mmio);
	sde_kms->mmio = NULL;

	sde_reg_dma_deinit();
	_sde_kms_mmu_destroy(sde_kms);
}
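
/**
 * sde_kms_mmu_detach - detach the SMMU domains backing the kms address
 *	spaces, optionally restricted to secure domains only; GEM objects are
 *	updated before the detach so their mappings stay consistent
 * @sde_kms: Pointer to sde kms structure
 * @secure_only: True to detach only the secure domains
 * Returns: Zero on success
 */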
int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only)
{
	int i;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_mmu *mmu;
		struct msm_gem_address_space *aspace = sde_kms->aspace[i];

		if (!aspace)
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		if (secure_only &&
			!aspace->mmu->funcs->is_domain_secure(mmu))
			continue;

		/* cleanup aspace before detaching */
		msm_gem_aspace_domain_attach_detach_update(aspace, true);

		SDE_DEBUG("Detaching domain:%d\n", i);
		aspace->mmu->funcs->detach(mmu, (const char **)iommu_ports,
			ARRAY_SIZE(iommu_ports));

		aspace->domain_attached = false;
	}

	return 0;
}

int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only)
{
	int i;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_mmu *mmu;
		struct msm_gem_address_space *aspace = sde_kms->aspace[i];

		if (!aspace)
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		if (secure_only &&
			!aspace->mmu->funcs->is_domain_secure(mmu))
			continue;

		SDE_DEBUG("Attaching domain:%d\n", i);
		aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
			ARRAY_SIZE(iommu_ports));

		aspace->domain_attached = true;
		msm_gem_aspace_domain_attach_detach_update(aspace, false);
	}

	return 0;
}

static void sde_kms_destroy(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->dev) {
		SDE_ERROR("invalid device\n");
		return;
	}

	_sde_kms_hw_destroy(sde_kms, to_platform_device(dev->dev));
	kfree(sde_kms);
}
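
/**
 * sde_kms_helper_clear_dim_layers - clear the v1 dim layers on the CRTC's
 *	new state and mark the dim-layer state dirty so the change is applied
 * @state: Pointer to drm atomic state
 * @crtc: Pointer to drm crtc whose dim layers should be cleared
 */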
static void sde_kms_helper_clear_dim_layers(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state = NULL;
	struct sde_crtc_state *c_state;

	if (!state || !crtc) {
		SDE_ERROR("invalid params\n");
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	c_state = to_sde_crtc_state(crtc_state);

	_sde_crtc_clear_dim_layers_v1(crtc_state);
	set_bit(SDE_CRTC_DIRTY_DIM_LAYERS, c_state->dirty);
}

static int sde_kms_set_crtc_for_conn(struct drm_device *dev,
		struct drm_encoder *enc, struct drm_atomic_state *state)
{
	struct drm_connector *conn = NULL;
	struct drm_connector *tmp_conn = NULL;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_connector_state *conn_state = NULL;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(tmp_conn, &conn_iter) {
		if (enc == tmp_conn->state->best_encoder) {
			conn = tmp_conn;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!conn || !enc->crtc) {
		SDE_ERROR("invalid params for enc:%d\n", DRMID(enc));
		return -EINVAL;
	}

	crtc_state = drm_atomic_get_crtc_state(state, enc->crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		SDE_ERROR("error %d getting crtc %d state\n",
				ret, DRMID(enc->crtc));
		return ret;
	}

	conn_state = drm_atomic_get_connector_state(state, conn);
	if (IS_ERR(conn_state)) {
		ret = PTR_ERR(conn_state);
		SDE_ERROR("error %d getting connector %d state\n",
				ret, DRMID(conn));
		return ret;
	}

	crtc_state->active = true;
	crtc_state->enable = true;
	ret = drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
	if (ret)
		SDE_ERROR("error %d setting the crtc\n", ret);

	return ret;
}

static void _sde_kms_plane_force_remove(struct drm_plane *plane,
		struct drm_atomic_state *state)
{
	struct drm_plane_state *plane_state;
	int ret = 0;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		SDE_ERROR("error %d getting plane %d state\n",
				ret, plane->base.id);
		return;
	}

	plane->old_fb = plane->fb;

	SDE_DEBUG("disabling plane %d\n", plane->base.id);

	ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
	if (ret != 0)
		SDE_ERROR("error %d disabling plane %d\n", ret,
				plane->base.id);

	drm_atomic_set_fb_for_plane(plane_state, NULL);
}

static int _sde_kms_connector_add_refcount(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	struct sde_connector_state *c_state;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		/*
		 * Acquire a connector reference to avoid removing
		 * connector in drm_release for splash and recovery cases.
		 */
		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			SDE_ERROR("error %d getting connector %d state\n",
					ret, DRMID(conn));
			return ret;
		}

		c_state = to_sde_connector_state(conn_state);
		if (c_state->out_fb)
			drm_framebuffer_put(c_state->out_fb);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
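
/**
 * _sde_kms_remove_fbs - detach the closing file's framebuffers from active
 *	planes and push a null commit so the pipes are torn down before
 *	drm_release runs; framebuffers still referenced elsewhere are moved
 *	aside and released only after the commit succeeds
 * @sde_kms: Pointer to sde kms structure
 * @file: Pointer to the drm file being closed
 * @state: Pointer to drm atomic state used for the null commit
 * Returns: Zero on success
 */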
static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_framebuffer *fb, *tfb;
	struct list_head fbs;
	struct drm_plane *plane;
	struct drm_crtc *crtc = NULL;
	unsigned int crtc_mask = 0;
	int ret = 0;

	INIT_LIST_HEAD(&fbs);

	list_for_each_entry_safe(fb, tfb, &file->fbs, filp_head) {
		if (drm_framebuffer_read_refcount(fb) > 1) {
			list_move_tail(&fb->filp_head, &fbs);

			drm_for_each_plane(plane, dev) {
				if (plane->state && plane->state->fb == fb) {
					if (plane->state->crtc)
						crtc_mask |= drm_crtc_mask(plane->state->crtc);
					_sde_kms_plane_force_remove(plane, state);
				}
			}
		} else {
			list_del_init(&fb->filp_head);
			drm_framebuffer_put(fb);
		}
	}

	if (list_empty(&fbs)) {
		SDE_DEBUG("skip commit as no fb(s)\n");
		if (sde_kms->dsi_display_count == sde_kms->splash_data.num_splash_displays)
			_sde_kms_connector_add_refcount(sde_kms, state);

		return 0;
	}

	drm_for_each_crtc(crtc, dev) {
		if ((crtc_mask & drm_crtc_mask(crtc)) && crtc->state->active) {
			struct drm_encoder *drm_enc;

			drm_for_each_encoder_mask(drm_enc, crtc->dev,
					crtc->state->encoder_mask) {
				ret = sde_kms_set_crtc_for_conn(dev, drm_enc, state);
				if (ret)
					goto error;
			}

			sde_kms_helper_clear_dim_layers(state, crtc);
		}
	}

	SDE_EVT32(state, crtc_mask);
	SDE_DEBUG("null commit after removing all the pipes\n");
	ret = drm_atomic_commit(state);

error:
	if (ret) {
		/*
		 * move the fbs back to original list, so it would be
		 * handled during drm_release
		 */
		list_for_each_entry_safe(fb, tfb, &fbs, filp_head)
			list_move_tail(&fb->filp_head, &file->fbs);

		if (ret == -EDEADLK || ret == -ERESTARTSYS)
			SDE_DEBUG("atomic commit failed in preclose, ret:%d\n", ret);
		else
			SDE_ERROR("atomic commit failed in preclose, ret:%d\n", ret);
		goto end;
	}

	while (!list_empty(&fbs)) {
		fb = list_first_entry(&fbs, typeof(*fb), filp_head);

		list_del_init(&fb->filp_head);
		drm_framebuffer_put(fb);
	}

	drm_for_each_crtc(crtc, dev) {
		if (!ret && crtc_mask & drm_crtc_mask(crtc))
			sde_kms_cancel_delayed_work(crtc);
	}

end:
	return ret;
}
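
/**
 * sde_kms_preclose - msm_kms hook run before a drm file is closed: completes
 *	pending page flips for the client and retries a teardown commit of the
 *	client's framebuffers under the modeset locks
 * @kms: Pointer to the msm kms structure
 * @file: Pointer to the drm file being closed
 */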
static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int i;
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	int ret = 0;

	/* cancel pending flip event */
	for (i = 0; i < priv->num_crtcs; i++)
		sde_crtc_complete_flip(priv->crtcs[i], file);

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto end;
	}

	state->acquire_ctx = &ctx;

	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
		ret = _sde_kms_remove_fbs(sde_kms, file, state);
		if (ret != -EDEADLK && ret != -ERESTARTSYS)
			break;
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
	}

end:
	if (state)
		drm_atomic_state_put(state);

	SDE_DEBUG("sde preclose done, ret:%d\n", ret);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

static int _sde_kms_helper_reset_custom_properties(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	int ret = 0;

	drm_for_each_plane(plane, dev) {
		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			SDE_ERROR("error %d getting plane %d state\n",
					ret, DRMID(plane));
			return ret;
		}

		ret = sde_plane_helper_reset_custom_properties(plane,
				plane_state);
		if (ret) {
			SDE_ERROR("error %d resetting plane props %d\n",
					ret, DRMID(plane));
			return ret;
		}
	}

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			SDE_ERROR("error %d getting crtc %d state\n",
					ret, DRMID(crtc));
			return ret;
		}

		ret = sde_crtc_helper_reset_custom_properties(crtc, crtc_state);
		if (ret) {
			SDE_ERROR("error %d resetting crtc props %d\n",
					ret, DRMID(crtc));
			return ret;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			SDE_ERROR("error %d getting connector %d state\n",
					ret, DRMID(conn));
			return ret;
		}

		ret = sde_connector_helper_reset_custom_properties(conn,
				conn_state);
		if (ret) {
			SDE_ERROR("error %d resetting connector props %d\n",
					ret, DRMID(conn));
			return ret;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}

static void sde_kms_lastclose(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!kms) {
		SDE_ERROR("invalid argument\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	if (sde_kms->vm)
		sde_kms->vm->lastclose_in_progress = true;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_ctx;
	}

	state->acquire_ctx = &ctx;
	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret)
		goto out_state;

	ret = _sde_kms_helper_reset_custom_properties(sde_kms, state);
	if (ret)
		goto out_state;

	ret = drm_atomic_commit(state);
out_state:
	if (ret == -EDEADLK)
		goto backoff;

	drm_atomic_state_put(state);
out_ctx:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		SDE_ERROR("kms lastclose failed: %d\n", ret);

	SDE_EVT32(ret, SDE_EVTLOG_FUNC_EXIT);

	if (sde_kms->vm)
		sde_kms->vm->lastclose_in_progress = false;

	return;

backoff:
	drm_atomic_state_clear(state);
	drm_modeset_backoff(&ctx);
	SDE_EVT32(ret, SDE_EVTLOG_FUNC_CASE1);

	goto retry;
}
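
/**
 * _sde_kms_validate_vm_request - validate a VM transition commit: enforce the
 *	trusted-VM limits on the number of active CRTCs and encoders per CRTC,
 *	require idle-PC to be disabled on a release, and perform the HW
 *	acquire when an ACQUIRE request arrives while this VM does not own
 *	the HW
 * @state: Pointer to drm atomic state
 * @sde_kms: Pointer to sde kms structure
 * @vm_req: VM request type found on the active CRTC
 * @vm_owns_hw: True when this VM currently owns the display HW
 * Returns: Zero on success
 */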
static int _sde_kms_validate_vm_request(struct drm_atomic_state *state, struct sde_kms *sde_kms,
		enum sde_crtc_vm_req vm_req, bool vm_owns_hw)
{
	struct drm_crtc *crtc, *active_crtc = NULL, *global_active_crtc = NULL;
	struct drm_crtc_state *new_cstate, *old_cstate, *active_cstate = NULL;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *new_connstate;
	struct sde_vm_ops *vm_ops = sde_vm_get_ops(sde_kms);
	struct sde_mdss_cfg *catalog = sde_kms->catalog;
	struct sde_connector *sde_conn;
	struct dsi_display *dsi_display;
	uint32_t i, commit_crtc_cnt = 0, global_crtc_cnt = 0;
	uint32_t crtc_encoder_cnt = 0;
	enum sde_crtc_idle_pc_state idle_pc_state = IDLE_PC_NONE;
	int rc = 0;

	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		struct sde_crtc_state *new_state = NULL;

		if (!new_cstate->active && !old_cstate->active)
			continue;

		new_state = to_sde_crtc_state(new_cstate);
		idle_pc_state = sde_crtc_get_property(new_state, CRTC_PROP_IDLE_PC_STATE);
		active_crtc = crtc;
		active_cstate = new_cstate;
		commit_crtc_cnt++;
	}

	list_for_each_entry(crtc, &sde_kms->dev->mode_config.crtc_list, head) {
		if (!crtc->state->active)
			continue;

		global_crtc_cnt++;
		global_active_crtc = crtc;
	}

	if (active_crtc) {
		drm_for_each_encoder_mask(encoder, active_crtc->dev, active_cstate->encoder_mask)
			crtc_encoder_cnt++;
	}

	for_each_new_connector_in_state(state, connector, new_connstate, i) {
		int conn_mask;

		/* an active crtc state is required to match connectors below */
		if (!active_cstate)
			break;

		conn_mask = active_cstate->connector_mask;
		if (drm_connector_mask(connector) & conn_mask) {
			sde_conn = to_sde_connector(connector);
			dsi_display = (struct dsi_display *)sde_conn->display;

			SDE_EVT32(DRMID(connector), DRMID(active_crtc), i, dsi_display->type,
					dsi_display->trusted_vm_env);
			SDE_DEBUG("VM display:%s, conn:%d, crtc:%d, type:%d, tvm:%d\n",
					dsi_display->name, DRMID(connector), DRMID(active_crtc),
					dsi_display->type, dsi_display->trusted_vm_env);
			break;
		}
	}

	/* Check for single crtc commits only on valid VM requests */
	if (active_crtc && global_active_crtc &&
			(commit_crtc_cnt > catalog->max_trusted_vm_displays ||
			 global_crtc_cnt > catalog->max_trusted_vm_displays ||
			 active_crtc != global_active_crtc)) {
		SDE_ERROR("VM switch failed; MAX:%d a_cnt:%d g_cnt:%d a_crtc:%d g_crtc:%d\n",
				catalog->max_trusted_vm_displays, commit_crtc_cnt, global_crtc_cnt,
				DRMID(active_crtc), DRMID(global_active_crtc));
		return -E2BIG;
	} else if ((vm_req == VM_REQ_RELEASE) &&
			((idle_pc_state == IDLE_PC_ENABLE) ||
			 (crtc_encoder_cnt > TRUSTED_VM_MAX_ENCODER_PER_CRTC))) {
		/*
		 * disable idle-pc before releasing the HW;
		 * allow only the specified number of encoders on a given crtc
		 */
		SDE_ERROR("VM switch failed; idle-pc:%d max:%d encoder_cnt:%d\n",
				idle_pc_state, TRUSTED_VM_MAX_ENCODER_PER_CRTC, crtc_encoder_cnt);
		return -EINVAL;
	}

	if ((vm_req == VM_REQ_ACQUIRE) && !vm_owns_hw) {
		rc = vm_ops->vm_acquire(sde_kms);
		if (rc) {
			SDE_ERROR("VM acquire failed; hw_owner:%d, rc:%d\n", vm_owns_hw, rc);
			return rc;
		}

		if (vm_ops->vm_resource_init) {
			rc = vm_ops->vm_resource_init(sde_kms, state);
			if (rc && vm_ops->vm_release)
				rc = vm_ops->vm_release(sde_kms);
		}
	}

	return rc;
}

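/*
 * Scan the atomic state for VM_REQ_STATE transitions and, while holding
 * the VM lock, check each one with vm_request_valid(); a still-active,
 * valid request is then validated and acquired via
 * _sde_kms_validate_vm_request().
 */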
static int sde_kms_check_vm_request(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate, *old_cstate;
	struct sde_vm_ops *vm_ops;
	enum sde_crtc_vm_req old_vm_req = VM_REQ_NONE, new_vm_req = VM_REQ_NONE;
	int i, rc = 0;
	bool vm_req_active = false, prev_vm_req = false;
	bool vm_owns_hw;

	if (!kms || !state)
		return -EINVAL;

	sde_kms = to_sde_kms(kms);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return 0;

	if (!vm_ops->vm_request_valid || !vm_ops->vm_owns_hw || !vm_ops->vm_acquire)
		return -EINVAL;

	drm_for_each_crtc(crtc, state->dev) {
		if (crtc->state && (sde_crtc_get_property(to_sde_crtc_state(crtc->state),
				CRTC_PROP_VM_REQ_STATE) == VM_REQ_RELEASE)) {
			prev_vm_req = true;
			break;
		}
	}

	/* check for an active vm request */
	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		struct sde_crtc_state *old_state = NULL, *new_state = NULL;

		if (!new_cstate->active && !old_cstate->active)
			continue;

		new_state = to_sde_crtc_state(new_cstate);
		new_vm_req = sde_crtc_get_property(new_state, CRTC_PROP_VM_REQ_STATE);

		old_state = to_sde_crtc_state(old_cstate);
		old_vm_req = sde_crtc_get_property(old_state, CRTC_PROP_VM_REQ_STATE);

		/*
		 * The VM request should be validated in the following cases:
		 * - there is a vm request (other than VM_REQ_NONE) on the
		 *   current or previous crtc state;
		 * - a vm transition has previously taken place on one of
		 *   the crtcs.
		 */
		if (old_vm_req || new_vm_req || prev_vm_req) {
			if (!vm_req_active) {
				sde_vm_lock(sde_kms);
				vm_owns_hw = sde_vm_owns_hw(sde_kms);
			}

			rc = vm_ops->vm_request_valid(sde_kms, old_vm_req, new_vm_req);
			if (rc) {
				SDE_ERROR(
					"VM transition check failed; o_state:%d, n_state:%d, hw_owner:%d, rc:%d\n",
					old_vm_req, new_vm_req, vm_owns_hw, rc);
				sde_vm_unlock(sde_kms);
				vm_req_active = false;
				break;
			} else if (old_vm_req == VM_REQ_ACQUIRE && new_vm_req == VM_REQ_NONE) {
				SDE_DEBUG("VM transition valid; ignore further checks\n");
				if (!vm_req_active)
					sde_vm_unlock(sde_kms);
			} else {
				vm_req_active = true;
			}
		}
	}

	/* validate active requests and perform acquire if necessary */
	if (vm_req_active) {
		rc = _sde_kms_validate_vm_request(state, sde_kms, new_vm_req, vm_owns_hw);

		sde_vm_unlock(sde_kms);

		SDE_EVT32(old_vm_req, new_vm_req, vm_req_active, vm_owns_hw, rc);
		SDE_DEBUG("VM o_state:%d, n_state:%d, hw_owner:%d, rc:%d\n", old_vm_req, new_vm_req,
				vm_req_active ? vm_owns_hw : -1, rc);
	}

	return rc;
}

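/*
 * Enforce the secure-session CRTC restrictions: while a secure-camera or
 * secure-ui session (fb_sec_dir) is active anywhere, only one CRTC may be
 * active overall, and the CRTC in this commit must match the globally
 * active one.
 */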
static int sde_kms_check_secure_transition(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct drm_crtc *crtc;
	struct drm_crtc *cur_crtc = NULL, *global_crtc = NULL;
	struct drm_crtc_state *crtc_state;
	int active_crtc_cnt = 0, global_active_crtc_cnt = 0;
	bool sec_session = false, global_sec_session = false;
	uint32_t fb_ns = 0, fb_sec = 0, fb_sec_dir = 0;
	int i;

	if (!kms || !state) {
		SDE_ERROR("invalid arguments\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	/* iterate state object for active secure/non-secure crtc */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->active)
			continue;

		active_crtc_cnt++;
		sde_crtc_state_find_plane_fb_modes(crtc_state, &fb_ns,
				&fb_sec, &fb_sec_dir);
		if (fb_sec_dir)
			sec_session = true;

		cur_crtc = crtc;
	}

	/* iterate global list for active and secure/non-secure crtc */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->state->active)
			continue;

		global_active_crtc_cnt++;
		/* update only when crtc is not the same as current crtc */
		if (crtc != cur_crtc) {
			fb_ns = fb_sec = fb_sec_dir = 0;
			sde_crtc_find_plane_fb_modes(crtc, &fb_ns,
					&fb_sec, &fb_sec_dir);
			if (fb_sec_dir)
				global_sec_session = true;

			global_crtc = crtc;
		}
	}

	if (!global_sec_session && !sec_session)
		return 0;

	/*
	 * - fail the crtc commit if a secure-camera/secure-ui session is
	 *   in progress on any other display
	 * - fail a secure-camera/secure-ui crtc commit if any other display
	 *   session is in progress
	 */
	if ((global_active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE) ||
			(active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE)) {
		SDE_ERROR(
			"crtc%d secure check failed global_active:%d active:%d\n",
			cur_crtc ? cur_crtc->base.id : -1,
			global_active_crtc_cnt, active_crtc_cnt);
		return -EPERM;

	/*
	 * as only one crtc is allowed during a secure session, the crtc
	 * in this commit should match the global crtc
	 */
	} else if (global_crtc && cur_crtc && (global_crtc != cur_crtc)) {
		SDE_ERROR("crtc%d-sec%d not allowed during crtc%d-sec%d\n",
				cur_crtc->base.id, sec_session,
				global_crtc->base.id, global_sec_session);
		return -EPERM;
	}

	return 0;
}

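/*
 * Roll back VM resources when an atomic check fails after a successful
 * VM_REQ_ACQUIRE, by invoking the vm_acquire_fail_handler under the
 * VM lock.
 */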
static void sde_kms_vm_res_release(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct sde_crtc_state *cstate;
	struct sde_vm_ops *vm_ops;
	enum sde_crtc_vm_req vm_req;
	struct sde_kms *sde_kms = to_sde_kms(kms);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);
	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return;

	sde_vm_lock(sde_kms);

	if (vm_ops->vm_acquire_fail_handler)
		vm_ops->vm_acquire_fail_handler(sde_kms);

	sde_vm_unlock(sde_kms);
}

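/*
 * Reject atomic states that attach more concurrent-writeback (CWB)
 * encoders than the catalog's max_cwb limit allows.
 */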
static int sde_kms_check_cwb_concurrency(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	struct drm_encoder *encoder;
	struct sde_crtc_state *cstate;
	int i = 0, cnt = 0, max_cwb = 0;

	if (!kms || !state) {
		SDE_ERROR("invalid arguments\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	max_cwb = sde_kms->catalog->max_cwb;
	if (!max_cwb)
		return 0;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		cstate = to_sde_crtc_state(new_crtc_state);
		drm_for_each_encoder_mask(encoder, crtc->dev, cstate->cwb_enc_mask) {
			cnt++;
			SDE_DEBUG("crtc%d has cwb%d attached to it\n", crtc->base.id,
					encoder->base.id);
		}

		if (cnt > max_cwb) {
			SDE_ERROR("found %d cwb in the atomic state, max supported %d\n",
					cnt, max_cwb);
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

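/*
 * Top-level atomic_check hook: rejects commits while suspend is blocked,
 * then runs the VM switch, DRM helper, secure transition, and CWB
 * concurrency checks, releasing any acquired VM resources on failure.
 */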
static int sde_kms_atomic_check(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	int ret;

	if (!kms || !state)
		return -EINVAL;

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	SDE_ATRACE_BEGIN("atomic_check");
	if (sde_kms_is_suspend_blocked(dev)) {
		SDE_DEBUG("suspended, skip atomic_check\n");
		ret = -EBUSY;
		goto end;
	}

	ret = sde_kms_check_vm_request(kms, state);
	if (ret) {
		SDE_ERROR("vm switch request checks failed\n");
		goto end;
	}

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		goto vm_clean_up;

	/*
	 * Check whether any secure transition (moving a CRTC between secure
	 * and non-secure state, or vice versa) is allowed. When moving to a
	 * secure state, only planes with fb_mode set to dir_translated can
	 * be staged on the CRTC, and only one CRTC can be active during the
	 * secure state.
	 */
	ret = sde_kms_check_secure_transition(kms, state);
	if (ret)
		goto vm_clean_up;

	ret = sde_kms_check_cwb_concurrency(kms, state);
	if (ret)
		goto vm_clean_up;

	goto end;

vm_clean_up:
	sde_kms_vm_res_release(kms, state);

end:
	SDE_ATRACE_END("atomic_check");
	return ret;
}

static struct msm_gem_address_space*
_sde_kms_get_address_space(struct msm_kms *kms,
		unsigned int domain)
{
	struct sde_kms *sde_kms;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return NULL;
	}

	sde_kms = to_sde_kms(kms);
	if (!sde_kms) {
		SDE_ERROR("invalid sde_kms\n");
		return NULL;
	}

	if (domain >= MSM_SMMU_DOMAIN_MAX)
		return NULL;

	return (sde_kms->aspace[domain] &&
			sde_kms->aspace[domain]->domain_attached) ?
			sde_kms->aspace[domain] : NULL;
}

static struct device *_sde_kms_get_address_space_device(struct msm_kms *kms,
		unsigned int domain)
{
	struct sde_kms *sde_kms;
	struct msm_gem_address_space *aspace;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return NULL;
	}

	sde_kms = to_sde_kms(kms);
	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid params\n");
		return NULL;
	}

	aspace = _sde_kms_get_address_space(kms, domain);

	return (aspace && aspace->domain_attached) ?
			msm_gem_get_aspace_device(aspace) : NULL;
}

static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
{
	struct drm_device *dev = NULL;
	struct sde_kms *sde_kms = NULL;
	struct drm_connector *connector = NULL;
	struct drm_connector_list_iter conn_iter;
	struct sde_connector *sde_conn = NULL;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return;
	}

	sde_kms = to_sde_kms(kms);

	dev = sde_kms->dev;
	if (!dev) {
		SDE_ERROR("invalid device\n");
		return;
	}

	if (!dev->mode_config.poll_enabled)
		return;

	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* Only handle HPD capable connectors. */
		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
			continue;

		sde_conn = to_sde_connector(connector);
		if (sde_conn->ops.post_open)
			sde_conn->ops.post_open(&sde_conn->base,
					sde_conn->display);
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);
}

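/*
 * Attach the planes that the bootloader left staged (per the parsed pipe
 * info) to the given CRTC, after validating their source addresses
 * against the splash and demura memory regions.
 */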
static int _sde_kms_update_planes_for_cont_splash(struct sde_kms *sde_kms,
		struct sde_splash_display *splash_display,
		struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;
	struct drm_plane *plane;
	struct sde_splash_mem *splash;
	struct sde_splash_mem *demura;
	struct sde_plane_state *pstate;
	struct sde_sspp_index_info *pipe_info;
	enum sde_sspp pipe_id;
	bool is_virtual;
	int i;

	if (!sde_kms || !splash_display || !crtc) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	priv = sde_kms->dev->dev_private;
	pipe_info = &splash_display->pipe_info;
	splash = splash_display->splash;
	demura = splash_display->demura;

	for (i = 0; i < priv->num_planes; i++) {
		plane = priv->planes[i];
		pipe_id = sde_plane_pipe(plane);
		is_virtual = is_sde_plane_virtual(plane);

		if ((is_virtual && test_bit(pipe_id, pipe_info->virt_pipes)) ||
				(!is_virtual && test_bit(pipe_id, pipe_info->pipes))) {
			if (splash && sde_plane_validate_src_addr(plane,
					splash->splash_buf_base,
					splash->splash_buf_size)) {
				if (!demura || sde_plane_validate_src_addr(
						plane, demura->splash_buf_base,
						demura->splash_buf_size)) {
					SDE_ERROR("invalid adr on pipe:%d crtc:%d\n",
							pipe_id, DRMID(crtc));
					continue;
				}
			}

			plane->state->crtc = crtc;
			crtc->state->plane_mask |= drm_plane_mask(plane);
			pstate = to_sde_plane_state(plane->state);
			pstate->cont_splash_populated = true;

			SDE_DEBUG("set crtc:%d for plane:%d rect:%d\n",
					DRMID(crtc), DRMID(plane), is_virtual);
		}
	}

	return 0;
}

static int sde_kms_inform_cont_splash_res_disable(struct msm_kms *kms,
		struct dsi_display *dsi_display)
{
	void *display;
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	struct drm_device *dev;
	struct sde_kms *sde_kms;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector = NULL;
	struct sde_connector *sde_conn = NULL;
	int rc = 0;

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	display = dsi_display;

	if (dsi_display) {
		if (dsi_display->bridge->base.encoder) {
			encoder = dsi_display->bridge->base.encoder;
			SDE_DEBUG("encoder name = %s\n", encoder->name);
		}

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("%s: dsi get_info failed: %d\n",
					__func__, rc);
			encoder = NULL;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_encoder *c_encoder = NULL, *enc_iter;

		drm_connector_for_each_possible_encoder(connector, enc_iter) {
			c_encoder = enc_iter;
			break;
		}

		if (!c_encoder) {
			SDE_ERROR("c_encoder not found\n");
			drm_connector_list_iter_end(&conn_iter);
			return -EINVAL;
		}

		/*
		 * Inform each interface/connector that cont_splash is
		 * disabled. This is currently supported for the DSI
		 * interface.
		 */
		sde_conn = to_sde_connector(connector);
		if (sde_conn && sde_conn->ops.cont_splash_res_disable) {
			if (!dsi_display || !encoder) {
				sde_conn->ops.cont_splash_res_disable
						(sde_conn->display);
			} else if (c_encoder->base.id == encoder->base.id) {
				/*
				 * This handles the dual DSI configuration
				 * where one DSI interface has cont_splash
				 * enabled and the other does not.
				 */
				sde_conn->ops.cont_splash_res_disable
						(sde_conn->display);
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int sde_kms_vm_trusted_cont_splash_res_init(struct sde_kms *sde_kms)
{
	int i;
	void *display;
	struct dsi_display *dsi_display;
	struct drm_encoder *encoder;

	if (!sde_kms)
		return -EINVAL;

	if (!sde_in_trusted_vm(sde_kms))
		return 0;

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		display = sde_kms->dsi_displays[i];
		dsi_display = (struct dsi_display *)display;

		if (!dsi_display->bridge->base.encoder) {
			SDE_ERROR("no encoder on dsi display:%d\n", i);
			return -EINVAL;
		}

		encoder = dsi_display->bridge->base.encoder;
		encoder->possible_crtcs = 1 << i;

		SDE_DEBUG(
			"dsi-display:%d encoder id[%d]=%d name=%s crtcs=%x\n", i,
			encoder->index, encoder->base.id,
			encoder->name, encoder->possible_crtcs);
	}

	return 0;
}

static struct drm_display_mode *_sde_kms_get_splash_mode(
		struct sde_kms *sde_kms, struct drm_connector *connector,
		struct drm_atomic_state *state)
{
	struct drm_display_mode *mode, *cur_mode = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate, *old_cstate;
	u32 i = 0;

	if (sde_kms->splash_data.type == SDE_SPLASH_HANDOFF) {
		list_for_each_entry(mode, &connector->modes, head) {
			if (mode->type & DRM_MODE_TYPE_PREFERRED) {
				cur_mode = mode;
				break;
			}
		}
	} else if (state) {
		/* get the mode from the first atomic_check phase for trusted_vm */
		for_each_oldnew_crtc_in_state(state, crtc, old_cstate,
				new_cstate, i) {
			if (!new_cstate->active && !old_cstate->active)
				continue;

			list_for_each_entry(mode, &connector->modes, head) {
				if (drm_mode_equal(&new_cstate->mode, mode)) {
					cur_mode = mode;
					break;
				}
			}
		}
	}

	return cur_mode;
}

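/*
 * Populate the CRTC, encoder, connector, and plane software state for
 * every DSI display that is still scanning out the bootloader splash,
 * so the first user-space commit can take over seamlessly.
 */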
static int sde_kms_cont_splash_config(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	void *display;
	struct dsi_display *dsi_display;
	struct msm_display_info info;
	struct drm_encoder *encoder = NULL;
	struct drm_crtc *crtc = NULL;
	int i, rc = 0;
	struct drm_display_mode *drm_mode = NULL;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector = NULL;
	struct sde_connector *sde_conn = NULL;
	struct sde_splash_display *splash_display;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev) {
		SDE_ERROR("invalid device\n");
		return -EINVAL;
	}

	rc = sde_kms_vm_trusted_cont_splash_res_init(sde_kms);
	if (rc) {
		SDE_ERROR("failed vm cont splash resource init, rc=%d\n", rc);
		return -EINVAL;
	}

	if (((sde_kms->splash_data.type == SDE_SPLASH_HANDOFF)
			&& (!sde_kms->splash_data.num_splash_regions)) ||
			!sde_kms->splash_data.num_splash_displays) {
		DRM_INFO("cont_splash feature not enabled\n");
		sde_kms_inform_cont_splash_res_disable(kms, NULL);
		return rc;
	}

	DRM_INFO("cont_splash enabled in %d of %d display(s)\n",
			sde_kms->splash_data.num_splash_displays,
			sde_kms->dsi_display_count);

	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count; ++i) {
		struct sde_crtc_state *cstate;
		struct sde_connector_state *conn_state;

		display = sde_kms->dsi_displays[i];
		dsi_display = (struct dsi_display *)display;
		splash_display = &sde_kms->splash_data.splash_display[i];

		if (!splash_display->cont_splash_enabled) {
			SDE_DEBUG("display->name = %s splash not enabled\n",
					dsi_display->name);
			sde_kms_inform_cont_splash_res_disable(kms,
					dsi_display);
			continue;
		}

		SDE_DEBUG("display->name = %s\n", dsi_display->name);

		if (dsi_display->bridge->base.encoder) {
			encoder = dsi_display->bridge->base.encoder;
			SDE_DEBUG("encoder name = %s\n", encoder->name);
		}

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			encoder = NULL;
			continue;
		}

		SDE_DEBUG("info.is_connected = %s, info.display_type = %d\n",
				((info.is_connected) ? "true" : "false"),
				info.display_type);

		if (!encoder) {
			SDE_ERROR("encoder not initialized\n");
			return -EINVAL;
		}

		priv = sde_kms->dev->dev_private;
		encoder->crtc = priv->crtcs[i];
		crtc = encoder->crtc;
		splash_display->encoder = encoder;

		SDE_DEBUG("for dsi-display:%d crtc id[%d]:%d enc id[%d]:%d\n",
				i, crtc->index, crtc->base.id, encoder->index,
				encoder->base.id);

		mutex_lock(&dev->mode_config.mutex);
		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct drm_encoder *c_encoder = NULL, *enc_iter;

			drm_connector_for_each_possible_encoder(connector,
					enc_iter) {
				c_encoder = enc_iter;
				break;
			}

			if (!c_encoder) {
				SDE_ERROR("c_encoder not found\n");
				drm_connector_list_iter_end(&conn_iter);
				mutex_unlock(&dev->mode_config.mutex);
				return -EINVAL;
			}

			/*
			 * SDE_KMS doesn't attach more than one encoder to
			 * a DSI connector. So it is safe to check only with
			 * the first encoder entry. Revisit this logic if we
			 * ever have to support continuous splash for
			 * external displays in MST configuration.
			 */
			if (c_encoder->base.id == encoder->base.id)
				break;
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!connector) {
			SDE_ERROR("connector not initialized\n");
			mutex_unlock(&dev->mode_config.mutex);
			return -EINVAL;
		}
		mutex_unlock(&dev->mode_config.mutex);

		crtc->state->encoder_mask = drm_encoder_mask(encoder);
		crtc->state->connector_mask = drm_connector_mask(connector);
		connector->state->crtc = crtc;

		drm_mode = _sde_kms_get_splash_mode(sde_kms, connector, state);
		if (!drm_mode) {
			SDE_ERROR("drm_mode not found; handoff_type:%d\n",
					sde_kms->splash_data.type);
			return -EINVAL;
		}

		SDE_DEBUG(
			"drm_mode->name:%s, type:0x%x, flags:0x%x, handoff_type:%d\n",
			drm_mode->name, drm_mode->type,
			drm_mode->flags, sde_kms->splash_data.type);

		/* Update CRTC drm structure */
		crtc->state->active = true;
		rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode);
		if (rc) {
			SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc);
			return rc;
		}
		drm_mode_copy(&crtc->state->adjusted_mode, drm_mode);
		drm_mode_copy(&crtc->mode, drm_mode);
		cstate = to_sde_crtc_state(crtc->state);
		cstate->cont_splash_populated = true;

		/* Update encoder structure */
		sde_encoder_update_caps_for_cont_splash(encoder,
				splash_display, true);

		sde_crtc_update_cont_splash_settings(crtc);

		sde_conn = to_sde_connector(connector);
		if (sde_conn && sde_conn->ops.cont_splash_config)
			sde_conn->ops.cont_splash_config(sde_conn->display);

		conn_state = to_sde_connector_state(connector->state);
		conn_state->cont_splash_populated = true;

		rc = _sde_kms_update_planes_for_cont_splash(sde_kms,
				splash_display, crtc);
		if (rc) {
			SDE_ERROR("Failed: updating plane status rc=%d\n", rc);
			return rc;
		}
	}

	return rc;
}

static bool sde_kms_check_for_splash(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return false;
	}

	sde_kms = to_sde_kms(kms);

	return sde_kms->splash_data.num_splash_displays;
}

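/*
 * Compute the number of layer mixers a mode needs. The required clock is
 * htotal * vtotal * fps with a 5% fudge factor, evaluated in 64-bit fixed
 * point; if one mixer cannot meet the clock or width limits, the count is
 * raised in steps of two, up to a maximum of four.
 */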
static int sde_kms_get_mixer_count(const struct msm_kms *kms,
		const struct drm_display_mode *mode,
		const struct msm_resource_caps_info *res, u32 *num_lm)
{
	struct sde_kms *sde_kms;
	s64 mode_clock_hz = 0;
	s64 max_mdp_clock_hz = 0;
	s64 max_lm_width = 0;
	s64 hdisplay_fp = 0;
	s64 htotal_fp = 0;
	s64 vtotal_fp = 0;
	s64 vrefresh_fp = 0;
	s64 mdp_fudge_factor = 0;
	s64 num_lm_fp = 0;
	s64 lm_clk_fp = 0;
	s64 lm_width_fp = 0;
	int rc = 0;

	if (!num_lm) {
		SDE_ERROR("invalid num_lm pointer\n");
		return -EINVAL;
	}

	/* default to 1 layer mixer */
	*num_lm = 1;

	if (!kms || !mode || !res) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);

	max_mdp_clock_hz = drm_int2fixp(sde_kms->perf.max_core_clk_rate);
	max_lm_width = drm_int2fixp(res->max_mixer_width);
	hdisplay_fp = drm_int2fixp(mode->hdisplay);
	htotal_fp = drm_int2fixp(mode->htotal);
	vtotal_fp = drm_int2fixp(mode->vtotal);
	vrefresh_fp = drm_int2fixp(drm_mode_vrefresh(mode));
	mdp_fudge_factor = drm_fixp_from_fraction(105, 100);

	/* mode clock = [(h * v * fps * 1.05) / (num_lm)] */
	mode_clock_hz = drm_fixp_mul(htotal_fp, vtotal_fp);
	mode_clock_hz = drm_fixp_mul(mode_clock_hz, vrefresh_fp);
	mode_clock_hz = drm_fixp_mul(mode_clock_hz, mdp_fudge_factor);

	if (mode_clock_hz > max_mdp_clock_hz ||
			hdisplay_fp > max_lm_width) {
		*num_lm = 0;
		do {
			*num_lm += 2;
			num_lm_fp = drm_int2fixp(*num_lm);
			lm_clk_fp = drm_fixp_div(mode_clock_hz, num_lm_fp);
			lm_width_fp = drm_fixp_div(hdisplay_fp, num_lm_fp);

			if (*num_lm > 4) {
				rc = -EINVAL;
				goto error;
			}
		} while (lm_clk_fp > max_mdp_clock_hz ||
				lm_width_fp > max_lm_width);

		mode_clock_hz = lm_clk_fp;
	}

	SDE_DEBUG("[%s] h=%d v=%d fps=%d lm=%d mode_clk=%u max_clk=%llu\n",
			mode->name, mode->htotal, mode->vtotal, drm_mode_vrefresh(mode),
			*num_lm, drm_fixp2int(mode_clock_hz),
			sde_kms->perf.max_core_clk_rate);
	return 0;

error:
	SDE_ERROR("required mode clk exceeds max mdp clk\n");
	SDE_ERROR("[%s] h=%d v=%d fps=%d lm=%d mode_clk=%u max_clk=%llu\n",
			mode->name, mode->htotal, mode->vtotal, drm_mode_vrefresh(mode),
			*num_lm, drm_fixp2int(mode_clock_hz),
			sde_kms->perf.max_core_clk_rate);
	return rc;
}

static int sde_kms_get_dsc_count(const struct msm_kms *kms,
		u32 hdisplay, u32 *num_dsc)
{
	struct sde_kms *sde_kms;
	uint32_t max_dsc_width;

	if (!num_dsc) {
		SDE_ERROR("invalid num_dsc pointer\n");
		return -EINVAL;
	}

	*num_dsc = 0;

	if (!kms || !hdisplay) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	max_dsc_width = sde_kms->catalog->max_dsc_width;
	*num_dsc = DIV_ROUND_UP(hdisplay, max_dsc_width);

	SDE_DEBUG("h=%d, max_dsc_width=%d, num_dsc=%d\n",
			hdisplay, max_dsc_width, *num_dsc);

	return 0;
}

static bool sde_kms_in_trusted_vm(const struct msm_kms *kms)
{
	struct sde_kms *sde_kms;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return false;
	}

	sde_kms = to_sde_kms(kms);

	return sde_in_trusted_vm(sde_kms);
}

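/*
 * Build and commit a minimal ("null") atomic state for the given
 * encoder's connector/CRTC pair, retrying the modeset locks on -EDEADLK
 * up to SDE_KMS_MODESET_LOCK_MAX_TRIALS times.
 */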
static int _sde_kms_null_commit(struct drm_device *dev,
		struct drm_encoder *enc)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state = NULL;
	int retry_cnt = 0;
	int ret = 0;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK && retry_cnt < SDE_KMS_MODESET_LOCK_MAX_TRIALS) {
		drm_modeset_backoff(&ctx);
		retry_cnt++;
		udelay(SDE_KMS_MODESET_LOCK_TIMEOUT_US);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		DRM_ERROR("failed to allocate atomic state, %d\n", ret);
		goto end;
	}

	state->acquire_ctx = &ctx;

	ret = sde_kms_set_crtc_for_conn(dev, enc, state);
	if (ret)
		goto end;

	ret = drm_atomic_commit(state);
	if (ret)
		SDE_ERROR("Error %d doing the atomic commit\n", ret);

end:
	if (state)
		drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}

void sde_kms_display_early_wakeup(struct drm_device *dev,
		const int32_t connector_id)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *conn;
	struct drm_encoder *drm_enc;

	drm_connector_list_iter_begin(dev, &conn_iter);

	drm_for_each_connector_iter(conn, &conn_iter) {
		if (connector_id != DRM_MSM_WAKE_UP_ALL_DISPLAYS &&
				connector_id != conn->base.id)
			continue;

		if (conn->state && conn->state->best_encoder)
			drm_enc = conn->state->best_encoder;
		else
			drm_enc = conn->encoder;

		if (drm_enc)
			sde_encoder_early_wakeup(drm_enc);
	}

	drm_connector_list_iter_end(&conn_iter);
}

static int sde_kms_trigger_null_flush(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct sde_splash_display *splash_display;
	struct drm_crtc *crtc;
	int i, rc = 0;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);

	/* if splash handoff is done, return early */
	if (!sde_kms->splash_data.num_splash_displays)
		return 0;

	/* if all built-in displays have cont splash enabled, ignore lastclose */
	if (sde_kms->dsi_display_count == sde_kms->splash_data.num_splash_displays)
		return -EINVAL;

	/*
	 * Trigger a NULL flush if a built-in secondary/primary display is
	 * stuck in splash while the primary/secondary display, respectively,
	 * is running before lastclose.
	 */
	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
		splash_display = &sde_kms->splash_data.splash_display[i];

		if (splash_display->cont_splash_enabled && splash_display->encoder) {
			crtc = splash_display->encoder->crtc;
			SDE_DEBUG("triggering null commit on enc:%d\n",
					DRMID(splash_display->encoder));
			SDE_EVT32(DRMID(splash_display->encoder), SDE_EVTLOG_FUNC_ENTRY);
			rc = _sde_kms_null_commit(sde_kms->dev, splash_display->encoder);

			if (!rc && crtc)
				sde_kms_cancel_delayed_work(crtc);
			if (rc)
				DRM_ERROR("null flush commit failure during lastclose\n");
		}
	}

	return 0;
}

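/*
 * For every connector that is in LP2 and not in clone mode, flush the
 * display thread, wait for the pending frame transfer, and request idle
 * power-collapse from the encoder before entering PM suspend.
 */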
static void _sde_kms_pm_suspend_idle_helper(struct sde_kms *sde_kms,
		struct device *dev)
{
	int ret, crtc_id = 0;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct msm_drm_private *priv = sde_kms->dev->dev_private;

	drm_connector_list_iter_begin(ddev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		uint64_t lp;

		lp = sde_connector_get_lp(conn);
		if (lp != SDE_MODE_DPMS_LP2)
			continue;

		if (sde_encoder_in_clone_mode(conn->encoder))
			continue;

		crtc_id = drm_crtc_index(conn->state->crtc);
		if (priv->disp_thread[crtc_id].thread)
			kthread_flush_worker(
					&priv->disp_thread[crtc_id].worker);

		ret = sde_encoder_wait_for_event(conn->encoder,
				MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR(
				"[conn: %d] wait for commit done returned %d\n",
				conn->base.id, ret);
		} else if (!ret) {
			if (priv->event_thread[crtc_id].thread)
				kthread_flush_worker(
						&priv->event_thread[crtc_id].worker);
			sde_encoder_idle_request(conn->encoder);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	msm_atomic_flush_display_threads(priv);
}

struct msm_display_mode *sde_kms_get_msm_mode(struct drm_connector_state *conn_state)
{
	struct sde_connector_state *sde_conn_state;

	if (!conn_state)
		return NULL;

	sde_conn_state = to_sde_connector_state(conn_state);

	return &sde_conn_state->msm_mode;
}

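/*
 * PM suspend entry: back up the current atomic state for resume, move
 * LP1 connectors to LP2, disable all remaining active CRTCs through an
 * atomic commit, and drop the runtime PM vote so power collapse can
 * happen during suspend.
 */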
static int sde_kms_pm_suspend(struct device *dev)
{
	struct drm_device *ddev;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_connector *conn;
	struct drm_encoder *enc;
	struct drm_connector_list_iter conn_iter;
	struct drm_atomic_state *state = NULL;
	struct sde_kms *sde_kms;
	int ret = 0, num_crtcs = 0;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
	SDE_EVT32(0);

	/* disable hot-plug polling */
	drm_kms_helper_poll_disable(ddev);

	/*
	 * if any built-in display is stuck in CS, skip PM suspend entry to
	 * avoid driver SW state changes. With speculative fence enabled, HAL
	 * depends on the power_on notification for the first commit to exit
	 * the wait completion instead of the retire fence signal.
	 */
	drm_for_each_encoder(enc, ddev) {
		if (sde_encoder_in_cont_splash(enc) && enc->crtc) {
			SDE_DEBUG("skip PM suspend, splash is enabled on enc:%d\n", DRMID(enc));
			SDE_EVT32(DRMID(enc), SDE_EVTLOG_FUNC_EXIT);
			return -EINVAL;
		}
	}

	/* acquire modeset lock(s) */
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret)
		goto unlock;

	/* save current state for resume */
	if (sde_kms->suspend_state)
		drm_atomic_state_put(sde_kms->suspend_state);
	sde_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
	if (IS_ERR_OR_NULL(sde_kms->suspend_state)) {
		ret = PTR_ERR(sde_kms->suspend_state);
		DRM_ERROR("failed to back up suspend state, %d\n", ret);
		sde_kms->suspend_state = NULL;
		goto unlock;
	}

	/* create atomic state to disable all CRTCs */
	state = drm_atomic_state_alloc(ddev);
	if (!state) {
		ret = -ENOMEM;
		DRM_ERROR("failed to allocate crtc disable state, %d\n", ret);
		goto unlock;
	}

	state->acquire_ctx = &ctx;

	drm_connector_list_iter_begin(ddev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_crtc_state *crtc_state;
		uint64_t lp;

		if (!conn->state || !conn->state->crtc ||
				conn->dpms != DRM_MODE_DPMS_ON ||
				sde_encoder_in_clone_mode(conn->encoder))
			continue;

		lp = sde_connector_get_lp(conn);
		if (lp == SDE_MODE_DPMS_LP1 &&
				!sde_encoder_check_curr_mode(conn->encoder, MSM_DISPLAY_VIDEO_MODE)) {
			/* transition LP1->LP2 on pm suspend */
			ret = sde_connector_set_property_for_commit(conn, state,
					CONNECTOR_PROP_LP, SDE_MODE_DPMS_LP2);
			if (ret) {
				DRM_ERROR("failed to set lp2 for conn %d\n",
						conn->base.id);
				drm_connector_list_iter_end(&conn_iter);
				goto unlock;
			}
		}

		if (lp != SDE_MODE_DPMS_LP2 ||
				sde_encoder_check_curr_mode(conn->encoder, MSM_DISPLAY_VIDEO_MODE)) {
			/* force CRTC to be inactive */
			crtc_state = drm_atomic_get_crtc_state(state,
					conn->state->crtc);
			if (IS_ERR_OR_NULL(crtc_state)) {
				DRM_ERROR("failed to get crtc %d state\n",
						conn->state->crtc->base.id);
				drm_connector_list_iter_end(&conn_iter);
				ret = -EINVAL;
				goto unlock;
			}

			if (lp != SDE_MODE_DPMS_LP1 ||
					sde_encoder_check_curr_mode(conn->encoder, MSM_DISPLAY_VIDEO_MODE))
				crtc_state->active = false;
			++num_crtcs;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* check for nothing to do */
	if (num_crtcs == 0) {
		DRM_DEBUG("all crtcs are already in the off state\n");
		sde_kms->suspend_block = true;
		_sde_kms_pm_suspend_idle_helper(sde_kms, dev);
		goto unlock;
	}

	/* commit the "disable all" state */
	ret = drm_atomic_commit(state);
	if (ret < 0) {
		DRM_ERROR("failed to disable crtcs, %d\n", ret);
		goto unlock;
	}

	sde_kms->suspend_block = true;
	_sde_kms_pm_suspend_idle_helper(sde_kms, dev);

unlock:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	if ((ret || !num_crtcs) && sde_kms->suspend_state) {
		drm_atomic_state_put(sde_kms->suspend_state);
		sde_kms->suspend_state = NULL;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	/*
	 * The pm runtime driver avoids multiple runtime_suspend API calls by
	 * checking runtime_status. However, this call helps when there is a
	 * race condition between the pm_suspend call and a doze_suspend/
	 * power_off commit. It removes the extra vote during suspend and adds
	 * it back later to allow power collapse during the pm_suspend call.
	 */
	pm_runtime_put_sync(dev);
	pm_runtime_get_noresume(dev);

	/* dump clock state before entering suspend */
	if (sde_kms->pm_suspend_clk_dump)
		_sde_kms_dump_clks_state(sde_kms);

	return ret;
}

static int sde_kms_pm_resume(struct device *dev)
{
	struct drm_device *ddev;
	struct sde_kms *sde_kms;
	struct drm_encoder *enc;
	struct drm_modeset_acquire_ctx ctx;
	int ret, i;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));

	SDE_EVT32(sde_kms->suspend_state != NULL);

	/* if a display is stuck in cont splash, exit early */
	drm_for_each_encoder(enc, ddev) {
		if (sde_encoder_in_cont_splash(enc) && enc->crtc) {
			SDE_DEBUG("skip PM resume, splash is enabled on enc:%d\n", DRMID(enc));
			SDE_EVT32(DRMID(enc), SDE_EVTLOG_FUNC_EXIT);
			return -EINVAL;
		}
	}

	if (sde_kms->suspend_state)
		drm_mode_config_reset(ddev);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	sde_kms->suspend_block = false;

	if (sde_kms->suspend_state) {
		sde_kms->suspend_state->acquire_ctx = &ctx;
		for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
			ret = drm_atomic_helper_commit_duplicated_state(
					sde_kms->suspend_state, &ctx);
			if (ret != -EDEADLK)
				break;

			drm_modeset_backoff(&ctx);
		}

		if (ret < 0)
			DRM_ERROR("failed to restore state, %d\n", ret);

		drm_atomic_state_put(sde_kms->suspend_state);
		sde_kms->suspend_state = NULL;
	}

end:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	/* enable hot-plug polling */
	drm_kms_helper_poll_enable(ddev);

	return 0;
}

static const struct msm_kms_funcs kms_funcs = {
	.hw_init = sde_kms_hw_init,
	.postinit = sde_kms_postinit,
	.irq_preinstall = sde_irq_preinstall,
	.irq_postinstall = sde_irq_postinstall,
	.irq_uninstall = sde_irq_uninstall,
	.irq = sde_irq,
	.preclose = sde_kms_preclose,
	.lastclose = sde_kms_lastclose,
	.prepare_fence = sde_kms_prepare_fence,
	.prepare_commit = sde_kms_prepare_commit,
	.commit = sde_kms_commit,
	.complete_commit = sde_kms_complete_commit,
	.get_msm_mode = sde_kms_get_msm_mode,
	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
	.wait_for_tx_complete = sde_kms_wait_for_frame_transfer_complete,
	.check_modified_format = sde_format_check_modified_format,
	.atomic_check = sde_kms_atomic_check,
	.get_format = sde_get_msm_format,
	.round_pixclk = sde_kms_round_pixclk,
	.display_early_wakeup = sde_kms_display_early_wakeup,
	.pm_suspend = sde_kms_pm_suspend,
	.pm_resume = sde_kms_pm_resume,
	.destroy = sde_kms_destroy,
	.debugfs_destroy = sde_kms_debugfs_destroy,
	.cont_splash_config = sde_kms_cont_splash_config,
	.register_events = _sde_kms_register_events,
	.get_address_space = _sde_kms_get_address_space,
	.get_address_space_device = _sde_kms_get_address_space_device,
	.postopen = _sde_kms_post_open,
	.check_for_splash = sde_kms_check_for_splash,
	.trigger_null_flush = sde_kms_trigger_null_flush,
	.get_mixer_count = sde_kms_get_mixer_count,
	.get_dsc_count = sde_kms_get_dsc_count,
	.in_trusted_vm = sde_kms_in_trusted_vm,
};

static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
{
	int i;

	for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
		if (!sde_kms->aspace[i])
			continue;

		msm_gem_address_space_put(sde_kms->aspace[i]);
		sde_kms->aspace[i] = NULL;
	}

	return 0;
}

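/*
 * Create an address space for every SMMU domain, map the splash and
 * (when hw-fences are supported) IPCC regions in the unsecure domain,
 * and turn off the early-map hint that kept the bootloader's splash
 * mappings alive.
 */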
static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
{
	struct msm_mmu *mmu;
	struct resource *res;
	struct platform_device *pdev;
	int i, ret;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
	int early_map = 0;
#endif

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_gem_address_space *aspace;

		mmu = msm_smmu_new(sde_kms->dev->dev, i);
		if (IS_ERR(mmu)) {
			ret = PTR_ERR(mmu);
			SDE_DEBUG("failed to init iommu id %d: rc:%d\n",
					i, ret);
			continue;
		}

		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
				mmu, "sde");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			mmu->funcs->destroy(mmu);
			goto fail;
		}

		sde_kms->aspace[i] = aspace;
		aspace->domain_attached = true;

		/* Mapping splash memory block */
		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
				sde_kms->splash_data.num_splash_regions) {
			ret = _sde_kms_map_all_splash_regions(sde_kms);
			if (ret) {
				SDE_ERROR("failed to map ret:%d\n", ret);
				goto enable_trans_fail;
			}
		}

		if (i == MSM_SMMU_DOMAIN_UNSECURE && sde_kms->catalog->hw_fence_rev) {
			pdev = to_platform_device(sde_kms->dev->dev);
			res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ipcc_reg");
			if (!res) {
				SDE_DEBUG("failed to get resource ipcc_reg, cannot map ipcc\n");
				sde_kms->catalog->hw_fence_rev = 0;
			} else {
				sde_kms->ipcc_base_addr = res->start;
				ret = _sde_kms_one2one_mem_map_ipcc_reg(sde_kms, resource_size(res),
						HW_FENCE_IPCC_PROTOCOLp_CLIENTc(res->start,
						sde_kms->catalog->ipcc_protocol_id,
						sde_kms->catalog->ipcc_client_phys_id));

				/* if the mapping fails, disable hw-fences */
				if (ret)
					sde_kms->catalog->hw_fence_rev = 0;
			}
		}

		/*
		 * disable early-map, which would have been enabled during
		 * bootup by the smmu through the device-tree hint for
		 * cont-splash
		 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
		ret = mmu->funcs->enable_smmu_translations(mmu);
		if (ret) {
			SDE_ERROR("failed to enable_s1_translations ret:%d\n", ret);
			goto enable_trans_fail;
		}
#else
		ret = mmu->funcs->set_attribute(mmu, DOMAIN_ATTR_EARLY_MAP,
				&early_map);
		if (ret) {
			SDE_ERROR("failed to set_att ret:%d, early_map:%d\n",
					ret, early_map);
			goto enable_trans_fail;
		}
#endif
	}

	sde_kms->base.aspace = sde_kms->aspace[0];

	return 0;

enable_trans_fail:
	_sde_kms_unmap_all_splash_regions(sde_kms);

fail:
	_sde_kms_mmu_destroy(sde_kms);

	return ret;
}

static void sde_kms_init_rot_sid_hw(struct sde_kms *sde_kms)
{
	if (!sde_kms || !sde_kms->hw_sid || sde_in_trusted_vm(sde_kms))
		return;

	sde_hw_set_rotator_sid(sde_kms->hw_sid);
}

static void sde_kms_init_hw_fences(struct sde_kms *sde_kms)
{
	if (!sde_kms || !sde_kms->hw_mdp)
		return;

	if (sde_kms->hw_mdp->ops.setup_hw_fences)
		sde_kms->hw_mdp->ops.setup_hw_fences(sde_kms->hw_mdp,
				sde_kms->catalog->ipcc_protocol_id,
				sde_kms->catalog->ipcc_client_phys_id,
				sde_kms->ipcc_base_addr);
}

static void sde_kms_init_shared_hw(struct sde_kms *sde_kms)
{
	if (!sde_kms || !sde_kms->hw_mdp || !sde_kms->catalog)
		return;

	if (sde_kms->hw_mdp->ops.reset_ubwc)
		sde_kms->hw_mdp->ops.reset_ubwc(sde_kms->hw_mdp,
				sde_kms->catalog);
}

static void _sde_kms_set_lutdma_vbif_remap(struct sde_kms *sde_kms)
{
	struct sde_vbif_set_qos_params qos_params;
	struct sde_mdss_cfg *catalog;

	if (!sde_kms->catalog)
		return;

	catalog = sde_kms->catalog;

	memset(&qos_params, 0, sizeof(qos_params));
	qos_params.vbif_idx = catalog->dma_cfg.vbif_idx;
	qos_params.xin_id = catalog->dma_cfg.xin_id;
	qos_params.clk_ctrl = catalog->dma_cfg.clk_ctrl;
	qos_params.client_type = VBIF_LUTDMA_CLIENT;

	sde_vbif_set_qos_remap(sde_kms, &qos_params);
}

static int _sde_kms_active_override(struct sde_kms *sde_kms, bool enable)
{
	struct sde_hw_uidle *uidle;

	if (!sde_kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	uidle = sde_kms->hw_uidle;

	if (uidle && uidle->ops.active_override_enable)
		uidle->ops.active_override_enable(uidle, enable);

	return 0;
}

void sde_kms_cpu_vote_for_irq(struct sde_kms *sde_kms, bool enable)
{
	struct msm_drm_private *priv = sde_kms->dev->dev_private;

	mutex_lock(&priv->phandle.phandle_lock);
	if (enable && atomic_inc_return(&sde_kms->irq_vote_count) == 1)
		_sde_kms_update_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask);
	else if (!enable && atomic_dec_return(&sde_kms->irq_vote_count) == 0)
		_sde_kms_remove_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask);
	mutex_unlock(&priv->phandle.phandle_lock);
}

static void sde_kms_irq_affinity_notify(
		struct irq_affinity_notify *affinity_notify,
		const cpumask_t *mask)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms = container_of(affinity_notify,
			struct sde_kms, affinity_notify);

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
		return;

	priv = sde_kms->dev->dev_private;

	mutex_lock(&priv->phandle.phandle_lock);
	_sde_kms_remove_pm_qos_irq_request(sde_kms, &sde_kms->irq_cpu_mask);

	/* request vote with updated irq cpu mask */
	if (atomic_read(&sde_kms->irq_vote_count))
		_sde_kms_update_pm_qos_irq_request(sde_kms, mask);
	mutex_unlock(&priv->phandle.phandle_lock);
}

static void sde_kms_irq_affinity_release(struct kref *ref) {}

static void sde_kms_handle_power_event(u32 event_type, void *usr)
{
	struct sde_kms *sde_kms = usr;
	struct msm_kms *msm_kms;

	if (!sde_kms)
		return;

	msm_kms = &sde_kms->base;

	SDE_DEBUG("event_type:%d\n", event_type);
	SDE_EVT32_VERBOSE(event_type);

	if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
		sde_irq_update(msm_kms, true);
		sde_kms->first_kickoff = true;

		/*
		 * Rotator sid and hw fences need to be programmed since uefi
		 * doesn't configure them during continuous splash
		 */
		sde_kms_init_rot_sid_hw(sde_kms);
		sde_kms_init_hw_fences(sde_kms);

		if (sde_kms->splash_data.num_splash_displays ||
				sde_in_trusted_vm(sde_kms))
			return;

		sde_vbif_init_memtypes(sde_kms);
		sde_kms_init_shared_hw(sde_kms);
		_sde_kms_set_lutdma_vbif_remap(sde_kms);
	} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
		sde_irq_update(msm_kms, false);
		sde_kms->first_kickoff = false;

		if (sde_in_trusted_vm(sde_kms))
			return;

		_sde_kms_active_override(sde_kms, true);
		sde_vbif_axi_halt_request(sde_kms);
	}
}

#define genpd_to_sde_kms(domain) container_of(domain, struct sde_kms, genpd)

static int sde_kms_pd_enable(struct generic_pm_domain *genpd)
{
	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
	int rc = -EINVAL;

	SDE_DEBUG("\n");

	rc = pm_runtime_resume_and_get(sde_kms->dev->dev);
	rc = (rc > 0) ? 0 : rc;

	SDE_EVT32(rc, genpd->device_count);

	return rc;
}

static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
{
	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);

	SDE_DEBUG("\n");

	pm_runtime_put_sync(sde_kms->dev->dev);

	SDE_EVT32(genpd->device_count);

	return 0;
}

static int _sde_kms_get_demura_plane_data(struct sde_splash_data *data)
{
	int i = 0;
	int ret = 0;
	int count = 0;
	struct device_node *parent, *node;
	struct resource r;
	char node_name[DEMURA_REGION_NAME_MAX];
	struct sde_splash_mem *mem;
	struct sde_splash_display *splash_display;

	if (!data->num_splash_displays) {
		SDE_DEBUG("no splash displays. skipping\n");
		return 0;
	}

	/*
	 * It is expected that each active demura block will have
	 * its own memory region defined.
	 */
	parent = of_find_node_by_path("/reserved-memory");

	for (i = 0; i < data->num_splash_displays; i++) {
		splash_display = &data->splash_display[i];
		snprintf(&node_name[0], DEMURA_REGION_NAME_MAX,
				"demura_region_%d", i);

		splash_display->demura = NULL;
		node = of_find_node_by_name(parent, node_name);
		if (!node) {
			SDE_DEBUG("no Demura node %s! disp count: %d\n",
					node_name, data->num_splash_displays);
			continue;
		} else if (of_address_to_resource(node, 0, &r)) {
			SDE_ERROR("invalid data for:%s\n", node_name);
			ret = -EINVAL;
			break;
		}

		mem = &data->demura_mem[i];
		mem->splash_buf_base = (unsigned long)r.start;
		mem->splash_buf_size = (r.end - r.start) + 1;

		if (!mem->splash_buf_base && !mem->splash_buf_size) {
			SDE_DEBUG("dummy splash mem for disp %d. Skipping\n",
					(i + 1));
			continue;
		} else if (!mem->splash_buf_base || !mem->splash_buf_size) {
			SDE_ERROR("mem for disp %d invalid: add:%lx size:%lx\n",
					(i + 1), mem->splash_buf_base,
					mem->splash_buf_size);
			continue;
		}

		mem->ref_cnt = 0;
		splash_display->demura = mem;
		count++;

		SDE_DEBUG("demura mem for disp:%d add:%lx size:%x\n", (i + 1),
				mem->splash_buf_base,
				mem->splash_buf_size);
	}

	if (!ret && !count)
		SDE_DEBUG("no demura regions for cont. splash found!\n");

	return ret;
}

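/*
 * Parse the reserved-memory nodes describing the bootloader splash
 * buffers; a single region may be shared by all built-in displays or
 * each display may have its own, with optional ramdump and demura
 * regions alongside.
 */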
static int _sde_kms_get_splash_data(struct sde_splash_data *data)
{
	int i = 0;
	int ret = 0;
	struct device_node *parent, *node, *node1;
	struct resource r, r1;
	const char *node_name = "splash_region";
	struct sde_splash_mem *mem;
	bool share_splash_mem = false;
	int num_displays, num_regions;
	struct sde_splash_display *splash_display;

	if (of_find_node_with_property(NULL, "qcom,sde-emulated-env"))
		return 0;

	if (!data)
		return -EINVAL;

	memset(data, 0, sizeof(*data));

	parent = of_find_node_by_path("/reserved-memory");
	if (!parent) {
		SDE_ERROR("failed to find reserved-memory node\n");
		return -EINVAL;
	}

	node = of_find_node_by_name(parent, node_name);
	if (!node) {
		SDE_DEBUG("failed to find node %s\n", node_name);
		return -EINVAL;
	}

	node1 = of_find_node_by_name(NULL, "disp_rdump_region");
	if (!node1)
		SDE_DEBUG("failed to find disp ramdump memory reservation\n");

	/*
	 * Support sharing a single splash memory for all the built-in
	 * displays as well as an independent splash region per display.
	 * In case of an independent splash region for each connected
	 * display, the dtsi node of cont_splash_region should be a
	 * collection of all memory regions.
	 * Ex: <r1.start r1.end r2.start r2.end ... rn.start rn.end>
	 */
	num_displays = dsi_display_get_num_of_displays();
	num_regions = of_property_count_u64_elems(node, "reg") / 2;

	data->num_splash_displays = num_displays;

	SDE_DEBUG("splash mem num_regions:%d\n", num_regions);
	if (num_displays > num_regions) {
		share_splash_mem = true;
		pr_info(":%d displays share same splash buf\n", num_displays);
	}

	for (i = 0; i < num_displays; i++) {
		splash_display = &data->splash_display[i];

		if (!i || !share_splash_mem) {
			if (of_address_to_resource(node, i, &r)) {
				SDE_ERROR("invalid data for:%s\n", node_name);
				return -EINVAL;
			}

			mem = &data->splash_mem[i];
			if (!node1 || of_address_to_resource(node1, i, &r1)) {
				SDE_DEBUG("failed to find ramdump memory\n");
				mem->ramdump_base = 0;
				mem->ramdump_size = 0;
			} else {
				mem->ramdump_base = (unsigned long)r1.start;
				mem->ramdump_size = (r1.end - r1.start) + 1;
			}

			mem->splash_buf_base = (unsigned long)r.start;
			mem->splash_buf_size = (r.end - r.start) + 1;
			mem->ref_cnt = 0;
			splash_display->splash = mem;
			data->num_splash_regions++;
		} else {
			data->splash_display[i].splash = &data->splash_mem[0];
		}

		SDE_DEBUG("splash mem for disp:%d add:%lx size:%x\n", (i + 1),
				splash_display->splash->splash_buf_base,
				splash_display->splash->splash_buf_size);
	}

	data->type = SDE_SPLASH_HANDOFF;

	ret = _sde_kms_get_demura_plane_data(data);

	return ret;
}

static int _sde_kms_hw_init_ioremap(struct sde_kms *sde_kms,
	struct platform_device *platformdev)
{
	int rc = -EINVAL;

	sde_kms->mmio = msm_ioremap(platformdev, "mdp_phys", "mdp_phys");
	if (IS_ERR(sde_kms->mmio)) {
		rc = PTR_ERR(sde_kms->mmio);
		SDE_ERROR("mdp register memory map failed: %d\n", rc);
		sde_kms->mmio = NULL;
		goto error;
	}
	DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio);
	sde_kms->mmio_len = msm_iomap_size(platformdev, "mdp_phys");

	rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
			sde_kms->mmio_len,
			msm_get_phys_addr(platformdev, "mdp_phys"),
			SDE_DBG_SDE);
	if (rc)
		SDE_ERROR("dbg base register kms failed: %d\n", rc);

	sde_kms->vbif[VBIF_RT] = msm_ioremap(platformdev, "vbif_phys", "vbif_phys");
	if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
		SDE_ERROR("vbif register memory map failed: %d\n", rc);
		sde_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}
	sde_kms->vbif_len[VBIF_RT] = msm_iomap_size(platformdev, "vbif_phys");

	rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
			sde_kms->vbif_len[VBIF_RT],
			msm_get_phys_addr(platformdev, "vbif_phys"),
			SDE_DBG_VBIF_RT);
	if (rc)
		SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);

	sde_kms->vbif[VBIF_NRT] = msm_ioremap(platformdev, "vbif_nrt_phys", "vbif_nrt_phys");
	if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
		sde_kms->vbif[VBIF_NRT] = NULL;
		SDE_DEBUG("VBIF NRT is not defined\n");
	} else {
		sde_kms->vbif_len[VBIF_NRT] = msm_iomap_size(platformdev, "vbif_nrt_phys");
	}

	sde_kms->reg_dma = msm_ioremap(platformdev, "regdma_phys", "regdma_phys");
	if (IS_ERR(sde_kms->reg_dma)) {
		sde_kms->reg_dma = NULL;
		SDE_DEBUG("REG_DMA is not defined\n");
	} else {
		unsigned long mdp_addr = msm_get_phys_addr(platformdev, "mdp_phys");

		sde_kms->reg_dma_len = msm_iomap_size(platformdev, "regdma_phys");
		sde_kms->reg_dma_off = msm_get_phys_addr(platformdev, "regdma_phys") - mdp_addr;
		rc = sde_dbg_reg_register_base(LUTDMA_DBG_NAME, sde_kms->reg_dma,
				sde_kms->reg_dma_len,
				msm_get_phys_addr(platformdev, "regdma_phys"),
				SDE_DBG_LUTDMA);
		if (rc)
			SDE_ERROR("dbg base register reg_dma failed: %d\n", rc);
	}

	sde_kms->sid = msm_ioremap(platformdev, "sid_phys", "sid_phys");
	if (IS_ERR(sde_kms->sid)) {
		SDE_DEBUG("sid register is not defined: %ld\n",
				PTR_ERR(sde_kms->sid));
		sde_kms->sid = NULL;
	} else {
		sde_kms->sid_len = msm_iomap_size(platformdev, "sid_phys");
		rc = sde_dbg_reg_register_base("sid", sde_kms->sid,
				sde_kms->sid_len,
				msm_get_phys_addr(platformdev, "sid_phys"),
				SDE_DBG_SID);
		if (rc)
			SDE_ERROR("dbg base register sid failed: %d\n", rc);
	}

	/* debug-base registration failures are logged above but not fatal */
	return 0;

error:
	return rc;
}
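
/*
 * _sde_kms_hw_init_power_helper - register this device as a generic power
 * domain (genpd) provider when its device tree node carries a
 * "#power-domain-cells" property; otherwise this is a no-op. An
 * illustrative (hypothetical) dtsi fragment that would enable it:
 *
 *	&mdss_mdp {
 *		#power-domain-cells = <0>;
 *	};
 */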
static int _sde_kms_hw_init_power_helper(struct drm_device *dev,
	struct sde_kms *sde_kms)
{
	int rc = 0;

	if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) {
		sde_kms->genpd.name = dev->unique;
		sde_kms->genpd.power_off = sde_kms_pd_disable;
		sde_kms->genpd.power_on = sde_kms_pd_enable;

		rc = pm_genpd_init(&sde_kms->genpd, NULL, true);
		if (rc < 0) {
			SDE_ERROR("failed to init genpd provider %s: %d\n",
					sde_kms->genpd.name, rc);
			return rc;
		}

		rc = of_genpd_add_provider_simple(dev->dev->of_node,
				&sde_kms->genpd);
		if (rc < 0) {
			SDE_ERROR("failed to add genpd provider %s: %d\n",
					sde_kms->genpd.name, rc);
			pm_genpd_remove(&sde_kms->genpd);
			return rc;
		}

		sde_kms->genpd_init = true;
		SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
	}

	return rc;
}
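
/*
 * _sde_kms_hw_init_blocks - bring up the SDE hardware abstraction blocks in
 * dependency order: catalog, power domain, SMMU, reg-dma, resource manager,
 * interrupt controller, continuous-splash handoff, MDP top, VBIFs, and the
 * optional uidle/sid blocks, then create the DRM objects on top of them.
 */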
static int _sde_kms_hw_init_blocks(struct sde_kms *sde_kms,
	struct drm_device *dev,
	struct msm_drm_private *priv)
{
	int i, rc = -EINVAL;

	sde_kms->catalog = sde_hw_catalog_init(dev);
	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
		rc = PTR_ERR(sde_kms->catalog);
		if (!sde_kms->catalog)
			rc = -EINVAL;
		SDE_ERROR("catalog init failed: %d\n", rc);
		sde_kms->catalog = NULL;
		goto power_error;
	}
	sde_kms->core_rev = sde_kms->catalog->hw_rev;

	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);

	/* initialize power domain if defined */
	rc = _sde_kms_hw_init_power_helper(dev, sde_kms);
	if (rc) {
		SDE_ERROR("_sde_kms_hw_init_power_helper failed: %d\n", rc);
		goto genpd_err;
	}

	rc = _sde_kms_mmu_init(sde_kms);
	if (rc) {
		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	/* Initialize reg dma block which is a singleton */
	sde_kms->catalog->dma_cfg.base_off = sde_kms->reg_dma_off;
	rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
			sde_kms->dev);
	if (rc) {
		SDE_ERROR("reg dma init failed: %d\n", rc);
		goto power_error;
	}

	sde_dbg_init_dbg_buses(sde_kms->core_rev);

	rc = sde_rm_init(&sde_kms->rm);
	if (rc) {
		SDE_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}
	sde_kms->rm_init = true;

	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
		rc = PTR_ERR(sde_kms->hw_intr);
		SDE_ERROR("hw_intr init failed: %d\n", rc);
		sde_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	/*
	 * Attempt continuous splash handoff only if reserved splash memory is
	 * found, and release the resources on any error while looking up the
	 * display hardware configuration in the splash data.
	 */
	if (sde_kms->splash_data.num_splash_regions) {
		struct sde_splash_display *display;
		int ret, display_count =
			sde_kms->splash_data.num_splash_displays;

		ret = sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
				&sde_kms->splash_data, sde_kms->catalog);

		for (i = 0; i < display_count; i++) {
			display = &sde_kms->splash_data.splash_display[i];
			/*
			 * free the splash region on resource init failure and
			 * in the cont-splash disabled case
			 */
			if (!display->cont_splash_enabled || ret)
				_sde_kms_free_splash_display_data(
						sde_kms, display);
		}
	}

	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
		rc = PTR_ERR(sde_kms->hw_mdp);
		if (!sde_kms->hw_mdp)
			rc = -EINVAL;
		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
		sde_kms->hw_mdp = NULL;
		goto power_error;
	}

	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = sde_kms->catalog->vbif[i].id;

		/* index by vbif_idx throughout; the error path checks it too */
		sde_kms->hw_vbif[vbif_idx] = sde_hw_vbif_init(vbif_idx,
				sde_kms->vbif[vbif_idx], sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
			if (!sde_kms->hw_vbif[vbif_idx])
				rc = -EINVAL;
			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			sde_kms->hw_vbif[vbif_idx] = NULL;
			goto power_error;
		}
	}

	if (sde_kms->catalog->uidle_cfg.uidle_rev) {
		sde_kms->hw_uidle = sde_hw_uidle_init(UIDLE, sde_kms->mmio,
				sde_kms->mmio_len, sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_uidle)) {
			rc = PTR_ERR(sde_kms->hw_uidle);
			if (!sde_kms->hw_uidle)
				rc = -EINVAL;
			/* uidle is optional, so do not make it a fatal error */
			SDE_ERROR("failed to init uidle rc:%d\n", rc);
			sde_kms->hw_uidle = NULL;
			rc = 0;
		}
	} else {
		sde_kms->hw_uidle = NULL;
	}

	if (sde_kms->sid) {
		sde_kms->hw_sid = sde_hw_sid_init(sde_kms->sid,
				sde_kms->sid_len, sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_sid)) {
			rc = PTR_ERR(sde_kms->hw_sid);
			SDE_ERROR("failed to init sid %d\n", rc);
			sde_kms->hw_sid = NULL;
			goto power_error;
		}
	}

	rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
			&priv->phandle, "core_clk");
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	/*
	 * Set the disable_immediate flag when the driver supports precise
	 * vsync timestamps, as the DRM hooks for vblank timestamps/counters
	 * are chosen based on this feature.
	 */
	if (test_bit(SDE_FEATURE_HW_VSYNC_TS, sde_kms->catalog->features))
		dev->vblank_disable_immediate = true;

	/*
	 * _sde_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _sde_kms_drm_obj_init(sde_kms);
	if (rc) {
		SDE_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	return 0;

genpd_err:
drm_obj_init_err:
	sde_core_perf_destroy(&sde_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
	return rc;
}
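
/*
 * _sde_kms_get_tvm_inclusion_mem - build a list of the catalog's trusted-VM
 * inclusion register ranges as msm_io_mem_entry items, splicing onto
 * mem_list only after every allocation succeeds so that a mid-loop -ENOMEM
 * leaves the caller's list untouched.
 */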
int _sde_kms_get_tvm_inclusion_mem(struct sde_mdss_cfg *catalog, struct list_head *mem_list)
{
	struct list_head temp_head;
	struct msm_io_mem_entry *io_mem;
	int rc, i;

	INIT_LIST_HEAD(&temp_head);

	for (i = 0; i < catalog->tvm_reg_count; i++) {
		struct resource *res = &catalog->tvm_reg[i];

		io_mem = kzalloc(sizeof(struct msm_io_mem_entry), GFP_KERNEL);
		if (!io_mem) {
			rc = -ENOMEM;
			goto parse_fail;
		}

		io_mem->base = res->start;
		io_mem->size = resource_size(res);

		list_add(&io_mem->list, &temp_head);
	}

	list_splice(&temp_head, mem_list);

	return 0;

parse_fail:
	msm_dss_clean_io_mem(&temp_head);

	return rc;
}

#ifdef CONFIG_DRM_SDE_VM
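/*
 * sde_kms_get_io_resources - collect every IO memory range and IRQ that must
 * be handed across VMs: the KMS register regions, the PMIC region, the SDE
 * interrupt, and the catalog's trusted-VM inclusion ranges.
 */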
int sde_kms_get_io_resources(struct sde_kms *sde_kms, struct msm_io_res *io_res)
{
	struct platform_device *pdev = to_platform_device(sde_kms->dev->dev);
	int rc = 0;

	rc = msm_dss_get_io_mem(pdev, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get io mem for KMS, rc:%d\n", rc);
		return rc;
	}

	rc = msm_dss_get_pmic_io_mem(pdev, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get io mem for pmic, rc:%d\n", rc);
		return rc;
	}

	rc = msm_dss_get_io_irq(pdev, &io_res->irq, GH_IRQ_LABEL_SDE);
	if (rc) {
		SDE_ERROR("failed to get io irq for KMS, rc:%d\n", rc);
		return rc;
	}

	rc = _sde_kms_get_tvm_inclusion_mem(sde_kms->catalog, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get tvm inclusion mem ranges, rc:%d\n", rc);
		return rc;
	}

	return rc;
}
#endif
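
/*
 * sde_kms_hw_init - top-level KMS hardware init: validate the device
 * pointers, map the register regions, fetch any continuous-splash data
 * (optional, so a failure is only logged), bring up the hardware blocks,
 * publish the display mode limits, and register the per-VM ownership ops.
 * On any fatal error everything initialized so far is torn down via
 * _sde_kms_hw_destroy().
 */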
static int sde_kms_hw_init(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct platform_device *platformdev;
	int irq_num, rc = -EINVAL;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		goto end;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->dev) {
		SDE_ERROR("invalid device\n");
		goto end;
	}
	platformdev = to_platform_device(dev->dev);

	priv = dev->dev_private;
	if (!priv) {
		SDE_ERROR("invalid private data\n");
		goto end;
	}

	rc = _sde_kms_hw_init_ioremap(sde_kms, platformdev);
	if (rc)
		goto error;

	rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
	if (rc)
		SDE_DEBUG("sde splash data fetch failed: %d\n", rc);

	rc = _sde_kms_hw_init_blocks(sde_kms, dev, priv);
	if (rc)
		goto error;

	dev->mode_config.min_width = sde_kms->catalog->min_display_width;
	dev->mode_config.min_height = sde_kms->catalog->min_display_height;
	dev->mode_config.max_width = sde_kms->catalog->max_display_width;
	dev->mode_config.max_height = sde_kms->catalog->max_display_height;

	mutex_init(&sde_kms->secure_transition_lock);
	atomic_set(&sde_kms->detach_sec_cb, 0);
	atomic_set(&sde_kms->detach_all_cb, 0);
	atomic_set(&sde_kms->irq_vote_count, 0);

	/*
	 * Support format modifiers for compression etc.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 19, 0))
	dev->mode_config.allow_fb_modifiers = true;
#endif

	sde_kms->affinity_notify.notify = sde_kms_irq_affinity_notify;
	sde_kms->affinity_notify.release = sde_kms_irq_affinity_release;

	irq_num = platform_get_irq(to_platform_device(sde_kms->dev->dev), 0);
	SDE_DEBUG("Registering for notification of irq_num: %d\n", irq_num);
	irq_set_affinity_notifier(irq_num, &sde_kms->affinity_notify);

	if (sde_in_trusted_vm(sde_kms)) {
		rc = sde_vm_trusted_init(sde_kms);
		sde_dbg_set_hw_ownership_status(false);
	} else {
		rc = sde_vm_primary_init(sde_kms);
		sde_dbg_set_hw_ownership_status(true);
	}
	if (rc) {
		SDE_ERROR("failed to initialize VM ops, rc: %d\n", rc);
		goto error;
	}

	return 0;

error:
	_sde_kms_hw_destroy(sde_kms, platformdev);
end:
	return rc;
}
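
/*
 * sde_kms_init - allocate the sde_kms wrapper, hook up the kms_funcs vtable
 * through msm_kms_init(), and hand the embedded msm_kms base back to the
 * msm_drv probe path.
 */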
struct msm_kms *sde_kms_init(struct drm_device *dev)
{
	struct sde_kms *sde_kms;

	if (!dev || !dev->dev_private) {
		SDE_ERROR("drm device node invalid\n");
		return ERR_PTR(-EINVAL);
	}

	sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
	if (!sde_kms) {
		SDE_ERROR("failed to allocate sde kms\n");
		return ERR_PTR(-ENOMEM);
	}

	msm_kms_init(&sde_kms->base, &kms_funcs);
	sde_kms->dev = dev;

	return &sde_kms->base;
}
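
/*
 * sde_kms_vm_trusted_resource_deinit - undo the trusted-VM handoff: release
 * any splash display data that is still held, mark each DSI display
 * inactive, and clear the splash bookkeeping.
 */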
void sde_kms_vm_trusted_resource_deinit(struct sde_kms *sde_kms)
{
	struct dsi_display *display;
	struct sde_splash_display *handoff_display;
	int i;

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		handoff_display = &sde_kms->splash_data.splash_display[i];
		display = (struct dsi_display *)sde_kms->dsi_displays[i];

		if (handoff_display->cont_splash_enabled)
			_sde_kms_free_splash_display_data(sde_kms,
					handoff_display);
		dsi_display_set_active_state(display, false);
	}

	memset(&sde_kms->splash_data, 0, sizeof(struct sde_splash_data));
}
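
/*
 * sde_kms_vm_trusted_resource_init - claim the display hardware handed off
 * by the primary VM: reserve the continuous-splash resources, activate the
 * single supported DSI display, apply the handoff configuration, and take a
 * runtime PM vote that the first successful commit releases.
 */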
int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_splash_display *handoff_display;
	struct dsi_display *display;
	int ret, i;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	sde_kms->splash_data.type = SDE_VM_HANDOFF;
	sde_kms->splash_data.num_splash_displays = sde_kms->dsi_display_count;

	ret = sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
			&sde_kms->splash_data, sde_kms->catalog);
	if (ret) {
		SDE_ERROR("invalid cont splash init, ret:%d\n", ret);
		return -EINVAL;
	}

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		handoff_display = &sde_kms->splash_data.splash_display[i];
		display = (struct dsi_display *)sde_kms->dsi_displays[i];

		if (!handoff_display->cont_splash_enabled || ret)
			_sde_kms_free_splash_display_data(sde_kms,
					handoff_display);
		else
			dsi_display_set_active_state(display, true);
	}

	if (sde_kms->splash_data.num_splash_displays != 1) {
		SDE_ERROR("no. of displays not supported:%d\n",
				sde_kms->splash_data.num_splash_displays);
		ret = -EINVAL;
		goto error;
	}

	ret = sde_kms_cont_splash_config(&sde_kms->base, state);
	if (ret) {
		SDE_ERROR("error in setting handoff configs\n");
		goto error;
	}

	/*
	 * Fill in a vote for the continuous splash handoff path, which will
	 * be removed on the first successful commit.
	 */
	ret = pm_runtime_resume_and_get(sde_kms->dev->dev);
	if (ret < 0) {
		SDE_ERROR("failed to enable power resource %d\n", ret);
		SDE_EVT32(ret, SDE_EVTLOG_ERROR);
		goto error;
	}

	return 0;

error:
	return ret;
}
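
/*
 * _sde_kms_register_events - (un)register a custom CRTC or connector event,
 * but only while this VM owns the display hardware; returns -EACCES when
 * another VM holds it.
 */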
static int _sde_kms_register_events(struct msm_kms *kms,
		struct drm_mode_object *obj, u32 event, bool en)
{
	int ret = 0;
	struct drm_crtc *crtc;
	struct drm_connector *conn;
	struct sde_kms *sde_kms;

	if (!kms || !obj) {
		SDE_ERROR("invalid argument kms %pK obj %pK\n", kms, obj);
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);

	/* check vm ownership, since event registration requires HW access */
	sde_vm_lock(sde_kms);
	if (!sde_vm_owns_hw(sde_kms)) {
		sde_vm_unlock(sde_kms);
		SDE_DEBUG("HW is owned by other VM\n");
		return -EACCES;
	}

	switch (obj->type) {
	case DRM_MODE_OBJECT_CRTC:
		crtc = obj_to_crtc(obj);
		ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
		break;
	case DRM_MODE_OBJECT_CONNECTOR:
		conn = obj_to_connector(obj);
		ret = sde_connector_register_custom_event(sde_kms, conn, event,
				en);
		break;
	}

	sde_vm_unlock(sde_kms);

	return ret;
}
int sde_kms_handle_recovery(struct drm_encoder *encoder)
{
	SDE_EVT32(DRMID(encoder), MSM_ENC_ACTIVE_REGION);

	return sde_encoder_wait_for_event(encoder, MSM_ENC_ACTIVE_REGION);
}
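
/*
 * sde_kms_add_data_to_minidump_va - register the KMS object and every CRTC,
 * plane, encoder, and connector (plus their current states) with the
 * minidump facility so their virtual-address regions are captured in a
 * crash dump.
 */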
void sde_kms_add_data_to_minidump_va(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;
	struct sde_crtc *sde_crtc;
	struct sde_crtc_state *cstate;
	struct sde_connector *sde_conn;
	struct sde_connector_state *conn_state;
	u32 i;

	priv = sde_kms->dev->dev_private;

	sde_mini_dump_add_va_region("sde_kms", sizeof(*sde_kms), sde_kms);

	for (i = 0; i < priv->num_crtcs; i++) {
		sde_crtc = to_sde_crtc(priv->crtcs[i]);
		cstate = to_sde_crtc_state(priv->crtcs[i]->state);
		sde_mini_dump_add_va_region("sde_crtc", sizeof(*sde_crtc), sde_crtc);
		sde_mini_dump_add_va_region("crtc_state", sizeof(*cstate), cstate);
	}

	for (i = 0; i < priv->num_planes; i++)
		sde_plane_add_data_to_minidump_va(priv->planes[i]);

	for (i = 0; i < priv->num_encoders; i++)
		sde_encoder_add_data_to_minidump_va(priv->encoders[i]);

	for (i = 0; i < priv->num_connectors; i++) {
		sde_conn = to_sde_connector(priv->connectors[i]);
		conn_state = to_sde_connector_state(priv->connectors[i]->state);
		sde_mini_dump_add_va_region("sde_conn", sizeof(*sde_conn), sde_conn);
		sde_mini_dump_add_va_region("conn_state", sizeof(*conn_state), conn_state);
	}
}