sde_kms.c

/*
 * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt)	"[drm:%s:%d] " fmt, __func__, __LINE__

#include <drm/drm_crtc.h>
#include <drm/drm_fixed.h>
#include <drm/drm_panel.h>
#include <linux/debugfs.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/dma-buf.h>
#include <linux/memblock.h>
#include <linux/soc/qcom/panel_event_notifier.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_probe_helper.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"

#include "dsi_display.h"
#include "dsi_drm.h"
#include "sde_wb.h"
#include "dp_display.h"
#include "dp_drm.h"
#include "dp_mst_drm.h"

#include "sde_kms.h"
#include "sde_core_irq.h"
#include "sde_formats.h"
#include "sde_hw_vbif.h"
#include "sde_vbif.h"
#include "sde_encoder.h"
#include "sde_plane.h"
#include "sde_crtc.h"
#include "sde_color_processing.h"
#include "sde_reg_dma.h"
#include "sde_connector.h"
#include "sde_vm.h"

#include <linux/qcom_scm.h>
#include <linux/qcom-iommu-util.h>
#include "soc/qcom/secure_buffer.h"
#include <linux/qtee_shmbridge.h>

#ifdef CONFIG_DRM_SDE_VM
#include <linux/gunyah/gh_irq_lend.h>
#endif

#define CREATE_TRACE_POINTS
#include "sde_trace.h"

/* defines for secure channel call */
#define MEM_PROTECT_SD_CTRL_SWITCH	0x18
#define MDP_DEVICE_ID			0x1A

#define DEMURA_REGION_NAME_MAX		32

EXPORT_TRACEPOINT_SYMBOL(tracing_mark_write);

static const char * const iommu_ports[] = {
		"mdp_0",
};

/**
 * Controls size of event log buffer. Specified as a power of 2.
 */
#define SDE_EVTLOG_SIZE	1024

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
 */
#define SDE_DEBUGFS_DIR "msm_sde"
#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"

#define SDE_KMS_MODESET_LOCK_TIMEOUT_US 500
#define SDE_KMS_MODESET_LOCK_MAX_TRIALS 20

/**
 * sdecustom - enable certain driver customizations for sde clients
 *	Enabling this modifies the standard DRM behavior slightly and assumes
 *	that the clients have specific knowledge about the modifications that
 *	are involved, so don't enable this unless you know what you're doing.
 *
 *	Parts of the driver that are affected by this setting may be located by
 *	searching for invocations of the 'sde_is_custom_client()' function.
 *
 *	This is enabled by default.
 */
static bool sdecustom = true;
module_param(sdecustom, bool, 0400);
MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");

static int sde_kms_hw_init(struct msm_kms *kms);
static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
static int _sde_kms_mmu_init(struct sde_kms *sde_kms);
static int _sde_kms_register_events(struct msm_kms *kms,
		struct drm_mode_object *obj, u32 event, bool en);

bool sde_is_custom_client(void)
{
	return sdecustom;
}

#ifdef CONFIG_DEBUG_FS
void *sde_debugfs_get_root(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
		return NULL;

	priv = sde_kms->dev->dev_private;
	return priv->debug_root;
}

static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	void *p;
	int rc;
	void *debugfs_root;

	p = sde_hw_util_get_log_mask_ptr();

	if (!sde_kms || !p)
		return -EINVAL;

	debugfs_root = sde_debugfs_get_root(sde_kms);
	if (!debugfs_root)
		return -EINVAL;

	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0600, debugfs_root, p);

	(void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
	(void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);

	rc = sde_core_perf_debugfs_init(&sde_kms->perf, debugfs_root);
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		return rc;
	}

	sde_rm_debugfs_init(&sde_kms->rm, debugfs_root);

	if (sde_kms->catalog->qdss_count)
		debugfs_create_u32("qdss", 0600, debugfs_root,
				(u32 *)&sde_kms->qdss_enabled);

	debugfs_create_u32("pm_suspend_clk_dump", 0600, debugfs_root,
			(u32 *)&sde_kms->pm_suspend_clk_dump);

	return 0;
}

static void sde_kms_debugfs_destroy(struct msm_kms *kms)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);

	/* don't need to NULL check debugfs_root */
	if (sde_kms) {
		sde_debugfs_vbif_destroy(sde_kms);
		sde_debugfs_core_irq_destroy(sde_kms);
	}
}

static int _sde_kms_dump_clks_state(struct sde_kms *sde_kms)
{
	int i;
	struct device *dev = sde_kms->dev->dev;

	SDE_INFO("runtime PM suspended:%d", pm_runtime_suspended(dev));

	for (i = 0; i < sde_kms->dsi_display_count; i++)
		dsi_display_dump_clks_state(sde_kms->dsi_displays[i]);

	return 0;
}
#else
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	return 0;
}

static void sde_kms_debugfs_destroy(struct msm_kms *kms)
{
}

static int _sde_kms_dump_clks_state(struct sde_kms *sde_kms)
{
	return 0;
}
#endif

static void sde_kms_wait_for_frame_transfer_complete(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state || !crtc->dev) {
		SDE_ERROR("invalid params\n");
		return;
	}

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	dev = crtc->dev;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;

		/*
		 * Video Mode - Wait for VSYNC
		 * Cmd Mode - Wait for PP_DONE. Will be no-op if transfer is
		 * complete
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR(
			"[crtc: %d][enc: %d] wait for commit done returned %d\n",
				crtc->base.id, encoder->base.id, ret);
			break;
		}
	}
}

static int _sde_kms_secure_ctrl_xin_clients(struct sde_kms *sde_kms,
		struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_mdss_cfg *sde_cfg;
	struct drm_plane *plane;
	int i, ret;

	dev = sde_kms->dev;
	priv = dev->dev_private;
	sde_cfg = sde_kms->catalog;

	ret = sde_vbif_halt_xin_mask(sde_kms,
			sde_cfg->sui_block_xin_mask, enable);
	if (ret) {
		SDE_ERROR("failed to halt some xin-clients, ret:%d\n", ret);
		return ret;
	}

	if (enable) {
		for (i = 0; i < priv->num_planes; i++) {
			plane = priv->planes[i];
			sde_plane_secure_ctrl_xin_client(plane, crtc);
		}
	}

	return 0;
}

/**
 * _sde_kms_scm_call - makes secure channel call to switch the VMIDs
 * @sde_kms: Pointer to sde_kms struct
 * @vmid: switch the stage 2 translation to this VMID
 */
static int _sde_kms_scm_call(struct sde_kms *sde_kms, int vmid)
{
	struct device dummy = {};
	dma_addr_t dma_handle;
	uint32_t num_sids;
	uint32_t *sec_sid;
	struct sde_mdss_cfg *sde_cfg = sde_kms->catalog;
	int ret = 0, i;
	struct qtee_shm shm;
	bool qtee_en = qtee_shmbridge_is_enabled();
	phys_addr_t mem_addr;
	u64 mem_size;

	num_sids = sde_cfg->sec_sid_mask_count;
	if (!num_sids) {
		SDE_ERROR("secure SID masks not configured, vmid 0x%x\n", vmid);
		return -EINVAL;
	}

	if (qtee_en) {
		ret = qtee_shmbridge_allocate_shm(num_sids * sizeof(uint32_t),
				&shm);
		if (ret)
			return -ENOMEM;

		sec_sid = (uint32_t *) shm.vaddr;
		mem_addr = shm.paddr;
		/*
		 * SMMUSecureModeSwitch requires the size to be the number of
		 * SIDs, but shm allocates size in pages. Modify the args as
		 * per client requirement.
		 */
		mem_size = sizeof(uint32_t) * num_sids;
	} else {
		sec_sid = kcalloc(num_sids, sizeof(uint32_t), GFP_KERNEL);
		if (!sec_sid)
			return -ENOMEM;

		mem_addr = virt_to_phys(sec_sid);
		mem_size = sizeof(uint32_t) * num_sids;
	}

	for (i = 0; i < num_sids; i++) {
		sec_sid[i] = sde_cfg->sec_sid_mask[i];
		SDE_DEBUG("sid_mask[%d]: %d\n", i, sec_sid[i]);
	}

	ret = dma_coerce_mask_and_coherent(&dummy, DMA_BIT_MASK(64));
	if (ret) {
		SDE_ERROR("Failed to set dma mask for dummy dev %d\n", ret);
		goto map_error;
	}

	set_dma_ops(&dummy, NULL);

	dma_handle = dma_map_single(&dummy, sec_sid,
			num_sids * sizeof(uint32_t), DMA_TO_DEVICE);
	if (dma_mapping_error(&dummy, dma_handle)) {
		SDE_ERROR("dma_map_single for dummy dev failed vmid 0x%x\n",
				vmid);
		goto map_error;
	}

	SDE_DEBUG("calling scm_call for vmid 0x%x, num_sids %d, qtee_en %d\n",
			vmid, num_sids, qtee_en);

	ret = qcom_scm_mem_protect_sd_ctrl(MDP_DEVICE_ID, mem_addr,
			mem_size, vmid);
	if (ret)
		SDE_ERROR("Error: scm_call2, vmid %d, ret %d\n",
				vmid, ret);

	SDE_EVT32(MEM_PROTECT_SD_CTRL_SWITCH, MDP_DEVICE_ID, mem_size,
			vmid, qtee_en, num_sids, ret);

	dma_unmap_single(&dummy, dma_handle,
			num_sids * sizeof(uint32_t), DMA_TO_DEVICE);

map_error:
	if (qtee_en)
		qtee_shmbridge_free_shm(&shm);
	else
		kfree(sec_sid);

	return ret;
}

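/*
 * Refcounted detach of all SMMU contexts followed by a stage-2 VMID switch.
 * Only the first of the paired detach/attach callers does the work; the
 * counter and the MMU attach are unwound if the SCM call fails.
 */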
static int _sde_kms_detach_all_cb(struct sde_kms *sde_kms, u32 vmid)
{
	u32 ret;

	if (atomic_inc_return(&sde_kms->detach_all_cb) > 1)
		return 0;

	/* detach_all_contexts */
	ret = sde_kms_mmu_detach(sde_kms, false);
	if (ret) {
		SDE_ERROR("failed to detach all cb ret:%d\n", ret);
		goto mmu_error;
	}

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	return 0;

scm_error:
	sde_kms_mmu_attach(sde_kms, false);
mmu_error:
	atomic_dec(&sde_kms->detach_all_cb);
	return ret;
}

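/*
 * Counterpart of _sde_kms_detach_all_cb: switch the stage-2 translation to
 * @vmid and re-attach all SMMU contexts. If the re-attach fails, the SCM
 * call is replayed with @old_vmid to back out the VMID switch.
 */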
static int _sde_kms_attach_all_cb(struct sde_kms *sde_kms, u32 vmid,
		u32 old_vmid)
{
	u32 ret;

	if (atomic_dec_return(&sde_kms->detach_all_cb) != 0)
		return 0;

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	/* attach_all_contexts */
	ret = sde_kms_mmu_attach(sde_kms, false);
	if (ret) {
		SDE_ERROR("failed to attach all cb ret:%d\n", ret);
		goto mmu_error;
	}

	return 0;

mmu_error:
	_sde_kms_scm_call(sde_kms, old_vmid);
scm_error:
	atomic_inc(&sde_kms->detach_all_cb);
	return ret;
}

static int _sde_kms_detach_sec_cb(struct sde_kms *sde_kms, int vmid)
{
	u32 ret;

	if (atomic_inc_return(&sde_kms->detach_sec_cb) > 1)
		return 0;

	/* detach secure_context */
	ret = sde_kms_mmu_detach(sde_kms, true);
	if (ret) {
		SDE_ERROR("failed to detach sec cb ret:%d\n", ret);
		goto mmu_error;
	}

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	return 0;

scm_error:
	sde_kms_mmu_attach(sde_kms, true);
mmu_error:
	atomic_dec(&sde_kms->detach_sec_cb);
	return ret;
}

static int _sde_kms_attach_sec_cb(struct sde_kms *sde_kms, u32 vmid,
		u32 old_vmid)
{
	u32 ret;

	if (atomic_dec_return(&sde_kms->detach_sec_cb) != 0)
		return 0;

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	ret = sde_kms_mmu_attach(sde_kms, true);
	if (ret) {
		SDE_ERROR("failed to attach sec cb ret:%d\n", ret);
		goto mmu_error;
	}

	return 0;

mmu_error:
	_sde_kms_scm_call(sde_kms, old_vmid);
scm_error:
	atomic_inc(&sde_kms->detach_sec_cb);
	return ret;
}

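/*
 * Toggle secure-UI MISR collection on the given CRTC. Enabling takes a
 * runtime PM reference, configures the CRTC MISR, and halts the xin clients
 * in the catalog's sui_block_xin_mask; disabling reverses all three steps.
 */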
static int _sde_kms_sui_misr_ctrl(struct sde_kms *sde_kms,
		struct drm_crtc *crtc, bool enable)
{
	int ret;

	if (enable) {
		ret = pm_runtime_get_sync(sde_kms->dev->dev);
		if (ret < 0) {
			SDE_ERROR("failed to enable resource, ret:%d\n", ret);
			return ret;
		}

		sde_crtc_misr_setup(crtc, true, 1);

		ret = _sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, true);
		if (ret) {
			sde_crtc_misr_setup(crtc, false, 0);
			pm_runtime_put_sync(sde_kms->dev->dev);
			return ret;
		}
	} else {
		_sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, false);
		sde_crtc_misr_setup(crtc, false, 0);
		pm_runtime_put_sync(sde_kms->dev->dev);
	}

	return 0;
}

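/*
 * Core SMMU secure-state machine, run pre- or post-commit depending on the
 * requested transition type: detach/attach either all contexts or only the
 * secure context, and switch the stage-2 VMID to match the secure level.
 * On failure the previous state and secure level are restored and
 * transition_error is flagged.
 */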
static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
		bool post_commit)
{
	struct sde_kms_smmu_state_data *smmu_state = &sde_kms->smmu_state;
	int old_smmu_state = smmu_state->state;
	int ret = 0;
	u32 vmid;

	if (!sde_kms || !crtc) {
		SDE_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->transition_type,
			post_commit, smmu_state->sui_misr_state,
			smmu_state->secure_level, SDE_EVTLOG_FUNC_ENTRY);

	if ((!smmu_state->transition_type) ||
	    ((smmu_state->transition_type == POST_COMMIT) && !post_commit))
		/* Bail out */
		return 0;

	/* enable sui misr if requested, before the transition */
	if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ) {
		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, true);
		if (ret) {
			smmu_state->sui_misr_state = NONE;
			goto end;
		}
	}

	mutex_lock(&sde_kms->secure_transition_lock);
	switch (smmu_state->state) {
	case DETACH_ALL_REQ:
		ret = _sde_kms_detach_all_cb(sde_kms, VMID_CP_SEC_DISPLAY);
		if (!ret)
			smmu_state->state = DETACHED;
		break;

	case ATTACH_ALL_REQ:
		ret = _sde_kms_attach_all_cb(sde_kms, VMID_CP_PIXEL,
				VMID_CP_SEC_DISPLAY);
		if (!ret) {
			smmu_state->state = ATTACHED;
			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
		}
		break;

	case DETACH_SEC_REQ:
		vmid = (smmu_state->secure_level == SDE_DRM_SEC_ONLY) ?
				VMID_CP_SEC_DISPLAY : VMID_CP_CAMERA_PREVIEW;

		ret = _sde_kms_detach_sec_cb(sde_kms, vmid);
		if (!ret)
			smmu_state->state = DETACHED_SEC;
		break;

	case ATTACH_SEC_REQ:
		vmid = (smmu_state->secure_level == SDE_DRM_SEC_ONLY) ?
				VMID_CP_SEC_DISPLAY : VMID_CP_CAMERA_PREVIEW;

		ret = _sde_kms_attach_sec_cb(sde_kms, VMID_CP_PIXEL, vmid);
		if (!ret) {
			smmu_state->state = ATTACHED;
			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
		}
		break;

	default:
		SDE_ERROR("crtc%d: invalid smmu state %d transition type %d\n",
				DRMID(crtc), smmu_state->state,
				smmu_state->transition_type);
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&sde_kms->secure_transition_lock);

	/* disable sui misr if requested, after the transition */
	if (!ret && (smmu_state->sui_misr_state == SUI_MISR_DISABLE_REQ)) {
		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
		if (ret)
			goto end;
	}

end:
	smmu_state->transition_error = false;
	if (ret) {
		smmu_state->transition_error = true;
		SDE_ERROR(
		"crtc%d: req_state %d, new_state %d, sec_lvl %d, ret %d\n",
			DRMID(crtc), old_smmu_state, smmu_state->state,
			smmu_state->secure_level, ret);

		smmu_state->state = smmu_state->prev_state;
		smmu_state->secure_level = smmu_state->prev_secure_level;

		if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ)
			_sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
	}

	SDE_DEBUG("crtc %d: req_state %d, new_state %d, sec_lvl %d, ret %d\n",
			DRMID(crtc), old_smmu_state, smmu_state->state,
			smmu_state->secure_level, ret);

	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->prev_state,
			smmu_state->transition_type,
			smmu_state->transition_error,
			smmu_state->secure_level, smmu_state->prev_secure_level,
			smmu_state->sui_misr_state, ret, SDE_EVTLOG_FUNC_EXIT);

	smmu_state->sui_misr_state = NONE;
	smmu_state->transition_type = NONE;

	return ret;
}

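/*
 * Pre-commit hook for secure transitions: query the operations the active
 * CRTC needs (wait for TX done, clean up plane FBs, switch SMMU state,
 * re-prepare plane FBs) and execute them in that order.
 */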
static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	int i, ops = 0, ret = 0;
	bool old_valid_fb = false;
	struct sde_kms_smmu_state_data *smmu_state = &sde_kms->smmu_state;

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		if (!crtc->state || !crtc->state->active)
			continue;

		/*
		 * It is safe to assume only one active crtc,
		 * and compatible translation modes on the
		 * planes staged on this crtc; otherwise
		 * validation would have failed.
		 * For this CRTC:
		 */

		/*
		 * 1. Check if old state on the CRTC has planes
		 * staged with valid fbs
		 */
		for_each_old_plane_in_state(state, plane, plane_state, i) {
			if (!plane_state->crtc)
				continue;
			if (plane_state->fb) {
				old_valid_fb = true;
				break;
			}
		}

		/*
		 * 2. Get the operations needed to be performed before
		 * secure transition can be initiated.
		 */
		ops = sde_crtc_get_secure_transition_ops(crtc,
				old_crtc_state, old_valid_fb);
		if (ops < 0) {
			SDE_ERROR("invalid secure operations %x\n", ops);
			return ops;
		}

		if (!ops) {
			smmu_state->transition_error = false;
			goto no_ops;
		}

		SDE_DEBUG("%d:secure operations(%x) started on state:%pK\n",
				crtc->base.id, ops, crtc->state);
		SDE_EVT32(DRMID(crtc), ops, crtc->state, old_valid_fb);

		/* 3. Perform operations needed for secure transition */
		if (ops & SDE_KMS_OPS_WAIT_FOR_TX_DONE) {
			SDE_DEBUG("wait_for_transfer_done\n");
			sde_kms_wait_for_frame_transfer_complete(kms, crtc);
		}
		if (ops & SDE_KMS_OPS_CLEANUP_PLANE_FB) {
			SDE_DEBUG("cleanup planes\n");
			drm_atomic_helper_cleanup_planes(dev, state);
			for_each_oldnew_plane_in_state(state, plane,
					old_plane_state, new_plane_state, i)
				sde_plane_destroy_fb(old_plane_state);
		}
		if (ops & SDE_KMS_OPS_SECURE_STATE_CHANGE) {
			SDE_DEBUG("secure ctrl\n");
			_sde_kms_secure_ctrl(sde_kms, crtc, false);
		}
		if (ops & SDE_KMS_OPS_PREPARE_PLANE_FB) {
			SDE_DEBUG("prepare planes %d",
					crtc->state->plane_mask);
			drm_atomic_crtc_for_each_plane(plane, crtc) {
				const struct drm_plane_helper_funcs *funcs;

				plane_state = plane->state;
				funcs = plane->helper_private;

				SDE_DEBUG("psde:%d FB[%u]\n",
						plane->base.id,
						plane->fb->base.id);
				if (!funcs)
					continue;

				if (funcs->prepare_fb) {
					ret = funcs->prepare_fb(plane,
							plane_state);
					if (ret)
						return ret;
				}
			}
		}
		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
		SDE_DEBUG("secure operations completed\n");
	}

no_ops:
	return 0;
}

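/*
 * Hand the continuous-splash carveout back to the kernel, leaving the
 * ramdump region untouched when it shares the same base: free the range
 * from memblock, then release each page as a reserved page.
 */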
static int _sde_kms_release_shared_buffer(unsigned int mem_addr,
		unsigned int splash_buffer_size,
		unsigned int ramdump_base,
		unsigned int ramdump_buffer_size)
{
	unsigned long pfn_start, pfn_end, pfn_idx;
	int ret = 0;

	if (!mem_addr || !splash_buffer_size) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	/* leave ramdump memory only if base address matches */
	if (ramdump_base == mem_addr &&
			ramdump_buffer_size <= splash_buffer_size) {
		mem_addr += ramdump_buffer_size;
		splash_buffer_size -= ramdump_buffer_size;
	}

	pfn_start = mem_addr >> PAGE_SHIFT;
	pfn_end = (mem_addr + splash_buffer_size) >> PAGE_SHIFT;

	ret = memblock_free(mem_addr, splash_buffer_size);
	if (ret) {
		SDE_ERROR("continuous splash memory free failed:%d\n", ret);
		return ret;
	}

	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
		free_reserved_page(pfn_to_page(pfn_idx));

	return ret;
}

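/*
 * Take a reference on a splash/demura region and, on first use, install a
 * read-only one-to-one SMMU mapping for it, presumably so the display can
 * keep scanning out the bootloader framebuffer until handoff completes.
 */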
static int _sde_kms_splash_mem_get(struct sde_kms *sde_kms,
		struct sde_splash_mem *splash)
{
	struct msm_mmu *mmu = NULL;
	int ret = 0;

	if (!sde_kms->aspace[0]) {
		SDE_ERROR("aspace not found for sde kms node\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[0]->mmu;
	if (!mmu) {
		SDE_ERROR("mmu not found for aspace\n");
		return -EINVAL;
	}

	if (!splash || !mmu->funcs || !mmu->funcs->one_to_one_map) {
		SDE_ERROR("invalid input params for map\n");
		return -EINVAL;
	}

	if (!splash->ref_cnt) {
		ret = mmu->funcs->one_to_one_map(mmu, splash->splash_buf_base,
				splash->splash_buf_base,
				splash->splash_buf_size,
				IOMMU_READ | IOMMU_NOEXEC);
		if (ret)
			SDE_ERROR("splash memory smmu map failed:%d\n", ret);
	}

	splash->ref_cnt++;
	SDE_DEBUG("one2one mapping done for base:%lx size:%x ref_cnt:%d\n",
			splash->splash_buf_base,
			splash->splash_buf_size,
			splash->ref_cnt);

	return ret;
}

static int _sde_kms_map_all_splash_regions(struct sde_kms *sde_kms)
{
	int i = 0;
	int ret = 0;
	struct sde_splash_mem *region;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) {
		region = sde_kms->splash_data.splash_display[i].splash;
		ret = _sde_kms_splash_mem_get(sde_kms, region);
		if (ret)
			return ret;

		/* Demura is optional and need not exist */
		region = sde_kms->splash_data.splash_display[i].demura;
		if (region) {
			ret = _sde_kms_splash_mem_get(sde_kms, region);
			if (ret)
				return ret;
		}
	}

	return ret;
}

static int _sde_kms_splash_mem_put(struct sde_kms *sde_kms,
		struct sde_splash_mem *splash)
{
	struct msm_mmu *mmu = NULL;
	int rc = 0;

	if (!sde_kms || !sde_kms->aspace[0] || !sde_kms->aspace[0]->mmu) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[0]->mmu;

	if (!splash || !splash->ref_cnt ||
			!mmu || !mmu->funcs || !mmu->funcs->one_to_one_unmap)
		return -EINVAL;

	splash->ref_cnt--;

	SDE_DEBUG("splash base:%lx refcnt:%d\n",
			splash->splash_buf_base, splash->ref_cnt);

	if (!splash->ref_cnt) {
		mmu->funcs->one_to_one_unmap(mmu, splash->splash_buf_base,
				splash->splash_buf_size);
		rc = _sde_kms_release_shared_buffer(splash->splash_buf_base,
				splash->splash_buf_size, splash->ramdump_base,
				splash->ramdump_size);
		splash->splash_buf_base = 0;
		splash->splash_buf_size = 0;
	}

	return rc;
}

static int _sde_kms_unmap_all_splash_regions(struct sde_kms *sde_kms)
{
	int i = 0;
	int ret = 0, failure = 0;
	struct sde_splash_mem *region;

	if (!sde_kms || !sde_kms->splash_data.num_splash_regions)
		return -EINVAL;

	for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) {
		region = sde_kms->splash_data.splash_display[i].splash;
		ret = _sde_kms_splash_mem_put(sde_kms, region);
		if (ret) {
			failure = 1;
			pr_err("Error unmapping splash mem for display %d\n",
					i);
		}

		/* Demura is optional and need not exist */
		region = sde_kms->splash_data.splash_display[i].demura;
		if (region) {
			ret = _sde_kms_splash_mem_put(sde_kms, region);
			if (ret) {
				failure = 1;
				pr_err("Error unmapping demura mem for display %d\n",
						i);
			}
		}
	}

	if (failure)
		ret = -EINVAL;

	return ret;
}

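/*
 * Translate a connector's low-power property (and the CRTC active flag)
 * into the matching panel-event code: UNBLANK for DPMS_ON, BLANK_LP for the
 * LP1/LP2 states, and BLANK for off.
 */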
static int _sde_kms_get_blank(struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	int lp_mode, blank;

	if (crtc_state->active)
		lp_mode = sde_connector_get_property(conn_state,
				CONNECTOR_PROP_LP);
	else
		lp_mode = SDE_MODE_DPMS_OFF;

	switch (lp_mode) {
	case SDE_MODE_DPMS_ON:
		blank = DRM_PANEL_EVENT_UNBLANK;
		break;
	case SDE_MODE_DPMS_LP1:
	case SDE_MODE_DPMS_LP2:
		blank = DRM_PANEL_EVENT_BLANK_LP;
		break;
	case SDE_MODE_DPMS_OFF:
	default:
		blank = DRM_PANEL_EVENT_BLANK;
		break;
	}

	return blank;
}

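/*
 * Compare old and new atomic state per connector and emit a panel event
 * notification for every power-mode or refresh-rate change, tagged for the
 * primary or secondary panel as appropriate.
 */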
static void _sde_kms_drm_check_dpms(struct drm_atomic_state *old_state,
		bool is_pre_commit)
{
	struct panel_event_notification notification;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	struct sde_connector *c_conn;
	int i, old_mode, new_mode, old_fps, new_fps;
	enum panel_event_notifier_tag panel_type;

	for_each_old_connector_in_state(old_state, connector,
			old_conn_state, i) {
		crtc = connector->state->crtc ? connector->state->crtc :
				old_conn_state->crtc;
		if (!crtc)
			continue;

		new_fps = drm_mode_vrefresh(&crtc->state->mode);
		new_mode = _sde_kms_get_blank(crtc->state, connector->state);
		if (old_conn_state->crtc) {
			old_crtc_state = drm_atomic_get_existing_crtc_state(
					old_state, old_conn_state->crtc);

			old_fps = drm_mode_vrefresh(&old_crtc_state->mode);
			old_mode = _sde_kms_get_blank(old_crtc_state,
					old_conn_state);
		} else {
			old_fps = 0;
			old_mode = DRM_PANEL_EVENT_BLANK;
		}

		if ((old_mode != new_mode) || (old_fps != new_fps)) {
			c_conn = to_sde_connector(connector);
			SDE_EVT32(old_mode, new_mode, old_fps, new_fps,
					c_conn->panel, crtc->state->active,
					old_conn_state->crtc);
			pr_debug("change detected for connector:%s (power mode %d->%d, fps %d->%d)\n",
					c_conn->name, old_mode, new_mode,
					old_fps, new_fps);

			/*
			 * If suspend resume and fps change are happening
			 * at the same time, give preference to power mode
			 * changes rather than fps change.
			 */
			if ((old_mode == new_mode) && (old_fps != new_fps))
				new_mode = DRM_PANEL_EVENT_FPS_CHANGE;

			if (!c_conn->panel)
				continue;

			panel_type = sde_encoder_is_primary_display(
					connector->encoder) ?
					PANEL_EVENT_NOTIFICATION_PRIMARY :
					PANEL_EVENT_NOTIFICATION_SECONDARY;

			notification.notif_type = new_mode;
			notification.panel = c_conn->panel;
			notification.notif_data.old_fps = old_fps;
			notification.notif_data.new_fps = new_fps;
			notification.notif_data.early_trigger = is_pre_commit;
			panel_event_notification_trigger(panel_type,
					&notification);
		}
	}
}

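/*
 * Return the CRTC in this atomic state that carries a VM request
 * (acquire/release), or NULL; at most one such CRTC is expected per commit.
 */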
static struct drm_crtc *sde_kms_vm_get_vm_crtc(
		struct drm_atomic_state *state)
{
	int i;
	enum sde_crtc_vm_req vm_req = VM_REQ_NONE;
	struct drm_crtc *crtc, *vm_crtc = NULL;
	struct drm_crtc_state *new_cstate, *old_cstate;
	struct sde_crtc_state *vm_cstate;

	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		if (!new_cstate->active && !old_cstate->active)
			continue;

		vm_cstate = to_sde_crtc_state(new_cstate);
		vm_req = sde_crtc_get_property(vm_cstate,
				CRTC_PROP_VM_REQ_STATE);
		if (vm_req != VM_REQ_NONE) {
			SDE_DEBUG("valid vm request:%d found on crtc-%d\n",
					vm_req, crtc->base.id);
			vm_crtc = crtc;
			break;
		}
	}

	return vm_crtc;
}

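/*
 * Primary-VM side of a VM_REQ_ACQUIRE commit: re-enable the MDSS interrupt
 * line, clear stale IRQ status, restore encoder IRQs and ESD status work,
 * turn vblank events back on, and invoke the client's post-acquire hook.
 */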
int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *ddev;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct sde_vm_ops *vm_ops;
	struct sde_crtc_state *cstate;
	struct drm_connector_list_iter iter;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	ddev = sde_kms->dev;

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return -EINVAL;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return 0;

	/* enable MDSS irq line */
	sde_irq_update(&sde_kms->base, true);

	/* clear the stale IRQ status bits */
	if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);

	/* enable the display path IRQ's */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_irq_control(encoder, true);
	}

	/* Schedule ESD work */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		if (drm_connector_mask(connector) & crtc->state->connector_mask)
			sde_connector_schedule_status_work(connector, true);
	drm_connector_list_iter_end(&iter);

	/* enable vblank events */
	drm_crtc_vblank_on(crtc);

	sde_dbg_set_hw_ownership_status(true);

	/* handle non-SDE pre_acquire */
	if (vm_ops->vm_client_post_acquire)
		rc = vm_ops->vm_client_post_acquire(sde_kms);

	return rc;
}

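/*
 * Trusted-VM side of a VM_REQ_ACQUIRE commit: clear stale IRQ status and
 * program the trusted-VM SIDs for all planes and the LUTDMA block before
 * the display hardware is driven from this VM.
 */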
int sde_kms_vm_trusted_prepare_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *ddev;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct sde_crtc_state *cstate;
	enum sde_crtc_vm_req vm_req;

	ddev = sde_kms->dev;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return 0;

	/* Clear the stale IRQ status bits */
	if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);

	/* Program the SID's for the trusted VM */
	list_for_each_entry(plane, &ddev->mode_config.plane_list, head)
		sde_plane_set_sid(plane, 1);

	sde_hw_set_lutdma_sid(sde_kms->hw_sid, 1);

	sde_dbg_set_hw_ownership_status(true);

	return 0;
}

static void sde_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_device *dev;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct sde_vm_ops *vm_ops;
	int i, rc;

	if (!kms)
		return;
	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	if (!dev || !dev->dev_private)
		return;
	priv = dev->dev_private;

	SDE_ATRACE_BEGIN("prepare_commit");
	rc = pm_runtime_get_sync(sde_kms->dev->dev);
	if (rc < 0) {
		SDE_ERROR("failed to enable power resources %d\n", rc);
		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
		goto end;
	}

	if (sde_kms->first_kickoff) {
		sde_power_scale_reg_bus(&priv->phandle, VOTE_INDEX_HIGH, false);
		sde_kms->first_kickoff = false;
	}

	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		drm_for_each_encoder_mask(encoder, dev, cstate->encoder_mask) {
			if (sde_encoder_prepare_commit(encoder) == -ETIMEDOUT) {
				SDE_ERROR("crtc:%d, initiating hw reset\n",
						DRMID(crtc));
				sde_encoder_needs_hw_reset(encoder);
				sde_crtc_set_needs_hw_reset(crtc);
			}
		}
	}

	/*
	 * NOTE: for secure use cases we want to apply the new HW
	 * configuration only after completing preparation for secure
	 * transitions; prepare below if any transition is required.
	 */
	sde_kms_prepare_secure_transition(kms, state);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		goto end_vm;

	if (vm_ops->vm_prepare_commit)
		vm_ops->vm_prepare_commit(sde_kms, state);

end_vm:
	_sde_kms_drm_check_dpms(state, true);
end:
	SDE_ATRACE_END("prepare_commit");
}

static void sde_kms_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state)
		return;
	sde_kms = to_sde_kms(kms);

	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_commit");
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active) {
			SDE_EVT32(DRMID(crtc), old_state);
			sde_crtc_commit_kickoff(crtc, old_crtc_state);
		}
	}
	SDE_ATRACE_END("sde_kms_commit");
}

static void _sde_kms_free_splash_display_data(struct sde_kms *sde_kms,
		struct sde_splash_display *splash_display)
{
	if (!sde_kms || !splash_display ||
			!sde_kms->splash_data.num_splash_displays)
		return;

	if (sde_kms->splash_data.num_splash_regions) {
		_sde_kms_splash_mem_put(sde_kms, splash_display->splash);
		if (splash_display->demura)
			_sde_kms_splash_mem_put(sde_kms,
					splash_display->demura);
	}

	sde_kms->splash_data.num_splash_displays--;
	SDE_DEBUG("cont_splash handoff done, remaining:%d\n",
			sde_kms->splash_data.num_splash_displays);

	memset(splash_display, 0x0, sizeof(struct sde_splash_display));
}

static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms,
		struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;
	struct sde_splash_display *splash_display;
	int i;

	if (!sde_kms || !crtc)
		return;
	priv = sde_kms->dev->dev_private;

	if (!crtc->state->active || !sde_kms->splash_data.num_splash_displays)
		return;

	SDE_EVT32(DRMID(crtc), crtc->state->active,
			sde_kms->splash_data.num_splash_displays);

	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
		splash_display = &sde_kms->splash_data.splash_display[i];
		if (splash_display->encoder &&
				crtc == splash_display->encoder->crtc)
			break;
	}

	if (i >= MAX_DSI_DISPLAYS)
		return;

	if (splash_display->cont_splash_enabled) {
		sde_encoder_update_caps_for_cont_splash(splash_display->encoder,
				splash_display, false);
		_sde_kms_free_splash_display_data(sde_kms, splash_display);
	}

	/* remove the votes if all displays are done with splash */
	if (!sde_kms->splash_data.num_splash_displays) {
		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
			sde_power_data_bus_set_quota(&priv->phandle, i,
					SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
					priv->phandle.ib_quota[i] ?
					priv->phandle.ib_quota[i] :
					SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);

		pm_runtime_put_sync(sde_kms->dev->dev);
	}
}
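/**
 * sde_kms_cancel_delayed_work - cancel pending CRTC, ESD and idle-PC work
 * for the given crtc and its connectors/encoders
 * @crtc: Pointer to the crtc whose delayed work should be cancelled
 */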
static void sde_kms_cancel_delayed_work(struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_encoder *encoder;

	/* Cancel CRTC work */
	sde_crtc_cancel_delayed_work(crtc);

	/* Cancel ESD work */
	drm_connector_list_iter_begin(crtc->dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		if (drm_connector_mask(connector) & crtc->state->connector_mask)
			sde_connector_schedule_status_work(connector, false);
	drm_connector_list_iter_end(&iter);

	/* Cancel Idle-PC work */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_cancel_delayed_work(encoder);
	}
}
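/**
 * sde_kms_vm_pre_release - quiesce the display pipeline before releasing
 * HW ownership to another VM: wait for the pending frame, cancel delayed
 * work and disable encoder IRQs
 * @sde_kms: Pointer to sde kms structure
 * @state: Pointer to the atomic state carrying the VM request
 * @is_primary: true when releasing on behalf of the primary VM
 * Returns: Zero on success
 */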
int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
		struct drm_atomic_state *state, bool is_primary)
{
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	int rc = 0;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;
	/* once a vm_req is enabled, the CRTC on this commit is guaranteed to be on */
	sde_kms_wait_for_frame_transfer_complete(&sde_kms->base, crtc);

	sde_dbg_set_hw_ownership_status(false);

	sde_kms_cancel_delayed_work(crtc);

	/* disable SDE encoder irq's */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_irq_control(encoder, false);
	}

	if (is_primary) {
		/* disable vblank events */
		drm_crtc_vblank_off(crtc);

		/* reset sw state */
		sde_crtc_reset_sw_state(crtc);
	}

	return rc;
}
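/**
 * sde_kms_vm_trusted_post_commit - release HW from the trusted VM after a
 * commit carrying a VM_REQ_RELEASE request: clear plane/LUTDMA SIDs and
 * invoke the vm_release hook
 * @sde_kms: Pointer to sde kms structure
 * @state: Pointer to the committed atomic state
 * Returns: Zero on success
 */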
int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct sde_vm_ops *vm_ops;
	struct drm_device *ddev;
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct sde_crtc_state *cstate;
	struct drm_crtc_state *new_cstate;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	if (!sde_kms || !sde_vm_is_enabled(sde_kms))
		return -EINVAL;

	vm_ops = sde_vm_get_ops(sde_kms);
	ddev = sde_kms->dev;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_RELEASE)
		return 0;

	sde_kms_vm_pre_release(sde_kms, state, false);

	list_for_each_entry(plane, &ddev->mode_config.plane_list, head)
		sde_plane_set_sid(plane, 0);

	sde_hw_set_lutdma_sid(sde_kms->hw_sid, 0);

	sde_vm_lock(sde_kms);

	if (vm_ops->vm_release)
		rc = vm_ops->vm_release(sde_kms);

	sde_vm_unlock(sde_kms);

	return rc;
}
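/**
 * sde_kms_vm_primary_post_commit - hand HW over from the primary VM after a
 * commit carrying a VM_REQ_RELEASE request, including color processing
 * handoff, client pre-release and IRQ-line disable
 * @sde_kms: Pointer to sde kms structure
 * @state: Pointer to the committed atomic state
 * Returns: Zero on success
 */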
int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct sde_vm_ops *vm_ops;
	struct sde_crtc_state *cstate;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	if (!sde_kms || !sde_vm_is_enabled(sde_kms))
		return -EINVAL;

	vm_ops = sde_vm_get_ops(sde_kms);

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_RELEASE)
		return 0;

	/* handle SDE pre-release */
	rc = sde_kms_vm_pre_release(sde_kms, state, true);
	if (rc) {
		SDE_ERROR("sde vm pre_release failed, rc=%d\n", rc);
		goto exit;
	}

	/* properly handoff color processing features */
	sde_cp_crtc_vm_primary_handoff(crtc);

	sde_vm_lock(sde_kms);

	/* handle non-SDE clients pre-release */
	if (vm_ops->vm_client_pre_release) {
		rc = vm_ops->vm_client_pre_release(sde_kms);
		if (rc) {
			SDE_ERROR("sde vm client pre_release failed, rc=%d\n",
					rc);
			sde_vm_unlock(sde_kms);
			goto exit;
		}
	}

	/* disable IRQ line */
	sde_irq_update(&sde_kms->base, false);

	/* release HW */
	if (vm_ops->vm_release) {
		rc = vm_ops->vm_release(sde_kms);
		if (rc)
			SDE_ERROR("sde vm release failed, rc=%d\n", rc);
	}
	sde_vm_unlock(sde_kms);

	_sde_crtc_vm_release_notify(crtc);

exit:
	return rc;
}
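/**
 * sde_kms_complete_commit - finish the commit: complete CRTC commits and any
 * pending secure transition, run connector post-kickoff hooks, notify the
 * VM layer and drop the power vote taken in prepare_commit
 * @kms: Pointer to kms structure
 * @old_state: Pointer to the atomic state that was just committed
 */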
static void sde_kms_complete_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state;
	struct msm_display_conn_params params;
	struct sde_vm_ops *vm_ops;
	int i, rc = 0;

	if (!kms || !old_state)
		return;
	sde_kms = to_sde_kms(kms);

	if (!sde_kms->dev || !sde_kms->dev->dev_private)
		return;
	priv = sde_kms->dev->dev_private;

	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_complete_commit");

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		sde_crtc_complete_commit(crtc, old_crtc_state);

		/* complete secure transitions if any */
		if (sde_kms->smmu_state.transition_type == POST_COMMIT)
			_sde_kms_secure_ctrl(sde_kms, crtc, true);
	}

	for_each_old_connector_in_state(old_state, connector,
			old_conn_state, i) {
		struct sde_connector *c_conn;

		c_conn = to_sde_connector(connector);
		if (!c_conn->ops.post_kickoff)
			continue;

		memset(&params, 0, sizeof(params));

		sde_connector_complete_qsync_commit(connector, &params);

		rc = c_conn->ops.post_kickoff(connector, &params);
		if (rc) {
			pr_err("Connector Post kickoff failed rc=%d\n",
					rc);
		}
	}

	vm_ops = sde_vm_get_ops(sde_kms);
	if (vm_ops && vm_ops->vm_post_commit) {
		rc = vm_ops->vm_post_commit(sde_kms, old_state);
		if (rc)
			SDE_ERROR("vm post commit failed, rc = %d\n",
					rc);
	}

	_sde_kms_drm_check_dpms(old_state, false);

	pm_runtime_put_sync(sde_kms->dev->dev);

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
		_sde_kms_release_splash_resource(sde_kms, crtc);

	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
	SDE_ATRACE_END("sde_kms_complete_commit");
}
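/**
 * sde_kms_wait_for_commit_done - wait for the commit to land on a crtc,
 * including on CWB encoders that are being disabled by this commit
 * @kms: Pointer to kms structure
 * @crtc: Pointer to the crtc to wait on
 */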
static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;
	bool cwb_disabling;

	if (!kms || !crtc || !crtc->state) {
		SDE_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;
	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
		return;
	}
	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_wait_for_commit_done");
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		cwb_disabling = false;
		if (encoder->crtc != crtc) {
			cwb_disabling = sde_encoder_is_cwb_disabling(encoder,
					crtc);
			if (!cwb_disabling)
				continue;
		}

		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR("wait for commit done returned %d\n", ret);
			sde_crtc_request_frame_reset(crtc, encoder);
			break;
		}

		sde_crtc_complete_flip(crtc, NULL);

		if (cwb_disabling)
			sde_encoder_virt_reset(encoder);
	}

	sde_crtc_static_cache_read_kickoff(crtc);
  1319. SDE_ATRACE_END("sde_ksm_wait_for_commit_done");
  1320. }
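/**
 * sde_kms_prepare_fence - prepare CRTC commit state, including output
 * fences, for every CRTC that is active or changing its active state
 * @kms: Pointer to kms structure
 * @old_state: Pointer to the atomic state being committed
 */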
static void sde_kms_prepare_fence(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
		SDE_ERROR("invalid argument(s)\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_prepare_fence");

	/* old_state actually contains updated crtc pointers */
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active || crtc->state->active_changed)
			sde_crtc_prepare_commit(crtc, old_crtc_state);
	}

	SDE_ATRACE_END("sde_kms_prepare_fence");
}
/**
 * _sde_kms_get_displays - query for underlying display handles and cache them
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 */
static int _sde_kms_get_displays(struct sde_kms *sde_kms)
{
	int rc = -ENOMEM;

	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return -EINVAL;
	}

	/* dsi */
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
	if (sde_kms->dsi_display_count) {
		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->dsi_displays) {
			SDE_ERROR("failed to allocate dsi displays\n");
			goto exit_deinit_dsi;
		}
		sde_kms->dsi_display_count =
			dsi_display_get_active_displays(sde_kms->dsi_displays,
					sde_kms->dsi_display_count);
	}

	/* wb */
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
	if (sde_kms->wb_display_count) {
		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->wb_displays) {
			SDE_ERROR("failed to allocate wb displays\n");
			goto exit_deinit_wb;
		}
		sde_kms->wb_display_count =
			wb_display_get_displays(sde_kms->wb_displays,
					sde_kms->wb_display_count);
	}

	/* dp */
	sde_kms->dp_displays = NULL;
	sde_kms->dp_display_count = dp_display_get_num_of_displays();
	if (sde_kms->dp_display_count) {
		sde_kms->dp_displays = kcalloc(sde_kms->dp_display_count,
				sizeof(void *), GFP_KERNEL);
		if (!sde_kms->dp_displays) {
			SDE_ERROR("failed to allocate dp displays\n");
			goto exit_deinit_dp;
		}
		sde_kms->dp_display_count =
			dp_display_get_displays(sde_kms->dp_displays,
					sde_kms->dp_display_count);
		sde_kms->dp_stream_count = dp_display_get_num_of_streams();
	}

	return 0;

exit_deinit_dp:
	kfree(sde_kms->dp_displays);
	sde_kms->dp_stream_count = 0;
	sde_kms->dp_display_count = 0;
	sde_kms->dp_displays = NULL;

exit_deinit_wb:
	kfree(sde_kms->wb_displays);
	sde_kms->wb_display_count = 0;
	sde_kms->wb_displays = NULL;

exit_deinit_dsi:
	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_display_count = 0;
	sde_kms->dsi_displays = NULL;

	return rc;
}
/**
 * _sde_kms_release_displays - release cache of underlying display handles
 * @sde_kms: Pointer to sde kms structure
 */
static void _sde_kms_release_displays(struct sde_kms *sde_kms)
{
	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return;
	}

	kfree(sde_kms->wb_displays);
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = 0;

	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = 0;
}
/**
 * _sde_kms_setup_displays - create encoders, bridges and connectors
 * for underlying displays
 * @dev: Pointer to drm device structure
 * @priv: Pointer to private drm device data
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 */
static int _sde_kms_setup_displays(struct drm_device *dev,
		struct msm_drm_private *priv,
		struct sde_kms *sde_kms)
{
	static const struct sde_connector_ops dsi_ops = {
		.set_info_blob = dsi_conn_set_info_blob,
		.detect = dsi_conn_detect,
		.get_modes = dsi_connector_get_modes,
		.pre_destroy = dsi_connector_put_modes,
		.mode_valid = dsi_conn_mode_valid,
		.get_info = dsi_display_get_info,
		.set_backlight = dsi_display_set_backlight,
		.soft_reset = dsi_display_soft_reset,
		.pre_kickoff = dsi_conn_pre_kickoff,
		.clk_ctrl = dsi_display_clk_ctrl,
		.set_power = dsi_display_set_power,
		.get_mode_info = dsi_conn_get_mode_info,
		.get_dst_format = dsi_display_get_dst_format,
		.post_kickoff = dsi_conn_post_kickoff,
		.check_status = dsi_display_check_status,
		.enable_event = dsi_conn_enable_event,
		.cmd_transfer = dsi_display_cmd_transfer,
		.cont_splash_config = dsi_display_cont_splash_config,
		.cont_splash_res_disable = dsi_display_cont_splash_res_disable,
		.get_panel_vfp = dsi_display_get_panel_vfp,
		.get_default_lms = dsi_display_get_default_lms,
		.cmd_receive = dsi_display_cmd_receive,
		.install_properties = NULL,
		.set_allowed_mode_switch = dsi_conn_set_allowed_mode_switch,
		.set_dyn_bit_clk = dsi_conn_set_dyn_bit_clk,
		.get_qsync_min_fps = dsi_conn_get_qsync_min_fps,
		.get_avr_step_req = dsi_display_get_avr_step_req_fps,
		.prepare_commit = dsi_conn_prepare_commit,
		.set_submode_info = dsi_conn_set_submode_blob_info,
		.get_num_lm_from_mode = dsi_conn_get_lm_from_mode,
	};
	static const struct sde_connector_ops wb_ops = {
		.post_init = sde_wb_connector_post_init,
		.set_info_blob = sde_wb_connector_set_info_blob,
		.detect = sde_wb_connector_detect,
		.get_modes = sde_wb_connector_get_modes,
		.set_property = sde_wb_connector_set_property,
		.get_info = sde_wb_get_info,
		.soft_reset = NULL,
		.get_mode_info = sde_wb_get_mode_info,
		.get_dst_format = NULL,
		.check_status = NULL,
		.cmd_transfer = NULL,
		.cont_splash_config = NULL,
		.cont_splash_res_disable = NULL,
		.get_panel_vfp = NULL,
		.cmd_receive = NULL,
		.install_properties = NULL,
		.set_dyn_bit_clk = NULL,
		.set_allowed_mode_switch = NULL,
	};
	static const struct sde_connector_ops dp_ops = {
		.post_init = dp_connector_post_init,
		.detect = dp_connector_detect,
		.get_modes = dp_connector_get_modes,
		.atomic_check = dp_connector_atomic_check,
		.mode_valid = dp_connector_mode_valid,
		.get_info = dp_connector_get_info,
		.get_mode_info = dp_connector_get_mode_info,
		.post_open = dp_connector_post_open,
		.check_status = NULL,
		.set_colorspace = dp_connector_set_colorspace,
		.config_hdr = dp_connector_config_hdr,
		.cmd_transfer = NULL,
		.cont_splash_config = NULL,
		.cont_splash_res_disable = NULL,
		.get_panel_vfp = NULL,
		.update_pps = dp_connector_update_pps,
		.cmd_receive = NULL,
		.install_properties = dp_connector_install_properties,
		.set_allowed_mode_switch = NULL,
		.set_dyn_bit_clk = NULL,
	};
	struct msm_display_info info;
	struct drm_encoder *encoder;
	void *display, *connector;
	int i, max_encoders;
	int rc = 0;
	u32 dsc_count = 0, mixer_count = 0;
	u32 max_dp_dsc_count, max_dp_mixer_count;

	if (!dev || !priv || !sde_kms) {
		SDE_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count +
			sde_kms->dp_display_count +
			sde_kms->dp_stream_count;
	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
		max_encoders = ARRAY_SIZE(priv->encoders);
		SDE_ERROR("capping number of displays to %d\n", max_encoders);
	}
	/* wb */
	for (i = 0; i < sde_kms->wb_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->wb_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = sde_wb_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("wb get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for wb %d\n", i);
			continue;
		}

		rc = sde_wb_drm_init(display, encoder);
		if (rc) {
			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				0,
				display,
				&wb_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_VIRTUAL);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("wb %d connector init failed\n", i);
			sde_wb_drm_deinit(display);
			sde_encoder_destroy(encoder);
		}
	}

	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->dsi_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for dsi %d\n", i);
			continue;
		}

		rc = dsi_display_drm_bridge_init(display, encoder);
		if (rc) {
			SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				dsi_display_get_drm_panel(display),
				display,
				&dsi_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_DSI);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("dsi %d connector init failed\n", i);
			dsi_display_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
			continue;
		}
		rc = dsi_display_drm_ext_bridge_init(display,
				encoder, connector);
		if (rc) {
			SDE_ERROR("dsi %d ext bridge init failed, %d\n", i, rc);
			dsi_display_drm_bridge_deinit(display);
			sde_connector_destroy(connector);
			sde_encoder_destroy(encoder);
		}
		dsc_count += info.dsc_count;
		mixer_count += info.lm_count;

		if (dsi_display_has_dsc_switch_support(display))
			sde_kms->dsc_switch_support = true;
	}

	if (sde_kms->catalog->allowed_dsc_reservation_switch &&
			!sde_kms->dsc_switch_support) {
		SDE_DEBUG("dsc switch not supported\n");
		sde_kms->catalog->allowed_dsc_reservation_switch = 0;
	}

	max_dp_mixer_count = sde_kms->catalog->mixer_count > mixer_count ?
			sde_kms->catalog->mixer_count - mixer_count : 0;
	max_dp_dsc_count = sde_kms->catalog->dsc_count > dsc_count ?
			sde_kms->catalog->dsc_count - dsc_count : 0;

	if (sde_kms->catalog->allowed_dsc_reservation_switch &
			SDE_DP_DSC_RESERVATION_SWITCH)
		max_dp_dsc_count = sde_kms->catalog->dsc_count;

	/* dp */
	for (i = 0; i < sde_kms->dp_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		int idx;

		display = sde_kms->dp_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dp_connector_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dp get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("dp encoder init failed %d\n", i);
			continue;
		}

		rc = dp_drm_bridge_init(display, encoder,
				max_dp_mixer_count, max_dp_dsc_count);
		if (rc) {
			SDE_ERROR("dp bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				NULL,
				display,
				&dp_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_DisplayPort);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("dp %d connector init failed\n", i);
			dp_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
		}

		/* update display cap to MST_MODE for DP MST encoders */
		info.capabilities |= MSM_DISPLAY_CAP_MST_MODE;

		for (idx = 0; idx < sde_kms->dp_stream_count &&
				priv->num_encoders < max_encoders; idx++) {
			info.h_tile_instance[0] = idx;
			encoder = sde_encoder_init(dev, &info);
			if (IS_ERR_OR_NULL(encoder)) {
				SDE_ERROR("dp mst encoder init failed %d\n", i);
				continue;
			}

			rc = dp_mst_drm_bridge_init(display, encoder);
			if (rc) {
				SDE_ERROR("dp mst bridge %d init failed, %d\n",
						i, rc);
				sde_encoder_destroy(encoder);
				continue;
			}
			priv->encoders[priv->num_encoders++] = encoder;
		}
	}

	return 0;
}
static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;
	int i;

	if (!sde_kms) {
		SDE_ERROR("invalid sde_kms\n");
		return;
	} else if (!sde_kms->dev) {
		SDE_ERROR("invalid dev\n");
		return;
	} else if (!sde_kms->dev->dev_private) {
		SDE_ERROR("invalid dev_private\n");
		return;
	}
	priv = sde_kms->dev->dev_private;

	for (i = 0; i < priv->num_crtcs; i++)
		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
	priv->num_crtcs = 0;

	for (i = 0; i < priv->num_planes; i++)
		priv->planes[i]->funcs->destroy(priv->planes[i]);
	priv->num_planes = 0;

	for (i = 0; i < priv->num_connectors; i++)
		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
	priv->num_connectors = 0;

	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
	priv->num_encoders = 0;

	_sde_kms_release_displays(sde_kms);
}
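/**
 * _sde_kms_drm_obj_init - create the drm objects (planes, crtcs, encoders
 * and connectors) backing the displays described by the catalog
 * @sde_kms: Pointer to sde kms structure
 * Returns: Zero on success
 */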
static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_crtc *crtc;
	struct msm_drm_private *priv;
	struct sde_mdss_cfg *catalog;
	int primary_planes_idx = 0, i, ret;
	int max_crtc_count;
	u32 sspp_id[MAX_PLANES];
	u32 master_plane_id[MAX_PLANES];
	u32 num_virt_planes = 0;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	catalog = sde_kms->catalog;

	ret = sde_core_irq_domain_add(sde_kms);
	if (ret)
		goto fail_irq;

	/*
	 * Query for underlying display drivers, and create connectors,
	 * bridges and encoders for them.
	 */
	if (!_sde_kms_get_displays(sde_kms))
		(void)_sde_kms_setup_displays(dev, priv, sde_kms);

	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);

	/* Create the planes */
	for (i = 0; i < catalog->sspp_count; i++) {
		bool primary = true;

		if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
			|| primary_planes_idx >= max_crtc_count)
			primary = false;

		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
				(1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (primary)
			primary_planes[primary_planes_idx++] = plane;

		if (sde_hw_sspp_multirect_enabled(&catalog->sspp[i]) &&
				sde_is_custom_client()) {
			int priority =
				catalog->sspp[i].sblk->smart_dma_priority;
			sspp_id[priority - 1] = catalog->sspp[i].id;
			master_plane_id[priority - 1] = plane->base.id;
			num_virt_planes++;
		}
	}

	/* Initialize smart DMA virtual planes */
	for (i = 0; i < num_virt_planes; i++) {
		plane = sde_plane_init(dev, sspp_id[i], false,
				(1UL << max_crtc_count) - 1, master_plane_id[i]);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane for virtual SSPP init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = sde_crtc_init(dev, primary_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	if (sde_is_custom_client()) {
		/* All CRTCs are compatible with all planes */
		for (i = 0; i < priv->num_planes; i++)
			priv->planes[i]->possible_crtcs =
				(1 << priv->num_crtcs) - 1;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;

fail:
	_sde_kms_drm_obj_destroy(sde_kms);
fail_irq:
	sde_core_irq_domain_fini(sde_kms);
	return ret;
}
/**
 * sde_kms_timeline_status - provides current timeline status
 * This API should be called without mode config lock.
 * @dev: Pointer to drm device
 */
void sde_kms_timeline_status(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;

	if (!dev) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}

	drm_for_each_crtc(crtc, dev)
		sde_crtc_timeline_status(crtc);

	if (mutex_is_locked(&dev->mode_config.mutex)) {
		/*
		 * Probably locked from the last close; dump the status anyway
		 */
  1835. SDE_ERROR("dumping conn_timeline without mode_config lock\n");
  1836. drm_connector_list_iter_begin(dev, &conn_iter);
  1837. drm_for_each_connector_iter(conn, &conn_iter)
  1838. sde_conn_timeline_status(conn);
  1839. drm_connector_list_iter_end(&conn_iter);
  1840. return;
  1841. }
  1842. mutex_lock(&dev->mode_config.mutex);
  1843. drm_connector_list_iter_begin(dev, &conn_iter);
  1844. drm_for_each_connector_iter(conn, &conn_iter)
  1845. sde_conn_timeline_status(conn);
  1846. drm_connector_list_iter_end(&conn_iter);
  1847. mutex_unlock(&dev->mode_config.mutex);
  1848. }
static int sde_kms_postinit(struct msm_kms *kms)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev;
	struct drm_crtc *crtc;
	int rc;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;

	rc = _sde_debugfs_init(sde_kms);
	if (rc)
		SDE_ERROR("sde_debugfs init failed: %d\n", rc);

	drm_for_each_crtc(crtc, dev)
		sde_crtc_post_init(dev, crtc);

	return rc;
}
static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
		struct platform_device *pdev)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_vm_ops *vm_ops;
	int i;

	if (!sde_kms || !pdev)
		return;

	dev = sde_kms->dev;
	if (!dev)
		return;

	priv = dev->dev_private;
	if (!priv)
		return;

	if (sde_kms->genpd_init) {
		sde_kms->genpd_init = false;
		pm_genpd_remove(&sde_kms->genpd);
		of_genpd_del_provider(pdev->dev.of_node);
	}

	vm_ops = sde_vm_get_ops(sde_kms);
	if (vm_ops && vm_ops->vm_deinit)
		vm_ops->vm_deinit(sde_kms, vm_ops);

	if (sde_kms->hw_intr)
		sde_hw_intr_destroy(sde_kms->hw_intr);
	sde_kms->hw_intr = NULL;

	if (sde_kms->power_event)
		sde_power_handle_unregister_event(
				&priv->phandle, sde_kms->power_event);

	_sde_kms_release_displays(sde_kms);

	_sde_kms_unmap_all_splash_regions(sde_kms);

	if (sde_kms->catalog) {
		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = sde_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
		}
	}

	if (sde_kms->rm_init)
		sde_rm_destroy(&sde_kms->rm);
	sde_kms->rm_init = false;

	if (sde_kms->catalog)
		sde_hw_catalog_deinit(sde_kms->catalog);
	sde_kms->catalog = NULL;

	if (sde_kms->sid)
		msm_iounmap(pdev, sde_kms->sid);
	sde_kms->sid = NULL;

	if (sde_kms->reg_dma)
		msm_iounmap(pdev, sde_kms->reg_dma);
	sde_kms->reg_dma = NULL;

	if (sde_kms->vbif[VBIF_NRT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
	sde_kms->vbif[VBIF_NRT] = NULL;

	if (sde_kms->vbif[VBIF_RT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
	sde_kms->vbif[VBIF_RT] = NULL;

	if (sde_kms->mmio)
		msm_iounmap(pdev, sde_kms->mmio);
	sde_kms->mmio = NULL;

	sde_reg_dma_deinit();
	_sde_kms_mmu_destroy(sde_kms);
}
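/**
 * sde_kms_mmu_detach - detach the SMMU domains from their address spaces,
 * cleaning up each address space before the detach
 * @sde_kms: Pointer to sde kms structure
 * @secure_only: detach only the secure domains when true
 * Returns: Zero on success
 */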
int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only)
{
	int i;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_mmu *mmu;
		struct msm_gem_address_space *aspace = sde_kms->aspace[i];

		if (!aspace)
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		if (secure_only &&
			!aspace->mmu->funcs->is_domain_secure(mmu))
			continue;

		/* cleanup aspace before detaching */
		msm_gem_aspace_domain_attach_detach_update(aspace, true);

		SDE_DEBUG("Detaching domain:%d\n", i);
		aspace->mmu->funcs->detach(mmu, (const char **)iommu_ports,
			ARRAY_SIZE(iommu_ports));

		aspace->domain_attached = false;
	}

	return 0;
}
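/**
 * sde_kms_mmu_attach - attach the SMMU domains to their address spaces and
 * update the GEM objects mapped in each
 * @sde_kms: Pointer to sde kms structure
 * @secure_only: attach only the secure domains when true
 * Returns: Zero on success
 */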
int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only)
{
	int i;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_mmu *mmu;
		struct msm_gem_address_space *aspace = sde_kms->aspace[i];

		if (!aspace)
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		if (secure_only &&
			!aspace->mmu->funcs->is_domain_secure(mmu))
			continue;

		SDE_DEBUG("Attaching domain:%d\n", i);
		aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
			ARRAY_SIZE(iommu_ports));

		aspace->domain_attached = true;
		msm_gem_aspace_domain_attach_detach_update(aspace, false);
	}

	return 0;
}
static void sde_kms_destroy(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->dev) {
		SDE_ERROR("invalid device\n");
		return;
	}

	_sde_kms_hw_destroy(sde_kms, to_platform_device(dev->dev));
	kfree(sde_kms);
}
static void sde_kms_helper_clear_dim_layers(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state = NULL;
	struct sde_crtc_state *c_state;

	if (!state || !crtc) {
		SDE_ERROR("invalid params\n");
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	c_state = to_sde_crtc_state(crtc_state);

	_sde_crtc_clear_dim_layers_v1(crtc_state);
	set_bit(SDE_CRTC_DIRTY_DIM_LAYERS, c_state->dirty);
}
static int sde_kms_set_crtc_for_conn(struct drm_device *dev,
		struct drm_encoder *enc, struct drm_atomic_state *state)
{
	struct drm_connector *conn = NULL;
	struct drm_connector *tmp_conn = NULL;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_connector_state *conn_state = NULL;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(tmp_conn, &conn_iter) {
		if (enc == tmp_conn->state->best_encoder) {
			conn = tmp_conn;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!conn || !enc->crtc) {
		SDE_ERROR("invalid params for enc:%d\n", DRMID(enc));
		return -EINVAL;
	}

	crtc_state = drm_atomic_get_crtc_state(state, enc->crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		SDE_ERROR("error %d getting crtc %d state\n",
				ret, DRMID(enc->crtc));
		return ret;
	}

	conn_state = drm_atomic_get_connector_state(state, conn);
	if (IS_ERR(conn_state)) {
		ret = PTR_ERR(conn_state);
		SDE_ERROR("error %d getting connector %d state\n",
				ret, DRMID(conn));
		return ret;
	}

	crtc_state->active = true;
	ret = drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
	if (ret)
		SDE_ERROR("error %d setting the crtc\n", ret);

	return ret;
}
static void _sde_kms_plane_force_remove(struct drm_plane *plane,
		struct drm_atomic_state *state)
{
	struct drm_plane_state *plane_state;
	int ret = 0;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		SDE_ERROR("error %d getting plane %d state\n",
				ret, plane->base.id);
		return;
	}

	plane->old_fb = plane->fb;

	SDE_DEBUG("disabling plane %d\n", plane->base.id);

	ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
	if (ret != 0)
		SDE_ERROR("error %d disabling plane %d\n", ret,
				plane->base.id);

	drm_atomic_set_fb_for_plane(plane_state, NULL);
}
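/**
 * _sde_kms_remove_fbs - force-remove the framebuffers of a closing file from
 * all planes and issue a null commit on the affected crtcs
 * @sde_kms: Pointer to sde kms structure
 * @file: Pointer to the drm file being closed
 * @state: Pointer to a pre-allocated atomic state for the null commit
 * Returns: Zero on success
 */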
static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_framebuffer *fb, *tfb;
	struct list_head fbs;
	struct drm_plane *plane;
	struct drm_crtc *crtc = NULL;
	unsigned int crtc_mask = 0;
	int ret = 0;

	INIT_LIST_HEAD(&fbs);

	list_for_each_entry_safe(fb, tfb, &file->fbs, filp_head) {
		if (drm_framebuffer_read_refcount(fb) > 1) {
			list_move_tail(&fb->filp_head, &fbs);

			drm_for_each_plane(plane, dev) {
				if (plane->state && plane->state->fb == fb) {
					if (plane->state->crtc)
						crtc_mask |= drm_crtc_mask(plane->state->crtc);
					_sde_kms_plane_force_remove(plane, state);
				}
			}
		} else {
			list_del_init(&fb->filp_head);
			drm_framebuffer_put(fb);
		}
	}

	if (list_empty(&fbs)) {
		SDE_DEBUG("skip commit as no fb(s)\n");
		return 0;
	}

	drm_for_each_crtc(crtc, dev) {
		if ((crtc_mask & drm_crtc_mask(crtc)) && crtc->state->active) {
			struct drm_encoder *drm_enc;

			drm_for_each_encoder_mask(drm_enc, crtc->dev,
					crtc->state->encoder_mask) {
				ret = sde_kms_set_crtc_for_conn(dev, drm_enc, state);
				if (ret)
					goto error;
			}

			sde_kms_helper_clear_dim_layers(state, crtc);
		}
	}

	SDE_EVT32(state, crtc_mask);
	SDE_DEBUG("null commit after removing all the pipes\n");
	ret = drm_atomic_commit(state);

error:
	if (ret) {
		/*
		 * move the fbs back to original list, so it would be
		 * handled during drm_release
		 */
		list_for_each_entry_safe(fb, tfb, &fbs, filp_head)
			list_move_tail(&fb->filp_head, &file->fbs);

		if (ret == -EDEADLK || ret == -ERESTARTSYS)
			SDE_DEBUG("atomic commit failed in preclose, ret:%d\n", ret);
		else
			SDE_ERROR("atomic commit failed in preclose, ret:%d\n", ret);
		goto end;
	}

	while (!list_empty(&fbs)) {
		fb = list_first_entry(&fbs, typeof(*fb), filp_head);

		list_del_init(&fb->filp_head);
		drm_framebuffer_put(fb);
	}

end:
	return ret;
}
static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int i;
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	int ret = 0;

	/* cancel pending flip event */
	for (i = 0; i < priv->num_crtcs; i++)
		sde_crtc_complete_flip(priv->crtcs[i], file);

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto end;
	}

	state->acquire_ctx = &ctx;

	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
		ret = _sde_kms_remove_fbs(sde_kms, file, state);
		if (ret != -EDEADLK && ret != -ERESTARTSYS)
			break;
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
	}

end:
	if (state)
		drm_atomic_state_put(state);

	SDE_DEBUG("sde preclose done, ret:%d\n", ret);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
static int _sde_kms_helper_reset_custom_properties(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	int ret = 0;

	drm_for_each_plane(plane, dev) {
		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			SDE_ERROR("error %d getting plane %d state\n",
					ret, DRMID(plane));
			return ret;
		}

		ret = sde_plane_helper_reset_custom_properties(plane,
				plane_state);
		if (ret) {
			SDE_ERROR("error %d resetting plane props %d\n",
					ret, DRMID(plane));
			return ret;
		}
	}

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			SDE_ERROR("error %d getting crtc %d state\n",
					ret, DRMID(crtc));
			return ret;
		}

		ret = sde_crtc_helper_reset_custom_properties(crtc, crtc_state);
		if (ret) {
			SDE_ERROR("error %d resetting crtc props %d\n",
					ret, DRMID(crtc));
			return ret;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			SDE_ERROR("error %d getting connector %d state\n",
					ret, DRMID(conn));
			return ret;
		}

		ret = sde_connector_helper_reset_custom_properties(conn,
				conn_state);
		if (ret) {
			SDE_ERROR("error %d resetting connector props %d\n",
					ret, DRMID(conn));
			return ret;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
static void sde_kms_lastclose(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!kms) {
		SDE_ERROR("invalid argument\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_ctx;
	}

	state->acquire_ctx = &ctx;
	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret)
		goto out_state;

	ret = _sde_kms_helper_reset_custom_properties(sde_kms, state);
	if (ret)
		goto out_state;

	ret = drm_atomic_commit(state);

out_state:
	if (ret == -EDEADLK)
		goto backoff;

	drm_atomic_state_put(state);

out_ctx:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		SDE_ERROR("kms lastclose failed: %d\n", ret);

	SDE_EVT32(ret, SDE_EVTLOG_FUNC_EXIT);
	return;

backoff:
	drm_atomic_state_clear(state);
	drm_modeset_backoff(&ctx);
	SDE_EVT32(ret, SDE_EVTLOG_FUNC_CASE1);

	goto retry;
}
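/**
 * _sde_kms_validate_vm_request - sanity-check an active VM transition and,
 * for an acquire request, take HW ownership when this VM does not own it yet
 * @state: Pointer to the atomic state being checked
 * @sde_kms: Pointer to sde kms structure
 * @vm_req: The VM request found on the committing crtc
 * @vm_owns_hw: true when this VM currently owns the display HW
 * Returns: Zero on success
 */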
static int _sde_kms_validate_vm_request(struct drm_atomic_state *state, struct sde_kms *sde_kms,
		enum sde_crtc_vm_req vm_req, bool vm_owns_hw)
{
	struct drm_crtc *crtc, *active_crtc = NULL, *global_active_crtc = NULL;
	struct drm_crtc_state *new_cstate, *old_cstate, *active_cstate;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *new_connstate;
	struct sde_vm_ops *vm_ops = sde_vm_get_ops(sde_kms);
	struct sde_mdss_cfg *catalog = sde_kms->catalog;
	struct sde_connector *sde_conn;
	struct dsi_display *dsi_display;
	uint32_t i, commit_crtc_cnt = 0, global_crtc_cnt = 0;
	uint32_t crtc_encoder_cnt = 0;
	enum sde_crtc_idle_pc_state idle_pc_state;
	int rc = 0;

	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		struct sde_crtc_state *new_state = NULL;

		if (!new_cstate->active && !old_cstate->active)
			continue;

		new_state = to_sde_crtc_state(new_cstate);
		idle_pc_state = sde_crtc_get_property(new_state,
				CRTC_PROP_IDLE_PC_STATE);

		active_crtc = crtc;
		active_cstate = new_cstate;
		commit_crtc_cnt++;
	}

	list_for_each_entry(crtc, &sde_kms->dev->mode_config.crtc_list, head) {
		if (!crtc->state->active)
			continue;

		global_crtc_cnt++;
		global_active_crtc = crtc;
	}

	if (active_crtc) {
		drm_for_each_encoder_mask(encoder, active_crtc->dev,
				active_cstate->encoder_mask)
			crtc_encoder_cnt++;
	}

	for_each_new_connector_in_state(state, connector, new_connstate, i) {
		int conn_mask = active_cstate->connector_mask;

		if (drm_connector_mask(connector) & conn_mask) {
			sde_conn = to_sde_connector(connector);
			dsi_display = (struct dsi_display *)sde_conn->display;

			SDE_EVT32(DRMID(connector), DRMID(active_crtc), i,
					dsi_display->type,
					dsi_display->trusted_vm_env);
			SDE_DEBUG("VM display:%s, conn:%d, crtc:%d, type:%d, tvm:%d\n",
					dsi_display->name, DRMID(connector),
					DRMID(active_crtc), dsi_display->type,
					dsi_display->trusted_vm_env);
			break;
		}
	}

	/* Check for single crtc commits only on valid VM requests */
	if (active_crtc && global_active_crtc &&
			(commit_crtc_cnt > catalog->max_trusted_vm_displays ||
			global_crtc_cnt > catalog->max_trusted_vm_displays ||
			active_crtc != global_active_crtc)) {
		SDE_ERROR("VM switch failed; MAX:%d a_cnt:%d g_cnt:%d a_crtc:%d g_crtc:%d\n",
				catalog->max_trusted_vm_displays,
				commit_crtc_cnt, global_crtc_cnt,
				DRMID(active_crtc), DRMID(global_active_crtc));
		return -E2BIG;
	} else if ((vm_req == VM_REQ_RELEASE) &&
			((idle_pc_state == IDLE_PC_ENABLE) ||
			(crtc_encoder_cnt > TRUSTED_VM_MAX_ENCODER_PER_CRTC))) {
		/*
		 * disable idle-pc before releasing the HW;
		 * allow only the specified number of encoders on a given crtc
		 */
  2352. SDE_ERROR("VM switch failed; idle-pc:%d max:%d encoder_cnt:%d\n",
  2353. idle_pc_state, TRUSTED_VM_MAX_ENCODER_PER_CRTC, crtc_encoder_cnt);
  2354. return -EINVAL;
  2355. }
  2356. if ((vm_req == VM_REQ_ACQUIRE) && !vm_owns_hw) {
  2357. rc = vm_ops->vm_acquire(sde_kms);
  2358. if (rc) {
  2359. SDE_ERROR("VM acquire failed; hw_owner:%d, rc:%d\n", vm_owns_hw, rc);
  2360. return rc;
  2361. }
  2362. if (vm_ops->vm_resource_init)
  2363. rc = vm_ops->vm_resource_init(sde_kms, state);
  2364. }
  2365. return rc;
  2366. }
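/**
 * sde_kms_check_vm_request - validate any VM transition requested by the
 * atomic state and acquire HW ownership when needed
 * @kms: Pointer to kms structure
 * @state: Pointer to the atomic state being checked
 * Returns: Zero on success
 */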
static int sde_kms_check_vm_request(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate, *old_cstate;
	struct sde_vm_ops *vm_ops;
	enum sde_crtc_vm_req old_vm_req = VM_REQ_NONE, new_vm_req = VM_REQ_NONE;
	int i, rc = 0;
	bool vm_req_active = false, prev_vm_req = false;
	bool vm_owns_hw;

	if (!kms || !state)
		return -EINVAL;

	sde_kms = to_sde_kms(kms);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return 0;

	if (!vm_ops->vm_request_valid || !vm_ops->vm_owns_hw ||
			!vm_ops->vm_acquire)
		return -EINVAL;

	drm_for_each_crtc(crtc, state->dev) {
		if (crtc->state &&
				(sde_crtc_get_property(to_sde_crtc_state(crtc->state),
				CRTC_PROP_VM_REQ_STATE) == VM_REQ_RELEASE)) {
			prev_vm_req = true;
			break;
		}
	}

	/* check for an active vm request */
	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		struct sde_crtc_state *old_state = NULL, *new_state = NULL;

		if (!new_cstate->active && !old_cstate->active)
			continue;

		new_state = to_sde_crtc_state(new_cstate);
		new_vm_req = sde_crtc_get_property(new_state,
				CRTC_PROP_VM_REQ_STATE);

		old_state = to_sde_crtc_state(old_cstate);
		old_vm_req = sde_crtc_get_property(old_state,
				CRTC_PROP_VM_REQ_STATE);

		/*
		 * VM request should be validated in the following usecases:
		 * - There is a vm request (other than VM_REQ_NONE) on the
		 *   current/previous crtc state.
		 * - Previously, a vm transition has taken place on one of
		 *   the crtcs.
		 */
		if (old_vm_req || new_vm_req || prev_vm_req) {
			if (!vm_req_active) {
				sde_vm_lock(sde_kms);
				vm_owns_hw = sde_vm_owns_hw(sde_kms);
			}

			rc = vm_ops->vm_request_valid(sde_kms,
					old_vm_req, new_vm_req);
			if (rc) {
				SDE_ERROR(
					"VM transition check failed; o_state:%d, n_state:%d, hw_owner:%d, rc:%d\n",
					old_vm_req, new_vm_req, vm_owns_hw, rc);
				sde_vm_unlock(sde_kms);
				vm_req_active = false;
				break;
			} else if (old_vm_req == VM_REQ_ACQUIRE &&
					new_vm_req == VM_REQ_NONE) {
				SDE_DEBUG("VM transition valid; ignore further checks\n");
				if (!vm_req_active)
					sde_vm_unlock(sde_kms);
			} else {
				vm_req_active = true;
			}
		}
	}

	/* validate active requests and perform acquire if necessary */
	if (vm_req_active) {
		rc = _sde_kms_validate_vm_request(state, sde_kms,
				new_vm_req, vm_owns_hw);

		sde_vm_unlock(sde_kms);

		SDE_EVT32(old_vm_req, new_vm_req, vm_req_active, vm_owns_hw, rc);
		SDE_DEBUG("VM o_state:%d, n_state:%d, hw_owner:%d, rc:%d\n",
				old_vm_req, new_vm_req,
				vm_req_active ? vm_owns_hw : -1, rc);
	}

	return rc;
}
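/**
 * sde_kms_check_secure_transition - disallow commits that would run a
 * secure-camera/secure-ui session concurrently with any other display
 * session
 * @kms: Pointer to kms structure
 * @state: Pointer to the atomic state being checked
 * Returns: Zero on success
 */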
static int sde_kms_check_secure_transition(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct drm_crtc *crtc;
	struct drm_crtc *cur_crtc = NULL, *global_crtc = NULL;
	struct drm_crtc_state *crtc_state;
	int active_crtc_cnt = 0, global_active_crtc_cnt = 0;
	bool sec_session = false, global_sec_session = false;
	uint32_t fb_ns = 0, fb_sec = 0, fb_sec_dir = 0;
	int i;
	if (!kms || !state) {
		SDE_ERROR("invalid arguments\n");
		return -EINVAL;
	}
	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	/* iterate state object for active secure/non-secure crtc */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->active)
			continue;

		active_crtc_cnt++;
		sde_crtc_state_find_plane_fb_modes(crtc_state, &fb_ns,
				&fb_sec, &fb_sec_dir);
		if (fb_sec_dir)
			sec_session = true;
		cur_crtc = crtc;
	}

	/* iterate global list for active and secure/non-secure crtc */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->state->active)
			continue;

		global_active_crtc_cnt++;
		/* update only when crtc is not the same as current crtc */
		if (crtc != cur_crtc) {
			fb_ns = fb_sec = fb_sec_dir = 0;
			sde_crtc_find_plane_fb_modes(crtc, &fb_ns,
					&fb_sec, &fb_sec_dir);
			if (fb_sec_dir)
				global_sec_session = true;
			global_crtc = crtc;
		}
	}

	if (!global_sec_session && !sec_session)
		return 0;

	/*
	 * - fail crtc commit, if secure-camera/secure-ui session is
	 *   in-progress in any other display
	 * - fail secure-camera/secure-ui crtc commit, if any other display
	 *   session is in-progress
	 */
	if ((global_active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE) ||
			(active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE)) {
		SDE_ERROR(
			"crtc%d secure check failed global_active:%d active:%d\n",
				cur_crtc ? cur_crtc->base.id : -1,
				global_active_crtc_cnt, active_crtc_cnt);
		return -EPERM;

	/*
	 * As only one crtc is allowed during secure session, the crtc
	 * in this commit should match with the global crtc
	 */
	} else if (global_crtc && cur_crtc && (global_crtc != cur_crtc)) {
		SDE_ERROR("crtc%d-sec%d not allowed during crtc%d-sec%d\n",
				cur_crtc->base.id, sec_session,
				global_crtc->base.id, global_sec_session);
		return -EPERM;
	}

	return 0;
}
static void sde_kms_vm_res_release(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct sde_crtc_state *cstate;
	struct sde_vm_ops *vm_ops;
	enum sde_crtc_vm_req vm_req;
	struct sde_kms *sde_kms = to_sde_kms(kms);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return;

	sde_vm_lock(sde_kms);

	if (vm_ops->vm_acquire_fail_handler)
		vm_ops->vm_acquire_fail_handler(sde_kms);

	sde_vm_unlock(sde_kms);
}
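/**
 * sde_kms_atomic_check - kms hook for atomic state validation; runs the VM
 * transition and secure transition checks on top of the drm helper checks
 * @kms: Pointer to kms structure
 * @state: Pointer to the atomic state being checked
 * Returns: Zero on success
 */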
static int sde_kms_atomic_check(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	int ret;

	if (!kms || !state)
		return -EINVAL;

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	SDE_ATRACE_BEGIN("atomic_check");
	if (sde_kms_is_suspend_blocked(dev)) {
		SDE_DEBUG("suspended, skip atomic_check\n");
		ret = -EBUSY;
		goto end;
	}

	ret = sde_kms_check_vm_request(kms, state);
	if (ret) {
		SDE_ERROR("vm switch request checks failed\n");
		goto end;
	}

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		goto vm_clean_up;

	/*
	 * Check if any secure transition (moving CRTC between secure and
	 * non-secure state and vice-versa) is allowed or not. When moving
	 * to secure state, only planes with fb_mode set to dir_translated
	 * can be staged on the CRTC, and only one CRTC can be active during
	 * secure state.
	 */
	ret = sde_kms_check_secure_transition(kms, state);
	if (ret)
		goto vm_clean_up;

	goto end;

vm_clean_up:
	sde_kms_vm_res_release(kms, state);

end:
	SDE_ATRACE_END("atomic_check");
	return ret;
}
static struct msm_gem_address_space*
_sde_kms_get_address_space(struct msm_kms *kms,
		unsigned int domain)
{
	struct sde_kms *sde_kms;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return NULL;
	}

	sde_kms = to_sde_kms(kms);
	if (!sde_kms) {
		SDE_ERROR("invalid sde_kms\n");
		return NULL;
	}

	if (domain >= MSM_SMMU_DOMAIN_MAX)
		return NULL;

	return (sde_kms->aspace[domain] &&
			sde_kms->aspace[domain]->domain_attached) ?
		sde_kms->aspace[domain] : NULL;
}
static struct device *_sde_kms_get_address_space_device(struct msm_kms *kms,
		unsigned int domain)
{
	struct sde_kms *sde_kms;
	struct msm_gem_address_space *aspace;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return NULL;
	}

	sde_kms = to_sde_kms(kms);
	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid params\n");
		return NULL;
	}

	aspace = _sde_kms_get_address_space(kms, domain);
	return (aspace && aspace->domain_attached) ?
		msm_gem_get_aspace_device(aspace) : NULL;
}
static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
{
	struct drm_device *dev = NULL;
	struct sde_kms *sde_kms = NULL;
	struct drm_connector *connector = NULL;
	struct drm_connector_list_iter conn_iter;
	struct sde_connector *sde_conn = NULL;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev) {
		SDE_ERROR("invalid device\n");
		return;
	}

	if (!dev->mode_config.poll_enabled)
		return;

	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* Only handle HPD capable connectors. */
		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
			continue;

		sde_conn = to_sde_connector(connector);
		if (sde_conn->ops.post_open)
			sde_conn->ops.post_open(&sde_conn->base,
					sde_conn->display);
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);
}
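/*
 * _sde_kms_update_planes_for_cont_splash - attach the planes left running
 * by the bootloader to @crtc. A plane is claimed only if the splash
 * metadata marks its pipe (real or virtual) as active, and its fetch
 * address is validated against the splash (or demura) reserved memory so
 * an out-of-range bootloader address is skipped rather than inherited.
 */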
static int _sde_kms_update_planes_for_cont_splash(struct sde_kms *sde_kms,
		struct sde_splash_display *splash_display,
		struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;
	struct drm_plane *plane;
	struct sde_splash_mem *splash;
	struct sde_splash_mem *demura;
	struct sde_plane_state *pstate;
	struct sde_sspp_index_info *pipe_info;
	enum sde_sspp pipe_id;
	bool is_virtual;
	int i;

	if (!sde_kms || !splash_display || !crtc) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	priv = sde_kms->dev->dev_private;
	pipe_info = &splash_display->pipe_info;
	splash = splash_display->splash;
	demura = splash_display->demura;

	for (i = 0; i < priv->num_planes; i++) {
		plane = priv->planes[i];
		pipe_id = sde_plane_pipe(plane);
		is_virtual = is_sde_plane_virtual(plane);

		if ((is_virtual && test_bit(pipe_id, pipe_info->virt_pipes)) ||
				(!is_virtual && test_bit(pipe_id, pipe_info->pipes))) {
			if (splash && sde_plane_validate_src_addr(plane,
					splash->splash_buf_base,
					splash->splash_buf_size)) {
				if (!demura || sde_plane_validate_src_addr(
						plane, demura->splash_buf_base,
						demura->splash_buf_size)) {
					SDE_ERROR("invalid addr on pipe:%d crtc:%d\n",
							pipe_id, DRMID(crtc));
					continue;
				}
			}

			plane->state->crtc = crtc;
			crtc->state->plane_mask |= drm_plane_mask(plane);
			pstate = to_sde_plane_state(plane->state);
			pstate->cont_splash_populated = true;

			SDE_DEBUG("set crtc:%d for plane:%d rect:%d\n",
					DRMID(crtc), DRMID(plane), is_virtual);
		}
	}

	return 0;
}
static int sde_kms_inform_cont_splash_res_disable(struct msm_kms *kms,
		struct dsi_display *dsi_display)
{
	void *display;
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	struct drm_device *dev;
	struct sde_kms *sde_kms;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector = NULL;
	struct sde_connector *sde_conn = NULL;
	int rc = 0;

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	display = dsi_display;

	if (dsi_display) {
		if (dsi_display->bridge->base.encoder) {
			encoder = dsi_display->bridge->base.encoder;
			SDE_DEBUG("encoder name = %s\n", encoder->name);
		}

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("%s: dsi get_info failed: %d\n",
					__func__, rc);
			encoder = NULL;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_encoder *c_encoder = NULL;

		drm_connector_for_each_possible_encoder(connector,
				c_encoder)
			break;

		if (!c_encoder) {
			SDE_ERROR("c_encoder not found\n");
			drm_connector_list_iter_end(&conn_iter);
			return -EINVAL;
		}

		/*
		 * Inform each interface/connector that cont_splash is
		 * disabled. This is currently supported for the DSI
		 * interface.
		 */
		sde_conn = to_sde_connector(connector);
		if (sde_conn && sde_conn->ops.cont_splash_res_disable) {
			if (!dsi_display || !encoder) {
				sde_conn->ops.cont_splash_res_disable
						(sde_conn->display);
			} else if (c_encoder->base.id == encoder->base.id) {
				/*
				 * This handles the dual DSI configuration
				 * where one DSI interface has cont_splash
				 * enabled and the other doesn't.
				 */
				sde_conn->ops.cont_splash_res_disable
						(sde_conn->display);
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}
static int sde_kms_vm_trusted_cont_splash_res_init(struct sde_kms *sde_kms)
{
	int i;
	void *display;
	struct dsi_display *dsi_display;
	struct drm_encoder *encoder;

	if (!sde_kms)
		return -EINVAL;

	if (!sde_in_trusted_vm(sde_kms))
		return 0;

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		display = sde_kms->dsi_displays[i];
		dsi_display = (struct dsi_display *)display;

		if (!dsi_display->bridge->base.encoder) {
			SDE_ERROR("no encoder on dsi display:%d\n", i);
			return -EINVAL;
		}

		encoder = dsi_display->bridge->base.encoder;
		encoder->possible_crtcs = 1 << i;

		SDE_DEBUG(
			"dsi-display:%d encoder id[%d]=%d name=%s crtcs=%x\n", i,
			encoder->index, encoder->base.id,
			encoder->name, encoder->possible_crtcs);
	}

	return 0;
}
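/*
 * _sde_kms_get_splash_mode - pick the display mode to seed the first
 * commit with after handoff. For a bootloader handoff the connector's
 * preferred mode is used; for a trusted-VM handoff the mode is recovered
 * from the CRTC state of the first atomic_check instead, since the
 * primary VM, not the bootloader, defined it.
 */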
static struct drm_display_mode *_sde_kms_get_splash_mode(
		struct sde_kms *sde_kms, struct drm_connector *connector,
		struct drm_atomic_state *state)
{
	struct drm_display_mode *mode, *cur_mode = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate, *old_cstate;
	u32 i = 0;

	if (sde_kms->splash_data.type == SDE_SPLASH_HANDOFF) {
		list_for_each_entry(mode, &connector->modes, head) {
			if (mode->type & DRM_MODE_TYPE_PREFERRED) {
				cur_mode = mode;
				break;
			}
		}
	} else if (state) {
		/* get the mode from the first atomic_check phase for trusted_vm */
		for_each_oldnew_crtc_in_state(state, crtc, old_cstate,
				new_cstate, i) {
			if (!new_cstate->active && !old_cstate->active)
				continue;

			list_for_each_entry(mode, &connector->modes, head) {
				if (drm_mode_equal(&new_cstate->mode, mode)) {
					cur_mode = mode;
					break;
				}
			}
		}
	}

	return cur_mode;
}
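/*
 * sde_kms_cont_splash_config - adopt the bootloader/trusted-VM display
 * state. For every DSI display still scanning out the splash frame, this
 * stitches the encoder, CRTC and connector objects together, restores the
 * active mode, marks the crtc/connector/plane states as cont-splash
 * populated, and lets the connector and encoder update their capabilities,
 * so the first real atomic commit is a seamless transition rather than a
 * blank-and-reprogram.
 */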
static int sde_kms_cont_splash_config(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	void *display;
	struct dsi_display *dsi_display;
	struct msm_display_info info;
	struct drm_encoder *encoder = NULL;
	struct drm_crtc *crtc = NULL;
	int i, rc = 0;
	struct drm_display_mode *drm_mode = NULL;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector = NULL;
	struct sde_connector *sde_conn = NULL;
	struct sde_splash_display *splash_display;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev) {
		SDE_ERROR("invalid device\n");
		return -EINVAL;
	}

	rc = sde_kms_vm_trusted_cont_splash_res_init(sde_kms);
	if (rc) {
		SDE_ERROR("failed vm cont splash resource init, rc=%d", rc);
		return -EINVAL;
	}

	if (((sde_kms->splash_data.type == SDE_SPLASH_HANDOFF)
			&& (!sde_kms->splash_data.num_splash_regions)) ||
			!sde_kms->splash_data.num_splash_displays) {
		DRM_INFO("cont_splash feature not enabled\n");
		sde_kms_inform_cont_splash_res_disable(kms, NULL);
		return rc;
	}

	DRM_INFO("cont_splash enabled in %d of %d display(s)\n",
			sde_kms->splash_data.num_splash_displays,
			sde_kms->dsi_display_count);

	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count; ++i) {
		struct sde_crtc_state *cstate;
		struct sde_connector_state *conn_state;

		display = sde_kms->dsi_displays[i];
		dsi_display = (struct dsi_display *)display;
		splash_display = &sde_kms->splash_data.splash_display[i];

		if (!splash_display->cont_splash_enabled) {
			SDE_DEBUG("display->name = %s splash not enabled\n",
					dsi_display->name);
			sde_kms_inform_cont_splash_res_disable(kms,
					dsi_display);
			continue;
		}

		SDE_DEBUG("display->name = %s\n", dsi_display->name);

		if (dsi_display->bridge->base.encoder) {
			encoder = dsi_display->bridge->base.encoder;
			SDE_DEBUG("encoder name = %s\n", encoder->name);
		}

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			encoder = NULL;
			continue;
		}

		SDE_DEBUG("info.is_connected = %s, info.display_type = %d\n",
				((info.is_connected) ? "true" : "false"),
				info.display_type);

		if (!encoder) {
			SDE_ERROR("encoder not initialized\n");
			return -EINVAL;
		}

		priv = sde_kms->dev->dev_private;
		encoder->crtc = priv->crtcs[i];
		crtc = encoder->crtc;
		splash_display->encoder = encoder;

		SDE_DEBUG("for dsi-display:%d crtc id[%d]:%d enc id[%d]:%d\n",
				i, crtc->index, crtc->base.id, encoder->index,
				encoder->base.id);

		mutex_lock(&dev->mode_config.mutex);
		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct drm_encoder *c_encoder = NULL;

			drm_connector_for_each_possible_encoder(connector,
					c_encoder)
				break;

			if (!c_encoder) {
				SDE_ERROR("c_encoder not found\n");
				drm_connector_list_iter_end(&conn_iter);
				mutex_unlock(&dev->mode_config.mutex);
				return -EINVAL;
			}

			/*
			 * SDE_KMS doesn't attach more than one encoder to
			 * a DSI connector. So it is safe to check only with
			 * the first encoder entry. Revisit this logic if we
			 * ever have to support continuous splash for
			 * external displays in MST configuration.
			 */
			if (c_encoder->base.id == encoder->base.id)
				break;
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!connector) {
			SDE_ERROR("connector not initialized\n");
			mutex_unlock(&dev->mode_config.mutex);
			return -EINVAL;
		}
		mutex_unlock(&dev->mode_config.mutex);

		crtc->state->encoder_mask = drm_encoder_mask(encoder);
		crtc->state->connector_mask = drm_connector_mask(connector);
		connector->state->crtc = crtc;

		drm_mode = _sde_kms_get_splash_mode(sde_kms, connector, state);
		if (!drm_mode) {
			SDE_ERROR("drm_mode not found; handoff_type:%d\n",
					sde_kms->splash_data.type);
			return -EINVAL;
		}

		SDE_DEBUG(
			"drm_mode->name:%s, type:0x%x, flags:0x%x, handoff_type:%d\n",
			drm_mode->name, drm_mode->type,
			drm_mode->flags, sde_kms->splash_data.type);

		/* Update CRTC drm structure */
		crtc->state->active = true;
		rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode);
		if (rc) {
			SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc);
			return rc;
		}
		drm_mode_copy(&crtc->state->adjusted_mode, drm_mode);
		drm_mode_copy(&crtc->mode, drm_mode);
		cstate = to_sde_crtc_state(crtc->state);
		cstate->cont_splash_populated = true;

		/* Update encoder structure */
		sde_encoder_update_caps_for_cont_splash(encoder,
				splash_display, true);

		sde_crtc_update_cont_splash_settings(crtc);

		sde_conn = to_sde_connector(connector);
		if (sde_conn && sde_conn->ops.cont_splash_config)
			sde_conn->ops.cont_splash_config(sde_conn->display);

		conn_state = to_sde_connector_state(connector->state);
		conn_state->cont_splash_populated = true;

		rc = _sde_kms_update_planes_for_cont_splash(sde_kms,
				splash_display, crtc);
		if (rc) {
			SDE_ERROR("Failed: updating plane status rc=%d\n", rc);
			return rc;
		}
	}

	return rc;
}
static bool sde_kms_check_for_splash(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return false;
	}

	sde_kms = to_sde_kms(kms);
	return sde_kms->splash_data.num_splash_displays;
}
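/*
 * sde_kms_get_mixer_count - compute how many layer mixers a mode needs.
 * All math below uses the DRM s31.32 fixed-point helpers. The required MDP
 * clock is htotal * vtotal * vrefresh * 1.05 (5% fudge factor); if either
 * that clock exceeds the max core clock or hdisplay exceeds the max mixer
 * width, the load is split across 2, then 4, mixers before giving up.
 *
 * Worked example with illustrative (hypothetical) limits of a 460 MHz max
 * core clock and 2560-pixel max mixer width: a mode with a 4000x2250
 * total raster at 60 fps needs 4000 * 2250 * 60 * 1.05 = 567 MHz, which
 * fails with one mixer but passes with two (283.5 MHz per mixer).
 */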
static int sde_kms_get_mixer_count(const struct msm_kms *kms,
		const struct drm_display_mode *mode,
		const struct msm_resource_caps_info *res, u32 *num_lm)
{
	struct sde_kms *sde_kms;
	s64 mode_clock_hz = 0;
	s64 max_mdp_clock_hz = 0;
	s64 max_lm_width = 0;
	s64 hdisplay_fp = 0;
	s64 htotal_fp = 0;
	s64 vtotal_fp = 0;
	s64 vrefresh_fp = 0;
	s64 mdp_fudge_factor = 0;
	s64 num_lm_fp = 0;
	s64 lm_clk_fp = 0;
	s64 lm_width_fp = 0;
	int rc = 0;

	if (!num_lm) {
		SDE_ERROR("invalid num_lm pointer\n");
		return -EINVAL;
	}

	/* default to 1 layer mixer */
	*num_lm = 1;

	if (!kms || !mode || !res) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);

	max_mdp_clock_hz = drm_int2fixp(sde_kms->perf.max_core_clk_rate);
	max_lm_width = drm_int2fixp(res->max_mixer_width);
	hdisplay_fp = drm_int2fixp(mode->hdisplay);
	htotal_fp = drm_int2fixp(mode->htotal);
	vtotal_fp = drm_int2fixp(mode->vtotal);
	vrefresh_fp = drm_int2fixp(drm_mode_vrefresh(mode));
	mdp_fudge_factor = drm_fixp_from_fraction(105, 100);

	/* mode clock = [(h * v * fps * 1.05) / (num_lm)] */
	mode_clock_hz = drm_fixp_mul(htotal_fp, vtotal_fp);
	mode_clock_hz = drm_fixp_mul(mode_clock_hz, vrefresh_fp);
	mode_clock_hz = drm_fixp_mul(mode_clock_hz, mdp_fudge_factor);

	if (mode_clock_hz > max_mdp_clock_hz ||
			hdisplay_fp > max_lm_width) {
		*num_lm = 0;
		do {
			*num_lm += 2;
			num_lm_fp = drm_int2fixp(*num_lm);
			lm_clk_fp = drm_fixp_div(mode_clock_hz, num_lm_fp);
			lm_width_fp = drm_fixp_div(hdisplay_fp, num_lm_fp);

			if (*num_lm > 4) {
				rc = -EINVAL;
				goto error;
			}
		} while (lm_clk_fp > max_mdp_clock_hz ||
				lm_width_fp > max_lm_width);

		mode_clock_hz = lm_clk_fp;
	}

	SDE_DEBUG("[%s] h=%d v=%d fps=%d lm=%d mode_clk=%u max_clk=%llu\n",
			mode->name, mode->htotal, mode->vtotal,
			drm_mode_vrefresh(mode),
			*num_lm, drm_fixp2int(mode_clock_hz),
			sde_kms->perf.max_core_clk_rate);
	return 0;

error:
	SDE_ERROR("required mode clk exceeds max mdp clk\n");
	SDE_ERROR("[%s] h=%d v=%d fps=%d lm=%d mode_clk=%u max_clk=%llu\n",
			mode->name, mode->htotal, mode->vtotal,
			drm_mode_vrefresh(mode),
			*num_lm, drm_fixp2int(mode_clock_hz),
			sde_kms->perf.max_core_clk_rate);
	return rc;
}
static int sde_kms_get_dsc_count(const struct msm_kms *kms,
		u32 hdisplay, u32 *num_dsc)
{
	struct sde_kms *sde_kms;
	uint32_t max_dsc_width;

	if (!num_dsc) {
		SDE_ERROR("invalid num_dsc pointer\n");
		return -EINVAL;
	}

	*num_dsc = 0;
	if (!kms || !hdisplay) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	max_dsc_width = sde_kms->catalog->max_dsc_width;
	*num_dsc = DIV_ROUND_UP(hdisplay, max_dsc_width);

	SDE_DEBUG("h=%d, max_dsc_width=%d, num_dsc=%d\n",
			hdisplay, max_dsc_width, *num_dsc);

	return 0;
}
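/*
 * _sde_kms_null_commit - push an empty atomic commit for @enc's connector.
 * Used to kick a display out of continuous splash without a real state
 * change. Lock acquisition retries a bounded number of times on -EDEADLK
 * before warning and giving up, since this runs outside the usual atomic
 * ioctl backoff loop.
 */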
static void _sde_kms_null_commit(struct drm_device *dev,
		struct drm_encoder *enc)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state = NULL;
	int retry_cnt = 0;
	int ret = 0;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK && retry_cnt < SDE_KMS_MODESET_LOCK_MAX_TRIALS) {
		drm_modeset_backoff(&ctx);
		retry_cnt++;
		udelay(SDE_KMS_MODESET_LOCK_TIMEOUT_US);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		DRM_ERROR("failed to allocate atomic state, %d\n", ret);
		goto end;
	}

	state->acquire_ctx = &ctx;

	ret = sde_kms_set_crtc_for_conn(dev, enc, state);
	if (ret)
		goto end;

	ret = drm_atomic_commit(state);
	if (ret)
		SDE_ERROR("Error %d doing the atomic commit\n", ret);

end:
	if (state)
		drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
void sde_kms_display_early_wakeup(struct drm_device *dev,
		const int32_t connector_id)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *conn;
	struct drm_encoder *drm_enc;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		if (connector_id != DRM_MSM_WAKE_UP_ALL_DISPLAYS &&
				connector_id != conn->base.id)
			continue;

		if (conn->state && conn->state->best_encoder)
			drm_enc = conn->state->best_encoder;
		else
			drm_enc = conn->encoder;

		if (drm_enc)
			sde_encoder_early_wakeup(drm_enc);
	}
	drm_connector_list_iter_end(&conn_iter);
}
static void _sde_kms_pm_suspend_idle_helper(struct sde_kms *sde_kms,
		struct device *dev)
{
	int i, ret, crtc_id = 0;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct msm_drm_private *priv = sde_kms->dev->dev_private;

	drm_connector_list_iter_begin(ddev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		uint64_t lp;

		lp = sde_connector_get_lp(conn);
		if (lp != SDE_MODE_DPMS_LP2)
			continue;

		if (sde_encoder_in_clone_mode(conn->encoder))
			continue;

		crtc_id = drm_crtc_index(conn->state->crtc);
		if (priv->disp_thread[crtc_id].thread)
			kthread_flush_worker(
					&priv->disp_thread[crtc_id].worker);

		ret = sde_encoder_wait_for_event(conn->encoder,
				MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR(
				"[conn: %d] wait for commit done returned %d\n",
				conn->base.id, ret);
		} else if (!ret) {
			if (priv->event_thread[crtc_id].thread)
				kthread_flush_worker(
					&priv->event_thread[crtc_id].worker);

			sde_encoder_idle_request(conn->encoder);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->disp_thread[i].thread)
			kthread_flush_worker(
					&priv->disp_thread[i].worker);

		if (priv->event_thread[i].thread)
			kthread_flush_worker(
					&priv->event_thread[i].worker);
	}

	kthread_flush_worker(&priv->pp_event_worker);
}
struct msm_display_mode *sde_kms_get_msm_mode(struct drm_connector_state *conn_state)
{
	struct sde_connector_state *sde_conn_state;

	if (!conn_state)
		return NULL;

	sde_conn_state = to_sde_connector_state(conn_state);
	return &sde_conn_state->msm_mode;
}
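/*
 * sde_kms_pm_suspend - PM suspend entry point for the KMS device.
 * Sequence: disable HPD polling, force any display still in continuous
 * splash through a null commit, snapshot the current atomic state for
 * resume, then commit a "disable all" state that moves LP1 connectors to
 * LP2 and turns the remaining active CRTCs off. The trailing
 * pm_runtime_put_sync()/pm_runtime_get_noresume() pair drops the runtime
 * PM vote so power collapse can happen during suspend without losing the
 * reference count.
 */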
static int sde_kms_pm_suspend(struct device *dev)
{
	struct drm_device *ddev;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_connector *conn;
	struct drm_encoder *enc;
	struct drm_connector_list_iter conn_iter;
	struct drm_atomic_state *state = NULL;
	struct sde_kms *sde_kms;
	int ret = 0, num_crtcs = 0;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
	SDE_EVT32(0);

	/* disable hot-plug polling */
	drm_kms_helper_poll_disable(ddev);

	/*
	 * if a display is stuck in continuous splash, trigger a null commit
	 * to complete the handoff
	 */
	drm_for_each_encoder(enc, ddev) {
		if (sde_encoder_in_cont_splash(enc) && enc->crtc)
			_sde_kms_null_commit(ddev, enc);
	}

	/* acquire modeset lock(s) */
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret)
		goto unlock;

	/* save current state for resume */
	if (sde_kms->suspend_state)
		drm_atomic_state_put(sde_kms->suspend_state);
	sde_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
	if (IS_ERR_OR_NULL(sde_kms->suspend_state)) {
		ret = PTR_ERR(sde_kms->suspend_state);
		DRM_ERROR("failed to back up suspend state, %d\n", ret);
		sde_kms->suspend_state = NULL;
		goto unlock;
	}

	/* create atomic state to disable all CRTCs */
	state = drm_atomic_state_alloc(ddev);
	if (!state) {
		ret = -ENOMEM;
		DRM_ERROR("failed to allocate crtc disable state, %d\n", ret);
		goto unlock;
	}

	state->acquire_ctx = &ctx;
	drm_connector_list_iter_begin(ddev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_crtc_state *crtc_state;
		uint64_t lp;

		if (!conn->state || !conn->state->crtc ||
				conn->dpms != DRM_MODE_DPMS_ON ||
				sde_encoder_in_clone_mode(conn->encoder))
			continue;

		lp = sde_connector_get_lp(conn);
		if (lp == SDE_MODE_DPMS_LP1) {
			/* transition LP1->LP2 on pm suspend */
			ret = sde_connector_set_property_for_commit(conn, state,
					CONNECTOR_PROP_LP, SDE_MODE_DPMS_LP2);
			if (ret) {
				DRM_ERROR("failed to set lp2 for conn %d\n",
						conn->base.id);
				drm_connector_list_iter_end(&conn_iter);
				goto unlock;
			}
		}

		if (lp != SDE_MODE_DPMS_LP2) {
			/* force CRTC to be inactive */
			crtc_state = drm_atomic_get_crtc_state(state,
					conn->state->crtc);
			if (IS_ERR_OR_NULL(crtc_state)) {
				DRM_ERROR("failed to get crtc %d state\n",
						conn->state->crtc->base.id);
				drm_connector_list_iter_end(&conn_iter);
				goto unlock;
			}

			if (lp != SDE_MODE_DPMS_LP1)
				crtc_state->active = false;
			++num_crtcs;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* check for nothing to do */
	if (num_crtcs == 0) {
		DRM_DEBUG("all crtcs are already in the off state\n");
		sde_kms->suspend_block = true;
		_sde_kms_pm_suspend_idle_helper(sde_kms, dev);
		goto unlock;
	}

	/* commit the "disable all" state */
	ret = drm_atomic_commit(state);
	if (ret < 0) {
		DRM_ERROR("failed to disable crtcs, %d\n", ret);
		goto unlock;
	}

	sde_kms->suspend_block = true;
	_sde_kms_pm_suspend_idle_helper(sde_kms, dev);

unlock:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	/*
	 * The pm runtime framework avoids multiple runtime_suspend calls by
	 * checking runtime_status. However, this call helps when there is a
	 * race condition between the pm_suspend call and a doze_suspend/
	 * power_off commit. It removes the extra vote from suspend and adds
	 * it back later to allow power collapse during the pm_suspend call.
	 */
	pm_runtime_put_sync(dev);
	pm_runtime_get_noresume(dev);

	/* dump clock state before entering suspend */
	if (sde_kms->pm_suspend_clk_dump)
		_sde_kms_dump_clks_state(sde_kms);

	return ret;
}
static int sde_kms_pm_resume(struct device *dev)
{
	struct drm_device *ddev;
	struct sde_kms *sde_kms;
	struct drm_modeset_acquire_ctx ctx;
	int ret, i;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));

	SDE_EVT32(sde_kms->suspend_state != NULL);

	drm_mode_config_reset(ddev);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	sde_kms->suspend_block = false;

	if (sde_kms->suspend_state) {
		sde_kms->suspend_state->acquire_ctx = &ctx;
		for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
			ret = drm_atomic_helper_commit_duplicated_state(
					sde_kms->suspend_state, &ctx);
			if (ret != -EDEADLK)
				break;

			drm_modeset_backoff(&ctx);
		}

		if (ret < 0)
			DRM_ERROR("failed to restore state, %d\n", ret);

		drm_atomic_state_put(sde_kms->suspend_state);
		sde_kms->suspend_state = NULL;
	}

end:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	/* enable hot-plug polling */
	drm_kms_helper_poll_enable(ddev);

	return 0;
}
static const struct msm_kms_funcs kms_funcs = {
	.hw_init = sde_kms_hw_init,
	.postinit = sde_kms_postinit,
	.irq_preinstall = sde_irq_preinstall,
	.irq_postinstall = sde_irq_postinstall,
	.irq_uninstall = sde_irq_uninstall,
	.irq = sde_irq,
	.preclose = sde_kms_preclose,
	.lastclose = sde_kms_lastclose,
	.prepare_fence = sde_kms_prepare_fence,
	.prepare_commit = sde_kms_prepare_commit,
	.commit = sde_kms_commit,
	.complete_commit = sde_kms_complete_commit,
	.get_msm_mode = sde_kms_get_msm_mode,
	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
	.wait_for_tx_complete = sde_kms_wait_for_frame_transfer_complete,
	.check_modified_format = sde_format_check_modified_format,
	.atomic_check = sde_kms_atomic_check,
	.get_format = sde_get_msm_format,
	.round_pixclk = sde_kms_round_pixclk,
	.display_early_wakeup = sde_kms_display_early_wakeup,
	.pm_suspend = sde_kms_pm_suspend,
	.pm_resume = sde_kms_pm_resume,
	.destroy = sde_kms_destroy,
	.debugfs_destroy = sde_kms_debugfs_destroy,
	.cont_splash_config = sde_kms_cont_splash_config,
	.register_events = _sde_kms_register_events,
	.get_address_space = _sde_kms_get_address_space,
	.get_address_space_device = _sde_kms_get_address_space_device,
	.postopen = _sde_kms_post_open,
	.check_for_splash = sde_kms_check_for_splash,
	.get_mixer_count = sde_kms_get_mixer_count,
	.get_dsc_count = sde_kms_get_dsc_count,
};
static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
{
	int i;

	for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
		if (!sde_kms->aspace[i])
			continue;

		msm_gem_address_space_put(sde_kms->aspace[i]);
		sde_kms->aspace[i] = NULL;
	}

	return 0;
}
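/*
 * _sde_kms_mmu_init - create a GEM address space for each SMMU domain.
 * A domain whose SMMU context fails to probe is skipped rather than
 * treated as fatal. On the unsecure domain the reserved splash regions
 * are mapped first, and only then is the SMMU's early-map attribute
 * cleared, so scanout of the splash frame never faults while the page
 * tables are being taken over.
 */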
static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
{
	struct msm_mmu *mmu;
	int i, ret;
	int early_map = 0;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_gem_address_space *aspace;

		mmu = msm_smmu_new(sde_kms->dev->dev, i);
		if (IS_ERR(mmu)) {
			ret = PTR_ERR(mmu);
			SDE_DEBUG("failed to init iommu id %d: rc:%d\n",
					i, ret);
			continue;
		}

		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
				mmu, "sde");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			mmu->funcs->destroy(mmu);
			goto fail;
		}

		sde_kms->aspace[i] = aspace;
		aspace->domain_attached = true;

		/* Mapping splash memory block */
		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
				sde_kms->splash_data.num_splash_regions) {
			ret = _sde_kms_map_all_splash_regions(sde_kms);
			if (ret) {
				SDE_ERROR("failed to map ret:%d\n", ret);
				goto early_map_fail;
			}
		}

		/*
		 * disable early-map which would have been enabled during
		 * bootup by smmu through the device-tree hint for cont-splash
		 */
		ret = mmu->funcs->set_attribute(mmu, DOMAIN_ATTR_EARLY_MAP,
				&early_map);
		if (ret) {
			SDE_ERROR("failed to set_att ret:%d, early_map:%d\n",
					ret, early_map);
			goto early_map_fail;
		}
	}

	sde_kms->base.aspace = sde_kms->aspace[0];

	return 0;

early_map_fail:
	_sde_kms_unmap_all_splash_regions(sde_kms);

fail:
	_sde_kms_mmu_destroy(sde_kms);

	return ret;
}
static void sde_kms_init_rot_sid_hw(struct sde_kms *sde_kms)
{
	if (!sde_kms || !sde_kms->hw_sid || sde_in_trusted_vm(sde_kms))
		return;

	sde_hw_set_rotator_sid(sde_kms->hw_sid);
}

static void sde_kms_init_shared_hw(struct sde_kms *sde_kms)
{
	if (!sde_kms || !sde_kms->hw_mdp || !sde_kms->catalog)
		return;

	if (sde_kms->hw_mdp->ops.reset_ubwc)
		sde_kms->hw_mdp->ops.reset_ubwc(sde_kms->hw_mdp,
				sde_kms->catalog);
}
static void _sde_kms_set_lutdma_vbif_remap(struct sde_kms *sde_kms)
{
	struct sde_vbif_set_qos_params qos_params;
	struct sde_mdss_cfg *catalog;

	if (!sde_kms->catalog)
		return;

	catalog = sde_kms->catalog;

	memset(&qos_params, 0, sizeof(qos_params));
	qos_params.vbif_idx = catalog->dma_cfg.vbif_idx;
	qos_params.xin_id = catalog->dma_cfg.xin_id;
	qos_params.clk_ctrl = catalog->dma_cfg.clk_ctrl;
	qos_params.client_type = VBIF_LUTDMA_CLIENT;

	sde_vbif_set_qos_remap(sde_kms, &qos_params);
}
static int _sde_kms_active_override(struct sde_kms *sde_kms, bool enable)
{
	struct sde_hw_uidle *uidle;

	if (!sde_kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	uidle = sde_kms->hw_uidle;

	if (uidle && uidle->ops.active_override_enable)
		uidle->ops.active_override_enable(uidle, enable);

	return 0;
}
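/*
 * PM QoS IRQ voting: while display IRQs are expected (first vote comes in
 * via sde_kms_cpu_vote_for_irq()), a resume-latency request is installed
 * on each CPU in irq_cpu_mask so those cores stay out of deep idle states
 * and IRQ handling latency stays bounded; the request is dropped again
 * when the last vote goes away.
 */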
static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms)
{
	struct device *cpu_dev;
	int cpu = 0;
	u32 cpu_irq_latency = sde_kms->catalog->perf.cpu_irq_latency;

	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
		return;
	}

	for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
					cpu);
			continue;
		}

		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
			dev_pm_qos_update_request(&sde_kms->pm_qos_irq_req[cpu],
					cpu_irq_latency);
		else
			dev_pm_qos_add_request(cpu_dev,
					&sde_kms->pm_qos_irq_req[cpu],
					DEV_PM_QOS_RESUME_LATENCY,
					cpu_irq_latency);
	}
}

static void _sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms)
{
	struct device *cpu_dev;
	int cpu = 0;

	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
		return;
	}

	for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
					cpu);
			continue;
		}

		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
			dev_pm_qos_remove_request(
					&sde_kms->pm_qos_irq_req[cpu]);
	}
}
void sde_kms_cpu_vote_for_irq(struct sde_kms *sde_kms, bool enable)
{
	struct msm_drm_private *priv = sde_kms->dev->dev_private;

	mutex_lock(&priv->phandle.phandle_lock);
	if (enable && atomic_inc_return(&sde_kms->irq_vote_count) == 1)
		_sde_kms_update_pm_qos_irq_request(sde_kms);
	else if (!enable && atomic_dec_return(&sde_kms->irq_vote_count) == 0)
		_sde_kms_remove_pm_qos_irq_request(sde_kms);
	mutex_unlock(&priv->phandle.phandle_lock);
}
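/*
 * sde_kms_irq_affinity_notify - callback from the IRQ core when the
 * display interrupt's CPU affinity changes. The existing PM QoS requests
 * are torn down and re-installed against the new CPU mask while the
 * outstanding vote count is preserved.
 */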
static void sde_kms_irq_affinity_notify(
		struct irq_affinity_notify *affinity_notify,
		const cpumask_t *mask)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms = container_of(affinity_notify,
			struct sde_kms, affinity_notify);

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
		return;

	priv = sde_kms->dev->dev_private;

	mutex_lock(&priv->phandle.phandle_lock);
	_sde_kms_remove_pm_qos_irq_request(sde_kms);

	/* save irq cpu mask */
	sde_kms->irq_cpu_mask = *mask;

	/* request vote with updated irq cpu mask */
	if (atomic_read(&sde_kms->irq_vote_count))
		_sde_kms_update_pm_qos_irq_request(sde_kms);

	mutex_unlock(&priv->phandle.phandle_lock);
}

static void sde_kms_irq_affinity_release(struct kref *ref) {}
static void sde_kms_handle_power_event(u32 event_type, void *usr)
{
	struct sde_kms *sde_kms = usr;
	struct msm_kms *msm_kms;

	if (!sde_kms)
		return;

	msm_kms = &sde_kms->base;

	SDE_DEBUG("event_type:%d\n", event_type);
	SDE_EVT32_VERBOSE(event_type);

	if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
		sde_irq_update(msm_kms, true);
		sde_kms->first_kickoff = true;

		/*
		 * Rotator sid needs to be programmed since UEFI doesn't
		 * configure it during continuous splash
		 */
		sde_kms_init_rot_sid_hw(sde_kms);
		if (sde_kms->splash_data.num_splash_displays ||
				sde_in_trusted_vm(sde_kms))
			return;

		sde_vbif_init_memtypes(sde_kms);
		sde_kms_init_shared_hw(sde_kms);
		_sde_kms_set_lutdma_vbif_remap(sde_kms);
	} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
		sde_irq_update(msm_kms, false);
		sde_kms->first_kickoff = false;

		if (sde_in_trusted_vm(sde_kms))
			return;

		_sde_kms_active_override(sde_kms, true);
		if (!is_sde_rsc_available(SDE_RSC_INDEX))
			sde_vbif_axi_halt_request(sde_kms);
	}
}
#define genpd_to_sde_kms(domain) container_of(domain, struct sde_kms, genpd)

static int sde_kms_pd_enable(struct generic_pm_domain *genpd)
{
	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
	int rc = -EINVAL;

	SDE_DEBUG("\n");

	rc = pm_runtime_get_sync(sde_kms->dev->dev);
	if (rc > 0)
		rc = 0;

	SDE_EVT32(rc, genpd->device_count);

	return rc;
}

static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
{
	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);

	SDE_DEBUG("\n");

	pm_runtime_put_sync(sde_kms->dev->dev);

	SDE_EVT32(genpd->device_count);

	return 0;
}
static int _sde_kms_get_demura_plane_data(struct sde_splash_data *data)
{
	int i = 0;
	int ret = 0;
	int count = 0;
	struct device_node *parent, *node;
	struct resource r;
	char node_name[DEMURA_REGION_NAME_MAX];
	struct sde_splash_mem *mem;
	struct sde_splash_display *splash_display;

	if (!data->num_splash_displays) {
		SDE_DEBUG("no splash displays. skipping\n");
		return 0;
	}

	/*
	 * It is expected that each active demura block will have
	 * its own memory region defined.
	 */
	parent = of_find_node_by_path("/reserved-memory");

	for (i = 0; i < data->num_splash_displays; i++) {
		splash_display = &data->splash_display[i];
		snprintf(&node_name[0], DEMURA_REGION_NAME_MAX,
				"demura_region_%d", i);

		splash_display->demura = NULL;
		node = of_find_node_by_name(parent, node_name);
		if (!node) {
			SDE_DEBUG("no Demura node %s! disp count: %d\n",
					node_name, data->num_splash_displays);
			continue;
		} else if (of_address_to_resource(node, 0, &r)) {
			SDE_ERROR("invalid data for:%s\n", node_name);
			ret = -EINVAL;
			break;
		}

		mem = &data->demura_mem[i];
		mem->splash_buf_base = (unsigned long)r.start;
		mem->splash_buf_size = (r.end - r.start) + 1;
		if (!mem->splash_buf_base && !mem->splash_buf_size) {
			SDE_DEBUG("dummy splash mem for disp %d. Skipping\n",
					(i + 1));
			continue;
		} else if (!mem->splash_buf_base || !mem->splash_buf_size) {
			SDE_ERROR("mem for disp %d invalid: add:%lx size:%lx\n",
					(i + 1), mem->splash_buf_base,
					mem->splash_buf_size);
			continue;
		}

		mem->ref_cnt = 0;
		splash_display->demura = mem;
		count++;

		SDE_DEBUG("demura mem for disp:%d add:%lx size:%x\n", (i + 1),
				mem->splash_buf_base,
				mem->splash_buf_size);
	}

	if (!ret && !count)
		SDE_DEBUG("no demura regions for cont. splash found!\n");

	return ret;
}
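/*
 * _sde_kms_get_splash_data - parse the reserved-memory nodes that describe
 * the bootloader splash buffers. Either every display has its own region
 * in "splash_region" (<r1.start r1.end r2.start r2.end ...>) or, when
 * fewer regions than displays are found, all built-in displays share
 * region 0. An optional "disp_rdump_region" node provides a ramdump copy
 * of the same buffer.
 */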
static int _sde_kms_get_splash_data(struct sde_splash_data *data)
{
	int i = 0;
	int ret = 0;
	struct device_node *parent, *node, *node1;
	struct resource r, r1;
	const char *node_name = "splash_region";
	struct sde_splash_mem *mem;
	bool share_splash_mem = false;
	int num_displays, num_regions;
	struct sde_splash_display *splash_display;

	if (!data)
		return -EINVAL;

	memset(data, 0, sizeof(*data));

	parent = of_find_node_by_path("/reserved-memory");
	if (!parent) {
		SDE_ERROR("failed to find reserved-memory node\n");
		return -EINVAL;
	}

	node = of_find_node_by_name(parent, node_name);
	if (!node) {
		SDE_DEBUG("failed to find node %s\n", node_name);
		return -EINVAL;
	}

	node1 = of_find_node_by_name(NULL, "disp_rdump_region");
	if (!node1)
		SDE_DEBUG("failed to find disp ramdump memory reservation\n");

	/*
	 * Support sharing a single splash memory region between all the
	 * built-in displays as well as an independent splash region per
	 * display. In case of an independent splash region for each
	 * connected display, the dtsi node of cont_splash_region should be
	 * a collection of all memory regions.
	 * Ex: <r1.start r1.end r2.start r2.end ... rn.start rn.end>
	 */
	num_displays = dsi_display_get_num_of_displays();
	num_regions = of_property_count_u64_elems(node, "reg") / 2;

	data->num_splash_displays = num_displays;

	SDE_DEBUG("splash mem num_regions:%d\n", num_regions);
	if (num_displays > num_regions) {
		share_splash_mem = true;
		pr_info(":%d displays share same splash buf\n", num_displays);
	}

	for (i = 0; i < num_displays; i++) {
		splash_display = &data->splash_display[i];
		if (!i || !share_splash_mem) {
			if (of_address_to_resource(node, i, &r)) {
				SDE_ERROR("invalid data for:%s\n", node_name);
				return -EINVAL;
			}

			mem = &data->splash_mem[i];
			if (!node1 || of_address_to_resource(node1, i, &r1)) {
				SDE_DEBUG("failed to find ramdump memory\n");
				mem->ramdump_base = 0;
				mem->ramdump_size = 0;
			} else {
				mem->ramdump_base = (unsigned long)r1.start;
				mem->ramdump_size = (r1.end - r1.start) + 1;
			}

			mem->splash_buf_base = (unsigned long)r.start;
			mem->splash_buf_size = (r.end - r.start) + 1;
			mem->ref_cnt = 0;
			splash_display->splash = mem;
			data->num_splash_regions++;
		} else {
			data->splash_display[i].splash = &data->splash_mem[0];
		}

		SDE_DEBUG("splash mem for disp:%d add:%lx size:%x\n", (i + 1),
				splash_display->splash->splash_buf_base,
				splash_display->splash->splash_buf_size);
	}

	data->type = SDE_SPLASH_HANDOFF;
	ret = _sde_kms_get_demura_plane_data(data);

	return ret;
}
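/*
 * _sde_kms_hw_init_ioremap - map the register regions named in the DT
 * ("mdp_phys", "vbif_phys", "vbif_nrt_phys", "regdma_phys", "sid_phys")
 * and register each with the sde_dbg framework for register dumping.
 * Only mdp and vbif are mandatory; the rest are treated as optional and
 * simply left NULL when the platform doesn't define them.
 */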
static int _sde_kms_hw_init_ioremap(struct sde_kms *sde_kms,
		struct platform_device *platformdev)
{
	int rc = -EINVAL;

	sde_kms->mmio = msm_ioremap(platformdev, "mdp_phys", "mdp_phys");
	if (IS_ERR(sde_kms->mmio)) {
		rc = PTR_ERR(sde_kms->mmio);
		SDE_ERROR("mdp register memory map failed: %d\n", rc);
		sde_kms->mmio = NULL;
		goto error;
	}

	DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio);
	sde_kms->mmio_len = msm_iomap_size(platformdev, "mdp_phys");

	rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
			sde_kms->mmio_len,
			msm_get_phys_addr(platformdev, "mdp_phys"),
			SDE_DBG_SDE);
	if (rc)
		SDE_ERROR("dbg base register kms failed: %d\n", rc);

	sde_kms->vbif[VBIF_RT] = msm_ioremap(platformdev, "vbif_phys", "vbif_phys");
	if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
		SDE_ERROR("vbif register memory map failed: %d\n", rc);
		sde_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}

	sde_kms->vbif_len[VBIF_RT] = msm_iomap_size(platformdev, "vbif_phys");
	rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
			sde_kms->vbif_len[VBIF_RT],
			msm_get_phys_addr(platformdev, "vbif_phys"),
			SDE_DBG_VBIF_RT);
	if (rc)
		SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);

	sde_kms->vbif[VBIF_NRT] = msm_ioremap(platformdev, "vbif_nrt_phys", "vbif_nrt_phys");
	if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
		sde_kms->vbif[VBIF_NRT] = NULL;
		SDE_DEBUG("VBIF NRT is not defined");
	} else {
		sde_kms->vbif_len[VBIF_NRT] = msm_iomap_size(platformdev, "vbif_nrt_phys");
	}

	sde_kms->reg_dma = msm_ioremap(platformdev, "regdma_phys", "regdma_phys");
	if (IS_ERR(sde_kms->reg_dma)) {
		sde_kms->reg_dma = NULL;
		SDE_DEBUG("REG_DMA is not defined");
	} else {
		unsigned long mdp_addr = msm_get_phys_addr(platformdev, "mdp_phys");

		sde_kms->reg_dma_len = msm_iomap_size(platformdev, "regdma_phys");
		sde_kms->reg_dma_off = msm_get_phys_addr(platformdev, "regdma_phys") - mdp_addr;
		rc = sde_dbg_reg_register_base("reg_dma", sde_kms->reg_dma,
				sde_kms->reg_dma_len,
				msm_get_phys_addr(platformdev, "regdma_phys"),
				SDE_DBG_LUTDMA);
		if (rc)
			SDE_ERROR("dbg base register reg_dma failed: %d\n", rc);
	}

	sde_kms->sid = msm_ioremap(platformdev, "sid_phys", "sid_phys");
	if (IS_ERR(sde_kms->sid)) {
		SDE_DEBUG("sid register is not defined: %d\n", rc);
		sde_kms->sid = NULL;
	} else {
		sde_kms->sid_len = msm_iomap_size(platformdev, "sid_phys");
		rc = sde_dbg_reg_register_base("sid", sde_kms->sid,
				sde_kms->sid_len,
				msm_get_phys_addr(platformdev, "sid_phys"),
				SDE_DBG_SID);
		if (rc)
			SDE_ERROR("dbg base register sid failed: %d\n", rc);
	}

error:
	return rc;
}
static int _sde_kms_hw_init_power_helper(struct drm_device *dev,
		struct sde_kms *sde_kms)
{
	int rc = 0;

	if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) {
		sde_kms->genpd.name = dev->unique;
		sde_kms->genpd.power_off = sde_kms_pd_disable;
		sde_kms->genpd.power_on = sde_kms_pd_enable;

		rc = pm_genpd_init(&sde_kms->genpd, NULL, true);
		if (rc < 0) {
			SDE_ERROR("failed to init genpd provider %s: %d\n",
					sde_kms->genpd.name, rc);
			return rc;
		}

		rc = of_genpd_add_provider_simple(dev->dev->of_node,
				&sde_kms->genpd);
		if (rc < 0) {
			SDE_ERROR("failed to add genpd provider %s: %d\n",
					sde_kms->genpd.name, rc);
			pm_genpd_remove(&sde_kms->genpd);
			return rc;
		}

		sde_kms->genpd_init = true;

		SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
	}

	return rc;
}
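/*
 * _sde_kms_hw_init_blocks - bring up the SW objects that model the HW:
 * catalog parse, power domain, SMMU address spaces, reg-DMA, resource
 * manager, interrupt table, splash handoff reservations, MDP top, VBIFs,
 * uidle (optional), SID block, core perf, and finally the DRM objects
 * (CRTCs/planes/encoders/connectors). Failures unwind through the shared
 * error labels at the bottom.
 */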
static int _sde_kms_hw_init_blocks(struct sde_kms *sde_kms,
		struct drm_device *dev,
		struct msm_drm_private *priv)
{
	struct sde_rm *rm = NULL;
	int i, rc = -EINVAL;

	sde_kms->catalog = sde_hw_catalog_init(dev);
	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
		rc = PTR_ERR(sde_kms->catalog);
		if (!sde_kms->catalog)
			rc = -EINVAL;
		SDE_ERROR("catalog init failed: %d\n", rc);
		sde_kms->catalog = NULL;
		goto power_error;
	}

	sde_kms->core_rev = sde_kms->catalog->hwversion;

	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);

	/* initialize power domain if defined */
	rc = _sde_kms_hw_init_power_helper(dev, sde_kms);
	if (rc) {
		SDE_ERROR("_sde_kms_hw_init_power_helper failed: %d\n", rc);
		goto genpd_err;
	}

	rc = _sde_kms_mmu_init(sde_kms);
	if (rc) {
		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	/* Initialize reg dma block which is a singleton */
	sde_kms->catalog->dma_cfg.base_off = sde_kms->reg_dma_off;
	rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
			sde_kms->dev);
	if (rc) {
		SDE_ERROR("failed: reg dma init failed\n");
		goto power_error;
	}

	sde_dbg_init_dbg_buses(sde_kms->core_rev);

	rm = &sde_kms->rm;
	rc = sde_rm_init(rm, sde_kms->catalog, sde_kms->mmio,
			sde_kms->dev);
	if (rc) {
		SDE_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}

	sde_kms->rm_init = true;

	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
		rc = PTR_ERR(sde_kms->hw_intr);
		SDE_ERROR("hw_intr init failed: %d\n", rc);
		sde_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	/*
	 * Attempt continuous splash handoff only if reserved
	 * splash memory is found & release resources on any error
	 * in finding display hw config in splash
	 */
	if (sde_kms->splash_data.num_splash_regions) {
		struct sde_splash_display *display;
		int ret, display_count =
			sde_kms->splash_data.num_splash_displays;

		ret = sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
				&sde_kms->splash_data, sde_kms->catalog);

		for (i = 0; i < display_count; i++) {
			display = &sde_kms->splash_data.splash_display[i];
			/*
			 * free splash region on resource init failure and
			 * cont-splash disabled case
			 */
			if (!display->cont_splash_enabled || ret)
				_sde_kms_free_splash_display_data(
						sde_kms, display);
		}
	}

	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
		rc = PTR_ERR(sde_kms->hw_mdp);
		if (!sde_kms->hw_mdp)
			rc = -EINVAL;
		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
		sde_kms->hw_mdp = NULL;
		goto power_error;
	}

	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = sde_kms->catalog->vbif[i].id;

		sde_kms->hw_vbif[vbif_idx] = sde_hw_vbif_init(vbif_idx,
				sde_kms->vbif[vbif_idx], sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
			if (!sde_kms->hw_vbif[vbif_idx])
				rc = -EINVAL;
			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			sde_kms->hw_vbif[vbif_idx] = NULL;
			goto power_error;
		}
	}

	if (sde_kms->catalog->uidle_cfg.uidle_rev) {
		sde_kms->hw_uidle = sde_hw_uidle_init(UIDLE, sde_kms->mmio,
				sde_kms->mmio_len, sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_uidle)) {
			rc = PTR_ERR(sde_kms->hw_uidle);
			if (!sde_kms->hw_uidle)
				rc = -EINVAL;
			/* uidle is optional, so do not make it a fatal error */
			SDE_ERROR("failed to init uidle rc:%d\n", rc);
			sde_kms->hw_uidle = NULL;
			rc = 0;
		}
	} else {
		sde_kms->hw_uidle = NULL;
	}

	if (sde_kms->sid) {
		sde_kms->hw_sid = sde_hw_sid_init(sde_kms->sid,
				sde_kms->sid_len, sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_sid)) {
			rc = PTR_ERR(sde_kms->hw_sid);
			SDE_ERROR("failed to init sid %d\n", rc);
			sde_kms->hw_sid = NULL;
			goto power_error;
		}
	}

	rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
			&priv->phandle, "core_clk");
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	/*
	 * set the disable_immediate flag when the driver supports precise
	 * vsync timestamps, as the DRM hooks for vblank timestamp/counters
	 * are set based on this feature
	 */
	if (sde_kms->catalog->has_precise_vsync_ts)
		dev->vblank_disable_immediate = true;

	/*
	 * _sde_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _sde_kms_drm_obj_init(sde_kms);
	if (rc) {
		SDE_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	return 0;

genpd_err:
drm_obj_init_err:
	sde_core_perf_destroy(&sde_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
	return rc;
}
int _sde_kms_get_tvm_inclusion_mem(struct sde_mdss_cfg *catalog, struct list_head *mem_list)
{
	struct list_head temp_head;
	struct msm_io_mem_entry *io_mem;
	int rc, i = 0;

	INIT_LIST_HEAD(&temp_head);

	for (i = 0; i < catalog->tvm_reg_count; i++) {
		struct resource *res = &catalog->tvm_reg[i];

		io_mem = kzalloc(sizeof(struct msm_io_mem_entry), GFP_KERNEL);
		if (!io_mem) {
			rc = -ENOMEM;
			goto parse_fail;
		}

		io_mem->base = res->start;
		io_mem->size = resource_size(res);
		list_add(&io_mem->list, &temp_head);
	}

	list_splice(&temp_head, mem_list);

	return 0;

parse_fail:
	msm_dss_clean_io_mem(&temp_head);

	return rc;
}
#ifdef CONFIG_DRM_SDE_VM
int sde_kms_get_io_resources(struct sde_kms *sde_kms, struct msm_io_res *io_res)
{
	struct platform_device *pdev = to_platform_device(sde_kms->dev->dev);
	int rc = 0;

	rc = msm_dss_get_io_mem(pdev, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get io mem for KMS, rc = %d\n", rc);
		return rc;
	}

	rc = msm_dss_get_pmic_io_mem(pdev, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get io mem for pmic, rc:%d\n", rc);
		return rc;
	}

	rc = msm_dss_get_io_irq(pdev, &io_res->irq, GH_IRQ_LABEL_SDE);
	if (rc) {
		SDE_ERROR("failed to get io irq for KMS\n");
		return rc;
	}

	rc = _sde_kms_get_tvm_inclusion_mem(sde_kms->catalog, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get tvm inclusion mem ranges\n");
		return rc;
	}

	return rc;
}
#endif
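/*
 * sde_kms_hw_init - top-level msm_kms hw_init hook. Maps registers, parses
 * the splash reservations, initializes all HW blocks, publishes the mode
 * config limits from the catalog, registers for power events and IRQ
 * affinity notifications, and selects the primary-VM or trusted-VM ops.
 * While a splash display is active the MDP resource votes are deliberately
 * kept enabled; otherwise the init-time runtime PM vote is dropped here.
 */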
static int sde_kms_hw_init(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct platform_device *platformdev;
	int i, irq_num, rc = -EINVAL;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		goto end;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->dev) {
		SDE_ERROR("invalid device\n");
		goto end;
	}

	platformdev = to_platform_device(dev->dev);
	priv = dev->dev_private;
	if (!priv) {
		SDE_ERROR("invalid private data\n");
		goto end;
	}

	rc = _sde_kms_hw_init_ioremap(sde_kms, platformdev);
	if (rc)
		goto error;

	rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
	if (rc)
		SDE_DEBUG("sde splash data fetch failed: %d\n", rc);

	rc = _sde_kms_hw_init_blocks(sde_kms, dev, priv);
	if (rc)
		goto error;

	dev->mode_config.min_width = sde_kms->catalog->min_display_width;
	dev->mode_config.min_height = sde_kms->catalog->min_display_height;
	dev->mode_config.max_width = sde_kms->catalog->max_display_width;
	dev->mode_config.max_height = sde_kms->catalog->max_display_height;

	mutex_init(&sde_kms->secure_transition_lock);
	atomic_set(&sde_kms->detach_sec_cb, 0);
	atomic_set(&sde_kms->detach_all_cb, 0);
	atomic_set(&sde_kms->irq_vote_count, 0);

	/*
	 * Support format modifiers for compression etc.
	 */
	dev->mode_config.allow_fb_modifiers = true;

	/*
	 * Handle (re)initializations during power enable
	 */
	sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
	sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
			SDE_POWER_EVENT_POST_ENABLE |
			SDE_POWER_EVENT_PRE_DISABLE,
			sde_kms_handle_power_event, sde_kms, "kms");

	if (sde_kms->splash_data.num_splash_displays) {
		SDE_DEBUG("Skipping MDP Resources disable\n");
	} else {
		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
			sde_power_data_bus_set_quota(&priv->phandle, i,
				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);

		pm_runtime_put_sync(sde_kms->dev->dev);
	}

	sde_kms->affinity_notify.notify = sde_kms_irq_affinity_notify;
	sde_kms->affinity_notify.release = sde_kms_irq_affinity_release;

	irq_num = platform_get_irq(to_platform_device(sde_kms->dev->dev), 0);
	SDE_DEBUG("Registering for notification of irq_num: %d\n", irq_num);
	irq_set_affinity_notifier(irq_num, &sde_kms->affinity_notify);

	if (sde_in_trusted_vm(sde_kms)) {
		rc = sde_vm_trusted_init(sde_kms);
		sde_dbg_set_hw_ownership_status(false);
	} else {
		rc = sde_vm_primary_init(sde_kms);
		sde_dbg_set_hw_ownership_status(true);
	}
	if (rc) {
		SDE_ERROR("failed to initialize VM ops, rc: %d\n", rc);
		goto error;
	}

	return 0;

error:
	_sde_kms_hw_destroy(sde_kms, platformdev);
end:
	return rc;
}

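/*
 * Editorial summary (not from the original source): sde_kms_hw_init() runs
 * once the DRM device is bound, in roughly this order: ioremap of the MDSS
 * register blocks, splash-data fetch (failure is only logged), HW block
 * init, mode_config limits from the catalog, power-event registration, and
 * finally VM ops init, choosing trusted- or primary-VM behavior.
 */
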
struct msm_kms *sde_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;

	if (!dev || !dev->dev_private) {
		SDE_ERROR("drm device node invalid\n");
		return ERR_PTR(-EINVAL);
	}

	priv = dev->dev_private;

	sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
	if (!sde_kms) {
		SDE_ERROR("failed to allocate sde kms\n");
		return ERR_PTR(-ENOMEM);
	}

	msm_kms_init(&sde_kms->base, &kms_funcs);
	sde_kms->dev = dev;

	return &sde_kms->base;
}

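/*
 * Illustrative sketch only: the msm_drv probe path is expected to create
 * the KMS object here and then drive hw_init through the msm_kms function
 * table. "ddev" and "priv->kms" are assumed names for illustration.
 *
 *	struct msm_kms *kms = sde_kms_init(ddev);
 *
 *	if (IS_ERR(kms))
 *		return PTR_ERR(kms);
 *	priv->kms = kms;
 *	rc = kms->funcs->hw_init(kms);
 */
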
void sde_kms_vm_trusted_resource_deinit(struct sde_kms *sde_kms)
{
	struct dsi_display *display;
	struct sde_splash_display *handoff_display;
	int i;

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		handoff_display = &sde_kms->splash_data.splash_display[i];
		display = (struct dsi_display *)sde_kms->dsi_displays[i];

		if (handoff_display->cont_splash_enabled)
			_sde_kms_free_splash_display_data(sde_kms,
					handoff_display);
		dsi_display_set_active_state(display, false);
	}

	memset(&sde_kms->splash_data, 0, sizeof(struct sde_splash_data));
}

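/*
 * Editorial note: this deinit is the teardown counterpart of
 * sde_kms_vm_trusted_resource_init() below. It drops any splash display
 * data claimed during handoff, deactivates the DSI displays, and clears
 * splash_data so a later handoff starts from a clean state.
 */
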
int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_splash_display *handoff_display;
	struct dsi_display *display;
	int ret, i;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	sde_kms->splash_data.type = SDE_VM_HANDOFF;
	sde_kms->splash_data.num_splash_displays = sde_kms->dsi_display_count;

	ret = sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
			&sde_kms->splash_data, sde_kms->catalog);
	if (ret) {
		SDE_ERROR("invalid cont splash init, ret:%d\n", ret);
		return -EINVAL;
	}

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		handoff_display = &sde_kms->splash_data.splash_display[i];
		display = (struct dsi_display *)sde_kms->dsi_displays[i];

		if (!handoff_display->cont_splash_enabled || ret)
			_sde_kms_free_splash_display_data(sde_kms,
					handoff_display);
		else
			dsi_display_set_active_state(display, true);
	}

	if (sde_kms->splash_data.num_splash_displays != 1) {
		SDE_ERROR("no. of displays not supported:%d\n",
				sde_kms->splash_data.num_splash_displays);
		ret = -EINVAL;
		goto error;
	}

	ret = sde_kms_cont_splash_config(&sde_kms->base, state);
	if (ret) {
		SDE_ERROR("error in setting handoff configs\n");
		goto error;
	}

	/*
	 * Take a runtime PM vote for the continuous splash handoff path;
	 * it is removed on the first successful commit.
	 */
	pm_runtime_get_sync(sde_kms->dev->dev);

	return 0;

error:
	return ret;
}

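/*
 * Illustrative sketch only: the trusted-VM accept path is expected to pair
 * this init with the deinit above as its error/teardown path. The atomic
 * state argument is whatever handoff state the VM layer has built; the
 * pairing shown here is an assumption, not the driver's exact call site.
 *
 *	ret = sde_kms_vm_trusted_resource_init(sde_kms, state);
 *	if (ret)
 *		sde_kms_vm_trusted_resource_deinit(sde_kms);
 */
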
static int _sde_kms_register_events(struct msm_kms *kms,
		struct drm_mode_object *obj, u32 event, bool en)
{
	int ret = 0;
	struct drm_crtc *crtc;
	struct drm_connector *conn;
	struct sde_kms *sde_kms;

	if (!kms || !obj) {
		SDE_ERROR("invalid argument kms %pK obj %pK\n", kms, obj);
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);

	/* check vm ownership, since event registration requires HW access */
	sde_vm_lock(sde_kms);
	if (!sde_vm_owns_hw(sde_kms)) {
		sde_vm_unlock(sde_kms);
		SDE_DEBUG("HW is owned by other VM\n");
		return -EACCES;
	}

	switch (obj->type) {
	case DRM_MODE_OBJECT_CRTC:
		crtc = obj_to_crtc(obj);
		ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
		break;
	case DRM_MODE_OBJECT_CONNECTOR:
		conn = obj_to_connector(obj);
		ret = sde_connector_register_custom_event(sde_kms, conn,
				event, en);
		break;
	}

	sde_vm_unlock(sde_kms);

	return ret;
}

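/*
 * Illustrative sketch only: a custom-event ioctl handler would resolve the
 * DRM object and forward to this hook. The drm_mode_object_find() lookup
 * below is a plausible caller, not this driver's actual wiring.
 *
 *	obj = drm_mode_object_find(dev, file_priv, object_id,
 *			DRM_MODE_OBJECT_ANY);
 *	if (obj) {
 *		ret = _sde_kms_register_events(kms, obj, event, true);
 *		drm_mode_object_put(obj);
 *	}
 */
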
int sde_kms_handle_recovery(struct drm_encoder *encoder)
{
	SDE_EVT32(DRMID(encoder), MSM_ENC_ACTIVE_REGION);
	return sde_encoder_wait_for_event(encoder, MSM_ENC_ACTIVE_REGION);
}

void sde_kms_add_data_to_minidump_va(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;
	struct sde_crtc *sde_crtc;
	struct sde_crtc_state *cstate;
	struct sde_connector *sde_conn;
	struct sde_connector_state *conn_state;
	u32 i;

	priv = sde_kms->dev->dev_private;
	sde_mini_dump_add_va_region("sde_kms", sizeof(*sde_kms), sde_kms);

	for (i = 0; i < priv->num_crtcs; i++) {
		sde_crtc = to_sde_crtc(priv->crtcs[i]);
		cstate = to_sde_crtc_state(priv->crtcs[i]->state);
		sde_mini_dump_add_va_region("sde_crtc", sizeof(*sde_crtc), sde_crtc);
		sde_mini_dump_add_va_region("crtc_state", sizeof(*cstate), cstate);
	}

	for (i = 0; i < priv->num_planes; i++)
		sde_plane_add_data_to_minidump_va(priv->planes[i]);

	for (i = 0; i < priv->num_encoders; i++)
		sde_encoder_add_data_to_minidump_va(priv->encoders[i]);

	for (i = 0; i < priv->num_connectors; i++) {
		sde_conn = to_sde_connector(priv->connectors[i]);
		conn_state = to_sde_connector_state(priv->connectors[i]->state);
		sde_mini_dump_add_va_region("sde_conn", sizeof(*sde_conn), sde_conn);
		sde_mini_dump_add_va_region("conn_state", sizeof(*conn_state), conn_state);
	}
}
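
/*
 * Illustrative sketch only: minidump VA registration is typically refreshed
 * from a debug/panic notifier so the recorded regions track live objects.
 * The notifier and the global "g_sde_kms" below are hypothetical names,
 * not this driver's actual hookup.
 *
 *	static int sde_md_notifier_cb(struct notifier_block *nb,
 *			unsigned long action, void *data)
 *	{
 *		sde_kms_add_data_to_minidump_va(g_sde_kms);
 *		return NOTIFY_OK;
 *	}
 */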