sde_kms.c
/*
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__

#include <drm/drm_crtc.h>
#include <drm/drm_fixed.h>
#include <drm/drm_panel.h>
#include <linux/debugfs.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/dma-buf.h>
#include <linux/memblock.h>
#include <linux/soc/qcom/panel_event_notifier.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_probe_helper.h>

#include "msm_drv.h"
#include "msm_mmu.h"
#include "msm_gem.h"

#include "dsi_display.h"
#include "dsi_drm.h"
#include "sde_wb.h"
#include "dp_display.h"
#include "dp_drm.h"
#include "dp_mst_drm.h"

#include "sde_kms.h"
#include "sde_core_irq.h"
#include "sde_formats.h"
#include "sde_hw_vbif.h"
#include "sde_vbif.h"
#include "sde_encoder.h"
#include "sde_plane.h"
#include "sde_crtc.h"
#include "sde_color_processing.h"
#include "sde_reg_dma.h"
#include "sde_connector.h"
#include "sde_vm.h"

#include <linux/qcom_scm.h>
#include <linux/qcom-iommu-util.h>
#include "soc/qcom/secure_buffer.h"
#include <linux/qtee_shmbridge.h>

#ifdef CONFIG_DRM_SDE_VM
#include <linux/gunyah/gh_irq_lend.h>
#endif

#define CREATE_TRACE_POINTS
#include "sde_trace.h"

/* defines for secure channel call */
#define MEM_PROTECT_SD_CTRL_SWITCH 0x18
#define MDP_DEVICE_ID 0x1A

#define DEMURA_REGION_NAME_MAX 32

EXPORT_TRACEPOINT_SYMBOL(tracing_mark_write);

static const char * const iommu_ports[] = {
	"mdp_0",
};

/**
 * Controls size of event log buffer. Specified as a power of 2.
 */
#define SDE_EVTLOG_SIZE 1024

/*
 * To enable overall DRM driver logging
 * # echo 0x2 > /sys/module/drm/parameters/debug
 *
 * To enable DRM driver h/w logging
 * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
 *
 * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
 */
#define SDE_DEBUGFS_DIR "msm_sde"
#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"

#define SDE_KMS_MODESET_LOCK_TIMEOUT_US 500
#define SDE_KMS_MODESET_LOCK_MAX_TRIALS 20
/**
 * sdecustom - enable certain driver customizations for sde clients
 *	Enabling this modifies the standard DRM behavior slightly and assumes
 *	that the clients have specific knowledge about the modifications that
 *	are involved, so don't enable this unless you know what you're doing.
 *
 *	Parts of the driver that are affected by this setting may be located by
 *	searching for invocations of the 'sde_is_custom_client()' function.
 *
 *	This is enabled by default.
 */
static bool sdecustom = true;
module_param(sdecustom, bool, 0400);
MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
static int sde_kms_hw_init(struct msm_kms *kms);
static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
static int _sde_kms_mmu_init(struct sde_kms *sde_kms);
static int _sde_kms_register_events(struct msm_kms *kms,
		struct drm_mode_object *obj, u32 event, bool en);

bool sde_is_custom_client(void)
{
	return sdecustom;
}
#if IS_ENABLED(CONFIG_DEBUG_FS)
void *sde_debugfs_get_root(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
		return NULL;

	priv = sde_kms->dev->dev_private;
	return priv->debug_root;
}
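/**
 * _sde_debugfs_init - create debugfs nodes (hw_log_mask, vbif, core irq,
 *	perf, rm and qdss) under the drm debug root
 * @sde_kms: Pointer to sde_kms struct
 * Returns: 0 on success, error code otherwise
 */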
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	void *p;
	int rc;
	void *debugfs_root;

	p = sde_hw_util_get_log_mask_ptr();

	if (!sde_kms || !p)
		return -EINVAL;

	debugfs_root = sde_debugfs_get_root(sde_kms);
	if (!debugfs_root)
		return -EINVAL;

	/* debugfs_create_x32 tolerates a NULL parent on its own */
	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME, 0600, debugfs_root, p);

	(void) sde_debugfs_vbif_init(sde_kms, debugfs_root);
	(void) sde_debugfs_core_irq_init(sde_kms, debugfs_root);

	rc = sde_core_perf_debugfs_init(&sde_kms->perf, debugfs_root);
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		return rc;
	}

	sde_rm_debugfs_init(&sde_kms->rm, debugfs_root);

	if (sde_kms->catalog->qdss_count)
		debugfs_create_u32("qdss", 0600, debugfs_root,
				(u32 *)&sde_kms->qdss_enabled);

	debugfs_create_u32("pm_suspend_clk_dump", 0600, debugfs_root,
			(u32 *)&sde_kms->pm_suspend_clk_dump);

	return 0;
}
static void sde_kms_debugfs_destroy(struct msm_kms *kms)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);

	/* don't need to NULL check debugfs_root */
	if (sde_kms) {
		sde_debugfs_vbif_destroy(sde_kms);
		sde_debugfs_core_irq_destroy(sde_kms);
	}
}

static int _sde_kms_dump_clks_state(struct sde_kms *sde_kms)
{
	int i;
	struct device *dev = sde_kms->dev->dev;

	SDE_INFO("runtime PM suspended:%d", pm_runtime_suspended(dev));

	for (i = 0; i < sde_kms->dsi_display_count; i++)
		dsi_display_dump_clks_state(sde_kms->dsi_displays[i]);

	return 0;
}
#else
static int _sde_debugfs_init(struct sde_kms *sde_kms)
{
	return 0;
}

static void sde_kms_debugfs_destroy(struct msm_kms *kms)
{
}

static int _sde_kms_dump_clks_state(struct sde_kms *sde_kms)
{
	return 0;
}
#endif /* CONFIG_DEBUG_FS */
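/**
 * sde_kms_wait_for_frame_transfer_complete - block until the pending frame
 *	transfer is done on every encoder attached to the given crtc; a no-op
 *	for crtcs that are not enabled and active
 * @kms: Pointer to msm_kms struct
 * @crtc: Pointer to drm crtc structure
 */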
static void sde_kms_wait_for_frame_transfer_complete(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;

	if (!kms || !crtc || !crtc->state || !crtc->dev) {
		SDE_ERROR("invalid params\n");
		return;
	}

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enabled\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	dev = crtc->dev;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		if (encoder->crtc != crtc)
			continue;
		/*
		 * Video Mode - Wait for VSYNC
		 * Cmd Mode - Wait for PP_DONE. Will be no-op if transfer is
		 * complete
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR(
				"[crtc: %d][enc: %d] wait for commit done returned %d\n",
				crtc->base.id, encoder->base.id, ret);
			break;
		}
	}
}
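/**
 * _sde_kms_secure_ctrl_xin_clients - halt or release the xin clients that
 *	must be blocked during a secure-UI session, and set up the secure
 *	xin-client state on every plane when enabling
 * @sde_kms: Pointer to sde_kms struct
 * @crtc: Pointer to drm crtc structure
 * @enable: true to halt the masked clients, false to release them
 * Returns: 0 on success, error code otherwise
 */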
static int _sde_kms_secure_ctrl_xin_clients(struct sde_kms *sde_kms,
		struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_mdss_cfg *sde_cfg;
	struct drm_plane *plane;
	int i, ret;

	dev = sde_kms->dev;
	priv = dev->dev_private;
	sde_cfg = sde_kms->catalog;

	ret = sde_vbif_halt_xin_mask(sde_kms,
			sde_cfg->sui_block_xin_mask, enable);
	if (ret) {
		SDE_ERROR("failed to halt some xin-clients, ret:%d\n", ret);
		return ret;
	}

	if (enable) {
		for (i = 0; i < priv->num_planes; i++) {
			plane = priv->planes[i];
			sde_plane_secure_ctrl_xin_client(plane, crtc);
		}
	}

	return 0;
}
/**
 * _sde_kms_scm_call - makes secure channel call to switch the VMIDs
 * @sde_kms: Pointer to sde_kms struct
 * @vmid: switch the stage 2 translation to this VMID
 */
static int _sde_kms_scm_call(struct sde_kms *sde_kms, int vmid)
{
	struct device dummy = {};
	dma_addr_t dma_handle;
	uint32_t num_sids;
	uint32_t *sec_sid;
	struct sde_mdss_cfg *sde_cfg = sde_kms->catalog;
	int ret = 0, i;
	struct qtee_shm shm;
	bool qtee_en = qtee_shmbridge_is_enabled();
	phys_addr_t mem_addr;
	u64 mem_size;

	num_sids = sde_cfg->sec_sid_mask_count;
	if (!num_sids) {
		SDE_ERROR("secure SID masks not configured, vmid 0x%x\n", vmid);
		return -EINVAL;
	}

	if (qtee_en) {
		ret = qtee_shmbridge_allocate_shm(num_sids * sizeof(uint32_t),
				&shm);
		if (ret)
			return -ENOMEM;

		sec_sid = (uint32_t *) shm.vaddr;
		mem_addr = shm.paddr;
		/*
		 * SMMUSecureModeSwitch requires the size to match the number
		 * of SIDs, but shm allocates the size in pages. Modify the
		 * args as per the client requirement.
		 */
		mem_size = sizeof(uint32_t) * num_sids;
	} else {
		sec_sid = kcalloc(num_sids, sizeof(uint32_t), GFP_KERNEL);
		if (!sec_sid)
			return -ENOMEM;

		mem_addr = virt_to_phys(sec_sid);
		mem_size = sizeof(uint32_t) * num_sids;
	}

	for (i = 0; i < num_sids; i++) {
		sec_sid[i] = sde_cfg->sec_sid_mask[i];
		SDE_DEBUG("sid_mask[%d]: %d\n", i, sec_sid[i]);
	}

	ret = dma_coerce_mask_and_coherent(&dummy, DMA_BIT_MASK(64));
	if (ret) {
		SDE_ERROR("Failed to set dma mask for dummy dev %d\n", ret);
		goto map_error;
	}

	set_dma_ops(&dummy, NULL);

	dma_handle = dma_map_single(&dummy, sec_sid,
			num_sids * sizeof(uint32_t), DMA_TO_DEVICE);
	if (dma_mapping_error(&dummy, dma_handle)) {
		SDE_ERROR("dma_map_single for dummy dev failed vmid 0x%x\n",
				vmid);
		goto map_error;
	}

	SDE_DEBUG("calling scm_call for vmid 0x%x, num_sids %d, qtee_en %d",
			vmid, num_sids, qtee_en);

	ret = qcom_scm_mem_protect_sd_ctrl(MDP_DEVICE_ID, mem_addr,
			mem_size, vmid);
	if (ret)
		SDE_ERROR("Error: scm_call2, vmid %d, ret %d\n",
				vmid, ret);

	SDE_EVT32(MEM_PROTECT_SD_CTRL_SWITCH, MDP_DEVICE_ID, mem_size,
			vmid, qtee_en, num_sids, ret);

	dma_unmap_single(&dummy, dma_handle,
			num_sids * sizeof(uint32_t), DMA_TO_DEVICE);

map_error:
	if (qtee_en)
		qtee_shmbridge_free_shm(&shm);
	else
		kfree(sec_sid);

	return ret;
}
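/**
 * _sde_kms_detach_all_cb - detach all smmu context banks and switch the
 *	stage 2 translation to the given VMID; refcounted so that only the
 *	first caller performs the transition
 * @sde_kms: Pointer to sde_kms struct
 * @vmid: VMID to switch the stage 2 translation to
 * Returns: 0 on success, error code otherwise
 */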
static int _sde_kms_detach_all_cb(struct sde_kms *sde_kms, u32 vmid)
{
	int ret;

	if (atomic_inc_return(&sde_kms->detach_all_cb) > 1)
		return 0;

	/* detach_all_contexts */
	ret = sde_kms_mmu_detach(sde_kms, false);
	if (ret) {
		SDE_ERROR("failed to detach all cb ret:%d\n", ret);
		goto mmu_error;
	}

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	return 0;

scm_error:
	sde_kms_mmu_attach(sde_kms, false);
mmu_error:
	atomic_dec(&sde_kms->detach_all_cb);
	return ret;
}
static int _sde_kms_attach_all_cb(struct sde_kms *sde_kms, u32 vmid,
		u32 old_vmid)
{
	int ret;

	if (atomic_dec_return(&sde_kms->detach_all_cb) != 0)
		return 0;

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	/* attach_all_contexts */
	ret = sde_kms_mmu_attach(sde_kms, false);
	if (ret) {
		SDE_ERROR("failed to attach all cb ret:%d\n", ret);
		goto mmu_error;
	}

	return 0;

mmu_error:
	_sde_kms_scm_call(sde_kms, old_vmid);
scm_error:
	atomic_inc(&sde_kms->detach_all_cb);
	return ret;
}
static int _sde_kms_detach_sec_cb(struct sde_kms *sde_kms, int vmid)
{
	int ret;

	if (atomic_inc_return(&sde_kms->detach_sec_cb) > 1)
		return 0;

	/* detach secure_context */
	ret = sde_kms_mmu_detach(sde_kms, true);
	if (ret) {
		SDE_ERROR("failed to detach sec cb ret:%d\n", ret);
		goto mmu_error;
	}

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	return 0;

scm_error:
	sde_kms_mmu_attach(sde_kms, true);
mmu_error:
	atomic_dec(&sde_kms->detach_sec_cb);
	return ret;
}
static int _sde_kms_attach_sec_cb(struct sde_kms *sde_kms, u32 vmid,
		u32 old_vmid)
{
	int ret;

	if (atomic_dec_return(&sde_kms->detach_sec_cb) != 0)
		return 0;

	ret = _sde_kms_scm_call(sde_kms, vmid);
	if (ret) {
		SDE_ERROR("scm call failed for vmid:%d\n", vmid);
		goto scm_error;
	}

	ret = sde_kms_mmu_attach(sde_kms, true);
	if (ret) {
		SDE_ERROR("failed to attach sec cb ret:%d\n", ret);
		goto mmu_error;
	}

	return 0;

mmu_error:
	_sde_kms_scm_call(sde_kms, old_vmid);
scm_error:
	atomic_inc(&sde_kms->detach_sec_cb);
	return ret;
}
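/**
 * _sde_kms_sui_misr_ctrl - set up or tear down MISR collection for a
 *	secure-UI session; takes a runtime PM vote and halts the blocked
 *	xin clients while enabled
 * @sde_kms: Pointer to sde_kms struct
 * @crtc: Pointer to drm crtc structure
 * @enable: true to set up MISR collection, false to tear it down
 * Returns: 0 on success, error code otherwise
 */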
static int _sde_kms_sui_misr_ctrl(struct sde_kms *sde_kms,
		struct drm_crtc *crtc, bool enable)
{
	int ret;

	if (enable) {
		ret = pm_runtime_resume_and_get(sde_kms->dev->dev);
		if (ret < 0) {
			SDE_ERROR("failed to enable power resource %d\n", ret);
			SDE_EVT32(ret, SDE_EVTLOG_ERROR);
			return ret;
		}

		sde_crtc_misr_setup(crtc, true, 1);

		ret = _sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, true);
		if (ret) {
			sde_crtc_misr_setup(crtc, false, 0);
			pm_runtime_put_sync(sde_kms->dev->dev);
			return ret;
		}
	} else {
		_sde_kms_secure_ctrl_xin_clients(sde_kms, crtc, false);
		sde_crtc_misr_setup(crtc, false, 0);
		pm_runtime_put_sync(sde_kms->dev->dev);
	}

	return 0;
}
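/**
 * _sde_kms_secure_ctrl - state machine for the secure smmu transitions;
 *	performs the attach/detach requested in the crtc smmu state, either
 *	before or after the commit depending on the transition type, and
 *	rolls the sw state back on failure
 * @sde_kms: Pointer to sde_kms struct
 * @crtc: Pointer to drm crtc structure
 * @post_commit: true when called after the commit, false before it
 * Returns: 0 on success, error code otherwise
 */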
static int _sde_kms_secure_ctrl(struct sde_kms *sde_kms, struct drm_crtc *crtc,
		bool post_commit)
{
	struct sde_kms_smmu_state_data *smmu_state;
	int old_smmu_state;
	int ret = 0;
	u32 vmid;

	if (!sde_kms || !crtc) {
		SDE_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	smmu_state = &sde_kms->smmu_state;
	old_smmu_state = smmu_state->state;

	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->transition_type,
			post_commit, smmu_state->sui_misr_state,
			smmu_state->secure_level, SDE_EVTLOG_FUNC_ENTRY);

	if ((!smmu_state->transition_type) ||
	    ((smmu_state->transition_type == POST_COMMIT) && !post_commit))
		/* Bail out */
		return 0;

	/* enable sui misr if requested, before the transition */
	if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ) {
		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, true);
		if (ret) {
			smmu_state->sui_misr_state = NONE;
			goto end;
		}
	}

	mutex_lock(&sde_kms->secure_transition_lock);
	switch (smmu_state->state) {
	case DETACH_ALL_REQ:
		ret = _sde_kms_detach_all_cb(sde_kms, VMID_CP_SEC_DISPLAY);
		if (!ret)
			smmu_state->state = DETACHED;
		break;

	case ATTACH_ALL_REQ:
		ret = _sde_kms_attach_all_cb(sde_kms, VMID_CP_PIXEL,
				VMID_CP_SEC_DISPLAY);
		if (!ret) {
			smmu_state->state = ATTACHED;
			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
		}
		break;

	case DETACH_SEC_REQ:
		vmid = (smmu_state->secure_level == SDE_DRM_SEC_ONLY) ?
				VMID_CP_SEC_DISPLAY : VMID_CP_CAMERA_PREVIEW;
		ret = _sde_kms_detach_sec_cb(sde_kms, vmid);
		if (!ret)
			smmu_state->state = DETACHED_SEC;
		break;

	case ATTACH_SEC_REQ:
		vmid = (smmu_state->secure_level == SDE_DRM_SEC_ONLY) ?
				VMID_CP_SEC_DISPLAY : VMID_CP_CAMERA_PREVIEW;
		ret = _sde_kms_attach_sec_cb(sde_kms, VMID_CP_PIXEL, vmid);
		if (!ret) {
			smmu_state->state = ATTACHED;
			smmu_state->secure_level = SDE_DRM_SEC_NON_SEC;
		}
		break;

	default:
		SDE_ERROR("crtc%d: invalid smmu state %d transition type %d\n",
				DRMID(crtc), smmu_state->state,
				smmu_state->transition_type);
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&sde_kms->secure_transition_lock);

	/* disable sui misr if requested, after the transition */
	if (!ret && (smmu_state->sui_misr_state == SUI_MISR_DISABLE_REQ)) {
		ret = _sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
		if (ret)
			goto end;
	}

end:
	smmu_state->transition_error = false;
	if (ret) {
		smmu_state->transition_error = true;
		SDE_ERROR(
			"crtc%d: req_state %d, new_state %d, sec_lvl %d, ret %d\n",
			DRMID(crtc), old_smmu_state, smmu_state->state,
			smmu_state->secure_level, ret);
		smmu_state->state = smmu_state->prev_state;
		smmu_state->secure_level = smmu_state->prev_secure_level;
		if (smmu_state->sui_misr_state == SUI_MISR_ENABLE_REQ)
			_sde_kms_sui_misr_ctrl(sde_kms, crtc, false);
	}

	SDE_DEBUG("crtc %d: req_state %d, new_state %d, sec_lvl %d, ret %d\n",
			DRMID(crtc), old_smmu_state, smmu_state->state,
			smmu_state->secure_level, ret);
	SDE_EVT32(DRMID(crtc), smmu_state->state, smmu_state->prev_state,
			smmu_state->transition_type,
			smmu_state->transition_error,
			smmu_state->secure_level, smmu_state->prev_secure_level,
			smmu_state->sui_misr_state, ret, SDE_EVTLOG_FUNC_EXIT);

	smmu_state->sui_misr_state = NONE;
	smmu_state->transition_type = NONE;

	return ret;
}
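/**
 * sde_kms_prepare_secure_transition - prepare the active crtc for a secure
 *	transition: wait for the frame transfer, clean up or re-prepare the
 *	plane fbs, and drive the smmu attach/detach requested by the crtc
 *	state
 * @kms: Pointer to msm_kms struct
 * @state: Pointer to the atomic state being committed
 * Returns: 0 on success, error code otherwise
 */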
static int sde_kms_prepare_secure_transition(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_plane_state *old_plane_state, *new_plane_state;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	int i, ops = 0, ret = 0;
	bool old_valid_fb = false;
	struct sde_kms_smmu_state_data *smmu_state = &sde_kms->smmu_state;

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		if (!crtc->state || !crtc->state->active)
			continue;
		/*
		 * It is safe to assume only one active crtc, with compatible
		 * translation modes on the planes staged on it; otherwise
		 * validation would have failed. For this crtc:
		 */

		/*
		 * 1. Check if the old state on the crtc has planes staged
		 * with valid fbs
		 */
		for_each_old_plane_in_state(state, plane, plane_state, i) {
			if (!plane_state->crtc)
				continue;
			if (plane_state->fb) {
				old_valid_fb = true;
				break;
			}
		}

		/*
		 * 2. Get the operations that need to be performed before the
		 * secure transition can be initiated.
		 */
		ops = sde_crtc_get_secure_transition_ops(crtc,
				old_crtc_state, old_valid_fb);
		if (ops < 0) {
			SDE_ERROR("invalid secure operations %x\n", ops);
			return ops;
		}

		if (!ops) {
			smmu_state->transition_error = false;
			goto no_ops;
		}

		SDE_DEBUG("%d:secure operations(%x) started on state:%pK\n",
				crtc->base.id, ops, crtc->state);
		SDE_EVT32(DRMID(crtc), ops, crtc->state, old_valid_fb);

		/* 3. Perform operations needed for secure transition */
		if (ops & SDE_KMS_OPS_WAIT_FOR_TX_DONE) {
			SDE_DEBUG("wait_for_transfer_done\n");
			sde_kms_wait_for_frame_transfer_complete(kms, crtc);
		}
		if (ops & SDE_KMS_OPS_CLEANUP_PLANE_FB) {
			SDE_DEBUG("cleanup planes\n");
			drm_atomic_helper_cleanup_planes(dev, state);
			for_each_oldnew_plane_in_state(state, plane,
					old_plane_state, new_plane_state, i)
				sde_plane_destroy_fb(old_plane_state);
		}
		if (ops & SDE_KMS_OPS_SECURE_STATE_CHANGE) {
			SDE_DEBUG("secure ctrl\n");
			_sde_kms_secure_ctrl(sde_kms, crtc, false);
		}
		if (ops & SDE_KMS_OPS_PREPARE_PLANE_FB) {
			SDE_DEBUG("prepare planes %d\n",
					crtc->state->plane_mask);
			drm_atomic_crtc_for_each_plane(plane, crtc) {
				const struct drm_plane_helper_funcs *funcs;

				plane_state = plane->state;
				funcs = plane->helper_private;

				SDE_DEBUG("psde:%d FB[%u]\n",
						plane->base.id,
						plane->fb->base.id);
				if (!funcs)
					continue;

				if (funcs->prepare_fb) {
					ret = funcs->prepare_fb(plane,
							plane_state);
					if (ret)
						return ret;
				}
			}
		}
		SDE_EVT32(DRMID(crtc), SDE_EVTLOG_FUNC_EXIT);
		SDE_DEBUG("secure operations completed\n");
	}

no_ops:
	return 0;
}
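/**
 * _sde_kms_release_shared_buffer - release the reserved splash memory back
 *	to the page allocator, skipping any leading region reused for ramdump
 * @mem_addr: physical base of the splash buffer
 * @splash_buffer_size: size of the splash buffer
 * @ramdump_base: physical base of the ramdump region
 * @ramdump_buffer_size: size of the ramdump region
 * Returns: 0 on success, error code otherwise
 */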
static int _sde_kms_release_shared_buffer(unsigned int mem_addr,
		unsigned int splash_buffer_size,
		unsigned int ramdump_base,
		unsigned int ramdump_buffer_size)
{
	unsigned long pfn_start, pfn_end, pfn_idx;
	int ret = 0;

	if (!mem_addr || !splash_buffer_size) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	/* leave ramdump memory only if base address matches */
	if (ramdump_base == mem_addr &&
			ramdump_buffer_size <= splash_buffer_size) {
		mem_addr += ramdump_buffer_size;
		splash_buffer_size -= ramdump_buffer_size;
	}

	pfn_start = mem_addr >> PAGE_SHIFT;
	pfn_end = (mem_addr + splash_buffer_size) >> PAGE_SHIFT;

	for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
		free_reserved_page(pfn_to_page(pfn_idx));

	return ret;
}
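/**
 * _sde_kms_splash_mem_get - one-to-one map the splash region into the
 *	default aspace smmu on first use, and bump its reference count
 * @sde_kms: Pointer to sde_kms struct
 * @splash: Pointer to the splash memory region
 * Returns: 0 on success, error code otherwise
 */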
static int _sde_kms_splash_mem_get(struct sde_kms *sde_kms,
		struct sde_splash_mem *splash)
{
	struct msm_mmu *mmu = NULL;
	int ret = 0;

	if (!sde_kms->aspace[0]) {
		SDE_ERROR("aspace not found for sde kms node\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[0]->mmu;
	if (!mmu) {
		SDE_ERROR("mmu not found for aspace\n");
		return -EINVAL;
	}

	if (!splash || !mmu->funcs || !mmu->funcs->one_to_one_map) {
		SDE_ERROR("invalid input params for map\n");
		return -EINVAL;
	}

	if (!splash->ref_cnt) {
		ret = mmu->funcs->one_to_one_map(mmu, splash->splash_buf_base,
				splash->splash_buf_base,
				splash->splash_buf_size,
				IOMMU_READ | IOMMU_NOEXEC);
		if (ret)
			SDE_ERROR("splash memory smmu map failed:%d\n", ret);
	}

	splash->ref_cnt++;
	SDE_DEBUG("one2one mapping done for base:%lx size:%x ref_cnt:%d\n",
			splash->splash_buf_base,
			splash->splash_buf_size,
			splash->ref_cnt);

	return ret;
}
static int _sde_kms_map_all_splash_regions(struct sde_kms *sde_kms)
{
	int i = 0;
	int ret = 0;
	struct sde_splash_mem *region;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) {
		region = sde_kms->splash_data.splash_display[i].splash;
		ret = _sde_kms_splash_mem_get(sde_kms, region);
		if (ret)
			return ret;

		/* Demura is optional and need not exist */
		region = sde_kms->splash_data.splash_display[i].demura;
		if (region) {
			ret = _sde_kms_splash_mem_get(sde_kms, region);
			if (ret)
				return ret;
		}
	}

	return ret;
}
static int _sde_kms_splash_mem_put(struct sde_kms *sde_kms,
		struct sde_splash_mem *splash)
{
	struct msm_mmu *mmu = NULL;
	int rc = 0;

	if (!sde_kms || !sde_kms->aspace[0] || !sde_kms->aspace[0]->mmu) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	mmu = sde_kms->aspace[0]->mmu;

	if (!splash || !splash->ref_cnt ||
			!mmu || !mmu->funcs || !mmu->funcs->one_to_one_unmap)
		return -EINVAL;

	splash->ref_cnt--;
	SDE_DEBUG("splash base:%lx refcnt:%d\n",
			splash->splash_buf_base, splash->ref_cnt);

	if (!splash->ref_cnt) {
		mmu->funcs->one_to_one_unmap(mmu, splash->splash_buf_base,
				splash->splash_buf_size);
		rc = _sde_kms_release_shared_buffer(splash->splash_buf_base,
				splash->splash_buf_size, splash->ramdump_base,
				splash->ramdump_size);
		splash->splash_buf_base = 0;
		splash->splash_buf_size = 0;
	}

	return rc;
}
static int _sde_kms_unmap_all_splash_regions(struct sde_kms *sde_kms)
{
	int i = 0;
	int ret = 0, failure = 0;
	struct sde_splash_mem *region;

	if (!sde_kms || !sde_kms->splash_data.num_splash_regions)
		return -EINVAL;

	for (i = 0; i < sde_kms->splash_data.num_splash_displays; i++) {
		region = sde_kms->splash_data.splash_display[i].splash;
		ret = _sde_kms_splash_mem_put(sde_kms, region);
		if (ret) {
			failure = 1;
			pr_err("Error unmapping splash mem for display %d\n",
					i);
		}

		/* Demura is optional and need not exist */
		region = sde_kms->splash_data.splash_display[i].demura;
		if (region) {
			ret = _sde_kms_splash_mem_put(sde_kms, region);
			if (ret) {
				failure = 1;
				pr_err("Error unmapping demura mem for display %d\n",
						i);
			}
		}
	}

	if (failure)
		ret = -EINVAL;

	return ret;
}
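/**
 * _sde_kms_get_blank - map the connector low-power mode property onto the
 *	drm panel event (unblank, lp blank or full blank) to be notified
 * @crtc_state: Pointer to the crtc state
 * @conn_state: Pointer to the connector state
 * Returns: the DRM_PANEL_EVENT_* code for the new state
 */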
static int _sde_kms_get_blank(struct drm_crtc_state *crtc_state,
		struct drm_connector_state *conn_state)
{
	int lp_mode, blank;

	if (crtc_state->active)
		lp_mode = sde_connector_get_property(conn_state,
				CONNECTOR_PROP_LP);
	else
		lp_mode = SDE_MODE_DPMS_OFF;

	switch (lp_mode) {
	case SDE_MODE_DPMS_ON:
		blank = DRM_PANEL_EVENT_UNBLANK;
		break;
	case SDE_MODE_DPMS_LP1:
	case SDE_MODE_DPMS_LP2:
		blank = DRM_PANEL_EVENT_BLANK_LP;
		break;
	case SDE_MODE_DPMS_OFF:
	default:
		blank = DRM_PANEL_EVENT_BLANK;
		break;
	}

	return blank;
}
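/**
 * _sde_kms_drm_check_dpms - walk the connectors in the old state and send a
 *	panel event notification whenever the power mode or fps of the
 *	attached crtc changes in this commit
 * @old_state: Pointer to the atomic state being committed
 * @is_pre_commit: true when invoked before the commit, false after
 */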
static void _sde_kms_drm_check_dpms(struct drm_atomic_state *old_state,
		bool is_pre_commit)
{
	struct panel_event_notification notification;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	struct sde_connector *c_conn;
	int i, old_mode, new_mode, old_fps, new_fps;
	enum panel_event_notifier_tag panel_type;

	for_each_old_connector_in_state(old_state, connector,
			old_conn_state, i) {
		crtc = connector->state->crtc ? connector->state->crtc :
				old_conn_state->crtc;
		if (!crtc)
			continue;

		new_fps = drm_mode_vrefresh(&crtc->state->mode);
		new_mode = _sde_kms_get_blank(crtc->state, connector->state);
		if (old_conn_state->crtc) {
			old_crtc_state = drm_atomic_get_existing_crtc_state(
					old_state, old_conn_state->crtc);

			old_fps = drm_mode_vrefresh(&old_crtc_state->mode);
			old_mode = _sde_kms_get_blank(old_crtc_state,
					old_conn_state);
		} else {
			old_fps = 0;
			old_mode = DRM_PANEL_EVENT_BLANK;
		}

		if ((old_mode != new_mode) || (old_fps != new_fps)) {
			c_conn = to_sde_connector(connector);
			SDE_EVT32(old_mode, new_mode, old_fps, new_fps,
					c_conn->panel, crtc->state->active,
					old_conn_state->crtc);
			pr_debug("change detected for connector:%s (power mode %d->%d, fps %d->%d)\n",
					c_conn->name, old_mode, new_mode, old_fps, new_fps);

			/*
			 * If a suspend/resume and an fps change happen at the
			 * same time, give preference to the power mode change
			 * rather than the fps change.
			 */
			if ((old_mode == new_mode) && (old_fps != new_fps))
				new_mode = DRM_PANEL_EVENT_FPS_CHANGE;

			if (!c_conn->panel)
				continue;

			panel_type = sde_encoder_is_primary_display(
					connector->encoder) ?
					PANEL_EVENT_NOTIFICATION_PRIMARY :
					PANEL_EVENT_NOTIFICATION_SECONDARY;

			notification.notif_type = new_mode;
			notification.panel = c_conn->panel;
			notification.notif_data.old_fps = old_fps;
			notification.notif_data.new_fps = new_fps;
			notification.notif_data.early_trigger = is_pre_commit;
			panel_event_notification_trigger(panel_type,
					&notification);
		}
	}
}
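/**
 * sde_kms_vm_get_vm_crtc - find the crtc in the atomic state that carries a
 *	valid VM request (acquire/release) property
 * @state: Pointer to the atomic state
 * Returns: the crtc with a VM request, or NULL when none is pending
 */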
static struct drm_crtc *sde_kms_vm_get_vm_crtc(
		struct drm_atomic_state *state)
{
	int i;
	enum sde_crtc_vm_req vm_req = VM_REQ_NONE;
	struct drm_crtc *crtc, *vm_crtc = NULL;
	struct drm_crtc_state *new_cstate, *old_cstate;
	struct sde_crtc_state *vm_cstate;

	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		if (!new_cstate->active && !old_cstate->active)
			continue;

		vm_cstate = to_sde_crtc_state(new_cstate);
		vm_req = sde_crtc_get_property(vm_cstate,
				CRTC_PROP_VM_REQ_STATE);
		if (vm_req != VM_REQ_NONE) {
			SDE_DEBUG("valid vm request:%d found on crtc-%d\n",
					vm_req, crtc->base.id);
			vm_crtc = crtc;
			break;
		}
	}

	return vm_crtc;
}
int sde_kms_vm_primary_prepare_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *ddev;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct sde_vm_ops *vm_ops;
	struct sde_crtc_state *cstate;
	struct drm_connector_list_iter iter;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	ddev = sde_kms->dev;

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return -EINVAL;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return 0;

	/* enable MDSS irq line */
	sde_irq_update(&sde_kms->base, true);

	/* clear the stale IRQ status bits */
	if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);

	/* enable the display path IRQs */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_irq_control(encoder, true);
	}

	/* Schedule ESD work */
	drm_connector_list_iter_begin(ddev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		if (drm_connector_mask(connector) & crtc->state->connector_mask)
			sde_connector_schedule_status_work(connector, true);
	drm_connector_list_iter_end(&iter);

	/* enable vblank events */
	drm_crtc_vblank_on(crtc);

	sde_dbg_set_hw_ownership_status(true);

	/* handle non-SDE pre_acquire */
	if (vm_ops->vm_client_post_acquire)
		rc = vm_ops->vm_client_post_acquire(sde_kms);

	return rc;
}
int sde_kms_vm_trusted_prepare_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *ddev;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct sde_crtc_state *cstate;
	enum sde_crtc_vm_req vm_req;

	ddev = sde_kms->dev;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return 0;

	/* Clear the stale IRQ status bits */
	if (sde_kms->hw_intr && sde_kms->hw_intr->ops.clear_all_irqs)
		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);

	/* Program the SIDs for the trusted VM */
	list_for_each_entry(plane, &ddev->mode_config.plane_list, head)
		sde_plane_set_sid(plane, 1);

	sde_hw_set_lutdma_sid(sde_kms->hw_sid, 1);

	sde_dbg_set_hw_ownership_status(true);

	return 0;
}
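/**
 * sde_kms_prepare_commit - kms hook invoked before the atomic commit is
 *	pushed to hardware; votes for power resources, arms hw reset handling
 *	on encoder timeouts, and runs the secure-transition and VM prepare
 *	steps
 * @kms: Pointer to msm_kms struct
 * @state: Pointer to the atomic state being committed
 */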
static void sde_kms_prepare_commit(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_device *dev;
	struct drm_encoder *encoder;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct sde_vm_ops *vm_ops;
	int i, rc;

	if (!kms)
		return;

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->dev_private)
		return;

	priv = dev->dev_private;

	SDE_ATRACE_BEGIN("prepare_commit");
	rc = pm_runtime_resume_and_get(sde_kms->dev->dev);
	if (rc < 0) {
		SDE_ERROR("failed to enable power resources %d\n", rc);
		SDE_EVT32(rc, SDE_EVTLOG_ERROR);
		goto end;
	}

	if (sde_kms->first_kickoff) {
		sde_power_scale_reg_bus(&priv->phandle, VOTE_INDEX_HIGH, false);
		sde_kms->first_kickoff = false;
	}

	for_each_new_crtc_in_state(state, crtc, cstate, i) {
		drm_for_each_encoder_mask(encoder, dev, cstate->encoder_mask) {
			if (sde_encoder_prepare_commit(encoder) == -ETIMEDOUT) {
				SDE_ERROR("crtc:%d, initiating hw reset\n",
						DRMID(crtc));
				sde_encoder_needs_hw_reset(encoder);
				sde_crtc_set_needs_hw_reset(crtc);
			}
		}
	}

	/*
	 * NOTE: for secure use cases we want to apply the new HW
	 * configuration only after completing the secure-transition
	 * preparation below, if any transition is required.
	 */
	sde_kms_prepare_secure_transition(kms, state);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		goto end_vm;

	if (vm_ops->vm_prepare_commit)
		vm_ops->vm_prepare_commit(sde_kms, state);

end_vm:
	_sde_kms_drm_check_dpms(state, true);
end:
	SDE_ATRACE_END("prepare_commit");
}
static void sde_kms_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state)
		return;

	sde_kms = to_sde_kms(kms);

	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_commit");
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active) {
			SDE_EVT32(DRMID(crtc), old_state);
			sde_crtc_commit_kickoff(crtc, old_crtc_state);
		}
	}
	SDE_ATRACE_END("sde_kms_commit");
}
static void _sde_kms_free_splash_display_data(struct sde_kms *sde_kms,
		struct sde_splash_display *splash_display)
{
	if (!sde_kms || !splash_display ||
			!sde_kms->splash_data.num_splash_displays)
		return;

	if (sde_kms->splash_data.num_splash_regions) {
		_sde_kms_splash_mem_put(sde_kms, splash_display->splash);
		if (splash_display->demura)
			_sde_kms_splash_mem_put(sde_kms,
					splash_display->demura);
	}

	sde_kms->splash_data.num_splash_displays--;
	SDE_DEBUG("cont_splash handoff done, remaining:%d\n",
			sde_kms->splash_data.num_splash_displays);

	memset(splash_display, 0x0, sizeof(struct sde_splash_display));
}
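/**
 * _sde_kms_release_splash_resource - hand over the continuous-splash
 *	resources held for the given crtc, and drop the splash bus and power
 *	votes once every splash display is done
 * @sde_kms: Pointer to sde_kms struct
 * @crtc: Pointer to drm crtc structure
 */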
static void _sde_kms_release_splash_resource(struct sde_kms *sde_kms,
		struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;
	struct sde_splash_display *splash_display;
	int i;

	if (!sde_kms || !crtc)
		return;

	priv = sde_kms->dev->dev_private;

	if (!crtc->state->active || !sde_kms->splash_data.num_splash_displays)
		return;

	SDE_EVT32(DRMID(crtc), crtc->state->active,
			sde_kms->splash_data.num_splash_displays);

	for (i = 0; i < MAX_DSI_DISPLAYS; i++) {
		splash_display = &sde_kms->splash_data.splash_display[i];
		if (splash_display->encoder &&
				crtc == splash_display->encoder->crtc)
			break;
	}

	if (i >= MAX_DSI_DISPLAYS)
		return;

	if (splash_display->cont_splash_enabled) {
		sde_encoder_update_caps_for_cont_splash(splash_display->encoder,
				splash_display, false);
		_sde_kms_free_splash_display_data(sde_kms, splash_display);
	}

	/* remove the votes if all displays are done with splash */
	if (!sde_kms->splash_data.num_splash_displays) {
		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
			sde_power_data_bus_set_quota(&priv->phandle, i,
					SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
					priv->phandle.ib_quota[i] ? priv->phandle.ib_quota[i] :
					SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);

		pm_runtime_put_sync(sde_kms->dev->dev);
	}
}
static void sde_kms_cancel_delayed_work(struct drm_crtc *crtc)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;
	struct drm_encoder *encoder;

	/* Cancel CRTC work */
	sde_crtc_cancel_delayed_work(crtc);

	/* Cancel ESD work */
	drm_connector_list_iter_begin(crtc->dev, &iter);
	drm_for_each_connector_iter(connector, &iter)
		if (drm_connector_mask(connector) & crtc->state->connector_mask)
			sde_connector_schedule_status_work(connector, false);
	drm_connector_list_iter_end(&iter);

	/* Cancel Idle-PC work */
	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_cancel_delayed_work(encoder);
	}
}
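/**
 * sde_kms_vm_pre_release - quiesce the display pipeline before HW ownership
 * is released to another VM: wait for the in-flight frame transfer, cancel
 * all deferred work, and disable encoder IRQs (plus vblank and CRTC sw
 * state when releasing the primary display).
 * @sde_kms: Pointer to sde kms structure
 * @state: Pointer to the atomic state being committed
 * @is_primary: True when releasing the primary display
 * Returns: Zero on success
 */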
int sde_kms_vm_pre_release(struct sde_kms *sde_kms,
		struct drm_atomic_state *state, bool is_primary)
{
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	int rc = 0;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	/* if vm_req is enabled, a single active CRTC on the commit is guaranteed */
	sde_kms_wait_for_frame_transfer_complete(&sde_kms->base, crtc);

	sde_dbg_set_hw_ownership_status(false);

	sde_kms_cancel_delayed_work(crtc);

	/* disable SDE encoder irq's */
	drm_for_each_encoder_mask(encoder, crtc->dev,
			crtc->state->encoder_mask) {
		if (sde_encoder_in_clone_mode(encoder))
			continue;

		sde_encoder_irq_control(encoder, false);
	}

	if (is_primary) {
		/* disable vblank events */
		drm_crtc_vblank_off(crtc);

		/* reset sw state */
		sde_crtc_reset_sw_state(crtc);
	}

	return rc;
}
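/**
 * sde_kms_vm_trusted_post_commit - post-commit handling on the trusted VM;
 * on a VM_REQ_RELEASE commit, clears the plane and LUTDMA SIDs and releases
 * HW ownership back to the primary VM.
 * @sde_kms: Pointer to sde kms structure
 * @state: Pointer to the committed atomic state
 * Returns: Zero on success, error code on VM release failure
 */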
int sde_kms_vm_trusted_post_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct sde_vm_ops *vm_ops;
	struct drm_device *ddev;
	struct drm_crtc *crtc;
	struct drm_plane *plane;
	struct sde_crtc_state *cstate;
	struct drm_crtc_state *new_cstate;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	if (!sde_kms || !sde_vm_is_enabled(sde_kms))
		return -EINVAL;

	vm_ops = sde_vm_get_ops(sde_kms);
	ddev = sde_kms->dev;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_RELEASE)
		return 0;

	sde_kms_vm_pre_release(sde_kms, state, false);

	list_for_each_entry(plane, &ddev->mode_config.plane_list, head)
		sde_plane_set_sid(plane, 0);

	sde_hw_set_lutdma_sid(sde_kms->hw_sid, 0);

	sde_vm_lock(sde_kms);

	if (vm_ops->vm_release)
		rc = vm_ops->vm_release(sde_kms);

	sde_vm_unlock(sde_kms);

	return rc;
}
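/**
 * sde_kms_vm_primary_post_commit - post-commit handling on the primary VM;
 * on a VM_REQ_RELEASE commit, quiesces SDE and non-SDE client SW state,
 * disables the IRQ line, and releases HW ownership to the trusted VM.
 * @sde_kms: Pointer to sde kms structure
 * @state: Pointer to the committed atomic state
 * Returns: Zero on success, error code on failure
 */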
int sde_kms_vm_primary_post_commit(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct sde_vm_ops *vm_ops;
	struct sde_crtc_state *cstate;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	enum sde_crtc_vm_req vm_req;
	int rc = 0;

	if (!sde_kms || !sde_vm_is_enabled(sde_kms))
		return -EINVAL;

	vm_ops = sde_vm_get_ops(sde_kms);

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return 0;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_RELEASE)
		return 0;

	/* handle SDE pre-release */
	rc = sde_kms_vm_pre_release(sde_kms, state, true);
	if (rc) {
		SDE_ERROR("sde vm pre_release failed, rc=%d\n", rc);
		goto exit;
	}

	/* properly handoff color processing features */
	sde_cp_crtc_vm_primary_handoff(crtc);

	sde_vm_lock(sde_kms);

	/* handle non-SDE clients pre-release */
	if (vm_ops->vm_client_pre_release) {
		rc = vm_ops->vm_client_pre_release(sde_kms);
		if (rc) {
			SDE_ERROR("sde vm client pre_release failed, rc=%d\n",
					rc);
			sde_vm_unlock(sde_kms);
			goto exit;
		}
	}

	/* disable IRQ line */
	sde_irq_update(&sde_kms->base, false);

	/* release HW */
	if (vm_ops->vm_release) {
		rc = vm_ops->vm_release(sde_kms);
		if (rc)
			SDE_ERROR("sde vm release failed, rc=%d\n", rc);
	}

	sde_vm_unlock(sde_kms);

	_sde_crtc_vm_release_notify(crtc);

exit:
	return rc;
}
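/*
 * sde_kms_complete_commit - finish the atomic commit: complete CRTC
 * commits and any pending secure transition, run connector post-kickoff
 * hooks, hand off to the VM layer, and drop the power vote taken in
 * prepare_commit (plus any remaining cont-splash resources).
 */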
static void sde_kms_complete_commit(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct sde_kms *sde_kms;
	struct msm_drm_private *priv;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct drm_connector *connector;
	struct drm_connector_state *old_conn_state;
	struct msm_display_conn_params params;
	struct sde_vm_ops *vm_ops;
	int i, rc = 0;

	if (!kms || !old_state)
		return;
	sde_kms = to_sde_kms(kms);

	if (!sde_kms->dev || !sde_kms->dev->dev_private)
		return;
	priv = sde_kms->dev->dev_private;

	if (!sde_kms_power_resource_is_enabled(sde_kms->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_complete_commit");
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		sde_crtc_complete_commit(crtc, old_crtc_state);

		/* complete secure transitions if any */
		if (sde_kms->smmu_state.transition_type == POST_COMMIT)
			_sde_kms_secure_ctrl(sde_kms, crtc, true);
	}

	for_each_old_connector_in_state(old_state, connector,
			old_conn_state, i) {
		struct sde_connector *c_conn;

		c_conn = to_sde_connector(connector);
		if (!c_conn->ops.post_kickoff)
			continue;

		memset(&params, 0, sizeof(params));

		sde_connector_complete_qsync_commit(connector, &params);

		rc = c_conn->ops.post_kickoff(connector, &params);
		if (rc)
			pr_err("Connector Post kickoff failed rc=%d\n", rc);
	}

	vm_ops = sde_vm_get_ops(sde_kms);
	if (vm_ops && vm_ops->vm_post_commit) {
		rc = vm_ops->vm_post_commit(sde_kms, old_state);
		if (rc)
			SDE_ERROR("vm post commit failed, rc = %d\n", rc);
	}

	_sde_kms_drm_check_dpms(old_state, false);

	pm_runtime_put_sync(sde_kms->dev->dev);

	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
		_sde_kms_release_splash_resource(sde_kms, crtc);

	SDE_EVT32_VERBOSE(SDE_EVTLOG_FUNC_EXIT);
	SDE_ATRACE_END("sde_kms_complete_commit");
}
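/*
 * sde_kms_wait_for_commit_done - block until the frame for the given CRTC
 * has been flushed to the display (TX complete for a disabling CWB encoder,
 * commit done otherwise), requesting a frame reset on timeout.
 */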
static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
		struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev;
	int ret;
	bool cwb_disabling;

	if (!kms || !crtc || !crtc->state) {
		SDE_ERROR("invalid params\n");
		return;
	}

	dev = crtc->dev;

	if (!crtc->state->enable) {
		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
		return;
	}

	if (!crtc->state->active) {
		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
		return;
	}

	if (!sde_kms_power_resource_is_enabled(crtc->dev)) {
		SDE_ERROR("power resource is not enabled\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_wait_for_commit_done");
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		cwb_disabling = false;
		if (encoder->crtc != crtc) {
			cwb_disabling = sde_encoder_is_cwb_disabling(encoder,
					crtc);
			if (!cwb_disabling)
				continue;
		}

		/*
		 * Wait for post-flush if necessary to delay before
		 * plane_cleanup. For example, wait for vsync in case of video
		 * mode panels. This may be a no-op for command mode panels.
		 */
		SDE_EVT32_VERBOSE(DRMID(crtc));
		ret = sde_encoder_wait_for_event(encoder, cwb_disabling ?
				MSM_ENC_TX_COMPLETE : MSM_ENC_COMMIT_DONE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR("wait for commit done returned %d\n", ret);
			sde_crtc_request_frame_reset(crtc, encoder);
			break;
		}

		sde_crtc_complete_flip(crtc, NULL);

		if (cwb_disabling)
			sde_encoder_virt_reset(encoder);
	}

	sde_crtc_static_cache_read_kickoff(crtc);

	SDE_ATRACE_END("sde_kms_wait_for_commit_done");
}
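/*
 * sde_kms_prepare_fence - prepare release/retire fences for every CRTC
 * that is active (or changing active state) in the commit.
 */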
static void sde_kms_prepare_fence(struct msm_kms *kms,
		struct drm_atomic_state *old_state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
		SDE_ERROR("invalid argument(s)\n");
		return;
	}

	SDE_ATRACE_BEGIN("sde_kms_prepare_fence");

	/* old_state actually contains updated crtc pointers */
	for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		if (crtc->state->active || crtc->state->active_changed)
			sde_crtc_prepare_commit(crtc, old_crtc_state);
	}

	SDE_ATRACE_END("sde_kms_prepare_fence");
}
/**
 * _sde_kms_get_displays - query for underlying display handles and cache them
 * @sde_kms:    Pointer to sde kms structure
 * Returns:     Zero on success
 */
static int _sde_kms_get_displays(struct sde_kms *sde_kms)
{
	int rc = -ENOMEM;

	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return -EINVAL;
	}

	/* dsi */
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
	if (sde_kms->dsi_display_count) {
		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->dsi_displays) {
			SDE_ERROR("failed to allocate dsi displays\n");
			goto exit_deinit_dsi;
		}
		sde_kms->dsi_display_count =
			dsi_display_get_active_displays(sde_kms->dsi_displays,
					sde_kms->dsi_display_count);
	}

	/* wb */
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
	if (sde_kms->wb_display_count) {
		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
				sizeof(void *),
				GFP_KERNEL);
		if (!sde_kms->wb_displays) {
			SDE_ERROR("failed to allocate wb displays\n");
			goto exit_deinit_wb;
		}
		sde_kms->wb_display_count =
			wb_display_get_displays(sde_kms->wb_displays,
					sde_kms->wb_display_count);
	}

	/* dp */
	sde_kms->dp_displays = NULL;
	sde_kms->dp_display_count = dp_display_get_num_of_displays();
	if (sde_kms->dp_display_count) {
		sde_kms->dp_displays = kcalloc(sde_kms->dp_display_count,
				sizeof(void *), GFP_KERNEL);
		if (!sde_kms->dp_displays) {
			SDE_ERROR("failed to allocate dp displays\n");
			goto exit_deinit_dp;
		}
		sde_kms->dp_display_count =
			dp_display_get_displays(sde_kms->dp_displays,
					sde_kms->dp_display_count);
		sde_kms->dp_stream_count = dp_display_get_num_of_streams();
	}

	return 0;

exit_deinit_dp:
	kfree(sde_kms->dp_displays);
	sde_kms->dp_stream_count = 0;
	sde_kms->dp_display_count = 0;
	sde_kms->dp_displays = NULL;

exit_deinit_wb:
	kfree(sde_kms->wb_displays);
	sde_kms->wb_display_count = 0;
	sde_kms->wb_displays = NULL;

exit_deinit_dsi:
	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_display_count = 0;
	sde_kms->dsi_displays = NULL;

	return rc;
}
/**
 * _sde_kms_release_displays - release cache of underlying display handles
 * @sde_kms:    Pointer to sde kms structure
 */
static void _sde_kms_release_displays(struct sde_kms *sde_kms)
{
	if (!sde_kms) {
		SDE_ERROR("invalid sde kms\n");
		return;
	}

	/* release dp handles as well; they are allocated in _sde_kms_get_displays() */
	kfree(sde_kms->dp_displays);
	sde_kms->dp_displays = NULL;
	sde_kms->dp_display_count = 0;
	sde_kms->dp_stream_count = 0;

	kfree(sde_kms->wb_displays);
	sde_kms->wb_displays = NULL;
	sde_kms->wb_display_count = 0;

	kfree(sde_kms->dsi_displays);
	sde_kms->dsi_displays = NULL;
	sde_kms->dsi_display_count = 0;
}
/**
 * _sde_kms_setup_displays - create encoders, bridges and connectors
 *                           for underlying displays
 * @dev:        Pointer to drm device structure
 * @priv:       Pointer to private drm device data
 * @sde_kms:    Pointer to sde kms structure
 * Returns:     Zero on success
 */
static int _sde_kms_setup_displays(struct drm_device *dev,
		struct msm_drm_private *priv,
		struct sde_kms *sde_kms)
{
	static const struct sde_connector_ops dsi_ops = {
		.set_info_blob = dsi_conn_set_info_blob,
		.detect = dsi_conn_detect,
		.get_modes = dsi_connector_get_modes,
		.pre_destroy = dsi_connector_put_modes,
		.mode_valid = dsi_conn_mode_valid,
		.get_info = dsi_display_get_info,
		.set_backlight = dsi_display_set_backlight,
		.soft_reset = dsi_display_soft_reset,
		.pre_kickoff = dsi_conn_pre_kickoff,
		.clk_ctrl = dsi_display_clk_ctrl,
		.set_power = dsi_display_set_power,
		.get_mode_info = dsi_conn_get_mode_info,
		.get_dst_format = dsi_display_get_dst_format,
		.post_kickoff = dsi_conn_post_kickoff,
		.check_status = dsi_display_check_status,
		.enable_event = dsi_conn_enable_event,
		.cmd_transfer = dsi_display_cmd_transfer,
		.cont_splash_config = dsi_display_cont_splash_config,
		.cont_splash_res_disable = dsi_display_cont_splash_res_disable,
		.get_panel_vfp = dsi_display_get_panel_vfp,
		.get_default_lms = dsi_display_get_default_lms,
		.cmd_receive = dsi_display_cmd_receive,
		.install_properties = NULL,
		.set_allowed_mode_switch = dsi_conn_set_allowed_mode_switch,
		.set_dyn_bit_clk = dsi_conn_set_dyn_bit_clk,
		.get_qsync_min_fps = dsi_conn_get_qsync_min_fps,
		.get_avr_step_req = dsi_display_get_avr_step_req_fps,
		.prepare_commit = dsi_conn_prepare_commit,
		.set_submode_info = dsi_conn_set_submode_blob_info,
		.get_num_lm_from_mode = dsi_conn_get_lm_from_mode,
	};
	static const struct sde_connector_ops wb_ops = {
		.post_init = sde_wb_connector_post_init,
		.set_info_blob = sde_wb_connector_set_info_blob,
		.detect = sde_wb_connector_detect,
		.get_modes = sde_wb_connector_get_modes,
		.set_property = sde_wb_connector_set_property,
		.get_info = sde_wb_get_info,
		.soft_reset = NULL,
		.get_mode_info = sde_wb_get_mode_info,
		.get_dst_format = NULL,
		.check_status = NULL,
		.cmd_transfer = NULL,
		.cont_splash_config = NULL,
		.cont_splash_res_disable = NULL,
		.get_panel_vfp = NULL,
		.cmd_receive = NULL,
		.install_properties = NULL,
		.set_dyn_bit_clk = NULL,
		.set_allowed_mode_switch = NULL,
	};
	static const struct sde_connector_ops dp_ops = {
		.post_init = dp_connector_post_init,
		.detect = dp_connector_detect,
		.get_modes = dp_connector_get_modes,
		.atomic_check = dp_connector_atomic_check,
		.mode_valid = dp_connector_mode_valid,
		.get_info = dp_connector_get_info,
		.get_mode_info = dp_connector_get_mode_info,
		.post_open = dp_connector_post_open,
		.check_status = NULL,
		.set_colorspace = dp_connector_set_colorspace,
		.config_hdr = dp_connector_config_hdr,
		.cmd_transfer = NULL,
		.cont_splash_config = NULL,
		.cont_splash_res_disable = NULL,
		.get_panel_vfp = NULL,
		.update_pps = dp_connector_update_pps,
		.cmd_receive = NULL,
		.install_properties = dp_connector_install_properties,
		.set_allowed_mode_switch = NULL,
		.set_dyn_bit_clk = NULL,
	};
	struct msm_display_info info;
	struct drm_encoder *encoder;
	void *display, *connector;
	int i, max_encoders;
	int rc = 0;
	u32 dsc_count = 0, mixer_count = 0;
	u32 max_dp_dsc_count, max_dp_mixer_count;

	if (!dev || !priv || !sde_kms) {
		SDE_ERROR("invalid argument(s)\n");
		return -EINVAL;
	}

	max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count +
			sde_kms->dp_display_count +
			sde_kms->dp_stream_count;
	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
		max_encoders = ARRAY_SIZE(priv->encoders);
		SDE_ERROR("capping number of displays to %d\n", max_encoders);
	}
	/* wb */
	for (i = 0; i < sde_kms->wb_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->wb_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = sde_wb_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("wb get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for wb %d\n", i);
			continue;
		}

		rc = sde_wb_drm_init(display, encoder);
		if (rc) {
			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				0,
				display,
				&wb_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_VIRTUAL);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("wb %d connector init failed\n", i);
			sde_wb_drm_deinit(display);
			sde_encoder_destroy(encoder);
		}
	}
	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		display = sde_kms->dsi_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("encoder init failed for dsi %d\n", i);
			continue;
		}

		rc = dsi_display_drm_bridge_init(display, encoder);
		if (rc) {
			SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				dsi_display_get_drm_panel(display),
				display,
				&dsi_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_DSI);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("dsi %d connector init failed\n", i);
			dsi_display_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
			continue;
		}

		rc = dsi_display_drm_ext_bridge_init(display,
				encoder, connector);
		if (rc) {
			SDE_ERROR("dsi %d ext bridge init failed, rc=%d\n", i, rc);
			dsi_display_drm_bridge_deinit(display);
			sde_connector_destroy(connector);
			sde_encoder_destroy(encoder);
		}

		dsc_count += info.dsc_count;
		mixer_count += info.lm_count;

		if (dsi_display_has_dsc_switch_support(display))
			sde_kms->dsc_switch_support = true;
	}

	if (sde_kms->catalog->allowed_dsc_reservation_switch &&
			!sde_kms->dsc_switch_support) {
		SDE_DEBUG("dsc switch not supported\n");
		sde_kms->catalog->allowed_dsc_reservation_switch = 0;
	}
	max_dp_mixer_count = sde_kms->catalog->mixer_count > mixer_count ?
			sde_kms->catalog->mixer_count - mixer_count : 0;
	max_dp_dsc_count = sde_kms->catalog->dsc_count > dsc_count ?
			sde_kms->catalog->dsc_count - dsc_count : 0;
	if (sde_kms->catalog->allowed_dsc_reservation_switch &
			SDE_DP_DSC_RESERVATION_SWITCH)
		max_dp_dsc_count = sde_kms->catalog->dsc_count;

	/* dp */
	for (i = 0; i < sde_kms->dp_display_count &&
			priv->num_encoders < max_encoders; ++i) {
		int idx;

		display = sde_kms->dp_displays[i];
		encoder = NULL;

		memset(&info, 0x0, sizeof(info));
		rc = dp_connector_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dp get_info %d failed\n", i);
			continue;
		}

		encoder = sde_encoder_init(dev, &info);
		if (IS_ERR_OR_NULL(encoder)) {
			SDE_ERROR("dp encoder init failed %d\n", i);
			continue;
		}

		rc = dp_drm_bridge_init(display, encoder,
				max_dp_mixer_count, max_dp_dsc_count);
		if (rc) {
			SDE_ERROR("dp bridge %d init failed, %d\n", i, rc);
			sde_encoder_destroy(encoder);
			continue;
		}

		connector = sde_connector_init(dev,
				encoder,
				NULL,
				display,
				&dp_ops,
				DRM_CONNECTOR_POLL_HPD,
				DRM_MODE_CONNECTOR_DisplayPort);
		if (connector) {
			priv->encoders[priv->num_encoders++] = encoder;
			priv->connectors[priv->num_connectors++] = connector;
		} else {
			SDE_ERROR("dp %d connector init failed\n", i);
			dp_drm_bridge_deinit(display);
			sde_encoder_destroy(encoder);
		}

		/* update display cap to MST_MODE for DP MST encoders */
		info.capabilities |= MSM_DISPLAY_CAP_MST_MODE;

		for (idx = 0; idx < sde_kms->dp_stream_count &&
				priv->num_encoders < max_encoders; idx++) {
			info.h_tile_instance[0] = idx;
			encoder = sde_encoder_init(dev, &info);
			if (IS_ERR_OR_NULL(encoder)) {
				SDE_ERROR("dp mst encoder init failed %d\n", i);
				continue;
			}

			rc = dp_mst_drm_bridge_init(display, encoder);
			if (rc) {
				SDE_ERROR("dp mst bridge %d init failed, %d\n",
						i, rc);
				sde_encoder_destroy(encoder);
				continue;
			}
			priv->encoders[priv->num_encoders++] = encoder;
		}
	}

	return 0;
}
static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;
	int i;

	if (!sde_kms) {
		SDE_ERROR("invalid sde_kms\n");
		return;
	} else if (!sde_kms->dev) {
		SDE_ERROR("invalid dev\n");
		return;
	} else if (!sde_kms->dev->dev_private) {
		SDE_ERROR("invalid dev_private\n");
		return;
	}
	priv = sde_kms->dev->dev_private;

	for (i = 0; i < priv->num_crtcs; i++)
		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
	priv->num_crtcs = 0;

	for (i = 0; i < priv->num_planes; i++)
		priv->planes[i]->funcs->destroy(priv->planes[i]);
	priv->num_planes = 0;

	for (i = 0; i < priv->num_connectors; i++)
		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
	priv->num_connectors = 0;

	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
	priv->num_encoders = 0;

	_sde_kms_release_displays(sde_kms);
}
static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
{
	struct drm_device *dev;
	struct drm_plane *primary_planes[MAX_PLANES], *plane;
	struct drm_crtc *crtc;
	struct msm_drm_private *priv;
	struct sde_mdss_cfg *catalog;
	int primary_planes_idx = 0, i, ret;
	int max_crtc_count;
	u32 sspp_id[MAX_PLANES];
	u32 master_plane_id[MAX_PLANES];
	u32 num_virt_planes = 0;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	catalog = sde_kms->catalog;

	ret = sde_core_irq_domain_add(sde_kms);
	if (ret)
		goto fail_irq;

	/*
	 * Query for underlying display drivers, and create connectors,
	 * bridges and encoders for them.
	 */
	if (!_sde_kms_get_displays(sde_kms))
		(void)_sde_kms_setup_displays(dev, priv, sde_kms);

	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);

	/* Create the planes */
	for (i = 0; i < catalog->sspp_count; i++) {
		bool primary = true;

		if (primary_planes_idx >= max_crtc_count)
			primary = false;

		plane = sde_plane_init(dev, catalog->sspp[i].id, primary,
				(1UL << max_crtc_count) - 1, 0);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane_init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;

		if (primary)
			primary_planes[primary_planes_idx++] = plane;

		if (sde_hw_sspp_multirect_enabled(&catalog->sspp[i]) &&
				sde_is_custom_client()) {
			int priority =
				catalog->sspp[i].sblk->smart_dma_priority;
			sspp_id[priority - 1] = catalog->sspp[i].id;
			master_plane_id[priority - 1] = plane->base.id;
			num_virt_planes++;
		}
	}

	/* Initialize smart DMA virtual planes */
	for (i = 0; i < num_virt_planes; i++) {
		plane = sde_plane_init(dev, sspp_id[i], false,
				(1UL << max_crtc_count) - 1, master_plane_id[i]);
		if (IS_ERR(plane)) {
			SDE_ERROR("sde_plane for virtual SSPP init failed\n");
			ret = PTR_ERR(plane);
			goto fail;
		}
		priv->planes[priv->num_planes++] = plane;
	}

	max_crtc_count = min(max_crtc_count, primary_planes_idx);

	/* Create one CRTC per encoder */
	for (i = 0; i < max_crtc_count; i++) {
		crtc = sde_crtc_init(dev, primary_planes[i]);
		if (IS_ERR(crtc)) {
			ret = PTR_ERR(crtc);
			goto fail;
		}
		priv->crtcs[priv->num_crtcs++] = crtc;
	}

	if (sde_is_custom_client()) {
		/* All CRTCs are compatible with all planes */
		for (i = 0; i < priv->num_planes; i++)
			priv->planes[i]->possible_crtcs =
				(1 << priv->num_crtcs) - 1;
	}

	/* All CRTCs are compatible with all encoders */
	for (i = 0; i < priv->num_encoders; i++)
		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;

	return 0;
fail:
	_sde_kms_drm_obj_destroy(sde_kms);
fail_irq:
	sde_core_irq_domain_fini(sde_kms);
	return ret;
}
/**
 * sde_kms_timeline_status - provides current timeline status
 * This API should be called without mode config lock.
 * @dev: Pointer to drm device
 */
void sde_kms_timeline_status(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;

	if (!dev) {
		SDE_ERROR("invalid drm device node\n");
		return;
	}

	drm_for_each_crtc(crtc, dev)
		sde_crtc_timeline_status(crtc);

	if (mutex_is_locked(&dev->mode_config.mutex)) {
		/*
		 * Probably locked from last close, dumping status anyway
		 */
		SDE_ERROR("dumping conn_timeline without mode_config lock\n");
		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(conn, &conn_iter)
			sde_conn_timeline_status(conn);
		drm_connector_list_iter_end(&conn_iter);
		return;
	}

	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter)
		sde_conn_timeline_status(conn);
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);
}
static int sde_kms_postinit(struct msm_kms *kms)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev;
	struct drm_crtc *crtc;
	int rc;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
		SDE_ERROR("invalid sde_kms\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;

	rc = _sde_debugfs_init(sde_kms);
	if (rc)
		SDE_ERROR("sde_debugfs init failed: %d\n", rc);

	drm_for_each_crtc(crtc, dev)
		sde_crtc_post_init(dev, crtc);

	return rc;
}
static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
		struct platform_device *pdev)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_vm_ops *vm_ops;
	int i;

	if (!sde_kms || !pdev)
		return;

	dev = sde_kms->dev;
	if (!dev)
		return;

	priv = dev->dev_private;
	if (!priv)
		return;

	if (sde_kms->genpd_init) {
		sde_kms->genpd_init = false;
		pm_genpd_remove(&sde_kms->genpd);
		of_genpd_del_provider(pdev->dev.of_node);
	}

	vm_ops = sde_vm_get_ops(sde_kms);
	if (vm_ops && vm_ops->vm_deinit)
		vm_ops->vm_deinit(sde_kms, vm_ops);

	if (sde_kms->hw_intr)
		sde_hw_intr_destroy(sde_kms->hw_intr);
	sde_kms->hw_intr = NULL;

	if (sde_kms->power_event)
		sde_power_handle_unregister_event(
				&priv->phandle, sde_kms->power_event);

	_sde_kms_release_displays(sde_kms);

	_sde_kms_unmap_all_splash_regions(sde_kms);

	if (sde_kms->catalog) {
		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
			u32 vbif_idx = sde_kms->catalog->vbif[i].id;

			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
		}
	}

	if (sde_kms->rm_init)
		sde_rm_destroy(&sde_kms->rm);
	sde_kms->rm_init = false;

	if (sde_kms->catalog)
		sde_hw_catalog_deinit(sde_kms->catalog);
	sde_kms->catalog = NULL;

	if (sde_kms->sid)
		msm_iounmap(pdev, sde_kms->sid);
	sde_kms->sid = NULL;

	if (sde_kms->reg_dma)
		msm_iounmap(pdev, sde_kms->reg_dma);
	sde_kms->reg_dma = NULL;

	if (sde_kms->vbif[VBIF_NRT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
	sde_kms->vbif[VBIF_NRT] = NULL;

	if (sde_kms->vbif[VBIF_RT])
		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
	sde_kms->vbif[VBIF_RT] = NULL;

	if (sde_kms->mmio)
		msm_iounmap(pdev, sde_kms->mmio);
	sde_kms->mmio = NULL;

	sde_reg_dma_deinit();
	_sde_kms_mmu_destroy(sde_kms);
}
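/**
 * sde_kms_mmu_detach - detach IOMMU domains from the SDE address spaces,
 * flushing GEM address-space state first; with secure_only set, only
 * secure domains are detached.
 * @sde_kms: Pointer to sde kms structure
 * @secure_only: True to detach only the secure SMMU domains
 * Returns: Zero on success
 */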
int sde_kms_mmu_detach(struct sde_kms *sde_kms, bool secure_only)
{
	int i;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_mmu *mmu;
		struct msm_gem_address_space *aspace = sde_kms->aspace[i];

		if (!aspace)
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		if (secure_only &&
				!aspace->mmu->funcs->is_domain_secure(mmu))
			continue;

		/* cleanup aspace before detaching */
		msm_gem_aspace_domain_attach_detach_update(aspace, true);

		SDE_DEBUG("Detaching domain:%d\n", i);
		aspace->mmu->funcs->detach(mmu, (const char **)iommu_ports,
				ARRAY_SIZE(iommu_ports));

		aspace->domain_attached = false;
	}

	return 0;
}
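/**
 * sde_kms_mmu_attach - re-attach IOMMU domains to the SDE address spaces
 * and refresh GEM address-space state afterwards; with secure_only set,
 * only secure domains are attached.
 * @sde_kms: Pointer to sde kms structure
 * @secure_only: True to attach only the secure SMMU domains
 * Returns: Zero on success
 */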
int sde_kms_mmu_attach(struct sde_kms *sde_kms, bool secure_only)
{
	int i;

	if (!sde_kms)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_mmu *mmu;
		struct msm_gem_address_space *aspace = sde_kms->aspace[i];

		if (!aspace)
			continue;

		mmu = sde_kms->aspace[i]->mmu;

		if (secure_only &&
				!aspace->mmu->funcs->is_domain_secure(mmu))
			continue;

		SDE_DEBUG("Attaching domain:%d\n", i);
		aspace->mmu->funcs->attach(mmu, (const char **)iommu_ports,
				ARRAY_SIZE(iommu_ports));

		aspace->domain_attached = true;
		msm_gem_aspace_domain_attach_detach_update(aspace, false);
	}

	return 0;
}
static void sde_kms_destroy(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->dev) {
		SDE_ERROR("invalid device\n");
		return;
	}

	_sde_kms_hw_destroy(sde_kms, to_platform_device(dev->dev));
	kfree(sde_kms);
}
static void sde_kms_helper_clear_dim_layers(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	struct drm_crtc_state *crtc_state = NULL;
	struct sde_crtc_state *c_state;

	if (!state || !crtc) {
		SDE_ERROR("invalid params\n");
		return;
	}

	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	c_state = to_sde_crtc_state(crtc_state);

	_sde_crtc_clear_dim_layers_v1(crtc_state);
	set_bit(SDE_CRTC_DIRTY_DIM_LAYERS, c_state->dirty);
}
static int sde_kms_set_crtc_for_conn(struct drm_device *dev,
		struct drm_encoder *enc, struct drm_atomic_state *state)
{
	struct drm_connector *conn = NULL;
	struct drm_connector *tmp_conn = NULL;
	struct drm_connector_list_iter conn_iter;
	struct drm_crtc_state *crtc_state = NULL;
	struct drm_connector_state *conn_state = NULL;
	int ret = 0;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(tmp_conn, &conn_iter) {
		if (enc == tmp_conn->state->best_encoder) {
			conn = tmp_conn;
			break;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	if (!conn || !enc->crtc) {
		SDE_ERROR("invalid params for enc:%d\n", DRMID(enc));
		return -EINVAL;
	}

	crtc_state = drm_atomic_get_crtc_state(state, enc->crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		SDE_ERROR("error %d getting crtc %d state\n",
				ret, DRMID(enc->crtc));
		return ret;
	}

	conn_state = drm_atomic_get_connector_state(state, conn);
	if (IS_ERR(conn_state)) {
		ret = PTR_ERR(conn_state);
		SDE_ERROR("error %d getting connector %d state\n",
				ret, DRMID(conn));
		return ret;
	}

	crtc_state->active = true;
	ret = drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
	if (ret)
		SDE_ERROR("error %d setting the crtc\n", ret);

	return ret;
}
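/*
 * _sde_kms_plane_force_remove - detach a plane from its CRTC and drop its
 * framebuffer reference in the given atomic state, so that a follow-up
 * commit disables it.
 */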
static void _sde_kms_plane_force_remove(struct drm_plane *plane,
		struct drm_atomic_state *state)
{
	struct drm_plane_state *plane_state;
	int ret = 0;

	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		SDE_ERROR("error %d getting plane %d state\n",
				ret, plane->base.id);
		return;
	}

	plane->old_fb = plane->fb;

	SDE_DEBUG("disabling plane %d\n", plane->base.id);

	ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
	if (ret != 0)
		SDE_ERROR("error %d disabling plane %d\n", ret,
				plane->base.id);

	drm_atomic_set_fb_for_plane(plane_state, NULL);
}
static int _sde_kms_remove_fbs(struct sde_kms *sde_kms, struct drm_file *file,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_framebuffer *fb, *tfb;
	struct list_head fbs;
	struct drm_plane *plane;
	struct drm_crtc *crtc = NULL;
	unsigned int crtc_mask = 0;
	int ret = 0;

	INIT_LIST_HEAD(&fbs);

	list_for_each_entry_safe(fb, tfb, &file->fbs, filp_head) {
		if (drm_framebuffer_read_refcount(fb) > 1) {
			list_move_tail(&fb->filp_head, &fbs);

			drm_for_each_plane(plane, dev) {
				if (plane->state && plane->state->fb == fb) {
					if (plane->state->crtc)
						crtc_mask |= drm_crtc_mask(plane->state->crtc);
					_sde_kms_plane_force_remove(plane, state);
				}
			}
		} else {
			list_del_init(&fb->filp_head);
			drm_framebuffer_put(fb);
		}
	}

	if (list_empty(&fbs)) {
		SDE_DEBUG("skip commit as no fb(s)\n");
		return 0;
	}

	drm_for_each_crtc(crtc, dev) {
		if ((crtc_mask & drm_crtc_mask(crtc)) && crtc->state->active) {
			struct drm_encoder *drm_enc;

			drm_for_each_encoder_mask(drm_enc, crtc->dev,
					crtc->state->encoder_mask) {
				ret = sde_kms_set_crtc_for_conn(dev, drm_enc, state);
				if (ret)
					goto error;
			}

			sde_kms_helper_clear_dim_layers(state, crtc);
		}
	}

	SDE_EVT32(state, crtc_mask);
	SDE_DEBUG("null commit after removing all the pipes\n");
	ret = drm_atomic_commit(state);

error:
	if (ret) {
		/*
		 * move the fbs back to original list, so it would be
		 * handled during drm_release
		 */
		list_for_each_entry_safe(fb, tfb, &fbs, filp_head)
			list_move_tail(&fb->filp_head, &file->fbs);

		if (ret == -EDEADLK || ret == -ERESTARTSYS)
			SDE_DEBUG("atomic commit failed in preclose, ret:%d\n", ret);
		else
			SDE_ERROR("atomic commit failed in preclose, ret:%d\n", ret);
		goto end;
	}

	while (!list_empty(&fbs)) {
		fb = list_first_entry(&fbs, typeof(*fb), filp_head);

		list_del_init(&fb->filp_head);
		drm_framebuffer_put(fb);
	}

end:
	return ret;
}
static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
{
	struct sde_kms *sde_kms = to_sde_kms(kms);
	struct drm_device *dev = sde_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	unsigned int i;
	struct drm_atomic_state *state = NULL;
	struct drm_modeset_acquire_ctx ctx;
	int ret = 0;

	/* cancel pending flip event */
	for (i = 0; i < priv->num_crtcs; i++)
		sde_crtc_complete_flip(priv->crtcs[i], file);

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto end;
	}

	state->acquire_ctx = &ctx;

	for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
		ret = _sde_kms_remove_fbs(sde_kms, file, state);
		if (ret != -EDEADLK && ret != -ERESTARTSYS)
			break;
		drm_atomic_state_clear(state);
		drm_modeset_backoff(&ctx);
	}

end:
	if (state)
		drm_atomic_state_put(state);

	SDE_DEBUG("sde preclose done, ret:%d\n", ret);
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
static int _sde_kms_helper_reset_custom_properties(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *dev = sde_kms->dev;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *conn;
	struct drm_connector_state *conn_state;
	struct drm_connector_list_iter conn_iter;
	int ret = 0;

	drm_for_each_plane(plane, dev) {
		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			SDE_ERROR("error %d getting plane %d state\n",
					ret, DRMID(plane));
			return ret;
		}

		ret = sde_plane_helper_reset_custom_properties(plane,
				plane_state);
		if (ret) {
			SDE_ERROR("error %d resetting plane props %d\n",
					ret, DRMID(plane));
			return ret;
		}
	}

	drm_for_each_crtc(crtc, dev) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state)) {
			ret = PTR_ERR(crtc_state);
			SDE_ERROR("error %d getting crtc %d state\n",
					ret, DRMID(crtc));
			return ret;
		}

		ret = sde_crtc_helper_reset_custom_properties(crtc, crtc_state);
		if (ret) {
			SDE_ERROR("error %d resetting crtc props %d\n",
					ret, DRMID(crtc));
			return ret;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		conn_state = drm_atomic_get_connector_state(state, conn);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			SDE_ERROR("error %d getting connector %d state\n",
					ret, DRMID(conn));
			return ret;
		}

		ret = sde_connector_helper_reset_custom_properties(conn,
				conn_state);
		if (ret) {
			SDE_ERROR("error %d resetting connector props %d\n",
					ret, DRMID(conn));
			return ret;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
static void sde_kms_lastclose(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (!kms) {
		SDE_ERROR("invalid argument\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	drm_modeset_acquire_init(&ctx, 0);

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_ctx;
	}

	state->acquire_ctx = &ctx;
	SDE_EVT32(SDE_EVTLOG_FUNC_ENTRY);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret)
		goto out_state;

	ret = _sde_kms_helper_reset_custom_properties(sde_kms, state);
	if (ret)
		goto out_state;

	ret = drm_atomic_commit(state);

out_state:
	if (ret == -EDEADLK)
		goto backoff;

	drm_atomic_state_put(state);

out_ctx:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	if (ret)
		SDE_ERROR("kms lastclose failed: %d\n", ret);

	SDE_EVT32(ret, SDE_EVTLOG_FUNC_EXIT);
	return;

backoff:
	drm_atomic_state_clear(state);
	drm_modeset_backoff(&ctx);
	SDE_EVT32(ret, SDE_EVTLOG_FUNC_CASE1);

	goto retry;
}
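/*
 * _sde_kms_validate_vm_request - enforce the trusted-VM commit rules:
 * at most the configured number of active displays across the commit and
 * the global state, a bounded encoder count with idle-pc disabled on
 * release, and a HW acquire when this VM does not yet own the hardware.
 */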
static int _sde_kms_validate_vm_request(struct drm_atomic_state *state, struct sde_kms *sde_kms,
		enum sde_crtc_vm_req vm_req, bool vm_owns_hw)
{
	struct drm_crtc *crtc, *active_crtc = NULL, *global_active_crtc = NULL;
	struct drm_crtc_state *new_cstate, *old_cstate, *active_cstate = NULL;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *new_connstate;
	struct sde_vm_ops *vm_ops = sde_vm_get_ops(sde_kms);
	struct sde_mdss_cfg *catalog = sde_kms->catalog;
	struct sde_connector *sde_conn;
	struct dsi_display *dsi_display;
	uint32_t i, commit_crtc_cnt = 0, global_crtc_cnt = 0;
	uint32_t crtc_encoder_cnt = 0;
	enum sde_crtc_idle_pc_state idle_pc_state = IDLE_PC_NONE;
	int rc = 0;

	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		struct sde_crtc_state *new_state = NULL;

		if (!new_cstate->active && !old_cstate->active)
			continue;

		new_state = to_sde_crtc_state(new_cstate);
		idle_pc_state = sde_crtc_get_property(new_state, CRTC_PROP_IDLE_PC_STATE);
		active_crtc = crtc;
		active_cstate = new_cstate;
		commit_crtc_cnt++;
	}

	list_for_each_entry(crtc, &sde_kms->dev->mode_config.crtc_list, head) {
		if (!crtc->state->active)
			continue;

		global_crtc_cnt++;
		global_active_crtc = crtc;
	}

	if (active_crtc) {
		drm_for_each_encoder_mask(encoder, active_crtc->dev, active_cstate->encoder_mask)
			crtc_encoder_cnt++;
	}

	for_each_new_connector_in_state(state, connector, new_connstate, i) {
		int conn_mask = active_cstate ? active_cstate->connector_mask : 0;

		if (drm_connector_mask(connector) & conn_mask) {
			sde_conn = to_sde_connector(connector);
			dsi_display = (struct dsi_display *) sde_conn->display;

			SDE_EVT32(DRMID(connector), DRMID(active_crtc), i, dsi_display->type,
					dsi_display->trusted_vm_env);
			SDE_DEBUG("VM display:%s, conn:%d, crtc:%d, type:%d, tvm:%d\n",
					dsi_display->name, DRMID(connector), DRMID(active_crtc),
					dsi_display->type, dsi_display->trusted_vm_env);
			break;
		}
	}

	/* Check for single crtc commits only on valid VM requests */
	if (active_crtc && global_active_crtc &&
			(commit_crtc_cnt > catalog->max_trusted_vm_displays ||
			global_crtc_cnt > catalog->max_trusted_vm_displays ||
			active_crtc != global_active_crtc)) {
		SDE_ERROR("VM switch failed; MAX:%d a_cnt:%d g_cnt:%d a_crtc:%d g_crtc:%d\n",
				catalog->max_trusted_vm_displays, commit_crtc_cnt, global_crtc_cnt,
				DRMID(active_crtc), DRMID(global_active_crtc));
		return -E2BIG;
	} else if ((vm_req == VM_REQ_RELEASE) &&
			((idle_pc_state == IDLE_PC_ENABLE) ||
			(crtc_encoder_cnt > TRUSTED_VM_MAX_ENCODER_PER_CRTC))) {
		/*
		 * disable idle-pc before releasing the HW;
		 * allow only the specified number of encoders on a given crtc
		 */
		SDE_ERROR("VM switch failed; idle-pc:%d max:%d encoder_cnt:%d\n",
				idle_pc_state, TRUSTED_VM_MAX_ENCODER_PER_CRTC, crtc_encoder_cnt);
		return -EINVAL;
	}

	if ((vm_req == VM_REQ_ACQUIRE) && !vm_owns_hw) {
		rc = vm_ops->vm_acquire(sde_kms);
		if (rc) {
			SDE_ERROR("VM acquire failed; hw_owner:%d, rc:%d\n", vm_owns_hw, rc);
			return rc;
		}

		if (vm_ops->vm_resource_init)
			rc = vm_ops->vm_resource_init(sde_kms, state);
	}

	return rc;
}
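/*
 * sde_kms_check_vm_request - scan the atomic state for trusted-VM request
 * properties and, under the VM lock, validate the requested transition
 * against the current HW ownership.
 */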
static int sde_kms_check_vm_request(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate, *old_cstate;
	struct sde_vm_ops *vm_ops;
	enum sde_crtc_vm_req old_vm_req = VM_REQ_NONE, new_vm_req = VM_REQ_NONE;
	int i, rc = 0;
	bool vm_req_active = false, prev_vm_req = false;
	bool vm_owns_hw = false;

	if (!kms || !state)
		return -EINVAL;

	sde_kms = to_sde_kms(kms);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return 0;

	if (!vm_ops->vm_request_valid || !vm_ops->vm_owns_hw || !vm_ops->vm_acquire)
		return -EINVAL;

	drm_for_each_crtc(crtc, state->dev) {
		if (crtc->state && (sde_crtc_get_property(to_sde_crtc_state(crtc->state),
				CRTC_PROP_VM_REQ_STATE) == VM_REQ_RELEASE)) {
			prev_vm_req = true;
			break;
		}
	}

	/* check for an active vm request */
	for_each_oldnew_crtc_in_state(state, crtc, old_cstate, new_cstate, i) {
		struct sde_crtc_state *old_state = NULL, *new_state = NULL;

		if (!new_cstate->active && !old_cstate->active)
			continue;

		new_state = to_sde_crtc_state(new_cstate);
		new_vm_req = sde_crtc_get_property(new_state, CRTC_PROP_VM_REQ_STATE);

		old_state = to_sde_crtc_state(old_cstate);
		old_vm_req = sde_crtc_get_property(old_state, CRTC_PROP_VM_REQ_STATE);

		/*
		 * A VM request should be validated in the following usecases:
		 * - There is a vm request (other than VM_REQ_NONE) on the
		 *   current/previous crtc state.
		 * - Previously, a vm transition has taken place on one of the crtcs.
		 */
		if (old_vm_req || new_vm_req || prev_vm_req) {
			if (!vm_req_active) {
				sde_vm_lock(sde_kms);
				vm_owns_hw = sde_vm_owns_hw(sde_kms);
			}

			rc = vm_ops->vm_request_valid(sde_kms, old_vm_req, new_vm_req);
			if (rc) {
				SDE_ERROR(
					"VM transition check failed; o_state:%d, n_state:%d, hw_owner:%d, rc:%d\n",
					old_vm_req, new_vm_req, vm_owns_hw, rc);
				sde_vm_unlock(sde_kms);
				vm_req_active = false;
				break;
			} else if (old_vm_req == VM_REQ_ACQUIRE && new_vm_req == VM_REQ_NONE) {
				SDE_DEBUG("VM transition valid; ignore further checks\n");
				if (!vm_req_active)
					sde_vm_unlock(sde_kms);
			} else {
				vm_req_active = true;
			}
		}
	}

	/* validate active requests and perform acquire if necessary */
	if (vm_req_active) {
		rc = _sde_kms_validate_vm_request(state, sde_kms, new_vm_req, vm_owns_hw);

		sde_vm_unlock(sde_kms);

		SDE_EVT32(old_vm_req, new_vm_req, vm_req_active, vm_owns_hw, rc);
		SDE_DEBUG("VM o_state:%d, n_state:%d, hw_owner:%d, rc:%d\n", old_vm_req, new_vm_req,
				vm_req_active ? vm_owns_hw : -1, rc);
	}

	return rc;
}
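/*
 * sde_kms_check_secure_transition - allow at most one active CRTC while a
 * secure-camera/secure-ui (SEC_DIR_TRANS) session is running, and reject
 * commits on a different CRTC than the one holding the secure session.
 */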
static int sde_kms_check_secure_transition(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct drm_crtc *crtc;
	struct drm_crtc *cur_crtc = NULL, *global_crtc = NULL;
	struct drm_crtc_state *crtc_state;
	int active_crtc_cnt = 0, global_active_crtc_cnt = 0;
	bool sec_session = false, global_sec_session = false;
	uint32_t fb_ns = 0, fb_sec = 0, fb_sec_dir = 0;
	int i;

	if (!kms || !state) {
		SDE_ERROR("invalid arguments\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	/* iterate state object for active secure/non-secure crtc */
	for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
		if (!crtc_state->active)
			continue;

		active_crtc_cnt++;
		sde_crtc_state_find_plane_fb_modes(crtc_state, &fb_ns,
				&fb_sec, &fb_sec_dir);
		if (fb_sec_dir)
			sec_session = true;
		cur_crtc = crtc;
	}

	/* iterate global list for active and secure/non-secure crtc */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->state->active)
			continue;

		global_active_crtc_cnt++;
		/* update only when crtc is not the same as current crtc */
		if (crtc != cur_crtc) {
			fb_ns = fb_sec = fb_sec_dir = 0;
			sde_crtc_find_plane_fb_modes(crtc, &fb_ns,
					&fb_sec, &fb_sec_dir);
			if (fb_sec_dir)
				global_sec_session = true;
			global_crtc = crtc;
		}
	}

	if (!global_sec_session && !sec_session)
		return 0;

	/*
	 * - fail crtc commit, if secure-camera/secure-ui session is
	 *   in-progress in any other display
	 * - fail secure-camera/secure-ui crtc commit, if any other display
	 *   session is in-progress
	 */
	if ((global_active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE) ||
			(active_crtc_cnt > MAX_ALLOWED_CRTC_CNT_DURING_SECURE)) {
		SDE_ERROR(
			"crtc%d secure check failed global_active:%d active:%d\n",
				cur_crtc ? cur_crtc->base.id : -1,
				global_active_crtc_cnt, active_crtc_cnt);
		return -EPERM;

	/*
	 * As only one crtc is allowed during secure session, the crtc
	 * in this commit should match with the global crtc
	 */
	} else if (global_crtc && cur_crtc && (global_crtc != cur_crtc)) {
		SDE_ERROR("crtc%d-sec%d not allowed during crtc%d-sec%d\n",
				cur_crtc->base.id, sec_session,
				global_crtc->base.id, global_sec_session);
		return -EPERM;
	}

	return 0;
}
static void sde_kms_vm_res_release(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate;
	struct sde_crtc_state *cstate;
	struct sde_vm_ops *vm_ops;
	enum sde_crtc_vm_req vm_req;
	struct sde_kms *sde_kms = to_sde_kms(kms);

	vm_ops = sde_vm_get_ops(sde_kms);
	if (!vm_ops)
		return;

	crtc = sde_kms_vm_get_vm_crtc(state);
	if (!crtc)
		return;

	new_cstate = drm_atomic_get_new_crtc_state(state, crtc);
	cstate = to_sde_crtc_state(new_cstate);

	vm_req = sde_crtc_get_property(cstate, CRTC_PROP_VM_REQ_STATE);
	if (vm_req != VM_REQ_ACQUIRE)
		return;

	sde_vm_lock(sde_kms);

	if (vm_ops->vm_acquire_fail_handler)
		vm_ops->vm_acquire_fail_handler(sde_kms);

	sde_vm_unlock(sde_kms);
}
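/*
 * sde_kms_atomic_check - SDE wrapper around the DRM atomic check: rejects
 * commits while suspend-blocked, validates trusted-VM transition requests,
 * runs the generic helper check, and then gates secure/non-secure CRTC
 * transitions; VM resources are released on any late failure.
 */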
static int sde_kms_atomic_check(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	int ret;

	if (!kms || !state)
		return -EINVAL;

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;

	SDE_ATRACE_BEGIN("atomic_check");
	if (sde_kms_is_suspend_blocked(dev)) {
		SDE_DEBUG("suspended, skip atomic_check\n");
		ret = -EBUSY;
		goto end;
	}

	ret = sde_kms_check_vm_request(kms, state);
	if (ret) {
		SDE_ERROR("vm switch request checks failed\n");
		goto end;
	}

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		goto vm_clean_up;

	/*
	 * Check if any secure transition (moving a CRTC between secure and
	 * non-secure state and vice-versa) is allowed or not. When moving
	 * to secure state, only planes with fb_mode set to dir_translated
	 * can be staged on the CRTC, and only one CRTC can be active during
	 * the secure state.
	 */
	ret = sde_kms_check_secure_transition(kms, state);
	if (ret)
		goto vm_clean_up;

	goto end;

vm_clean_up:
	sde_kms_vm_res_release(kms, state);

end:
	SDE_ATRACE_END("atomic_check");
	return ret;
}
static struct msm_gem_address_space*
_sde_kms_get_address_space(struct msm_kms *kms,
		unsigned int domain)
{
	struct sde_kms *sde_kms;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return NULL;
	}

	sde_kms = to_sde_kms(kms);
	if (!sde_kms) {
		SDE_ERROR("invalid sde_kms\n");
		return NULL;
	}

	if (domain >= MSM_SMMU_DOMAIN_MAX)
		return NULL;

	return (sde_kms->aspace[domain] &&
			sde_kms->aspace[domain]->domain_attached) ?
		sde_kms->aspace[domain] : NULL;
}
  2592. static struct device *_sde_kms_get_address_space_device(struct msm_kms *kms,
  2593. unsigned int domain)
  2594. {
  2595. struct sde_kms *sde_kms;
  2596. struct msm_gem_address_space *aspace;
  2597. if (!kms) {
  2598. SDE_ERROR("invalid kms\n");
  2599. return NULL;
  2600. }
  2601. sde_kms = to_sde_kms(kms);
  2602. if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
  2603. SDE_ERROR("invalid params\n");
  2604. return NULL;
  2605. }
  2606. aspace = _sde_kms_get_address_space(kms, domain);
  2607. return (aspace && aspace->domain_attached) ?
  2608. msm_gem_get_aspace_device(aspace) : NULL;
  2609. }

static void _sde_kms_post_open(struct msm_kms *kms, struct drm_file *file)
{
	struct drm_device *dev = NULL;
	struct sde_kms *sde_kms = NULL;
	struct drm_connector *connector = NULL;
	struct drm_connector_list_iter conn_iter;
	struct sde_connector *sde_conn = NULL;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev) {
		SDE_ERROR("invalid device\n");
		return;
	}

	if (!dev->mode_config.poll_enabled)
		return;

	mutex_lock(&dev->mode_config.mutex);
	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* Only handle HPD capable connectors. */
		if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
			continue;

		sde_conn = to_sde_connector(connector);
		if (sde_conn->ops.post_open)
			sde_conn->ops.post_open(&sde_conn->base,
					sde_conn->display);
	}
	drm_connector_list_iter_end(&conn_iter);
	mutex_unlock(&dev->mode_config.mutex);
}
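
/*
 * Attach the planes staged by the bootloader to the continuous-splash
 * CRTC: each plane matching the handed-off pipe masks has its source
 * address validated against the splash (or demura) region before being
 * added to the CRTC state and marked as splash-populated.
 */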
static int _sde_kms_update_planes_for_cont_splash(struct sde_kms *sde_kms,
		struct sde_splash_display *splash_display,
		struct drm_crtc *crtc)
{
	struct msm_drm_private *priv;
	struct drm_plane *plane;
	struct sde_splash_mem *splash;
	struct sde_splash_mem *demura;
	struct sde_plane_state *pstate;
	struct sde_sspp_index_info *pipe_info;
	enum sde_sspp pipe_id;
	bool is_virtual;
	int i;

	if (!sde_kms || !splash_display || !crtc) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	priv = sde_kms->dev->dev_private;
	pipe_info = &splash_display->pipe_info;
	splash = splash_display->splash;
	demura = splash_display->demura;

	for (i = 0; i < priv->num_planes; i++) {
		plane = priv->planes[i];
		pipe_id = sde_plane_pipe(plane);
		is_virtual = is_sde_plane_virtual(plane);

		if ((is_virtual && test_bit(pipe_id, pipe_info->virt_pipes)) ||
				(!is_virtual && test_bit(pipe_id, pipe_info->pipes))) {
			if (splash && sde_plane_validate_src_addr(plane,
					splash->splash_buf_base,
					splash->splash_buf_size)) {
				if (!demura || sde_plane_validate_src_addr(
						plane, demura->splash_buf_base,
						demura->splash_buf_size)) {
					SDE_ERROR("invalid adr on pipe:%d crtc:%d\n",
							pipe_id, DRMID(crtc));
					continue;
				}
			}

			plane->state->crtc = crtc;
			crtc->state->plane_mask |= drm_plane_mask(plane);
			pstate = to_sde_plane_state(plane->state);
			pstate->cont_splash_populated = true;

			SDE_DEBUG("set crtc:%d for plane:%d rect:%d\n",
					DRMID(crtc), DRMID(plane), is_virtual);
		}
	}

	return 0;
}
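
/*
 * Inform connectors that continuous splash is disabled on their
 * interface. With a NULL dsi_display all connectors implementing
 * cont_splash_res_disable are notified; otherwise only the connector
 * attached to the given display's encoder is.
 */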
static int sde_kms_inform_cont_splash_res_disable(struct msm_kms *kms,
		struct dsi_display *dsi_display)
{
	void *display;
	struct drm_encoder *encoder = NULL;
	struct msm_display_info info;
	struct drm_device *dev;
	struct sde_kms *sde_kms;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector = NULL;
	struct sde_connector *sde_conn = NULL;
	int rc = 0;

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	display = dsi_display;

	if (dsi_display) {
		if (dsi_display->bridge->base.encoder) {
			encoder = dsi_display->bridge->base.encoder;
			SDE_DEBUG("encoder name = %s\n", encoder->name);
		}

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("%s: dsi get_info failed: %d\n",
					__func__, rc);
			encoder = NULL;
		}
	}

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_encoder *c_encoder = NULL;

		drm_connector_for_each_possible_encoder(connector,
				c_encoder)
			break;

		if (!c_encoder) {
			SDE_ERROR("c_encoder not found\n");
			drm_connector_list_iter_end(&conn_iter);
			return -EINVAL;
		}

		/**
		 * Inform cont_splash is disabled to each interface/connector.
		 * This is currently supported for DSI interface.
		 */
		sde_conn = to_sde_connector(connector);
		if (sde_conn && sde_conn->ops.cont_splash_res_disable) {
			if (!dsi_display || !encoder) {
				sde_conn->ops.cont_splash_res_disable
						(sde_conn->display);
			} else if (c_encoder->base.id == encoder->base.id) {
				/**
				 * This handles the dual-DSI configuration
				 * where one DSI interface has cont_splash
				 * enabled and the other doesn't.
				 */
				sde_conn->ops.cont_splash_res_disable
						(sde_conn->display);
				break;
			}
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	return 0;
}

static int sde_kms_vm_trusted_cont_splash_res_init(struct sde_kms *sde_kms)
{
	int i;
	void *display;
	struct dsi_display *dsi_display;
	struct drm_encoder *encoder;

	if (!sde_kms)
		return -EINVAL;

	if (!sde_in_trusted_vm(sde_kms))
		return 0;

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		display = sde_kms->dsi_displays[i];
		dsi_display = (struct dsi_display *)display;

		if (!dsi_display->bridge->base.encoder) {
			SDE_ERROR("no encoder on dsi display:%d\n", i);
			return -EINVAL;
		}

		encoder = dsi_display->bridge->base.encoder;
		encoder->possible_crtcs = 1 << i;

		SDE_DEBUG(
			"dsi-display:%d encoder id[%d]=%d name=%s crtcs=%x\n", i,
			encoder->index, encoder->base.id,
			encoder->name, encoder->possible_crtcs);
	}

	return 0;
}
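
/*
 * Select the display mode for splash handoff: the connector's preferred
 * mode for a bootloader handoff, or, for a trusted-VM handoff, the mode
 * negotiated during the first atomic-check phase.
 */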
static struct drm_display_mode *_sde_kms_get_splash_mode(
		struct sde_kms *sde_kms, struct drm_connector *connector,
		struct drm_atomic_state *state)
{
	struct drm_display_mode *mode, *cur_mode = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *new_cstate, *old_cstate;
	u32 i = 0;

	if (sde_kms->splash_data.type == SDE_SPLASH_HANDOFF) {
		list_for_each_entry(mode, &connector->modes, head) {
			if (mode->type & DRM_MODE_TYPE_PREFERRED) {
				cur_mode = mode;
				break;
			}
		}
	} else if (state) {
		/* get the mode from first atomic_check phase for trusted_vm */
		for_each_oldnew_crtc_in_state(state, crtc, old_cstate,
				new_cstate, i) {
			if (!new_cstate->active && !old_cstate->active)
				continue;

			list_for_each_entry(mode, &connector->modes, head) {
				if (drm_mode_equal(&new_cstate->mode, mode)) {
					cur_mode = mode;
					break;
				}
			}
		}
	}

	return cur_mode;
}
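
/*
 * Bind the handed-off hardware state to DRM objects for every display
 * with continuous splash enabled: wire up encoder/CRTC/connector, apply
 * the splash mode to the CRTC state, update encoder capabilities and
 * attach the staged planes.
 */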
static int sde_kms_cont_splash_config(struct msm_kms *kms,
		struct drm_atomic_state *state)
{
	void *display;
	struct dsi_display *dsi_display;
	struct msm_display_info info;
	struct drm_encoder *encoder = NULL;
	struct drm_crtc *crtc = NULL;
	int i, rc = 0;
	struct drm_display_mode *drm_mode = NULL;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector = NULL;
	struct sde_connector *sde_conn = NULL;
	struct sde_splash_display *splash_display;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev) {
		SDE_ERROR("invalid device\n");
		return -EINVAL;
	}

	rc = sde_kms_vm_trusted_cont_splash_res_init(sde_kms);
	if (rc) {
		SDE_ERROR("failed vm cont splash resource init, rc=%d\n", rc);
		return -EINVAL;
	}

	if (((sde_kms->splash_data.type == SDE_SPLASH_HANDOFF)
			&& (!sde_kms->splash_data.num_splash_regions)) ||
			!sde_kms->splash_data.num_splash_displays) {
		DRM_INFO("cont_splash feature not enabled\n");
		sde_kms_inform_cont_splash_res_disable(kms, NULL);
		return rc;
	}

	DRM_INFO("cont_splash enabled in %d of %d display(s)\n",
			sde_kms->splash_data.num_splash_displays,
			sde_kms->dsi_display_count);

	/* dsi */
	for (i = 0; i < sde_kms->dsi_display_count; ++i) {
		struct sde_crtc_state *cstate;
		struct sde_connector_state *conn_state;

		display = sde_kms->dsi_displays[i];
		dsi_display = (struct dsi_display *)display;
		splash_display = &sde_kms->splash_data.splash_display[i];

		if (!splash_display->cont_splash_enabled) {
			SDE_DEBUG("display->name = %s splash not enabled\n",
					dsi_display->name);
			sde_kms_inform_cont_splash_res_disable(kms,
					dsi_display);
			continue;
		}

		SDE_DEBUG("display->name = %s\n", dsi_display->name);

		if (dsi_display->bridge->base.encoder) {
			encoder = dsi_display->bridge->base.encoder;
			SDE_DEBUG("encoder name = %s\n", encoder->name);
		}

		memset(&info, 0x0, sizeof(info));
		rc = dsi_display_get_info(NULL, &info, display);
		if (rc) {
			SDE_ERROR("dsi get_info %d failed\n", i);
			encoder = NULL;
			continue;
		}

		SDE_DEBUG("info.is_connected = %s, info.display_type = %d\n",
				((info.is_connected) ? "true" : "false"),
				info.display_type);

		if (!encoder) {
			SDE_ERROR("encoder not initialized\n");
			return -EINVAL;
		}

		priv = sde_kms->dev->dev_private;
		encoder->crtc = priv->crtcs[i];
		crtc = encoder->crtc;
		splash_display->encoder = encoder;

		SDE_DEBUG("for dsi-display:%d crtc id[%d]:%d enc id[%d]:%d\n",
				i, crtc->index, crtc->base.id, encoder->index,
				encoder->base.id);

		mutex_lock(&dev->mode_config.mutex);
		drm_connector_list_iter_begin(dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			struct drm_encoder *c_encoder = NULL;

			drm_connector_for_each_possible_encoder(connector,
					c_encoder)
				break;

			if (!c_encoder) {
				SDE_ERROR("c_encoder not found\n");
				drm_connector_list_iter_end(&conn_iter);
				mutex_unlock(&dev->mode_config.mutex);
				return -EINVAL;
			}

			/**
			 * SDE_KMS doesn't attach more than one encoder to
			 * a DSI connector. So it is safe to check only with
			 * the first encoder entry. Revisit this logic if we
			 * ever have to support continuous splash for
			 * external displays in MST configuration.
			 */
			if (c_encoder->base.id == encoder->base.id)
				break;
		}
		drm_connector_list_iter_end(&conn_iter);

		if (!connector) {
			SDE_ERROR("connector not initialized\n");
			mutex_unlock(&dev->mode_config.mutex);
			return -EINVAL;
		}
		mutex_unlock(&dev->mode_config.mutex);

		crtc->state->encoder_mask = drm_encoder_mask(encoder);
		crtc->state->connector_mask = drm_connector_mask(connector);
		connector->state->crtc = crtc;

		drm_mode = _sde_kms_get_splash_mode(sde_kms, connector, state);
		if (!drm_mode) {
			SDE_ERROR("drm_mode not found; handoff_type:%d\n",
					sde_kms->splash_data.type);
			return -EINVAL;
		}
		SDE_DEBUG(
			"drm_mode->name:%s, type:0x%x, flags:0x%x, handoff_type:%d\n",
				drm_mode->name, drm_mode->type,
				drm_mode->flags, sde_kms->splash_data.type);

		/* Update CRTC drm structure */
		crtc->state->active = true;
		rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode);
		if (rc) {
			SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc);
			return rc;
		}
		drm_mode_copy(&crtc->state->adjusted_mode, drm_mode);
		drm_mode_copy(&crtc->mode, drm_mode);
		cstate = to_sde_crtc_state(crtc->state);
		cstate->cont_splash_populated = true;

		/* Update encoder structure */
		sde_encoder_update_caps_for_cont_splash(encoder,
				splash_display, true);

		sde_crtc_update_cont_splash_settings(crtc);

		sde_conn = to_sde_connector(connector);
		if (sde_conn && sde_conn->ops.cont_splash_config)
			sde_conn->ops.cont_splash_config(sde_conn->display);

		conn_state = to_sde_connector_state(connector->state);
		conn_state->cont_splash_populated = true;

		rc = _sde_kms_update_planes_for_cont_splash(sde_kms,
				splash_display, crtc);
		if (rc) {
			SDE_ERROR("Failed: updating plane status rc=%d\n", rc);
			return rc;
		}
	}

	return rc;
}

static bool sde_kms_check_for_splash(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		return false;
	}

	sde_kms = to_sde_kms(kms);
	return sde_kms->splash_data.num_splash_displays;
}
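
/*
 * Compute the number of layer mixers required by a mode. The pixel clock
 * (htotal * vtotal * vrefresh with a 1.05 fudge factor) and the active
 * width are split across mixers, added in pairs, until both fit within
 * the per-mixer clock and width limits; needing more than four mixers is
 * treated as an error.
 */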
static int sde_kms_get_mixer_count(const struct msm_kms *kms,
		const struct drm_display_mode *mode,
		const struct msm_resource_caps_info *res, u32 *num_lm)
{
	struct sde_kms *sde_kms;
	s64 mode_clock_hz = 0;
	s64 max_mdp_clock_hz = 0;
	s64 max_lm_width = 0;
	s64 hdisplay_fp = 0;
	s64 htotal_fp = 0;
	s64 vtotal_fp = 0;
	s64 vrefresh_fp = 0;
	s64 mdp_fudge_factor = 0;
	s64 num_lm_fp = 0;
	s64 lm_clk_fp = 0;
	s64 lm_width_fp = 0;
	int rc = 0;

	if (!num_lm) {
		SDE_ERROR("invalid num_lm pointer\n");
		return -EINVAL;
	}

	/* default to 1 layer mixer */
	*num_lm = 1;

	if (!kms || !mode || !res) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);

	max_mdp_clock_hz = drm_int2fixp(sde_kms->perf.max_core_clk_rate);
	max_lm_width = drm_int2fixp(res->max_mixer_width);
	hdisplay_fp = drm_int2fixp(mode->hdisplay);
	htotal_fp = drm_int2fixp(mode->htotal);
	vtotal_fp = drm_int2fixp(mode->vtotal);
	vrefresh_fp = drm_int2fixp(drm_mode_vrefresh(mode));
	mdp_fudge_factor = drm_fixp_from_fraction(105, 100);

	/* mode clock = [(h * v * fps * 1.05) / (num_lm)] */
	mode_clock_hz = drm_fixp_mul(htotal_fp, vtotal_fp);
	mode_clock_hz = drm_fixp_mul(mode_clock_hz, vrefresh_fp);
	mode_clock_hz = drm_fixp_mul(mode_clock_hz, mdp_fudge_factor);
	if (mode_clock_hz > max_mdp_clock_hz ||
			hdisplay_fp > max_lm_width) {
		*num_lm = 0;
		do {
			*num_lm += 2;
			num_lm_fp = drm_int2fixp(*num_lm);
			lm_clk_fp = drm_fixp_div(mode_clock_hz, num_lm_fp);
			lm_width_fp = drm_fixp_div(hdisplay_fp, num_lm_fp);

			if (*num_lm > 4) {
				rc = -EINVAL;
				goto error;
			}
		} while (lm_clk_fp > max_mdp_clock_hz ||
				lm_width_fp > max_lm_width);

		mode_clock_hz = lm_clk_fp;
	}

	SDE_DEBUG("[%s] h=%d v=%d fps=%d lm=%d mode_clk=%u max_clk=%llu\n",
			mode->name, mode->htotal, mode->vtotal,
			drm_mode_vrefresh(mode),
			*num_lm, drm_fixp2int(mode_clock_hz),
			sde_kms->perf.max_core_clk_rate);
	return 0;

error:
	SDE_ERROR("required mode clk exceeds max mdp clk\n");
	SDE_ERROR("[%s] h=%d v=%d fps=%d lm=%d mode_clk=%u max_clk=%llu\n",
			mode->name, mode->htotal, mode->vtotal,
			drm_mode_vrefresh(mode),
			*num_lm, drm_fixp2int(mode_clock_hz),
			sde_kms->perf.max_core_clk_rate);
	return rc;
}

static int sde_kms_get_dsc_count(const struct msm_kms *kms,
		u32 hdisplay, u32 *num_dsc)
{
	struct sde_kms *sde_kms;
	uint32_t max_dsc_width;

	if (!num_dsc) {
		SDE_ERROR("invalid num_dsc pointer\n");
		return -EINVAL;
	}

	*num_dsc = 0;
	if (!kms || !hdisplay) {
		SDE_ERROR("invalid input args\n");
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);
	max_dsc_width = sde_kms->catalog->max_dsc_width;
	*num_dsc = DIV_ROUND_UP(hdisplay, max_dsc_width);

	SDE_DEBUG("h=%d, max_dsc_width=%d, num_dsc=%d\n",
			hdisplay, max_dsc_width, *num_dsc);

	return 0;
}
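
/*
 * Build a minimal atomic state for the given encoder via
 * sde_kms_set_crtc_for_conn() and commit it, retrying the modeset locks
 * on deadlock. Used to force completion of continuous-splash handoff.
 */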
static void _sde_kms_null_commit(struct drm_device *dev,
		struct drm_encoder *enc)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_atomic_state *state = NULL;
	int retry_cnt = 0;
	int ret = 0;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK && retry_cnt < SDE_KMS_MODESET_LOCK_MAX_TRIALS) {
		drm_modeset_backoff(&ctx);
		retry_cnt++;
		udelay(SDE_KMS_MODESET_LOCK_TIMEOUT_US);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_ERROR("failed to allocate atomic state\n");
		goto end;
	}

	state->acquire_ctx = &ctx;

	ret = sde_kms_set_crtc_for_conn(dev, enc, state);
	if (ret)
		goto end;

	ret = drm_atomic_commit(state);
	if (ret)
		SDE_ERROR("Error %d doing the atomic commit\n", ret);

end:
	if (state)
		drm_atomic_state_put(state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

void sde_kms_display_early_wakeup(struct drm_device *dev,
		const int32_t connector_id)
{
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *conn;
	struct drm_encoder *drm_enc;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		if (connector_id != DRM_MSM_WAKE_UP_ALL_DISPLAYS &&
				connector_id != conn->base.id)
			continue;

		if (conn->state && conn->state->best_encoder)
			drm_enc = conn->state->best_encoder;
		else
			drm_enc = conn->encoder;

		if (drm_enc)
			sde_encoder_early_wakeup(drm_enc);
	}
	drm_connector_list_iter_end(&conn_iter);
}
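
/*
 * Quiesce the display pipeline before suspend: for each LP2 connector,
 * flush its display thread, wait for the pending frame transfer and
 * request encoder idle, then flush all display/event workers and the
 * pp event worker.
 */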
static void _sde_kms_pm_suspend_idle_helper(struct sde_kms *sde_kms,
		struct device *dev)
{
	int i, ret, crtc_id = 0;
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct drm_connector *conn;
	struct drm_connector_list_iter conn_iter;
	struct msm_drm_private *priv = sde_kms->dev->dev_private;

	drm_connector_list_iter_begin(ddev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		uint64_t lp;

		lp = sde_connector_get_lp(conn);
		if (lp != SDE_MODE_DPMS_LP2)
			continue;

		if (sde_encoder_in_clone_mode(conn->encoder))
			continue;

		crtc_id = drm_crtc_index(conn->state->crtc);
		if (priv->disp_thread[crtc_id].thread)
			kthread_flush_worker(
					&priv->disp_thread[crtc_id].worker);

		ret = sde_encoder_wait_for_event(conn->encoder,
				MSM_ENC_TX_COMPLETE);
		if (ret && ret != -EWOULDBLOCK) {
			SDE_ERROR(
				"[conn: %d] wait for commit done returned %d\n",
				conn->base.id, ret);
		} else if (!ret) {
			if (priv->event_thread[crtc_id].thread)
				kthread_flush_worker(
					&priv->event_thread[crtc_id].worker);
			sde_encoder_idle_request(conn->encoder);
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	for (i = 0; i < priv->num_crtcs; i++) {
		if (priv->disp_thread[i].thread)
			kthread_flush_worker(
					&priv->disp_thread[i].worker);
		if (priv->event_thread[i].thread)
			kthread_flush_worker(
					&priv->event_thread[i].worker);
	}

	kthread_flush_worker(&priv->pp_event_worker);
}

struct msm_display_mode *sde_kms_get_msm_mode(struct drm_connector_state *conn_state)
{
	struct sde_connector_state *sde_conn_state;

	if (!conn_state)
		return NULL;

	sde_conn_state = to_sde_connector_state(conn_state);
	return &sde_conn_state->msm_mode;
}
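
/*
 * PM suspend handler: completes any pending continuous-splash handoff,
 * backs up the current atomic state for resume, moves LP1 connectors to
 * LP2, disables the remaining active CRTCs and rebalances the runtime PM
 * vote so power collapse can occur.
 */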
static int sde_kms_pm_suspend(struct device *dev)
{
	struct drm_device *ddev;
	struct drm_modeset_acquire_ctx ctx;
	struct drm_connector *conn;
	struct drm_encoder *enc;
	struct drm_connector_list_iter conn_iter;
	struct drm_atomic_state *state = NULL;
	struct sde_kms *sde_kms;
	int ret = 0, num_crtcs = 0;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));
	SDE_EVT32(0);

	/* disable hot-plug polling */
	drm_kms_helper_poll_disable(ddev);

	/* if a display is stuck in CS, trigger a null commit to complete handoff */
	drm_for_each_encoder(enc, ddev) {
		if (sde_encoder_in_cont_splash(enc) && enc->crtc)
			_sde_kms_null_commit(ddev, enc);
	}

	/* acquire modeset lock(s) */
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret)
		goto unlock;

	/* save current state for resume */
	if (sde_kms->suspend_state)
		drm_atomic_state_put(sde_kms->suspend_state);
	sde_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
	if (IS_ERR_OR_NULL(sde_kms->suspend_state)) {
		ret = PTR_ERR(sde_kms->suspend_state);
		DRM_ERROR("failed to back up suspend state, %d\n", ret);
		sde_kms->suspend_state = NULL;
		goto unlock;
	}

	/* create atomic state to disable all CRTCs */
	state = drm_atomic_state_alloc(ddev);
	if (!state) {
		ret = -ENOMEM;
		DRM_ERROR("failed to allocate crtc disable state, %d\n", ret);
		goto unlock;
	}

	state->acquire_ctx = &ctx;

	drm_connector_list_iter_begin(ddev, &conn_iter);
	drm_for_each_connector_iter(conn, &conn_iter) {
		struct drm_crtc_state *crtc_state;
		uint64_t lp;

		if (!conn->state || !conn->state->crtc ||
				conn->dpms != DRM_MODE_DPMS_ON ||
				sde_encoder_in_clone_mode(conn->encoder))
			continue;

		lp = sde_connector_get_lp(conn);
		if (lp == SDE_MODE_DPMS_LP1) {
			/* transition LP1->LP2 on pm suspend */
			ret = sde_connector_set_property_for_commit(conn, state,
					CONNECTOR_PROP_LP, SDE_MODE_DPMS_LP2);
			if (ret) {
				DRM_ERROR("failed to set lp2 for conn %d\n",
						conn->base.id);
				drm_connector_list_iter_end(&conn_iter);
				goto unlock;
			}
		}

		if (lp != SDE_MODE_DPMS_LP2) {
			/* force CRTC to be inactive */
			crtc_state = drm_atomic_get_crtc_state(state,
					conn->state->crtc);
			if (IS_ERR_OR_NULL(crtc_state)) {
				DRM_ERROR("failed to get crtc %d state\n",
						conn->state->crtc->base.id);
				drm_connector_list_iter_end(&conn_iter);
				goto unlock;
			}

			if (lp != SDE_MODE_DPMS_LP1)
				crtc_state->active = false;
			++num_crtcs;
		}
	}
	drm_connector_list_iter_end(&conn_iter);

	/* check for nothing to do */
	if (num_crtcs == 0) {
		DRM_DEBUG("all crtcs are already in the off state\n");
		sde_kms->suspend_block = true;
		_sde_kms_pm_suspend_idle_helper(sde_kms, dev);
		goto unlock;
	}

	/* commit the "disable all" state */
	ret = drm_atomic_commit(state);
	if (ret < 0) {
		DRM_ERROR("failed to disable crtcs, %d\n", ret);
		goto unlock;
	}

	sde_kms->suspend_block = true;
	_sde_kms_pm_suspend_idle_helper(sde_kms, dev);

unlock:
	if (state) {
		drm_atomic_state_put(state);
		state = NULL;
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	/*
	 * The pm runtime framework avoids duplicate runtime_suspend calls by
	 * checking runtime_status. However, this put/get pair helps when a
	 * pm_suspend call races with a doze_suspend/power_off commit: it
	 * removes the extra vote taken during suspend and adds it back later
	 * to allow power collapse during the pm_suspend call.
	 */
	pm_runtime_put_sync(dev);
	pm_runtime_get_noresume(dev);

	/* dump clock state before entering suspend */
	if (sde_kms->pm_suspend_clk_dump)
		_sde_kms_dump_clks_state(sde_kms);

	return ret;
}
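
/*
 * PM resume handler: resets the mode configuration, replays the atomic
 * state saved at suspend (retrying on modeset deadlocks) and re-enables
 * hot-plug polling.
 */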
static int sde_kms_pm_resume(struct device *dev)
{
	struct drm_device *ddev;
	struct sde_kms *sde_kms;
	struct drm_modeset_acquire_ctx ctx;
	int ret, i;

	if (!dev)
		return -EINVAL;

	ddev = dev_get_drvdata(dev);
	if (!ddev || !ddev_to_msm_kms(ddev))
		return -EINVAL;

	sde_kms = to_sde_kms(ddev_to_msm_kms(ddev));

	SDE_EVT32(sde_kms->suspend_state != NULL);

	drm_mode_config_reset(ddev);

	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(ddev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto end;
	}

	sde_kms->suspend_block = false;

	if (sde_kms->suspend_state) {
		sde_kms->suspend_state->acquire_ctx = &ctx;
		for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
			ret = drm_atomic_helper_commit_duplicated_state(
					sde_kms->suspend_state, &ctx);
			if (ret != -EDEADLK)
				break;

			drm_modeset_backoff(&ctx);
		}

		if (ret < 0)
			DRM_ERROR("failed to restore state, %d\n", ret);

		drm_atomic_state_put(sde_kms->suspend_state);
		sde_kms->suspend_state = NULL;
	}

end:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	/* enable hot-plug polling */
	drm_kms_helper_poll_enable(ddev);

	return 0;
}

static const struct msm_kms_funcs kms_funcs = {
	.hw_init = sde_kms_hw_init,
	.postinit = sde_kms_postinit,
	.irq_preinstall = sde_irq_preinstall,
	.irq_postinstall = sde_irq_postinstall,
	.irq_uninstall = sde_irq_uninstall,
	.irq = sde_irq,
	.preclose = sde_kms_preclose,
	.lastclose = sde_kms_lastclose,
	.prepare_fence = sde_kms_prepare_fence,
	.prepare_commit = sde_kms_prepare_commit,
	.commit = sde_kms_commit,
	.complete_commit = sde_kms_complete_commit,
	.get_msm_mode = sde_kms_get_msm_mode,
	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
	.wait_for_tx_complete = sde_kms_wait_for_frame_transfer_complete,
	.check_modified_format = sde_format_check_modified_format,
	.atomic_check = sde_kms_atomic_check,
	.get_format = sde_get_msm_format,
	.round_pixclk = sde_kms_round_pixclk,
	.display_early_wakeup = sde_kms_display_early_wakeup,
	.pm_suspend = sde_kms_pm_suspend,
	.pm_resume = sde_kms_pm_resume,
	.destroy = sde_kms_destroy,
	.debugfs_destroy = sde_kms_debugfs_destroy,
	.cont_splash_config = sde_kms_cont_splash_config,
	.register_events = _sde_kms_register_events,
	.get_address_space = _sde_kms_get_address_space,
	.get_address_space_device = _sde_kms_get_address_space_device,
	.postopen = _sde_kms_post_open,
	.check_for_splash = sde_kms_check_for_splash,
	.get_mixer_count = sde_kms_get_mixer_count,
	.get_dsc_count = sde_kms_get_dsc_count,
};

static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
{
	int i;

	for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
		if (!sde_kms->aspace[i])
			continue;

		msm_gem_address_space_put(sde_kms->aspace[i]);
		sde_kms->aspace[i] = NULL;
	}

	return 0;
}
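
/*
 * Create an SMMU-backed GEM address space for each IOMMU domain, map the
 * reserved splash regions in the unsecure domain, and disable the
 * early-map attribute left enabled by the bootloader for continuous
 * splash (or enable S1 translations on newer kernels).
 */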
static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
{
	struct msm_mmu *mmu;
	int i, ret;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
	int early_map = 0;
#endif

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev)
		return -EINVAL;

	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
		struct msm_gem_address_space *aspace;

		mmu = msm_smmu_new(sde_kms->dev->dev, i);
		if (IS_ERR(mmu)) {
			ret = PTR_ERR(mmu);
			SDE_DEBUG("failed to init iommu id %d: rc:%d\n",
					i, ret);
			continue;
		}

		aspace = msm_gem_smmu_address_space_create(sde_kms->dev,
				mmu, "sde");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			mmu->funcs->destroy(mmu);
			goto fail;
		}

		sde_kms->aspace[i] = aspace;
		aspace->domain_attached = true;

		/* Mapping splash memory block */
		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
				sde_kms->splash_data.num_splash_regions) {
			ret = _sde_kms_map_all_splash_regions(sde_kms);
			if (ret) {
				SDE_ERROR("failed to map ret:%d\n", ret);
				goto enable_trans_fail;
			}
		}

		/*
		 * disable early-map which would have been enabled during
		 * bootup by smmu through the device-tree hint for cont-splash
		 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0))
		ret = mmu->funcs->enable_smmu_translations(mmu);
		if (ret) {
			SDE_ERROR("failed to enable_s1_translations ret:%d\n",
					ret);
			goto enable_trans_fail;
		}
#else
		ret = mmu->funcs->set_attribute(mmu, DOMAIN_ATTR_EARLY_MAP,
				&early_map);
		if (ret) {
			SDE_ERROR("failed to set_att ret:%d, early_map:%d\n",
					ret, early_map);
			goto enable_trans_fail;
		}
#endif
	}

	sde_kms->base.aspace = sde_kms->aspace[0];

	return 0;

enable_trans_fail:
	_sde_kms_unmap_all_splash_regions(sde_kms);
fail:
	_sde_kms_mmu_destroy(sde_kms);
	return ret;
}

static void sde_kms_init_rot_sid_hw(struct sde_kms *sde_kms)
{
	if (!sde_kms || !sde_kms->hw_sid || sde_in_trusted_vm(sde_kms))
		return;

	sde_hw_set_rotator_sid(sde_kms->hw_sid);
}

static void sde_kms_init_shared_hw(struct sde_kms *sde_kms)
{
	if (!sde_kms || !sde_kms->hw_mdp || !sde_kms->catalog)
		return;

	if (sde_kms->hw_mdp->ops.reset_ubwc)
		sde_kms->hw_mdp->ops.reset_ubwc(sde_kms->hw_mdp,
				sde_kms->catalog);
}

static void _sde_kms_set_lutdma_vbif_remap(struct sde_kms *sde_kms)
{
	struct sde_vbif_set_qos_params qos_params;
	struct sde_mdss_cfg *catalog;

	if (!sde_kms->catalog)
		return;

	catalog = sde_kms->catalog;
	memset(&qos_params, 0, sizeof(qos_params));
	qos_params.vbif_idx = catalog->dma_cfg.vbif_idx;
	qos_params.xin_id = catalog->dma_cfg.xin_id;
	qos_params.clk_ctrl = catalog->dma_cfg.clk_ctrl;
	qos_params.client_type = VBIF_LUTDMA_CLIENT;

	sde_vbif_set_qos_remap(sde_kms, &qos_params);
}

static int _sde_kms_active_override(struct sde_kms *sde_kms, bool enable)
{
	struct sde_hw_uidle *uidle;

	if (!sde_kms) {
		SDE_ERROR("invalid kms\n");
		return -EINVAL;
	}

	uidle = sde_kms->hw_uidle;

	if (uidle && uidle->ops.active_override_enable)
		uidle->ops.active_override_enable(uidle, enable);

	return 0;
}
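
/*
 * Add or update a resume-latency PM QoS request on every CPU in
 * irq_cpu_mask so display interrupts are serviced with low latency while
 * an IRQ vote is held.
 */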
static void _sde_kms_update_pm_qos_irq_request(struct sde_kms *sde_kms)
{
	struct device *cpu_dev;
	int cpu = 0;
	u32 cpu_irq_latency = sde_kms->catalog->perf.cpu_irq_latency;

	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
		return;
	}

	for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
					cpu);
			continue;
		}

		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
			dev_pm_qos_update_request(&sde_kms->pm_qos_irq_req[cpu],
					cpu_irq_latency);
		else
			dev_pm_qos_add_request(cpu_dev,
					&sde_kms->pm_qos_irq_req[cpu],
					DEV_PM_QOS_RESUME_LATENCY,
					cpu_irq_latency);
	}
}

static void _sde_kms_remove_pm_qos_irq_request(struct sde_kms *sde_kms)
{
	struct device *cpu_dev;
	int cpu = 0;

	if (cpumask_empty(&sde_kms->irq_cpu_mask)) {
		SDE_DEBUG("%s: irq_cpu_mask is empty\n", __func__);
		return;
	}

	for_each_cpu(cpu, &sde_kms->irq_cpu_mask) {
		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			SDE_DEBUG("%s: failed to get cpu%d device\n", __func__,
					cpu);
			continue;
		}

		if (dev_pm_qos_request_active(&sde_kms->pm_qos_irq_req[cpu]))
			dev_pm_qos_remove_request(
					&sde_kms->pm_qos_irq_req[cpu]);
	}
}

void sde_kms_cpu_vote_for_irq(struct sde_kms *sde_kms, bool enable)
{
	struct msm_drm_private *priv = sde_kms->dev->dev_private;

	mutex_lock(&priv->phandle.phandle_lock);
	if (enable && atomic_inc_return(&sde_kms->irq_vote_count) == 1)
		_sde_kms_update_pm_qos_irq_request(sde_kms);
	else if (!enable && atomic_dec_return(&sde_kms->irq_vote_count) == 0)
		_sde_kms_remove_pm_qos_irq_request(sde_kms);
	mutex_unlock(&priv->phandle.phandle_lock);
}

static void sde_kms_irq_affinity_notify(
		struct irq_affinity_notify *affinity_notify,
		const cpumask_t *mask)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms = container_of(affinity_notify,
			struct sde_kms, affinity_notify);

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
		return;

	priv = sde_kms->dev->dev_private;

	mutex_lock(&priv->phandle.phandle_lock);
	_sde_kms_remove_pm_qos_irq_request(sde_kms);

	/* save irq cpu mask */
	sde_kms->irq_cpu_mask = *mask;

	/* request vote with updated irq cpu mask */
	if (atomic_read(&sde_kms->irq_vote_count))
		_sde_kms_update_pm_qos_irq_request(sde_kms);

	mutex_unlock(&priv->phandle.phandle_lock);
}

static void sde_kms_irq_affinity_release(struct kref *ref) {}

static void sde_kms_handle_power_event(u32 event_type, void *usr)
{
	struct sde_kms *sde_kms = usr;
	struct msm_kms *msm_kms;

	if (!sde_kms)
		return;

	msm_kms = &sde_kms->base;

	SDE_DEBUG("event_type:%d\n", event_type);
	SDE_EVT32_VERBOSE(event_type);

	if (event_type == SDE_POWER_EVENT_POST_ENABLE) {
		sde_irq_update(msm_kms, true);
		sde_kms->first_kickoff = true;

		/**
		 * Rotator sid needs to be programmed since uefi doesn't
		 * configure it during continuous splash
		 */
		sde_kms_init_rot_sid_hw(sde_kms);
		if (sde_kms->splash_data.num_splash_displays ||
				sde_in_trusted_vm(sde_kms))
			return;

		sde_vbif_init_memtypes(sde_kms);
		sde_kms_init_shared_hw(sde_kms);
		_sde_kms_set_lutdma_vbif_remap(sde_kms);
	} else if (event_type == SDE_POWER_EVENT_PRE_DISABLE) {
		sde_irq_update(msm_kms, false);
		sde_kms->first_kickoff = false;

		if (sde_in_trusted_vm(sde_kms))
			return;

		_sde_kms_active_override(sde_kms, true);
		if (!is_sde_rsc_available(SDE_RSC_INDEX))
			sde_vbif_axi_halt_request(sde_kms);
	}
}

#define genpd_to_sde_kms(domain) container_of(domain, struct sde_kms, genpd)

static int sde_kms_pd_enable(struct generic_pm_domain *genpd)
{
	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);
	int rc = -EINVAL;

	SDE_DEBUG("\n");

	rc = pm_runtime_resume_and_get(sde_kms->dev->dev);
	rc = (rc > 0) ? 0 : rc;

	SDE_EVT32(rc, genpd->device_count);

	return rc;
}

static int sde_kms_pd_disable(struct generic_pm_domain *genpd)
{
	struct sde_kms *sde_kms = genpd_to_sde_kms(genpd);

	SDE_DEBUG("\n");

	pm_runtime_put_sync(sde_kms->dev->dev);

	SDE_EVT32(genpd->device_count);

	return 0;
}
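
/*
 * Look up the optional demura_region_<N> reserved-memory nodes and attach
 * a memory descriptor to each splash display; displays without a valid
 * region are simply left without a demura block.
 */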
static int _sde_kms_get_demura_plane_data(struct sde_splash_data *data)
{
	int i = 0;
	int ret = 0;
	int count = 0;
	struct device_node *parent, *node;
	struct resource r;
	char node_name[DEMURA_REGION_NAME_MAX];
	struct sde_splash_mem *mem;
	struct sde_splash_display *splash_display;

	if (!data->num_splash_displays) {
		SDE_DEBUG("no splash displays. skipping\n");
		return 0;
	}

	/**
	 * It is expected that each active demura block will have
	 * its own memory region defined.
	 */
	parent = of_find_node_by_path("/reserved-memory");

	for (i = 0; i < data->num_splash_displays; i++) {
		splash_display = &data->splash_display[i];
		snprintf(&node_name[0], DEMURA_REGION_NAME_MAX,
				"demura_region_%d", i);

		splash_display->demura = NULL;
		node = of_find_node_by_name(parent, node_name);
		if (!node) {
			SDE_DEBUG("no Demura node %s! disp count: %d\n",
					node_name, data->num_splash_displays);
			continue;
		} else if (of_address_to_resource(node, 0, &r)) {
			SDE_ERROR("invalid data for:%s\n", node_name);
			ret = -EINVAL;
			break;
		}

		mem = &data->demura_mem[i];
		mem->splash_buf_base = (unsigned long)r.start;
		mem->splash_buf_size = (r.end - r.start) + 1;
		if (!mem->splash_buf_base && !mem->splash_buf_size) {
			SDE_DEBUG("dummy splash mem for disp %d. Skipping\n",
					(i + 1));
			continue;
		} else if (!mem->splash_buf_base || !mem->splash_buf_size) {
			SDE_ERROR("mem for disp %d invalid: add:%lx size:%lx\n",
					(i + 1), mem->splash_buf_base,
					mem->splash_buf_size);
			continue;
		}

		mem->ref_cnt = 0;
		splash_display->demura = mem;
		count++;
		SDE_DEBUG("demura mem for disp:%d add:%lx size:%x\n", (i + 1),
				mem->splash_buf_base,
				mem->splash_buf_size);
	}

	if (!ret && !count)
		SDE_DEBUG("no demura regions for cont. splash found!\n");

	return ret;
}
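
/*
 * Parse the splash_region reserved-memory node (and the optional
 * disp_rdump_region node) into per-display splash descriptors. A single
 * region may be shared by all built-in displays, or one region may be
 * provided per display.
 */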
static int _sde_kms_get_splash_data(struct sde_splash_data *data)
{
	int i = 0;
	int ret = 0;
	struct device_node *parent, *node, *node1;
	struct resource r, r1;
	const char *node_name = "splash_region";
	struct sde_splash_mem *mem;
	bool share_splash_mem = false;
	int num_displays, num_regions;
	struct sde_splash_display *splash_display;

	if (!data)
		return -EINVAL;

	memset(data, 0, sizeof(*data));

	parent = of_find_node_by_path("/reserved-memory");
	if (!parent) {
		SDE_ERROR("failed to find reserved-memory node\n");
		return -EINVAL;
	}

	node = of_find_node_by_name(parent, node_name);
	if (!node) {
		SDE_DEBUG("failed to find node %s\n", node_name);
		return -EINVAL;
	}

	node1 = of_find_node_by_name(NULL, "disp_rdump_region");
	if (!node1)
		SDE_DEBUG("failed to find disp ramdump memory reservation\n");

	/**
	 * Support sharing a single splash memory region across all built-in
	 * displays as well as an independent splash region per display. In
	 * case of independent splash regions for each connected display, the
	 * dtsi node of cont_splash_region should be a collection of all
	 * memory regions, e.g. <r1.start r1.end r2.start r2.end ... rn.start rn.end>
	 */
	num_displays = dsi_display_get_num_of_displays();
	num_regions = of_property_count_u64_elems(node, "reg") / 2;

	data->num_splash_displays = num_displays;

	SDE_DEBUG("splash mem num_regions:%d\n", num_regions);
	if (num_displays > num_regions) {
		share_splash_mem = true;
		pr_info(":%d displays share same splash buf\n", num_displays);
	}

	for (i = 0; i < num_displays; i++) {
		splash_display = &data->splash_display[i];
		if (!i || !share_splash_mem) {
			if (of_address_to_resource(node, i, &r)) {
				SDE_ERROR("invalid data for:%s\n", node_name);
				return -EINVAL;
			}

			mem = &data->splash_mem[i];
			if (!node1 || of_address_to_resource(node1, i, &r1)) {
				SDE_DEBUG("failed to find ramdump memory\n");
				mem->ramdump_base = 0;
				mem->ramdump_size = 0;
			} else {
				mem->ramdump_base = (unsigned long)r1.start;
				mem->ramdump_size = (r1.end - r1.start) + 1;
			}

			mem->splash_buf_base = (unsigned long)r.start;
			mem->splash_buf_size = (r.end - r.start) + 1;
			mem->ref_cnt = 0;
			splash_display->splash = mem;
			data->num_splash_regions++;
		} else {
			data->splash_display[i].splash = &data->splash_mem[0];
		}

		SDE_DEBUG("splash mem for disp:%d add:%lx size:%x\n", (i + 1),
				splash_display->splash->splash_buf_base,
				splash_display->splash->splash_buf_size);
	}

	data->type = SDE_SPLASH_HANDOFF;
	ret = _sde_kms_get_demura_plane_data(data);

	return ret;
}
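
/*
 * Map the MDP, VBIF, LUTDMA and SID register regions and register each
 * with the SDE debug facility; the NRT VBIF, REG_DMA and SID regions are
 * optional and skipped when not described by the platform device.
 */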
static int _sde_kms_hw_init_ioremap(struct sde_kms *sde_kms,
		struct platform_device *platformdev)
{
	int rc = -EINVAL;

	sde_kms->mmio = msm_ioremap(platformdev, "mdp_phys", "mdp_phys");
	if (IS_ERR(sde_kms->mmio)) {
		rc = PTR_ERR(sde_kms->mmio);
		SDE_ERROR("mdp register memory map failed: %d\n", rc);
		sde_kms->mmio = NULL;
		goto error;
	}
	DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio);
	sde_kms->mmio_len = msm_iomap_size(platformdev, "mdp_phys");

	rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
			sde_kms->mmio_len,
			msm_get_phys_addr(platformdev, "mdp_phys"),
			SDE_DBG_SDE);
	if (rc)
		SDE_ERROR("dbg base register kms failed: %d\n", rc);

	sde_kms->vbif[VBIF_RT] = msm_ioremap(platformdev, "vbif_phys",
			"vbif_phys");
	if (IS_ERR(sde_kms->vbif[VBIF_RT])) {
		rc = PTR_ERR(sde_kms->vbif[VBIF_RT]);
		SDE_ERROR("vbif register memory map failed: %d\n", rc);
		sde_kms->vbif[VBIF_RT] = NULL;
		goto error;
	}
	sde_kms->vbif_len[VBIF_RT] = msm_iomap_size(platformdev, "vbif_phys");
	rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
			sde_kms->vbif_len[VBIF_RT],
			msm_get_phys_addr(platformdev, "vbif_phys"),
			SDE_DBG_VBIF_RT);
	if (rc)
		SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);

	sde_kms->vbif[VBIF_NRT] = msm_ioremap(platformdev, "vbif_nrt_phys",
			"vbif_nrt_phys");
	if (IS_ERR(sde_kms->vbif[VBIF_NRT])) {
		sde_kms->vbif[VBIF_NRT] = NULL;
		SDE_DEBUG("VBIF NRT is not defined\n");
	} else {
		sde_kms->vbif_len[VBIF_NRT] = msm_iomap_size(platformdev,
				"vbif_nrt_phys");
	}

	sde_kms->reg_dma = msm_ioremap(platformdev, "regdma_phys",
			"regdma_phys");
	if (IS_ERR(sde_kms->reg_dma)) {
		sde_kms->reg_dma = NULL;
		SDE_DEBUG("REG_DMA is not defined\n");
	} else {
		unsigned long mdp_addr = msm_get_phys_addr(platformdev,
				"mdp_phys");

		sde_kms->reg_dma_len = msm_iomap_size(platformdev,
				"regdma_phys");
		sde_kms->reg_dma_off = msm_get_phys_addr(platformdev,
				"regdma_phys") - mdp_addr;
		rc = sde_dbg_reg_register_base("reg_dma", sde_kms->reg_dma,
				sde_kms->reg_dma_len,
				msm_get_phys_addr(platformdev, "regdma_phys"),
				SDE_DBG_LUTDMA);
		if (rc)
			SDE_ERROR("dbg base register reg_dma failed: %d\n",
					rc);
	}

	sde_kms->sid = msm_ioremap(platformdev, "sid_phys", "sid_phys");
	if (IS_ERR(sde_kms->sid)) {
		SDE_DEBUG("sid register is not defined\n");
		sde_kms->sid = NULL;
	} else {
		sde_kms->sid_len = msm_iomap_size(platformdev, "sid_phys");
		rc = sde_dbg_reg_register_base("sid", sde_kms->sid,
				sde_kms->sid_len,
				msm_get_phys_addr(platformdev, "sid_phys"),
				SDE_DBG_SID);
		if (rc)
			SDE_ERROR("dbg base register sid failed: %d\n", rc);
	}

error:
	return rc;
}

static int _sde_kms_hw_init_power_helper(struct drm_device *dev,
		struct sde_kms *sde_kms)
{
	int rc = 0;

	if (of_find_property(dev->dev->of_node, "#power-domain-cells", NULL)) {
		sde_kms->genpd.name = dev->unique;
		sde_kms->genpd.power_off = sde_kms_pd_disable;
		sde_kms->genpd.power_on = sde_kms_pd_enable;

		rc = pm_genpd_init(&sde_kms->genpd, NULL, true);
		if (rc < 0) {
			SDE_ERROR("failed to init genpd provider %s: %d\n",
					sde_kms->genpd.name, rc);
			return rc;
		}

		rc = of_genpd_add_provider_simple(dev->dev->of_node,
				&sde_kms->genpd);
		if (rc < 0) {
			SDE_ERROR("failed to add genpd provider %s: %d\n",
					sde_kms->genpd.name, rc);
			pm_genpd_remove(&sde_kms->genpd);
			return rc;
		}

		sde_kms->genpd_init = true;
		SDE_DEBUG("added genpd provider %s\n", sde_kms->genpd.name);
	}

	return rc;
}

static int _sde_kms_hw_init_blocks(struct sde_kms *sde_kms,
		struct drm_device *dev,
		struct msm_drm_private *priv)
{
	struct sde_rm *rm = NULL;
	int i, rc = -EINVAL;

	sde_kms->catalog = sde_hw_catalog_init(dev);
	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
		rc = PTR_ERR(sde_kms->catalog);
		if (!sde_kms->catalog)
			rc = -EINVAL;
		SDE_ERROR("catalog init failed: %d\n", rc);
		sde_kms->catalog = NULL;
		goto power_error;
	}

	sde_kms->core_rev = sde_kms->catalog->hw_rev;
	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);

	/* initialize power domain if defined */
	rc = _sde_kms_hw_init_power_helper(dev, sde_kms);
	if (rc) {
		SDE_ERROR("_sde_kms_hw_init_power_helper failed: %d\n", rc);
		goto genpd_err;
	}

	rc = _sde_kms_mmu_init(sde_kms);
	if (rc) {
		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
		goto power_error;
	}

	/* Initialize reg dma block which is a singleton */
	sde_kms->catalog->dma_cfg.base_off = sde_kms->reg_dma_off;
	rc = sde_reg_dma_init(sde_kms->reg_dma, sde_kms->catalog,
			sde_kms->dev);
	if (rc) {
		SDE_ERROR("reg dma init failed: %d\n", rc);
		goto power_error;
	}

	sde_dbg_init_dbg_buses(sde_kms->core_rev);

	rm = &sde_kms->rm;
	rc = sde_rm_init(rm, sde_kms->catalog, sde_kms->mmio,
			sde_kms->dev);
	if (rc) {
		SDE_ERROR("rm init failed: %d\n", rc);
		goto power_error;
	}

	sde_kms->rm_init = true;

	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
		rc = PTR_ERR(sde_kms->hw_intr);
		SDE_ERROR("hw_intr init failed: %d\n", rc);
		sde_kms->hw_intr = NULL;
		goto hw_intr_init_err;
	}

	/*
	 * Attempt continuous splash handoff only if reserved
	 * splash memory is found & release resources on any error
	 * in finding display hw config in splash
	 */
	if (sde_kms->splash_data.num_splash_regions) {
		struct sde_splash_display *display;
		int ret, display_count =
			sde_kms->splash_data.num_splash_displays;

		ret = sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
				&sde_kms->splash_data, sde_kms->catalog);

		for (i = 0; i < display_count; i++) {
			display = &sde_kms->splash_data.splash_display[i];
			/*
			 * free splash region on resource init failure and
			 * cont-splash disabled case
			 */
			if (!display->cont_splash_enabled || ret)
				_sde_kms_free_splash_display_data(
						sde_kms, display);
		}
	}

	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
		rc = PTR_ERR(sde_kms->hw_mdp);
		if (!sde_kms->hw_mdp)
			rc = -EINVAL;
		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
		sde_kms->hw_mdp = NULL;
		goto power_error;
	}

	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
		u32 vbif_idx = sde_kms->catalog->vbif[i].id;

		sde_kms->hw_vbif[vbif_idx] = sde_hw_vbif_init(vbif_idx,
				sde_kms->vbif[vbif_idx], sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
			if (!sde_kms->hw_vbif[vbif_idx])
				rc = -EINVAL;
			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
			sde_kms->hw_vbif[vbif_idx] = NULL;
			goto power_error;
		}
	}

	if (sde_kms->catalog->uidle_cfg.uidle_rev) {
		sde_kms->hw_uidle = sde_hw_uidle_init(UIDLE, sde_kms->mmio,
				sde_kms->mmio_len, sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_uidle)) {
			rc = PTR_ERR(sde_kms->hw_uidle);
			if (!sde_kms->hw_uidle)
				rc = -EINVAL;
			/* uidle is optional, so do not make it a fatal error */
			SDE_ERROR("failed to init uidle rc:%d\n", rc);
			sde_kms->hw_uidle = NULL;
			rc = 0;
		}
	} else {
		sde_kms->hw_uidle = NULL;
	}

	if (sde_kms->sid) {
		sde_kms->hw_sid = sde_hw_sid_init(sde_kms->sid,
				sde_kms->sid_len, sde_kms->catalog);
		if (IS_ERR_OR_NULL(sde_kms->hw_sid)) {
			rc = PTR_ERR(sde_kms->hw_sid);
			SDE_ERROR("failed to init sid %d\n", rc);
			sde_kms->hw_sid = NULL;
			goto power_error;
		}
	}

	rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
			&priv->phandle, "core_clk");
	if (rc) {
		SDE_ERROR("failed to init perf %d\n", rc);
		goto perf_err;
	}

	/*
	 * set the disable_immediate flag when driver supports the precise vsync
	 * timestamp as the DRM hooks for vblank timestamp/counters would be set
	 * based on the feature
	 */
	if (test_bit(SDE_FEATURE_HW_VSYNC_TS, sde_kms->catalog->features))
		dev->vblank_disable_immediate = true;

	/*
	 * _sde_kms_drm_obj_init should create the DRM related objects
	 * i.e. CRTCs, planes, encoders, connectors and so forth
	 */
	rc = _sde_kms_drm_obj_init(sde_kms);
	if (rc) {
		SDE_ERROR("modeset init failed: %d\n", rc);
		goto drm_obj_init_err;
	}

	return 0;

genpd_err:
drm_obj_init_err:
	sde_core_perf_destroy(&sde_kms->perf);
hw_intr_init_err:
perf_err:
power_error:
	return rc;
}

int _sde_kms_get_tvm_inclusion_mem(struct sde_mdss_cfg *catalog,
		struct list_head *mem_list)
{
	struct list_head temp_head;
	struct msm_io_mem_entry *io_mem;
	int rc, i = 0;

	INIT_LIST_HEAD(&temp_head);

	for (i = 0; i < catalog->tvm_reg_count; i++) {
		struct resource *res = &catalog->tvm_reg[i];

		io_mem = kzalloc(sizeof(struct msm_io_mem_entry), GFP_KERNEL);
		if (!io_mem) {
			rc = -ENOMEM;
			goto parse_fail;
		}

		io_mem->base = res->start;
		io_mem->size = resource_size(res);
		list_add(&io_mem->list, &temp_head);
	}

	list_splice(&temp_head, mem_list);
	return 0;

parse_fail:
	msm_dss_clean_io_mem(&temp_head);
	return rc;
}

#ifdef CONFIG_DRM_SDE_VM
int sde_kms_get_io_resources(struct sde_kms *sde_kms, struct msm_io_res *io_res)
{
	struct platform_device *pdev = to_platform_device(sde_kms->dev->dev);
	int rc = 0;

	rc = msm_dss_get_io_mem(pdev, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get io mem for KMS, rc = %d\n", rc);
		return rc;
	}

	rc = msm_dss_get_pmic_io_mem(pdev, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get io mem for pmic, rc:%d\n", rc);
		return rc;
	}

	rc = msm_dss_get_io_irq(pdev, &io_res->irq, GH_IRQ_LABEL_SDE);
	if (rc) {
		SDE_ERROR("failed to get io irq for KMS\n");
		return rc;
	}

	rc = _sde_kms_get_tvm_inclusion_mem(sde_kms->catalog, &io_res->mem);
	if (rc) {
		SDE_ERROR("failed to get tvm inclusion mem ranges\n");
		return rc;
	}

	return rc;
}
#endif
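
/*
 * Main KMS hardware init: maps register regions, fetches splash handoff
 * data, brings up the hardware blocks, registers power-event and IRQ
 * affinity callbacks, and initializes primary- or trusted-VM ops.
 */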
static int sde_kms_hw_init(struct msm_kms *kms)
{
	struct sde_kms *sde_kms;
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct platform_device *platformdev;
	int i, irq_num, rc = -EINVAL;

	if (!kms) {
		SDE_ERROR("invalid kms\n");
		goto end;
	}

	sde_kms = to_sde_kms(kms);
	dev = sde_kms->dev;
	if (!dev || !dev->dev) {
		SDE_ERROR("invalid device\n");
		goto end;
	}

	platformdev = to_platform_device(dev->dev);
	priv = dev->dev_private;
	if (!priv) {
		SDE_ERROR("invalid private data\n");
		goto end;
	}

	rc = _sde_kms_hw_init_ioremap(sde_kms, platformdev);
	if (rc)
		goto error;

	rc = _sde_kms_get_splash_data(&sde_kms->splash_data);
	if (rc)
		SDE_DEBUG("sde splash data fetch failed: %d\n", rc);

	rc = _sde_kms_hw_init_blocks(sde_kms, dev, priv);
	if (rc)
		goto error;

	dev->mode_config.min_width = sde_kms->catalog->min_display_width;
	dev->mode_config.min_height = sde_kms->catalog->min_display_height;
	dev->mode_config.max_width = sde_kms->catalog->max_display_width;
	dev->mode_config.max_height = sde_kms->catalog->max_display_height;

	mutex_init(&sde_kms->secure_transition_lock);

	atomic_set(&sde_kms->detach_sec_cb, 0);
	atomic_set(&sde_kms->detach_all_cb, 0);
	atomic_set(&sde_kms->irq_vote_count, 0);

	/*
	 * Support format modifiers for compression etc.
	 */
	dev->mode_config.allow_fb_modifiers = true;

	/*
	 * Handle (re)initializations during power enable
	 */
	sde_kms_handle_power_event(SDE_POWER_EVENT_POST_ENABLE, sde_kms);
	sde_kms->power_event = sde_power_handle_register_event(&priv->phandle,
			SDE_POWER_EVENT_POST_ENABLE |
			SDE_POWER_EVENT_PRE_DISABLE,
			sde_kms_handle_power_event, sde_kms, "kms");

	if (sde_kms->splash_data.num_splash_displays) {
		SDE_DEBUG("Skipping MDP Resources disable\n");
	} else {
		for (i = 0; i < SDE_POWER_HANDLE_DBUS_ID_MAX; i++)
			sde_power_data_bus_set_quota(&priv->phandle, i,
				SDE_POWER_HANDLE_ENABLE_BUS_AB_QUOTA,
				SDE_POWER_HANDLE_ENABLE_BUS_IB_QUOTA);

		pm_runtime_put_sync(sde_kms->dev->dev);
	}

	sde_kms->affinity_notify.notify = sde_kms_irq_affinity_notify;
	sde_kms->affinity_notify.release = sde_kms_irq_affinity_release;

	irq_num = platform_get_irq(to_platform_device(sde_kms->dev->dev), 0);
	SDE_DEBUG("Registering for notification of irq_num: %d\n", irq_num);
	irq_set_affinity_notifier(irq_num, &sde_kms->affinity_notify);

	if (sde_in_trusted_vm(sde_kms)) {
		rc = sde_vm_trusted_init(sde_kms);
		sde_dbg_set_hw_ownership_status(false);
	} else {
		rc = sde_vm_primary_init(sde_kms);
		sde_dbg_set_hw_ownership_status(true);
	}
	if (rc) {
		SDE_ERROR("failed to initialize VM ops, rc: %d\n", rc);
		goto error;
	}

	return 0;

error:
	_sde_kms_hw_destroy(sde_kms, platformdev);
end:
	return rc;
}
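/*
 * Allocate the sde_kms wrapper and initialize the base msm_kms object
 * with the sde function table; full hardware init is deferred to
 * sde_kms_hw_init().
 */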
struct msm_kms *sde_kms_init(struct drm_device *dev)
{
	struct msm_drm_private *priv;
	struct sde_kms *sde_kms;

	if (!dev || !dev->dev_private) {
		SDE_ERROR("drm device node invalid\n");
		return ERR_PTR(-EINVAL);
	}

	priv = dev->dev_private;

	sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
	if (!sde_kms) {
		SDE_ERROR("failed to allocate sde kms\n");
		return ERR_PTR(-ENOMEM);
	}

	msm_kms_init(&sde_kms->base, &kms_funcs);
	sde_kms->dev = dev;

	return &sde_kms->base;
}
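/*
 * Undo the trusted-VM resource handoff: release any splash display data
 * that was taken over, mark the dsi displays inactive and clear the
 * splash data.
 */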
void sde_kms_vm_trusted_resource_deinit(struct sde_kms *sde_kms)
{
	struct dsi_display *display;
	struct sde_splash_display *handoff_display;
	int i;

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		handoff_display = &sde_kms->splash_data.splash_display[i];
		display = (struct dsi_display *)sde_kms->dsi_displays[i];

		if (handoff_display->cont_splash_enabled)
			_sde_kms_free_splash_display_data(sde_kms,
					handoff_display);
		dsi_display_set_active_state(display, false);
	}

	memset(&sde_kms->splash_data, 0, sizeof(struct sde_splash_data));
}
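/*
 * Take over the display resources handed off by the primary VM: mark the
 * splash data as a VM handoff, reserve the continuous-splash resources,
 * activate the handed-off dsi displays, apply the handoff configuration
 * and hold a runtime-pm vote until the first successful commit.
 */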
int sde_kms_vm_trusted_resource_init(struct sde_kms *sde_kms,
		struct drm_atomic_state *state)
{
	struct drm_device *dev;
	struct msm_drm_private *priv;
	struct sde_splash_display *handoff_display;
	struct dsi_display *display;
	int ret, i;

	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
		SDE_ERROR("invalid params\n");
		return -EINVAL;
	}

	dev = sde_kms->dev;
	priv = dev->dev_private;
	sde_kms->splash_data.type = SDE_VM_HANDOFF;
	sde_kms->splash_data.num_splash_displays = sde_kms->dsi_display_count;

	ret = sde_rm_cont_splash_res_init(priv, &sde_kms->rm,
			&sde_kms->splash_data, sde_kms->catalog);
	if (ret) {
		SDE_ERROR("invalid cont splash init, ret:%d\n", ret);
		return -EINVAL;
	}

	for (i = 0; i < sde_kms->dsi_display_count; i++) {
		handoff_display = &sde_kms->splash_data.splash_display[i];
		display = (struct dsi_display *)sde_kms->dsi_displays[i];

		if (!handoff_display->cont_splash_enabled || ret)
			_sde_kms_free_splash_display_data(sde_kms,
					handoff_display);
		else
			dsi_display_set_active_state(display, true);
	}

	if (sde_kms->splash_data.num_splash_displays != 1) {
		SDE_ERROR("no. of displays not supported:%d\n",
				sde_kms->splash_data.num_splash_displays);
		ret = -EINVAL;
		goto error;
	}

	ret = sde_kms_cont_splash_config(&sde_kms->base, state);
	if (ret) {
		SDE_ERROR("error in setting handoff configs\n");
		goto error;
	}

	/*
	 * Fill in the vote for the continuous splash handoff path; it will
	 * be removed on the first successful commit.
	 */
	ret = pm_runtime_resume_and_get(sde_kms->dev->dev);
	if (ret < 0) {
		SDE_ERROR("failed to enable power resource %d\n", ret);
		SDE_EVT32(ret, SDE_EVTLOG_ERROR);
		goto error;
	}

	return 0;

error:
	return ret;
}
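/*
 * Register or unregister a custom event on a crtc or connector object;
 * rejected with -EACCES while the HW is owned by another VM, since
 * event registration requires HW access.
 */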
static int _sde_kms_register_events(struct msm_kms *kms,
		struct drm_mode_object *obj, u32 event, bool en)
{
	int ret = 0;
	struct drm_crtc *crtc;
	struct drm_connector *conn;
	struct sde_kms *sde_kms;

	if (!kms || !obj) {
		SDE_ERROR("invalid argument kms %pK obj %pK\n", kms, obj);
		return -EINVAL;
	}

	sde_kms = to_sde_kms(kms);

	/* check vm ownership, since event registration requires HW access */
	sde_vm_lock(sde_kms);
	if (!sde_vm_owns_hw(sde_kms)) {
		sde_vm_unlock(sde_kms);
		SDE_DEBUG("HW is owned by other VM\n");
		return -EACCES;
	}

	switch (obj->type) {
	case DRM_MODE_OBJECT_CRTC:
		crtc = obj_to_crtc(obj);
		ret = sde_crtc_register_custom_event(sde_kms, crtc, event, en);
		break;
	case DRM_MODE_OBJECT_CONNECTOR:
		conn = obj_to_connector(obj);
		ret = sde_connector_register_custom_event(sde_kms, conn, event,
				en);
		break;
	}

	sde_vm_unlock(sde_kms);

	return ret;
}
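/*
 * Handle an encoder recovery request by waiting until the encoder
 * reaches its active region; the request is logged to the event log.
 */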
int sde_kms_handle_recovery(struct drm_encoder *encoder)
{
	SDE_EVT32(DRMID(encoder), MSM_ENC_ACTIVE_REGION);

	return sde_encoder_wait_for_event(encoder, MSM_ENC_ACTIVE_REGION);
}
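/*
 * Register the core KMS objects (kms, crtcs and their states, planes,
 * encoders, connectors and their states) with the minidump machinery so
 * their VA regions are captured on a crash.
 */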
void sde_kms_add_data_to_minidump_va(struct sde_kms *sde_kms)
{
	struct msm_drm_private *priv;
	struct sde_crtc *sde_crtc;
	struct sde_crtc_state *cstate;
	struct sde_connector *sde_conn;
	struct sde_connector_state *conn_state;
	u32 i;

	priv = sde_kms->dev->dev_private;

	sde_mini_dump_add_va_region("sde_kms", sizeof(*sde_kms), sde_kms);

	for (i = 0; i < priv->num_crtcs; i++) {
		sde_crtc = to_sde_crtc(priv->crtcs[i]);
		cstate = to_sde_crtc_state(priv->crtcs[i]->state);
		sde_mini_dump_add_va_region("sde_crtc", sizeof(*sde_crtc), sde_crtc);
		sde_mini_dump_add_va_region("crtc_state", sizeof(*cstate), cstate);
	}

	for (i = 0; i < priv->num_planes; i++)
		sde_plane_add_data_to_minidump_va(priv->planes[i]);

	for (i = 0; i < priv->num_encoders; i++)
		sde_encoder_add_data_to_minidump_va(priv->encoders[i]);

	for (i = 0; i < priv->num_connectors; i++) {
		sde_conn = to_sde_connector(priv->connectors[i]);
		conn_state = to_sde_connector_state(priv->connectors[i]->state);
		sde_mini_dump_add_va_region("sde_conn", sizeof(*sde_conn), sde_conn);
		sde_mini_dump_add_va_region("conn_state", sizeof(*conn_state), conn_state);
	}
}