sde_rotator_r3.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
 */
#define pr_fmt(fmt)	"%s:%d: " fmt, __func__, __LINE__

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-buf.h>
#include <linux/clk.h>
#include <linux/clk/qcom.h>
#include <linux/msm_rtb.h>

#include "sde_rotator_core.h"
#include "sde_rotator_util.h"
#include "sde_rotator_smmu.h"
#include "sde_rotator_r3.h"
#include "sde_rotator_r3_internal.h"
#include "sde_rotator_r3_hwio.h"
#include "sde_rotator_r3_debug.h"
#include "sde_rotator_trace.h"
#include "sde_rotator_debug.h"
#include "sde_rotator_vbif.h"

#define RES_UHD			(3840*2160)
#define MS_TO_US(t)		((t) * USEC_PER_MSEC)

/* traffic shaping clock ticks = finish_time x 19.2MHz */
#define TRAFFIC_SHAPE_CLKTICK_14MS	268800
#define TRAFFIC_SHAPE_CLKTICK_12MS	230400
#define TRAFFIC_SHAPE_VSYNC_CLK		19200000

/* wait for at most 2 vsync for lowest refresh rate (24hz) */
#define KOFF_TIMEOUT		(42 * 8)

/*
 * When in sbuf mode, select a much longer wait, to allow the other driver
 * to detect timeouts and abort if necessary.
 */
#define KOFF_TIMEOUT_SBUF	(10000)

/* default stream buffer headroom in lines */
#define DEFAULT_SBUF_HEADROOM	20
#define DEFAULT_UBWC_MALSIZE	0
#define DEFAULT_UBWC_SWIZZLE	0

#define DEFAULT_MAXLINEWIDTH	4096

/* stride alignment requirement for avoiding partial writes */
#define PARTIAL_WRITE_ALIGNMENT	0x1F

/* Macro for constructing the REGDMA command */
#define SDE_REGDMA_WRITE(p, off, data) \
	do { \
		SDEROT_DBG("SDEREG.W:[%s:0x%X] <= 0x%X\n", #off, (off),\
				(u32)(data));\
		writel_relaxed_no_log( \
				(REGDMA_OP_REGWRITE | \
					((off) & REGDMA_ADDR_OFFSET_MASK)), \
				p); \
		p += sizeof(u32); \
		writel_relaxed_no_log(data, p); \
		p += sizeof(u32); \
	} while (0)

#define SDE_REGDMA_MODIFY(p, off, mask, data) \
	do { \
		SDEROT_DBG("SDEREG.M:[%s:0x%X] <= 0x%X\n", #off, (off),\
				(u32)(data));\
		writel_relaxed_no_log( \
				(REGDMA_OP_REGMODIFY | \
					((off) & REGDMA_ADDR_OFFSET_MASK)), \
				p); \
		p += sizeof(u32); \
		writel_relaxed_no_log(mask, p); \
		p += sizeof(u32); \
		writel_relaxed_no_log(data, p); \
		p += sizeof(u32); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
	do { \
		SDEROT_DBG("SDEREG.B:[%s:0x%X:0x%X]\n", #off, (off),\
				(u32)(len));\
		writel_relaxed_no_log( \
				(REGDMA_OP_BLKWRITE_INC | \
					((off) & REGDMA_ADDR_OFFSET_MASK)), \
				p); \
		p += sizeof(u32); \
		writel_relaxed_no_log(len, p); \
		p += sizeof(u32); \
	} while (0)

#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
	do { \
		SDEROT_DBG("SDEREG.I:[:] <= 0x%X\n", (u32)(data));\
		writel_relaxed_no_log(data, p); \
		p += sizeof(u32); \
	} while (0)

#define SDE_REGDMA_READ(p, data) \
	do { \
		data = readl_relaxed_no_log(p); \
		p += sizeof(u32); \
	} while (0)

/* Macro for directly accessing mapped registers */
#define SDE_ROTREG_WRITE(base, off, data) \
	do { \
		SDEROT_DBG("SDEREG.D:[%s:0x%X] <= 0x%X\n", #off, (off)\
				, (u32)(data));\
		writel_relaxed(data, (base + (off))); \
	} while (0)

#define SDE_ROTREG_READ(base, off) \
	readl_relaxed(base + (off))

#define SDE_ROTTOP_IN_OFFLINE_MODE(_rottop_op_mode_) \
	(((_rottop_op_mode_) & ROTTOP_OP_MODE_ROT_OUT_MASK) == 0)
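
/*
 * Illustrative note (not part of the original driver source): the
 * SDE_REGDMA_* macros above emit the REGDMA command stream into a queue
 * segment, advancing the write pointer by one u32 per word written. For
 * example, assuming 'wrptr' points into a segment obtained from
 * sde_hw_rotator_get_regdma_segment() and 'stride_bytes' is a placeholder
 * value, a single register write is queued as two words, opcode+offset
 * followed by data:
 *
 *	char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
 *
 *	SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, stride_bytes);
 *
 * A block write is queued as SDE_REGDMA_BLKWRITE_INC(offset, len) followed
 * by 'len' SDE_REGDMA_BLKWRITE_DATA payload words, as done in
 * sde_hw_rotator_setup_timestamp_packet() below.
 */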

static const u32 sde_hw_rotator_v3_inpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	SDE_PIX_FMT_Y_CB_CR_H2V2,
	SDE_PIX_FMT_Y_CR_CB_H2V2,
	SDE_PIX_FMT_Y_CR_CB_GH2V2,
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	SDE_PIX_FMT_YCBYCR_H2V1,
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};

static const u32 sde_hw_rotator_v3_outpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	/* SDE_PIX_FMT_YCBYCR_H2V1 */
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	/* SDE_PIX_FMT_ARGB_2101010 */
	/* SDE_PIX_FMT_XRGB_2101010 */
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	/* SDE_PIX_FMT_ABGR_2101010 */
	/* SDE_PIX_FMT_XBGR_2101010 */
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
};

static const u32 sde_hw_rotator_v4_inpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	SDE_PIX_FMT_Y_CB_CR_H2V2,
	SDE_PIX_FMT_Y_CR_CB_H2V2,
	SDE_PIX_FMT_Y_CR_CB_GH2V2,
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	SDE_PIX_FMT_YCBYCR_H2V1,
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
	SDE_PIX_FMT_XRGB_8888_TILE,
	SDE_PIX_FMT_ARGB_8888_TILE,
	SDE_PIX_FMT_ABGR_8888_TILE,
	SDE_PIX_FMT_XBGR_8888_TILE,
	SDE_PIX_FMT_RGBA_8888_TILE,
	SDE_PIX_FMT_BGRA_8888_TILE,
	SDE_PIX_FMT_RGBX_8888_TILE,
	SDE_PIX_FMT_BGRX_8888_TILE,
	SDE_PIX_FMT_RGBA_1010102_TILE,
	SDE_PIX_FMT_RGBX_1010102_TILE,
	SDE_PIX_FMT_ARGB_2101010_TILE,
	SDE_PIX_FMT_XRGB_2101010_TILE,
	SDE_PIX_FMT_BGRA_1010102_TILE,
	SDE_PIX_FMT_BGRX_1010102_TILE,
	SDE_PIX_FMT_ABGR_2101010_TILE,
	SDE_PIX_FMT_XBGR_2101010_TILE,
};

static const u32 sde_hw_rotator_v4_outpixfmts[] = {
	SDE_PIX_FMT_XRGB_8888,
	SDE_PIX_FMT_ARGB_8888,
	SDE_PIX_FMT_ABGR_8888,
	SDE_PIX_FMT_RGBA_8888,
	SDE_PIX_FMT_BGRA_8888,
	SDE_PIX_FMT_RGBX_8888,
	SDE_PIX_FMT_BGRX_8888,
	SDE_PIX_FMT_XBGR_8888,
	SDE_PIX_FMT_RGBA_5551,
	SDE_PIX_FMT_ARGB_1555,
	SDE_PIX_FMT_ABGR_1555,
	SDE_PIX_FMT_BGRA_5551,
	SDE_PIX_FMT_BGRX_5551,
	SDE_PIX_FMT_RGBX_5551,
	SDE_PIX_FMT_XBGR_1555,
	SDE_PIX_FMT_XRGB_1555,
	SDE_PIX_FMT_ARGB_4444,
	SDE_PIX_FMT_RGBA_4444,
	SDE_PIX_FMT_BGRA_4444,
	SDE_PIX_FMT_ABGR_4444,
	SDE_PIX_FMT_RGBX_4444,
	SDE_PIX_FMT_XRGB_4444,
	SDE_PIX_FMT_BGRX_4444,
	SDE_PIX_FMT_XBGR_4444,
	SDE_PIX_FMT_RGB_888,
	SDE_PIX_FMT_BGR_888,
	SDE_PIX_FMT_RGB_565,
	SDE_PIX_FMT_BGR_565,
	/* SDE_PIX_FMT_Y_CB_CR_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_H2V2 */
	/* SDE_PIX_FMT_Y_CR_CB_GH2V2 */
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H1V2,
	SDE_PIX_FMT_Y_CRCB_H1V2,
	SDE_PIX_FMT_Y_CBCR_H2V1,
	SDE_PIX_FMT_Y_CRCB_H2V1,
	/* SDE_PIX_FMT_YCBYCR_H2V1 */
	SDE_PIX_FMT_Y_CBCR_H2V2_VENUS,
	SDE_PIX_FMT_Y_CRCB_H2V2_VENUS,
	SDE_PIX_FMT_RGBA_8888_UBWC,
	SDE_PIX_FMT_RGBX_8888_UBWC,
	SDE_PIX_FMT_RGB_565_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_RGBA_1010102,
	SDE_PIX_FMT_RGBX_1010102,
	SDE_PIX_FMT_ARGB_2101010,
	SDE_PIX_FMT_XRGB_2101010,
	SDE_PIX_FMT_BGRA_1010102,
	SDE_PIX_FMT_BGRX_1010102,
	SDE_PIX_FMT_ABGR_2101010,
	SDE_PIX_FMT_XBGR_2101010,
	SDE_PIX_FMT_RGBA_1010102_UBWC,
	SDE_PIX_FMT_RGBX_1010102_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_VENUS,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
	SDE_PIX_FMT_Y_CRCB_H2V2_TILE,
	SDE_PIX_FMT_XRGB_8888_TILE,
	SDE_PIX_FMT_ARGB_8888_TILE,
	SDE_PIX_FMT_ABGR_8888_TILE,
	SDE_PIX_FMT_XBGR_8888_TILE,
	SDE_PIX_FMT_RGBA_8888_TILE,
	SDE_PIX_FMT_BGRA_8888_TILE,
	SDE_PIX_FMT_RGBX_8888_TILE,
	SDE_PIX_FMT_BGRX_8888_TILE,
	SDE_PIX_FMT_RGBA_1010102_TILE,
	SDE_PIX_FMT_RGBX_1010102_TILE,
	SDE_PIX_FMT_ARGB_2101010_TILE,
	SDE_PIX_FMT_XRGB_2101010_TILE,
	SDE_PIX_FMT_BGRA_1010102_TILE,
	SDE_PIX_FMT_BGRX_1010102_TILE,
	SDE_PIX_FMT_ABGR_2101010_TILE,
	SDE_PIX_FMT_XBGR_2101010_TILE,
};

static const u32 sde_hw_rotator_v4_inpixfmts_sbuf[] = {
	SDE_PIX_FMT_Y_CBCR_H2V2_P010,
	SDE_PIX_FMT_Y_CBCR_H2V2,
	SDE_PIX_FMT_Y_CRCB_H2V2,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_UBWC,
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
};

static const u32 sde_hw_rotator_v4_outpixfmts_sbuf[] = {
	SDE_PIX_FMT_Y_CBCR_H2V2_TP10,
	SDE_PIX_FMT_Y_CBCR_H2V2_P010_TILE,
	SDE_PIX_FMT_Y_CBCR_H2V2_TILE,
};

static struct sde_rot_vbif_debug_bus nrt_vbif_dbg_bus_r3[] = {
	{0x214, 0x21c, 16, 1, 0x200}, /* arb clients main */
	{0x214, 0x21c, 0, 12, 0x13}, /* xin blocks - axi side */
	{0x21c, 0x214, 0, 12, 0xc}, /* xin blocks - clock side */
};

static struct sde_rot_debug_bus rot_dbgbus_r3[] = {
	/*
	 * rottop - 0xA8850
	 */
	/* REGDMA */
	{ 0XA8850, 0, 0 },
	{ 0XA8850, 0, 1 },
	{ 0XA8850, 0, 2 },
	{ 0XA8850, 0, 3 },
	{ 0XA8850, 0, 4 },
	/* ROT_WB */
	{ 0XA8850, 1, 0 },
	{ 0XA8850, 1, 1 },
	{ 0XA8850, 1, 2 },
	{ 0XA8850, 1, 3 },
	{ 0XA8850, 1, 4 },
	{ 0XA8850, 1, 5 },
	{ 0XA8850, 1, 6 },
	{ 0XA8850, 1, 7 },
	/* UBWC_DEC */
	{ 0XA8850, 2, 0 },
	/* UBWC_ENC */
	{ 0XA8850, 3, 0 },
	/* ROT_FETCH_0 */
	{ 0XA8850, 4, 0 },
	{ 0XA8850, 4, 1 },
	{ 0XA8850, 4, 2 },
	{ 0XA8850, 4, 3 },
	{ 0XA8850, 4, 4 },
	{ 0XA8850, 4, 5 },
	{ 0XA8850, 4, 6 },
	{ 0XA8850, 4, 7 },
	/* ROT_FETCH_1 */
	{ 0XA8850, 5, 0 },
	{ 0XA8850, 5, 1 },
	{ 0XA8850, 5, 2 },
	{ 0XA8850, 5, 3 },
	{ 0XA8850, 5, 4 },
	{ 0XA8850, 5, 5 },
	{ 0XA8850, 5, 6 },
	{ 0XA8850, 5, 7 },
	/* ROT_FETCH_2 */
	{ 0XA8850, 6, 0 },
	{ 0XA8850, 6, 1 },
	{ 0XA8850, 6, 2 },
	{ 0XA8850, 6, 3 },
	{ 0XA8850, 6, 4 },
	{ 0XA8850, 6, 5 },
	{ 0XA8850, 6, 6 },
	{ 0XA8850, 6, 7 },
	/* ROT_FETCH_3 */
	{ 0XA8850, 7, 0 },
	{ 0XA8850, 7, 1 },
	{ 0XA8850, 7, 2 },
	{ 0XA8850, 7, 3 },
	{ 0XA8850, 7, 4 },
	{ 0XA8850, 7, 5 },
	{ 0XA8850, 7, 6 },
	{ 0XA8850, 7, 7 },
	/* ROT_FETCH_4 */
	{ 0XA8850, 8, 0 },
	{ 0XA8850, 8, 1 },
	{ 0XA8850, 8, 2 },
	{ 0XA8850, 8, 3 },
	{ 0XA8850, 8, 4 },
	{ 0XA8850, 8, 5 },
	{ 0XA8850, 8, 6 },
	{ 0XA8850, 8, 7 },
	/* ROT_UNPACK_0 */
	{ 0XA8850, 9, 0 },
	{ 0XA8850, 9, 1 },
	{ 0XA8850, 9, 2 },
	{ 0XA8850, 9, 3 },
};

static struct sde_rot_regdump sde_rot_r3_regdump[] = {
	{ "SDEROT_ROTTOP", SDE_ROT_ROTTOP_OFFSET, 0x100, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_SSPP", SDE_ROT_SSPP_OFFSET, 0x200, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_WB", SDE_ROT_WB_OFFSET, 0x300, SDE_ROT_REGDUMP_READ },
	{ "SDEROT_REGDMA_CSR", SDE_ROT_REGDMA_OFFSET, 0x100,
		SDE_ROT_REGDUMP_READ },
	/*
	 * A SW reset of REGDMA is needed in order to access the REGDMA RAM,
	 * especially if REGDMA is waiting for Rotator IDLE.
	 * The REGDMA RAM should be dumped last.
	 */
	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
		SDE_ROT_REGDUMP_WRITE, 1 },
	{ "SDEROT_REGDMA_RAM", SDE_ROT_REGDMA_RAM_OFFSET, 0x2000,
		SDE_ROT_REGDUMP_READ },
	{ "SDEROT_VBIF_NRT", SDE_ROT_VBIF_NRT_OFFSET, 0x590,
		SDE_ROT_REGDUMP_VBIF },
	{ "SDEROT_REGDMA_RESET", ROTTOP_SW_RESET_OVERRIDE, 1,
		SDE_ROT_REGDUMP_WRITE, 0 },
};

struct sde_rot_cdp_params {
	bool enable;
	struct sde_mdp_format_params *fmt;
	u32 offset;
};

/* Invalid software timestamp value for initialization */
#define SDE_REGDMA_SWTS_INVALID	(~0)

/**
 * __sde_hw_rotator_get_timestamp - obtain rotator current timestamp
 * @rot: rotator context
 * @q_id: regdma queue id (low/high)
 * @return: current timestamp
 */
static u32 __sde_hw_rotator_get_timestamp(struct sde_hw_rotator *rot, u32 q_id)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 ts;

	if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map)) {
		if (q_id == ROT_QUEUE_HIGH_PRIORITY)
			ts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_ROT_CNTR_0);
		else
			ts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_ROT_CNTR_1);
	} else {
		ts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
		if (q_id == ROT_QUEUE_LOW_PRIORITY)
			ts >>= SDE_REGDMA_SWTS_SHIFT;
	}

	return ts & SDE_REGDMA_SWTS_MASK;
}

/**
 * sde_hw_rotator_disable_irq - Disable hw rotator interrupt with ref. count
 *				Also, clear rotator/regdma irq enable masks.
 * @rot: Pointer to hw rotator
 */
static void sde_hw_rotator_disable_irq(struct sde_hw_rotator *rot)
{
	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
		atomic_read(&rot->irq_enabled));

	if (!atomic_read(&rot->irq_enabled)) {
		SDEROT_ERR("irq %d is already disabled\n", rot->irq_num);
		return;
	}

	if (!atomic_dec_return(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_EN, 0);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_INT_EN, 0);
		/* disable irq after last pending irq is handled, if any */
		synchronize_irq(rot->irq_num);
		disable_irq_nosync(rot->irq_num);
	}
}

/**
 * sde_hw_rotator_elapsed_swts - Find difference of 2 software timestamps
 * @ts_curr: current software timestamp
 * @ts_prev: previous software timestamp
 * @return: the amount ts_curr is ahead of ts_prev
 */
static int sde_hw_rotator_elapsed_swts(u32 ts_curr, u32 ts_prev)
{
	u32 diff = (ts_curr - ts_prev) & SDE_REGDMA_SWTS_MASK;

	return sign_extend32(diff, (SDE_REGDMA_SWTS_SHIFT - 1));
}
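
/*
 * Worked example (illustrative only; the real SDE_REGDMA_SWTS_MASK and
 * SDE_REGDMA_SWTS_SHIFT values come from sde_rotator_r3_internal.h).
 * Assuming a hypothetical 16-bit field (mask 0xFFFF, shift 16):
 *
 *	elapsed(0x0001, 0xFFFE): diff = 0x0003 -> +3  (curr is ahead)
 *	elapsed(0xFFFE, 0x0001): diff = 0xFFFD -> -3  (curr is behind)
 *
 * The masked subtraction plus sign extension makes the comparison robust
 * against counter wrap-around, which is what the pending_swts/pending_hwts
 * helpers below rely on.
 */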

/*
 * sde_hw_rotator_rotirq_handler - non-regdma interrupt handler
 * @irq: Interrupt number
 * @ptr: Pointer to private handle provided during registration
 *
 * This function services the rotator interrupt and wakes up the waiting
 * client with pending rotation requests already submitted to h/w.
 */
static irqreturn_t sde_hw_rotator_rotirq_handler(int irq, void *ptr)
{
	struct sde_hw_rotator *rot = ptr;
	struct sde_hw_rotator_context *ctx;
	irqreturn_t ret = IRQ_NONE;
	u32 isr;

	isr = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_INTR_STATUS);

	SDEROT_DBG("intr_status = %8.8x\n", isr);

	if (isr & ROT_DONE_MASK) {
		sde_hw_rotator_disable_irq(rot);
		SDEROT_DBG("Notify rotator complete\n");

		/* Normal rotator has only one session, no need to look up */
		ctx = rot->rotCtx[0][0];
		WARN_ON(ctx == NULL);
		complete_all(&ctx->rot_comp);

		spin_lock(&rot->rotisr_lock);
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_CLEAR);
		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/*
 * sde_hw_rotator_regdmairq_handler - regdma interrupt handler
 * @irq: Interrupt number
 * @ptr: Pointer to private handle provided during registration
 *
 * This function services the rotator interrupt, decoding the source of
 * events (high/low priority queue), and wakes up all waiting clients
 * with pending rotation requests already submitted to h/w.
 */
static irqreturn_t sde_hw_rotator_regdmairq_handler(int irq, void *ptr)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_hw_rotator *rot = ptr;
	struct sde_hw_rotator_context *ctx, *tmp;
	irqreturn_t ret = IRQ_NONE;
	u32 isr, isr_tmp;
	u32 ts;
	u32 q_id;

	isr = SDE_ROTREG_READ(rot->mdss_base, REGDMA_CSR_REGDMA_INT_STATUS);
	/* acknowledge interrupt before reading latest timestamp */
	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR, isr);

	SDEROT_DBG("intr_status = %8.8x\n", isr);

	/*
	 * Any REGDMA status, including error and watchdog timer, should
	 * trigger and wake up the waiting thread.
	 */
	if (isr & (REGDMA_INT_HIGH_MASK | REGDMA_INT_LOW_MASK)) {
		spin_lock(&rot->rotisr_lock);

		/*
		 * Obtain rotator context based on timestamp from regdma
		 * and low/high interrupt status
		 */
		if (isr & REGDMA_INT_HIGH_MASK) {
			q_id = ROT_QUEUE_HIGH_PRIORITY;
		} else if (isr & REGDMA_INT_LOW_MASK) {
			q_id = ROT_QUEUE_LOW_PRIORITY;
		} else {
			SDEROT_ERR("unknown ISR status: isr=0x%X\n", isr);
			goto done_isr_handle;
		}

		ts = __sde_hw_rotator_get_timestamp(rot, q_id);

		/*
		 * Timestamp packet is not available in sbuf mode.
		 * Simulate timestamp update in the handler instead.
		 */
		if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) ||
				list_empty(&rot->sbuf_ctx[q_id]))
			goto skip_sbuf;

		ctx = NULL;
		isr_tmp = isr;
		list_for_each_entry(tmp, &rot->sbuf_ctx[q_id], list) {
			u32 mask;

			mask = tmp->timestamp & 0x1 ? REGDMA_INT_1_MASK :
				REGDMA_INT_0_MASK;
			if (isr_tmp & mask) {
				isr_tmp &= ~mask;
				ctx = tmp;
				ts = ctx->timestamp;
				rot->ops.update_ts(rot, ctx->q_id, ts);
				SDEROT_DBG("update swts:0x%X\n", ts);
			}
			SDEROT_EVTLOG(isr, tmp->timestamp);
		}
		if (ctx == NULL)
			SDEROT_ERR("invalid swts ctx\n");
skip_sbuf:
		ctx = rot->rotCtx[q_id][ts & SDE_HW_ROT_REGDMA_SEG_MASK];

		/*
		 * Wake up all waiting contexts from the current and previous
		 * SW timestamp.
		 */
		while (ctx &&
			sde_hw_rotator_elapsed_swts(ctx->timestamp, ts) >= 0) {
			ctx->last_regdma_isr_status = isr;
			ctx->last_regdma_timestamp = ts;
			SDEROT_DBG(
				"regdma complete: ctx:%pK, ts:%X\n", ctx, ts);
			wake_up_all(&ctx->regdma_waitq);

			ts = (ts - 1) & SDE_REGDMA_SWTS_MASK;
			ctx = rot->rotCtx[q_id]
				[ts & SDE_HW_ROT_REGDMA_SEG_MASK];
		}

done_isr_handle:
		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	} else if (isr & REGDMA_INT_ERR_MASK) {
		/*
		 * For a REGDMA error, save the isr info and wake up
		 * all waiting contexts
		 */
		int i, j;

		SDEROT_ERR(
			"regdma err isr:%X, wake up all waiting contexts\n",
			isr);

		spin_lock(&rot->rotisr_lock);

		for (i = 0; i < ROT_QUEUE_MAX; i++) {
			for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
				ctx = rot->rotCtx[i][j];
				if (ctx && ctx->last_regdma_isr_status == 0) {
					ts = __sde_hw_rotator_get_timestamp(
							rot, i);
					ctx->last_regdma_isr_status = isr;
					ctx->last_regdma_timestamp = ts;
					wake_up_all(&ctx->regdma_waitq);
					SDEROT_DBG("Wake rotctx[%d][%d]:%pK\n",
							i, j, ctx);
				}
			}
		}

		spin_unlock(&rot->rotisr_lock);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/**
 * sde_hw_rotator_pending_hwts - Check if the given context is still pending
 * @rot: Pointer to hw rotator
 * @ctx: Pointer to rotator context
 * @phwts: Pointer to returned reference hw timestamp, optional
 * @return: true if context has pending requests
 */
static int sde_hw_rotator_pending_hwts(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx, u32 *phwts)
{
	u32 hwts;
	int ts_diff;
	bool pending;

	if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID) {
		if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
			hwts = SDE_ROTREG_READ(rot->mdss_base,
					ROTTOP_ROT_CNTR_1);
		else
			hwts = SDE_ROTREG_READ(rot->mdss_base,
					ROTTOP_ROT_CNTR_0);
	} else {
		hwts = ctx->last_regdma_timestamp;
	}

	hwts &= SDE_REGDMA_SWTS_MASK;

	ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, hwts);

	if (phwts)
		*phwts = hwts;

	pending = (ts_diff > 0) ? true : false;

	SDEROT_DBG("ts:0x%x, queue_id:%d, hwts:0x%x, pending:%d\n",
		ctx->timestamp, ctx->q_id, hwts, pending);
	SDEROT_EVTLOG(ctx->timestamp, hwts, ctx->q_id, ts_diff);

	return pending;
}

/**
 * sde_hw_rotator_update_hwts - update hw timestamp with given value
 * @rot: Pointer to hw rotator
 * @q_id: rotator queue id
 * @hwts: new hw timestamp
 */
static void sde_hw_rotator_update_hwts(struct sde_hw_rotator *rot,
		u32 q_id, u32 hwts)
{
	if (q_id == ROT_QUEUE_LOW_PRIORITY)
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_ROT_CNTR_1, hwts);
	else
		SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_ROT_CNTR_0, hwts);
}

/**
 * sde_hw_rotator_pending_swts - Check if the given context is still pending
 * @rot: Pointer to hw rotator
 * @ctx: Pointer to rotator context
 * @pswts: Pointer to returned reference software timestamp, optional
 * @return: true if context has pending requests
 */
static int sde_hw_rotator_pending_swts(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx, u32 *pswts)
{
	u32 swts;
	int ts_diff;
	bool pending;

	if (ctx->last_regdma_timestamp == SDE_REGDMA_SWTS_INVALID)
		swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
	else
		swts = ctx->last_regdma_timestamp;

	if (ctx->q_id == ROT_QUEUE_LOW_PRIORITY)
		swts >>= SDE_REGDMA_SWTS_SHIFT;

	swts &= SDE_REGDMA_SWTS_MASK;

	ts_diff = sde_hw_rotator_elapsed_swts(ctx->timestamp, swts);

	if (pswts)
		*pswts = swts;

	pending = (ts_diff > 0) ? true : false;

	SDEROT_DBG("ts:0x%x, queue_id:%d, swts:0x%x, pending:%d\n",
		ctx->timestamp, ctx->q_id, swts, pending);
	SDEROT_EVTLOG(ctx->timestamp, swts, ctx->q_id, ts_diff);

	return pending;
}

/**
 * sde_hw_rotator_update_swts - update software timestamp with given value
 * @rot: Pointer to hw rotator
 * @q_id: rotator queue id
 * @swts: new software timestamp
 */
static void sde_hw_rotator_update_swts(struct sde_hw_rotator *rot,
		u32 q_id, u32 swts)
{
	u32 mask = SDE_REGDMA_SWTS_MASK;

	swts &= SDE_REGDMA_SWTS_MASK;
	if (q_id == ROT_QUEUE_LOW_PRIORITY) {
		swts <<= SDE_REGDMA_SWTS_SHIFT;
		mask <<= SDE_REGDMA_SWTS_SHIFT;
	}

	swts |= (SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG) & ~mask);
	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, swts);
}
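
/*
 * Illustrative note (not part of the original source): in the SW timestamp
 * scheme, both priority queues share the single REGDMA_TIMESTAMP_REG, with
 * the low-priority field placed at SDE_REGDMA_SWTS_SHIFT. The helper above
 * therefore performs a read-modify-write so that updating one queue's field
 * leaves the other queue's field intact, e.g. (field layout hypothetical):
 *
 *	reg = [ low_prio_ts : high_prio_ts ]
 *	sde_hw_rotator_update_swts(rot, ROT_QUEUE_LOW_PRIORITY, ts)
 *		-> reg = [ ts : high_prio_ts ]
 */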

/*
 * sde_hw_rotator_irq_setup - setup rotator irq
 * @rot: Pointer to hw rotator
 * @return: 0 on success; error code otherwise
 */
static int sde_hw_rotator_irq_setup(struct sde_hw_rotator *rot)
{
	int rc = 0;

	/* return early if irq is already setup */
	if (rot->irq_num >= 0)
		return 0;

	rot->irq_num = platform_get_irq(rot->pdev, 0);
	if (rot->irq_num < 0) {
		rc = rot->irq_num;
		SDEROT_ERR("fail to get rot irq, fallback to poll %d\n", rc);
	} else {
		if (rot->mode == ROT_REGDMA_OFF)
			rc = devm_request_threaded_irq(&rot->pdev->dev,
					rot->irq_num,
					sde_hw_rotator_rotirq_handler,
					NULL, 0, "sde_rotator_r3", rot);
		else
			rc = devm_request_threaded_irq(&rot->pdev->dev,
					rot->irq_num,
					sde_hw_rotator_regdmairq_handler,
					NULL, 0, "sde_rotator_r3", rot);
		if (rc) {
			SDEROT_ERR("fail to request irq r:%d\n", rc);
			rot->irq_num = -1;
		} else {
			disable_irq(rot->irq_num);
		}
	}

	return rc;
}

/**
 * sde_hw_rotator_enable_irq - Enable hw rotator interrupt with ref. count
 *			       Also, clear rotator/regdma irq status.
 * @rot: Pointer to hw rotator
 */
static int sde_hw_rotator_enable_irq(struct sde_hw_rotator *rot)
{
	int ret = 0;

	SDEROT_DBG("irq_num:%d enabled:%d\n", rot->irq_num,
		atomic_read(&rot->irq_enabled));

	ret = sde_hw_rotator_irq_setup(rot);
	if (ret < 0) {
		SDEROT_ERR("Rotator irq setup failed %d\n", ret);
		return ret;
	}

	if (!atomic_read(&rot->irq_enabled)) {
		if (rot->mode == ROT_REGDMA_OFF)
			SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
				ROT_DONE_MASK);
		else
			SDE_ROTREG_WRITE(rot->mdss_base,
				REGDMA_CSR_REGDMA_INT_CLEAR, REGDMA_INT_MASK);

		enable_irq(rot->irq_num);
	}
	atomic_inc(&rot->irq_enabled);

	return ret;
}

static int sde_hw_rotator_halt_vbif_xin_client(void)
{
	struct sde_mdp_vbif_halt_params halt_params;
	int rc = 0;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();

	memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
	halt_params.xin_id = mdata->vbif_xin_id[XIN_SSPP];
	halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
	halt_params.bit_off_mdp_clk_ctrl =
			MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
	sde_mdp_halt_vbif_xin(&halt_params);
	rc |= halt_params.xin_timeout;

	memset(&halt_params, 0, sizeof(struct sde_mdp_vbif_halt_params));
	halt_params.xin_id = mdata->vbif_xin_id[XIN_WRITEBACK];
	halt_params.reg_off_mdp_clk_ctrl = MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
	halt_params.bit_off_mdp_clk_ctrl =
			MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
	sde_mdp_halt_vbif_xin(&halt_params);
	rc |= halt_params.xin_timeout;

	return rc;
}

/**
 * sde_hw_rotator_reset - Reset rotator hardware
 * @rot: pointer to hw rotator
 * @ctx: pointer to current rotator context during the hw hang (optional)
 */
static int sde_hw_rotator_reset(struct sde_hw_rotator *rot,
		struct sde_hw_rotator_context *ctx)
{
	struct sde_hw_rotator_context *rctx = NULL;
	u32 int_mask = (REGDMA_INT_0_MASK | REGDMA_INT_1_MASK |
			REGDMA_INT_2_MASK);
	u32 last_ts[ROT_QUEUE_MAX] = {0,};
	u32 latest_ts, opmode;
	int elapsed_time, t;
	int i, j;
	unsigned long flags;

	if (!rot) {
		SDEROT_ERR("NULL rotator\n");
		return -EINVAL;
	}

	/* sw reset the hw rotator */
	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 1);
	/* ensure write is issued to the rotator HW */
	wmb();
	usleep_range(MS_TO_US(10), MS_TO_US(20));

	/* force rotator into offline mode */
	opmode = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE);
	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_OP_MODE,
			opmode & ~(BIT(5) | BIT(4) | BIT(1) | BIT(0)));

	SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_SW_RESET_OVERRIDE, 0);

	/* halt vbif xin client to ensure no pending transaction */
	sde_hw_rotator_halt_vbif_xin_client();

	/* if no ctx is specified, skip ctx wake up */
	if (!ctx)
		return 0;

	if (ctx->q_id >= ROT_QUEUE_MAX) {
		SDEROT_ERR("context q_id out of range: %d\n", ctx->q_id);
		return -EINVAL;
	}

	spin_lock_irqsave(&rot->rotisr_lock, flags);

	/* update timestamp register with current context */
	last_ts[ctx->q_id] = ctx->timestamp;
	rot->ops.update_ts(rot, ctx->q_id, ctx->timestamp);
	SDEROT_EVTLOG(ctx->timestamp);

	/*
	 * Search for any pending rot session, and look for last timestamp
	 * per hw queue.
	 */
	for (i = 0; i < ROT_QUEUE_MAX; i++) {
		latest_ts = atomic_read(&rot->timestamp[i]);
		latest_ts &= SDE_REGDMA_SWTS_MASK;
		elapsed_time = sde_hw_rotator_elapsed_swts(latest_ts,
			last_ts[i]);

		for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
			rctx = rot->rotCtx[i][j];
			if (rctx && rctx != ctx) {
				rctx->last_regdma_isr_status = int_mask;
				rctx->last_regdma_timestamp = rctx->timestamp;

				t = sde_hw_rotator_elapsed_swts(latest_ts,
						rctx->timestamp);
				if (t < elapsed_time) {
					elapsed_time = t;
					last_ts[i] = rctx->timestamp;
					rot->ops.update_ts(rot, i, last_ts[i]);
				}

				SDEROT_DBG("rotctx[%d][%d], ts:%d\n",
						i, j, rctx->timestamp);
				SDEROT_EVTLOG(i, j, rctx->timestamp,
						last_ts[i]);
			}
		}
	}

	/* Finally wakeup all pending rotator context in queue */
	for (i = 0; i < ROT_QUEUE_MAX; i++) {
		for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX; j++) {
			rctx = rot->rotCtx[i][j];
			if (rctx && rctx != ctx)
				wake_up_all(&rctx->regdma_waitq);
		}
	}

	spin_unlock_irqrestore(&rot->rotisr_lock, flags);

	return 0;
}

/**
 * _sde_hw_rotator_dump_status - Dump hw rotator status on error
 * @rot: Pointer to hw rotator
 */
static void _sde_hw_rotator_dump_status(struct sde_hw_rotator *rot,
		u32 *ubwcerr)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 reg = 0;

	SDEROT_ERR(
		"op_mode = %x, int_en = %x, int_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_OP_MODE),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_EN),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INT_STATUS));

	SDEROT_ERR(
		"ts0/ts1 = %x/%x, q0_status = %x, q1_status = %x, block_status = %x\n",
		__sde_hw_rotator_get_timestamp(rot, ROT_QUEUE_HIGH_PRIORITY),
		__sde_hw_rotator_get_timestamp(rot, ROT_QUEUE_LOW_PRIORITY),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_0_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_QUEUE_1_STATUS),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_BLOCK_STATUS));

	SDEROT_ERR(
		"invalid_cmd_offset = %x, fsm_state = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_INVALID_CMD_RAM_OFFSET),
		SDE_ROTREG_READ(rot->mdss_base,
			REGDMA_CSR_REGDMA_FSM_STATE));

	SDEROT_ERR("rottop: op_mode = %x, status = %x, clk_status = %x\n",
		SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE),
		SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS),
		SDE_ROTREG_READ(rot->mdss_base, ROTTOP_CLK_STATUS));

	reg = SDE_ROTREG_READ(rot->mdss_base, ROT_SSPP_UBWC_ERROR_STATUS);
	if (ubwcerr)
		*ubwcerr = reg;

	SDEROT_ERR(
		"UBWC decode status = %x, UBWC encode status = %x\n", reg,
		SDE_ROTREG_READ(rot->mdss_base, ROT_WB_UBWC_ERROR_STATUS));

	SDEROT_ERR("VBIF XIN HALT status = %x VBIF AXI HALT status = %x\n",
		SDE_VBIF_READ(mdata, MMSS_VBIF_XIN_HALT_CTRL1),
		SDE_VBIF_READ(mdata, MMSS_VBIF_AXI_HALT_CTRL1));

	SDEROT_ERR("sspp unpack wr: plane0 = %x, plane1 = %x, plane2 = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_SSPP_FETCH_SMP_WR_PLANE0),
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_SSPP_FETCH_SMP_WR_PLANE1),
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_SSPP_FETCH_SMP_WR_PLANE2));

	SDEROT_ERR("sspp unpack rd: plane0 = %x, plane1 = %x, plane2 = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_SSPP_SMP_UNPACK_RD_PLANE0),
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_SSPP_SMP_UNPACK_RD_PLANE1),
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_SSPP_SMP_UNPACK_RD_PLANE2));

	SDEROT_ERR("sspp: unpack_ln = %x, unpack_blk = %x, fill_lvl = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_SSPP_UNPACK_LINE_COUNT),
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_SSPP_UNPACK_BLK_COUNT),
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_SSPP_FILL_LEVELS));

	SDEROT_ERR("wb: sbuf0 = %x, sbuf1 = %x, sys_cache = %x\n",
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_WB_SBUF_STATUS_PLANE0),
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_WB_SBUF_STATUS_PLANE1),
		SDE_ROTREG_READ(rot->mdss_base,
			ROT_WB_SYS_CACHE_MODE));
}

/**
 * sde_hw_rotator_get_ctx(): Retrieve rotator context from rotator HW based
 * on provided session_id. Each rotator has a different session_id.
 * @rot: Pointer to rotator hw
 * @session_id: Identifier for rotator session
 * @sequence_id: Identifier for rotation request within the session
 * @q_id: Rotator queue identifier
 */
static struct sde_hw_rotator_context *sde_hw_rotator_get_ctx(
		struct sde_hw_rotator *rot, u32 session_id, u32 sequence_id,
		enum sde_rot_queue_prio q_id)
{
	int i;
	struct sde_hw_rotator_context *ctx = NULL;

	for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++) {
		ctx = rot->rotCtx[q_id][i];

		if (ctx && (ctx->session_id == session_id) &&
				(ctx->sequence_id == sequence_id)) {
			SDEROT_DBG(
				"rotCtx sloti[%d][%d] ==> ctx:%pK | session-id:%d | sequence-id:%d\n",
				q_id, i, ctx, ctx->session_id,
				ctx->sequence_id);
			return ctx;
		}
	}

	return NULL;
}

/*
 * sde_hw_rotator_map_vaddr - map the debug buffer to kernel space
 * @dbgbuf: Pointer to debug buffer
 * @buf: Pointer to layer buffer structure
 * @data: Pointer to h/w mapped buffer structure
 */
static void sde_hw_rotator_map_vaddr(struct sde_dbg_buf *dbgbuf,
		struct sde_layer_buffer *buf, struct sde_mdp_data *data)
{
	struct dma_buf_map map;

	dbgbuf->dmabuf = data->p[0].srcp_dma_buf;
	dbgbuf->buflen = data->p[0].srcp_dma_buf->size;

	dbgbuf->vaddr  = NULL;
	dbgbuf->width  = buf->width;
	dbgbuf->height = buf->height;

	if (dbgbuf->dmabuf && (dbgbuf->buflen > 0)) {
		dma_buf_begin_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
		dma_buf_vmap(dbgbuf->dmabuf, &map);
		dbgbuf->vaddr = map.vaddr;
		SDEROT_DBG("vaddr mapping: 0x%pK/%ld w:%d/h:%d\n",
				dbgbuf->vaddr, dbgbuf->buflen,
				dbgbuf->width, dbgbuf->height);
	}
}

/*
 * sde_hw_rotator_unmap_vaddr - unmap the debug buffer from kernel space
 * @dbgbuf: Pointer to debug buffer
 */
static void sde_hw_rotator_unmap_vaddr(struct sde_dbg_buf *dbgbuf)
{
	if (dbgbuf->vaddr) {
		dma_buf_kunmap(dbgbuf->dmabuf, 0, dbgbuf->vaddr);
		dma_buf_end_cpu_access(dbgbuf->dmabuf, DMA_FROM_DEVICE);
	}

	dbgbuf->vaddr  = NULL;
	dbgbuf->dmabuf = NULL;
	dbgbuf->buflen = 0;
	dbgbuf->width  = 0;
	dbgbuf->height = 0;
}
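
/*
 * Illustrative usage (not part of the original source; argument names are
 * placeholders): the two helpers above are meant to be used as a pair
 * around a debug inspection of a rotation buffer:
 *
 *	struct sde_dbg_buf dbgbuf;
 *
 *	sde_hw_rotator_map_vaddr(&dbgbuf, layer_buf, mapped_data);
 *	if (dbgbuf.vaddr) {
 *		// inspect dbgbuf.vaddr / dbgbuf.buflen here
 *	}
 *	sde_hw_rotator_unmap_vaddr(&dbgbuf);
 */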
  1112. static void sde_hw_rotator_vbif_rt_setting(void)
  1113. {
  1114. u32 reg_high, reg_shift, reg_val, reg_val_lvl, mask, vbif_qos;
  1115. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1116. int i, j;
  1117. vbif_lock(mdata->parent_pdev);
  1118. for (i = 0; i < mdata->npriority_lvl; i++) {
  1119. for (j = 0; j < MAX_XIN; j++) {
  1120. reg_high = ((mdata->vbif_xin_id[j]
  1121. & 0x8) >> 3) * 4 + (i * 8);
  1122. reg_shift = mdata->vbif_xin_id[j] * 4;
  1123. reg_val = SDE_VBIF_READ(mdata,
  1124. MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + reg_high);
  1125. reg_val_lvl = SDE_VBIF_READ(mdata,
  1126. MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + reg_high);
  1127. mask = 0x7 << (mdata->vbif_xin_id[j] * 4);
  1128. vbif_qos = mdata->vbif_nrt_qos[i];
  1129. reg_val &= ~mask;
  1130. reg_val |= (vbif_qos << reg_shift) & mask;
  1131. reg_val_lvl &= ~mask;
  1132. reg_val_lvl |= (vbif_qos << reg_shift) & mask;
  1133. SDE_VBIF_WRITE(mdata,
  1134. MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + reg_high,
  1135. reg_val);
  1136. SDE_VBIF_WRITE(mdata,
  1137. MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + reg_high,
  1138. reg_val_lvl);
  1139. }
  1140. }
  1141. vbif_unlock(mdata->parent_pdev);
  1142. }
  1143. /*
1144. * sde_hw_rotator_vbif_setting - helper function to set the VBIF QoS remapper
1145. * levels, enable write gather, and optionally disable clock gating for
1146. * debug purposes.
  1147. *
  1148. * @rot: Pointer to rotator hw
  1149. */
  1150. static void sde_hw_rotator_vbif_setting(struct sde_hw_rotator *rot)
  1151. {
  1152. u32 i, mask, vbif_qos, reg_val = 0;
  1153. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1154. /* VBIF_ROT QoS remapper setting */
  1155. switch (mdata->npriority_lvl) {
  1156. case SDE_MDP_VBIF_4_LEVEL_REMAPPER:
  1157. for (i = 0; i < mdata->npriority_lvl; i++) {
  1158. reg_val = SDE_VBIF_READ(mdata,
  1159. MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4);
  1160. mask = 0x3 << (XIN_SSPP * 2);
  1161. vbif_qos = mdata->vbif_nrt_qos[i];
  1162. reg_val |= vbif_qos << (XIN_SSPP * 2);
  1163. /* ensure write is issued after the read operation */
  1164. mb();
  1165. SDE_VBIF_WRITE(mdata,
  1166. MMSS_VBIF_NRT_VBIF_QOS_REMAP_00 + i*4,
  1167. reg_val);
  1168. }
  1169. break;
  1170. case SDE_MDP_VBIF_8_LEVEL_REMAPPER:
  1171. mask = mdata->npriority_lvl - 1;
  1172. for (i = 0; i < mdata->npriority_lvl; i++) {
  1173. /* RD and WR client */
  1174. reg_val |= (mdata->vbif_nrt_qos[i] & mask)
  1175. << (XIN_SSPP * 4);
  1176. reg_val |= (mdata->vbif_nrt_qos[i] & mask)
  1177. << (XIN_WRITEBACK * 4);
  1178. SDE_VBIF_WRITE(mdata,
  1179. MMSS_VBIF_NRT_VBIF_QOS_RP_REMAP_000 + i*8,
  1180. reg_val);
  1181. SDE_VBIF_WRITE(mdata,
  1182. MMSS_VBIF_NRT_VBIF_QOS_LVL_REMAP_000 + i*8,
  1183. reg_val);
  1184. }
  1185. break;
  1186. default:
  1187. SDEROT_DBG("invalid vbif remapper levels\n");
  1188. }
  1189. /* Enable write gather for writeback to remove write gaps, which
  1190. * may hang AXI/BIMC/SDE.
  1191. */
  1192. SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_WRITE_GATHTER_EN,
  1193. BIT(XIN_WRITEBACK));
  1194. /*
1195. * For debug purposes, disable clock gating, i.e. keep clocks always on
  1196. */
  1197. if (mdata->clk_always_on) {
  1198. SDE_VBIF_WRITE(mdata, MMSS_VBIF_CLKON, 0x3);
  1199. SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0, 0x3);
  1200. SDE_VBIF_WRITE(mdata, MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL1,
  1201. 0xFFFF);
  1202. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_CLK_CTRL, 1);
  1203. }
  1204. }
  1205. /*
  1206. * sde_hw_rotator_setup_timestamp_packet - setup timestamp writeback command
  1207. * @ctx: Pointer to rotator context
  1208. * @mask: Bit mask location of the timestamp
  1209. * @swts: Software timestamp
  1210. */
  1211. static void sde_hw_rotator_setup_timestamp_packet(
  1212. struct sde_hw_rotator_context *ctx, u32 mask, u32 swts)
  1213. {
  1214. char __iomem *wrptr;
  1215. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1216. /*
1217. * Create a dummy 1x1 packet write to a single location for
1218. * timestamp generation.
  1219. */
  1220. SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 6);
  1221. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
  1222. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
  1223. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
  1224. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x00010001);
  1225. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
  1226. SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
  1227. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_YSTRIDE0, 4);
  1228. SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_FORMAT, 4);
  1229. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x004037FF);
  1230. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
  1231. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x80000000);
  1232. SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->timestamp);
  1233. /*
  1234. * Must clear secure buffer setting for SW timestamp because
1235. * the SW timestamp buffer is always allocated in a non-secure region.
  1236. */
  1237. if (ctx->is_secure) {
  1238. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
  1239. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
  1240. }
  1241. SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 4);
  1242. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x000037FF);
  1243. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
  1244. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0x03020100);
  1245. SDE_REGDMA_BLKWRITE_DATA(wrptr, ctx->ts_addr);
  1246. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_YSTRIDE0, 4);
  1247. SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE, 0x00010001);
  1248. SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE, 0x00010001);
  1249. SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY, 0);
  1250. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG,
  1251. (ctx->rot->highest_bank & 0x3) << 8);
  1252. SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC, 0);
  1253. SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, 1);
  1254. SDE_REGDMA_MODIFY(wrptr, REGDMA_TIMESTAMP_REG, mask, swts);
  1255. SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 1);
  1256. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1257. }
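/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * REGDMA_TIMESTAMP_REG modify issued above packs both priority queues into
 * one 32-bit software timestamp word, with the low-priority queue shifted up
 * by SDE_REGDMA_SWTS_SHIFT. The mask/swts pair passed to this function is
 * built in sde_hw_rotator_start_regdma(); the hypothetical helper below just
 * restates that packing.
 */
static inline u32 example_update_swts_word(u32 old_word, bool low_prio_queue,
		u32 new_ts)
{
	u32 shift = low_prio_queue ? SDE_REGDMA_SWTS_SHIFT : 0;
	u32 mask = SDE_REGDMA_SWTS_MASK << shift;

	/* keep the other queue's half, replace this queue's half */
	return (old_word & ~mask) | ((new_ts & SDE_REGDMA_SWTS_MASK) << shift);
}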
  1258. /*
  1259. * sde_hw_rotator_cdp_configs - configures the CDP registers
  1260. * @ctx: Pointer to rotator context
  1261. * @params: Pointer to parameters needed for CDP configs
  1262. */
  1263. static void sde_hw_rotator_cdp_configs(struct sde_hw_rotator_context *ctx,
  1264. struct sde_rot_cdp_params *params)
  1265. {
  1266. int reg_val;
  1267. char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1268. if (!params->enable) {
  1269. SDE_REGDMA_WRITE(wrptr, params->offset, 0x0);
  1270. goto end;
  1271. }
  1272. reg_val = BIT(0); /* enable cdp */
  1273. if (sde_mdp_is_ubwc_format(params->fmt))
  1274. reg_val |= BIT(1); /* enable UBWC meta cdp */
  1275. if (sde_mdp_is_ubwc_format(params->fmt)
  1276. || sde_mdp_is_tilea4x_format(params->fmt)
  1277. || sde_mdp_is_tilea5x_format(params->fmt))
  1278. reg_val |= BIT(2); /* enable tile amortize */
  1279. reg_val |= BIT(3); /* enable preload addr ahead cnt 64 */
  1280. SDE_REGDMA_WRITE(wrptr, params->offset, reg_val);
  1281. end:
  1282. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1283. }
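/*
 * Illustrative sketch (editorial addition, not part of the driver): the CDP
 * control value written above is a small bit field; the hypothetical helper
 * below re-derives it from the same three inputs so the individual bits are
 * easy to see.
 */
static inline u32 example_cdp_ctrl_value(bool enable, bool is_ubwc,
		bool is_tile)
{
	u32 val = 0;

	if (!enable)
		return 0;
	val |= BIT(0);		/* enable CDP */
	if (is_ubwc)
		val |= BIT(1);	/* enable UBWC meta CDP */
	if (is_ubwc || is_tile)
		val |= BIT(2);	/* enable tile amortization */
	val |= BIT(3);		/* preload address ahead count of 64 */
	return val;
}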
  1284. /*
  1285. * sde_hw_rotator_setup_qos_lut_wr - Set QoS LUT/Danger LUT/Safe LUT configs
  1286. * for the WRITEBACK rotator for inline and offline rotation.
  1287. *
  1288. * @ctx: Pointer to rotator context
  1289. */
  1290. static void sde_hw_rotator_setup_qos_lut_wr(struct sde_hw_rotator_context *ctx)
  1291. {
  1292. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1293. char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1294. /* Offline rotation setting */
  1295. if (!ctx->sbuf_mode) {
  1296. /* QOS LUT WR setting */
  1297. if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
  1298. SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
  1299. mdata->lut_cfg[SDE_ROT_WR].creq_lut_0);
  1300. SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
  1301. mdata->lut_cfg[SDE_ROT_WR].creq_lut_1);
  1302. }
  1303. /* Danger LUT WR setting */
  1304. if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
  1305. SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
  1306. mdata->lut_cfg[SDE_ROT_WR].danger_lut);
  1307. /* Safe LUT WR setting */
  1308. if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
  1309. SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
  1310. mdata->lut_cfg[SDE_ROT_WR].safe_lut);
  1311. /* Inline rotation setting */
  1312. } else {
  1313. /* QOS LUT WR setting */
  1314. if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
  1315. SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_0,
  1316. mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_0);
  1317. SDE_REGDMA_WRITE(wrptr, ROT_WB_CREQ_LUT_1,
  1318. mdata->inline_lut_cfg[SDE_ROT_WR].creq_lut_1);
  1319. }
  1320. /* Danger LUT WR setting */
  1321. if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
  1322. mdata->sde_inline_qos_map))
  1323. SDE_REGDMA_WRITE(wrptr, ROT_WB_DANGER_LUT,
  1324. mdata->inline_lut_cfg[SDE_ROT_WR].danger_lut);
  1325. /* Safe LUT WR setting */
  1326. if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
  1327. mdata->sde_inline_qos_map))
  1328. SDE_REGDMA_WRITE(wrptr, ROT_WB_SAFE_LUT,
  1329. mdata->inline_lut_cfg[SDE_ROT_WR].safe_lut);
  1330. }
  1331. /* Update command queue write ptr */
  1332. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1333. }
  1334. /*
  1335. * sde_hw_rotator_setup_qos_lut_rd - Set QoS LUT/Danger LUT/Safe LUT configs
  1336. * for the SSPP rotator for inline and offline rotation.
  1337. *
  1338. * @ctx: Pointer to rotator context
  1339. */
  1340. static void sde_hw_rotator_setup_qos_lut_rd(struct sde_hw_rotator_context *ctx)
  1341. {
  1342. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1343. char __iomem *wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1344. /* Offline rotation setting */
  1345. if (!ctx->sbuf_mode) {
  1346. /* QOS LUT RD setting */
  1347. if (test_bit(SDE_QOS_LUT, mdata->sde_qos_map)) {
  1348. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
  1349. mdata->lut_cfg[SDE_ROT_RD].creq_lut_0);
  1350. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
  1351. mdata->lut_cfg[SDE_ROT_RD].creq_lut_1);
  1352. }
  1353. /* Danger LUT RD setting */
  1354. if (test_bit(SDE_QOS_DANGER_LUT, mdata->sde_qos_map))
  1355. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
  1356. mdata->lut_cfg[SDE_ROT_RD].danger_lut);
  1357. /* Safe LUT RD setting */
  1358. if (test_bit(SDE_QOS_SAFE_LUT, mdata->sde_qos_map))
  1359. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
  1360. mdata->lut_cfg[SDE_ROT_RD].safe_lut);
1361. /* Inline rotation setting */
  1362. } else {
  1363. /* QOS LUT RD setting */
  1364. if (test_bit(SDE_INLINE_QOS_LUT, mdata->sde_inline_qos_map)) {
  1365. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_0,
  1366. mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_0);
  1367. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_CREQ_LUT_1,
  1368. mdata->inline_lut_cfg[SDE_ROT_RD].creq_lut_1);
  1369. }
  1370. /* Danger LUT RD setting */
  1371. if (test_bit(SDE_INLINE_QOS_DANGER_LUT,
  1372. mdata->sde_inline_qos_map))
  1373. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_DANGER_LUT,
  1374. mdata->inline_lut_cfg[SDE_ROT_RD].danger_lut);
  1375. /* Safe LUT RD setting */
  1376. if (test_bit(SDE_INLINE_QOS_SAFE_LUT,
  1377. mdata->sde_inline_qos_map))
  1378. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SAFE_LUT,
  1379. mdata->inline_lut_cfg[SDE_ROT_RD].safe_lut);
  1380. }
  1381. /* Update command queue write ptr */
  1382. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1383. }
  1384. static void sde_hw_rotator_setup_fetchengine_helper(
  1385. struct sde_hw_rot_sspp_cfg *cfg,
  1386. struct sde_rot_data_type *mdata,
  1387. struct sde_hw_rotator_context *ctx, char __iomem *wrptr,
  1388. u32 flags, u32 *width, u32 *height)
  1389. {
  1390. int i;
  1391. /*
  1392. * initialize start control trigger selection first
  1393. */
  1394. if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
  1395. if (ctx->sbuf_mode)
  1396. SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL,
  1397. ctx->start_ctrl);
  1398. else
  1399. SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, 0);
  1400. }
  1401. /* source image setup */
  1402. if ((flags & SDE_ROT_FLAG_DEINTERLACE)
  1403. && !(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90)) {
  1404. for (i = 0; i < cfg->src_plane.num_planes; i++)
  1405. cfg->src_plane.ystride[i] *= 2;
  1406. *width *= 2;
  1407. *height /= 2;
  1408. }
  1409. }
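/*
 * Illustrative sketch (editorial addition, not part of the driver): for a
 * deinterlaced source that has not already been rotated by 90 degrees, the
 * helper above doubles the plane strides (to fetch every other line) and
 * reshapes the frame as twice the width and half the height. The
 * hypothetical helper below restates that adjustment on plain values.
 */
static inline void example_deinterlace_adjust(u32 *width, u32 *height,
		u32 *ystride, u32 num_planes)
{
	u32 i;

	for (i = 0; i < num_planes; i++)
		ystride[i] *= 2;	/* step over every other source line */
	*width *= 2;
	*height /= 2;
}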
  1410. /*
  1411. * sde_hw_rotator_setup_fetchengine - setup fetch engine
  1412. * @ctx: Pointer to rotator context
  1413. * @queue_id: Priority queue identifier
  1414. * @cfg: Fetch configuration
  1415. * @danger_lut: real-time QoS LUT for danger setting (not used)
  1416. * @safe_lut: real-time QoS LUT for safe setting (not used)
  1417. * @dnsc_factor_w: downscale factor for width
  1418. * @dnsc_factor_h: downscale factor for height
  1419. * @flags: Control flag
  1420. */
  1421. static void sde_hw_rotator_setup_fetchengine(struct sde_hw_rotator_context *ctx,
  1422. enum sde_rot_queue_prio queue_id,
  1423. struct sde_hw_rot_sspp_cfg *cfg, u32 danger_lut, u32 safe_lut,
  1424. u32 dnsc_factor_w, u32 dnsc_factor_h, u32 flags)
  1425. {
  1426. struct sde_hw_rotator *rot = ctx->rot;
  1427. struct sde_mdp_format_params *fmt;
  1428. struct sde_mdp_data *data;
  1429. struct sde_rot_cdp_params cdp_params = {0};
  1430. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1431. char __iomem *wrptr;
  1432. u32 opmode = 0;
  1433. u32 chroma_samp = 0;
  1434. u32 src_format = 0;
  1435. u32 unpack = 0;
  1436. u32 width = cfg->img_width;
  1437. u32 height = cfg->img_height;
  1438. u32 fetch_blocksize = 0;
  1439. int i;
  1440. if (ctx->rot->mode == ROT_REGDMA_ON) {
  1441. if (rot->irq_num >= 0)
  1442. SDE_ROTREG_WRITE(rot->mdss_base,
  1443. REGDMA_CSR_REGDMA_INT_EN,
  1444. REGDMA_INT_MASK);
  1445. SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_OP_MODE,
  1446. REGDMA_EN);
  1447. }
  1448. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1449. sde_hw_rotator_setup_fetchengine_helper(cfg, mdata, ctx, wrptr,
  1450. flags, &width, &height);
  1451. /*
  1452. * REGDMA BLK write from SRC_SIZE to OP_MODE, total 15 registers
  1453. */
  1454. SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_SSPP_SRC_SIZE, 15);
  1455. /* SRC_SIZE, SRC_IMG_SIZE, SRC_XY, OUT_SIZE, OUT_XY */
  1456. SDE_REGDMA_BLKWRITE_DATA(wrptr,
  1457. cfg->src_rect->w | (cfg->src_rect->h << 16));
  1458. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0); /* SRC_IMG_SIZE unused */
  1459. SDE_REGDMA_BLKWRITE_DATA(wrptr,
  1460. cfg->src_rect->x | (cfg->src_rect->y << 16));
  1461. SDE_REGDMA_BLKWRITE_DATA(wrptr,
  1462. cfg->src_rect->w | (cfg->src_rect->h << 16));
  1463. SDE_REGDMA_BLKWRITE_DATA(wrptr,
  1464. cfg->src_rect->x | (cfg->src_rect->y << 16));
  1465. /* SRC_ADDR [0-3], SRC_YSTRIDE [0-1] */
  1466. data = cfg->data;
  1467. for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
  1468. SDE_REGDMA_BLKWRITE_DATA(wrptr, data->p[i].addr);
  1469. SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[0] |
  1470. (cfg->src_plane.ystride[1] << 16));
  1471. SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->src_plane.ystride[2] |
  1472. (cfg->src_plane.ystride[3] << 16));
  1473. /* UNUSED, write 0 */
  1474. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
  1475. /* setup source format */
  1476. fmt = cfg->fmt;
  1477. chroma_samp = fmt->chroma_sample;
  1478. if (flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) {
  1479. if (chroma_samp == SDE_MDP_CHROMA_H2V1)
  1480. chroma_samp = SDE_MDP_CHROMA_H1V2;
  1481. else if (chroma_samp == SDE_MDP_CHROMA_H1V2)
  1482. chroma_samp = SDE_MDP_CHROMA_H2V1;
  1483. }
  1484. src_format = (chroma_samp << 23) |
  1485. (fmt->fetch_planes << 19) |
  1486. (fmt->bits[C3_ALPHA] << 6) |
  1487. (fmt->bits[C2_R_Cr] << 4) |
  1488. (fmt->bits[C1_B_Cb] << 2) |
  1489. (fmt->bits[C0_G_Y] << 0);
  1490. if (fmt->alpha_enable &&
  1491. (fmt->fetch_planes == SDE_MDP_PLANE_INTERLEAVED))
  1492. src_format |= BIT(8); /* SRCC3_EN */
  1493. src_format |= ((fmt->unpack_count - 1) << 12) |
  1494. (fmt->unpack_tight << 17) |
  1495. (fmt->unpack_align_msb << 18) |
  1496. ((fmt->bpp - 1) << 9) |
  1497. ((fmt->frame_format & 3) << 30);
  1498. if (flags & SDE_ROT_FLAG_ROT_90)
  1499. src_format |= BIT(11); /* ROT90 */
  1500. if (sde_mdp_is_ubwc_format(fmt))
  1501. opmode |= BIT(0); /* BWC_DEC_EN */
  1502. /* if this is YUV pixel format, enable CSC */
  1503. if (sde_mdp_is_yuv_format(fmt))
  1504. src_format |= BIT(15); /* SRC_COLOR_SPACE */
  1505. if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
  1506. src_format |= BIT(14); /* UNPACK_DX_FORMAT */
  1507. if (rot->solid_fill)
  1508. src_format |= BIT(22); /* SOLID_FILL */
  1509. /* SRC_FORMAT */
  1510. SDE_REGDMA_BLKWRITE_DATA(wrptr, src_format);
  1511. /* setup source unpack pattern */
  1512. unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
  1513. (fmt->element[1] << 8) | (fmt->element[0] << 0);
  1514. /* SRC_UNPACK_PATTERN */
  1515. SDE_REGDMA_BLKWRITE_DATA(wrptr, unpack);
  1516. /* setup source op mode */
  1517. if (flags & SDE_ROT_FLAG_FLIP_LR)
  1518. opmode |= BIT(13); /* FLIP_MODE L/R horizontal flip */
  1519. if (flags & SDE_ROT_FLAG_FLIP_UD)
  1520. opmode |= BIT(14); /* FLIP_MODE U/D vertical flip */
  1521. opmode |= BIT(31); /* MDSS_MDP_OP_PE_OVERRIDE */
  1522. /* SRC_OP_MODE */
  1523. SDE_REGDMA_BLKWRITE_DATA(wrptr, opmode);
  1524. /* setup source fetch config, TP10 uses different block size */
  1525. if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map) &&
  1526. (dnsc_factor_w == 1) && (dnsc_factor_h == 1)) {
  1527. if (sde_mdp_is_tp10_format(fmt))
  1528. fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_144_EXT;
  1529. else
  1530. fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_192_EXT;
  1531. } else {
  1532. if (sde_mdp_is_tp10_format(fmt))
  1533. fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_96;
  1534. else
  1535. fetch_blocksize = SDE_ROT_SSPP_FETCH_BLOCKSIZE_128;
  1536. }
  1537. if (rot->solid_fill)
  1538. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_CONSTANT_COLOR,
  1539. rot->constant_color);
  1540. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_FETCH_CONFIG,
  1541. fetch_blocksize |
  1542. SDE_ROT_SSPP_FETCH_CONFIG_RESET_VALUE |
  1543. ((rot->highest_bank & 0x3) << 18));
  1544. if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
  1545. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL,
  1546. ((ctx->rot->ubwc_malsize & 0x3) << 8) |
  1547. ((ctx->rot->highest_bank & 0x3) << 4) |
  1548. ((ctx->rot->ubwc_swizzle & 0x1) << 0));
  1549. else if (test_bit(SDE_CAPS_UBWC_3, mdata->sde_caps_map) ||
  1550. test_bit(SDE_CAPS_UBWC_4, mdata->sde_caps_map))
  1551. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_UBWC_STATIC_CTRL, BIT(30));
  1552. /* setup source buffer plane security status */
  1553. if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
  1554. SDE_ROT_FLAG_SECURE_CAMERA_SESSION)) {
  1555. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0xF);
  1556. ctx->is_secure = true;
  1557. } else {
  1558. SDE_REGDMA_WRITE(wrptr, ROT_SSPP_SRC_ADDR_SW_STATUS, 0);
  1559. ctx->is_secure = false;
  1560. }
  1561. /* Update command queue write ptr */
  1562. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1563. /* CDP register RD setting */
  1564. cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
  1565. mdata->enable_cdp[SDE_ROT_RD] : false;
  1566. cdp_params.fmt = fmt;
  1567. cdp_params.offset = ROT_SSPP_CDP_CNTL;
  1568. sde_hw_rotator_cdp_configs(ctx, &cdp_params);
1569. /* QOS LUT/ Danger LUT/ Safe LUT RD setting */
  1570. sde_hw_rotator_setup_qos_lut_rd(ctx);
  1571. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1572. /*
1573. * Determine if traffic shaping is required. Only enable traffic
1574. * shaping for offline UHD (or larger) content at 30 fps or below.
1575. * The actual traffic shaping bandwidth calculation is done in output setup.
  1576. */
  1577. if (((!ctx->sbuf_mode)
  1578. && (cfg->src_rect->w * cfg->src_rect->h) >= RES_UHD)
  1579. && (cfg->fps <= 30)) {
  1580. SDEROT_DBG("Enable Traffic Shaper\n");
  1581. ctx->is_traffic_shaping = true;
  1582. } else {
  1583. SDEROT_DBG("Disable Traffic Shaper\n");
  1584. ctx->is_traffic_shaping = false;
  1585. }
  1586. /* Update command queue write ptr */
  1587. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1588. }
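/*
 * Illustrative sketch (editorial addition, not part of the driver):
 * SRC_FORMAT programmed above is a packed bit field built from the format
 * description. The hypothetical helper below shows the core packing for the
 * unconditional fields; the conditional bits (SRCC3_EN, ROT90,
 * SRC_COLOR_SPACE, UNPACK_DX_FORMAT, SOLID_FILL) are OR'd in separately,
 * exactly as in sde_hw_rotator_setup_fetchengine().
 */
static inline u32 example_pack_src_format(u32 chroma_samp, u32 fetch_planes,
		u32 bits_c3, u32 bits_c2, u32 bits_c1, u32 bits_c0,
		u32 unpack_count, u32 unpack_tight, u32 unpack_align_msb,
		u32 bpp, u32 frame_format)
{
	u32 val;

	val  = (chroma_samp << 23) | (fetch_planes << 19);
	val |= (bits_c3 << 6) | (bits_c2 << 4) | (bits_c1 << 2) | (bits_c0 << 0);
	val |= ((unpack_count - 1) << 12) | (unpack_tight << 17) |
			(unpack_align_msb << 18);
	val |= ((bpp - 1) << 9) | ((frame_format & 3) << 30);
	return val;
}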
  1589. /*
  1590. * sde_hw_rotator_setup_wbengine - setup writeback engine
  1591. * @ctx: Pointer to rotator context
  1592. * @queue_id: Priority queue identifier
  1593. * @cfg: Writeback configuration
  1594. * @flags: Control flag
  1595. */
  1596. static void sde_hw_rotator_setup_wbengine(struct sde_hw_rotator_context *ctx,
  1597. enum sde_rot_queue_prio queue_id,
  1598. struct sde_hw_rot_wb_cfg *cfg,
  1599. u32 flags)
  1600. {
  1601. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1602. struct sde_mdp_format_params *fmt;
  1603. struct sde_rot_cdp_params cdp_params = {0};
  1604. char __iomem *wrptr;
  1605. u32 pack = 0;
  1606. u32 dst_format = 0;
  1607. u32 no_partial_writes = 0;
  1608. int i;
  1609. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1610. fmt = cfg->fmt;
  1611. /* setup WB DST format */
  1612. dst_format |= (fmt->chroma_sample << 23) |
  1613. (fmt->fetch_planes << 19) |
  1614. (fmt->bits[C3_ALPHA] << 6) |
  1615. (fmt->bits[C2_R_Cr] << 4) |
  1616. (fmt->bits[C1_B_Cb] << 2) |
  1617. (fmt->bits[C0_G_Y] << 0);
  1618. /* alpha control */
  1619. if (fmt->alpha_enable || (!fmt->is_yuv && (fmt->unpack_count == 4))) {
  1620. dst_format |= BIT(8);
  1621. if (!fmt->alpha_enable) {
  1622. dst_format |= BIT(14);
  1623. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ALPHA_X_VALUE, 0);
  1624. }
  1625. }
  1626. dst_format |= ((fmt->unpack_count - 1) << 12) |
  1627. (fmt->unpack_tight << 17) |
  1628. (fmt->unpack_align_msb << 18) |
  1629. ((fmt->bpp - 1) << 9) |
  1630. ((fmt->frame_format & 3) << 30);
  1631. if (sde_mdp_is_yuv_format(fmt))
  1632. dst_format |= BIT(15);
  1633. if (fmt->pixel_mode == SDE_MDP_PIXEL_10BIT)
  1634. dst_format |= BIT(21); /* PACK_DX_FORMAT */
  1635. /*
  1636. * REGDMA BLK write, from DST_FORMAT to DST_YSTRIDE 1, total 9 regs
  1637. */
  1638. SDE_REGDMA_BLKWRITE_INC(wrptr, ROT_WB_DST_FORMAT, 9);
  1639. /* DST_FORMAT */
  1640. SDE_REGDMA_BLKWRITE_DATA(wrptr, dst_format);
  1641. /* DST_OP_MODE */
  1642. if (sde_mdp_is_ubwc_format(fmt))
  1643. SDE_REGDMA_BLKWRITE_DATA(wrptr, BIT(0));
  1644. else
  1645. SDE_REGDMA_BLKWRITE_DATA(wrptr, 0);
  1646. /* DST_PACK_PATTERN */
  1647. pack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
  1648. (fmt->element[1] << 8) | (fmt->element[0] << 0);
  1649. SDE_REGDMA_BLKWRITE_DATA(wrptr, pack);
  1650. /* DST_ADDR [0-3], DST_YSTRIDE [0-1] */
  1651. for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
  1652. SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->data->p[i].addr);
  1653. SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[0] |
  1654. (cfg->dst_plane.ystride[1] << 16));
  1655. SDE_REGDMA_BLKWRITE_DATA(wrptr, cfg->dst_plane.ystride[2] |
  1656. (cfg->dst_plane.ystride[3] << 16));
  1657. /* setup WB out image size and ROI */
  1658. SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_IMG_SIZE,
  1659. cfg->img_width | (cfg->img_height << 16));
  1660. SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_SIZE,
  1661. cfg->dst_rect->w | (cfg->dst_rect->h << 16));
  1662. SDE_REGDMA_WRITE(wrptr, ROT_WB_OUT_XY,
  1663. cfg->dst_rect->x | (cfg->dst_rect->y << 16));
  1664. if (flags & (SDE_ROT_FLAG_SECURE_OVERLAY_SESSION |
  1665. SDE_ROT_FLAG_SECURE_CAMERA_SESSION))
  1666. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0x1);
  1667. else
  1668. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_ADDR_SW_STATUS, 0);
  1669. /*
  1670. * setup Downscale factor
  1671. */
  1672. SDE_REGDMA_WRITE(wrptr, ROTTOP_DNSC,
  1673. cfg->v_downscale_factor |
  1674. (cfg->h_downscale_factor << 16));
  1675. /* partial write check */
  1676. if (test_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map)) {
  1677. no_partial_writes = BIT(10);
  1678. /*
1679. * For simplicity, only disable partial writes when the ROI
1680. * spans the entire width of the output image and every
1681. * plane stride is properly aligned; otherwise leave
1682. * partial writes enabled.
1683. *
1684. * This avoids having to determine the memory access
1685. * alignment of the actual horizontal ROI on a
1686. * per-color-format basis.
  1687. */
  1688. if (sde_mdp_is_ubwc_format(fmt)) {
  1689. no_partial_writes = 0x0;
  1690. } else if (cfg->dst_rect->x ||
  1691. cfg->dst_rect->w != cfg->img_width) {
  1692. no_partial_writes = 0x0;
  1693. } else {
  1694. for (i = 0; i < SDE_ROT_MAX_PLANES; i++)
  1695. if (cfg->dst_plane.ystride[i] &
  1696. PARTIAL_WRITE_ALIGNMENT)
  1697. no_partial_writes = 0x0;
  1698. }
  1699. }
  1700. /* write config setup for bank configuration */
  1701. SDE_REGDMA_WRITE(wrptr, ROT_WB_DST_WRITE_CONFIG, no_partial_writes |
  1702. (ctx->rot->highest_bank & 0x3) << 8);
  1703. if (test_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map))
  1704. SDE_REGDMA_WRITE(wrptr, ROT_WB_UBWC_STATIC_CTRL,
  1705. ((ctx->rot->ubwc_malsize & 0x3) << 8) |
  1706. ((ctx->rot->highest_bank & 0x3) << 4) |
  1707. ((ctx->rot->ubwc_swizzle & 0x1) << 0));
  1708. if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map))
  1709. SDE_REGDMA_WRITE(wrptr, ROT_WB_SYS_CACHE_MODE,
  1710. ctx->sys_cache_mode);
  1711. SDE_REGDMA_WRITE(wrptr, ROTTOP_OP_MODE, ctx->op_mode |
  1712. (flags & SDE_ROT_FLAG_ROT_90 ? BIT(1) : 0) | BIT(0));
  1713. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1714. /* CDP register WR setting */
  1715. cdp_params.enable = test_bit(SDE_QOS_CDP, mdata->sde_qos_map) ?
  1716. mdata->enable_cdp[SDE_ROT_WR] : false;
  1717. cdp_params.fmt = fmt;
  1718. cdp_params.offset = ROT_WB_CDP_CNTL;
  1719. sde_hw_rotator_cdp_configs(ctx, &cdp_params);
  1720. /* QOS LUT/ Danger LUT/ Safe LUT WR setting */
  1721. sde_hw_rotator_setup_qos_lut_wr(ctx);
  1722. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1723. /* setup traffic shaper for 4k 30fps content or if prefill_bw is set */
  1724. if (ctx->is_traffic_shaping || cfg->prefill_bw) {
  1725. u32 bw;
  1726. /*
1727. * Target to finish in 12ms, so we need to set the number of bytes
1728. * per clock tick for traffic shaping.
1729. * Each clock tick runs at 19.2MHz, so the total number of clock
1730. * ticks in 12ms is 12ms / (1/19.2MHz) ==> 230400.
1731. * Finally, calculate the byte count per clock tick based on
1732. * resolution, bpp and compression ratio.
  1733. */
  1734. bw = cfg->dst_rect->w * cfg->dst_rect->h;
  1735. if (fmt->chroma_sample == SDE_MDP_CHROMA_420)
  1736. bw = (bw * 3) / 2;
  1737. else
  1738. bw *= fmt->bpp;
  1739. bw /= TRAFFIC_SHAPE_CLKTICK_12MS;
  1740. /* use prefill bandwidth instead if specified */
  1741. if (cfg->prefill_bw)
  1742. bw = DIV_ROUND_UP_SECTOR_T(cfg->prefill_bw,
  1743. TRAFFIC_SHAPE_VSYNC_CLK);
  1744. if (bw > 0xFF)
  1745. bw = 0xFF;
  1746. else if (bw == 0)
  1747. bw = 1;
  1748. SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT,
  1749. BIT(31) | (cfg->prefill_bw ? BIT(27) : 0) | bw);
  1750. SDEROT_DBG("Enable ROT_WB Traffic Shaper:%d\n", bw);
  1751. } else {
  1752. SDE_REGDMA_WRITE(wrptr, ROT_WB_TRAFFIC_SHAPER_WR_CLIENT, 0);
  1753. SDEROT_DBG("Disable ROT_WB Traffic Shaper\n");
  1754. }
  1755. /* Update command queue write ptr */
  1756. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1757. }
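/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * traffic shaper value programmed above is a bytes-per-19.2MHz-tick budget
 * clamped to an 8-bit field. The hypothetical helper below re-derives it for
 * the offline case with no prefill bandwidth override, using the same 12ms
 * tick budget (e.g. TRAFFIC_SHAPE_CLKTICK_12MS) as the function above.
 */
static inline u32 example_traffic_shaper_bytes_per_tick(u32 dst_w, u32 dst_h,
		u32 bpp, bool is_yuv420, u32 clkticks)
{
	u32 bytes = dst_w * dst_h;

	if (is_yuv420)
		bytes = (bytes * 3) / 2;	/* 1.5 bytes per pixel */
	else
		bytes *= bpp;
	bytes /= clkticks;
	if (bytes > 0xFF)			/* clamp to the 8-bit HW field */
		bytes = 0xFF;
	else if (bytes == 0)
		bytes = 1;
	return bytes;
}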
  1758. /*
  1759. * sde_hw_rotator_start_no_regdma - start non-regdma operation
  1760. * @ctx: Pointer to rotator context
  1761. * @queue_id: Priority queue identifier
  1762. */
  1763. static u32 sde_hw_rotator_start_no_regdma(struct sde_hw_rotator_context *ctx,
  1764. enum sde_rot_queue_prio queue_id)
  1765. {
  1766. struct sde_hw_rotator *rot = ctx->rot;
  1767. char __iomem *wrptr;
  1768. char __iomem *mem_rdptr;
  1769. char __iomem *addr;
  1770. u32 mask;
  1771. u32 cmd0, cmd1, cmd2;
  1772. u32 blksize;
  1773. /*
1774. * When regdma is not in use, the regdma segment is just normal
1775. * DRAM, not iomem.
  1776. */
  1777. mem_rdptr = sde_hw_rotator_get_regdma_segment_base(ctx);
  1778. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1779. if (!sde_hw_rotator_enable_irq(rot)) {
  1780. SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_EN, 1);
  1781. SDE_REGDMA_WRITE(wrptr, ROTTOP_INTR_CLEAR, 1);
  1782. reinit_completion(&ctx->rot_comp);
  1783. }
  1784. SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
  1785. /* Update command queue write ptr */
  1786. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1787. SDEROT_DBG("BEGIN %d\n", ctx->timestamp);
  1788. /* Write all command stream to Rotator blocks */
1789. /* Rotator will start right away once the command stream finishes writing */
  1790. while (mem_rdptr < wrptr) {
  1791. u32 op = REGDMA_OP_MASK & readl_relaxed_no_log(mem_rdptr);
  1792. switch (op) {
  1793. case REGDMA_OP_NOP:
  1794. SDEROT_DBG("NOP\n");
  1795. mem_rdptr += sizeof(u32);
  1796. break;
  1797. case REGDMA_OP_REGWRITE:
  1798. SDE_REGDMA_READ(mem_rdptr, cmd0);
  1799. SDE_REGDMA_READ(mem_rdptr, cmd1);
  1800. SDEROT_DBG("REGW %6.6x %8.8x\n",
  1801. cmd0 & REGDMA_ADDR_OFFSET_MASK,
  1802. cmd1);
  1803. addr = rot->mdss_base +
  1804. (cmd0 & REGDMA_ADDR_OFFSET_MASK);
  1805. writel_relaxed(cmd1, addr);
  1806. break;
  1807. case REGDMA_OP_REGMODIFY:
  1808. SDE_REGDMA_READ(mem_rdptr, cmd0);
  1809. SDE_REGDMA_READ(mem_rdptr, cmd1);
  1810. SDE_REGDMA_READ(mem_rdptr, cmd2);
  1811. SDEROT_DBG("REGM %6.6x %8.8x %8.8x\n",
  1812. cmd0 & REGDMA_ADDR_OFFSET_MASK,
  1813. cmd1, cmd2);
  1814. addr = rot->mdss_base +
  1815. (cmd0 & REGDMA_ADDR_OFFSET_MASK);
  1816. mask = cmd1;
  1817. writel_relaxed((readl_relaxed(addr) & mask) | cmd2,
  1818. addr);
  1819. break;
  1820. case REGDMA_OP_BLKWRITE_SINGLE:
  1821. SDE_REGDMA_READ(mem_rdptr, cmd0);
  1822. SDE_REGDMA_READ(mem_rdptr, cmd1);
  1823. SDEROT_DBG("BLKWS %6.6x %6.6x\n",
  1824. cmd0 & REGDMA_ADDR_OFFSET_MASK,
  1825. cmd1);
  1826. addr = rot->mdss_base +
  1827. (cmd0 & REGDMA_ADDR_OFFSET_MASK);
  1828. blksize = cmd1;
  1829. while (blksize--) {
  1830. SDE_REGDMA_READ(mem_rdptr, cmd0);
  1831. SDEROT_DBG("DATA %8.8x\n", cmd0);
  1832. writel_relaxed(cmd0, addr);
  1833. }
  1834. break;
  1835. case REGDMA_OP_BLKWRITE_INC:
  1836. SDE_REGDMA_READ(mem_rdptr, cmd0);
  1837. SDE_REGDMA_READ(mem_rdptr, cmd1);
  1838. SDEROT_DBG("BLKWI %6.6x %6.6x\n",
  1839. cmd0 & REGDMA_ADDR_OFFSET_MASK,
  1840. cmd1);
  1841. addr = rot->mdss_base +
  1842. (cmd0 & REGDMA_ADDR_OFFSET_MASK);
  1843. blksize = cmd1;
  1844. while (blksize--) {
  1845. SDE_REGDMA_READ(mem_rdptr, cmd0);
  1846. SDEROT_DBG("DATA %8.8x\n", cmd0);
  1847. writel_relaxed(cmd0, addr);
  1848. addr += 4;
  1849. }
  1850. break;
  1851. default:
1852. /* Unsupported op mode:
1853. * skip the data word for any unrecognized op mode
1854. */
  1855. SDEROT_DBG("UNDEFINED\n");
  1856. mem_rdptr += sizeof(u32);
  1857. break;
  1858. }
  1859. }
  1860. SDEROT_DBG("END %d\n", ctx->timestamp);
  1861. return ctx->timestamp;
  1862. }
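/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * playback loop above expects each command to start with a header word
 * carrying the op bits plus a register offset, followed by its payload (one
 * value for REGWRITE, mask and value for REGMODIFY, a count plus data words
 * for the block writes). The hypothetical helper below emits a single
 * REGWRITE entry into an ordinary memory segment under those assumptions.
 */
static inline u32 *example_emit_regwrite(u32 *wrptr, u32 reg_offset, u32 value)
{
	*wrptr++ = REGDMA_OP_REGWRITE | (reg_offset & REGDMA_ADDR_OFFSET_MASK);
	*wrptr++ = value;
	return wrptr;
}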
  1863. /*
  1864. * sde_hw_rotator_start_regdma - start regdma operation
  1865. * @ctx: Pointer to rotator context
  1866. * @queue_id: Priority queue identifier
  1867. */
  1868. static u32 sde_hw_rotator_start_regdma(struct sde_hw_rotator_context *ctx,
  1869. enum sde_rot_queue_prio queue_id)
  1870. {
  1871. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  1872. struct sde_hw_rotator *rot = ctx->rot;
  1873. char __iomem *wrptr;
  1874. u32 regdmaSlot;
  1875. u32 offset;
  1876. u32 length;
  1877. u32 ts_length;
  1878. u32 enableInt;
  1879. u32 swts = 0;
  1880. u32 mask = 0;
  1881. u32 trig_sel;
  1882. bool int_trigger = false;
  1883. wrptr = sde_hw_rotator_get_regdma_segment(ctx);
  1884. /* Enable HW timestamp if supported in rotator */
  1885. if (test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map)) {
  1886. SDE_REGDMA_MODIFY(wrptr, ROTTOP_ROT_CNTR_CTRL,
  1887. ~BIT(queue_id), BIT(queue_id));
  1888. int_trigger = true;
  1889. } else if (ctx->sbuf_mode) {
  1890. int_trigger = true;
  1891. }
  1892. /*
  1893. * Last ROT command must be ROT_START before REGDMA start
  1894. */
  1895. SDE_REGDMA_WRITE(wrptr, ROTTOP_START_CTRL, ctx->start_ctrl);
  1896. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1897. /*
  1898. * Start REGDMA with command offset and size
  1899. */
  1900. regdmaSlot = sde_hw_rotator_get_regdma_ctxidx(ctx);
  1901. length = (wrptr - ctx->regdma_base) / 4;
  1902. offset = (ctx->regdma_base - (rot->mdss_base +
  1903. REGDMA_RAM_REGDMA_CMD_RAM)) / sizeof(u32);
  1904. enableInt = ((ctx->timestamp & 1) + 1) << 30;
  1905. trig_sel = ctx->sbuf_mode ? REGDMA_CMD_TRIG_SEL_MDP_FLUSH :
  1906. REGDMA_CMD_TRIG_SEL_SW_START;
  1907. SDEROT_DBG(
  1908. "regdma(%d)[%d] <== INT:0x%X|length:%d|offset:0x%X, ts:%X\n",
  1909. queue_id, regdmaSlot, enableInt, length, offset,
  1910. ctx->timestamp);
  1911. /* ensure the command packet is issued before the submit command */
  1912. wmb();
  1913. /* REGDMA submission for current context */
  1914. if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
  1915. SDE_ROTREG_WRITE(rot->mdss_base,
  1916. REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
  1917. (int_trigger ? enableInt : 0) | trig_sel |
  1918. ((length & 0x3ff) << 14) | offset);
  1919. swts = ctx->timestamp;
  1920. mask = ~SDE_REGDMA_SWTS_MASK;
  1921. } else {
  1922. SDE_ROTREG_WRITE(rot->mdss_base,
  1923. REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
  1924. (int_trigger ? enableInt : 0) | trig_sel |
  1925. ((length & 0x3ff) << 14) | offset);
  1926. swts = ctx->timestamp << SDE_REGDMA_SWTS_SHIFT;
  1927. mask = ~(SDE_REGDMA_SWTS_MASK << SDE_REGDMA_SWTS_SHIFT);
  1928. }
  1929. SDEROT_EVTLOG(ctx->timestamp, queue_id, length, offset, ctx->sbuf_mode);
  1930. /* sw timestamp update can only be used in offline multi-context mode */
  1931. if (!int_trigger) {
  1932. /* Write timestamp after previous rotator job finished */
  1933. sde_hw_rotator_setup_timestamp_packet(ctx, mask, swts);
  1934. offset += length;
  1935. ts_length = sde_hw_rotator_get_regdma_segment(ctx) - wrptr;
  1936. ts_length /= sizeof(u32);
  1937. WARN_ON((length + ts_length) > SDE_HW_ROT_REGDMA_SEG_SIZE);
1938. /* ensure command packet is issued before the submit command */
  1939. wmb();
  1940. SDEROT_EVTLOG(queue_id, enableInt, ts_length, offset);
  1941. if (queue_id == ROT_QUEUE_HIGH_PRIORITY) {
  1942. SDE_ROTREG_WRITE(rot->mdss_base,
  1943. REGDMA_CSR_REGDMA_QUEUE_0_SUBMIT,
  1944. enableInt | (ts_length << 14) | offset);
  1945. } else {
  1946. SDE_ROTREG_WRITE(rot->mdss_base,
  1947. REGDMA_CSR_REGDMA_QUEUE_1_SUBMIT,
  1948. enableInt | (ts_length << 14) | offset);
  1949. }
  1950. }
  1951. /* Update command queue write ptr */
  1952. sde_hw_rotator_put_regdma_segment(ctx, wrptr);
  1953. return ctx->timestamp;
  1954. }
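/*
 * Illustrative sketch (editorial addition, not part of the driver): the
 * queue submit word written above packs the command offset in the low bits,
 * the command length at bit 14, the trigger select, and (for the interrupt
 * case) a done-interrupt select derived from the timestamp parity at bits
 * 30/31. The hypothetical helper below only restates that packing.
 */
static inline u32 example_pack_regdma_submit(u32 offset, u32 length,
		u32 trig_sel, u32 timestamp, bool int_trigger)
{
	u32 enable_int = ((timestamp & 1) + 1) << 30;

	return (int_trigger ? enable_int : 0) | trig_sel |
			((length & 0x3ff) << 14) | offset;
}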
  1955. /*
  1956. * sde_hw_rotator_wait_done_no_regdma - wait for non-regdma completion
  1957. * @ctx: Pointer to rotator context
  1958. * @queue_id: Priority queue identifier
  1959. * @flags: Option flag
  1960. */
  1961. static u32 sde_hw_rotator_wait_done_no_regdma(
  1962. struct sde_hw_rotator_context *ctx,
  1963. enum sde_rot_queue_prio queue_id, u32 flag)
  1964. {
  1965. struct sde_hw_rotator *rot = ctx->rot;
  1966. int rc = 0;
  1967. u32 sts = 0;
  1968. u32 status;
  1969. unsigned long flags;
  1970. if (rot->irq_num >= 0) {
  1971. SDEROT_DBG("Wait for Rotator completion\n");
  1972. rc = wait_for_completion_timeout(&ctx->rot_comp,
  1973. ctx->sbuf_mode ?
  1974. msecs_to_jiffies(KOFF_TIMEOUT_SBUF) :
  1975. msecs_to_jiffies(rot->koff_timeout));
  1976. spin_lock_irqsave(&rot->rotisr_lock, flags);
  1977. status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
  1978. if (rc == 0) {
  1979. /*
1980. * Timeout: there might be an error,
1981. * or the rotator is still busy
  1982. */
  1983. if (status & ROT_BUSY_BIT)
  1984. SDEROT_ERR(
  1985. "Timeout waiting for rotator done\n");
  1986. else if (status & ROT_ERROR_BIT)
  1987. SDEROT_ERR(
  1988. "Rotator report error status\n");
  1989. else
  1990. SDEROT_WARN(
  1991. "Timeout waiting, but rotator job is done!!\n");
  1992. sde_hw_rotator_disable_irq(rot);
  1993. }
  1994. spin_unlock_irqrestore(&rot->rotisr_lock, flags);
  1995. } else {
  1996. int cnt = 200;
  1997. do {
  1998. udelay(500);
  1999. status = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
  2000. cnt--;
  2001. } while ((cnt > 0) && (status & ROT_BUSY_BIT)
  2002. && ((status & ROT_ERROR_BIT) == 0));
  2003. if (status & ROT_ERROR_BIT)
  2004. SDEROT_ERR("Rotator error\n");
  2005. else if (status & ROT_BUSY_BIT)
  2006. SDEROT_ERR("Rotator busy\n");
  2007. SDE_ROTREG_WRITE(rot->mdss_base, ROTTOP_INTR_CLEAR,
  2008. ROT_DONE_CLEAR);
  2009. }
  2010. sts = (status & ROT_ERROR_BIT) ? -ENODEV : 0;
  2011. return sts;
  2012. }
  2013. /*
  2014. * sde_hw_rotator_wait_done_regdma - wait for regdma completion
  2015. * @ctx: Pointer to rotator context
  2016. * @queue_id: Priority queue identifier
  2017. * @flags: Option flag
  2018. */
  2019. static u32 sde_hw_rotator_wait_done_regdma(
  2020. struct sde_hw_rotator_context *ctx,
  2021. enum sde_rot_queue_prio queue_id, u32 flag)
  2022. {
  2023. struct sde_hw_rotator *rot = ctx->rot;
  2024. int rc = 0;
  2025. bool timeout = false;
  2026. bool pending;
  2027. bool abort;
  2028. u32 status;
  2029. u32 last_isr;
  2030. u32 last_ts;
  2031. u32 int_id;
  2032. u32 swts;
  2033. u32 sts = 0;
  2034. u32 ubwcerr;
  2035. u32 hwts[ROT_QUEUE_MAX];
  2036. unsigned long flags;
  2037. if (rot->irq_num >= 0) {
  2038. SDEROT_DBG("Wait for REGDMA completion, ctx:%pK, ts:%X\n",
  2039. ctx, ctx->timestamp);
  2040. rc = wait_event_timeout(ctx->regdma_waitq,
  2041. !rot->ops.get_pending_ts(rot, ctx, &swts),
  2042. ctx->sbuf_mode ?
  2043. msecs_to_jiffies(KOFF_TIMEOUT_SBUF) :
  2044. msecs_to_jiffies(rot->koff_timeout));
  2045. ATRACE_INT("sde_rot_done", 0);
  2046. spin_lock_irqsave(&rot->rotisr_lock, flags);
  2047. last_isr = ctx->last_regdma_isr_status;
  2048. last_ts = ctx->last_regdma_timestamp;
  2049. abort = ctx->abort;
  2050. status = last_isr & REGDMA_INT_MASK;
  2051. int_id = last_ts & 1;
  2052. SDEROT_DBG("INT status:0x%X, INT id:%d, timestamp:0x%X\n",
  2053. status, int_id, last_ts);
  2054. if (rc == 0 || (status & REGDMA_INT_ERR_MASK) || abort) {
  2055. timeout = true;
  2056. pending = rot->ops.get_pending_ts(rot, ctx, &swts);
  2057. /* cache ubwcerr and hw timestamps while locked */
  2058. ubwcerr = SDE_ROTREG_READ(rot->mdss_base,
  2059. ROT_SSPP_UBWC_ERROR_STATUS);
  2060. hwts[ROT_QUEUE_HIGH_PRIORITY] =
  2061. __sde_hw_rotator_get_timestamp(rot,
  2062. ROT_QUEUE_HIGH_PRIORITY);
  2063. hwts[ROT_QUEUE_LOW_PRIORITY] =
  2064. __sde_hw_rotator_get_timestamp(rot,
  2065. ROT_QUEUE_LOW_PRIORITY);
  2066. spin_unlock_irqrestore(&rot->rotisr_lock, flags);
  2067. if (ubwcerr || abort ||
  2068. sde_hw_rotator_halt_vbif_xin_client()) {
  2069. /*
  2070. * Perform recovery for ROT SSPP UBWC decode
  2071. * error.
  2072. * - SW reset rotator hw block
2073. * - reset TS logic so all pending rotations
2074. * in the hw queue get signalled as done
  2075. */
  2076. if (!sde_hw_rotator_reset(rot, ctx))
  2077. status = REGDMA_INCOMPLETE_CMD;
  2078. else
  2079. status = ROT_ERROR_BIT;
  2080. } else {
  2081. status = ROT_ERROR_BIT;
  2082. }
  2083. spin_lock_irqsave(&rot->rotisr_lock, flags);
  2084. } else {
  2085. if (rc == 1)
  2086. SDEROT_WARN(
  2087. "REGDMA done but no irq, ts:0x%X/0x%X\n",
  2088. ctx->timestamp, swts);
  2089. status = 0;
  2090. }
  2091. spin_unlock_irqrestore(&rot->rotisr_lock, flags);
  2092. /* dump rot status after releasing lock if timeout occurred */
  2093. if (timeout) {
  2094. SDEROT_ERR(
  2095. "TIMEOUT, ts:0x%X/0x%X, pending:%d, abort:%d\n",
  2096. ctx->timestamp, swts, pending, abort);
  2097. SDEROT_ERR(
  2098. "Cached: HW ts0/ts1 = %x/%x, ubwcerr = %x\n",
  2099. hwts[ROT_QUEUE_HIGH_PRIORITY],
  2100. hwts[ROT_QUEUE_LOW_PRIORITY], ubwcerr);
  2101. if (status & REGDMA_WATCHDOG_INT)
  2102. SDEROT_ERR("REGDMA watchdog interrupt\n");
  2103. else if (status & REGDMA_INVALID_DESCRIPTOR)
  2104. SDEROT_ERR("REGDMA invalid descriptor\n");
  2105. else if (status & REGDMA_INCOMPLETE_CMD)
  2106. SDEROT_ERR("REGDMA incomplete command\n");
  2107. else if (status & REGDMA_INVALID_CMD)
  2108. SDEROT_ERR("REGDMA invalid command\n");
  2109. _sde_hw_rotator_dump_status(rot, &ubwcerr);
  2110. }
  2111. } else {
  2112. int cnt = 200;
  2113. bool pending;
  2114. do {
  2115. udelay(500);
  2116. last_isr = SDE_ROTREG_READ(rot->mdss_base,
  2117. REGDMA_CSR_REGDMA_INT_STATUS);
  2118. pending = rot->ops.get_pending_ts(rot, ctx, &swts);
  2119. cnt--;
  2120. } while ((cnt > 0) && pending &&
  2121. ((last_isr & REGDMA_INT_ERR_MASK) == 0));
  2122. if (last_isr & REGDMA_INT_ERR_MASK) {
  2123. SDEROT_ERR("Rotator error, ts:0x%X/0x%X status:%x\n",
  2124. ctx->timestamp, swts, last_isr);
  2125. _sde_hw_rotator_dump_status(rot, NULL);
  2126. status = ROT_ERROR_BIT;
  2127. } else if (pending) {
  2128. SDEROT_ERR("Rotator timeout, ts:0x%X/0x%X status:%x\n",
  2129. ctx->timestamp, swts, last_isr);
  2130. _sde_hw_rotator_dump_status(rot, NULL);
  2131. status = ROT_ERROR_BIT;
  2132. } else {
  2133. status = 0;
  2134. }
  2135. SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_CSR_REGDMA_INT_CLEAR,
  2136. last_isr);
  2137. }
  2138. sts = (status & (ROT_ERROR_BIT | REGDMA_INCOMPLETE_CMD)) ? -ENODEV : 0;
  2139. if (status & ROT_ERROR_BIT)
  2140. SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
  2141. "vbif_dbg_bus", "panic");
  2142. return sts;
  2143. }
  2144. /*
  2145. * setup_rotator_ops - setup callback functions for the low-level HAL
  2146. * @ops: Pointer to low-level ops callback
  2147. * @mode: Operation mode (non-regdma or regdma)
  2148. * @use_hwts: HW timestamp support mode
  2149. */
  2150. static void setup_rotator_ops(struct sde_hw_rotator_ops *ops,
  2151. enum sde_rotator_regdma_mode mode,
  2152. bool use_hwts)
  2153. {
  2154. ops->setup_rotator_fetchengine = sde_hw_rotator_setup_fetchengine;
  2155. ops->setup_rotator_wbengine = sde_hw_rotator_setup_wbengine;
  2156. if (mode == ROT_REGDMA_ON) {
  2157. ops->start_rotator = sde_hw_rotator_start_regdma;
  2158. ops->wait_rotator_done = sde_hw_rotator_wait_done_regdma;
  2159. } else {
  2160. ops->start_rotator = sde_hw_rotator_start_no_regdma;
  2161. ops->wait_rotator_done = sde_hw_rotator_wait_done_no_regdma;
  2162. }
  2163. if (use_hwts) {
  2164. ops->get_pending_ts = sde_hw_rotator_pending_hwts;
  2165. ops->update_ts = sde_hw_rotator_update_hwts;
  2166. } else {
  2167. ops->get_pending_ts = sde_hw_rotator_pending_swts;
  2168. ops->update_ts = sde_hw_rotator_update_swts;
  2169. }
  2170. }
  2171. /*
  2172. * sde_hw_rotator_swts_create - create software timestamp buffer
  2173. * @rot: Pointer to rotator hw
  2174. *
  2175. * This buffer is used by regdma to keep track of last completed command.
  2176. */
  2177. static int sde_hw_rotator_swts_create(struct sde_hw_rotator *rot)
  2178. {
  2179. int rc = 0;
  2180. struct sde_mdp_img_data *data;
  2181. u32 bufsize = sizeof(int) * SDE_HW_ROT_REGDMA_TOTAL_CTX * 2;
  2182. if (bufsize < SZ_4K)
  2183. bufsize = SZ_4K;
  2184. data = &rot->swts_buf;
  2185. data->len = bufsize;
  2186. data->srcp_dma_buf = sde_rot_get_dmabuf(data);
  2187. if (!data->srcp_dma_buf) {
  2188. SDEROT_ERR("Fail dmabuf create\n");
  2189. return -ENOMEM;
  2190. }
  2191. sde_smmu_ctrl(1);
  2192. data->srcp_attachment = sde_smmu_dma_buf_attach(data->srcp_dma_buf,
  2193. &rot->pdev->dev, SDE_IOMMU_DOMAIN_ROT_UNSECURE);
  2194. if (IS_ERR_OR_NULL(data->srcp_attachment)) {
  2195. SDEROT_ERR("sde_smmu_dma_buf_attach error\n");
  2196. rc = -ENOMEM;
  2197. goto err_put;
  2198. }
  2199. data->srcp_table = dma_buf_map_attachment(data->srcp_attachment,
  2200. DMA_BIDIRECTIONAL);
  2201. if (IS_ERR_OR_NULL(data->srcp_table)) {
  2202. SDEROT_ERR("dma_buf_map_attachment error\n");
  2203. rc = -ENOMEM;
  2204. goto err_detach;
  2205. }
  2206. rc = sde_smmu_map_dma_buf(data->srcp_dma_buf, data->srcp_table,
  2207. SDE_IOMMU_DOMAIN_ROT_UNSECURE, &data->addr,
  2208. &data->len, DMA_BIDIRECTIONAL);
  2209. if (rc < 0) {
  2210. SDEROT_ERR("smmu_map_dma_buf failed: (%d)\n", rc);
  2211. goto err_unmap;
  2212. }
  2213. data->mapped = true;
  2214. SDEROT_DBG("swts buffer mapped: %pad/%lx va:%pK\n", &data->addr,
  2215. data->len, rot->swts_buffer);
  2216. sde_smmu_ctrl(0);
  2217. return rc;
  2218. err_unmap:
  2219. dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
  2220. DMA_FROM_DEVICE);
  2221. err_detach:
  2222. dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
  2223. err_put:
  2224. data->srcp_dma_buf = NULL;
  2225. sde_smmu_ctrl(0);
  2226. return rc;
  2227. }
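/*
 * Illustrative sketch (editorial addition, not part of the driver): the swts
 * buffer allocated above holds one timestamp slot per regdma context for
 * each of the two priority queues and is rounded up to a 4KB minimum,
 * matching the sizing logic in sde_hw_rotator_swts_create(). The helper
 * below is hypothetical.
 */
static inline u32 example_swts_bufsize(u32 total_ctx, u32 num_queues)
{
	u32 bufsize = sizeof(u32) * total_ctx * num_queues;

	return (bufsize < SZ_4K) ? SZ_4K : bufsize;
}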
  2228. /*
  2229. * sde_hw_rotator_swts_destroy - destroy software timestamp buffer
  2230. * @rot: Pointer to rotator hw
  2231. */
  2232. static void sde_hw_rotator_swts_destroy(struct sde_hw_rotator *rot)
  2233. {
  2234. struct sde_mdp_img_data *data;
  2235. data = &rot->swts_buf;
  2236. sde_smmu_unmap_dma_buf(data->srcp_table, SDE_IOMMU_DOMAIN_ROT_UNSECURE,
  2237. DMA_FROM_DEVICE, data->srcp_dma_buf);
  2238. dma_buf_unmap_attachment(data->srcp_attachment, data->srcp_table,
  2239. DMA_FROM_DEVICE);
  2240. dma_buf_detach(data->srcp_dma_buf, data->srcp_attachment);
  2241. dma_buf_put(data->srcp_dma_buf);
  2242. data->addr = 0;
  2243. data->srcp_dma_buf = NULL;
  2244. data->srcp_attachment = NULL;
  2245. data->mapped = false;
  2246. }
  2247. /*
  2248. * sde_hw_rotator_pre_pmevent - SDE rotator core will call this before a
  2249. * PM event occurs
  2250. * @mgr: Pointer to rotator manager
2251. * @pmon: Boolean indicating an on/off power event
  2252. */
  2253. void sde_hw_rotator_pre_pmevent(struct sde_rot_mgr *mgr, bool pmon)
  2254. {
  2255. struct sde_hw_rotator *rot;
  2256. u32 l_ts, h_ts, l_hwts, h_hwts;
  2257. u32 rotsts, regdmasts, rotopmode;
  2258. /*
2259. * Compare the last HW timestamp with the SW timestamp before a power
2260. * off event. If there is a mismatch, it is quite possible that the
2261. * rotator HW is either hung or has not finished the last submitted
2262. * job. In that case, it is best to trigger a timeout event log to
2263. * capture useful event data for analysis.
  2264. */
  2265. if (!pmon && mgr && mgr->hw_data) {
  2266. rot = mgr->hw_data;
  2267. h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]) &
  2268. SDE_REGDMA_SWTS_MASK;
  2269. l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]) &
  2270. SDE_REGDMA_SWTS_MASK;
  2271. /* Need to turn on clock to access rotator register */
  2272. sde_rotator_clk_ctrl(mgr, true);
  2273. l_hwts = __sde_hw_rotator_get_timestamp(rot,
  2274. ROT_QUEUE_LOW_PRIORITY);
  2275. h_hwts = __sde_hw_rotator_get_timestamp(rot,
  2276. ROT_QUEUE_HIGH_PRIORITY);
  2277. regdmasts = SDE_ROTREG_READ(rot->mdss_base,
  2278. REGDMA_CSR_REGDMA_BLOCK_STATUS);
  2279. rotsts = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_STATUS);
  2280. rotopmode = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_OP_MODE);
  2281. SDEROT_DBG(
  2282. "swts(l/h):0x%x/0x%x, hwts(l/h):0x%x/0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
  2283. l_ts, h_ts, l_hwts, h_hwts,
  2284. regdmasts, rotsts);
  2285. SDEROT_EVTLOG(l_ts, h_ts, l_hwts, h_hwts, regdmasts, rotsts);
  2286. if (((l_ts != l_hwts) || (h_ts != h_hwts)) &&
  2287. ((regdmasts & REGDMA_BUSY) ||
  2288. (rotsts & ROT_STATUS_MASK))) {
  2289. SDEROT_ERR(
  2290. "Mismatch SWTS with HWTS: swts(l/h):0x%x/0x%x, hwts(l/h):0x%x/0x%x, regdma-sts:0x%x, rottop-sts:0x%x\n",
  2291. l_ts, h_ts, l_hwts, h_hwts,
  2292. regdmasts, rotsts);
  2293. _sde_hw_rotator_dump_status(rot, NULL);
  2294. SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
  2295. "vbif_dbg_bus", "panic");
  2296. } else if (!SDE_ROTTOP_IN_OFFLINE_MODE(rotopmode) &&
  2297. ((regdmasts & REGDMA_BUSY) ||
  2298. (rotsts & ROT_BUSY_BIT))) {
  2299. /*
2300. * the rotator can get stuck in inline mode while MDP is detached
  2301. */
  2302. SDEROT_WARN(
  2303. "Inline Rot busy: regdma-sts:0x%x, rottop-sts:0x%x, rottop-opmode:0x%x\n",
  2304. regdmasts, rotsts, rotopmode);
  2305. sde_hw_rotator_reset(rot, NULL);
  2306. } else if ((regdmasts & REGDMA_BUSY) ||
  2307. (rotsts & ROT_BUSY_BIT)) {
  2308. _sde_hw_rotator_dump_status(rot, NULL);
  2309. SDEROT_EVTLOG_TOUT_HANDLER("rot", "rot_dbg_bus",
  2310. "vbif_dbg_bus", "panic");
  2311. sde_hw_rotator_reset(rot, NULL);
  2312. }
  2313. /* Turn off rotator clock after checking rotator registers */
  2314. sde_rotator_clk_ctrl(mgr, false);
  2315. }
  2316. }
  2317. /*
  2318. * sde_hw_rotator_post_pmevent - SDE rotator core will call this after a
  2319. * PM event occurs
  2320. * @mgr: Pointer to rotator manager
2321. * @pmon: Boolean indicating an on/off power event
  2322. */
  2323. void sde_hw_rotator_post_pmevent(struct sde_rot_mgr *mgr, bool pmon)
  2324. {
  2325. struct sde_hw_rotator *rot;
  2326. u32 l_ts, h_ts;
  2327. /*
2328. * After a power on event, the rotator HW is reset to its default settings.
  2329. * It is necessary to synchronize the SW timestamp with the HW.
  2330. */
  2331. if (pmon && mgr && mgr->hw_data) {
  2332. rot = mgr->hw_data;
  2333. h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
  2334. l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
  2335. SDEROT_DBG("h_ts:0x%x, l_ts;0x%x\n", h_ts, l_ts);
  2336. SDEROT_EVTLOG(h_ts, l_ts);
  2337. rot->reset_hw_ts = true;
  2338. rot->last_hwts[ROT_QUEUE_LOW_PRIORITY] =
  2339. l_ts & SDE_REGDMA_SWTS_MASK;
  2340. rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY] =
  2341. h_ts & SDE_REGDMA_SWTS_MASK;
  2342. }
  2343. }
  2344. /*
  2345. * sde_hw_rotator_destroy - Destroy hw rotator and free allocated resources
  2346. * @mgr: Pointer to rotator manager
  2347. */
  2348. static void sde_hw_rotator_destroy(struct sde_rot_mgr *mgr)
  2349. {
  2350. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  2351. struct sde_hw_rotator *rot;
  2352. if (!mgr || !mgr->pdev || !mgr->hw_data) {
  2353. SDEROT_ERR("null parameters\n");
  2354. return;
  2355. }
  2356. rot = mgr->hw_data;
  2357. if (rot->irq_num >= 0)
  2358. devm_free_irq(&mgr->pdev->dev, rot->irq_num, mdata);
  2359. if (!test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) &&
  2360. rot->mode == ROT_REGDMA_ON)
  2361. sde_hw_rotator_swts_destroy(rot);
  2362. devm_kfree(&mgr->pdev->dev, mgr->hw_data);
  2363. mgr->hw_data = NULL;
  2364. }
  2365. /*
  2366. * sde_hw_rotator_alloc_ext - allocate rotator resource from rotator hw
  2367. * @mgr: Pointer to rotator manager
  2368. * @pipe_id: pipe identifier (not used)
  2369. * @wb_id: writeback identifier/priority queue identifier
  2370. *
  2371. * This function allocates a new hw rotator resource for the given priority.
  2372. */
  2373. static struct sde_rot_hw_resource *sde_hw_rotator_alloc_ext(
  2374. struct sde_rot_mgr *mgr, u32 pipe_id, u32 wb_id)
  2375. {
  2376. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  2377. struct sde_hw_rotator_resource_info *resinfo;
  2378. if (!mgr || !mgr->hw_data) {
  2379. SDEROT_ERR("null parameters\n");
  2380. return NULL;
  2381. }
  2382. /*
  2383. * Allocate rotator resource info. Each allocation is per
  2384. * HW priority queue
  2385. */
  2386. resinfo = devm_kzalloc(&mgr->pdev->dev, sizeof(*resinfo), GFP_KERNEL);
  2387. if (!resinfo) {
  2388. SDEROT_ERR("Failed allocation HW rotator resource info\n");
  2389. return NULL;
  2390. }
  2391. resinfo->rot = mgr->hw_data;
  2392. resinfo->hw.wb_id = wb_id;
  2393. atomic_set(&resinfo->hw.num_active, 0);
  2394. init_waitqueue_head(&resinfo->hw.wait_queue);
  2395. /* For non-regdma, only support one active session */
  2396. if (resinfo->rot->mode == ROT_REGDMA_OFF)
  2397. resinfo->hw.max_active = 1;
  2398. else {
  2399. resinfo->hw.max_active = SDE_HW_ROT_REGDMA_TOTAL_CTX - 1;
  2400. if (!test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map) &&
  2401. (!resinfo->rot->swts_buf.mapped))
  2402. sde_hw_rotator_swts_create(resinfo->rot);
  2403. }
  2404. sde_hw_rotator_enable_irq(resinfo->rot);
  2405. SDEROT_DBG("New rotator resource:%pK, priority:%d\n",
  2406. resinfo, wb_id);
  2407. return &resinfo->hw;
  2408. }
  2409. /*
  2410. * sde_hw_rotator_free_ext - free the given rotator resource
  2411. * @mgr: Pointer to rotator manager
  2412. * @hw: Pointer to rotator resource
  2413. */
  2414. static void sde_hw_rotator_free_ext(struct sde_rot_mgr *mgr,
  2415. struct sde_rot_hw_resource *hw)
  2416. {
  2417. struct sde_hw_rotator_resource_info *resinfo;
  2418. if (!mgr || !mgr->hw_data)
  2419. return;
  2420. resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
  2421. SDEROT_DBG(
  2422. "Free rotator resource:%pK, priority:%d, active:%d, pending:%d\n",
  2423. resinfo, hw->wb_id, atomic_read(&hw->num_active),
  2424. hw->pending_count);
  2425. sde_hw_rotator_disable_irq(resinfo->rot);
  2426. devm_kfree(&mgr->pdev->dev, resinfo);
  2427. }
  2428. /*
  2429. * sde_hw_rotator_alloc_rotctx - allocate rotator context
  2430. * @rot: Pointer to rotator hw
  2431. * @hw: Pointer to rotator resource
  2432. * @session_id: Session identifier of this context
  2433. * @sequence_id: Sequence identifier of this request
  2434. * @sbuf_mode: true if stream buffer is requested
  2435. *
  2436. * This function allocates a new rotator context for the given session id.
  2437. */
  2438. static struct sde_hw_rotator_context *sde_hw_rotator_alloc_rotctx(
  2439. struct sde_hw_rotator *rot,
  2440. struct sde_rot_hw_resource *hw,
  2441. u32 session_id,
  2442. u32 sequence_id,
  2443. bool sbuf_mode)
  2444. {
  2445. struct sde_hw_rotator_context *ctx;
  2446. /* Allocate rotator context */
  2447. ctx = devm_kzalloc(&rot->pdev->dev, sizeof(*ctx), GFP_KERNEL);
  2448. if (!ctx) {
  2449. SDEROT_ERR("Failed allocation HW rotator context\n");
  2450. return NULL;
  2451. }
  2452. ctx->rot = rot;
  2453. ctx->q_id = hw->wb_id;
  2454. ctx->session_id = session_id;
  2455. ctx->sequence_id = sequence_id;
  2456. ctx->hwres = hw;
  2457. ctx->timestamp = atomic_add_return(1, &rot->timestamp[ctx->q_id]);
  2458. ctx->timestamp &= SDE_REGDMA_SWTS_MASK;
  2459. ctx->is_secure = false;
  2460. ctx->sbuf_mode = sbuf_mode;
  2461. INIT_LIST_HEAD(&ctx->list);
  2462. ctx->regdma_base = rot->cmd_wr_ptr[ctx->q_id]
  2463. [sde_hw_rotator_get_regdma_ctxidx(ctx)];
  2464. ctx->regdma_wrptr = ctx->regdma_base;
  2465. ctx->ts_addr = (dma_addr_t)((u32 *)rot->swts_buf.addr +
  2466. ctx->q_id * SDE_HW_ROT_REGDMA_TOTAL_CTX +
  2467. sde_hw_rotator_get_regdma_ctxidx(ctx));
  2468. ctx->last_regdma_timestamp = SDE_REGDMA_SWTS_INVALID;
  2469. init_completion(&ctx->rot_comp);
  2470. init_waitqueue_head(&ctx->regdma_waitq);
  2471. /* Store rotator context for lookup purpose */
  2472. sde_hw_rotator_put_ctx(ctx);
  2473. SDEROT_DBG(
  2474. "New rot CTX:%pK, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
  2475. ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
  2476. ctx->q_id, ctx->timestamp,
  2477. atomic_read(&ctx->hwres->num_active),
  2478. ctx->sbuf_mode);
  2479. return ctx;
  2480. }
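/*
 * Illustrative sketch (editorial addition, not part of the driver): each
 * context gets a private software-timestamp slot inside the shared swts
 * buffer, indexed by its priority queue and its regdma context index, as
 * computed in sde_hw_rotator_alloc_rotctx() above. The helper below is
 * hypothetical.
 */
static inline dma_addr_t example_swts_slot_addr(dma_addr_t swts_base,
		u32 q_id, u32 total_ctx, u32 ctx_idx)
{
	/* one u32 slot per context, grouped per priority queue */
	return swts_base + sizeof(u32) * (q_id * total_ctx + ctx_idx);
}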
  2481. /*
  2482. * sde_hw_rotator_free_rotctx - free the given rotator context
  2483. * @rot: Pointer to rotator hw
  2484. * @ctx: Pointer to rotator context
  2485. */
  2486. static void sde_hw_rotator_free_rotctx(struct sde_hw_rotator *rot,
  2487. struct sde_hw_rotator_context *ctx)
  2488. {
  2489. if (!rot || !ctx)
  2490. return;
  2491. SDEROT_DBG(
  2492. "Free rot CTX:%pK, ctxidx:%d, session-id:%d, prio:%d, timestamp:%X, active:%d sbuf:%d\n",
  2493. ctx, sde_hw_rotator_get_regdma_ctxidx(ctx), ctx->session_id,
  2494. ctx->q_id, ctx->timestamp,
  2495. atomic_read(&ctx->hwres->num_active),
  2496. ctx->sbuf_mode);
2497. /* Remove rotator context from the lookup table */
  2498. sde_hw_rotator_clr_ctx(ctx);
  2499. devm_kfree(&rot->pdev->dev, ctx);
  2500. }
  2501. /*
  2502. * sde_hw_rotator_config - configure hw for the given rotation entry
  2503. * @hw: Pointer to rotator resource
  2504. * @entry: Pointer to rotation entry
  2505. *
  2506. * This function setup the fetch/writeback/rotator blocks, as well as VBIF
  2507. * based on the given rotation entry.
  2508. */
  2509. static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
  2510. struct sde_rot_entry *entry)
  2511. {
  2512. struct sde_rot_data_type *mdata = sde_rot_get_mdata();
  2513. struct sde_hw_rotator *rot;
  2514. struct sde_hw_rotator_resource_info *resinfo;
  2515. struct sde_hw_rotator_context *ctx;
  2516. struct sde_hw_rot_sspp_cfg sspp_cfg;
  2517. struct sde_hw_rot_wb_cfg wb_cfg;
  2518. u32 danger_lut = 0; /* applicable for realtime client only */
  2519. u32 safe_lut = 0; /* applicable for realtime client only */
  2520. u32 flags = 0;
  2521. u32 rststs = 0;
  2522. struct sde_rotation_item *item;
  2523. int ret;
  2524. if (!hw || !entry) {
  2525. SDEROT_ERR("null hw resource/entry\n");
  2526. return -EINVAL;
  2527. }
  2528. resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
  2529. rot = resinfo->rot;
  2530. item = &entry->item;
  2531. ctx = sde_hw_rotator_alloc_rotctx(rot, hw, item->session_id,
  2532. item->sequence_id, item->output.sbuf);
  2533. if (!ctx) {
  2534. SDEROT_ERR("Failed allocating rotator context!!\n");
  2535. return -EINVAL;
  2536. }
  2537. /* save entry for debugging purposes */
  2538. ctx->last_entry = entry;
	if (test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
		if (entry->dst_buf.sbuf) {
			u32 op_mode;

			if (entry->item.trigger ==
					SDE_ROTATOR_TRIGGER_COMMAND)
				ctx->start_ctrl = (rot->cmd_trigger << 4);
			else if (entry->item.trigger ==
					SDE_ROTATOR_TRIGGER_VIDEO)
				ctx->start_ctrl = (rot->vid_trigger << 4);
			else
				ctx->start_ctrl = 0;

			ctx->sys_cache_mode = BIT(15) |
					((item->output.scid & 0x1f) << 8) |
					(item->output.writeback ? 0x5 : 0);

			ctx->op_mode = BIT(4) |
				((ctx->rot->sbuf_headroom & 0xff) << 8);

			/* detect transition to inline mode */
			op_mode = (SDE_ROTREG_READ(rot->mdss_base,
					ROTTOP_OP_MODE) >> 4) & 0x3;
			if (!op_mode) {
				u32 status;

				status = SDE_ROTREG_READ(rot->mdss_base,
						ROTTOP_STATUS);
				if (status & BIT(0)) {
					SDEROT_ERR("rotator busy 0x%x\n",
							status);
					_sde_hw_rotator_dump_status(rot, NULL);
					SDEROT_EVTLOG_TOUT_HANDLER("rot",
							"vbif_dbg_bus",
							"panic");
				}
			}
		} else {
			ctx->start_ctrl = BIT(0);
			ctx->sys_cache_mode = 0;
			ctx->op_mode = 0;
		}
	} else {
		ctx->start_ctrl = BIT(0);
	}

	SDEROT_EVTLOG(ctx->start_ctrl, ctx->sys_cache_mode, ctx->op_mode);
	/*
	 * If the rotator HW was reset but the PM event notification was
	 * missed, re-initialize the SW timestamps from the HW timestamps.
	 */
	rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
	if (!rot->reset_hw_ts && rststs) {
		u32 l_ts, h_ts, l_hwts, h_hwts;

		h_hwts = __sde_hw_rotator_get_timestamp(rot,
				ROT_QUEUE_HIGH_PRIORITY);
		l_hwts = __sde_hw_rotator_get_timestamp(rot,
				ROT_QUEUE_LOW_PRIORITY);
		h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
		l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
		SDEROT_EVTLOG(0xbad0, rststs, l_hwts, h_hwts, l_ts, h_ts);

		if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY) {
			h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
			l_ts &= SDE_REGDMA_SWTS_MASK;
		} else {
			l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
			h_ts &= SDE_REGDMA_SWTS_MASK;
		}

		SDEROT_DBG("h_ts:0x%x, l_ts:0x%x\n", h_ts, l_ts);
		SDEROT_EVTLOG(0x900d, h_ts, l_ts);
		rot->last_hwts[ROT_QUEUE_LOW_PRIORITY] = l_ts;
		rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY] = h_ts;

		rot->ops.update_ts(rot, ROT_QUEUE_HIGH_PRIORITY, h_ts);
		rot->ops.update_ts(rot, ROT_QUEUE_LOW_PRIORITY, l_ts);
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);

		/* ensure write is issued to the rotator HW */
		wmb();
	}

	if (rot->reset_hw_ts) {
		SDEROT_EVTLOG(rot->last_hwts[ROT_QUEUE_LOW_PRIORITY],
				rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY]);
		rot->ops.update_ts(rot, ROT_QUEUE_HIGH_PRIORITY,
				rot->last_hwts[ROT_QUEUE_HIGH_PRIORITY]);
		rot->ops.update_ts(rot, ROT_QUEUE_LOW_PRIORITY,
				rot->last_hwts[ROT_QUEUE_LOW_PRIORITY]);
		SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);

		/* ensure write is issued to the rotator HW */
		wmb();
		rot->reset_hw_ts = false;
	}
	flags = (item->flags & SDE_ROTATION_FLIP_LR) ?
			SDE_ROT_FLAG_FLIP_LR : 0;
	flags |= (item->flags & SDE_ROTATION_FLIP_UD) ?
			SDE_ROT_FLAG_FLIP_UD : 0;
	flags |= (item->flags & SDE_ROTATION_90) ?
			SDE_ROT_FLAG_ROT_90 : 0;
	flags |= (item->flags & SDE_ROTATION_DEINTERLACE) ?
			SDE_ROT_FLAG_DEINTERLACE : 0;
	flags |= (item->flags & SDE_ROTATION_SECURE) ?
			SDE_ROT_FLAG_SECURE_OVERLAY_SESSION : 0;
	flags |= (item->flags & SDE_ROTATION_SECURE_CAMERA) ?
			SDE_ROT_FLAG_SECURE_CAMERA_SESSION : 0;

	sspp_cfg.img_width = item->input.width;
	sspp_cfg.img_height = item->input.height;
	sspp_cfg.fps = entry->perf->config.frame_rate;
	sspp_cfg.bw = entry->perf->bw;
	sspp_cfg.fmt = sde_get_format_params(item->input.format);
	if (!sspp_cfg.fmt) {
		SDEROT_ERR("null format\n");
		ret = -EINVAL;
		goto error;
	}
	sspp_cfg.src_rect = &item->src_rect;
	sspp_cfg.data = &entry->src_buf;
	sde_mdp_get_plane_sizes(sspp_cfg.fmt, item->input.width,
			item->input.height, &sspp_cfg.src_plane,
			0, /* No bwc_mode */
			(flags & SDE_ROT_FLAG_SOURCE_ROTATED_90) ?
					true : false);

	rot->ops.setup_rotator_fetchengine(ctx, ctx->q_id,
			&sspp_cfg, danger_lut, safe_lut,
			entry->dnsc_factor_w, entry->dnsc_factor_h, flags);

	wb_cfg.img_width = item->output.width;
	wb_cfg.img_height = item->output.height;
	wb_cfg.fps = entry->perf->config.frame_rate;
	wb_cfg.bw = entry->perf->bw;
	wb_cfg.fmt = sde_get_format_params(item->output.format);
	if (!wb_cfg.fmt) {
		SDEROT_ERR("null format\n");
		ret = -EINVAL;
		goto error;
	}

	wb_cfg.dst_rect = &item->dst_rect;
	wb_cfg.data = &entry->dst_buf;
	sde_mdp_get_plane_sizes(wb_cfg.fmt, item->output.width,
			item->output.height, &wb_cfg.dst_plane,
			0, /* No bwc_mode */
			(flags & SDE_ROT_FLAG_ROT_90) ? true : false);
	wb_cfg.v_downscale_factor = entry->dnsc_factor_h;
	wb_cfg.h_downscale_factor = entry->dnsc_factor_w;
	wb_cfg.prefill_bw = item->prefill_bw;

	rot->ops.setup_rotator_wbengine(ctx, ctx->q_id, &wb_cfg, flags);

	/* setup VA mapping for debugfs */
	if (rot->dbgmem) {
		sde_hw_rotator_map_vaddr(&ctx->src_dbgbuf,
				&item->input,
				&entry->src_buf);

		sde_hw_rotator_map_vaddr(&ctx->dst_dbgbuf,
				&item->output,
				&entry->dst_buf);
	}

	SDEROT_EVTLOG(ctx->timestamp, flags,
			item->input.width, item->input.height,
			item->output.width, item->output.height,
			entry->src_buf.p[0].addr, entry->dst_buf.p[0].addr,
			item->input.format, item->output.format,
			entry->perf->config.frame_rate);

	/* initialize static vbif setting */
	sde_mdp_init_vbif();
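	/*
	 * VBIF outstanding-transaction (OT) limits are applied separately to
	 * the read (SSPP) and write (writeback) XIN clients below, and only
	 * for offline rotation; inline (sbuf) mode skips them.
	 */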
	if (!ctx->sbuf_mode && mdata->default_ot_rd_limit) {
		struct sde_mdp_set_ot_params ot_params;

		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
		ot_params.xin_id = mdata->vbif_xin_id[XIN_SSPP];
		ot_params.num = 0; /* not used */
		ot_params.width = entry->perf->config.input.width;
		ot_params.height = entry->perf->config.input.height;
		ot_params.fps = entry->perf->config.frame_rate;
		ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_RD_LIM_CONF;
		ot_params.reg_off_mdp_clk_ctrl =
				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
		ot_params.bit_off_mdp_clk_ctrl =
				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN0;
		ot_params.fmt = ctx->is_traffic_shaping ?
			SDE_PIX_FMT_ABGR_8888 :
			entry->perf->config.input.format;
		ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
		ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
		sde_mdp_set_ot_limit(&ot_params);
	}

	if (!ctx->sbuf_mode && mdata->default_ot_wr_limit) {
		struct sde_mdp_set_ot_params ot_params;

		memset(&ot_params, 0, sizeof(struct sde_mdp_set_ot_params));
		ot_params.xin_id = mdata->vbif_xin_id[XIN_WRITEBACK];
		ot_params.num = 0; /* not used */
		ot_params.width = entry->perf->config.input.width;
		ot_params.height = entry->perf->config.input.height;
		ot_params.fps = entry->perf->config.frame_rate;
		ot_params.reg_off_vbif_lim_conf = MMSS_VBIF_WR_LIM_CONF;
		ot_params.reg_off_mdp_clk_ctrl =
				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0;
		ot_params.bit_off_mdp_clk_ctrl =
				MMSS_VBIF_NRT_VBIF_CLK_FORCE_CTRL0_XIN1;
		ot_params.fmt = ctx->is_traffic_shaping ?
			SDE_PIX_FMT_ABGR_8888 :
			entry->perf->config.input.format;
		ot_params.rotsts_base = rot->mdss_base + ROTTOP_STATUS;
		ot_params.rotsts_busy_mask = ROT_BUSY_BIT;
		sde_mdp_set_ot_limit(&ot_params);
	}
	if (test_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map)) {
		u32 qos_lut = 0; /* low priority for nrt read client */

		trace_rot_perf_set_qos_luts(mdata->vbif_xin_id[XIN_SSPP],
				sspp_cfg.fmt->format, qos_lut,
				sde_mdp_is_linear_format(sspp_cfg.fmt));

		SDE_ROTREG_WRITE(rot->mdss_base, ROT_SSPP_CREQ_LUT, qos_lut);
	}

	/* VBIF QoS and other settings */
	if (!ctx->sbuf_mode) {
		if (mdata->parent_pdev)
			sde_hw_rotator_vbif_rt_setting();
		else
			sde_hw_rotator_vbif_setting(rot);
	}

	return 0;

error:
	sde_hw_rotator_free_rotctx(rot, ctx);
	return ret;
}
/*
 * sde_hw_rotator_cancel - cancel hw configuration for the given rotation entry
 * @hw: Pointer to rotator resource
 * @entry: Pointer to rotation entry
 *
 * This function cancels a previously configured rotation entry.
 */
static int sde_hw_rotator_cancel(struct sde_rot_hw_resource *hw,
		struct sde_rot_entry *entry)
{
	struct sde_hw_rotator *rot;
	struct sde_hw_rotator_resource_info *resinfo;
	struct sde_hw_rotator_context *ctx;
	unsigned long flags;

	if (!hw || !entry) {
		SDEROT_ERR("null hw resource/entry\n");
		return -EINVAL;
	}

	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
	rot = resinfo->rot;

	/* Lookup rotator context from session-id */
	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
			entry->item.sequence_id, hw->wb_id);
	if (!ctx) {
		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
				entry->item.session_id);
		return -EINVAL;
	}
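	/*
	 * Cancelling fast-forwards the SW timestamp of this context's queue
	 * to the context's own timestamp, so the queue treats the job as
	 * already retired before the context is freed.
	 */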
	spin_lock_irqsave(&rot->rotisr_lock, flags);
	rot->ops.update_ts(rot, ctx->q_id, ctx->timestamp);
	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	SDEROT_EVTLOG(entry->item.session_id, ctx->timestamp);

	if (rot->dbgmem) {
		sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
		sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
	}

	/* Current rotator context job is finished, time to free up */
	sde_hw_rotator_free_rotctx(rot, ctx);

	return 0;
}
/*
 * sde_hw_rotator_kickoff - kickoff processing on the given entry
 * @hw: Pointer to rotator resource
 * @entry: Pointer to rotation entry
 */
static int sde_hw_rotator_kickoff(struct sde_rot_hw_resource *hw,
		struct sde_rot_entry *entry)
{
	struct sde_hw_rotator *rot;
	struct sde_hw_rotator_resource_info *resinfo;
	struct sde_hw_rotator_context *ctx;

	if (!hw || !entry) {
		SDEROT_ERR("null hw resource/entry\n");
		return -EINVAL;
	}

	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
	rot = resinfo->rot;

	/* Lookup rotator context from session-id */
	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
			entry->item.sequence_id, hw->wb_id);
	if (!ctx) {
		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
				entry->item.session_id);
		return -EINVAL;
	}

	rot->ops.start_rotator(ctx, ctx->q_id);

	return 0;
}
static int sde_hw_rotator_abort_kickoff(struct sde_rot_hw_resource *hw,
		struct sde_rot_entry *entry)
{
	struct sde_hw_rotator *rot;
	struct sde_hw_rotator_resource_info *resinfo;
	struct sde_hw_rotator_context *ctx;
	unsigned long flags;

	if (!hw || !entry) {
		SDEROT_ERR("null hw resource/entry\n");
		return -EINVAL;
	}

	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
	rot = resinfo->rot;

	/* Lookup rotator context from session-id */
	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
			entry->item.sequence_id, hw->wb_id);
	if (!ctx) {
		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
				entry->item.session_id);
		return -EINVAL;
	}
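	/*
	 * Abort path: advance the SW timestamp for this queue, mark the
	 * context as aborted and wake all waiters on regdma_waitq.
	 */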
	spin_lock_irqsave(&rot->rotisr_lock, flags);
	rot->ops.update_ts(rot, ctx->q_id, ctx->timestamp);
	ctx->abort = true;
	wake_up_all(&ctx->regdma_waitq);
	spin_unlock_irqrestore(&rot->rotisr_lock, flags);
	SDEROT_EVTLOG(entry->item.session_id, ctx->timestamp);

	return 0;
}
/*
 * sde_hw_rotator_wait4done - wait for completion notification
 * @hw: Pointer to rotator resource
 * @entry: Pointer to rotation entry
 *
 * This function blocks until the given entry completes, an error is
 * detected, or the wait times out.
 */
static int sde_hw_rotator_wait4done(struct sde_rot_hw_resource *hw,
		struct sde_rot_entry *entry)
{
	struct sde_hw_rotator *rot;
	struct sde_hw_rotator_resource_info *resinfo;
	struct sde_hw_rotator_context *ctx;
	int ret;

	if (!hw || !entry) {
		SDEROT_ERR("null hw resource/entry\n");
		return -EINVAL;
	}

	resinfo = container_of(hw, struct sde_hw_rotator_resource_info, hw);
	rot = resinfo->rot;

	/* Lookup rotator context from session-id */
	ctx = sde_hw_rotator_get_ctx(rot, entry->item.session_id,
			entry->item.sequence_id, hw->wb_id);
	if (!ctx) {
		SDEROT_ERR("Cannot locate rotator ctx from session id:%d\n",
				entry->item.session_id);
		return -EINVAL;
	}

	ret = rot->ops.wait_rotator_done(ctx, ctx->q_id, 0);

	if (rot->dbgmem) {
		sde_hw_rotator_unmap_vaddr(&ctx->src_dbgbuf);
		sde_hw_rotator_unmap_vaddr(&ctx->dst_dbgbuf);
	}

	/* Current rotator context job is finished, time to free up */
	sde_hw_rotator_free_rotctx(rot, ctx);

	return ret;
}
/*
 * sde_rotator_hw_rev_init - setup feature and/or capability bitmask
 * @rot: Pointer to hw rotator
 *
 * This function initializes the feature and/or capability bitmask based on
 * the h/w version read from the device.
 */
static int sde_rotator_hw_rev_init(struct sde_hw_rotator *rot)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	u32 hw_version;

	if (!mdata) {
		SDEROT_ERR("null rotator data\n");
		return -EINVAL;
	}

	hw_version = SDE_ROTREG_READ(rot->mdss_base, ROTTOP_HW_VERSION);
	SDEROT_DBG("hw version %8.8x\n", hw_version);

	clear_bit(SDE_QOS_PER_PIPE_IB, mdata->sde_qos_map);
	set_bit(SDE_QOS_OVERHEAD_FACTOR, mdata->sde_qos_map);
	set_bit(SDE_QOS_OTLIM, mdata->sde_qos_map);
	set_bit(SDE_QOS_PER_PIPE_LUT, mdata->sde_qos_map);
	clear_bit(SDE_QOS_SIMPLIFIED_PREFILL, mdata->sde_qos_map);

	set_bit(SDE_CAPS_R3_WB, mdata->sde_caps_map);

	/* features exposed via rotator top h/w version */
	if (hw_version != SDE_ROT_TYPE_V1_0) {
		SDEROT_DBG("Supporting 1.5 downscale for SDE Rotator\n");
		set_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map);
	}

	set_bit(SDE_CAPS_SEC_ATTACH_DETACH_SMMU, mdata->sde_caps_map);

	mdata->nrt_vbif_dbg_bus = nrt_vbif_dbg_bus_r3;
	mdata->nrt_vbif_dbg_bus_size =
			ARRAY_SIZE(nrt_vbif_dbg_bus_r3);

	mdata->rot_dbg_bus = rot_dbgbus_r3;
	mdata->rot_dbg_bus_size = ARRAY_SIZE(rot_dbgbus_r3);

	mdata->regdump = sde_rot_r3_regdump;
	mdata->regdump_size = ARRAY_SIZE(sde_rot_r3_regdump);
	SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG, 0);

	/* features exposed via mdss h/w version */
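	/*
	 * Each branch below selects the supported input/output pixel format
	 * tables, the UBWC revision, and whether inline (sbuf) rotation with
	 * the system cache is available for that MDSS major/minor revision;
	 * unknown revisions fall back to the v3 format tables at the end.
	 */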
	if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version, SDE_MDP_HW_REV_600)) {
		SDEROT_DBG("Supporting sys cache inline rotation\n");
		set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
		set_bit(SDE_CAPS_UBWC_4, mdata->sde_caps_map);
		set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
		set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_inpixfmts;
		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_outpixfmts;
		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
		rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
				sde_hw_rotator_v4_inpixfmts_sbuf;
		rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
		rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
				sde_hw_rotator_v4_outpixfmts_sbuf;
		rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
		rot->downscale_caps =
			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
	} else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
			SDE_MDP_HW_REV_500)) {
		SDEROT_DBG("Supporting sys cache inline rotation\n");
		set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
		set_bit(SDE_CAPS_UBWC_3, mdata->sde_caps_map);
		set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
		set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_inpixfmts;
		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_outpixfmts;
		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
		rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
				sde_hw_rotator_v4_inpixfmts_sbuf;
		rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
		rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
				sde_hw_rotator_v4_outpixfmts_sbuf;
		rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
		rot->downscale_caps =
			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
	} else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
			SDE_MDP_HW_REV_530) ||
			IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
			SDE_MDP_HW_REV_520)) {
		SDEROT_DBG("Supporting sys cache inline rotation\n");
		set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
		set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
		set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
		set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_inpixfmts;
		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_outpixfmts;
		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
		rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
				sde_hw_rotator_v4_inpixfmts_sbuf;
		rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
		rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
				sde_hw_rotator_v4_outpixfmts_sbuf;
		rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
		rot->downscale_caps =
			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
	} else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
			SDE_MDP_HW_REV_540)) {
		SDEROT_DBG("Sys cache inline rotation not supported\n");
		set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
		set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
		set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_inpixfmts;
		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_outpixfmts;
		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
		rot->downscale_caps =
			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
	} else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
			SDE_MDP_HW_REV_400) ||
			IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
			SDE_MDP_HW_REV_410)) {
		SDEROT_DBG("Supporting sys cache inline rotation\n");
		set_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map);
		set_bit(SDE_CAPS_UBWC_2, mdata->sde_caps_map);
		set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_inpixfmts;
		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_outpixfmts;
		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
		rot->inpixfmts[SDE_ROTATOR_MODE_SBUF] =
				sde_hw_rotator_v4_inpixfmts_sbuf;
		rot->num_inpixfmt[SDE_ROTATOR_MODE_SBUF] =
				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts_sbuf);
		rot->outpixfmts[SDE_ROTATOR_MODE_SBUF] =
				sde_hw_rotator_v4_outpixfmts_sbuf;
		rot->num_outpixfmt[SDE_ROTATOR_MODE_SBUF] =
				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts_sbuf);
		rot->downscale_caps =
			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
	} else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
			SDE_MDP_HW_REV_630)) {
		SDEROT_DBG("Sys cache inline rotation not supported\n");
		set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
		set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_inpixfmts;
		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_outpixfmts;
		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
		rot->downscale_caps =
			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
	} else if (IS_SDE_MAJOR_MINOR_SAME(mdata->mdss_version,
			SDE_MDP_HW_REV_660)) {
		SDEROT_DBG("Sys cache inline rotation not supported\n");
		set_bit(SDE_CAPS_PARTIALWR, mdata->sde_caps_map);
		set_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map);
		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_inpixfmts;
		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_inpixfmts);
		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v4_outpixfmts;
		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v4_outpixfmts);
		rot->downscale_caps =
			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
	} else {
		rot->inpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v3_inpixfmts;
		rot->num_inpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v3_inpixfmts);
		rot->outpixfmts[SDE_ROTATOR_MODE_OFFLINE] =
				sde_hw_rotator_v3_outpixfmts;
		rot->num_outpixfmt[SDE_ROTATOR_MODE_OFFLINE] =
				ARRAY_SIZE(sde_hw_rotator_v3_outpixfmts);
		rot->downscale_caps = (hw_version == SDE_ROT_TYPE_V1_0) ?
			"LINEAR/2/4/8/16/32/64 TILE/2/4 TP10/2" :
			"LINEAR/1.5/2/4/8/16/32/64 TILE/1.5/2/4 TP10/1.5/2";
	}

	return 0;
}
/*
 * sde_hw_rotator_validate_entry - validate rotation entry
 * @mgr: Pointer to rotator manager
 * @entry: Pointer to rotation entry
 *
 * This function validates the given rotation entry and may apply fixups
 * (future improvement) where available. It returns 0 if the entry is
 * valid and an error code otherwise.
 */
static int sde_hw_rotator_validate_entry(struct sde_rot_mgr *mgr,
		struct sde_rot_entry *entry)
{
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	struct sde_hw_rotator *hw_data;
	int ret = 0;
	u16 src_w, src_h, dst_w, dst_h;
	struct sde_rotation_item *item = &entry->item;
	struct sde_mdp_format_params *fmt;

	if (!mgr || !entry || !mgr->hw_data) {
		SDEROT_ERR("invalid parameters\n");
		return -EINVAL;
	}

	hw_data = mgr->hw_data;
	if (hw_data->maxlinewidth < item->src_rect.w) {
		SDEROT_ERR("invalid src width %u\n", item->src_rect.w);
		return -EINVAL;
	}

	src_w = item->src_rect.w;
	src_h = item->src_rect.h;

	if (item->flags & SDE_ROTATION_90) {
		dst_w = item->dst_rect.h;
		dst_h = item->dst_rect.w;
	} else {
		dst_w = item->dst_rect.w;
		dst_h = item->dst_rect.h;
	}

	entry->dnsc_factor_w = 0;
	entry->dnsc_factor_h = 0;

	if (item->output.sbuf &&
			!test_bit(SDE_CAPS_SBUF_1, mdata->sde_caps_map)) {
		SDEROT_ERR("stream buffer not supported\n");
		return -EINVAL;
	}

	if ((src_w != dst_w) || (src_h != dst_h)) {
		if (!dst_w || !dst_h) {
			SDEROT_DBG("zero output width/height not supported\n");
			ret = -EINVAL;
			goto dnsc_err;
		}
		if ((src_w % dst_w) || (src_h % dst_h)) {
			SDEROT_DBG("non-integral scale not supported\n");
			ret = -EINVAL;
			goto dnsc_1p5_check;
		}
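		/*
		 * Integral downscale path: each ratio must be a power of two
		 * no larger than 64; (x & (x - 1)) == 0 is the power-of-two
		 * check used below.
		 */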
		entry->dnsc_factor_w = src_w / dst_w;
		if ((entry->dnsc_factor_w & (entry->dnsc_factor_w - 1)) ||
				(entry->dnsc_factor_w > 64)) {
			SDEROT_DBG("non power-of-2 w_scale not supported\n");
			ret = -EINVAL;
			goto dnsc_err;
		}

		entry->dnsc_factor_h = src_h / dst_h;
		if ((entry->dnsc_factor_h & (entry->dnsc_factor_h - 1)) ||
				(entry->dnsc_factor_h > 64)) {
			SDEROT_DBG("non power-of-2 h_scale not supported\n");
			ret = -EINVAL;
			goto dnsc_err;
		}
	}

	fmt = sde_get_format_params(item->output.format);
	/*
	 * Rotator downscale supports at most 4x for UBWC formats and
	 * at most 2x for TP10/TP10_UBWC formats.
	 */
	if (sde_mdp_is_ubwc_format(fmt) && (entry->dnsc_factor_h > 4)) {
		SDEROT_DBG("max downscale for UBWC format is 4\n");
		ret = -EINVAL;
		goto dnsc_err;
	}
	if (sde_mdp_is_tp10_format(fmt) && (entry->dnsc_factor_h > 2)) {
		SDEROT_DBG("downscale with TP10 cannot be more than 2\n");
		ret = -EINVAL;
	}
	goto dnsc_err;

dnsc_1p5_check:
	/* Check for 1.5 downscale that only applies to V2 HW */
	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map)) {
		entry->dnsc_factor_w = src_w / dst_w;
		if ((entry->dnsc_factor_w != 1) ||
				((dst_w * 3) != (src_w * 2))) {
			SDEROT_DBG(
				"Not supporting non 1.5 downscale width ratio, src_w:%d, dst_w:%d\n",
				src_w, dst_w);
			ret = -EINVAL;
			goto dnsc_err;
		}

		entry->dnsc_factor_h = src_h / dst_h;
		if ((entry->dnsc_factor_h != 1) ||
				((dst_h * 3) != (src_h * 2))) {
			SDEROT_DBG(
				"Not supporting non 1.5 downscale height ratio, src_h:%d, dst_h:%d\n",
				src_h, dst_h);
			ret = -EINVAL;
			goto dnsc_err;
		}
		ret = 0;
	}

dnsc_err:
	/* Downscaler does not support asymmetrical dnsc */
	if (entry->dnsc_factor_w != entry->dnsc_factor_h) {
		SDEROT_DBG("asymmetric downscale not supported\n");
		ret = -EINVAL;
	}

	if (ret) {
		entry->dnsc_factor_w = 0;
		entry->dnsc_factor_h = 0;
	}
	return ret;
}
/*
 * sde_hw_rotator_show_caps - output capability info to sysfs 'caps' file
 * @mgr: Pointer to rotator manager
 * @attr: Pointer to device attribute interface
 * @buf: Pointer to output buffer
 * @len: Length of output buffer
 */
static ssize_t sde_hw_rotator_show_caps(struct sde_rot_mgr *mgr,
		struct device_attribute *attr, char *buf, ssize_t len)
{
	struct sde_hw_rotator *hw_data;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int cnt = 0;

	if (!mgr || !buf)
		return 0;

	hw_data = mgr->hw_data;

#define SPRINT(fmt, ...) \
		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))

	/* insert capabilities here */
	if (test_bit(SDE_CAPS_R3_1P5_DOWNSCALE, mdata->sde_caps_map))
		SPRINT("min_downscale=1.5\n");
	else
		SPRINT("min_downscale=2.0\n");

	SPRINT("downscale_compression=1\n");

	if (hw_data->downscale_caps)
		SPRINT("downscale_ratios=%s\n", hw_data->downscale_caps);

	SPRINT("max_line_width=%d\n", sde_rotator_get_maxlinewidth(mgr));
#undef SPRINT

	return cnt;
}
/*
 * sde_hw_rotator_show_state - output state info to sysfs 'state' file
 * @mgr: Pointer to rotator manager
 * @attr: Pointer to device attribute interface
 * @buf: Pointer to output buffer
 * @len: Length of output buffer
 */
static ssize_t sde_hw_rotator_show_state(struct sde_rot_mgr *mgr,
		struct device_attribute *attr, char *buf, ssize_t len)
{
	struct sde_hw_rotator *rot;
	struct sde_hw_rotator_context *ctx;
	int cnt = 0;
	int num_active = 0;
	int i, j;

	if (!mgr || !buf) {
		SDEROT_ERR("null parameters\n");
		return 0;
	}

	rot = mgr->hw_data;

#define SPRINT(fmt, ...) \
		(cnt += scnprintf(buf + cnt, len - cnt, fmt, ##__VA_ARGS__))

	if (rot) {
		SPRINT("rot_mode=%d\n", rot->mode);
		SPRINT("irq_num=%d\n", rot->irq_num);

		if (rot->mode == ROT_REGDMA_OFF) {
			SPRINT("max_active=1\n");
			SPRINT("num_active=%d\n", rot->rotCtx[0][0] ? 1 : 0);
		} else {
			for (i = 0; i < ROT_QUEUE_MAX; i++) {
				for (j = 0; j < SDE_HW_ROT_REGDMA_TOTAL_CTX;
						j++) {
					ctx = rot->rotCtx[i][j];

					if (ctx) {
						SPRINT(
							"rotCtx[%d][%d]:%pK\n",
							i, j, ctx);
						++num_active;
					}
				}
			}

			SPRINT("max_active=%d\n", SDE_HW_ROT_REGDMA_TOTAL_CTX);
			SPRINT("num_active=%d\n", num_active);
		}
	}
#undef SPRINT

	return cnt;
}
/*
 * sde_hw_rotator_get_pixfmt - get the indexed pixel format
 * @mgr: Pointer to rotator manager
 * @index: index of pixel format
 * @input: true for input port; false for output port
 * @mode: operating mode
 */
static u32 sde_hw_rotator_get_pixfmt(struct sde_rot_mgr *mgr,
		int index, bool input, u32 mode)
{
	struct sde_hw_rotator *rot;

	if (!mgr || !mgr->hw_data) {
		SDEROT_ERR("null parameters\n");
		return 0;
	}

	rot = mgr->hw_data;

	if (mode >= SDE_ROTATOR_MODE_MAX) {
		SDEROT_ERR("invalid rotator mode %d\n", mode);
		return 0;
	}

	if (input) {
		if ((index < rot->num_inpixfmt[mode]) && rot->inpixfmts[mode])
			return rot->inpixfmts[mode][index];
		else
			return 0;
	} else {
		if ((index < rot->num_outpixfmt[mode]) && rot->outpixfmts[mode])
			return rot->outpixfmts[mode][index];
		else
			return 0;
	}
}
/*
 * sde_hw_rotator_is_valid_pixfmt - verify if the given pixel format is valid
 * @mgr: Pointer to rotator manager
 * @pixfmt: pixel format to be verified
 * @input: true for input port; false for output port
 * @mode: operating mode
 */
static int sde_hw_rotator_is_valid_pixfmt(struct sde_rot_mgr *mgr, u32 pixfmt,
		bool input, u32 mode)
{
	struct sde_hw_rotator *rot;
	const u32 *pixfmts;
	u32 num_pixfmt;
	int i;

	if (!mgr || !mgr->hw_data) {
		SDEROT_ERR("null parameters\n");
		return false;
	}

	rot = mgr->hw_data;

	if (mode >= SDE_ROTATOR_MODE_MAX) {
		SDEROT_ERR("invalid rotator mode %d\n", mode);
		return false;
	}

	if (input) {
		pixfmts = rot->inpixfmts[mode];
		num_pixfmt = rot->num_inpixfmt[mode];
	} else {
		pixfmts = rot->outpixfmts[mode];
		num_pixfmt = rot->num_outpixfmt[mode];
	}

	if (!pixfmts || !num_pixfmt) {
		SDEROT_ERR("invalid pixel format tables\n");
		return false;
	}

	for (i = 0; i < num_pixfmt; i++)
		if (pixfmts[i] == pixfmt)
			return true;

	return false;
}
/*
 * sde_hw_rotator_get_downscale_caps - get scaling capability string
 * @mgr: Pointer to rotator manager
 * @caps: Pointer to capability string buffer; NULL to return maximum length
 * @len: length of capability string buffer
 * return: length of capability string
 */
static int sde_hw_rotator_get_downscale_caps(struct sde_rot_mgr *mgr,
		char *caps, int len)
{
	struct sde_hw_rotator *rot;
	int rc = 0;

	if (!mgr || !mgr->hw_data) {
		SDEROT_ERR("null parameters\n");
		return -EINVAL;
	}

	rot = mgr->hw_data;

	if (rot->downscale_caps) {
		if (caps)
			rc = snprintf(caps, len, "%s", rot->downscale_caps);
		else
			rc = strlen(rot->downscale_caps);
	}

	return rc;
}
/*
 * sde_hw_rotator_get_maxlinewidth - get maximum line width supported
 * @mgr: Pointer to rotator manager
 * return: maximum line width supported by hardware
 */
static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
{
	struct sde_hw_rotator *rot;

	if (!mgr || !mgr->hw_data) {
		SDEROT_ERR("null parameters\n");
		return -EINVAL;
	}

	rot = mgr->hw_data;

	return rot->maxlinewidth;
}

/*
 * sde_hw_rotator_dump_status - dump status to debug output
 * @mgr: Pointer to rotator manager
 * return: none
 */
static void sde_hw_rotator_dump_status(struct sde_rot_mgr *mgr)
{
	if (!mgr || !mgr->hw_data) {
		SDEROT_ERR("null parameters\n");
		return;
	}

	_sde_hw_rotator_dump_status(mgr->hw_data, NULL);
}
/*
 * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
 * @hw_data: Pointer to rotator hw
 * @dev: Pointer to platform device
 */
static int sde_hw_rotator_parse_dt(struct sde_hw_rotator *hw_data,
		struct platform_device *dev)
{
	int ret = 0;
	u32 data;

	if (!hw_data || !dev)
		return -EINVAL;
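	/*
	 * All of the properties below are optional; when a read fails the
	 * driver falls back to a built-in default (regdma off, A5X highest
	 * bank bit, DEFAULT_UBWC_MALSIZE/SWIZZLE, DEFAULT_SBUF_HEADROOM and
	 * DEFAULT_MAXLINEWIDTH) and clears the error.
	 */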
	ret = of_property_read_u32(dev->dev.of_node, "qcom,mdss-rot-mode",
			&data);
	if (ret) {
		SDEROT_DBG("default to regdma off\n");
		ret = 0;
		hw_data->mode = ROT_REGDMA_OFF;
	} else if (data < ROT_REGDMA_MAX) {
		SDEROT_DBG("set to regdma mode %d\n", data);
		hw_data->mode = data;
	} else {
		SDEROT_ERR("regdma mode out of range. default to regdma off\n");
		hw_data->mode = ROT_REGDMA_OFF;
	}

	ret = of_property_read_u32(dev->dev.of_node,
			"qcom,mdss-highest-bank-bit", &data);
	if (ret) {
		SDEROT_DBG("default to A5X bank\n");
		ret = 0;
		hw_data->highest_bank = 2;
	} else {
		SDEROT_DBG("set highest bank bit to %d\n", data);
		hw_data->highest_bank = data;
	}

	ret = of_property_read_u32(dev->dev.of_node,
			"qcom,sde-ubwc-malsize", &data);
	if (ret) {
		ret = 0;
		hw_data->ubwc_malsize = DEFAULT_UBWC_MALSIZE;
	} else {
		SDEROT_DBG("set ubwc malsize to %d\n", data);
		hw_data->ubwc_malsize = data;
	}

	ret = of_property_read_u32(dev->dev.of_node,
			"qcom,sde-ubwc_swizzle", &data);
	if (ret) {
		ret = 0;
		hw_data->ubwc_swizzle = DEFAULT_UBWC_SWIZZLE;
	} else {
		SDEROT_DBG("set ubwc swizzle to %d\n", data);
		hw_data->ubwc_swizzle = data;
	}

	ret = of_property_read_u32(dev->dev.of_node,
			"qcom,mdss-sbuf-headroom", &data);
	if (ret) {
		ret = 0;
		hw_data->sbuf_headroom = DEFAULT_SBUF_HEADROOM;
	} else {
		SDEROT_DBG("set sbuf headroom to %d\n", data);
		hw_data->sbuf_headroom = data;
	}

	ret = of_property_read_u32(dev->dev.of_node,
			"qcom,mdss-rot-linewidth", &data);
	if (ret) {
		ret = 0;
		hw_data->maxlinewidth = DEFAULT_MAXLINEWIDTH;
	} else {
		SDEROT_DBG("set mdss-rot-linewidth to %d\n", data);
		hw_data->maxlinewidth = data;
	}

	return ret;
}
/*
 * sde_rotator_r3_init - initialize the r3 module
 * @mgr: Pointer to rotator manager
 *
 * This function sets up the r3 callback functions, parses r3 specific
 * device tree settings, installs the r3 specific interrupt handler,
 * and initializes the r3 internal data structures.
 */
int sde_rotator_r3_init(struct sde_rot_mgr *mgr)
{
	struct sde_hw_rotator *rot;
	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
	int i;
	int ret;

	rot = devm_kzalloc(&mgr->pdev->dev, sizeof(*rot), GFP_KERNEL);
	if (!rot)
		return -ENOMEM;

	mgr->hw_data = rot;
	mgr->queue_count = ROT_QUEUE_MAX;

	rot->mdss_base = mdata->sde_io.base;
	rot->pdev = mgr->pdev;
	rot->koff_timeout = KOFF_TIMEOUT;
	rot->vid_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;
	rot->cmd_trigger = ROTTOP_START_CTRL_TRIG_SEL_MDP;

	/* Assign ops */
	mgr->ops_hw_destroy = sde_hw_rotator_destroy;
	mgr->ops_hw_alloc = sde_hw_rotator_alloc_ext;
	mgr->ops_hw_free = sde_hw_rotator_free_ext;
	mgr->ops_config_hw = sde_hw_rotator_config;
	mgr->ops_cancel_hw = sde_hw_rotator_cancel;
	mgr->ops_abort_hw = sde_hw_rotator_abort_kickoff;
	mgr->ops_kickoff_entry = sde_hw_rotator_kickoff;
	mgr->ops_wait_for_entry = sde_hw_rotator_wait4done;
	mgr->ops_hw_validate_entry = sde_hw_rotator_validate_entry;
	mgr->ops_hw_show_caps = sde_hw_rotator_show_caps;
	mgr->ops_hw_show_state = sde_hw_rotator_show_state;
	mgr->ops_hw_create_debugfs = sde_rotator_r3_create_debugfs;
	mgr->ops_hw_get_pixfmt = sde_hw_rotator_get_pixfmt;
	mgr->ops_hw_is_valid_pixfmt = sde_hw_rotator_is_valid_pixfmt;
	mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
	mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
	mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
	mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
	mgr->ops_hw_dump_status = sde_hw_rotator_dump_status;

	ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
	if (ret)
		goto error_parse_dt;

	rot->irq_num = -EINVAL;
	atomic_set(&rot->irq_enabled, 0);

	ret = sde_rotator_hw_rev_init(rot);
	if (ret)
		goto error_hw_rev_init;

	setup_rotator_ops(&rot->ops, rot->mode,
			test_bit(SDE_CAPS_HW_TIMESTAMP, mdata->sde_caps_map));

	spin_lock_init(&rot->rotctx_lock);
	spin_lock_init(&rot->rotisr_lock);

	/* REGDMA initialization */
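	/*
	 * With REGDMA disabled, the command write pointers index into the SW
	 * command queue in memory; otherwise they point directly into the
	 * REGDMA command RAM, with the low-priority queue occupying the
	 * segments after the high-priority queue's segments.
	 */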
	if (rot->mode == ROT_REGDMA_OFF) {
		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
			rot->cmd_wr_ptr[0][i] = (char __iomem *)(
					&rot->cmd_queue[
					SDE_HW_ROT_REGDMA_SEG_SIZE * i]);
	} else {
		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
			rot->cmd_wr_ptr[ROT_QUEUE_HIGH_PRIORITY][i] =
				rot->mdss_base +
					REGDMA_RAM_REGDMA_CMD_RAM +
					SDE_HW_ROT_REGDMA_SEG_SIZE * 4 * i;

		for (i = 0; i < SDE_HW_ROT_REGDMA_TOTAL_CTX; i++)
			rot->cmd_wr_ptr[ROT_QUEUE_LOW_PRIORITY][i] =
				rot->mdss_base +
					REGDMA_RAM_REGDMA_CMD_RAM +
					SDE_HW_ROT_REGDMA_SEG_SIZE * 4 *
					(i + SDE_HW_ROT_REGDMA_TOTAL_CTX);
	}

	for (i = 0; i < ROT_QUEUE_MAX; i++) {
		atomic_set(&rot->timestamp[i], 0);
		INIT_LIST_HEAD(&rot->sbuf_ctx[i]);
	}

	mdata->sde_rot_hw = rot;
	return 0;

error_hw_rev_init:
	devm_kfree(&mgr->pdev->dev, mgr->hw_data);
error_parse_dt:
	return ret;
}